spinlock.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

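/*
 * The 32-bit lock word keeps serving_now (the head) in bits 0..15 and the
 * ticket counter (the tail) in bits 16..31, which is why the helpers below
 * compare the two 16-bit halves.  The function below is an illustrative,
 * non-atomic sketch of the algorithm only: its name is made up, it is not
 * used anywhere, and the ticket grab must really be a single ll/sc sequence
 * as in arch_spin_lock() further down.
 */
static inline void __ticket_lock_sketch(arch_spinlock_t *lock)
{
	u16 my_ticket = lock->h.ticket++;	/* must really be atomic */

	while (READ_ONCE(lock->h.serving_now) != my_ticket)
		cpu_relax();			/* spin until our number is served */
}
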
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.h.serving_now == lock.h.ticket;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

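/*
 * Wait for whoever holds the lock right now to drop it: spin until either
 * the lock looks free or serving_now has moved past the holder we sampled.
 */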
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->h.serving_now);
	smp_rmb();
	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		if (tmp.h.serving_now == tmp.h.ticket ||
		    tmp.h.serving_now != owner)
			break;

		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

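/*
 * The lock is contended when more than one CPU holds or is queued for it,
 * i.e. the ticket (tail) is more than one ahead of serving_now (head).
 */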
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

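/*
 * Fast path: ll/sc a new ticket (adding 0x10000 bumps the high half-word)
 * and, if serving_now already equals our ticket, we own the lock.  Slow path
 * (in .subsection 2): spin roughly 32 iterations per place we are back in
 * the queue, then re-read serving_now and recompute the distance, until our
 * number comes up.
 */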
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:	.insn						\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

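/*
 * Only the lock holder ever writes serving_now, so releasing the lock is a
 * plain 16-bit store of the next ticket number: wmb() makes the critical
 * section's stores visible first and nudge_writes() pushes the store out to
 * the other CPUs promptly.
 */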
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

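/*
 * Take the lock only if nobody holds or waits for it, i.e. if the sampled
 * ticket and serving_now halves agree; returns 1 on success and 0 otherwise,
 * without ever queueing.
 */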
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:	.insn						\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

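/*
 * The rwlock word encodes a writer in the sign bit and the reader count in
 * the low bits: 0 is unlocked, a positive value is the number of readers,
 * and 0x80000000 means a writer holds the lock.  The helpers below are an
 * illustrative, non-atomic sketch of that encoding only: their names are
 * made up, they are not used anywhere, and the updates must really be done
 * with ll/sc loops as in the real code that follows.
 */
static inline void __arch_read_lock_sketch(arch_rwlock_t *rw)
{
	while ((int)READ_ONCE(rw->lock) < 0)	/* a writer holds the lock */
		cpu_relax();
	rw->lock += 1;				/* must really be atomic */
}

static inline void __arch_write_lock_sketch(arch_rwlock_t *rw)
{
	while (READ_ONCE(rw->lock) != 0)	/* wait for readers and writer */
		cpu_relax();
	rw->lock = 0x80000000;			/* must really be atomic */
}
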
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

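/*
 * Dropping a read lock just decrements the reader count atomically; the
 * smp_mb__before_llsc() provides the release ordering.
 */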
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	addiu	%1, -1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	addiu	%1, -1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

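/*
 * The writer owns every bit of the word, so after the release barrier a
 * plain store of 0 is enough to drop the lock.
 */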
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb__before_llsc();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

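/*
 * The trylock variants return 1 on success and 0, without spinning, when a
 * writer (for read_trylock) or any holder at all (for write_trylock) is in
 * the way.
 */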
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:	.insn						\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:	.insn					\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */