lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import "unsafe"

// This implementation depends on OS-specific implementations of
//
//	runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
//		Atomically,
//			if(*addr == val) sleep
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	runtime·futexwakeup(uint32 *addr, uint32 cnt)
//		If any procs are sleeping on addr, wake up at most cnt.

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
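
// active_spin, active_spin_cnt and passive_spin bound how long a contended
// locker busy-waits before blocking: up to active_spin rounds of
// procyield(active_spin_cnt), then passive_spin rounds of osyield, and only
// then futexsleep.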

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

func futexsleep(addr *uint32, val uint32, ns int64)
func futexwakeup(addr *uint32, cnt uint32)

// We use the uintptr mutex.key and note.key as a uint32.
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		gothrow("runtime·lock: lock count")
	}
	gp.m.locks++
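
	// xchg is the runtime's atomic exchange on a uint32 and returns the
	// previous value; cas (used in the spin loops below) is the
	// corresponding compare-and-swap.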

	// Speculative grab for lock.
	v := xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}
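
		// procyield spins briefly on the CPU without giving up the
		// thread; osyield below yields the thread to the OS scheduler
		// instead.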

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
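
		// A wakeup (possibly spurious) sends us back around the loop.
		// From here on wait is mutex_sleeping, so if we later take the
		// lock we leave it marked mutex_sleeping and unlock still wakes
		// any remaining sleepers.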
	}
}

func unlock(l *mutex) {
	v := xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		gothrow("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}
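
	// Waking a single sleeper is enough: when it runs it either takes the
	// lock (keeping the mutex_sleeping mark) or goes back to sleep, so the
	// remaining sleepers are not lost.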

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		gothrow("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}
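
// notewakeup marks the note n as signalled and wakes the thread, if any,
// blocked on it in notesleep or notetsleep. Waking the same note twice
// without an intervening noteclear throws.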
func notewakeup(n *note) {
	old := xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		gothrow("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
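
// notesleep blocks the calling thread until the note is signalled by
// notewakeup. It must run on g0: it parks the whole M at the OS level,
// outside the Go scheduler.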
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		gothrow("notesleep not on g0")
	}
	for atomicload(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, -1)
		gp.m.blocked = false
	}
}
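
// notetsleep_internal sleeps until the note is signalled or ns nanoseconds
// have elapsed; ns < 0 means no timeout. It reports whether the note was
// signalled.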
//go:nosplit
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		for atomicload(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, -1)
			gp.m.blocked = false
		}
		return true
	}

	if atomicload(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		gp.m.blocked = false
		if atomicload(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomicload(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.gcing == 0 {
		gothrow("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		gothrow("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}