threadpool.nim

#
#
#            Nim's Runtime Library
#        (c) Copyright 2015 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## Implements Nim's `parallel & spawn statements <manual_experimental.html#parallel-amp-spawn>`_.
##
## Unstable API.
##
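## Example
## =======
##
## A minimal usage sketch (illustrative; the module must be compiled with
## `--threads:on`):
##
## .. code-block:: Nim
##   import std/threadpool
##
##   proc square(x: int): int = x * x
##
##   var futures: array[0..9, FlowVar[int]]
##   for i in 0..9:
##     futures[i] = spawn square(i)   # runs on a pool thread
##   for fv in futures:
##     echo(^fv)                      # `^` blocks until the value arrived
##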
## See also
## ========
## * `threads module <threads.html>`_ for basic thread support
## * `locks module <locks.html>`_ for locks and condition variables
## * `asyncdispatch module <asyncdispatch.html>`_ for asynchronous IO

when not compileOption("threads"):
  {.error: "Threadpool requires --threads:on option.".}

import cpuinfo, cpuload, locks, os

when defined(nimPreviewSlimSystem):
  import std/[assertions, typedthreads, sysatomics]

{.push stackTrace: off.}

type
  Semaphore = object
    c: Cond
    L: Lock
    counter: int

proc initSemaphore(cv: var Semaphore) =
  initCond(cv.c)
  initLock(cv.L)

proc destroySemaphore(cv: var Semaphore) {.inline.} =
  deinitCond(cv.c)
  deinitLock(cv.L)

proc blockUntil(cv: var Semaphore) =
  acquire(cv.L)
  while cv.counter <= 0:
    wait(cv.c, cv.L)
  dec cv.counter
  release(cv.L)

proc signal(cv: var Semaphore) =
  acquire(cv.L)
  inc cv.counter
  release(cv.L)
  signal(cv.c)

const CacheLineSize = 64 # true for most archs

type
  Barrier {.compilerproc.} = object
    entered: int
    cv: Semaphore # Semaphore takes 3 words at least
    left {.align(CacheLineSize).}: int
    interest {.align(CacheLineSize).}: bool # whether the master is interested in the "all done" event

proc barrierEnter(b: ptr Barrier) {.compilerproc, inline.} =
  # due to the signaling between threads, it is ensured we are the only
  # one with access to 'entered', so we don't need 'atomicInc' here:
  inc b.entered
  # We also need no 'fence' instructions here, as 'nimArgsPassingDone'
  # will be called soon and that already performs a fence for us.

proc barrierLeave(b: ptr Barrier) {.compilerproc, inline.} =
  atomicInc b.left
  when not defined(x86): fence()
  # We may not have seen the final value of b.entered yet,
  # so we need to check for >= instead of ==.
  if b.interest and b.left >= b.entered: signal(b.cv)

proc openBarrier(b: ptr Barrier) {.compilerproc, inline.} =
  b.entered = 0
  b.left = 0
  b.interest = false

proc closeBarrier(b: ptr Barrier) {.compilerproc.} =
  fence()
  if b.left != b.entered:
    b.cv.initSemaphore()
    fence()
    b.interest = true
    fence()
    while b.left != b.entered: blockUntil(b.cv)
    destroySemaphore(b.cv)

{.pop.}

# ----------------------------------------------------------------------------

type
  AwaitInfo = object
    cv: Semaphore
    idx: int

  FlowVarBase* = ref FlowVarBaseObj ## Untyped base class for `FlowVar[T] <#FlowVar>`_.
  FlowVarBaseObj {.acyclic.} = object of RootObj
    ready, usesSemaphore, awaited: bool
    cv: Semaphore # for 'blockUntilAny' support
    ai: ptr AwaitInfo
    idx: int
    data: pointer # we incRef and unref it to keep it alive; note this MUST NOT
                  # be RootRef here otherwise the wrong GC keeps track of it!
    owner: pointer # ptr Worker

  FlowVarObj[T] {.acyclic.} = object of FlowVarBaseObj
    blob: T

  FlowVar*[T] {.compilerproc.} = ref FlowVarObj[T] ## A data flow variable.

  ToFreeQueue = object
    len: int
    lock: Lock
    empty: Semaphore
    data: array[128, pointer]

  WorkerProc = proc (thread, args: pointer) {.nimcall, gcsafe.}

  Worker = object
    taskArrived: Semaphore
    taskStarted: Semaphore #\
    # task data:
    f: WorkerProc
    data: pointer
    ready: bool # put it here for correct alignment!
    initialized: bool # whether it has even been initialized
    shutdown: bool # the pool requests to shut down this worker thread
    q: ToFreeQueue
    readyForTask: Semaphore

const threadpoolWaitMs {.intdefine.}: int = 100

proc blockUntil*(fv: var FlowVarBaseObj) =
  ## Waits until the value for `fv` arrives.
  ##
  ## Usually it is not necessary to call this explicitly.
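  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc work(): int = 21 * 2
  ##   let fv = spawn work()
  ##   blockUntil(fv[])    # wait explicitly; `^fv` would block implicitly
  ##   assert (^fv) == 42  # does not block anymore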
  if fv.usesSemaphore and not fv.awaited:
    fv.awaited = true
    blockUntil(fv.cv)
    destroySemaphore(fv.cv)

proc selectWorker(w: ptr Worker; fn: WorkerProc; data: pointer): bool =
  if cas(addr w.ready, true, false):
    w.data = data
    w.f = fn
    signal(w.taskArrived)
    blockUntil(w.taskStarted)
    result = true

proc cleanFlowVars(w: ptr Worker) =
  let q = addr(w.q)
  acquire(q.lock)
  for i in 0 ..< q.len:
    GC_unref(cast[RootRef](q.data[i]))
    #echo "GC_unref"
  q.len = 0
  release(q.lock)

proc wakeupWorkerToProcessQueue(w: ptr Worker) =
  # we have to ensure it's us who wakes up the owning thread.
  # This is quite horrible code, but it runs so rarely that it doesn't matter:
  while not cas(addr w.ready, true, false):
    cpuRelax()
    discard
  w.data = nil
  w.f = proc (w, a: pointer) {.nimcall.} =
    let w = cast[ptr Worker](w)
    cleanFlowVars(w)
    signal(w.q.empty)
  signal(w.taskArrived)

proc attach(fv: FlowVarBase; i: int): bool =
  acquire(fv.cv.L)
  if fv.cv.counter <= 0:
    fv.idx = i
    result = true
  else:
    result = false
  release(fv.cv.L)

proc finished(fv: var FlowVarBaseObj) =
  doAssert fv.ai.isNil, "flowVar is still attached to a 'blockUntilAny'"
  # we have to protect against the rare case where the owner of the flowVar
  # simply disregards the flowVar and yet the flowVar has not been written
  # to yet:
  blockUntil(fv)
  if fv.data.isNil: return
  let owner = cast[ptr Worker](fv.owner)
  let q = addr(owner.q)
  acquire(q.lock)
  while not (q.len < q.data.len):
    #echo "EXHAUSTED!"
    release(q.lock)
    wakeupWorkerToProcessQueue(owner)
    blockUntil(q.empty)
    acquire(q.lock)
  q.data[q.len] = cast[pointer](fv.data)
  inc q.len
  release(q.lock)
  fv.data = nil
  # the worker thread waits for "data" to be set to nil before shutting down
  owner.data = nil

proc `=destroy`[T](fv: var FlowVarObj[T]) =
  finished(fv)
  `=destroy`(fv.blob)

proc nimCreateFlowVar[T](): FlowVar[T] {.compilerproc.} =
  new(result)

proc nimFlowVarCreateSemaphore(fv: FlowVarBase) {.compilerproc.} =
  fv.cv.initSemaphore()
  fv.usesSemaphore = true

proc nimFlowVarSignal(fv: FlowVarBase) {.compilerproc.} =
  if fv.ai != nil:
    acquire(fv.ai.cv.L)
    fv.ai.idx = fv.idx
    inc fv.ai.cv.counter
    release(fv.ai.cv.L)
    signal(fv.ai.cv.c)
  if fv.usesSemaphore:
    signal(fv.cv)

proc awaitAndThen*[T](fv: FlowVar[T]; action: proc (x: T) {.closure.}) =
  ## Blocks until `fv` is available and then passes its value
  ## to `action`.
  ##
  ## Note that due to Nim's parameter passing semantics, this
  ## means that `T` doesn't need to be copied, so `awaitAndThen` can
  ## sometimes be more efficient than the `^ proc <#^,FlowVar[T]>`_.
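  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc greeting(name: string): string = "Hello, " & name
  ##   let fv = spawn greeting("threadpool")
  ##   awaitAndThen(fv, proc (s: string) = echo s)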
  blockUntil(fv[])
  when defined(nimV2):
    action(fv.blob)
  elif T is string or T is seq:
    action(cast[T](fv.data))
  elif T is ref:
    {.error: "'awaitAndThen' not available for FlowVar[ref]".}
  else:
    action(fv.blob)
  finished(fv[])

proc unsafeRead*[T](fv: FlowVar[ref T]): ptr T =
  ## Blocks until the value is available and then returns this value.
  blockUntil(fv[])
  when defined(nimV2):
    result = cast[ptr T](fv.blob)
  else:
    result = cast[ptr T](fv.data)
  finished(fv[])

proc `^`*[T](fv: FlowVar[T]): T =
  ## Blocks until the value is available and then returns this value.
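  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc add(x, y: int): int = x + y
  ##   let fv = spawn add(40, 2)
  ##   echo(^fv)   # blocks until `add` has run on the pool, then prints 42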
  blockUntil(fv[])
  when not defined(nimV2) and (T is string or T is seq or T is ref):
    deepCopy result, cast[T](fv.data)
  else:
    result = fv.blob
  finished(fv[])

proc blockUntilAny*(flowVars: openArray[FlowVarBase]): int =
  ## Awaits any of the given `flowVars`. Returns the index of one `flowVar`
  ## for which a value arrived.
  ##
  ## A `flowVar` only supports one call to `blockUntilAny` at the same time.
  ## That means if you `blockUntilAny([a,b])` and `blockUntilAny([b,c])`,
  ## the second call will only block until `c` arrives. If there is no
  ## `flowVar` left to wait on, -1 is returned.
  ##
  ## **Note:** This results in non-deterministic behaviour and should be avoided.
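  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc fetch(url: string): string = "<contents of " & url & ">"
  ##   let a = spawn fetch("https://example.com/a")
  ##   let b = spawn fetch("https://example.com/b")
  ##   let first = blockUntilAny([FlowVarBase(a), FlowVarBase(b)])
  ##   echo "finished first: ", first   # index into the passed array, or -1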
  var ai: AwaitInfo
  ai.cv.initSemaphore()
  var conflicts = 0
  result = -1
  for i in 0 .. flowVars.high:
    if cas(addr flowVars[i].ai, nil, addr ai):
      if not attach(flowVars[i], i):
        result = i
        break
    else:
      inc conflicts
  if conflicts < flowVars.len:
    if result < 0:
      blockUntil(ai.cv)
      result = ai.idx
    for i in 0 .. flowVars.high:
      discard cas(addr flowVars[i].ai, addr ai, nil)
  destroySemaphore(ai.cv)

proc isReady*(fv: FlowVarBase): bool =
  ## Determines whether the specified `FlowVarBase`'s value is available.
  ##
  ## If `true`, awaiting `fv` will not block.
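  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   import std/os
  ##
  ##   proc slowAnswer(): int =
  ##     sleep(200)
  ##     42
  ##
  ##   let fv = spawn slowAnswer()
  ##   while not fv.isReady:
  ##     echo "still computing ..."
  ##     sleep(50)   # do other work instead of blocking on `^fv`
  ##   echo(^fv)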
  if fv.usesSemaphore and not fv.awaited:
    acquire(fv.cv.L)
    result = fv.cv.counter > 0
    release(fv.cv.L)
  else:
    result = true

proc nimArgsPassingDone(p: pointer) {.compilerproc.} =
  let w = cast[ptr Worker](p)
  signal(w.taskStarted)

const
  MaxThreadPoolSize* {.intdefine.} = 256 ## Maximum size of the thread pool. 256 threads
                                         ## should be good enough for anybody ;-)
  MaxDistinguishedThread* {.intdefine.} = 32 ## Maximum number of "distinguished" threads.

type
  ThreadId* = range[0..MaxDistinguishedThread-1] ## A thread identifier.

var
  currentPoolSize: int
  maxPoolSize = MaxThreadPoolSize
  minPoolSize = 4
  gSomeReady: Semaphore
  readyWorker: ptr Worker

# A workaround for recursion deadlock issue
# https://github.com/nim-lang/Nim/issues/4597
var
  numSlavesLock: Lock
  numSlavesRunning {.guard: numSlavesLock.}: int
  numSlavesWaiting {.guard: numSlavesLock.}: int
  isSlave {.threadvar.}: bool

numSlavesLock.initLock

gSomeReady.initSemaphore()

proc slave(w: ptr Worker) {.thread.} =
  isSlave = true
  while true:
    if w.shutdown:
      w.shutdown = false
      atomicDec currentPoolSize
      while true:
        if w.data != nil:
          sleep(threadpoolWaitMs)
        else:
          # The flowvar finalizer ("finished()") set w.data to nil, so we can
          # safely terminate the thread.
          #
          # TODO: look for scenarios in which the flowvar is never finalized, so
          # a shut down thread gets stuck in this loop until the main thread exits.
          break
      break
    when declared(atomicStoreN):
      atomicStoreN(addr(w.ready), true, ATOMIC_SEQ_CST)
    else:
      w.ready = true
    readyWorker = w
    signal(gSomeReady)
    blockUntil(w.taskArrived)
    # XXX Somebody needs to look into this (why does this assertion fail
    # in Visual Studio?)
    when not defined(vcc) and not defined(tcc): assert(not w.ready)

    withLock numSlavesLock:
      inc numSlavesRunning

    w.f(w, w.data)

    withLock numSlavesLock:
      dec numSlavesRunning

    if w.q.len != 0: w.cleanFlowVars

proc distinguishedSlave(w: ptr Worker) {.thread.} =
  while true:
    when declared(atomicStoreN):
      atomicStoreN(addr(w.ready), true, ATOMIC_SEQ_CST)
    else:
      w.ready = true
    signal(w.readyForTask)
    blockUntil(w.taskArrived)
    assert(not w.ready)
    w.f(w, w.data)
    if w.q.len != 0: w.cleanFlowVars

var
  workers: array[MaxThreadPoolSize, Thread[ptr Worker]]
  workersData: array[MaxThreadPoolSize, Worker]
  distinguished: array[MaxDistinguishedThread, Thread[ptr Worker]]
  distinguishedData: array[MaxDistinguishedThread, Worker]

when defined(nimPinToCpu):
  var gCpus: Natural

proc setMinPoolSize*(size: range[1..MaxThreadPoolSize]) =
  ## Sets the minimum thread pool size. The default value of this is 4.
  minPoolSize = size

proc setMaxPoolSize*(size: range[1..MaxThreadPoolSize]) =
  ## Sets the maximum thread pool size. The default value of this
  ## is `MaxThreadPoolSize <#MaxThreadPoolSize>`_.
  maxPoolSize = size
  if currentPoolSize > maxPoolSize:
    for i in maxPoolSize..currentPoolSize-1:
      let w = addr(workersData[i])
      w.shutdown = true

when defined(nimRecursiveSpawn):
  var localThreadId {.threadvar.}: int

proc activateWorkerThread(i: int) {.noinline.} =
  workersData[i].taskArrived.initSemaphore()
  workersData[i].taskStarted.initSemaphore()
  workersData[i].initialized = true
  workersData[i].q.empty.initSemaphore()
  initLock(workersData[i].q.lock)
  createThread(workers[i], slave, addr(workersData[i]))
  when defined(nimRecursiveSpawn):
    localThreadId = i+1
  when defined(nimPinToCpu):
    if gCpus > 0: pinToCpu(workers[i], i mod gCpus)

proc activateDistinguishedThread(i: int) {.noinline.} =
  distinguishedData[i].taskArrived.initSemaphore()
  distinguishedData[i].taskStarted.initSemaphore()
  distinguishedData[i].initialized = true
  distinguishedData[i].q.empty.initSemaphore()
  initLock(distinguishedData[i].q.lock)
  distinguishedData[i].readyForTask.initSemaphore()
  createThread(distinguished[i], distinguishedSlave, addr(distinguishedData[i]))

proc setup() =
  let p = countProcessors()
  when defined(nimPinToCpu):
    gCpus = p
  currentPoolSize = min(p, MaxThreadPoolSize)
  readyWorker = addr(workersData[0])
  for i in 0..<currentPoolSize: activateWorkerThread(i)

proc preferSpawn*(): bool =
  ## Use this proc to determine quickly if a `spawn` or a direct call is
  ## preferable.
  ##
  ## If it returns `true`, a `spawn` may make sense. In general
  ## it is not necessary to call this directly; use the `spawnX template
  ## <#spawnX.t>`_ instead.
  result = gSomeReady.counter > 0

proc spawn*(call: sink typed) {.magic: "Spawn".} =
  ## Always spawns a new task, so that the `call` is never executed on
  ## the calling thread.
  ##
  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
  ## return type that is either `void` or compatible with `FlowVar[T]`.
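  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc report(msg: string) = echo msg   # `void` return: fire and forget
  ##   proc fib(n: int): int =
  ##     if n < 2: n else: fib(n-1) + fib(n-2)
  ##
  ##   spawn report("hello from the pool")
  ##   let fv = spawn fib(30)   # non-void return: gives a FlowVar[int]
  ##   echo(^fv)
  ##   sync()                   # wait for the `void` task as well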
  discard "It uses `nimSpawn3` internally"

proc pinnedSpawn*(id: ThreadId; call: sink typed) {.magic: "Spawn".} =
  ## Always spawns a new task on the worker thread with `id`, so that
  ## the `call` is **always** executed on that thread.
  ##
  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
  ## return type that is either `void` or compatible with `FlowVar[T]`.
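  ##
  ## A small illustrative sketch (assumes `--threads:on`); both calls are
  ## serialized on distinguished thread 0:
  ##
  ## .. code-block:: Nim
  ##   var counter {.threadvar.}: int   # per-thread state
  ##
  ##   proc bump(): int =
  ##     inc counter
  ##     counter
  ##
  ##   let a = pinnedSpawn(0, bump())
  ##   let b = pinnedSpawn(0, bump())
  ##   echo(^a, " ", ^b)   # the same thread ran both, so this prints: 1 2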
  discard "It uses `nimSpawn4` internally"

template spawnX*(call) =
  ## Spawns a new task if a CPU core is ready, otherwise executes the
  ## call in the calling thread.
  ##
  ## Usually, it is advised to use the `spawn proc <#spawn,sinktyped>`_
  ## in order to not block the producer for an unknown amount of time.
  ##
  ## `call` has to be a proc call `p(...)` where `p` is gcsafe and has a
  ## return type that is either `void` or compatible with `FlowVar[T]`.
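  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc handle(item: int) = echo "processing ", item
  ##
  ##   for i in 1..100:
  ##     spawnX handle(i)   # offloaded only if a worker happens to be idle
  ##   sync()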
  (if preferSpawn(): spawn call else: call)

proc parallel*(body: untyped) {.magic: "Parallel".}
  ## A parallel section can be used to execute a block in parallel.
  ##
  ## `body` has to be in a DSL that is a particular subset of the language.
  ##
  ## Please refer to `the manual <manual_experimental.html#parallel-amp-spawn>`_
  ## for further information.
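  ##
  ## A small illustrative sketch (assumes `--threads:on`; the `parallel`
  ## statement additionally needs the `experimental: "parallel"` switch,
  ## shown below as a pragma):
  ##
  ## .. code-block:: Nim
  ##   import std/threadpool
  ##   {.experimental: "parallel".}
  ##
  ##   proc square(x: int): int = x * x
  ##
  ##   proc squares(n: int): seq[int] =
  ##     var data = newSeq[int](n)
  ##     parallel:
  ##       for i in 0..data.high:
  ##         data[i] = spawn square(i)   # disjoint slots, checked at compile time
  ##     result = data
  ##
  ##   echo squares(10)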

var
  state: ThreadPoolState
  stateLock: Lock

initLock stateLock

proc nimSpawn3(fn: WorkerProc; data: pointer) {.compilerproc.} =
  # implementation of 'spawn' that is used by the code generator.
  while true:
    if selectWorker(readyWorker, fn, data): return
    for i in 0..<currentPoolSize:
      if selectWorker(addr(workersData[i]), fn, data): return

    # determine what to do, but keep in mind this is expensive too:
    # state.calls < maxPoolSize: warmup phase
    # (state.calls and 127) == 0: periodic check
    if state.calls < maxPoolSize or (state.calls and 127) == 0:
      # ensure the call to 'advice' is atomic:
      if tryAcquire(stateLock):
        if currentPoolSize < minPoolSize:
          if not workersData[currentPoolSize].initialized:
            activateWorkerThread(currentPoolSize)
          let w = addr(workersData[currentPoolSize])
          atomicInc currentPoolSize
          if selectWorker(w, fn, data):
            release(stateLock)
            return
        case advice(state)
        of doNothing: discard
        of doCreateThread:
          if currentPoolSize < maxPoolSize:
            if not workersData[currentPoolSize].initialized:
              activateWorkerThread(currentPoolSize)
            let w = addr(workersData[currentPoolSize])
            atomicInc currentPoolSize
            if selectWorker(w, fn, data):
              release(stateLock)
              return
            # else we didn't succeed, but some other thread did, so do nothing.
        of doShutdownThread:
          if currentPoolSize > minPoolSize:
            let w = addr(workersData[currentPoolSize-1])
            w.shutdown = true
          # we don't free anything here. Too dangerous.
        release(stateLock)
      # else the acquire failed, but this means some
      # other thread succeeded, so we don't need to do anything here.

    when defined(nimRecursiveSpawn):
      if localThreadId > 0:
        # we are a worker thread, so instead of waiting for something which
        # might as well never happen (see tparallel_quicksort), we run the task
        # on the current thread instead.
        var self = addr(workersData[localThreadId-1])
        fn(self, data)
        blockUntil(self.taskStarted)
        return

    if isSlave:
      # Run under the lock until `numSlavesWaiting` has been incremented, to
      # avoid a race (otherwise the last two threads might start waiting together):
      withLock numSlavesLock:
        if numSlavesRunning <= numSlavesWaiting + 1:
          # All the other slaves are waiting.
          # If we wait now, we're deadlocked until
          # an external spawn happens!
          if currentPoolSize < maxPoolSize:
            if not workersData[currentPoolSize].initialized:
              activateWorkerThread(currentPoolSize)
            let w = addr(workersData[currentPoolSize])
            atomicInc currentPoolSize
            if selectWorker(w, fn, data):
              return
          else:
            # There is no place in the pool. We're deadlocked.
            # echo "Deadlock!"
            discard
        inc numSlavesWaiting

    blockUntil(gSomeReady)

    if isSlave:
      withLock numSlavesLock:
        dec numSlavesWaiting

var
  distinguishedLock: Lock

initLock distinguishedLock

proc nimSpawn4(fn: WorkerProc; data: pointer; id: ThreadId) {.compilerproc.} =
  acquire(distinguishedLock)
  if not distinguishedData[id].initialized:
    activateDistinguishedThread(id)
  release(distinguishedLock)
  while true:
    if selectWorker(addr(distinguishedData[id]), fn, data): break
    blockUntil(distinguishedData[id].readyForTask)

proc sync*() =
  ## A simple barrier to wait for all `spawn`ed tasks.
  ##
  ## If you need more elaborate waiting, you have to use an explicit barrier.
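  ##
  ## A small illustrative sketch (assumes `--threads:on`):
  ##
  ## .. code-block:: Nim
  ##   proc work(i: int) = echo "task ", i
  ##
  ##   for i in 1..4:
  ##     spawn work(i)
  ##   sync()   # don't exit before every spawned task has run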
  while true:
    var allReady = true
    for i in 0 ..< currentPoolSize:
      if not allReady: break
      allReady = allReady and workersData[i].ready
    if allReady: break
    sleep(threadpoolWaitMs)
    # We cannot "blockUntil(gSomeReady)" because workers may be shut down between
    # the time we establish that some are not "ready" and the time we wait for a
    # "signal(gSomeReady)" from inside "slave()" that can never come.

setup()