discard """
  matrix: "--mm:refc"
  output: '''
'''
"""

# parallel convex hull for Nim bigbreak
# nim c --threads:on -d:release pconvex_hull.nim

import algorithm, sequtils, threadpool

type Point = tuple[x, y: float]
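
# Orders points by x, then by y: the lexicographic order the hull scan relies on.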
proc cmpPoint(a, b: Point): int =
  result = cmp(a.x, b.x)
  if result == 0:
    result = cmp(a.y, b.y)
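
# Cross product of the vectors o->a and o->b: positive for a counter-clockwise
# turn, negative for clockwise, zero when the three points are collinear.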
template cross[T](o, a, b: T): untyped =
  (a.x - o.x) * (b.y - o.y) - (a.y - o.y) * (b.x - o.x)
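
# One scan step shared by both hull chains: pop points that do not make a strict
# counter-clockwise turn towards p[i], then push p[i]. Uses `result`, `lr1`, `p`
# and `i` from the instantiating proc.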
template pro(): untyped =
  while lr1 > 0 and cross(result[lr1 - 1], result[lr1], p[i]) <= 0:
    discard result.pop
    lr1 -= 1
  result.add(p[i])
  lr1 += 1
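
# Builds one chain of the hull: with upper = true the sorted points are scanned
# forward, otherwise backward. The final point is dropped so the two chains can
# be concatenated without repeating the end points.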
proc half[T](p: seq[T]; upper: bool): seq[T] =
  var i, lr1: int
  result = @[]
  lr1 = -1
  if upper:
    i = 0
    while i <= high(p):
      pro()
      i += 1
  else:
    i = high(p)
    while i >= low(p):
      pro()
      i -= 1
  discard result.pop
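
# Monotone-chain convex hull: sort the points, build the two chains in parallel
# via spawn, then await both FlowVars and concatenate the results.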
proc convex_hull[T](points: var seq[T], cmp: proc(x, y: T): int {.closure.}) : seq[T] =
  if len(points) < 2: return points
  points.sort(cmp)

  var ul: array[2, FlowVar[seq[T]]]
  parallel:
    for k in 0..ul.high:
      ul[k] = spawn half[T](points, k == 0)

  result = concat(^ul[0], ^ul[1])
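
# 10_000 points filling the grid 0..99 x 0..99; the hull is the four corners.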
var s = map(toSeq(0..9999), proc(x: int): Point = (float(x div 100), float(x mod 100)))

# On some runs, this pool size reduction will set the "shutdown" attribute on the
# worker thread that executes our spawned task, before we can read the flowvars.
setMaxPoolSize 2

for i in 0..2:
  doAssert convex_hull[Point](s, cmpPoint) ==
    @[(0.0, 0.0), (99.0, 0.0), (99.0, 99.0), (0.0, 99.0)]