#
#
#            Nim's Runtime Library
#        (c) Copyright 2012 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

# Low level allocator for Nim. Has been designed to support the GC.
{.push profiler:off.}

include osalloc

template track(op, address, size) =
  when defined(memTracker):
    memTrackerOp(op, address, size)

# We manage *chunks* of memory. Each chunk is a multiple of the page size.
# Each chunk starts at an address that is divisible by the page size.

const
  InitialMemoryRequest = 128 * PageSize # 0.5 MB

  SmallChunkSize = PageSize

  MaxFli = 30
  MaxLog2Sli = 5 # 32, this cannot be increased without changing 'uint32'
                 # everywhere!
  MaxSli = 1 shl MaxLog2Sli
  FliOffset = 6
  RealFli = MaxFli - FliOffset

  # size of chunks in last matrix bin
  MaxBigChunkSize = 1 shl MaxFli - 1 shl (MaxFli-MaxLog2Sli-1)
  HugeChunkSize = MaxBigChunkSize + 1
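
# A worked example of the constants above (illustrative only, not part of the
# allocator): with MaxFli = 30 and MaxLog2Sli = 5, the largest chunk the TLSF
# matrix manages is 2^30 - 2^24 bytes; anything bigger bypasses the matrix and
# is handled as a "huge" chunk.
when false:
  doAssert MaxBigChunkSize == 1_073_741_824 - 16_777_216 # == 1_056_964_608
  doAssert HugeChunkSize == MaxBigChunkSize + 1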
type
  PTrunk = ptr Trunk
  Trunk = object
    next: PTrunk         # all nodes are connected with this pointer
    key: int             # start address at bit 0
    bits: array[0..IntsPerTrunk-1, int] # a bit vector

  TrunkBuckets = array[0..255, PTrunk]
  IntSet = object
    data: TrunkBuckets

type
  AlignType = BiggestFloat
  FreeCell {.final, pure.} = object
    next: ptr FreeCell   # next free cell in chunk (overlaid with refcount)
    zeroField: int       # 0 means cell is not used (overlaid with typ field)
                         # 1 means cell is manually managed pointer
                         # otherwise a PNimType is stored in there

  PChunk = ptr BaseChunk
  PBigChunk = ptr BigChunk
  PSmallChunk = ptr SmallChunk
  BaseChunk {.pure, inheritable.} = object
    prevSize: int        # size of previous chunk; for coalescing
                         # 0th bit == 1 if 'used'
    size: int            # if < PageSize it is a small chunk

  SmallChunk = object of BaseChunk
    next, prev: PSmallChunk  # chunks of the same size
    freeList: ptr FreeCell
    free: int            # how many bytes remain
    acc: int             # accumulator for small object allocation
    when defined(cpu32):
      align: int
    data: AlignType      # start of usable memory

  BigChunk = object of BaseChunk # not necessarily > PageSize!
    next, prev: PBigChunk    # chunks of the same (or bigger) size
    data: AlignType      # start of usable memory

template smallChunkOverhead(): untyped = sizeof(SmallChunk)-sizeof(AlignType)
template bigChunkOverhead(): untyped = sizeof(BigChunk)-sizeof(AlignType)
# ------------- chunk table ---------------------------------------------------
# We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
# endings of big chunks. This is needed by the merging operation. The only
# remaining operation is best-fit for big chunks. Since there is a size-limit
# for big chunks (because greater than the limit means they are returned back
# to the OS), a fixed size array can be used.

type
  PLLChunk = ptr LLChunk
  LLChunk = object ## *low-level* chunk
    size: int                # remaining size
    acc: int                 # accumulator
    next: PLLChunk           # next low-level chunk; only needed for dealloc

  PAvlNode = ptr AvlNode
  AvlNode = object
    link: array[0..1, PAvlNode] # Left (0) and right (1) links
    key, upperBound: int
    level: int

  HeapLinks = object
    len: int
    chunks: array[30, (PBigChunk, int)]
    next: ptr HeapLinks

  MemRegion = object
    minLargeObj, maxLargeObj: int
    freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
    flBitmap: uint32
    slBitmap: array[RealFli, uint32]
    matrix: array[RealFli, array[MaxSli, PBigChunk]]
    llmem: PLLChunk
    currMem, maxMem, freeMem, occ: int # memory sizes (allocated from OS)
    lastSize: int # needed for the case that OS gives us pages linearly
    chunkStarts: IntSet
    root, deleted, last, freeAvlNodes: PAvlNode
    locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
    nextChunkSize: int
    bottomData: AvlNode
    heapLinks: HeapLinks
    when defined(nimTypeNames):
      allocCounter, deallocCounter: int

const
  fsLookupTable: array[byte, int8] = [
    -1'i8, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7
  ]
proc msbit(x: uint32): int {.inline.} =
  let a = if x <= 0xff_ff:
            (if x <= 0xff: 0 else: 8)
          else:
            (if x <= 0xff_ff_ff: 16 else: 24)
  result = int(fsLookupTable[byte(x shr a)]) + a

proc lsbit(x: uint32): int {.inline.} =
  msbit(x and ((not x) + 1))
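
# A hedged sketch of what the two helpers compute (illustrative, not part of
# the allocator): msbit/lsbit return the index of the most/least significant
# set bit, found via fsLookupTable; lsbit isolates the lowest set bit first.
when false:
  doAssert msbit(0b1000_0000u32) == 7
  doAssert lsbit(0b0001_0100u32) == 2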
proc setBit(nr: int; dest: var uint32) {.inline.} =
  dest = dest or (1u32 shl (nr and 0x1f))

proc clearBit(nr: int; dest: var uint32) {.inline.} =
  dest = dest and not (1u32 shl (nr and 0x1f))

proc mappingSearch(r, fl, sl: var int) {.inline.} =
  #let t = (1 shl (msbit(uint32 r) - MaxLog2Sli)) - 1
  # This diverges from the standard TLSF algorithm because we need to ensure
  # PageSize alignment:
  let t = roundup((1 shl (msbit(uint32 r) - MaxLog2Sli)), PageSize) - 1
  r = r + t
  r = r and not t
  r = min(r, MaxBigChunkSize)
  fl = msbit(uint32 r)
  sl = (r shr (fl - MaxLog2Sli)) - MaxSli
  dec fl, FliOffset
  sysAssert((r and PageMask) == 0, "mappingSearch: still not aligned")

# See http://www.gii.upv.es/tlsf/files/papers/tlsf_desc.pdf for details of
# this algorithm.
proc mappingInsert(r: int): tuple[fl, sl: int] {.inline.} =
  sysAssert((r and PageMask) == 0, "mappingInsert: still not aligned")
  result.fl = msbit(uint32 r)
  result.sl = (r shr (result.fl - MaxLog2Sli)) - MaxSli
  dec result.fl, FliOffset
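
# Worked example of the TLSF mapping (illustrative; assumes PageSize == 4096):
# for a 4-page chunk r = 16384 we have msbit(16384) == 14, so
# sl = (16384 shr (14 - MaxLog2Sli)) - MaxSli = 32 - 32 = 0 and
# fl = 14 - FliOffset = 8, i.e. the chunk lives in matrix[8][0].
when false:
  doAssert mappingInsert(16384) == (fl: 8, sl: 0)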
template mat(): untyped = a.matrix[fl][sl]

proc findSuitableBlock(a: MemRegion; fl, sl: var int): PBigChunk {.inline.} =
  let tmp = a.slBitmap[fl] and (not 0u32 shl sl)
  result = nil
  if tmp != 0:
    sl = lsbit(tmp)
    result = mat()
  else:
    fl = lsbit(a.flBitmap and (not 0u32 shl (fl + 1)))
    if fl > 0:
      sl = lsbit(a.slBitmap[fl])
      result = mat()

template clearBits(sl, fl) =
  clearBit(sl, a.slBitmap[fl])
  if a.slBitmap[fl] == 0u32:
    # do not forget to cascade:
    clearBit(fl, a.flBitmap)

proc removeChunkFromMatrix(a: var MemRegion; b: PBigChunk) =
  let (fl, sl) = mappingInsert(b.size)
  if b.next != nil: b.next.prev = b.prev
  if b.prev != nil: b.prev.next = b.next
  if mat() == b:
    mat() = b.next
    if mat() == nil:
      clearBits(sl, fl)
  b.prev = nil
  b.next = nil

proc removeChunkFromMatrix2(a: var MemRegion; b: PBigChunk; fl, sl: int) =
  mat() = b.next
  if mat() != nil:
    mat().prev = nil
  else:
    clearBits(sl, fl)
  b.prev = nil
  b.next = nil

proc addChunkToMatrix(a: var MemRegion; b: PBigChunk) =
  let (fl, sl) = mappingInsert(b.size)
  b.prev = nil
  b.next = mat()
  if mat() != nil:
    mat().prev = b
  mat() = b
  setBit(sl, a.slBitmap[fl])
  setBit(fl, a.flBitmap)

{.push stack_trace: off.}
proc initAllocator() = discard "nothing to do anymore"
{.pop.}
proc incCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  inc(a.currMem, bytes)

proc decCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  a.maxMem = max(a.maxMem, a.currMem)
  dec(a.currMem, bytes)

proc getMaxMem(a: var MemRegion): int =
  # Since we update maxPagesCount only when freeing pages,
  # maxPagesCount may not be up to date. Thus we use the
  # maximum of both values here:
  result = max(a.currMem, a.maxMem)

proc llAlloc(a: var MemRegion, size: int): pointer =
  # *low-level* alloc for the memory manager's data structures. Deallocation
  # is done at the end of the allocator's life time.
  if a.llmem == nil or size > a.llmem.size:
    # the requested size is ``roundup(size+sizeof(LLChunk), PageSize)``, but
    # since we know ``size`` is a (small) constant, we know the requested size
    # is one page:
    sysAssert roundup(size+sizeof(LLChunk), PageSize) == PageSize, "roundup 6"
    var old = a.llmem # can be nil and is correct with nil
    a.llmem = cast[PLLChunk](osAllocPages(PageSize))
    when defined(avlcorruption):
      trackLocation(a.llmem, PageSize)
    incCurrMem(a, PageSize)
    a.llmem.size = PageSize - sizeof(LLChunk)
    a.llmem.acc = sizeof(LLChunk)
    a.llmem.next = old
  result = cast[pointer](cast[ByteAddress](a.llmem) + a.llmem.acc)
  dec(a.llmem.size, size)
  inc(a.llmem.acc, size)
  zeroMem(result, size)
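
# Bump-pointer behaviour of llAlloc in a sketch (illustrative, not compiled):
# successive small requests are carved out of the same page.
when false:
  var r: MemRegion
  let n1 = llAlloc(r, 64)  # grabs a fresh page, returns its first 64 bytes
  let n2 = llAlloc(r, 64)  # served from the same page, right behind n1
  doAssert cast[int](n2) == cast[int](n1) + 64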
proc getBottom(a: var MemRegion): PAvlNode =
  result = addr(a.bottomData)
  if result.link[0] == nil:
    result.link[0] = result
    result.link[1] = result

proc allocAvlNode(a: var MemRegion, key, upperBound: int): PAvlNode =
  if a.freeAvlNodes != nil:
    result = a.freeAvlNodes
    a.freeAvlNodes = a.freeAvlNodes.link[0]
  else:
    result = cast[PAvlNode](llAlloc(a, sizeof(AvlNode)))
    when defined(avlcorruption):
      cprintf("tracking location: %p\n", result)
  result.key = key
  result.upperBound = upperBound
  let bottom = getBottom(a)
  result.link[0] = bottom
  result.link[1] = bottom
  result.level = 1
  #when defined(avlcorruption):
  #  track("allocAvlNode", result, sizeof(AvlNode))
  sysAssert(bottom == addr(a.bottomData), "bottom data")
  sysAssert(bottom.link[0] == bottom, "bottom link[0]")
  sysAssert(bottom.link[1] == bottom, "bottom link[1]")

proc deallocAvlNode(a: var MemRegion, n: PAvlNode) {.inline.} =
  n.link[0] = a.freeAvlNodes
  a.freeAvlNodes = n

proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int) =
  var it = addr(a.heapLinks)
  while it != nil and it.len >= it.chunks.len: it = it.next
  if it == nil:
    var n = cast[ptr HeapLinks](llAlloc(a, sizeof(HeapLinks)))
    n.next = a.heapLinks.next
    a.heapLinks.next = n
    n.chunks[0] = (p, size)
    n.len = 1
  else:
    let L = it.len
    it.chunks[L] = (p, size)
    inc it.len
include "system/avltree"

proc llDeallocAll(a: var MemRegion) =
  var it = a.llmem
  while it != nil:
    # we know each block in the list has the size of 1 page:
    var next = it.next
    osDeallocPages(it, PageSize)
    it = next
  a.llmem = nil

proc intSetGet(t: IntSet, key: int): PTrunk =
  var it = t.data[key and high(t.data)]
  while it != nil:
    if it.key == key: return it
    it = it.next
  result = nil

proc intSetPut(a: var MemRegion, t: var IntSet, key: int): PTrunk =
  result = intSetGet(t, key)
  if result == nil:
    result = cast[PTrunk](llAlloc(a, sizeof(result[])))
    result.next = t.data[key and high(t.data)]
    t.data[key and high(t.data)] = result
    result.key = key

proc contains(s: IntSet, key: int): bool =
  var t = intSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    result = (t.bits[u shr IntShift] and (1 shl (u and IntMask))) != 0
  else:
    result = false

proc incl(a: var MemRegion, s: var IntSet, key: int) =
  var t = intSetPut(a, s, key shr TrunkShift)
  var u = key and TrunkMask
  t.bits[u shr IntShift] = t.bits[u shr IntShift] or (1 shl (u and IntMask))

proc excl(s: var IntSet, key: int) =
  var t = intSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    t.bits[u shr IntShift] = t.bits[u shr IntShift] and not
        (1 shl (u and IntMask))

iterator elements(t: IntSet): int {.inline.} =
  # while traversing it is forbidden to change the set!
  for h in 0..high(t.data):
    var r = t.data[h]
    while r != nil:
      var i = 0
      while i <= high(r.bits):
        var w = r.bits[i] # taking a copy of r.bits[i] here is correct, because
                          # modifying operations are not allowed during traversal
        var j = 0
        while w != 0:        # test all remaining bits for zero
          if (w and 1) != 0: # the bit is set!
            yield (r.key shl TrunkShift) or (i shl IntShift +% j)
          inc(j)
          w = w shr 1
        inc(i)
      r = r.next
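
# Illustrative use of the internal IntSet (not compiled): keys are bucketed by
# their upper bits into Trunks; the low bits index into the Trunk's bit vector.
when false:
  var a: MemRegion
  incl(a, a.chunkStarts, 123)
  doAssert 123 in a.chunkStarts
  excl(a.chunkStarts, 123)
  doAssert 123 notin a.chunkStarts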
proc isSmallChunk(c: PChunk): bool {.inline.} =
  return c.size <= SmallChunkSize-smallChunkOverhead()

proc chunkUnused(c: PChunk): bool {.inline.} =
  result = (c.prevSize and 1) == 0

iterator allObjects(m: var MemRegion): pointer {.inline.} =
  m.locked = true
  for s in elements(m.chunkStarts):
    # we need to check here again as it could have been modified:
    if s in m.chunkStarts:
      let c = cast[PChunk](s shl PageShift)
      if not chunkUnused(c):
        if isSmallChunk(c):
          var c = cast[PSmallChunk](c)
          let size = c.size
          var a = cast[ByteAddress](addr(c.data))
          let limit = a + c.acc
          while a <% limit:
            yield cast[pointer](a)
            a = a +% size
        else:
          let c = cast[PBigChunk](c)
          yield addr(c.data)
  m.locked = false

proc iterToProc*(iter: typed, envType: typedesc; procName: untyped) {.
                      magic: "Plugin", compileTime.}

proc isCell(p: pointer): bool {.inline.} =
  result = cast[ptr FreeCell](p).zeroField >% 1

# ------------- chunk management ----------------------------------------------
proc pageIndex(c: PChunk): int {.inline.} =
  result = cast[ByteAddress](c) shr PageShift

proc pageIndex(p: pointer): int {.inline.} =
  result = cast[ByteAddress](p) shr PageShift

proc pageAddr(p: pointer): PChunk {.inline.} =
  result = cast[PChunk](cast[ByteAddress](p) and not PageMask)
  #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
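
# pageAddr simply masks away the offset within a page (illustrative sketch,
# assuming PageSize == 4096):
when false:
  let p = cast[pointer](0x12340ABC)
  doAssert cast[int](pageAddr(p)) == 0x12340000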
when false:
  proc writeFreeList(a: MemRegion) =
    var it = a.freeChunksList
    c_fprintf(stdout, "freeChunksList: %p\n", it)
    while it != nil:
      c_fprintf(stdout, "it: %p, next: %p, prev: %p, size: %ld\n",
                it, it.next, it.prev, it.size)
      it = it.next

const nimMaxHeap {.intdefine.} = 0

proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
  when not defined(emscripten):
    if not a.blockChunkSizeIncrease:
      let usedMem = a.occ #a.currMem # - a.freeMem
      when nimMaxHeap != 0:
        if usedMem > nimMaxHeap * 1024 * 1024:
          raiseOutOfMem()
      if usedMem < 64 * 1024:
        a.nextChunkSize = PageSize*4
      else:
        a.nextChunkSize = min(roundup(usedMem shr 2, PageSize), a.nextChunkSize * 2)
        a.nextChunkSize = min(a.nextChunkSize, MaxBigChunkSize)

  var size = size
  if size > a.nextChunkSize:
    result = cast[PBigChunk](osAllocPages(size))
  else:
    result = cast[PBigChunk](osTryAllocPages(a.nextChunkSize))
    if result == nil:
      result = cast[PBigChunk](osAllocPages(size))
      a.blockChunkSizeIncrease = true
    else:
      size = a.nextChunkSize

  incCurrMem(a, size)
  inc(a.freeMem, size)
  a.addHeapLink(result, size)
  when defined(debugHeapLinks):
    cprintf("owner: %p; result: %p; next pointer %p; size: %ld\n", addr(a),
            result, result.heapLink, result.size)

  when defined(memtracker):
    trackLocation(addr result.size, sizeof(int))

  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
  #zeroMem(result, size)
  result.next = nil
  result.prev = nil
  result.size = size
  # update next.prevSize:
  var nxt = cast[ByteAddress](result) +% size
  sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
  var next = cast[PChunk](nxt)
  if pageIndex(next) in a.chunkStarts:
    #echo("Next already allocated!")
    next.prevSize = size or (next.prevSize and 1)
  # set result.prevSize:
  var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
  var prv = cast[ByteAddress](result) -% lastSize
  sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
  var prev = cast[PChunk](prv)
  if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
    #echo("Prev already allocated!")
    result.prevSize = lastSize or (result.prevSize and 1)
  else:
    result.prevSize = 0 or (result.prevSize and 1) # unknown
    # but do not overwrite 'used' field
  a.lastSize = size # for next request

  sysAssert((cast[int](result) and PageMask) == 0, "requestOschunks: unaligned chunk")
proc isAccessible(a: MemRegion, p: pointer): bool {.inline.} =
  result = contains(a.chunkStarts, pageIndex(p))

proc contains[T](list, x: T): bool =
  var it = list
  while it != nil:
    if it == x: return true
    it = it.next

proc listAdd[T](head: var T, c: T) {.inline.} =
  sysAssert(c notin head, "listAdd 1")
  sysAssert c.prev == nil, "listAdd 2"
  sysAssert c.next == nil, "listAdd 3"
  c.next = head
  if head != nil:
    sysAssert head.prev == nil, "listAdd 4"
    head.prev = c
  head = c

proc listRemove[T](head: var T, c: T) {.inline.} =
  sysAssert(c in head, "listRemove")
  if c == head:
    head = c.next
    sysAssert c.prev == nil, "listRemove 2"
    if head != nil: head.prev = nil
  else:
    sysAssert c.prev != nil, "listRemove 3"
    c.prev.next = c.next
    if c.next != nil: c.next.prev = c.prev
  c.next = nil
  c.prev = nil
proc updatePrevSize(a: var MemRegion, c: PBigChunk,
                    prevSize: int) {.inline.} =
  var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
  sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
  if isAccessible(a, ri):
    ri.prevSize = prevSize or (ri.prevSize and 1)

proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
  result = cast[PBigChunk](cast[ByteAddress](c) +% size)
  result.size = c.size - size
  track("result.size", addr result.size, sizeof(int))
  # XXX check if these two nil assignments are dead code given
  # addChunkToMatrix's implementation:
  result.next = nil
  result.prev = nil
  # size and not used:
  result.prevSize = size
  sysAssert((size and 1) == 0, "splitChunk 2")
  sysAssert((size and PageMask) == 0,
      "splitChunk: size is not a multiple of the PageSize")
  updatePrevSize(a, c, result.size)
  c.size = size
  incl(a, a.chunkStarts, pageIndex(result))

proc splitChunk(a: var MemRegion, c: PBigChunk, size: int) =
  let rest = splitChunk2(a, c, size)
  addChunkToMatrix(a, rest)

proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
  var c = c
  sysAssert(c.size >= PageSize, "freeBigChunk")
  inc(a.freeMem, c.size)
  c.prevSize = c.prevSize and not 1  # set 'used' to false
  when coalescLeft:
    let prevSize = c.prevSize
    if prevSize != 0:
      var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
      sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
      if isAccessible(a, le) and chunkUnused(le):
        sysAssert(not isSmallChunk(le), "freeBigChunk 5")
        if not isSmallChunk(le) and le.size < MaxBigChunkSize:
          removeChunkFromMatrix(a, cast[PBigChunk](le))
          inc(le.size, c.size)
          excl(a.chunkStarts, pageIndex(c))
          c = cast[PBigChunk](le)
          if c.size > MaxBigChunkSize:
            let rest = splitChunk2(a, c, MaxBigChunkSize)
            addChunkToMatrix(a, c)
            c = rest
  when coalescRight:
    var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
    sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "freeBigChunk 2")
    if isAccessible(a, ri) and chunkUnused(ri):
      sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
      if not isSmallChunk(ri) and c.size < MaxBigChunkSize:
        removeChunkFromMatrix(a, cast[PBigChunk](ri))
        inc(c.size, ri.size)
        excl(a.chunkStarts, pageIndex(ri))
        if c.size > MaxBigChunkSize:
          let rest = splitChunk2(a, c, MaxBigChunkSize)
          addChunkToMatrix(a, rest)
  addChunkToMatrix(a, c)
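
# A short sketch of the coalescing above (illustrative only): when X is freed
# and its right neighbour Y is an unused big chunk, Y is removed from the
# matrix, X grows by Y's size, Y's page disappears from chunkStarts, and the
# merged chunk is re-inserted. The left-neighbour case works symmetrically,
# located via prevSize.
#
#   before freeBigChunk(a, X):  | X (used, 8192) | Y (free, 4096) |
#   after:                      | X (free, 12288)                 |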
proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
  sysAssert(size > 0, "getBigChunk 2")
  var size = size # roundup(size, PageSize)
  var fl, sl: int
  mappingSearch(size, fl, sl)
  sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
  result = findSuitableBlock(a, fl, sl)
  if result == nil:
    if size < InitialMemoryRequest:
      result = requestOsChunks(a, InitialMemoryRequest)
      splitChunk(a, result, size)
    else:
      result = requestOsChunks(a, size)
      # if we over allocated split the chunk:
      if result.size > size:
        splitChunk(a, result, size)
  else:
    removeChunkFromMatrix2(a, result, fl, sl)
    if result.size >= size + PageSize:
      splitChunk(a, result, size)
  # set 'used' to true:
  result.prevSize = 1
  track("setUsedToFalse", addr result.size, sizeof(int))
  incl(a, a.chunkStarts, pageIndex(result))
  dec(a.freeMem, size)
proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
  result = cast[PBigChunk](osAllocPages(size))
  incCurrMem(a, size)
  # XXX add this to the heap links. But also remove it from it later.
  when false: a.addHeapLink(result, size)
  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "getHugeChunk")
  result.next = nil
  result.prev = nil
  result.size = size
  # set 'used' to true:
  result.prevSize = 1
  incl(a, a.chunkStarts, pageIndex(result))

proc freeHugeChunk(a: var MemRegion; c: PBigChunk) =
  let size = c.size
  sysAssert(size >= HugeChunkSize, "freeHugeChunk: invalid size")
  excl(a.chunkStarts, pageIndex(c))
  decCurrMem(a, size)
  osDeallocPages(c, size)

proc getSmallChunk(a: var MemRegion): PSmallChunk =
  var res = getBigChunk(a, PageSize)
  sysAssert res.prev == nil, "getSmallChunk 1"
  sysAssert res.next == nil, "getSmallChunk 2"
  result = cast[PSmallChunk](res)
# -----------------------------------------------------------------------------
proc isAllocatedPtr(a: MemRegion, p: pointer): bool {.benign.}

when true:
  template allocInv(a: MemRegion): bool = true
else:
  proc allocInv(a: MemRegion): bool =
    ## checks some (not all yet) invariants of the allocator's data structures.
    for s in low(a.freeSmallChunks)..high(a.freeSmallChunks):
      var c = a.freeSmallChunks[s]
      while not (c == nil):
        if c.next == c:
          echo "[SYSASSERT] c.next == c"
          return false
        if not (c.size == s * MemAlign):
          echo "[SYSASSERT] c.size != s * MemAlign"
          return false
        var it = c.freeList
        while not (it == nil):
          if not (it.zeroField == 0):
            echo "[SYSASSERT] it.zeroField != 0"
            c_printf("%ld %p\n", it.zeroField, it)
            return false
          it = it.next
        c = c.next
    result = true

when false:
  var
    rsizes: array[50_000, int]
    rsizesLen: int

  proc trackSize(size: int) =
    rsizes[rsizesLen] = size
    inc rsizesLen

  proc untrackSize(size: int) =
    for i in 0 .. rsizesLen-1:
      if rsizes[i] == size:
        rsizes[i] = rsizes[rsizesLen-1]
        dec rsizesLen
        return
    c_fprintf(stdout, "%ld\n", size)
    sysAssert(false, "untracked size!")
else:
  template trackSize(x) = discard
  template untrackSize(x) = discard
when false:
  # not yet used by the GCs
  proc rawTryAlloc(a: var MemRegion; requestedSize: int): pointer =
    sysAssert(allocInv(a), "rawAlloc: begin")
    sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
    sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
    var size = roundup(requestedSize, MemAlign)
    inc a.occ, size
    trackSize(size)
    sysAssert(size >= requestedSize, "insufficient allocated size!")
    #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
    if size <= SmallChunkSize-smallChunkOverhead():
      # allocate a small block: for small chunks, we use only its next pointer
      var s = size div MemAlign
      var c = a.freeSmallChunks[s]
      if c == nil:
        result = nil
      else:
        sysAssert c.size == size, "rawAlloc 6"
        if c.freeList == nil:
          sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
                    "rawAlloc 7")
          result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
          inc(c.acc, size)
        else:
          result = c.freeList
          sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
          c.freeList = c.freeList.next
        dec(c.free, size)
        sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
        if c.free < size:
          listRemove(a.freeSmallChunks[s], c)
          sysAssert(allocInv(a), "rawAlloc: end listRemove test")
        sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
            size == 0, "rawAlloc 21")
        sysAssert(allocInv(a), "rawAlloc: end small size")
    else:
      inc size, bigChunkOverhead()
      var fl, sl: int
      mappingSearch(size, fl, sl)
      sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
      let c = findSuitableBlock(a, fl, sl)
      if c != nil:
        removeChunkFromMatrix2(a, c, fl, sl)
        if c.size >= size + PageSize:
          splitChunk(a, c, size)
        # set 'used' to true:
        c.prevSize = 1
        incl(a, a.chunkStarts, pageIndex(c))
        dec(a.freeMem, size)
        result = addr(c.data)
        sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
        sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
        if a.root == nil: a.root = getBottom(a)
        add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
      else:
        result = nil
proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
  when defined(nimTypeNames):
    inc(a.allocCounter)
  sysAssert(allocInv(a), "rawAlloc: begin")
  sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
  sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
  var size = roundup(requestedSize, MemAlign)
  sysAssert(size >= requestedSize, "insufficient allocated size!")
  #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
  if size <= SmallChunkSize-smallChunkOverhead():
    # allocate a small block: for small chunks, we use only its next pointer
    var s = size div MemAlign
    var c = a.freeSmallChunks[s]
    if c == nil:
      c = getSmallChunk(a)
      c.freeList = nil
      sysAssert c.size == PageSize, "rawAlloc 3"
      c.size = size
      c.acc = size
      c.free = SmallChunkSize - smallChunkOverhead() - size
      c.next = nil
      c.prev = nil
      listAdd(a.freeSmallChunks[s], c)
      result = addr(c.data)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
    else:
      sysAssert(allocInv(a), "rawAlloc: begin c != nil")
      sysAssert c.next != c, "rawAlloc 5"
      #if c.size != size:
      #  c_fprintf(stdout, "csize: %lld; size %lld\n", c.size, size)
      sysAssert c.size == size, "rawAlloc 6"
      if c.freeList == nil:
        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
                  "rawAlloc 7")
        result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
        inc(c.acc, size)
      else:
        result = c.freeList
        sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
        c.freeList = c.freeList.next
      dec(c.free, size)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
      sysAssert(allocInv(a), "rawAlloc: end c != nil")
    sysAssert(allocInv(a), "rawAlloc: before c.free < size")
    if c.free < size:
      sysAssert(allocInv(a), "rawAlloc: before listRemove test")
      listRemove(a.freeSmallChunks[s], c)
      sysAssert(allocInv(a), "rawAlloc: end listRemove test")
    sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
        size == 0, "rawAlloc 21")
    sysAssert(allocInv(a), "rawAlloc: end small size")
    inc a.occ, size
    trackSize(c.size)
  else:
    size = requestedSize + bigChunkOverhead() # roundup(requestedSize+bigChunkOverhead(), PageSize)
    # allocate a large block
    var c = if size >= HugeChunkSize: getHugeChunk(a, size)
            else: getBigChunk(a, size)
    sysAssert c.prev == nil, "rawAlloc 10"
    sysAssert c.next == nil, "rawAlloc 11"
    result = addr(c.data)
    sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
    sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
    if a.root == nil: a.root = getBottom(a)
    add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
    inc a.occ, c.size
    trackSize(c.size)
  sysAssert(isAccessible(a, result), "rawAlloc 14")
  sysAssert(allocInv(a), "rawAlloc: end")
  when logAlloc: cprintf("var pointer_%p = alloc(%ld)\n", result, requestedSize)
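
# Worked size example for the small-block path above (illustrative; assumes
# MemAlign == 8): a request for 65 bytes is rounded up to 72 and served from
# freeSmallChunks[72 div 8], a page-sized SmallChunk carved into 72-byte cells.
when false:
  doAssert roundup(65, MemAlign) == 72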
proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
  result = rawAlloc(a, requestedSize)
  zeroMem(result, requestedSize)

proc rawDealloc(a: var MemRegion, p: pointer) =
  when defined(nimTypeNames):
    inc(a.deallocCounter)
  #sysAssert(isAllocatedPtr(a, p), "rawDealloc: no allocated pointer")
  sysAssert(allocInv(a), "rawDealloc: begin")
  var c = pageAddr(p)
  if isSmallChunk(c):
    # `p` is within a small chunk:
    var c = cast[PSmallChunk](c)
    var s = c.size
    dec a.occ, s
    untrackSize(s)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case A)"
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
        s == 0, "rawDealloc 3")
    var f = cast[ptr FreeCell](p)
    #echo("setting to nil: ", $cast[ByteAddress](addr(f.zeroField)))
    sysAssert(f.zeroField != 0, "rawDealloc 1")
    f.zeroField = 0
    f.next = c.freeList
    c.freeList = f
    when overwriteFree:
      # set to 0xff to check for usage after free bugs:
      nimSetMem(cast[pointer](cast[int](p) +% sizeof(FreeCell)), -1'i32,
                s -% sizeof(FreeCell))
    # check if it is not in the freeSmallChunks[s] list:
    if c.free < s:
      # add it to the freeSmallChunks[s] array:
      listAdd(a.freeSmallChunks[s div MemAlign], c)
      inc(c.free, s)
    else:
      inc(c.free, s)
      if c.free == SmallChunkSize-smallChunkOverhead():
        listRemove(a.freeSmallChunks[s div MemAlign], c)
        c.size = SmallChunkSize
        freeBigChunk(a, cast[PBigChunk](c))
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
        s == 0, "rawDealloc 2")
  else:
    # set to 0xff to check for usage after free bugs:
    when overwriteFree: nimSetMem(p, -1'i32, c.size -% bigChunkOverhead())
    # free big chunk
    var c = cast[PBigChunk](c)
    dec a.occ, c.size
    untrackSize(c.size)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case B)"
    a.deleted = getBottom(a)
    del(a, a.root, cast[int](addr(c.data)))
    if c.size >= HugeChunkSize: freeHugeChunk(a, c)
    else: freeBigChunk(a, c)
  sysAssert(allocInv(a), "rawDealloc: end")
  when logAlloc: cprintf("dealloc(pointer_%p)\n", p)
proc isAllocatedPtr(a: MemRegion, p: pointer): bool =
  if isAccessible(a, p):
    var c = pageAddr(p)
    if not chunkUnused(c):
      if isSmallChunk(c):
        var c = cast[PSmallChunk](c)
        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                     smallChunkOverhead()
        result = (c.acc >% offset) and (offset %% c.size == 0) and
            (cast[ptr FreeCell](p).zeroField >% 1)
      else:
        var c = cast[PBigChunk](c)
        result = p == addr(c.data) and cast[ptr FreeCell](p).zeroField >% 1

proc prepareForInteriorPointerChecking(a: var MemRegion) {.inline.} =
  a.minLargeObj = lowGauge(a.root)
  a.maxLargeObj = highGauge(a.root)

proc interiorAllocatedPtr(a: MemRegion, p: pointer): pointer =
  if isAccessible(a, p):
    var c = pageAddr(p)
    if not chunkUnused(c):
      if isSmallChunk(c):
        var c = cast[PSmallChunk](c)
        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                     smallChunkOverhead()
        if c.acc >% offset:
          sysAssert(cast[ByteAddress](addr(c.data)) +% offset ==
                    cast[ByteAddress](p), "offset is not what you think it is")
          var d = cast[ptr FreeCell](cast[ByteAddress](addr(c.data)) +%
                    offset -% (offset %% c.size))
          if d.zeroField >% 1:
            result = d
            sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
      else:
        var c = cast[PBigChunk](c)
        var d = addr(c.data)
        if p >= d and cast[ptr FreeCell](d).zeroField >% 1:
          result = d
          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
  else:
    var q = cast[int](p)
    if q >=% a.minLargeObj and q <=% a.maxLargeObj:
      # this check is highly effective! Test fails for 99.96% of all checks on
      # an x86-64.
      var avlNode = inRange(a.root, q)
      if avlNode != nil:
        var k = cast[pointer](avlNode.key)
        var c = cast[PBigChunk](pageAddr(k))
        sysAssert(addr(c.data) == k, " k is not the same as addr(c.data)!")
        if cast[ptr FreeCell](k).zeroField >% 1:
          result = k
          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
proc ptrSize(p: pointer): int =
  var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  var c = pageAddr(p)
  sysAssert(not chunkUnused(c), "ptrSize")
  result = c.size -% sizeof(FreeCell)
  if not isSmallChunk(c):
    dec result, bigChunkOverhead()

proc alloc(allocator: var MemRegion, size: Natural): pointer {.gcsafe.} =
  result = rawAlloc(allocator, size+sizeof(FreeCell))
  cast[ptr FreeCell](result).zeroField = 1 # mark it as used
  sysAssert(not isAllocatedPtr(allocator, result), "alloc")
  result = cast[pointer](cast[ByteAddress](result) +% sizeof(FreeCell))
  track("alloc", result, size)
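
# Header layout sketch for alloc/dealloc (illustrative, not compiled): the
# pointer handed to the caller sits one FreeCell past the raw block, and the
# header's zeroField == 1 marks a manually managed cell.
when false:
  var r: MemRegion
  let p = alloc(r, 100)
  let h = cast[ptr FreeCell](cast[int](p) - sizeof(FreeCell))
  doAssert h.zeroField == 1
  dealloc(r, p)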
proc alloc0(allocator: var MemRegion, size: Natural): pointer =
  result = alloc(allocator, size)
  zeroMem(result, size)

proc dealloc(allocator: var MemRegion, p: pointer) =
  sysAssert(p != nil, "dealloc: p is nil")
  var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  sysAssert(x != nil, "dealloc: x is nil")
  sysAssert(isAccessible(allocator, x), "is not accessible")
  sysAssert(cast[ptr FreeCell](x).zeroField == 1, "dealloc: object header corrupted")
  rawDealloc(allocator, x)
  sysAssert(not isAllocatedPtr(allocator, x), "dealloc: object still accessible")
  track("dealloc", p, 0)

proc realloc(allocator: var MemRegion, p: pointer, newsize: Natural): pointer =
  if newsize > 0:
    result = alloc0(allocator, newsize)
    if p != nil:
      copyMem(result, p, min(ptrSize(p), newsize))
      dealloc(allocator, p)
  elif p != nil:
    dealloc(allocator, p)
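
# Usage sketch for realloc (illustrative): it always allocates fresh memory
# and copies min(ptrSize(p), newsize) bytes; newsize == 0 just frees.
when false:
  var r: MemRegion
  var p = alloc0(r, 32)
  p = realloc(r, p, 64)  # first 32 bytes preserved, rest zeroed
  p = realloc(r, p, 0)   # frees the block; result is nil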
proc deallocOsPages(a: var MemRegion) =
  # we free every 'ordinarily' allocated page by iterating over the page bits:
  var it = addr(a.heapLinks)
  while true:
    let next = it.next
    for i in 0..it.len-1:
      let (p, size) = it.chunks[i]
      when defined(debugHeapLinks):
        cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
                it, it.size, next)
      sysAssert size >= PageSize, "origSize too small"
      osDeallocPages(p, size)
    it = next
    if it == nil: break
  # And then we free the pages that are in use for the page bits:
  llDeallocAll(a)

proc getFreeMem(a: MemRegion): int {.inline.} = result = a.freeMem
proc getTotalMem(a: MemRegion): int {.inline.} = result = a.currMem
proc getOccupiedMem(a: MemRegion): int {.inline.} =
  result = a.occ
  # a.currMem - a.freeMem

when defined(nimTypeNames):
  proc getMemCounters(a: MemRegion): (int, int) {.inline.} =
    (a.allocCounter, a.deallocCounter)

# ---------------------- thread memory region -------------------------------

template instantiateForRegion(allocator: untyped) =
  {.push stackTrace: off.}

  when defined(fulldebug):
    proc interiorAllocatedPtr*(p: pointer): pointer =
      result = interiorAllocatedPtr(allocator, p)

    proc isAllocatedPtr*(p: pointer): bool =
      let p = cast[pointer](cast[ByteAddress](p)-%ByteAddress(sizeof(Cell)))
      result = isAllocatedPtr(allocator, p)

  proc deallocOsPages = deallocOsPages(allocator)

  proc alloc(size: Natural): pointer =
    result = alloc(allocator, size)

  proc alloc0(size: Natural): pointer =
    result = alloc0(allocator, size)

  proc dealloc(p: pointer) =
    dealloc(allocator, p)

  proc realloc(p: pointer, newsize: Natural): pointer =
    result = realloc(allocator, p, newSize)

  when false:
    proc countFreeMem(): int =
      # only used for assertions
      var it = allocator.freeChunksList
      while it != nil:
        inc(result, it.size)
        it = it.next

  proc getFreeMem(): int =
    result = allocator.freeMem
    #sysAssert(result == countFreeMem())

  proc getTotalMem(): int = return allocator.currMem
  proc getOccupiedMem(): int = return allocator.occ #getTotalMem() - getFreeMem()
  proc getMaxMem*(): int = return getMaxMem(allocator)

  when defined(nimTypeNames):
    proc getMemCounters*(): (int, int) = getMemCounters(allocator)

  # -------------------- shared heap region ----------------------------------
  when hasThreadSupport:
    var sharedHeap: MemRegion
    var heapLock: SysLock
    initSysLock(heapLock)

  proc allocShared(size: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = alloc(sharedHeap, size)
      releaseSys(heapLock)
    else:
      result = alloc(size)

  proc allocShared0(size: Natural): pointer =
    result = allocShared(size)
    zeroMem(result, size)

  proc deallocShared(p: pointer) =
    when hasThreadSupport:
      acquireSys(heapLock)
      dealloc(sharedHeap, p)
      releaseSys(heapLock)
    else:
      dealloc(p)

  proc reallocShared(p: pointer, newsize: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = realloc(sharedHeap, p, newsize)
      releaseSys(heapLock)
    else:
      result = realloc(p, newSize)

  when hasThreadSupport:
    template sharedMemStatsShared(v: int) {.immediate.} =
      acquireSys(heapLock)
      result = v
      releaseSys(heapLock)

    proc getFreeSharedMem(): int =
      sharedMemStatsShared(sharedHeap.freeMem)

    proc getTotalSharedMem(): int =
      sharedMemStatsShared(sharedHeap.currMem)

    proc getOccupiedSharedMem(): int =
      sharedMemStatsShared(sharedHeap.occ)
      #sharedMemStatsShared(sharedHeap.currMem - sharedHeap.freeMem)
  {.pop.}

{.pop.}
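
# Usage sketch (illustrative): the runtime instantiates a per-thread region
# roughly like this; the real call site lives in system/mmdisp.nim.
when false:
  var allocator {.threadvar.}: MemRegion
  instantiateForRegion(allocator)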