# alloc.nim
#
#
#            Nim's Runtime Library
#        (c) Copyright 2012 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

# Low level allocator for Nim. Has been designed to support the GC.
  10. {.push profiler:off.}
  11. include osalloc
  12. template track(op, address, size) =
  13. when defined(memTracker):
  14. memTrackerOp(op, address, size)
  15. # We manage *chunks* of memory. Each chunk is a multiple of the page size.
  16. # Each chunk starts at an address that is divisible by the page size.
  17. const
  18. nimMinHeapPages {.intdefine.} = 128 # 0.5 MB
  19. SmallChunkSize = PageSize
  20. MaxFli = 30
  21. MaxLog2Sli = 5 # 32, this cannot be increased without changing 'uint32'
  22. # everywhere!
  23. MaxSli = 1 shl MaxLog2Sli
  24. FliOffset = 6
  25. RealFli = MaxFli - FliOffset
  26. # size of chunks in last matrix bin
  27. MaxBigChunkSize = 1 shl MaxFli - 1 shl (MaxFli-MaxLog2Sli-1)
  28. HugeChunkSize = MaxBigChunkSize + 1
  29. type
  30. PTrunk = ptr Trunk
  31. Trunk = object
  32. next: PTrunk # all nodes are connected with this pointer
  33. key: int # start address at bit 0
  34. bits: array[0..IntsPerTrunk-1, uint] # a bit vector
  35. TrunkBuckets = array[0..255, PTrunk]
  36. IntSet = object
  37. data: TrunkBuckets
  38. type
  39. FreeCell {.final, pure.} = object
  40. next: ptr FreeCell # next free cell in chunk (overlaid with refcount)
  41. when not defined(gcDestructors):
  42. zeroField: int # 0 means cell is not used (overlaid with typ field)
  43. # 1 means cell is manually managed pointer
  44. # otherwise a PNimType is stored in there
  45. else:
  46. alignment: int
  47. PChunk = ptr BaseChunk
  48. PBigChunk = ptr BigChunk
  49. PSmallChunk = ptr SmallChunk
  50. BaseChunk {.pure, inheritable.} = object
  51. prevSize: int # size of previous chunk; for coalescing
  52. # 0th bit == 1 if 'used
  53. size: int # if < PageSize it is a small chunk
  54. SmallChunk = object of BaseChunk
  55. next, prev: PSmallChunk # chunks of the same size
  56. freeList: ptr FreeCell
  57. free: int # how many bytes remain
  58. acc: int # accumulator for small object allocation
  59. when defined(nimAlignPragma):
  60. data {.align: MemAlign.}: UncheckedArray[byte] # start of usable memory
  61. else:
  62. data: UncheckedArray[byte]
  63. BigChunk = object of BaseChunk # not necessarily > PageSize!
  64. next, prev: PBigChunk # chunks of the same (or bigger) size
  65. when defined(nimAlignPragma):
  66. data {.align: MemAlign.}: UncheckedArray[byte] # start of usable memory
  67. else:
  68. data: UncheckedArray[byte]
  69. template smallChunkOverhead(): untyped = sizeof(SmallChunk)
  70. template bigChunkOverhead(): untyped = sizeof(BigChunk)
  71. # ------------- chunk table ---------------------------------------------------
  72. # We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
  73. # endings of big chunks. This is needed by the merging operation. The only
  74. # remaining operation is best-fit for big chunks. Since there is a size-limit
  75. # for big chunks (because greater than the limit means they are returned back
  76. # to the OS), a fixed size array can be used.
  77. type
  78. PLLChunk = ptr LLChunk
  79. LLChunk = object ## *low-level* chunk
  80. size: int # remaining size
  81. acc: int # accumulator
  82. next: PLLChunk # next low-level chunk; only needed for dealloc
  83. PAvlNode = ptr AvlNode
  84. AvlNode = object
  85. link: array[0..1, PAvlNode] # Left (0) and right (1) links
  86. key, upperBound: int
  87. level: int
  88. HeapLinks = object
  89. len: int
  90. chunks: array[30, (PBigChunk, int)]
  91. next: ptr HeapLinks
  92. MemRegion = object
  93. minLargeObj, maxLargeObj: int
  94. freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
  95. flBitmap: uint32
  96. slBitmap: array[RealFli, uint32]
  97. matrix: array[RealFli, array[MaxSli, PBigChunk]]
  98. llmem: PLLChunk
  99. currMem, maxMem, freeMem, occ: int # memory sizes (allocated from OS)
  100. lastSize: int # needed for the case that OS gives us pages linearly
  101. chunkStarts: IntSet
  102. root, deleted, last, freeAvlNodes: PAvlNode
  103. locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
  104. nextChunkSize: int
  105. bottomData: AvlNode
  106. heapLinks: HeapLinks
  107. when defined(nimTypeNames):
  108. allocCounter, deallocCounter: int
  109. const
  110. fsLookupTable: array[byte, int8] = [
  111. -1'i8, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
  112. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  113. 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  114. 5, 5, 5, 5, 5, 5, 5, 5,
  115. 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  116. 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  117. 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  118. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  119. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  120. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  121. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  122. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  123. 7, 7, 7, 7, 7, 7, 7, 7
  124. ]
  125. proc msbit(x: uint32): int {.inline.} =
  126. let a = if x <= 0xff_ff'u32:
  127. (if x <= 0xff: 0 else: 8)
  128. else:
  129. (if x <= 0xff_ff_ff'u32: 16 else: 24)
  130. result = int(fsLookupTable[byte(x shr a)]) + a
  131. proc lsbit(x: uint32): int {.inline.} =
  132. msbit(x and ((not x) + 1))
  133. proc setBit(nr: int; dest: var uint32) {.inline.} =
  134. dest = dest or (1u32 shl (nr and 0x1f))
  135. proc clearBit(nr: int; dest: var uint32) {.inline.} =
  136. dest = dest and not (1u32 shl (nr and 0x1f))
  137. proc mappingSearch(r, fl, sl: var int) {.inline.} =
  138. #let t = (1 shl (msbit(uint32 r) - MaxLog2Sli)) - 1
  139. # This diverges from the standard TLSF algorithm because we need to ensure
  140. # PageSize alignment:
  141. let t = roundup((1 shl (msbit(uint32 r) - MaxLog2Sli)), PageSize) - 1
  142. r = r + t
  143. r = r and not t
  144. r = min(r, MaxBigChunkSize)
  145. fl = msbit(uint32 r)
  146. sl = (r shr (fl - MaxLog2Sli)) - MaxSli
  147. dec fl, FliOffset
  148. sysAssert((r and PageMask) == 0, "mappingSearch: still not aligned")
  149. # See http://www.gii.upv.es/tlsf/files/papers/tlsf_desc.pdf for details of
  150. # this algorithm.
  151. proc mappingInsert(r: int): tuple[fl, sl: int] {.inline.} =
  152. sysAssert((r and PageMask) == 0, "mappingInsert: still not aligned")
  153. result.fl = msbit(uint32 r)
  154. result.sl = (r shr (result.fl - MaxLog2Sli)) - MaxSli
  155. dec result.fl, FliOffset
  156. template mat(): untyped = a.matrix[fl][sl]
  157. proc findSuitableBlock(a: MemRegion; fl, sl: var int): PBigChunk {.inline.} =
  158. let tmp = a.slBitmap[fl] and (not 0u32 shl sl)
  159. result = nil
  160. if tmp != 0:
  161. sl = lsbit(tmp)
  162. result = mat()
  163. else:
  164. fl = lsbit(a.flBitmap and (not 0u32 shl (fl + 1)))
  165. if fl > 0:
  166. sl = lsbit(a.slBitmap[fl])
  167. result = mat()
  168. template clearBits(sl, fl) =
  169. clearBit(sl, a.slBitmap[fl])
  170. if a.slBitmap[fl] == 0u32:
  171. # do not forget to cascade:
  172. clearBit(fl, a.flBitmap)
  173. proc removeChunkFromMatrix(a: var MemRegion; b: PBigChunk) =
  174. let (fl, sl) = mappingInsert(b.size)
  175. if b.next != nil: b.next.prev = b.prev
  176. if b.prev != nil: b.prev.next = b.next
  177. if mat() == b:
  178. mat() = b.next
  179. if mat() == nil:
  180. clearBits(sl, fl)
  181. b.prev = nil
  182. b.next = nil
  183. proc removeChunkFromMatrix2(a: var MemRegion; b: PBigChunk; fl, sl: int) =
  184. mat() = b.next
  185. if mat() != nil:
  186. mat().prev = nil
  187. else:
  188. clearBits(sl, fl)
  189. b.prev = nil
  190. b.next = nil
  191. proc addChunkToMatrix(a: var MemRegion; b: PBigChunk) =
  192. let (fl, sl) = mappingInsert(b.size)
  193. b.prev = nil
  194. b.next = mat()
  195. if mat() != nil:
  196. mat().prev = b
  197. mat() = b
  198. setBit(sl, a.slBitmap[fl])
  199. setBit(fl, a.flBitmap)
  200. proc incCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  201. inc(a.currMem, bytes)
  202. proc decCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  203. a.maxMem = max(a.maxMem, a.currMem)
  204. dec(a.currMem, bytes)
  205. proc getMaxMem(a: var MemRegion): int =
  206. # Since we update maxPagesCount only when freeing pages,
  207. # maxPagesCount may not be up to date. Thus we use the
  208. # maximum of these both values here:
  209. result = max(a.currMem, a.maxMem)
  210. proc llAlloc(a: var MemRegion, size: int): pointer =
  211. # *low-level* alloc for the memory managers data structures. Deallocation
  212. # is done at the end of the allocator's life time.
  213. if a.llmem == nil or size > a.llmem.size:
  214. # the requested size is ``roundup(size+sizeof(LLChunk), PageSize)``, but
  215. # since we know ``size`` is a (small) constant, we know the requested size
  216. # is one page:
  217. sysAssert roundup(size+sizeof(LLChunk), PageSize) == PageSize, "roundup 6"
  218. var old = a.llmem # can be nil and is correct with nil
  219. a.llmem = cast[PLLChunk](osAllocPages(PageSize))
  220. when defined(avlcorruption):
  221. trackLocation(a.llmem, PageSize)
  222. incCurrMem(a, PageSize)
  223. a.llmem.size = PageSize - sizeof(LLChunk)
  224. a.llmem.acc = sizeof(LLChunk)
  225. a.llmem.next = old
  226. result = cast[pointer](cast[ByteAddress](a.llmem) + a.llmem.acc)
  227. dec(a.llmem.size, size)
  228. inc(a.llmem.acc, size)
  229. zeroMem(result, size)
  230. proc getBottom(a: var MemRegion): PAvlNode =
  231. result = addr(a.bottomData)
  232. if result.link[0] == nil:
  233. result.link[0] = result
  234. result.link[1] = result
  235. proc allocAvlNode(a: var MemRegion, key, upperBound: int): PAvlNode =
  236. if a.freeAvlNodes != nil:
  237. result = a.freeAvlNodes
  238. a.freeAvlNodes = a.freeAvlNodes.link[0]
  239. else:
  240. result = cast[PAvlNode](llAlloc(a, sizeof(AvlNode)))
  241. when defined(avlcorruption):
  242. cprintf("tracking location: %p\n", result)
  243. result.key = key
  244. result.upperBound = upperBound
  245. let bottom = getBottom(a)
  246. result.link[0] = bottom
  247. result.link[1] = bottom
  248. result.level = 1
  249. #when defined(avlcorruption):
  250. # track("allocAvlNode", result, sizeof(AvlNode))
  251. sysAssert(bottom == addr(a.bottomData), "bottom data")
  252. sysAssert(bottom.link[0] == bottom, "bottom link[0]")
  253. sysAssert(bottom.link[1] == bottom, "bottom link[1]")
  254. proc deallocAvlNode(a: var MemRegion, n: PAvlNode) {.inline.} =
  255. n.link[0] = a.freeAvlNodes
  256. a.freeAvlNodes = n
  257. proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int) =
  258. var it = addr(a.heapLinks)
  259. while it != nil and it.len >= it.chunks.len: it = it.next
  260. if it == nil:
  261. var n = cast[ptr HeapLinks](llAlloc(a, sizeof(HeapLinks)))
  262. n.next = a.heapLinks.next
  263. a.heapLinks.next = n
  264. n.chunks[0] = (p, size)
  265. n.len = 1
  266. else:
  267. let L = it.len
  268. it.chunks[L] = (p, size)
  269. inc it.len
  270. include "system/avltree"
  271. proc llDeallocAll(a: var MemRegion) =
  272. var it = a.llmem
  273. while it != nil:
  274. # we know each block in the list has the size of 1 page:
  275. var next = it.next
  276. osDeallocPages(it, PageSize)
  277. it = next
  278. a.llmem = nil
  279. proc intSetGet(t: IntSet, key: int): PTrunk =
  280. var it = t.data[key and high(t.data)]
  281. while it != nil:
  282. if it.key == key: return it
  283. it = it.next
  284. result = nil
  285. proc intSetPut(a: var MemRegion, t: var IntSet, key: int): PTrunk =
  286. result = intSetGet(t, key)
  287. if result == nil:
  288. result = cast[PTrunk](llAlloc(a, sizeof(result[])))
  289. result.next = t.data[key and high(t.data)]
  290. t.data[key and high(t.data)] = result
  291. result.key = key
  292. proc contains(s: IntSet, key: int): bool =
  293. var t = intSetGet(s, key shr TrunkShift)
  294. if t != nil:
  295. var u = key and TrunkMask
  296. result = (t.bits[u shr IntShift] and (uint(1) shl (u and IntMask))) != 0
  297. else:
  298. result = false
  299. proc incl(a: var MemRegion, s: var IntSet, key: int) =
  300. var t = intSetPut(a, s, key shr TrunkShift)
  301. var u = key and TrunkMask
  302. t.bits[u shr IntShift] = t.bits[u shr IntShift] or (uint(1) shl (u and IntMask))
  303. proc excl(s: var IntSet, key: int) =
  304. var t = intSetGet(s, key shr TrunkShift)
  305. if t != nil:
  306. var u = key and TrunkMask
  307. t.bits[u shr IntShift] = t.bits[u shr IntShift] and not
  308. (uint(1) shl (u and IntMask))
  309. iterator elements(t: IntSet): int {.inline.} =
  310. # while traversing it is forbidden to change the set!
  311. for h in 0..high(t.data):
  312. var r = t.data[h]
  313. while r != nil:
  314. var i = 0
  315. while i <= high(r.bits):
  316. var w = r.bits[i] # taking a copy of r.bits[i] here is correct, because
  317. # modifying operations are not allowed during traversation
  318. var j = 0
  319. while w != 0: # test all remaining bits for zero
  320. if (w and 1) != 0: # the bit is set!
  321. yield (r.key shl TrunkShift) or (i shl IntShift +% j)
  322. inc(j)
  323. w = w shr 1
  324. inc(i)
  325. r = r.next
  326. proc isSmallChunk(c: PChunk): bool {.inline.} =
  327. return c.size <= SmallChunkSize-smallChunkOverhead()
  328. proc chunkUnused(c: PChunk): bool {.inline.} =
  329. result = (c.prevSize and 1) == 0
  330. iterator allObjects(m: var MemRegion): pointer {.inline.} =
  331. m.locked = true
  332. for s in elements(m.chunkStarts):
  333. # we need to check here again as it could have been modified:
  334. if s in m.chunkStarts:
  335. let c = cast[PChunk](s shl PageShift)
  336. if not chunkUnused(c):
  337. if isSmallChunk(c):
  338. var c = cast[PSmallChunk](c)
  339. let size = c.size
  340. var a = cast[ByteAddress](addr(c.data))
  341. let limit = a + c.acc
  342. while a <% limit:
  343. yield cast[pointer](a)
  344. a = a +% size
  345. else:
  346. let c = cast[PBigChunk](c)
  347. yield addr(c.data)
  348. m.locked = false
  349. proc iterToProc*(iter: typed, envType: typedesc; procName: untyped) {.
  350. magic: "Plugin", compileTime.}
  351. when not defined(gcDestructors):
  352. proc isCell(p: pointer): bool {.inline.} =
  353. result = cast[ptr FreeCell](p).zeroField >% 1
  354. # ------------- chunk management ----------------------------------------------
  355. proc pageIndex(c: PChunk): int {.inline.} =
  356. result = cast[ByteAddress](c) shr PageShift
  357. proc pageIndex(p: pointer): int {.inline.} =
  358. result = cast[ByteAddress](p) shr PageShift
  359. proc pageAddr(p: pointer): PChunk {.inline.} =
  360. result = cast[PChunk](cast[ByteAddress](p) and not PageMask)
  361. #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
  362. when false:
  363. proc writeFreeList(a: MemRegion) =
  364. var it = a.freeChunksList
  365. c_fprintf(stdout, "freeChunksList: %p\n", it)
  366. while it != nil:
  367. c_fprintf(stdout, "it: %p, next: %p, prev: %p, size: %ld\n",
  368. it, it.next, it.prev, it.size)
  369. it = it.next
  370. const nimMaxHeap {.intdefine.} = 0
  371. proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
  372. when not defined(emscripten):
  373. if not a.blockChunkSizeIncrease:
  374. let usedMem = a.occ #a.currMem # - a.freeMem
  375. when nimMaxHeap != 0:
  376. if usedMem > nimMaxHeap * 1024 * 1024:
  377. raiseOutOfMem()
  378. if usedMem < 64 * 1024:
  379. a.nextChunkSize = PageSize*4
  380. else:
  381. a.nextChunkSize = min(roundup(usedMem shr 2, PageSize), a.nextChunkSize * 2)
  382. a.nextChunkSize = min(a.nextChunkSize, MaxBigChunkSize)
  383. var size = size
  384. if size > a.nextChunkSize:
  385. result = cast[PBigChunk](osAllocPages(size))
  386. else:
  387. result = cast[PBigChunk](osTryAllocPages(a.nextChunkSize))
  388. if result == nil:
  389. result = cast[PBigChunk](osAllocPages(size))
  390. a.blockChunkSizeIncrease = true
  391. else:
  392. size = a.nextChunkSize
  393. incCurrMem(a, size)
  394. inc(a.freeMem, size)
  395. a.addHeapLink(result, size)
  396. when defined(debugHeapLinks):
  397. cprintf("owner: %p; result: %p; next pointer %p; size: %ld\n", addr(a),
  398. result, result.heapLink, result.size)
  399. when defined(memtracker):
  400. trackLocation(addr result.size, sizeof(int))
  401. sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
  402. #zeroMem(result, size)
  403. result.next = nil
  404. result.prev = nil
  405. result.size = size
  406. # update next.prevSize:
  407. var nxt = cast[ByteAddress](result) +% size
  408. sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
  409. var next = cast[PChunk](nxt)
  410. if pageIndex(next) in a.chunkStarts:
  411. #echo("Next already allocated!")
  412. next.prevSize = size or (next.prevSize and 1)
  413. # set result.prevSize:
  414. var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
  415. var prv = cast[ByteAddress](result) -% lastSize
  416. sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
  417. var prev = cast[PChunk](prv)
  418. if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
  419. #echo("Prev already allocated!")
  420. result.prevSize = lastSize or (result.prevSize and 1)
  421. else:
  422. result.prevSize = 0 or (result.prevSize and 1) # unknown
  423. # but do not overwrite 'used' field
  424. a.lastSize = size # for next request
  425. sysAssert((cast[int](result) and PageMask) == 0, "requestOschunks: unaligned chunk")
  426. proc isAccessible(a: MemRegion, p: pointer): bool {.inline.} =
  427. result = contains(a.chunkStarts, pageIndex(p))
  428. proc contains[T](list, x: T): bool =
  429. var it = list
  430. while it != nil:
  431. if it == x: return true
  432. it = it.next
  433. proc listAdd[T](head: var T, c: T) {.inline.} =
  434. sysAssert(c notin head, "listAdd 1")
  435. sysAssert c.prev == nil, "listAdd 2"
  436. sysAssert c.next == nil, "listAdd 3"
  437. c.next = head
  438. if head != nil:
  439. sysAssert head.prev == nil, "listAdd 4"
  440. head.prev = c
  441. head = c
  442. proc listRemove[T](head: var T, c: T) {.inline.} =
  443. sysAssert(c in head, "listRemove")
  444. if c == head:
  445. head = c.next
  446. sysAssert c.prev == nil, "listRemove 2"
  447. if head != nil: head.prev = nil
  448. else:
  449. sysAssert c.prev != nil, "listRemove 3"
  450. c.prev.next = c.next
  451. if c.next != nil: c.next.prev = c.prev
  452. c.next = nil
  453. c.prev = nil
  454. proc updatePrevSize(a: var MemRegion, c: PBigChunk,
  455. prevSize: int) {.inline.} =
  456. var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
  457. sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
  458. if isAccessible(a, ri):
  459. ri.prevSize = prevSize or (ri.prevSize and 1)
  460. proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
  461. result = cast[PBigChunk](cast[ByteAddress](c) +% size)
  462. result.size = c.size - size
  463. track("result.size", addr result.size, sizeof(int))
  464. # XXX check if these two nil assignments are dead code given
  465. # addChunkToMatrix's implementation:
  466. result.next = nil
  467. result.prev = nil
  468. # size and not used:
  469. result.prevSize = size
  470. sysAssert((size and 1) == 0, "splitChunk 2")
  471. sysAssert((size and PageMask) == 0,
  472. "splitChunk: size is not a multiple of the PageSize")
  473. updatePrevSize(a, c, result.size)
  474. c.size = size
  475. incl(a, a.chunkStarts, pageIndex(result))
  476. proc splitChunk(a: var MemRegion, c: PBigChunk, size: int) =
  477. let rest = splitChunk2(a, c, size)
  478. addChunkToMatrix(a, rest)
  479. proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
  480. var c = c
  481. sysAssert(c.size >= PageSize, "freeBigChunk")
  482. inc(a.freeMem, c.size)
  483. c.prevSize = c.prevSize and not 1 # set 'used' to false
  484. when coalescLeft:
  485. let prevSize = c.prevSize
  486. if prevSize != 0:
  487. var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
  488. sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
  489. if isAccessible(a, le) and chunkUnused(le):
  490. sysAssert(not isSmallChunk(le), "freeBigChunk 5")
  491. if not isSmallChunk(le) and le.size < MaxBigChunkSize:
  492. removeChunkFromMatrix(a, cast[PBigChunk](le))
  493. inc(le.size, c.size)
  494. excl(a.chunkStarts, pageIndex(c))
  495. c = cast[PBigChunk](le)
  496. if c.size > MaxBigChunkSize:
  497. let rest = splitChunk2(a, c, MaxBigChunkSize)
  498. addChunkToMatrix(a, c)
  499. c = rest
  500. when coalescRight:
  501. var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
  502. sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "freeBigChunk 2")
  503. if isAccessible(a, ri) and chunkUnused(ri):
  504. sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
  505. if not isSmallChunk(ri) and c.size < MaxBigChunkSize:
  506. removeChunkFromMatrix(a, cast[PBigChunk](ri))
  507. inc(c.size, ri.size)
  508. excl(a.chunkStarts, pageIndex(ri))
  509. if c.size > MaxBigChunkSize:
  510. let rest = splitChunk2(a, c, MaxBigChunkSize)
  511. addChunkToMatrix(a, rest)
  512. addChunkToMatrix(a, c)
  513. proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
  514. sysAssert(size > 0, "getBigChunk 2")
  515. var size = size # roundup(size, PageSize)
  516. var fl = 0
  517. var sl = 0
  518. mappingSearch(size, fl, sl)
  519. sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
  520. result = findSuitableBlock(a, fl, sl)
  521. if result == nil:
  522. if size < nimMinHeapPages * PageSize:
  523. result = requestOsChunks(a, nimMinHeapPages * PageSize)
  524. splitChunk(a, result, size)
  525. else:
  526. result = requestOsChunks(a, size)
  527. # if we over allocated split the chunk:
  528. if result.size > size:
  529. splitChunk(a, result, size)
  530. else:
  531. removeChunkFromMatrix2(a, result, fl, sl)
  532. if result.size >= size + PageSize:
  533. splitChunk(a, result, size)
  534. # set 'used' to to true:
  535. result.prevSize = 1
  536. track("setUsedToFalse", addr result.size, sizeof(int))
  537. incl(a, a.chunkStarts, pageIndex(result))
  538. dec(a.freeMem, size)
  539. proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
  540. result = cast[PBigChunk](osAllocPages(size))
  541. incCurrMem(a, size)
  542. # XXX add this to the heap links. But also remove it from it later.
  543. when false: a.addHeapLink(result, size)
  544. sysAssert((cast[ByteAddress](result) and PageMask) == 0, "getHugeChunk")
  545. result.next = nil
  546. result.prev = nil
  547. result.size = size
  548. # set 'used' to to true:
  549. result.prevSize = 1
  550. incl(a, a.chunkStarts, pageIndex(result))
  551. proc freeHugeChunk(a: var MemRegion; c: PBigChunk) =
  552. let size = c.size
  553. sysAssert(size >= HugeChunkSize, "freeHugeChunk: invalid size")
  554. excl(a.chunkStarts, pageIndex(c))
  555. decCurrMem(a, size)
  556. osDeallocPages(c, size)
  557. proc getSmallChunk(a: var MemRegion): PSmallChunk =
  558. var res = getBigChunk(a, PageSize)
  559. sysAssert res.prev == nil, "getSmallChunk 1"
  560. sysAssert res.next == nil, "getSmallChunk 2"
  561. result = cast[PSmallChunk](res)
  562. # -----------------------------------------------------------------------------
  563. when not defined(gcDestructors):
  564. proc isAllocatedPtr(a: MemRegion, p: pointer): bool {.benign.}
  565. when true:
  566. template allocInv(a: MemRegion): bool = true
  567. else:
  568. proc allocInv(a: MemRegion): bool =
  569. ## checks some (not all yet) invariants of the allocator's data structures.
  570. for s in low(a.freeSmallChunks)..high(a.freeSmallChunks):
  571. var c = a.freeSmallChunks[s]
  572. while not (c == nil):
  573. if c.next == c:
  574. echo "[SYSASSERT] c.next == c"
  575. return false
  576. if not (c.size == s * MemAlign):
  577. echo "[SYSASSERT] c.size != s * MemAlign"
  578. return false
  579. var it = c.freeList
  580. while not (it == nil):
  581. if not (it.zeroField == 0):
  582. echo "[SYSASSERT] it.zeroField != 0"
  583. c_printf("%ld %p\n", it.zeroField, it)
  584. return false
  585. it = it.next
  586. c = c.next
  587. result = true
  588. when false:
  589. var
  590. rsizes: array[50_000, int]
  591. rsizesLen: int
  592. proc trackSize(size: int) =
  593. rsizes[rsizesLen] = size
  594. inc rsizesLen
  595. proc untrackSize(size: int) =
  596. for i in 0 .. rsizesLen-1:
  597. if rsizes[i] == size:
  598. rsizes[i] = rsizes[rsizesLen-1]
  599. dec rsizesLen
  600. return
  601. c_fprintf(stdout, "%ld\n", size)
  602. sysAssert(false, "untracked size!")
  603. else:
  604. template trackSize(x) = discard
  605. template untrackSize(x) = discard
  606. when false:
  607. # not yet used by the GCs
  608. proc rawTryAlloc(a: var MemRegion; requestedSize: int): pointer =
  609. sysAssert(allocInv(a), "rawAlloc: begin")
  610. sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
  611. sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
  612. var size = roundup(requestedSize, MemAlign)
  613. inc a.occ, size
  614. trackSize(size)
  615. sysAssert(size >= requestedSize, "insufficient allocated size!")
  616. #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
  617. if size <= SmallChunkSize-smallChunkOverhead():
  618. # allocate a small block: for small chunks, we use only its next pointer
  619. var s = size div MemAlign
  620. var c = a.freeSmallChunks[s]
  621. if c == nil:
  622. result = nil
  623. else:
  624. sysAssert c.size == size, "rawAlloc 6"
  625. if c.freeList == nil:
  626. sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
  627. "rawAlloc 7")
  628. result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
  629. inc(c.acc, size)
  630. else:
  631. result = c.freeList
  632. sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
  633. c.freeList = c.freeList.next
  634. dec(c.free, size)
  635. sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
  636. if c.free < size:
  637. listRemove(a.freeSmallChunks[s], c)
  638. sysAssert(allocInv(a), "rawAlloc: end listRemove test")
  639. sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
  640. size == 0, "rawAlloc 21")
  641. sysAssert(allocInv(a), "rawAlloc: end small size")
  642. else:
  643. inc size, bigChunkOverhead()
  644. var fl, sl: int
  645. mappingSearch(size, fl, sl)
  646. sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
  647. let c = findSuitableBlock(a, fl, sl)
  648. if c != nil:
  649. removeChunkFromMatrix2(a, c, fl, sl)
  650. if c.size >= size + PageSize:
  651. splitChunk(a, c, size)
  652. # set 'used' to to true:
  653. c.prevSize = 1
  654. incl(a, a.chunkStarts, pageIndex(c))
  655. dec(a.freeMem, size)
  656. result = addr(c.data)
  657. sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
  658. sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
  659. if a.root == nil: a.root = getBottom(a)
  660. add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
  661. else:
  662. result = nil
proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
  ## Allocates at least `requestedSize` bytes from region `a` and returns
  ## a pointer to the payload. Small requests (those that fit a small chunk
  ## after overhead) are served from the per-size lists in
  ## `a.freeSmallChunks`; anything larger gets its own big/huge chunk which
  ## is also registered in the AVL tree `a.root` for pointer lookups.
  when defined(nimTypeNames):
    inc(a.allocCounter)
  sysAssert(allocInv(a), "rawAlloc: begin")
  sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
  sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
  # round the size up to the allocator's alignment granularity:
  var size = roundup(requestedSize, MemAlign)
  sysAssert(size >= requestedSize, "insufficient allocated size!")
  #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
  if size <= SmallChunkSize-smallChunkOverhead():
    # allocate a small block: for small chunks, we use only its next pointer
    # `s` indexes the per-size free-chunk list for this size class:
    var s = size div MemAlign
    var c = a.freeSmallChunks[s]
    if c == nil:
      # no partially used chunk of this size class: start a fresh page
      c = getSmallChunk(a)
      c.freeList = nil
      sysAssert c.size == PageSize, "rawAlloc 3"
      c.size = size
      c.acc = size
      c.free = SmallChunkSize - smallChunkOverhead() - size
      c.next = nil
      c.prev = nil
      listAdd(a.freeSmallChunks[s], c)
      result = addr(c.data)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
    else:
      sysAssert(allocInv(a), "rawAlloc: begin c != nil")
      sysAssert c.next != c, "rawAlloc 5"
      #if c.size != size:
      #  c_fprintf(stdout, "csize: %lld; size %lld\n", c.size, size)
      sysAssert c.size == size, "rawAlloc 6"
      if c.freeList == nil:
        # no recycled cells: bump-allocate from the accumulated offset `acc`
        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
                  "rawAlloc 7")
        result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
        inc(c.acc, size)
      else:
        # pop a previously freed cell from the chunk's free list
        result = c.freeList
        when not defined(gcDestructors):
          sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
        c.freeList = c.freeList.next
      dec(c.free, size)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
      sysAssert(allocInv(a), "rawAlloc: end c != nil")
    sysAssert(allocInv(a), "rawAlloc: before c.free < size")
    if c.free < size:
      # chunk is full now; take it out of the per-size free list
      sysAssert(allocInv(a), "rawAlloc: before listRemove test")
      listRemove(a.freeSmallChunks[s], c)
      sysAssert(allocInv(a), "rawAlloc: end listRemove test")
    sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
        size == 0, "rawAlloc 21")
    sysAssert(allocInv(a), "rawAlloc: end small size")
    inc a.occ, size
    trackSize(c.size)
  else:
    size = requestedSize + bigChunkOverhead() #  roundup(requestedSize+bigChunkOverhead(), PageSize)
    # allocate a large block
    var c = if size >= HugeChunkSize: getHugeChunk(a, size)
            else: getBigChunk(a, size)
    sysAssert c.prev == nil, "rawAlloc 10"
    sysAssert c.next == nil, "rawAlloc 11"
    result = addr(c.data)
    sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
    sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
    # register the block in the AVL tree so isAccessible/interior pointer
    # checks can find it:
    if a.root == nil: a.root = getBottom(a)
    add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
    inc a.occ, c.size
    trackSize(c.size)
  sysAssert(isAccessible(a, result), "rawAlloc 14")
  sysAssert(allocInv(a), "rawAlloc: end")
  when logAlloc: cprintf("var pointer_%p = alloc(%ld)\n", result, requestedSize)
  734. proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
  735. result = rawAlloc(a, requestedSize)
  736. zeroMem(result, requestedSize)
proc rawDealloc(a: var MemRegion, p: pointer) =
  ## Returns the block `p` (obtained from `rawAlloc`) to region `a`.
  ## Small cells are pushed onto their chunk's free list; big/huge chunks
  ## are removed from the AVL tree and released as a whole.
  when defined(nimTypeNames):
    inc(a.deallocCounter)
  #sysAssert(isAllocatedPtr(a, p), "rawDealloc: no allocated pointer")
  sysAssert(allocInv(a), "rawDealloc: begin")
  var c = pageAddr(p)
  if isSmallChunk(c):
    # `p` is within a small chunk:
    var c = cast[PSmallChunk](c)
    var s = c.size
    dec a.occ, s
    untrackSize(s)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case A)"
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
               s == 0, "rawDealloc 3")
    # reuse the cell itself as the free-list node:
    var f = cast[ptr FreeCell](p)
    when not defined(gcDestructors):
      #echo("setting to nil: ", $cast[ByteAddress](addr(f.zeroField)))
      sysAssert(f.zeroField != 0, "rawDealloc 1")
      f.zeroField = 0
    f.next = c.freeList
    c.freeList = f
    when overwriteFree:
      # set to 0xff to check for usage after free bugs:
      nimSetMem(cast[pointer](cast[int](p) +% sizeof(FreeCell)), -1'i32,
               s -% sizeof(FreeCell))
    # check if it is not in the freeSmallChunks[s] list:
    if c.free < s:
      # add it to the freeSmallChunks[s] array:
      listAdd(a.freeSmallChunks[s div MemAlign], c)
      inc(c.free, s)
    else:
      inc(c.free, s)
      if c.free == SmallChunkSize-smallChunkOverhead():
        # the whole chunk became free: give the page back as a big chunk
        listRemove(a.freeSmallChunks[s div MemAlign], c)
        c.size = SmallChunkSize
        freeBigChunk(a, cast[PBigChunk](c))
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
               s == 0, "rawDealloc 2")
  else:
    # set to 0xff to check for usage after free bugs:
    when overwriteFree: nimSetMem(p, -1'i32, c.size -% bigChunkOverhead())
    # free big chunk
    var c = cast[PBigChunk](c)
    dec a.occ, c.size
    untrackSize(c.size)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case B)"
    # unregister from the AVL tree before releasing the memory:
    a.deleted = getBottom(a)
    del(a, a.root, cast[int](addr(c.data)))
    if c.size >= HugeChunkSize: freeHugeChunk(a, c)
    else: freeBigChunk(a, c)
  sysAssert(allocInv(a), "rawDealloc: end")
  when logAlloc: cprintf("dealloc(pointer_%p)\n", p)
when not defined(gcDestructors):
  proc isAllocatedPtr(a: MemRegion, p: pointer): bool =
    ## Returns true if `p` points to the start of a live allocation owned
    ## by `a`. Relies on the `zeroField` header: values > 1 mark used cells.
    if isAccessible(a, p):
      var c = pageAddr(p)
      if not chunkUnused(c):
        if isSmallChunk(c):
          var c = cast[PSmallChunk](c)
          # offset of `p` within the chunk's data area:
          var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                      smallChunkOverhead()
          result = (c.acc >% offset) and (offset %% c.size == 0) and
            (cast[ptr FreeCell](p).zeroField >% 1)
        else:
          var c = cast[PBigChunk](c)
          result = p == addr(c.data) and cast[ptr FreeCell](p).zeroField >% 1

  proc prepareForInteriorPointerChecking(a: var MemRegion) {.inline.} =
    ## Caches the lowest/highest large-object addresses from the AVL tree
    ## so `interiorAllocatedPtr` can reject most candidates with two
    ## comparisons.
    a.minLargeObj = lowGauge(a.root)
    a.maxLargeObj = highGauge(a.root)

  proc interiorAllocatedPtr(a: MemRegion, p: pointer): pointer =
    ## If `p` points anywhere inside a live allocation of `a`, returns the
    ## start of that allocation; otherwise returns nil.
    if isAccessible(a, p):
      var c = pageAddr(p)
      if not chunkUnused(c):
        if isSmallChunk(c):
          var c = cast[PSmallChunk](c)
          var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                      smallChunkOverhead()
          if c.acc >% offset:
            sysAssert(cast[ByteAddress](addr(c.data)) +% offset ==
                      cast[ByteAddress](p), "offset is not what you think it is")
            # round `offset` down to the enclosing cell's start:
            var d = cast[ptr FreeCell](cast[ByteAddress](addr(c.data)) +%
                      offset -% (offset %% c.size))
            if d.zeroField >% 1:
              result = d
              sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
        else:
          var c = cast[PBigChunk](c)
          var d = addr(c.data)
          if p >= d and cast[ptr FreeCell](d).zeroField >% 1:
            result = d
            sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
    else:
      # `p` may point into a huge chunk that has no page bits; consult the
      # AVL tree after the cheap min/max range check.
      var q = cast[int](p)
      if q >=% a.minLargeObj and q <=% a.maxLargeObj:
        # this check is highly effective! Test fails for 99,96% of all checks on
        # an x86-64.
        var avlNode = inRange(a.root, q)
        if avlNode != nil:
          var k = cast[pointer](avlNode.key)
          var c = cast[PBigChunk](pageAddr(k))
          sysAssert(addr(c.data) == k, " k is not the same as addr(c.data)!")
          if cast[ptr FreeCell](k).zeroField >% 1:
            result = k
            sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
  842. proc ptrSize(p: pointer): int =
  843. when not defined(gcDestructors):
  844. var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  845. var c = pageAddr(p)
  846. sysAssert(not chunkUnused(c), "ptrSize")
  847. result = c.size -% sizeof(FreeCell)
  848. if not isSmallChunk(c):
  849. dec result, bigChunkOverhead()
  850. else:
  851. var c = pageAddr(p)
  852. sysAssert(not chunkUnused(c), "ptrSize")
  853. result = c.size
  854. if not isSmallChunk(c):
  855. dec result, bigChunkOverhead()
  856. proc alloc(allocator: var MemRegion, size: Natural): pointer {.gcsafe.} =
  857. when not defined(gcDestructors):
  858. result = rawAlloc(allocator, size+sizeof(FreeCell))
  859. cast[ptr FreeCell](result).zeroField = 1 # mark it as used
  860. sysAssert(not isAllocatedPtr(allocator, result), "alloc")
  861. result = cast[pointer](cast[ByteAddress](result) +% sizeof(FreeCell))
  862. track("alloc", result, size)
  863. else:
  864. result = rawAlloc(allocator, size)
  865. proc alloc0(allocator: var MemRegion, size: Natural): pointer =
  866. result = alloc(allocator, size)
  867. zeroMem(result, size)
  868. proc dealloc(allocator: var MemRegion, p: pointer) =
  869. when not defined(gcDestructors):
  870. sysAssert(p != nil, "dealloc: p is nil")
  871. var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  872. sysAssert(x != nil, "dealloc: x is nil")
  873. sysAssert(isAccessible(allocator, x), "is not accessible")
  874. sysAssert(cast[ptr FreeCell](x).zeroField == 1, "dealloc: object header corrupted")
  875. rawDealloc(allocator, x)
  876. sysAssert(not isAllocatedPtr(allocator, x), "dealloc: object still accessible")
  877. track("dealloc", p, 0)
  878. else:
  879. rawDealloc(allocator, p)
  880. proc realloc(allocator: var MemRegion, p: pointer, newsize: Natural): pointer =
  881. if newsize > 0:
  882. result = alloc(allocator, newsize)
  883. if p != nil:
  884. copyMem(result, p, min(ptrSize(p), newsize))
  885. dealloc(allocator, p)
  886. elif p != nil:
  887. dealloc(allocator, p)
  888. proc realloc0(allocator: var MemRegion, p: pointer, oldsize, newsize: Natural): pointer =
  889. result = realloc(allocator, p, newsize)
  890. if newsize > oldsize:
  891. zeroMem(cast[pointer](cast[uint](result) + uint(oldsize)), newsize - oldsize)
proc deallocOsPages(a: var MemRegion) =
  ## Returns every OS page owned by region `a` back to the operating
  ## system by walking the `heapLinks` list of recorded (pointer, size)
  ## OS allocations, then freeing the bookkeeping pages themselves.
  # we free every 'ordinarily' allocated page by iterating over the page bits:
  var it = addr(a.heapLinks)
  while true:
    # capture `next` before the entries of `it` are released
    let next = it.next
    for i in 0..it.len-1:
      let (p, size) = it.chunks[i]
      when defined(debugHeapLinks):
        cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
          it, it.size, next)
      sysAssert size >= PageSize, "origSize too small"
      osDeallocPages(p, size)
    it = next
    if it == nil: break
  # And then we free the pages that are in use for the page bits:
  llDeallocAll(a)
  908. proc getFreeMem(a: MemRegion): int {.inline.} = result = a.freeMem
  909. proc getTotalMem(a: MemRegion): int {.inline.} = result = a.currMem
  910. proc getOccupiedMem(a: MemRegion): int {.inline.} =
  911. result = a.occ
  912. # a.currMem - a.freeMem
  913. when defined(nimTypeNames):
  914. proc getMemCounters(a: MemRegion): (int, int) {.inline.} =
  915. (a.allocCounter, a.deallocCounter)
  916. # ---------------------- thread memory region -------------------------------
template instantiateForRegion(allocator: untyped) {.dirty.} =
  ## Instantiates the global allocator API (`allocImpl`, `deallocImpl`,
  ## shared-heap variants, statistics accessors) on top of the given
  ## `allocator` MemRegion. `{.dirty.}` so the generated procs bind in the
  ## instantiating module's scope.
  {.push stackTrace: off.}

  when defined(fulldebug):
    proc interiorAllocatedPtr*(p: pointer): pointer =
      result = interiorAllocatedPtr(allocator, p)

    proc isAllocatedPtr*(p: pointer): bool =
      # step back over the cell header before querying the region
      let p = cast[pointer](cast[ByteAddress](p)-%ByteAddress(sizeof(Cell)))
      result = isAllocatedPtr(allocator, p)

  proc deallocOsPages = deallocOsPages(allocator)

  proc allocImpl(size: Natural): pointer =
    result = alloc(allocator, size)

  proc alloc0Impl(size: Natural): pointer =
    result = alloc0(allocator, size)

  proc deallocImpl(p: pointer) =
    dealloc(allocator, p)

  proc reallocImpl(p: pointer, newSize: Natural): pointer =
    result = realloc(allocator, p, newSize)

  proc realloc0Impl(p: pointer, oldSize, newSize: Natural): pointer =
    result = realloc(allocator, p, newSize)
    # zero the freshly grown tail, matching realloc0 semantics
    if newSize > oldSize:
      zeroMem(cast[pointer](cast[int](result) + oldSize), newSize - oldSize)

  when false:
    proc countFreeMem(): int =
      # only used for assertions
      var it = allocator.freeChunksList
      while it != nil:
        inc(result, it.size)
        it = it.next

  proc getFreeMem(): int =
    result = allocator.freeMem
    #sysAssert(result == countFreeMem())

  proc getTotalMem(): int = return allocator.currMem
  proc getOccupiedMem(): int = return allocator.occ #getTotalMem() - getFreeMem()
  proc getMaxMem*(): int = return getMaxMem(allocator)

  when defined(nimTypeNames):
    proc getMemCounters*(): (int, int) = getMemCounters(allocator)

  # -------------------- shared heap region ----------------------------------

  when hasThreadSupport:
    # a single lock-protected region serves all threads' shared allocations
    var sharedHeap: MemRegion
    var heapLock: SysLock
    initSysLock(heapLock)

  proc allocSharedImpl(size: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = alloc(sharedHeap, size)
      releaseSys(heapLock)
    else:
      result = allocImpl(size)

  proc allocShared0Impl(size: Natural): pointer =
    result = allocSharedImpl(size)
    zeroMem(result, size)

  proc deallocSharedImpl(p: pointer) =
    when hasThreadSupport:
      acquireSys(heapLock)
      dealloc(sharedHeap, p)
      releaseSys(heapLock)
    else:
      deallocImpl(p)

  proc reallocSharedImpl(p: pointer, newSize: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = realloc(sharedHeap, p, newSize)
      releaseSys(heapLock)
    else:
      result = reallocImpl(p, newSize)

  proc reallocShared0Impl(p: pointer, oldSize, newSize: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = realloc0(sharedHeap, p, oldSize, newSize)
      releaseSys(heapLock)
    else:
      result = realloc0Impl(p, oldSize, newSize)

  when hasThreadSupport:
    # reads a single statistic under the heap lock
    template sharedMemStatsShared(v: int) =
      acquireSys(heapLock)
      result = v
      releaseSys(heapLock)

    proc getFreeSharedMem(): int =
      sharedMemStatsShared(sharedHeap.freeMem)

    proc getTotalSharedMem(): int =
      sharedMemStatsShared(sharedHeap.currMem)

    proc getOccupiedSharedMem(): int =
      sharedMemStatsShared(sharedHeap.occ)
      #sharedMemStatsShared(sharedHeap.currMem - sharedHeap.freeMem)
  {.pop.}
  1002. {.pop.}