gc.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354
  1. /* Copyright (C) 1995, 96, 97, 98, 99, 2000, 2002 Free Software Foundation, Inc.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License as published by
  5. * the Free Software Foundation; either version 2, or (at your option)
  6. * any later version.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public License
  14. * along with this software; see the file COPYING. If not, write to
  15. * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
  16. * Boston, MA 02111-1307 USA
  17. *
  18. * As a special exception, the Free Software Foundation gives permission
  19. * for additional uses of the text contained in its release of GUILE.
  20. *
  21. * The exception is that, if you link the GUILE library with other files
  22. * to produce an executable, this does not by itself cause the
  23. * resulting executable to be covered by the GNU General Public License.
  24. * Your use of that executable is in no way restricted on account of
  25. * linking the GUILE library code into it.
  26. *
  27. * This exception does not however invalidate any other reasons why
  28. * the executable file might be covered by the GNU General Public License.
  29. *
  30. * This exception applies only to the code released by the
  31. * Free Software Foundation under the name GUILE. If you copy
  32. * code from other Free Software Foundation releases into a copy of
  33. * GUILE, as the General Public License permits, the exception does
  34. * not apply to the code that you add in this way. To avoid misleading
  35. * anyone as to the status of such modified files, you must delete
  36. * this exception notice from them.
  37. *
  38. * If you write modifications of your own for GUILE, it is your choice
  39. * whether to permit this exception to apply to your modifications.
  40. * If you do not wish that, delete this exception notice. */
  41. /* #define DEBUGINFO */
  42. #include <stdio.h>
  43. #include "libguile/_scm.h"
  44. #include "libguile/stime.h"
  45. #include "libguile/stackchk.h"
  46. #include "libguile/struct.h"
  47. #include "libguile/smob.h"
  48. #include "libguile/unif.h"
  49. #include "libguile/async.h"
  50. #include "libguile/ports.h"
  51. #include "libguile/root.h"
  52. #include "libguile/strings.h"
  53. #include "libguile/vectors.h"
  54. #include "libguile/weaks.h"
  55. #include "libguile/hashtab.h"
  56. #include "libguile/validate.h"
  57. #include "libguile/gc.h"
  58. #ifdef GUILE_DEBUG_MALLOC
  59. #include "libguile/debug-malloc.h"
  60. #endif
  61. #ifdef HAVE_MALLOC_H
  62. #include <malloc.h>
  63. #endif
  64. #ifdef HAVE_UNISTD_H
  65. #include <unistd.h>
  66. #endif
  67. #ifdef __STDC__
  68. #include <stdarg.h>
  69. #define var_start(x, y) va_start(x, y)
  70. #else
  71. #include <varargs.h>
  72. #define var_start(x, y) va_start(x)
  73. #endif
  74. /* {heap tuning parameters}
  75. *
  76. * These are parameters for controlling memory allocation. The heap
  77. * is the area out of which scm_cons, and object headers are allocated.
  78. *
  79. * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
  80. * 64 bit machine. The units of the _SIZE parameters are bytes.
  81. * Cons pairs and object headers occupy one heap cell.
  82. *
  83. * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
  84. * allocated initially the heap will grow by half its current size
  85. * each subsequent time more heap is needed.
  86. *
  87. * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
  88. * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
  89. * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
  90. * is in scm_init_storage() and alloc_some_heap() in sys.c
  91. *
  92. * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
  93. * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
  94. *
  95. * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
  96. * is needed.
  97. *
  98. * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
  99. * trigger a GC.
  100. *
  101. * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
  102. * reclaimed by a GC triggered by must_malloc. If less than this is
  103. * reclaimed, the trigger threshold is raised. [I don't know what a
  104. * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
  105. * work around a oscillation that caused almost constant GC.]
  106. */
  107. /*
  108. * Heap size 45000 and 40% min yield gives quick startup and no extra
  109. * heap allocation. Having higher values on min yield may lead to
  110. * large heaps, especially if code behaviour is varying its
  111. * maximum consumption between different freelists.
  112. */
  113. int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
  114. int scm_default_min_yield_1 = 40;
  115. #define SCM_CLUSTER_SIZE_1 2000L
  116. int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
  117. /* The following value may seem large, but note that if we get to GC at
  118. * all, this means that we have a numerically intensive application
  119. */
  120. int scm_default_min_yield_2 = 40;
  121. #define SCM_CLUSTER_SIZE_2 1000L
  122. int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
  123. #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
  124. #ifdef _QC
  125. # define SCM_HEAP_SEG_SIZE 32768L
  126. #else
  127. # ifdef sequent
  128. # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
  129. # else
  130. # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
  131. # endif
  132. #endif
  133. /* Make heap grow with factor 1.5 */
  134. #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
  135. #define SCM_INIT_MALLOC_LIMIT 100000
  136. #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
  137. /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
  138. bounds for allocated storage */
  139. #ifdef PROT386
  140. /*in 386 protected mode we must only adjust the offset */
  141. # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
  142. # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
  143. #else
  144. # ifdef _UNICOS
  145. # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
  146. # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
  147. # else
  148. # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
  149. # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
  150. # endif /* UNICOS */
  151. #endif /* PROT386 */
  152. #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
  153. #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
  154. #define SCM_HEAP_SIZE \
  155. (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
  156. #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
  157. /* scm_freelists
  158. */
/* Freelist bookkeeping.  One master instance exists per cell span
 * (see scm_master_freelist / scm_master_freelist2 below).
 */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
/* Freelist heads and their master bookkeeping, one pair per cell span.
 * The initializers fill the first 12 fields of scm_freelist_t
 * (cells .. collected_1); the trailing heap_size starts at 0.  The
 * 10th value is the span: 1 cell per object for scm_master_freelist,
 * 2 cells per object for scm_master_freelist2.
 */
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
  201. /* scm_mtrigger
  202. * is the number of bytes of must_malloc allocation needed to trigger gc.
  203. */
  204. unsigned long scm_mtrigger;
  205. /* scm_gc_heap_lock
  206. * If set, don't expand the heap. Set only during gc, during which no allocation
  207. * is supposed to take place anyway.
  208. */
  209. int scm_gc_heap_lock = 0;
  210. /* GC Blocking
  211. * Don't pause for collection if this is set -- just
  212. * expand the heap.
  213. */
  214. int scm_block_gc = 1;
  215. /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
  216. * collection (GC) more space is allocated for the heap.
  217. */
  218. #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
  219. /* During collection, this accumulates objects holding
  220. * weak references.
  221. */
  222. SCM scm_weak_vectors;
  223. /* GC Statistics Keeping
  224. */
  225. unsigned long scm_cells_allocated = 0;
  226. long scm_mallocated = 0;
  227. unsigned long scm_gc_cells_collected;
  228. unsigned long scm_gc_yield;
  229. static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
  230. unsigned long scm_gc_malloc_collected;
  231. unsigned long scm_gc_ports_collected;
  232. unsigned long scm_gc_rt;
  233. unsigned long scm_gc_time_taken = 0;
  234. SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
  235. SCM_SYMBOL (sym_heap_size, "cell-heap-size");
  236. SCM_SYMBOL (sym_mallocated, "bytes-malloced");
  237. SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
  238. SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
  239. SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
/* Description of one contiguous segment of the cell heap.  The global
 * scm_heap_table (declared elsewhere) holds one of these per segment.
 */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist. */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;

/* Forward declarations for the heap-growing machinery defined later in
 * this file.  */
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);
  252. /* Debugging functions. */
  253. #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
  254. /* Return the number of the heap segment containing CELL. */
  255. static int
  256. which_seg (SCM cell)
  257. {
  258. int i;
  259. for (i = 0; i < scm_n_heap_segs; i++)
  260. if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
  261. && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
  262. return i;
  263. fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
  264. SCM_UNPACK (cell));
  265. abort ();
  266. }
  267. static void
  268. map_free_list (scm_freelist_t *master, SCM freelist)
  269. {
  270. int last_seg = -1, count = 0;
  271. SCM f;
  272. for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
  273. {
  274. int this_seg = which_seg (f);
  275. if (this_seg != last_seg)
  276. {
  277. if (last_seg != -1)
  278. fprintf (stderr, " %5d %d-cells in segment %d\n",
  279. count, master->span, last_seg);
  280. last_seg = this_seg;
  281. count = 0;
  282. }
  283. count++;
  284. }
  285. if (last_seg != -1)
  286. fprintf (stderr, " %5d %d-cells in segment %d\n",
  287. count, master->span, last_seg);
  288. }
  289. SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
  290. (),
  291. "Print debugging information about the free-list.\n"
  292. "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
  293. #define FUNC_NAME s_scm_map_free_list
  294. {
  295. int i;
  296. fprintf (stderr, "%d segments total (%d:%d",
  297. scm_n_heap_segs,
  298. scm_heap_table[0].span,
  299. scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  300. for (i = 1; i < scm_n_heap_segs; i++)
  301. fprintf (stderr, ", %d:%d",
  302. scm_heap_table[i].span,
  303. scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  304. fprintf (stderr, ")\n");
  305. map_free_list (&scm_master_freelist, scm_freelist);
  306. map_free_list (&scm_master_freelist2, scm_freelist2);
  307. fflush (stderr);
  308. return SCM_UNSPECIFIED;
  309. }
  310. #undef FUNC_NAME
  311. static int last_cluster;
  312. static int last_size;
  313. static int
  314. free_list_length (char *title, int i, SCM freelist)
  315. {
  316. SCM ls;
  317. int n = 0;
  318. for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
  319. if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
  320. ++n;
  321. else
  322. {
  323. fprintf (stderr, "bad cell in %s at position %d\n", title, n);
  324. abort ();
  325. }
  326. if (n != last_size)
  327. {
  328. if (i > 0)
  329. {
  330. if (last_cluster == i - 1)
  331. fprintf (stderr, "\t%d\n", last_size);
  332. else
  333. fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  334. }
  335. if (i >= 0)
  336. fprintf (stderr, "%s %d", title, i);
  337. else
  338. fprintf (stderr, "%s\t%d\n", title, n);
  339. last_cluster = i;
  340. last_size = n;
  341. }
  342. return n;
  343. }
  344. static void
  345. free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
  346. {
  347. SCM clusters;
  348. int i = 0, len, n = 0;
  349. fprintf (stderr, "%s\n\n", title);
  350. n += free_list_length ("free list", -1, freelist);
  351. for (clusters = master->clusters;
  352. SCM_NNULLP (clusters);
  353. clusters = SCM_CDR (clusters))
  354. {
  355. len = free_list_length ("cluster", i++, SCM_CAR (clusters));
  356. n += len;
  357. }
  358. if (last_cluster == i - 1)
  359. fprintf (stderr, "\t%d\n", last_size);
  360. else
  361. fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  362. fprintf (stderr, "\ntotal %d objects\n\n", n);
  363. }
  364. SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
  365. (),
  366. "Print debugging information about the free-list.\n"
  367. "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
  368. #define FUNC_NAME s_scm_free_list_length
  369. {
  370. free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  371. free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  372. return SCM_UNSPECIFIED;
  373. }
  374. #undef FUNC_NAME
  375. #endif
  376. #ifdef GUILE_DEBUG_FREELIST
  377. /* Number of calls to SCM_NEWCELL since startup. */
  378. static unsigned long scm_newcell_count;
  379. static unsigned long scm_newcell2_count;
  380. /* Search freelist for anything that isn't marked as a free cell.
  381. Abort if we find something. */
  382. static void
  383. scm_check_freelist (SCM freelist)
  384. {
  385. SCM f;
  386. int i = 0;
  387. for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
  388. if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
  389. {
  390. fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
  391. scm_newcell_count, i);
  392. fflush (stderr);
  393. abort ();
  394. }
  395. }
  396. static int scm_debug_check_freelist = 0;
  397. SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
  398. (SCM flag),
  399. "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
  400. "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
  401. "compile-time flag was selected.\n")
  402. #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
  403. {
  404. SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  405. return SCM_UNSPECIFIED;
  406. }
  407. #undef FUNC_NAME
  408. SCM
  409. scm_debug_newcell (void)
  410. {
  411. SCM new;
  412. scm_newcell_count++;
  413. if (scm_debug_check_freelist)
  414. {
  415. scm_check_freelist (scm_freelist);
  416. scm_gc();
  417. }
  418. /* The rest of this is supposed to be identical to the SCM_NEWCELL
  419. macro. */
  420. if (SCM_IMP (scm_freelist))
  421. new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  422. else
  423. {
  424. new = scm_freelist;
  425. scm_freelist = SCM_CDR (scm_freelist);
  426. SCM_SETCAR (new, scm_tc16_allocated);
  427. }
  428. return new;
  429. }
  430. SCM
  431. scm_debug_newcell2 (void)
  432. {
  433. SCM new;
  434. scm_newcell2_count++;
  435. if (scm_debug_check_freelist)
  436. {
  437. scm_check_freelist (scm_freelist2);
  438. scm_gc ();
  439. }
  440. /* The rest of this is supposed to be identical to the SCM_NEWCELL
  441. macro. */
  442. if (SCM_IMP (scm_freelist2))
  443. new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  444. else
  445. {
  446. new = scm_freelist2;
  447. scm_freelist2 = SCM_CDR (scm_freelist2);
  448. SCM_SETCAR (new, scm_tc16_allocated);
  449. }
  450. return new;
  451. }
  452. #endif /* GUILE_DEBUG_FREELIST */
  453. static unsigned long
  454. master_cells_allocated (scm_freelist_t *master)
  455. {
  456. int objects = master->clusters_allocated * (master->cluster_size - 1);
  457. if (SCM_NULLP (master->clusters))
  458. objects -= master->left_to_collect;
  459. return master->span * objects;
  460. }
  461. static unsigned long
  462. freelist_length (SCM freelist)
  463. {
  464. int n;
  465. for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
  466. ++n;
  467. return n;
  468. }
  469. static unsigned long
  470. compute_cells_allocated ()
  471. {
  472. return (scm_cells_allocated
  473. + master_cells_allocated (&scm_master_freelist)
  474. + master_cells_allocated (&scm_master_freelist2)
  475. - scm_master_freelist.span * freelist_length (scm_freelist)
  476. - scm_master_freelist2.span * freelist_length (scm_freelist2));
  477. }
  478. /* {Scheme Interface to GC}
  479. */
  480. SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
  481. (),
  482. "Returns an association list of statistics about Guile's current use of storage. ")
  483. #define FUNC_NAME s_scm_gc_stats
  484. {
  485. int i;
  486. int n;
  487. SCM heap_segs;
  488. long int local_scm_mtrigger;
  489. long int local_scm_mallocated;
  490. long int local_scm_heap_size;
  491. long int local_scm_cells_allocated;
  492. long int local_scm_gc_time_taken;
  493. SCM answer;
  494. SCM_DEFER_INTS;
  495. scm_block_gc = 1;
  496. retry:
  497. heap_segs = SCM_EOL;
  498. n = scm_n_heap_segs;
  499. for (i = scm_n_heap_segs; i--; )
  500. heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
  501. scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
  502. heap_segs);
  503. if (scm_n_heap_segs != n)
  504. goto retry;
  505. scm_block_gc = 0;
  506. /* Below, we cons to produce the resulting list. We want a snapshot of
  507. * the heap situation before consing.
  508. */
  509. local_scm_mtrigger = scm_mtrigger;
  510. local_scm_mallocated = scm_mallocated;
  511. local_scm_heap_size = SCM_HEAP_SIZE;
  512. local_scm_cells_allocated = compute_cells_allocated ();
  513. local_scm_gc_time_taken = scm_gc_time_taken;
  514. answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
  515. scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
  516. scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
  517. scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
  518. scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
  519. scm_cons (sym_heap_segments, heap_segs),
  520. SCM_UNDEFINED);
  521. SCM_ALLOW_INTS;
  522. return answer;
  523. }
  524. #undef FUNC_NAME
  525. void
  526. scm_gc_start (const char *what)
  527. {
  528. scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  529. scm_gc_cells_collected = 0;
  530. scm_gc_yield_1 = scm_gc_yield;
  531. scm_gc_yield = (scm_cells_allocated
  532. + master_cells_allocated (&scm_master_freelist)
  533. + master_cells_allocated (&scm_master_freelist2));
  534. scm_gc_malloc_collected = 0;
  535. scm_gc_ports_collected = 0;
  536. }
  537. void
  538. scm_gc_end ()
  539. {
  540. scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  541. scm_gc_time_taken += scm_gc_rt;
  542. scm_system_async_mark (scm_gc_async);
  543. }
  544. SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
  545. (SCM obj),
  546. "Return an integer that for the lifetime of @var{obj} is uniquely\n"
  547. "returned by this function for @var{obj}")
  548. #define FUNC_NAME s_scm_object_address
  549. {
  550. return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
  551. }
  552. #undef FUNC_NAME
  553. SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
  554. (),
  555. "Scans all of SCM objects and reclaims for further use those that are\n"
  556. "no longer accessible.")
  557. #define FUNC_NAME s_scm_gc
  558. {
  559. SCM_DEFER_INTS;
  560. scm_igc ("call");
  561. SCM_ALLOW_INTS;
  562. return SCM_UNSPECIFIED;
  563. }
  564. #undef FUNC_NAME
  565. /* {C Interface For When GC is Triggered}
  566. */
  567. static void
  568. adjust_min_yield (scm_freelist_t *freelist)
  569. {
  570. /* min yield is adjusted upwards so that next predicted total yield
  571. * (allocated cells actually freed by GC) becomes
  572. * `min_yield_fraction' of total heap size. Note, however, that
  573. * the absolute value of min_yield will correspond to `collected'
  574. * on one master (the one which currently is triggering GC).
  575. *
  576. * The reason why we look at total yield instead of cells collected
  577. * on one list is that we want to take other freelists into account.
  578. * On this freelist, we know that (local) yield = collected cells,
  579. * but that's probably not the case on the other lists.
  580. *
  581. * (We might consider computing a better prediction, for example
  582. * by computing an average over multiple GC:s.)
  583. */
  584. if (freelist->min_yield_fraction)
  585. {
  586. /* Pick largest of last two yields. */
  587. int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
  588. - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
  589. #ifdef DEBUGINFO
  590. fprintf (stderr, " after GC = %d, delta = %d\n",
  591. scm_cells_allocated,
  592. delta);
  593. #endif
  594. if (delta > 0)
  595. freelist->min_yield += delta;
  596. }
  597. }
  598. /* When we get POSIX threads support, the master will be global and
  599. * common while the freelist will be individual for each thread.
  600. */
/* Hand a fresh cluster of free cells to a (per-thread) freelist.
 *
 * MASTER is the shared master freelist that holds clusters of free
 * cells; FREELIST receives the tail of the cluster that is detached.
 * The cluster's head cell is returned, retyped scm_tc16_allocated so
 * the conservative marker cannot misinterpret it.
 *
 * If the master has run out of clusters, either grow the heap (when
 * the previous sweep requested it via grow_heap_p) or run a garbage
 * collection, retrying until a non-empty cluster turns up.  Interrupts
 * are disabled for the duration.
 */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  if (master->grow_heap_p)
	    {
	      /* A previous sweep decided the yield was too low; expand
	         the heap instead of collecting again.  */
	      master->grow_heap_p = 0;
	      alloc_some_heap (master);
	    }
	  else
	    {
#ifdef DEBUGINFO
	      fprintf (stderr, "allocated = %d, ",
		       scm_cells_allocated
		       + master_cells_allocated (&scm_master_freelist)
		       + master_cells_allocated (&scm_master_freelist2));
#endif
	      scm_igc ("cells");
	      adjust_min_yield (master);
	    }
	}
      /* Pop the next cluster; an empty one (SCM_EOL head) makes the
	 loop retry.  */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  /* Detach the rest of the cluster into the caller's freelist and
     return its head cell.  */
  *freelist = SCM_CDR (cell);
  SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}
  637. #if 0
  638. /* This is a support routine which can be used to reserve a cluster
  639. * for some special use, such as debugging. It won't be useful until
  640. * free cells are preserved between garbage collections.
  641. */
  642. void
  643. scm_alloc_cluster (scm_freelist_t *master)
  644. {
  645. SCM freelist, cell;
  646. cell = scm_gc_for_newcell (master, &freelist);
  647. SCM_SETCDR (cell, freelist);
  648. return cell;
  649. }
  650. #endif
/* Scheme-level hook run after garbage collection.
   NOTE(review): its initialization is not visible in this chunk. */
SCM scm_after_gc_hook;

/* C-level hooks run by scm_igc at the phase boundaries of a
   collection: before GC, before marking, before sweeping, after
   sweeping, and after GC.  */
scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;
  657. void
  658. scm_igc (const char *what)
  659. {
  660. int j;
  661. scm_c_hook_run (&scm_before_gc_c_hook, 0);
  662. #ifdef DEBUGINFO
  663. fprintf (stderr,
  664. SCM_NULLP (scm_freelist)
  665. ? "*"
  666. : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
  667. #endif
  668. #ifdef USE_THREADS
  669. /* During the critical section, only the current thread may run. */
  670. SCM_THREAD_CRITICAL_SECTION_START;
  671. #endif
  672. /* fprintf (stderr, "gc: %s\n", what); */
  673. scm_gc_start (what);
  674. if (!scm_stack_base || scm_block_gc)
  675. {
  676. scm_gc_end ();
  677. return;
  678. }
  679. if (scm_mallocated < 0)
  680. /* The byte count of allocated objects has underflowed. This is
  681. probably because you forgot to report the sizes of objects you
  682. have allocated, by calling scm_done_malloc or some such. When
  683. the GC freed them, it subtracted their size from
  684. scm_mallocated, which underflowed. */
  685. abort ();
  686. if (scm_gc_heap_lock)
  687. /* We've invoked the collector while a GC is already in progress.
  688. That should never happen. */
  689. abort ();
  690. ++scm_gc_heap_lock;
  691. /* unprotect any struct types with no instances */
  692. #if 0
  693. {
  694. SCM type_list;
  695. SCM * pos;
  696. pos = &scm_type_obj_list;
  697. type_list = scm_type_obj_list;
  698. while (type_list != SCM_EOL)
  699. if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
  700. {
  701. pos = SCM_CDRLOC (type_list);
  702. type_list = SCM_CDR (type_list);
  703. }
  704. else
  705. {
  706. *pos = SCM_CDR (type_list);
  707. type_list = SCM_CDR (type_list);
  708. }
  709. }
  710. #endif
  711. /* flush dead entries from the continuation stack */
  712. {
  713. int x;
  714. int bound;
  715. SCM * elts;
  716. elts = SCM_VELTS (scm_continuation_stack);
  717. bound = SCM_LENGTH (scm_continuation_stack);
  718. x = SCM_INUM (scm_continuation_stack_ptr);
  719. while (x < bound)
  720. {
  721. elts[x] = SCM_BOOL_F;
  722. ++x;
  723. }
  724. }
  725. scm_c_hook_run (&scm_before_mark_c_hook, 0);
  726. #ifndef USE_THREADS
  727. /* Protect from the C stack. This must be the first marking
  728. * done because it provides information about what objects
  729. * are "in-use" by the C code. "in-use" objects are those
  730. * for which the values from SCM_LENGTH and SCM_CHARS must remain
  731. * usable. This requirement is stricter than a liveness
  732. * requirement -- in particular, it constrains the implementation
  733. * of scm_vector_set_length_x.
  734. */
  735. SCM_FLUSH_REGISTER_WINDOWS;
  736. /* This assumes that all registers are saved into the jmp_buf */
  737. setjmp (scm_save_regs_gc_mark);
  738. scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
  739. ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
  740. sizeof scm_save_regs_gc_mark)
  741. / sizeof (SCM_STACKITEM)));
  742. {
  743. scm_sizet stack_len = scm_stack_size (scm_stack_base);
  744. #ifdef SCM_STACK_GROWS_UP
  745. scm_mark_locations (scm_stack_base, stack_len);
  746. #else
  747. scm_mark_locations (scm_stack_base - stack_len, stack_len);
  748. #endif
  749. }
  750. #else /* USE_THREADS */
  751. /* Mark every thread's stack and registers */
  752. scm_threads_mark_stacks ();
  753. #endif /* USE_THREADS */
  754. /* FIXME: insert a phase to un-protect string-data preserved
  755. * in scm_vector_set_length_x.
  756. */
  757. j = SCM_NUM_PROTECTS;
  758. while (j--)
  759. scm_gc_mark (scm_sys_protects[j]);
  760. /* FIXME: we should have a means to register C functions to be run
  761. * in different phases of GC
  762. */
  763. scm_mark_subr_table ();
  764. #ifndef USE_THREADS
  765. scm_gc_mark (scm_root->handle);
  766. #endif
  767. scm_c_hook_run (&scm_before_sweep_c_hook, 0);
  768. scm_gc_sweep ();
  769. scm_c_hook_run (&scm_after_sweep_c_hook, 0);
  770. --scm_gc_heap_lock;
  771. scm_gc_end ();
  772. #ifdef USE_THREADS
  773. SCM_THREAD_CRITICAL_SECTION_END;
  774. #endif
  775. scm_c_hook_run (&scm_after_gc_c_hook, 0);
  776. }
  777. /* {Mark/Sweep}
  778. */
/* Mark an object precisely.
 *
 * Dispatch on the cell's type tag and recursively mark everything it
 * references.  Tail references are followed iteratively through the
 * gc_mark_loop / gc_mark_nimp labels, so long cdr chains do not grow
 * the C stack.  Marking an already-marked cell breaks out immediately,
 * which also terminates cycles.
 */
void
scm_gc_mark (SCM p)
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  /* Immediates carry no heap storage; nothing to mark. */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  /* A non-immediate that is not a cell pointer means heap corruption. */
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
	{
	  ptr = SCM_CAR (ptr);
	  goto gc_mark_nimp;
	}
      /* Recurse on the car, iterate on the cdr (the long direction). */
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: mark the setter (cell word 2), then
	 follow the cdr.  */
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      {
	/* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
	 * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
	 * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
	 * pointer to a struct vtable data region.  The fact that these are
	 * accessed in the same way restricts the possibilites to change the
	 * data layout of structs or heap cells.
	 */
	scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
	scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
	/* The vcell slot doubles as a discriminator: 0 or 1 means
	   struct, anything else means gloc.  */
	switch (vtable_data [scm_vtable_index_vcell])
	  {
	  default:
	    {
	      /* ptr is a gloc */
	      SCM gloc_car = SCM_PACK (word0);
	      scm_gc_mark (gloc_car);
	      ptr = SCM_GCCDR (ptr);
	      goto gc_mark_loop;
	    }
	  case 1: /* ! */
	  case 0: /* ! */
	    {
	      /* ptr is a struct */
	      SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
	      int len = SCM_LENGTH (layout);
	      char * fields_desc = SCM_CHARS (layout);
	      /* We're using SCM_GCCDR here like STRUCT_DATA, except
		 that it removes the mark */
	      scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));

	      if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
		{
		  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
		  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
		}
	      if (len)
		{
		  int x;

		  /* The layout string uses two chars per field; mark
		     each 'p' (protected/pointer) field.  The final
		     field is handled separately below because it may
		     be a tail array.  */
		  for (x = 0; x < len - 2; x += 2, ++struct_data)
		    if (fields_desc[x] == 'p')
		      scm_gc_mark (SCM_PACK (*struct_data));
		  if (fields_desc[x] == 'p')
		    {
		      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
			/* Tail array: *struct_data holds the element
			   count, followed by that many slots.  */
			for (x = *struct_data; x; --x)
			  scm_gc_mark (SCM_PACK (*++struct_data));
		      else
			scm_gc_mark (SCM_PACK (*struct_data));
		    }
		}
	      if (vtable_data [scm_vtable_index_vcell] == 0)
		{
		  /* A plain struct: flag its vtable as reachable (1)
		     and go mark the vtable itself.  */
		  vtable_data [scm_vtable_index_vcell] = 1;
		  ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
		  goto gc_mark_loop;
		}
	    }
	  }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto gc_mark_nimp;
	}
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
	break;
      /* Recurse on elements 1..len-1, iterate on element 0. */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP
	(ptr) break;
      SCM_SETGC8MARK (ptr);
      /* Conservatively mark the saved stack copy plus register area. */
      if (SCM_VELTS (ptr))
	scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
			    (scm_sizet)
			    (SCM_LENGTH (ptr) +
			     (sizeof (SCM_STACKITEM) + -1 +
			      sizeof (scm_contregs)) /
			     sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform numeric vectors and strings hold no SCM references;
	 just set the mark bit.  */
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      /* Keep the underlying string alive. */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
	break;
      /* Chain this weak vector onto the global list so the weak phase
	 can process it after ordinary marking.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  int x;
	  int len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);

		  /*
		   * Do not do this:
		   * 	SCM_SETGCMARK (alist);
		   *	SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    scm_gc_mark (SCM_CAR (kvpair));
		  if (!weak_values)
		    scm_gc_mark (SCM_GCCDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		scm_gc_mark (alist);
	    }
	}
      break;
    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Subrs live in the static subr table; nothing to mark here. */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
	goto def;
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
	scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      if (scm_ptobs[i].mark)
	{
	  /* Port-type mark functions return a tail object to continue
	     marking from.  */
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* printf("found free_cell %X ", ptr); fflush(stdout); */
	case scm_tc16_allocated:
	case scm_tc16_big:
	case scm_tc16_real:
	case scm_tc16_complex:
	  /* These built-in smobs hold no SCM references. */
	  break;
	default:
	  i = SCM_SMOBNUM (ptr);
	  if (!(i < scm_numsmob))
	    goto def;
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
    def:scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}
/* Mark a Region Conservatively
 *
 * Scan N machine words starting at X (typically a C stack or register
 * save area).  Every word whose bit pattern could be a cell pointer
 * into one of the registered heap segments is passed to scm_gc_mark.
 * A binary search over scm_heap_table locates the candidate segment;
 * for two-cell segments the pointer must additionally be aligned on a
 * double-cell boundary (SCM_DOUBLE_CELLP) to count.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (* (SCM *) &x[m]))
      {
	ptr = SCM2PTR (* (SCM *) &x[m]);
	i = 0;
	j = scm_n_heap_segs - 1;
	/* Quick reject: outside the overall heap range. */
	if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	    && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	  {
	    /* Binary search for the segment containing ptr; i and j
	       narrow toward each other, and seg_id is set once a
	       segment's bounds bracket the pointer.  */
	    while (i <= j)
	      {
		int seg_id;
		seg_id = -1;
		if (   (i == j)
		    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		  seg_id = i;
		else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		  seg_id = j;
		else
		  {
		    int k;
		    k = (i + j) / 2;
		    if (k == i)
		      break;
		    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		      {
			j = k;
			++i;
			if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			  continue;
			else
			  break;
		      }
		    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		      {
			i = k;
			--j;
			if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			  continue;
			else
			  break;
		      }
		  }
		/* Found a segment: mark unless a half-aligned pointer
		   into a double-cell (span 2) segment.  */
		if (scm_heap_table[seg_id].span == 1
		    || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
		  scm_gc_mark (* (SCM *) &x[m]);
		break;
	      }
	  }
      }
}
/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 *
 * Returns 1 when VALUE has the cell bit pattern, lies within a
 * registered segment's bounds, and satisfies that segment's alignment
 * (span 1, or double-cell aligned for span-2 segments); 0 otherwise.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell * ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    /* Narrow [i, j] until i is the only candidate segment. */
    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
	j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
	i = k + 1;
      }
    }

    /* Verify the candidate actually contains ptr with valid alignment. */
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	&& SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
	&& (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
      return 1;
    } else {
      return 0;
    }
  } else {
    return 0;
  }
}
  1153. static void
  1154. gc_sweep_freelist_start (scm_freelist_t *freelist)
  1155. {
  1156. freelist->cells = SCM_EOL;
  1157. freelist->left_to_collect = freelist->cluster_size;
  1158. freelist->clusters_allocated = 0;
  1159. freelist->clusters = SCM_EOL;
  1160. freelist->clustertail = &freelist->clusters;
  1161. freelist->collected_1 = freelist->collected;
  1162. freelist->collected = 0;
  1163. }
  1164. static void
  1165. gc_sweep_freelist_finish (scm_freelist_t *freelist)
  1166. {
  1167. int collected;
  1168. *freelist->clustertail = freelist->cells;
  1169. if (SCM_NNULLP (freelist->cells))
  1170. {
  1171. SCM c = freelist->cells;
  1172. SCM_SETCAR (c, SCM_CDR (c));
  1173. SCM_SETCDR (c, SCM_EOL);
  1174. freelist->collected +=
  1175. freelist->span * (freelist->cluster_size - freelist->left_to_collect);
  1176. }
  1177. scm_gc_cells_collected += freelist->collected;
  1178. /* Although freelist->min_yield is used to test freelist->collected
  1179. * (which is the local GC yield for freelist), it is adjusted so
  1180. * that *total* yield is freelist->min_yield_fraction of total heap
  1181. * size. This means that a too low yield is compensated by more
  1182. * heap on the list which is currently doing most work, which is
  1183. * just what we want.
  1184. */
  1185. collected = SCM_MAX (freelist->collected_1, freelist->collected);
  1186. freelist->grow_heap_p = (collected < freelist->min_yield);
  1187. }
/* Sweep every heap segment: free the malloc'ed storage of unmarked
 * objects, clear the mark bits of surviving ones, and thread the dead
 * cells into per-freelist clusters of cluster_size cells each.
 *
 * m accumulates the number of malloc'ed bytes reclaimed; at the end it
 * is subtracted from scm_mallocated.  Each segment's new free cells
 * are built up in nfreelist and only committed to the segment's
 * freelist at the end of the segment.
 */
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      /* Walk the segment one object (span cells) at a time; the loop
	 condition is the decremented j, giving seg_size/span passes. */
      for (j = seg_size + span; j -= span; ptr += span)
	{
	  SCM scmptr = PTR2SCM (ptr);

	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_cons_gloc:
	      {
		/* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
		 * struct or a gloc.  See the corresponding comment in
		 * scm_gc_mark.
		 */
		scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
		scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
		if (SCM_GCMARKP (scmptr))
		  {
		    /* Survivor: reset the vtable's reachable flag set
		       during marking.  */
		    if (vtable_data [scm_vtable_index_vcell] == 1)
		      vtable_data [scm_vtable_index_vcell] = 0;
		    goto cmrkcontinue;
		  }
		else
		  {
		    /* Dead struct (vcell 0 or 1): call its free
		       function; glocs need no storage release.  */
		    if (vtable_data [scm_vtable_index_vcell] == 0
			|| vtable_data [scm_vtable_index_vcell] == 1)
		      {
			scm_struct_free_t free
			  = (scm_struct_free_t) vtable_data[scm_struct_i_free];
			m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
		      }
		  }
	      }
	      break;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      if (SCM_GCMARKP (scmptr))
		goto cmrkcontinue;
	      break;
	    case scm_tc7_wvect:
	      if (SCM_GC8MARKP (scmptr))
		{
		  goto c8mrkcontinue;
		}
	      else
		{
		  /* The element block was allocated with two extra
		     leading words (hence VELTS - 2 and the 2 + len).  */
		  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
		  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
		  break;
		}

	    case scm_tc7_vector:
	    case scm_tc7_lvector:
#ifdef CCLO
	    case scm_tc7_cclo:
#endif
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;

	      m += (SCM_LENGTH (scmptr) * sizeof (SCM));
	    freechars:
	      /* Shared tail: release the object's malloc'ed body.  The
		 byte count was already added to m by the jumping case. */
	      scm_must_free (SCM_CHARS (scmptr));
	      /*	SCM_SETCHARS(scmptr, 0);*/
	      break;
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
	      goto freechars;
	    case scm_tc7_byvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
	      goto freechars;
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
	      goto freechars;
	    case scm_tc7_svect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
	      goto freechars;
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
	      goto freechars;
#endif
	    case scm_tc7_fvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
	      goto freechars;
	    case scm_tc7_dvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
	      goto freechars;
	    case scm_tc7_cvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
	      goto freechars;
#endif
	    case scm_tc7_substring:
	      /* Substrings share their characters with the parent
		 string; nothing to free here.  */
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      break;
	    case scm_tc7_string:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) + 1;
	      goto freechars;
	    case scm_tc7_msymbol:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      /* The name chars live inside the slots block; count the
		 whole allocation starting at SCM_SLOTS.  */
	      m += (SCM_LENGTH (scmptr) + 1
		    + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
	      scm_must_free ((char *)SCM_SLOTS (scmptr));
	      break;
	    case scm_tc7_contin:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
	      /* NOTE(review): when SCM_VELTS is null this case falls
		 through into scm_tc7_ssymbol (no break); that path
		 re-tests the mark bit and breaks, so it appears
		 harmless, but confirm the fall-through is intended.  */
	      if (SCM_VELTS (scmptr))
		goto freechars;
	    case scm_tc7_ssymbol:
	      if SCM_GC8MARKP(scmptr)
		goto c8mrkcontinue;
	      break;
	    case scm_tcs_subrs:
	      /* Subrs are never collected; skip without touching marks. */
	      continue;
	    case scm_tc7_port:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  if (!(k < scm_numptob))
		    goto sweeperr;
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_GCTYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  goto freechars;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += 2 * sizeof (double);
		  goto freechars;
		default:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;

		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
		    if (!(k < scm_numsmob))
		      goto sweeperr;
		    /* Smob free functions report the bytes they freed. */
		    m += (scm_smobs[k].free) (scmptr);
		    break;
		  }
		}
	      break;
	    default:
	    sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
	    }
#if 0
	  if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
	    exit (2);
#endif
	  /* The cell is dead: chain it into nfreelist, closing off a
	     full cluster whenever left_to_collect hits zero.  */
	  if (!--left_to_collect)
	    {
	      SCM_SETCAR (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object.  */
	      SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	      SCM_SETCDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }

	  continue;
	c8mrkcontinue:
	  /* Survivor (8-bit mark): clear the mark for the next GC. */
	  SCM_CLRGC8MARK (scmptr);
	  continue;
	cmrkcontinue:
	  /* Survivor (cons-style mark): clear the mark for the next GC. */
	  SCM_CLRGCMARK (scmptr);
	}
#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this function;
	 this block looks like it would not compile if GC_FREE_SEGMENTS
	 were defined — verify before enabling.  */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--;		/* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment.  */
	  freelist->cells = nfreelist;
	  freelist->left_to_collect = left_to_collect;
	}

#ifdef GUILE_DEBUG_FREELIST
      scm_check_freelist (freelist == &scm_master_freelist
			  ? scm_freelist
			  : scm_freelist2);
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
  1476. /* {Front end to malloc}
  1477. *
  1478. * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
  1479. *
  1480. * These functions provide services comperable to malloc, realloc, and
  1481. * free. They are for allocating malloced parts of scheme objects.
  1482. * The primary purpose of the front end is to impose calls to gc.
  1483. */
  1484. /* scm_must_malloc
  1485. * Return newly malloced storage or throw an error.
  1486. *
  1487. * The parameter WHAT is a string for error reporting.
  1488. * If the threshold scm_mtrigger will be passed by this
  1489. * allocation, or if the first call to malloc fails,
  1490. * garbage collect -- on the presumption that some objects
  1491. * using malloced storage may be collected.
  1492. *
  1493. * The limit scm_mtrigger may be raised by this allocation.
  1494. */
  1495. void *
  1496. scm_must_malloc (scm_sizet size, const char *what)
  1497. {
  1498. void *ptr;
  1499. unsigned long nm = scm_mallocated + size;
  1500. if (nm <= scm_mtrigger)
  1501. {
  1502. SCM_SYSCALL (ptr = malloc (size));
  1503. if (NULL != ptr)
  1504. {
  1505. scm_mallocated = nm;
  1506. #ifdef GUILE_DEBUG_MALLOC
  1507. scm_malloc_register (ptr, what);
  1508. #endif
  1509. return ptr;
  1510. }
  1511. }
  1512. scm_igc (what);
  1513. nm = scm_mallocated + size;
  1514. SCM_SYSCALL (ptr = malloc (size));
  1515. if (NULL != ptr)
  1516. {
  1517. scm_mallocated = nm;
  1518. if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
  1519. if (nm > scm_mtrigger)
  1520. scm_mtrigger = nm + nm / 2;
  1521. else
  1522. scm_mtrigger += scm_mtrigger / 2;
  1523. }
  1524. #ifdef GUILE_DEBUG_MALLOC
  1525. scm_malloc_register (ptr, what);
  1526. #endif
  1527. return ptr;
  1528. }
  1529. scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  1530. return 0; /* never reached */
  1531. }
  1532. /* scm_must_realloc
  1533. * is similar to scm_must_malloc.
  1534. */
  1535. void *
  1536. scm_must_realloc (void *where,
  1537. scm_sizet old_size,
  1538. scm_sizet size,
  1539. const char *what)
  1540. {
  1541. void *ptr;
  1542. scm_sizet nm = scm_mallocated + size - old_size;
  1543. if (nm <= scm_mtrigger)
  1544. {
  1545. SCM_SYSCALL (ptr = realloc (where, size));
  1546. if (NULL != ptr)
  1547. {
  1548. scm_mallocated = nm;
  1549. #ifdef GUILE_DEBUG_MALLOC
  1550. scm_malloc_reregister (where, ptr, what);
  1551. #endif
  1552. return ptr;
  1553. }
  1554. }
  1555. scm_igc (what);
  1556. nm = scm_mallocated + size - old_size;
  1557. SCM_SYSCALL (ptr = realloc (where, size));
  1558. if (NULL != ptr)
  1559. {
  1560. scm_mallocated = nm;
  1561. if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
  1562. if (nm > scm_mtrigger)
  1563. scm_mtrigger = nm + nm / 2;
  1564. else
  1565. scm_mtrigger += scm_mtrigger / 2;
  1566. }
  1567. #ifdef GUILE_DEBUG_MALLOC
  1568. scm_malloc_reregister (where, ptr, what);
  1569. #endif
  1570. return ptr;
  1571. }
  1572. scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  1573. return 0; /* never reached */
  1574. }
  1575. void
  1576. scm_must_free (void *obj)
  1577. {
  1578. #ifdef GUILE_DEBUG_MALLOC
  1579. scm_malloc_unregister (obj);
  1580. #endif
  1581. if (obj)
  1582. free (obj);
  1583. else
  1584. scm_wta (SCM_INUM0, "already free", "");
  1585. }
  1586. /* Announce that there has been some malloc done that will be freed
  1587. * during gc. A typical use is for a smob that uses some malloced
  1588. * memory but can not get it from scm_must_malloc (for whatever
  1589. * reason). When a new object of this smob is created you call
  1590. * scm_done_malloc with the size of the object. When your smob free
  1591. * function is called, be sure to include this size in the return
  1592. * value. */
  1593. void
  1594. scm_done_malloc (long size)
  1595. {
  1596. scm_mallocated += size;
  1597. if (scm_mallocated > scm_mtrigger)
  1598. {
  1599. scm_igc ("foreign mallocs");
  1600. if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
  1601. {
  1602. if (scm_mallocated > scm_mtrigger)
  1603. scm_mtrigger = scm_mallocated + scm_mallocated / 2;
  1604. else
  1605. scm_mtrigger += scm_mtrigger / 2;
  1606. }
  1607. }
  1608. }
  1609. /* {Heap Segments}
  1610. *
  1611. * Each heap segment is an array of objects of a particular size.
  1612. * Every segment has an associated (possibly shared) freelist.
  1613. * A table of segment records is kept that records the upper and
  1614. * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
  1616. * into valid segments at reasonable offsets). */
  1617. /* scm_expmem
  1618. * is true if the first segment was smaller than INIT_HEAP_SEG.
  1619. * If scm_expmem is set to one, subsequent segment allocations will
  1620. * allocate segments of size SCM_EXPHEAP(scm_heap_size).
  1621. */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment
 * (enforced in alloc_some_heap).  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of all live heap segments, kept sorted by base address
 * (init_heap_seg inserts in order), and the number of entries in it.  */
scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
/* init_heap_seg
 * initializes a new heap segment and returns the segment size in bytes
 * (the value of SIZE; despite being measured against cells below, the
 * returned quantity is bytes), or 0 if SEG_ORG is NULL.
 *
 * The segment origin, segment size in bytes, and the span of objects
 * in cells are input parameters.  The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
 */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Round the start up to a span-aligned cell boundary.  */
  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record, keeping the
   * table sorted by segment base address.
   */
  for (new_seg_index = 0;
       ((new_seg_index < scm_n_heap_segs)
	&& SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    /* Shift later records up one slot to open a hole at new_seg_index.  */
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);

  /* Number of cells actually usable in this segment.  */
  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size.  bad cmm! */
	  abort();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Chain the remaining cells of the cluster into a freelist of
	 * free cells, each cdr pointing to the next.  */
	while (ptr < seg_end)
	  {
	    SCM scmptr = PTR2SCM (ptr);
	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	/* Terminate the cluster's cell chain.  */
	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
  1721. static scm_sizet
  1722. round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
  1723. {
  1724. scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
  1725. return
  1726. (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
  1727. + ALIGNMENT_SLACK (freelist);
  1728. }
/* Add one new segment to FREELIST's heap: grow the segment table,
 * pick a segment size big enough to satisfy the minimum-yield rule
 * (see gc.h), and malloc it, halving the request on failure.
 * Signals an error via scm_wta if nothing can be allocated.  */
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  /* NOTE(review): on realloc failure the old table is untouched; the
   * code assumes scm_wta signals an error here -- TODO confirm it does
   * not return normally.  */
  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
			   realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     *
     * f is a percentage here, hence the factors of 100; note the
     * denominator is 99 - f rather than 100 - f, which slightly
     * overestimates the required size.
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    /* NOTE(review): "%d" with long arguments relies on int == long;
     * "%ld" would be portable.  */
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);   /* convert cells to bytes */
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  /* Clamp to the configured per-segment ceiling (bytes).  */
  if (len > scm_max_segment_size)
    len = scm_max_segment_size;
  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);
    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition: halve the request until it
     * succeeds or drops below one cluster / the minimum segment size.  */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	scm_sizet rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
/* Scheme procedure unhash-name: walk every cell of every heap segment
 * and, for each cons-gloc cell whose gloc matches NAME (or every gloc,
 * when NAME is #t) and whose vcell is neither 0 nor 1, overwrite the
 * cell's word 0 with NAME.  Returns NAME.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      /* Scan all cells between this segment's lower and upper bounds.  */
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p  = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
  1845. /* {GC Protection Helper Functions}
  1846. */
  1847. void
  1848. scm_remember (SCM *ptr)
  1849. { /* empty */ }
  1850. /*
  1851. These crazy functions prevent garbage collection
  1852. of arguments after the first argument by
  1853. ensuring they remain live throughout the
  1854. function because they are used in the last
  1855. line of the code block.
  1856. It'd be better to have a nice compiler hint to
  1857. aid the conservative stack-scanning GC. --03/09/00 gjb */
  1858. SCM
  1859. scm_return_first (SCM elt, ...)
  1860. {
  1861. return elt;
  1862. }
  1863. int
  1864. scm_return_first_int (int i, ...)
  1865. {
  1866. return i;
  1867. }
/* Protect OBJ from the garbage collector for the rest of the run by
 * consing it onto the global scm_permobjs list (nothing in this file
 * ever removes entries).  For revocable protection use
 * scm_protect_object instead.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  /* Defer interrupts so the list update is atomic w.r.t. async code.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
  1876. /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
  1877. other references are dropped, until the object is unprotected by calling
  1878. scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
  1879. i. e. it is possible to protect the same object several times, but it is
  1880. necessary to unprotect the object the same number of times to actually get
  1881. the object unprotected. It is an error to unprotect an object more often
  1882. than it has been protected before. The function scm_protect_object returns
  1883. OBJ.
  1884. */
  1885. /* Implementation note: For every object X, there is a counter which
  1886. scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
  1887. */
/* Increment OBJ's protection count (see the block comment above for
 * the nesting semantics).  Returns OBJ.  */
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Find OBJ's entry in the scm_protects hash table, creating it with
   * count 0 if absent, then bump the count stored in the handle's cdr.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
  1899. /* Remove any protection for OBJ established by a prior call to
  1900. scm_protect_object. This function returns OBJ.
  1901. See scm_protect_object for more information. */
/* Decrement OBJ's protection count, removing the hash-table entry when
 * it reaches zero.  Aborts if OBJ was never protected.  Returns OBJ.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      /* No entry: unbalanced unprotect -- treat as a fatal caller bug.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* A stored count is always >= 1 (scm_protect_object increments
       * past the initial 0, and a count that hits 0 removes the entry),
       * so this unsigned subtraction cannot underflow.  */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
/* Set to 1 once process termination has begun (see cleanup below).  */
int terminating;

/* called on process termination.  The signature depends on which exit
 * hook the platform provides: atexit takes a no-argument handler,
 * on_exit passes the exit status and a user argument.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);
static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Make sure buffered output reaches its destination before exit.  */
  scm_flush_all_ports ();
}
  1943. static int
  1944. make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
  1945. {
  1946. scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  1947. if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
  1948. rounded_size,
  1949. freelist))
  1950. {
  1951. rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
  1952. if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
  1953. rounded_size,
  1954. freelist))
  1955. return 1;
  1956. }
  1957. else
  1958. scm_expmem = 1;
  1959. if (freelist->min_yield_fraction)
  1960. freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
  1961. / 100);
  1962. freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
  1963. return 0;
  1964. }
  1965. static void
  1966. init_freelist (scm_freelist_t *freelist,
  1967. int span,
  1968. int cluster_size,
  1969. int min_yield)
  1970. {
  1971. freelist->clusters = SCM_EOL;
  1972. freelist->cluster_size = cluster_size + 1;
  1973. freelist->left_to_collect = 0;
  1974. freelist->clusters_allocated = 0;
  1975. freelist->min_yield = 0;
  1976. freelist->min_yield_fraction = min_yield;
  1977. freelist->span = span;
  1978. freelist->collected = 0;
  1979. freelist->collected_1 = 0;
  1980. freelist->heap_size = 0;
  1981. }
/* Initialise the storage subsystem: protected roots, master freelists,
 * initial heap segments, GC hooks, the port table, the exit handler
 * and the initial interned objects.  Size/trigger arguments of 0
 * select the compiled-in defaults.  Returns 0 on success, 1 if an
 * initial heap segment or the port table could not be allocated.  */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  /* Clear the protected-roots array before anything can allocate.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  /* Span-1 and span-2 master freelists, each with its own cluster size
   * and minimum-yield percentage.  */
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;   /* NOTE(review): this value appears unused below */
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segment records; alloc_some_heap grows this later.  */
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Flush ports on process exit, whichever hook the platform has.  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* Self-referential list of SCM_UNDEFINED (a circular cdr).  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_asyncs = SCM_EOL;
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif

  return 0;
}
/* Create the Scheme-visible after-gc-hook and register this module's
 * snarfed primitives (the generated definitions in gc.x).  */
void
scm_init_gc ()
{
  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
#include "libguile/gc.x"
}
  2058. /*
  2059. Local Variables:
  2060. c-file-style: "gnu"
  2061. End:
  2062. */