heapdump.c

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// http://code.google.com/p/go-wiki/wiki/heapdump13

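// A reader-oriented sketch of the framing implemented below (derived from
// mdump and the dump* helpers in this file, not an authoritative format
// spec): the dump starts with the literal header "go1.3 heap dump\n",
// followed by a sequence of records.  Each record begins with a varint tag
// (one of the Tag* values below) and is followed by tag-specific fields,
// each written either as a varint (dumpint) or as a varint length plus raw
// bytes (dumpmemrange, dumpstr, dumpcstr).  A TagEOF record ends the dump.
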
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
#include "go-defer.h"
#include "go-panic.h"

#define hash __hash
#define KindNoPointers GO_NO_POINTERS

enum {
	FieldKindEol = 0,
	FieldKindPtr = 1,
	FieldKindString = 2,
	FieldKindSlice = 3,
	FieldKindIface = 4,
	FieldKindEface = 5,

	TagEOF = 0,
	TagObject = 1,
	TagOtherRoot = 2,
	TagType = 3,
	TagGoRoutine = 4,
	TagStackFrame = 5,
	TagParams = 6,
	TagFinalizer = 7,
	TagItab = 8,
	TagOSThread = 9,
	TagMemStats = 10,
	TagQueuedFinalizer = 11,
	TagData = 12,
	TagBss = 13,
	TagDefer = 14,
	TagPanic = 15,
	TagMemProf = 16,
	TagAllocSample = 17,

	TypeInfo_Conservative = 127,
};

// static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
// static void dumpfields(uintptr *prog);
static void dumpefacetypes(void *obj, uintptr size, const Type *type, uintptr kind);

// fd to write the dump to.
static uintptr dumpfd;

// buffer of pending write data
enum {
	BufSize = 4096,
};
static byte buf[BufSize];
static uintptr nbuf;

static void
hwrite(const byte *data, uintptr len)
{
	if(len + nbuf <= BufSize) {
		runtime_memmove(buf + nbuf, data, len);
		nbuf += len;
		return;
	}
	runtime_write(dumpfd, buf, nbuf);
	if(len >= BufSize) {
		runtime_write(dumpfd, data, len);
		nbuf = 0;
	} else {
		runtime_memmove(buf, data, len);
		nbuf = len;
	}
}

static void
flush(void)
{
	runtime_write(dumpfd, buf, nbuf);
	nbuf = 0;
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
enum {
	TypeCacheBuckets = 256, // must be a power of 2
	TypeCacheAssoc = 4,
};
typedef struct TypeCacheBucket TypeCacheBucket;
struct TypeCacheBucket {
	const Type *t[TypeCacheAssoc];
};
static TypeCacheBucket typecache[TypeCacheBuckets];

// dump a uint64 in a varint format parseable by encoding/binary
static void
dumpint(uint64 v)
{
	byte buf[10];
	int32 n;

	n = 0;
	while(v >= 0x80) {
		buf[n++] = v | 0x80;
		v >>= 7;
	}
	buf[n++] = v;
	hwrite(buf, n);
}

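// Illustrative only: a matching decoder for the varint encoding written by
// dumpint above, as a heap dump reader might implement it.  The function
// name and out-parameter are hypothetical; nothing in the runtime calls
// this, so it is kept commented out like the other inactive code in this
// file.  For example, dumpint(300) emits the bytes 0xAC 0x02, which this
// sketch decodes back to 300, matching encoding/binary's unsigned varint.
/*
static uint64
undumpint(const byte *p, uintptr *width)
{
	uint64 v;
	uintptr n;
	uint32 shift;

	v = 0;
	n = 0;
	for(shift = 0; ; shift += 7) {
		v |= (uint64)(p[n] & 0x7f) << shift;	// low 7 bits carry payload
		if((p[n++] & 0x80) == 0)		// high bit clear: last byte
			break;
	}
	if(width != nil)
		*width = n;	// number of bytes consumed
	return v;
}
*/
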
static void
dumpbool(bool b)
{
	dumpint(b ? 1 : 0);
}

// dump varint uint64 length followed by memory contents
static void
dumpmemrange(const byte *data, uintptr len)
{
	dumpint(len);
	hwrite(data, len);
}

static void
dumpstr(String s)
{
	dumpmemrange(s.str, s.len);
}

static void
dumpcstr(const int8 *c)
{
	dumpmemrange((const byte*)c, runtime_findnull((const byte*)c));
}

// dump information for a type
static void
dumptype(const Type *t)
{
	TypeCacheBucket *b;
	int32 i, j;

	if(t == nil) {
		return;
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b = &typecache[t->hash & (TypeCacheBuckets-1)];
	if(t == b->t[0]) return;
	for(i = 1; i < TypeCacheAssoc; i++) {
		if(t == b->t[i]) {
			// Move-to-front
			for(j = i; j > 0; j--) {
				b->t[j] = b->t[j-1];
			}
			b->t[0] = t;
			return;
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for(j = TypeCacheAssoc-1; j > 0; j--) {
		b->t[j] = b->t[j-1];
	}
	b->t[0] = t;

	// dump the type
	dumpint(TagType);
	dumpint((uintptr)t);
	dumpint(t->__size);
	if(t->__uncommon == nil || t->__uncommon->__pkg_path == nil || t->__uncommon->__name == nil) {
		dumpstr(*t->__reflection);
	} else {
		dumpint(t->__uncommon->__pkg_path->len + 1 + t->__uncommon->__name->len);
		hwrite(t->__uncommon->__pkg_path->str, t->__uncommon->__pkg_path->len);
		hwrite((const byte*)".", 1);
		hwrite(t->__uncommon->__name->str, t->__uncommon->__name->len);
	}
	dumpbool(t->__size > PtrSize || (t->__code & KindNoPointers) == 0);
	// dumpfields((uintptr*)t->gc + 1);
}

// returns true if object is scannable
static bool
scannable(byte *obj)
{
	uintptr *b, off, shift;

	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	return ((*b >> shift) & bitScan) != 0;
}

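// Descriptive note on the bitmap arithmetic above (and repeated in dumpobjs
// below), derived from the expressions themselves: the GC bitmap lives
// immediately below arena_start and is indexed backward, so the bitmap word
// for a heap word at word offset off is arena_start - off/wordsPerBitmapWord
// - 1, and the flag bits (bitAllocated, bitScan) for that word are tested
// after shifting right by off % wordsPerBitmapWord.
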
// dump an object
static void
dumpobj(byte *obj, uintptr size, const Type *type, uintptr kind)
{
	if(type != nil) {
		dumptype(type);
		dumpefacetypes(obj, size, type, kind);
	}

	dumpint(TagObject);
	dumpint((uintptr)obj);
	dumpint((uintptr)type);
	dumpint(kind);
	dumpmemrange(obj, size);
}

static void
dumpotherroot(const char *description, byte *to)
{
	dumpint(TagOtherRoot);
	dumpcstr((const int8 *)description);
	dumpint((uintptr)to);
}

static void
dumpfinalizer(byte *obj, FuncVal *fn, const FuncType* ft, const PtrType *ot)
{
	dumpint(TagFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

typedef struct ChildInfo ChildInfo;
struct ChildInfo {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	uintptr argoff;  // where the arguments start in the frame
	uintptr arglen;  // size of args region
	BitVector args;  // if args.n >= 0, pointer map of args region
	byte *sp;        // callee sp
	uintptr depth;   // depth in call stack (0 == most recent)
};

static void
dumpgoroutine(G *gp)
{
	// ChildInfo child;
	Defer *d;
	Panic *p;

	dumpint(TagGoRoutine);
	dumpint((uintptr)gp);
	dumpint((uintptr)0);
	dumpint(gp->goid);
	dumpint(gp->gopc);
	dumpint(gp->status);
	dumpbool(gp->issystem);
	dumpbool(gp->isbackground);
	dumpint(gp->waitsince);
	dumpcstr((const int8 *)gp->waitreason);
	dumpint((uintptr)0);
	dumpint((uintptr)gp->m);
	dumpint((uintptr)gp->defer);
	dumpint((uintptr)gp->panic);

	// dump stack
	// child.args.n = -1;
	// child.arglen = 0;
	// child.sp = nil;
	// child.depth = 0;
	// if(!ScanStackByFrames)
	//	runtime_throw("need frame info to dump stacks");
	// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);

	// dump defer & panic records
	for(d = gp->defer; d != nil; d = d->__next) {
		dumpint(TagDefer);
		dumpint((uintptr)d);
		dumpint((uintptr)gp);
		dumpint((uintptr)d->__arg);
		dumpint((uintptr)d->__frame);
		dumpint((uintptr)d->__pfn);
		dumpint((uintptr)0);
		dumpint((uintptr)d->__next);
	}
	for (p = gp->panic; p != nil; p = p->__next) {
		dumpint(TagPanic);
		dumpint((uintptr)p);
		dumpint((uintptr)gp);
		dumpint((uintptr)p->__arg.__type_descriptor);
		dumpint((uintptr)p->__arg.__object);
		dumpint((uintptr)0);
		dumpint((uintptr)p->__next);
	}
}

static void
dumpgs(void)
{
	G *gp;
	uint32 i;

	// goroutines & stacks
	for(i = 0; i < runtime_allglen; i++) {
		gp = runtime_allg[i];
		switch(gp->status){
		default:
			runtime_printf("unexpected G.status %d\n", gp->status);
			runtime_throw("mark - bad status");
		case Gdead:
			break;
		case Grunnable:
		case Gsyscall:
		case Gwaiting:
			dumpgoroutine(gp);
			break;
		}
	}
}

static void
finq_callback(FuncVal *fn, void *obj, const FuncType *ft, const PtrType *ot)
{
	dumpint(TagQueuedFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

static void
dumproots(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialFinalizer *spf;
	byte *p;

	// data segment
	// dumpint(TagData);
	// dumpint((uintptr)data);
	// dumpmemrange(data, edata - data);
	// dumpfields((uintptr*)gcdata + 1);

	// bss segment
	// dumpint(TagBss);
	// dumpint((uintptr)bss);
	// dumpmemrange(bss, ebss - bss);
	// dumpfields((uintptr*)gcbss + 1);

	// MSpan.types
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			switch(s->types.compression) {
			case MTypes_Empty:
			case MTypes_Single:
				break;
			case MTypes_Words:
			case MTypes_Bytes:
				dumpotherroot("runtime type info", (byte*)s->types.data);
				break;
			}

			// Finalizers
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
					continue;
				spf = (SpecialFinalizer*)sp;
				p = (byte*)((s->start << PageShift) + spf->offset);
				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
			}
		}
	}

	// Finalizer queue
	runtime_iterate_finq(finq_callback);
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
static byte hfree[PageSize/8];

static void
dumpobjs(void)
{
	uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
	MSpan *s;
	MLink *l;
	byte *p;
	const Type *t;

	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state != MSpanInUse)
			continue;
		p = (byte*)(s->start << PageShift);
		size = s->elemsize;
		n = (s->npages << PageShift) / size;
		if(n > PageSize/8)
			runtime_throw("free array doesn't have enough entries");
		for(l = s->freelist; l != nil; l = l->next) {
			hfree[((byte*)l - p) / size] = true;
		}
		for(j = 0; j < n; j++, p += size) {
			if(hfree[j]) {
				hfree[j] = false;
				continue;
			}
			off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
			bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = *bitp >> shift;

			// Skip FlagNoGC allocations (stacks)
			if((bits & bitAllocated) == 0)
				continue;

			// extract type and kind
			ti = runtime_gettype(p);
			t = (Type*)(ti & ~(uintptr)(PtrSize-1));
			kind = ti & (PtrSize-1);

			// dump it
			if(kind == TypeInfo_Chan)
				t = ((const ChanType*)t)->__element_type; // use element type for chan encoding
			if(t == nil && scannable(p))
				kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
			dumpobj(p, size, t, kind);
		}
	}
}

static void
dumpparams(void)
{
	byte *x;

	dumpint(TagParams);
	x = (byte*)1;
	if(*(byte*)&x == 1)
		dumpbool(false); // little-endian ptrs
	else
		dumpbool(true); // big-endian ptrs
	dumpint(PtrSize);
	dumpint(runtime_Hchansize);
	dumpint((uintptr)runtime_mheap.arena_start);
	dumpint((uintptr)runtime_mheap.arena_used);
	dumpint(0);
	dumpcstr((const int8 *)"");
	dumpint(runtime_ncpu);
}

static void
dumpms(void)
{
	M *mp;

	for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
		dumpint(TagOSThread);
		dumpint((uintptr)mp);
		dumpint(mp->id);
		dumpint(0);
	}
}

static void
dumpmemstats(void)
{
	int32 i;

	dumpint(TagMemStats);
	dumpint(mstats.alloc);
	dumpint(mstats.total_alloc);
	dumpint(mstats.sys);
	dumpint(mstats.nlookup);
	dumpint(mstats.nmalloc);
	dumpint(mstats.nfree);
	dumpint(mstats.heap_alloc);
	dumpint(mstats.heap_sys);
	dumpint(mstats.heap_idle);
	dumpint(mstats.heap_inuse);
	dumpint(mstats.heap_released);
	dumpint(mstats.heap_objects);
	dumpint(mstats.stacks_inuse);
	dumpint(mstats.stacks_sys);
	dumpint(mstats.mspan_inuse);
	dumpint(mstats.mspan_sys);
	dumpint(mstats.mcache_inuse);
	dumpint(mstats.mcache_sys);
	dumpint(mstats.buckhash_sys);
	dumpint(mstats.gc_sys);
	dumpint(mstats.other_sys);
	dumpint(mstats.next_gc);
	dumpint(mstats.last_gc);
	dumpint(mstats.pause_total_ns);
	for(i = 0; i < 256; i++)
		dumpint(mstats.pause_ns[i]);
	dumpint(mstats.numgc);
}

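// Descriptive note, not a behavior change: the fields above are written in
// the same order as the exported runtime.MemStats fields they correspond to
// (Alloc, TotalAlloc, Sys, Lookups, Mallocs, Frees, the Heap*/Stack*/MSpan*/
// MCache*/BuckHashSys/GCSys/OtherSys groups, NextGC, LastGC, PauseTotalNs,
// PauseNs[256], NumGC), so a dump reader can decode the TagMemStats record
// by walking that field list.
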
static void
dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintptr allocs, uintptr frees)
{
	uintptr i, pc;
	byte buf[20];

	dumpint(TagMemProf);
	dumpint((uintptr)b);
	dumpint(size);
	dumpint(nstk);
	for(i = 0; i < nstk; i++) {
		pc = stk[i].pc;
		if(stk[i].function.len == 0) {
			runtime_snprintf(buf, sizeof(buf), "%X", (uint64)pc);
			dumpcstr((int8*)buf);
			dumpcstr((const int8*)"?");
			dumpint(0);
		} else {
			dumpstr(stk[i].function);
			dumpstr(stk[i].filename);
			dumpint(stk[i].lineno);
		}
	}
	dumpint(allocs);
	dumpint(frees);
}

static void
dumpmemprof(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialProfile *spp;
	byte *p;

	runtime_iterate_memprof(dumpmemprof_callback);

	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state != MSpanInUse)
			continue;
		for(sp = s->specials; sp != nil; sp = sp->next) {
			if(sp->kind != KindSpecialProfile)
				continue;
			spp = (SpecialProfile*)sp;
			p = (byte*)((s->start << PageShift) + spp->offset);
			dumpint(TagAllocSample);
			dumpint((uintptr)p);
			dumpint((uintptr)spp->b);
		}
	}
}

static void
mdump(G *gp)
{
	const byte *hdr;
	uintptr i;
	MSpan *s;

	// make sure we're done sweeping
	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state == MSpanInUse)
			runtime_MSpan_EnsureSwept(s);
	}

	runtime_memclr((byte*)&typecache[0], sizeof(typecache));
	hdr = (const byte*)"go1.3 heap dump\n";
	hwrite(hdr, runtime_findnull(hdr));
	dumpparams();
	dumpobjs();
	dumpgs();
	dumpms();
	dumproots();
	dumpmemstats();
	dumpmemprof();
	dumpint(TagEOF);
	flush();

	gp->param = nil;
	gp->status = Grunning;
	runtime_gogo(gp);
}

void runtime_debug_WriteHeapDump(uintptr)
  __asm__(GOSYM_PREFIX "runtime_debug.WriteHeapDump");

void
runtime_debug_WriteHeapDump(uintptr fd)
{
	M *m;
	G *g;

	// Stop the world.
	runtime_semacquire(&runtime_worldsema, false);
	m = runtime_m();
	m->gcing = 1;
	m->locks++;
	runtime_stoptheworld();

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	runtime_updatememstats(nil);

	// Set dump file.
	dumpfd = fd;

	// Call dump routine on M stack.
	g = runtime_g();
	g->status = Gwaiting;
	g->waitreason = "dumping heap";
	runtime_mcall(mdump);

	// Reset dump file.
	dumpfd = 0;

	// Start up the world again.
	m->gcing = 0;
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
	m->locks--;
}

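// Illustrative Go-side usage (not part of this file): runtime/debug's
// WriteHeapDump takes an already-open file descriptor, so a caller would
// typically write something like
//
//	f, err := os.Create("heapdump")   // file name is arbitrary
//	if err != nil {
//		// handle the error
//	}
//	debug.WriteHeapDump(f.Fd())
//	f.Close()
//
// Any writable descriptor works; the world is stopped for the duration of
// the dump, as implemented above.
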
// Runs the specified gc program. Calls the callback for every
// pointer-like field specified by the program and passes to the
// callback the kind and offset of that field within the object.
// offset is the offset in the object of the start of the program.
// Returns a pointer to the opcode that ended the gc program (either
// GC_END or GC_ARRAY_NEXT).
/*
static uintptr*
playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
{
	uintptr len, elemsize, i, *end;

	for(;;) {
		switch(prog[0]) {
		case GC_END:
			return prog;
		case GC_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_APTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 2;
			break;
		case GC_ARRAY_START:
			len = prog[2];
			elemsize = prog[3];
			end = nil;
			for(i = 0; i < len; i++) {
				end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
				if(end[0] != GC_ARRAY_NEXT)
					runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
			}
			prog = end + 1;
			break;
		case GC_ARRAY_NEXT:
			return prog;
		case GC_CALL:
			playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
			prog += 3;
			break;
		case GC_CHAN_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_STRING:
			callback(arg, FieldKindString, offset + prog[1]);
			prog += 2;
			break;
		case GC_EFACE:
			callback(arg, FieldKindEface, offset + prog[1]);
			prog += 2;
			break;
		case GC_IFACE:
			callback(arg, FieldKindIface, offset + prog[1]);
			prog += 2;
			break;
		case GC_SLICE:
			callback(arg, FieldKindSlice, offset + prog[1]);
			prog += 3;
			break;
		case GC_REGION:
			playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
			prog += 4;
			break;
		default:
			runtime_printf("%D\n", (uint64)prog[0]);
			runtime_throw("bad gc op");
		}
	}
}

static void
dump_callback(void *p, uintptr kind, uintptr offset)
{
	USED(&p);
	dumpint(kind);
	dumpint(offset);
}

// dumpint() the kind & offset of each field in an object.
static void
dumpfields(uintptr *prog)
{
	playgcprog(0, prog, dump_callback, nil);
	dumpint(FieldKindEol);
}

static void
dumpeface_callback(void *p, uintptr kind, uintptr offset)
{
	Eface *e;

	if(kind != FieldKindEface)
		return;
	e = (Eface*)((byte*)p + offset);
	dumptype(e->__type_descriptor);
}
*/

// The heap dump reader needs to be able to disambiguate
// Eface entries. So it needs to know every type that might
// appear in such an entry. The following two routines accomplish
// that.

// Dump all the types that appear in the type field of
// any Eface contained in obj.
static void
dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *type, uintptr kind)
{
	uintptr i;

	switch(kind) {
	case TypeInfo_SingleObject:
		// playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		break;
	case TypeInfo_Array:
		for(i = 0; i <= size - type->__size; i += type->__size) {
			// playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	case TypeInfo_Chan:
		if(type->__size == 0) // channels may have zero-sized objects in them
			break;
		for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
			// playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	}
}