Add-rpmalloc-for-musl.patch 150 KB

  1. From 39d7b7471761a3ac1ea0400cf6745968b7edd6bb Mon Sep 17 00:00:00 2001
  2. From: 12101111 <w12101111@gmail.com>
  3. Date: Wed, 13 Apr 2022 21:02:01 +0800
  4. Subject: [PATCH 1/2] Add rpmalloc for musl
  5. ---
  6. Makefile | 2 +-
  7. README | 2 +
  8. configure | 1 +
  9. ldso/dynlink.c | 12 +
  10. src/env/__libc_start_main.c | 4 +
  11. src/internal/pthread_impl.h | 1 +
  12. src/malloc/lite_malloc.c | 5 +
  13. src/malloc/rpmalloc/glue.h | 46 +
  14. src/malloc/rpmalloc/rpmalloc.c | 3422 ++++++++++++++++++++++++++++++++
  15. src/malloc/rpmalloc/rpmalloc.h | 362 ++++
  16. src/thread/pthread_create.c | 6 +
  17. 11 files changed, 3862 insertions(+), 1 deletion(-)
  18. create mode 100644 src/malloc/rpmalloc/glue.h
  19. create mode 100644 src/malloc/rpmalloc/rpmalloc.c
  20. create mode 100644 src/malloc/rpmalloc/rpmalloc.h
  21. diff --git a/Makefile b/Makefile
  22. index e8cc4436..77fc6016 100644
  23. --- a/Makefile
  24. +++ b/Makefile
  25. @@ -44,7 +44,7 @@ LIBCC = -lgcc
  26. CPPFLAGS =
  27. CFLAGS =
  28. CFLAGS_AUTO = -Os -pipe
  29. -CFLAGS_C99FSE = -std=c99 -ffreestanding -nostdinc
  30. +CFLAGS_C99FSE = -std=c99 -ffreestanding -nostdinc -isystem $(shell $(CC) -print-file-name=include)
  31. CFLAGS_ALL = $(CFLAGS_C99FSE)
  32. CFLAGS_ALL += -D_XOPEN_SOURCE=700 -I$(srcdir)/arch/$(ARCH) -I$(srcdir)/arch/generic -Iobj/src/internal -I$(srcdir)/src/include -I$(srcdir)/src/internal -Iobj/include -I$(srcdir)/include
  33. diff --git a/README b/README
  34. index a30eb112..50f3e8ef 100644
  35. --- a/README
  36. +++ b/README
  37. @@ -1,4 +1,6 @@
  38. +musl libc with rpmalloc
  39. +======
  40. musl libc
  41. musl, pronounced like the word "mussel", is an MIT-licensed
  42. diff --git a/configure b/configure
  43. index ca5cbc0b..24f4122d 100755
  44. --- a/configure
  45. +++ b/configure
  46. @@ -347,6 +347,7 @@ esac
  47. #
  48. tryflag CFLAGS_C99FSE -std=c99
  49. tryflag CFLAGS_C99FSE -nostdinc
  50. +tryflag CFLAGS_C99FSE "-isystem $($CC -print-file-name=include)"
  51. tryflag CFLAGS_C99FSE -ffreestanding \
  52. || tryflag CFLAGS_C99FSE -fno-builtin
  53. tryflag CFLAGS_C99FSE -fexcess-precision=standard \
  54. diff --git a/ldso/dynlink.c b/ldso/dynlink.c
  55. index 5b9c8be4..20627170 100644
  56. --- a/ldso/dynlink.c
  57. +++ b/ldso/dynlink.c
  58. @@ -150,6 +150,7 @@ static struct fdpic_dummy_loadmap app_dummy_loadmap;
  59. struct debug *_dl_debug_addr = &debug;
  60. extern hidden int __malloc_replaced;
  61. +extern hidden int __malloc_process_init();
  62. hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
  63. @@ -1550,6 +1551,11 @@ static void do_init_fini(struct dso **queue)
  64. void __libc_start_init(void)
  65. {
66. + /* Initialize per-process structure of the malloc implementation */
  67. + if (__malloc_process_init() < 0) {
  68. + dprintf(2, "failed to initialize malloc\n");
  69. + _exit(127);
  70. + }
  71. do_init_fini(main_ctor_queue);
  72. if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
  73. free(main_ctor_queue);
  74. @@ -1886,6 +1892,12 @@ void __dls3(size_t *sp, size_t *auxv)
  75. reclaim_gaps(&app);
  76. reclaim_gaps(&ldso);
77. + /* Initialize per-process structure of the malloc implementation */
  78. + if (__malloc_process_init() < 0) {
  79. + dprintf(2, "%s: failed to initialize malloc\n", argv[0]);
  80. + _exit(127);
  81. + }
  82. +
  83. /* Load preload/needed libraries, add symbols to global namespace. */
  84. ldso.deps = (struct dso **)no_deps;
  85. if (env_preload) load_preload(env_preload);
  86. diff --git a/src/env/__libc_start_main.c b/src/env/__libc_start_main.c
  87. index c5b277bd..2da2db71 100644
  88. --- a/src/env/__libc_start_main.c
  89. +++ b/src/env/__libc_start_main.c
  90. @@ -7,6 +7,8 @@
  91. #include "atomic.h"
  92. #include "libc.h"
  93. +extern hidden int __malloc_process_init();
  94. +
  95. static void dummy(void) {}
  96. weak_alias(dummy, _init);
  97. @@ -58,6 +60,8 @@ void __init_libc(char **envp, char *pn)
  98. static void libc_start_init(void)
  99. {
100. + /* Initialize per-process structure of the malloc implementation */
  101. + if (__malloc_process_init() < 0) a_crash();
  102. _init();
  103. uintptr_t a = (uintptr_t)&__init_array_start;
  104. for (; a<(uintptr_t)&__init_array_end; a+=sizeof(void(*)()))
  105. diff --git a/src/internal/pthread_impl.h b/src/internal/pthread_impl.h
  106. index de2b9d8b..84adf894 100644
  107. --- a/src/internal/pthread_impl.h
  108. +++ b/src/internal/pthread_impl.h
  109. @@ -58,6 +58,7 @@ struct pthread {
  110. volatile int killlock[1];
  111. char *dlerror_buf;
  112. void *stdio_locks;
  113. + void *heap;
  114. /* Part 3 -- the positions of these fields relative to
  115. * the end of the structure is external and internal ABI. */
  116. diff --git a/src/malloc/lite_malloc.c b/src/malloc/lite_malloc.c
  117. index 43a988fb..ae0f6370 100644
  118. --- a/src/malloc/lite_malloc.c
  119. +++ b/src/malloc/lite_malloc.c
  120. @@ -116,3 +116,8 @@ static void *default_malloc(size_t n)
  121. }
  122. weak_alias(default_malloc, malloc);
  123. +
  124. +static void dummy(void) {}
  125. +weak_alias(dummy, __malloc_process_init);
  126. +weak_alias(dummy, __malloc_thread_init);
  127. +weak_alias(dummy, __malloc_thread_finalize);
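The three weak aliases above give the new malloc hooks do-nothing defaults, so the callers added elsewhere in the patch (dynlink.c, __libc_start_main.c, pthread_create.c) always link; the strong definitions in glue.h below override them whenever rpmalloc is built in. A minimal sketch of the mechanism, illustrative only and assuming musl's usual weak_alias definition:

/* musl's weak_alias macro expands to a weak alias attribute: */
#define weak_alias(old, new) \
	extern __typeof(old) new __attribute__((__weak__, __alias__(#old)))

static void dummy(void) {}
weak_alias(dummy, __malloc_thread_init);  /* weak default: no-op */

/* glue.h defines __malloc_thread_init as a strong symbol, so at link time
 * it wins over this weak alias and the hook calls into rpmalloc. */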
  128. diff --git a/src/malloc/rpmalloc/glue.h b/src/malloc/rpmalloc/glue.h
  129. new file mode 100644
  130. index 00000000..96b70a0d
  131. --- /dev/null
  132. +++ b/src/malloc/rpmalloc/glue.h
  133. @@ -0,0 +1,46 @@
  134. +#include "rpmalloc.h"
  135. +#if (defined(__GNUC__) && __GNUC__ >= 9)
  136. + #pragma GCC diagnostic ignored "-Wattributes" // or we get warnings that nodiscard is ignored on a forward
  137. + #define hidden_alias(fun) __attribute__((alias(#fun), used, visibility("hidden"), copy(fun)));
  138. + #define public_alias(fun) __attribute__((alias(#fun), used, visibility("default"), copy(fun)));
  139. +#else
  140. + #define hidden_alias(fun) __attribute__((alias(#fun), used, visibility("hidden")));
141. + #define public_alias(fun) __attribute__((alias(#fun), used, visibility("default")));
  142. +#endif
  143. +
  144. +void* __libc_malloc_impl(size_t) hidden_alias(rpmalloc)
  145. +void* __libc_realloc(void*, size_t) hidden_alias(rprealloc)
  146. +void __libc_free(void*) hidden_alias(rpfree)
  147. +void *aligned_alloc(size_t align, size_t size) public_alias(rpaligned_alloc)
  148. +size_t malloc_usable_size(void * p) public_alias(rpmalloc_usable_size)
  149. +
  150. +hidden void __malloc_atfork(int who) {}
  151. +hidden void __malloc_donate(char *start, char *end) {}
  152. +hidden int __malloc_process_init() {
  153. + rpmalloc_set_main_thread();
  154. + return rpmalloc_initialize();
  155. +}
  156. +hidden void __malloc_thread_init() {
  157. + rpmalloc_thread_initialize();
  158. +}
  159. +hidden void __malloc_thread_finalize() {
  160. + rpmalloc_thread_finalize(1);
  161. +}
  162. +
  163. +#include "pthread_impl.h"
  164. +
  165. +static inline heap_t* get_thread_heap_raw(void) {
  166. + pthread_t self = __pthread_self();
  167. + return (heap_t *) self->heap;
  168. +}
  169. +
  170. +static inline uintptr_t get_thread_id(void) {
  171. + pthread_t self = __pthread_self();
  172. + return (uintptr_t) self;
  173. +}
  174. +
  175. +static void set_thread_heap(heap_t* heap) {
  176. + pthread_t self = __pthread_self();
  177. + self->heap = (void *)heap;
  178. + if (heap) heap->owner_thread = get_thread_id();
  179. +}
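glue.h does two things: it re-exports rpmalloc's entry points under musl's internal and public names using GCC alias attributes (so no wrapper call is emitted), and it keeps the per-thread heap pointer in the new struct pthread member instead of rpmalloc's own thread-local variable, which makes get_thread_heap_raw a plain load off __pthread_self(). A standalone sketch of the alias technique, with hypothetical names not taken from the patch:

#include <stddef.h>

void *impl_alloc(size_t size) { (void)size; return 0; }

/* "my_malloc" becomes a second name for impl_alloc in the object file;
 * callers of either symbol reach the same code. */
void *my_malloc(size_t size)
	__attribute__((alias("impl_alloc"), used, visibility("default")));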
  180. diff --git a/src/malloc/rpmalloc/rpmalloc.c b/src/malloc/rpmalloc/rpmalloc.c
  181. new file mode 100644
  182. index 00000000..05e563c6
  183. --- /dev/null
  184. +++ b/src/malloc/rpmalloc/rpmalloc.c
  185. @@ -0,0 +1,3422 @@
  186. +/* rpmalloc.c - Memory allocator - Public Domain - 2016-2020 Mattias Jansson
  187. + *
  188. + * This library provides a cross-platform lock free thread caching malloc implementation in C11.
  189. + * The latest source code is always available at
  190. + *
  191. + * https://github.com/mjansson/rpmalloc
  192. + *
  193. + * This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
  194. + *
  195. + */
  196. +
  197. +#include "rpmalloc.h"
  198. +
  199. +////////////
  200. +///
  201. +/// Build time configurable limits
  202. +///
  203. +//////
  204. +
  205. +#if defined(__clang__)
  206. +#pragma clang diagnostic ignored "-Wunused-macros"
  207. +#pragma clang diagnostic ignored "-Wunused-function"
  208. +#if __has_warning("-Wreserved-identifier")
  209. +#pragma clang diagnostic ignored "-Wreserved-identifier"
  210. +#endif
  211. +#elif defined(__GNUC__)
  212. +#pragma GCC diagnostic ignored "-Wunused-macros"
  213. +#pragma GCC diagnostic ignored "-Wunused-function"
  214. +#endif
  215. +
  216. +#ifndef HEAP_ARRAY_SIZE
  217. +//! Size of heap hashmap
  218. +#define HEAP_ARRAY_SIZE 47
  219. +#endif
  220. +#ifndef ENABLE_THREAD_CACHE
  221. +//! Enable per-thread cache
  222. +#define ENABLE_THREAD_CACHE 1
  223. +#endif
  224. +#ifndef ENABLE_GLOBAL_CACHE
  225. +//! Enable global cache shared between all threads, requires thread cache
  226. +#define ENABLE_GLOBAL_CACHE 1
  227. +#endif
  228. +#ifndef ENABLE_VALIDATE_ARGS
  229. +//! Enable validation of args to public entry points
  230. +#define ENABLE_VALIDATE_ARGS 0
  231. +#endif
  232. +#ifndef ENABLE_STATISTICS
  233. +//! Enable statistics collection
  234. +#define ENABLE_STATISTICS 0
  235. +#endif
  236. +#ifndef ENABLE_ASSERTS
  237. +//! Enable asserts
  238. +#define ENABLE_ASSERTS 0
  239. +#endif
  240. +#ifndef ENABLE_OVERRIDE
  241. +//! Override standard library malloc/free and new/delete entry points
  242. +#define ENABLE_OVERRIDE 0
  243. +#endif
  244. +#ifndef ENABLE_PRELOAD
  245. +//! Support preloading
  246. +#define ENABLE_PRELOAD 0
  247. +#endif
  248. +#ifndef DISABLE_UNMAP
  249. +//! Disable unmapping memory pages (also enables unlimited cache)
  250. +#define DISABLE_UNMAP 0
  251. +#endif
  252. +#ifndef ENABLE_UNLIMITED_CACHE
  253. +//! Enable unlimited global cache (no unmapping until finalization)
  254. +#define ENABLE_UNLIMITED_CACHE 0
  255. +#endif
  256. +#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
  257. +//! Enable adaptive thread cache size based on use heuristics
  258. +#define ENABLE_ADAPTIVE_THREAD_CACHE 0
  259. +#endif
  260. +#ifndef DEFAULT_SPAN_MAP_COUNT
  261. +//! Default number of spans to map in call to map more virtual memory (default values yield 4MiB here)
  262. +#define DEFAULT_SPAN_MAP_COUNT 64
  263. +#endif
  264. +#ifndef GLOBAL_CACHE_MULTIPLIER
  265. +//! Multiplier for global cache
  266. +#define GLOBAL_CACHE_MULTIPLIER 8
  267. +#endif
  268. +
  269. +#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
  270. +#error Must use global cache if unmap is disabled
  271. +#endif
  272. +
  273. +#if DISABLE_UNMAP
  274. +#undef ENABLE_UNLIMITED_CACHE
  275. +#define ENABLE_UNLIMITED_CACHE 1
  276. +#endif
  277. +
  278. +#if !ENABLE_GLOBAL_CACHE
  279. +#undef ENABLE_UNLIMITED_CACHE
  280. +#define ENABLE_UNLIMITED_CACHE 0
  281. +#endif
  282. +
  283. +#if !ENABLE_THREAD_CACHE
  284. +#undef ENABLE_ADAPTIVE_THREAD_CACHE
  285. +#define ENABLE_ADAPTIVE_THREAD_CACHE 0
  286. +#endif
  287. +
  288. +#if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
  289. +# define PLATFORM_WINDOWS 1
  290. +# define PLATFORM_POSIX 0
  291. +#else
  292. +# define PLATFORM_WINDOWS 0
  293. +# define PLATFORM_POSIX 1
  294. +#endif
  295. +
  296. +/// Platform and arch specifics
  297. +#if defined(_MSC_VER) && !defined(__clang__)
  298. +# pragma warning (disable: 5105)
  299. +# ifndef FORCEINLINE
  300. +# define FORCEINLINE inline __forceinline
  301. +# endif
  302. +# define _Static_assert static_assert
  303. +#else
  304. +# ifndef FORCEINLINE
  305. +# define FORCEINLINE inline __attribute__((__always_inline__))
  306. +# endif
  307. +#endif
  308. +#if PLATFORM_WINDOWS
  309. +# ifndef WIN32_LEAN_AND_MEAN
  310. +# define WIN32_LEAN_AND_MEAN
  311. +# endif
  312. +# include <windows.h>
  313. +# if ENABLE_VALIDATE_ARGS
  314. +# include <intsafe.h>
  315. +# endif
  316. +#else
  317. +# include <unistd.h>
  318. +# include <stdio.h>
  319. +# include <stdlib.h>
  320. +# include <time.h>
  321. +# if defined(__APPLE__)
  322. +# include <TargetConditionals.h>
  323. +# if !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
  324. +# include <mach/mach_vm.h>
  325. +# include <mach/vm_statistics.h>
  326. +# endif
  327. +# include <pthread.h>
  328. +# endif
  329. +# if defined(__HAIKU__) || defined(__TINYC__)
  330. +# include <pthread.h>
  331. +# endif
  332. +#endif
  333. +
  334. +#include <stdint.h>
  335. +#include <string.h>
  336. +#include <errno.h>
  337. +
  338. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  339. +#include <fibersapi.h>
  340. +static DWORD fls_key;
  341. +#endif
  342. +
  343. +#if PLATFORM_POSIX
  344. +# include <sys/mman.h>
  345. +# include <sched.h>
  346. +# ifdef __FreeBSD__
  347. +# include <sys/sysctl.h>
  348. +# define MAP_HUGETLB MAP_ALIGNED_SUPER
  349. +# ifndef PROT_MAX
  350. +# define PROT_MAX(f) 0
  351. +# endif
  352. +# else
  353. +# define PROT_MAX(f) 0
  354. +# endif
  355. +# ifdef __sun
  356. +extern int madvise(caddr_t, size_t, int);
  357. +# endif
  358. +# ifndef MAP_UNINITIALIZED
  359. +# define MAP_UNINITIALIZED 0
  360. +# endif
  361. +#endif
  362. +#include <errno.h>
  363. +
  364. +#if ENABLE_ASSERTS
  365. +# undef NDEBUG
  366. +# if defined(_MSC_VER) && !defined(_DEBUG)
  367. +# define _DEBUG
  368. +# endif
  369. +# include <assert.h>
  370. +#define RPMALLOC_TOSTRING_M(x) #x
  371. +#define RPMALLOC_TOSTRING(x) RPMALLOC_TOSTRING_M(x)
  372. +#define rpmalloc_assert(truth, message) \
  373. + do { \
  374. + if (!(truth)) { \
  375. + if (_memory_config.error_callback) { \
  376. + _memory_config.error_callback( \
  377. + message " (" RPMALLOC_TOSTRING(truth) ") at " __FILE__ ":" RPMALLOC_TOSTRING(__LINE__)); \
  378. + } else { \
  379. + assert((truth) && message); \
  380. + } \
  381. + } \
  382. + } while (0)
  383. +#else
  384. +# define rpmalloc_assert(truth, message) do {} while(0)
  385. +#endif
  386. +#if ENABLE_STATISTICS
  387. +# include <stdio.h>
  388. +#endif
  389. +
  390. +//////
  391. +///
  392. +/// Atomic access abstraction (since MSVC does not do C11 yet)
  393. +///
  394. +//////
  395. +
  396. +#if defined(_MSC_VER) && !defined(__clang__)
  397. +
  398. +typedef volatile long atomic32_t;
  399. +typedef volatile long long atomic64_t;
  400. +typedef volatile void* atomicptr_t;
  401. +
  402. +static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return *src; }
  403. +static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { *dst = val; }
  404. +static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return (int32_t)InterlockedIncrement(val); }
  405. +static FORCEINLINE int32_t atomic_decr32(atomic32_t* val) { return (int32_t)InterlockedDecrement(val); }
  406. +static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return (int32_t)InterlockedExchangeAdd(val, add) + add; }
  407. +static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, int32_t ref) { return (InterlockedCompareExchange(dst, val, ref) == ref) ? 1 : 0; }
  408. +static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { *dst = val; }
  409. +static FORCEINLINE int64_t atomic_load64(atomic64_t* src) { return *src; }
  410. +static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return (int64_t)InterlockedExchangeAdd64(val, add) + add; }
  411. +static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
  412. +static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { *dst = val; }
  413. +static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { *dst = val; }
  414. +static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return (void*)InterlockedExchangePointer((void* volatile*)dst, val); }
  415. +static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (InterlockedCompareExchangePointer((void* volatile*)dst, val, ref) == ref) ? 1 : 0; }
  416. +
  417. +#define EXPECTED(x) (x)
  418. +#define UNEXPECTED(x) (x)
  419. +
  420. +#else
  421. +
  422. +#include <stdatomic.h>
  423. +
  424. +typedef volatile _Atomic(int32_t) atomic32_t;
  425. +typedef volatile _Atomic(int64_t) atomic64_t;
  426. +typedef volatile _Atomic(void*) atomicptr_t;
  427. +
  428. +static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
  429. +static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
  430. +static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1; }
  431. +static FORCEINLINE int32_t atomic_decr32(atomic32_t* val) { return atomic_fetch_add_explicit(val, -1, memory_order_relaxed) - 1; }
  432. +static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add; }
  433. +static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, int32_t ref) { return atomic_compare_exchange_weak_explicit(dst, &ref, val, memory_order_acquire, memory_order_relaxed); }
  434. +static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_release); }
  435. +static FORCEINLINE int64_t atomic_load64(atomic64_t* val) { return atomic_load_explicit(val, memory_order_relaxed); }
  436. +static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add; }
  437. +static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
  438. +static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
  439. +static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_release); }
  440. +static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return atomic_exchange_explicit(dst, val, memory_order_acquire); }
  441. +static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_compare_exchange_weak_explicit(dst, &ref, val, memory_order_relaxed, memory_order_relaxed); }
  442. +
  443. +#define EXPECTED(x) __builtin_expect((x), 1)
  444. +#define UNEXPECTED(x) __builtin_expect((x), 0)
  445. +
  446. +#endif
  447. +
  448. +////////////
  449. +///
  450. +/// Statistics related functions (evaluate to nothing when statistics not enabled)
  451. +///
  452. +//////
  453. +
  454. +#if ENABLE_STATISTICS
  455. +# define _rpmalloc_stat_inc(counter) atomic_incr32(counter)
  456. +# define _rpmalloc_stat_dec(counter) atomic_decr32(counter)
  457. +# define _rpmalloc_stat_add(counter, value) atomic_add32(counter, (int32_t)(value))
  458. +# define _rpmalloc_stat_add64(counter, value) atomic_add64(counter, (int64_t)(value))
  459. +# define _rpmalloc_stat_add_peak(counter, value, peak) do { int32_t _cur_count = atomic_add32(counter, (int32_t)(value)); if (_cur_count > (peak)) peak = _cur_count; } while (0)
  460. +# define _rpmalloc_stat_sub(counter, value) atomic_add32(counter, -(int32_t)(value))
  461. +# define _rpmalloc_stat_inc_alloc(heap, class_idx) do { \
  462. + int32_t alloc_current = atomic_incr32(&heap->size_class_use[class_idx].alloc_current); \
  463. + if (alloc_current > heap->size_class_use[class_idx].alloc_peak) \
  464. + heap->size_class_use[class_idx].alloc_peak = alloc_current; \
  465. + atomic_incr32(&heap->size_class_use[class_idx].alloc_total); \
  466. +} while(0)
  467. +# define _rpmalloc_stat_inc_free(heap, class_idx) do { \
  468. + atomic_decr32(&heap->size_class_use[class_idx].alloc_current); \
  469. + atomic_incr32(&heap->size_class_use[class_idx].free_total); \
  470. +} while(0)
  471. +#else
  472. +# define _rpmalloc_stat_inc(counter) do {} while(0)
  473. +# define _rpmalloc_stat_dec(counter) do {} while(0)
  474. +# define _rpmalloc_stat_add(counter, value) do {} while(0)
  475. +# define _rpmalloc_stat_add64(counter, value) do {} while(0)
  476. +# define _rpmalloc_stat_add_peak(counter, value, peak) do {} while (0)
  477. +# define _rpmalloc_stat_sub(counter, value) do {} while(0)
  478. +# define _rpmalloc_stat_inc_alloc(heap, class_idx) do {} while(0)
  479. +# define _rpmalloc_stat_inc_free(heap, class_idx) do {} while(0)
  480. +#endif
  481. +
  482. +
  483. +///
  484. +/// Preconfigured limits and sizes
  485. +///
  486. +
  487. +//! Granularity of a small allocation block (must be power of two)
  488. +#define SMALL_GRANULARITY 16
  489. +//! Small granularity shift count
  490. +#define SMALL_GRANULARITY_SHIFT 4
  491. +//! Number of small block size classes
  492. +#define SMALL_CLASS_COUNT 65
  493. +//! Maximum size of a small block
  494. +#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
  495. +//! Granularity of a medium allocation block
  496. +#define MEDIUM_GRANULARITY 512
  497. +//! Medium granularity shift count
  498. +#define MEDIUM_GRANULARITY_SHIFT 9
  499. +//! Number of medium block size classes
  500. +#define MEDIUM_CLASS_COUNT 61
  501. +//! Total number of small + medium size classes
  502. +#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
  503. +//! Number of large block size classes
  504. +#define LARGE_CLASS_COUNT 63
  505. +//! Maximum size of a medium block
  506. +#define MEDIUM_SIZE_LIMIT (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
  507. +//! Maximum size of a large block
  508. +#define LARGE_SIZE_LIMIT ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
  509. +//! Size of a span header (must be a multiple of SMALL_GRANULARITY and a power of two)
  510. +#define SPAN_HEADER_SIZE 128
  511. +//! Number of spans in thread cache
  512. +#define MAX_THREAD_SPAN_CACHE 400
  513. +//! Number of spans to transfer between thread and global cache
  514. +#define THREAD_SPAN_CACHE_TRANSFER 64
  515. +//! Number of spans in thread cache for large spans (must be greater than LARGE_CLASS_COUNT / 2)
  516. +#define MAX_THREAD_SPAN_LARGE_CACHE 100
  517. +//! Number of spans to transfer between thread and global cache for large spans
  518. +#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6
  519. +
  520. +_Static_assert((SMALL_GRANULARITY & (SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
  521. +_Static_assert((SPAN_HEADER_SIZE & (SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
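/* Editorial note, not part of the upstream file: with the defaults above,
 *   SMALL_SIZE_LIMIT  = 16 * (65 - 1)    = 1024 bytes
 *   MEDIUM_SIZE_LIMIT = 1024 + 512 * 61  = 32256 bytes
 * and, with the default 64 KiB span size defined further down,
 *   LARGE_SIZE_LIMIT  = 63 * 65536 - 128 = 4128640 bytes (just under 4 MiB).
 * Requests above LARGE_SIZE_LIMIT bypass the size classes and are mapped
 * directly from the OS as huge allocations. */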
  522. +
  523. +#if ENABLE_VALIDATE_ARGS
  524. +//! Maximum allocation size to avoid integer overflow
  525. +#undef MAX_ALLOC_SIZE
  526. +#define MAX_ALLOC_SIZE (((size_t)-1) - _memory_span_size)
  527. +#endif
  528. +
  529. +#define pointer_offset(ptr, ofs) (void*)((char*)(ptr) + (ptrdiff_t)(ofs))
  530. +#define pointer_diff(first, second) (ptrdiff_t)((const char*)(first) - (const char*)(second))
  531. +
  532. +#define INVALID_POINTER ((void*)((uintptr_t)-1))
  533. +
  534. +#define SIZE_CLASS_LARGE SIZE_CLASS_COUNT
  535. +#define SIZE_CLASS_HUGE ((uint32_t)-1)
  536. +
  537. +////////////
  538. +///
  539. +/// Data types
  540. +///
  541. +//////
  542. +
  543. +//! A memory heap, per thread
  544. +typedef struct heap_t heap_t;
  545. +//! Span of memory pages
  546. +typedef struct span_t span_t;
  547. +//! Span list
  548. +typedef struct span_list_t span_list_t;
  549. +//! Span active data
  550. +typedef struct span_active_t span_active_t;
  551. +//! Size class definition
  552. +typedef struct size_class_t size_class_t;
  553. +//! Global cache
  554. +typedef struct global_cache_t global_cache_t;
  555. +
  556. +//! Flag indicating span is the first (master) span of a split superspan
  557. +#define SPAN_FLAG_MASTER 1U
  558. +//! Flag indicating span is a secondary (sub) span of a split superspan
  559. +#define SPAN_FLAG_SUBSPAN 2U
  560. +//! Flag indicating span has blocks with increased alignment
  561. +#define SPAN_FLAG_ALIGNED_BLOCKS 4U
  562. +//! Flag indicating an unmapped master span
  563. +#define SPAN_FLAG_UNMAPPED_MASTER 8U
  564. +
  565. +#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
  566. +struct span_use_t {
  567. + //! Current number of spans used (actually used, not in cache)
  568. + atomic32_t current;
  569. + //! High water mark of spans used
  570. + atomic32_t high;
  571. +#if ENABLE_STATISTICS
  572. + //! Number of spans in deferred list
  573. + atomic32_t spans_deferred;
  574. + //! Number of spans transitioned to global cache
  575. + atomic32_t spans_to_global;
  576. + //! Number of spans transitioned from global cache
  577. + atomic32_t spans_from_global;
  578. + //! Number of spans transitioned to thread cache
  579. + atomic32_t spans_to_cache;
  580. + //! Number of spans transitioned from thread cache
  581. + atomic32_t spans_from_cache;
  582. + //! Number of spans transitioned to reserved state
  583. + atomic32_t spans_to_reserved;
  584. + //! Number of spans transitioned from reserved state
  585. + atomic32_t spans_from_reserved;
  586. + //! Number of raw memory map calls
  587. + atomic32_t spans_map_calls;
  588. +#endif
  589. +};
  590. +typedef struct span_use_t span_use_t;
  591. +#endif
  592. +
  593. +#if ENABLE_STATISTICS
  594. +struct size_class_use_t {
  595. + //! Current number of allocations
  596. + atomic32_t alloc_current;
  597. + //! Peak number of allocations
  598. + int32_t alloc_peak;
  599. + //! Total number of allocations
  600. + atomic32_t alloc_total;
  601. + //! Total number of frees
  602. + atomic32_t free_total;
  603. + //! Number of spans in use
  604. + atomic32_t spans_current;
  605. + //! Number of spans transitioned to cache
  606. + int32_t spans_peak;
  607. + //! Number of spans transitioned to cache
  608. + atomic32_t spans_to_cache;
  609. + //! Number of spans transitioned from cache
  610. + atomic32_t spans_from_cache;
  611. + //! Number of spans transitioned from reserved state
  612. + atomic32_t spans_from_reserved;
  613. + //! Number of spans mapped
  614. + atomic32_t spans_map_calls;
  615. + int32_t unused;
  616. +};
  617. +typedef struct size_class_use_t size_class_use_t;
  618. +#endif
  619. +
  620. +// A span can either represent a single span of memory pages with size declared by span_map_count configuration variable,
  621. +// or a set of spans in a continuous region, a super span. Any reference to the term "span" usually refers to both a single
  622. +// span or a super span. A super span can further be divided into multiple spans (or this, super spans), where the first
  623. +// (super)span is the master and subsequent (super)spans are subspans. The master span keeps track of how many subspans
  624. +// that are still alive and mapped in virtual memory, and once all subspans and master have been unmapped the entire
  625. +// superspan region is released and unmapped (on Windows for example, the entire superspan range has to be released
  626. +// in the same call to release the virtual memory range, but individual subranges can be decommitted individually
  627. +// to reduce physical memory use).
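/* Editorial sketch, not part of the upstream file: a superspan mapped as
 * four spans looks roughly like
 *
 *   [ master | subspan | subspan | subspan ]
 *
 * The master records total_spans (4 here), each subspan records its
 * offset_from_master, and the master's remaining_spans counter drops as
 * (sub)spans are unmapped; only when it reaches zero is the whole virtual
 * range released back to the OS. */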
  628. +struct span_t {
  629. + //! Free list
  630. + void* free_list;
  631. + //! Total block count of size class
  632. + uint32_t block_count;
  633. + //! Size class
  634. + uint32_t size_class;
  635. + //! Index of last block initialized in free list
  636. + uint32_t free_list_limit;
  637. + //! Number of used blocks remaining when in partial state
  638. + uint32_t used_count;
  639. + //! Deferred free list
  640. + atomicptr_t free_list_deferred;
  641. + //! Size of deferred free list, or list of spans when part of a cache list
  642. + uint32_t list_size;
  643. + //! Size of a block
  644. + uint32_t block_size;
  645. + //! Flags and counters
  646. + uint32_t flags;
  647. + //! Number of spans
  648. + uint32_t span_count;
  649. + //! Total span counter for master spans
  650. + uint32_t total_spans;
  651. + //! Offset from master span for subspans
  652. + uint32_t offset_from_master;
  653. + //! Remaining span counter, for master spans
  654. + atomic32_t remaining_spans;
  655. + //! Alignment offset
  656. + uint32_t align_offset;
  657. + //! Owning heap
  658. + heap_t* heap;
  659. + //! Next span
  660. + span_t* next;
  661. + //! Previous span
  662. + span_t* prev;
  663. +};
  664. +_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
  665. +
  666. +struct span_cache_t {
  667. + size_t count;
  668. + span_t* span[MAX_THREAD_SPAN_CACHE];
  669. +};
  670. +typedef struct span_cache_t span_cache_t;
  671. +
  672. +struct span_large_cache_t {
  673. + size_t count;
  674. + span_t* span[MAX_THREAD_SPAN_LARGE_CACHE];
  675. +};
  676. +typedef struct span_large_cache_t span_large_cache_t;
  677. +
  678. +struct heap_size_class_t {
  679. + //! Free list of active span
  680. + void* free_list;
  681. + //! Double linked list of partially used spans with free blocks.
  682. + // Previous span pointer in head points to tail span of list.
  683. + span_t* partial_span;
  684. + //! Early level cache of fully free spans
  685. + span_t* cache;
  686. +};
  687. +typedef struct heap_size_class_t heap_size_class_t;
  688. +
  689. +// Control structure for a heap, either a thread heap or a first class heap if enabled
  690. +struct heap_t {
  691. + //! Owning thread ID
  692. + uintptr_t owner_thread;
  693. + //! Free lists for each size class
  694. + heap_size_class_t size_class[SIZE_CLASS_COUNT];
  695. +#if ENABLE_THREAD_CACHE
  696. + //! Arrays of fully freed spans, single span
  697. + span_cache_t span_cache;
  698. +#endif
  699. + //! List of deferred free spans (single linked list)
  700. + atomicptr_t span_free_deferred;
  701. + //! Number of full spans
  702. + size_t full_span_count;
  703. + //! Mapped but unused spans
  704. + span_t* span_reserve;
  705. + //! Master span for mapped but unused spans
  706. + span_t* span_reserve_master;
  707. + //! Number of mapped but unused spans
  708. + uint32_t spans_reserved;
  709. + //! Child count
  710. + atomic32_t child_count;
  711. + //! Next heap in id list
  712. + heap_t* next_heap;
  713. + //! Next heap in orphan list
  714. + heap_t* next_orphan;
  715. + //! Heap ID
  716. + int32_t id;
  717. + //! Finalization state flag
  718. + int finalize;
  719. + //! Master heap owning the memory pages
  720. + heap_t* master_heap;
  721. +#if ENABLE_THREAD_CACHE
  722. + //! Arrays of fully freed spans, large spans with > 1 span count
  723. + span_large_cache_t span_large_cache[LARGE_CLASS_COUNT - 1];
  724. +#endif
  725. +#if RPMALLOC_FIRST_CLASS_HEAPS
  726. + //! Double linked list of fully utilized spans with free blocks for each size class.
  727. + // Previous span pointer in head points to tail span of list.
  728. + span_t* full_span[SIZE_CLASS_COUNT];
  729. + //! Double linked list of large and huge spans allocated by this heap
  730. + span_t* large_huge_span;
  731. +#endif
  732. +#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
  733. + //! Current and high water mark of spans used per span count
  734. + span_use_t span_use[LARGE_CLASS_COUNT];
  735. +#endif
  736. +#if ENABLE_STATISTICS
  737. + //! Allocation stats per size class
  738. + size_class_use_t size_class_use[SIZE_CLASS_COUNT + 1];
  739. + //! Number of bytes transitioned thread -> global
  740. + atomic64_t thread_to_global;
  741. + //! Number of bytes transitioned global -> thread
  742. + atomic64_t global_to_thread;
  743. +#endif
  744. +};
  745. +
  746. +// Size class for defining a block size bucket
  747. +struct size_class_t {
  748. + //! Size of blocks in this class
  749. + uint32_t block_size;
  750. + //! Number of blocks in each chunk
  751. + uint16_t block_count;
  752. + //! Class index this class is merged with
  753. + uint16_t class_idx;
  754. +};
  755. +_Static_assert(sizeof(size_class_t) == 8, "Size class size mismatch");
  756. +
  757. +struct global_cache_t {
  758. + //! Cache lock
  759. + atomic32_t lock;
  760. + //! Cache count
  761. + uint32_t count;
  762. +#if ENABLE_STATISTICS
  763. + //! Insert count
  764. + size_t insert_count;
  765. + //! Extract count
  766. + size_t extract_count;
  767. +#endif
  768. + //! Cached spans
  769. + span_t* span[GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE];
  770. + //! Unlimited cache overflow
  771. + span_t* overflow;
  772. +};
  773. +
  774. +////////////
  775. +///
  776. +/// Global data
  777. +///
  778. +//////
  779. +
  780. +//! Default span size (64KiB)
  781. +#define _memory_default_span_size (64 * 1024)
  782. +#define _memory_default_span_size_shift 16
  783. +#define _memory_default_span_mask (~((uintptr_t)(_memory_span_size - 1)))
  784. +
  785. +//! Initialized flag
  786. +static int _rpmalloc_initialized;
  787. +//! Main thread ID
  788. +static uintptr_t _rpmalloc_main_thread_id;
  789. +//! Configuration
  790. +static rpmalloc_config_t _memory_config;
  791. +//! Memory page size
  792. +static size_t _memory_page_size;
  793. +//! Shift to divide by page size
  794. +static size_t _memory_page_size_shift;
  795. +//! Granularity at which memory pages are mapped by OS
  796. +static size_t _memory_map_granularity;
  797. +#if RPMALLOC_CONFIGURABLE
  798. +//! Size of a span of memory pages
  799. +static size_t _memory_span_size;
  800. +//! Shift to divide by span size
  801. +static size_t _memory_span_size_shift;
  802. +//! Mask to get to start of a memory span
  803. +static uintptr_t _memory_span_mask;
  804. +#else
  805. +//! Hardwired span size
  806. +#define _memory_span_size _memory_default_span_size
  807. +#define _memory_span_size_shift _memory_default_span_size_shift
  808. +#define _memory_span_mask _memory_default_span_mask
  809. +#endif
  810. +//! Number of spans to map in each map call
  811. +static size_t _memory_span_map_count;
  812. +//! Number of spans to keep reserved in each heap
  813. +static size_t _memory_heap_reserve_count;
  814. +//! Global size classes
  815. +static size_class_t _memory_size_class[SIZE_CLASS_COUNT];
  816. +//! Run-time size limit of medium blocks
  817. +static size_t _memory_medium_size_limit;
  818. +//! Heap ID counter
  819. +static atomic32_t _memory_heap_id;
  820. +//! Huge page support
  821. +static int _memory_huge_pages;
  822. +#if ENABLE_GLOBAL_CACHE
  823. +//! Global span cache
  824. +static global_cache_t _memory_span_cache[LARGE_CLASS_COUNT];
  825. +#endif
  826. +//! Global reserved spans
  827. +static span_t* _memory_global_reserve;
  828. +//! Global reserved count
  829. +static size_t _memory_global_reserve_count;
  830. +//! Global reserved master
  831. +static span_t* _memory_global_reserve_master;
  832. +//! All heaps
  833. +static heap_t* _memory_heaps[HEAP_ARRAY_SIZE];
  834. +//! Used to restrict access to mapping memory for huge pages
  835. +static atomic32_t _memory_global_lock;
  836. +//! Orphaned heaps
  837. +static heap_t* _memory_orphan_heaps;
  838. +#if RPMALLOC_FIRST_CLASS_HEAPS
  839. +//! Orphaned heaps (first class heaps)
  840. +static heap_t* _memory_first_class_orphan_heaps;
  841. +#endif
  842. +#if ENABLE_STATISTICS
  843. +//! Allocations counter
  844. +static atomic64_t _allocation_counter;
  845. +//! Deallocations counter
  846. +static atomic64_t _deallocation_counter;
  847. +//! Active heap count
  848. +static atomic32_t _memory_active_heaps;
  849. +//! Number of currently mapped memory pages
  850. +static atomic32_t _mapped_pages;
  851. +//! Peak number of concurrently mapped memory pages
  852. +static int32_t _mapped_pages_peak;
  853. +//! Number of mapped master spans
  854. +static atomic32_t _master_spans;
  855. +//! Number of unmapped dangling master spans
  856. +static atomic32_t _unmapped_master_spans;
  857. +//! Running counter of total number of mapped memory pages since start
  858. +static atomic32_t _mapped_total;
  859. +//! Running counter of total number of unmapped memory pages since start
  860. +static atomic32_t _unmapped_total;
  861. +//! Number of currently mapped memory pages in OS calls
  862. +static atomic32_t _mapped_pages_os;
  863. +//! Number of currently allocated pages in huge allocations
  864. +static atomic32_t _huge_pages_current;
  865. +//! Peak number of currently allocated pages in huge allocations
  866. +static int32_t _huge_pages_peak;
  867. +#endif
  868. +
  869. +////////////
  870. +///
  871. +/// Thread local heap and ID
  872. +///
  873. +//////
  874. +
  875. +//! Get thread heap
  876. +static inline heap_t* get_thread_heap_raw(void);
  877. +
  878. +//! Get the current thread heap
  879. +static inline heap_t*
  880. +get_thread_heap(void) {
  881. + heap_t* heap = get_thread_heap_raw();
  882. +#if ENABLE_PRELOAD
  883. + if (EXPECTED(heap != 0))
  884. + return heap;
  885. + rpmalloc_initialize();
  886. + return get_thread_heap_raw();
  887. +#else
  888. + return heap;
  889. +#endif
  890. +}
  891. +
  892. +//! Fast thread ID
  893. +static inline uintptr_t get_thread_id(void);
  894. +
  895. +//! Set the current thread heap
  896. +static void set_thread_heap(heap_t* heap);
  897. +
  898. +//! Set main thread ID
  899. +static void
  900. +rpmalloc_set_main_thread(void) {
  901. + _rpmalloc_main_thread_id = get_thread_id();
  902. +}
  903. +
  904. +static void
  905. +_rpmalloc_spin(void) {
  906. +#if defined(_MSC_VER)
  907. + _mm_pause();
  908. +#elif defined(__x86_64__) || defined(__i386__)
  909. + __asm__ volatile("pause" ::: "memory");
  910. +#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)
  911. + __asm__ volatile("yield" ::: "memory");
  912. +#elif defined(__powerpc__) || defined(__powerpc64__)
  913. + // No idea if ever been compiled in such archs but ... as precaution
  914. + __asm__ volatile("or 27,27,27");
  915. +#elif defined(__sparc__)
  916. + __asm__ volatile("rd %ccr, %g0 \n\trd %ccr, %g0 \n\trd %ccr, %g0");
  917. +#else
  918. + struct timespec ts = {0};
  919. + nanosleep(&ts, 0);
  920. +#endif
  921. +}
  922. +
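/* Editorial sketch, not part of the upstream file: the atomic primitives and
 * _rpmalloc_spin above combine into the simple spin lock used for the global
 * structures, roughly: */
static void
example_lock(atomic32_t* lock) {
	/* move the lock word from 0 to 1, backing off while it is contended */
	while (!atomic_cas32_acquire(lock, 1, 0))
		_rpmalloc_spin();
}
static void
example_unlock(atomic32_t* lock) {
	atomic_store32_release(lock, 0);
}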
  923. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  924. +static void NTAPI
  925. +_rpmalloc_thread_destructor(void* value) {
  926. +#if ENABLE_OVERRIDE
  927. + // If this is called on main thread it means rpmalloc_finalize
  928. + // has not been called and shutdown is forced (through _exit) or unclean
  929. + if (get_thread_id() == _rpmalloc_main_thread_id)
  930. + return;
  931. +#endif
  932. + if (value)
  933. + rpmalloc_thread_finalize(1);
  934. +}
  935. +#endif
  936. +
  937. +
  938. +////////////
  939. +///
  940. +/// Low level memory map/unmap
  941. +///
  942. +//////
  943. +
  944. +//! Map more virtual memory
  945. +// size is number of bytes to map
  946. +// offset receives the offset in bytes from start of mapped region
  947. +// returns address to start of mapped region to use
  948. +static void*
  949. +_rpmalloc_mmap(size_t size, size_t* offset) {
  950. + rpmalloc_assert(!(size % _memory_page_size), "Invalid mmap size");
  951. + rpmalloc_assert(size >= _memory_page_size, "Invalid mmap size");
  952. + void* address = _memory_config.memory_map(size, offset);
  953. + if (EXPECTED(address != 0)) {
  954. + _rpmalloc_stat_add_peak(&_mapped_pages, (size >> _memory_page_size_shift), _mapped_pages_peak);
  955. + _rpmalloc_stat_add(&_mapped_total, (size >> _memory_page_size_shift));
  956. + }
  957. + return address;
  958. +}
  959. +
  960. +//! Unmap virtual memory
  961. +// address is the memory address to unmap, as returned from _memory_map
  962. +// size is the number of bytes to unmap, which might be less than full region for a partial unmap
  963. +// offset is the offset in bytes to the actual mapped region, as set by _memory_map
  964. +// release is set to 0 for partial unmap, or size of entire range for a full unmap
  965. +static void
  966. +_rpmalloc_unmap(void* address, size_t size, size_t offset, size_t release) {
  967. + rpmalloc_assert(!release || (release >= size), "Invalid unmap size");
  968. + rpmalloc_assert(!release || (release >= _memory_page_size), "Invalid unmap size");
  969. + if (release) {
  970. + rpmalloc_assert(!(release % _memory_page_size), "Invalid unmap size");
  971. + _rpmalloc_stat_sub(&_mapped_pages, (release >> _memory_page_size_shift));
  972. + _rpmalloc_stat_add(&_unmapped_total, (release >> _memory_page_size_shift));
  973. + }
  974. + _memory_config.memory_unmap(address, size, offset, release);
  975. +}
  976. +
  977. +//! Default implementation to map new pages to virtual memory
  978. +static void*
  979. +_rpmalloc_mmap_os(size_t size, size_t* offset) {
  980. + //Either size is a heap (a single page) or a (multiple) span - we only need to align spans, and only if larger than map granularity
  981. + size_t padding = ((size >= _memory_span_size) && (_memory_span_size > _memory_map_granularity)) ? _memory_span_size : 0;
  982. + rpmalloc_assert(size >= _memory_page_size, "Invalid mmap size");
  983. +#if PLATFORM_WINDOWS
  984. + //Ok to MEM_COMMIT - according to MSDN, "actual physical pages are not allocated unless/until the virtual addresses are actually accessed"
  985. + void* ptr = VirtualAlloc(0, size + padding, (_memory_huge_pages ? MEM_LARGE_PAGES : 0) | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  986. + if (!ptr) {
  987. + if (_memory_config.map_fail_callback) {
  988. + if (_memory_config.map_fail_callback(size + padding))
  989. + return _rpmalloc_mmap_os(size, offset);
  990. + } else {
  991. + rpmalloc_assert(ptr, "Failed to map virtual memory block");
  992. + }
  993. + return 0;
  994. + }
  995. +#else
  996. + int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;
  997. +# if defined(__APPLE__) && !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
  998. + int fd = (int)VM_MAKE_TAG(240U);
  999. + if (_memory_huge_pages)
  1000. + fd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
  1001. + void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, fd, 0);
  1002. +# elif defined(MAP_HUGETLB)
  1003. + void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE | PROT_MAX(PROT_READ | PROT_WRITE), (_memory_huge_pages ? MAP_HUGETLB : 0) | flags, -1, 0);
  1004. +# if defined(MADV_HUGEPAGE)
  1005. + // In some configurations huge page allocations might fail, in which case
  1006. + // we fall back to a normal allocation and promote the region to a transparent huge page
  1007. + if ((ptr == MAP_FAILED || !ptr) && _memory_huge_pages) {
  1008. + ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
  1009. + if (ptr && ptr != MAP_FAILED) {
  1010. + int prm = madvise(ptr, size + padding, MADV_HUGEPAGE);
  1011. + (void)prm;
  1012. + rpmalloc_assert((prm == 0), "Failed to promote the page to THP");
  1013. + }
  1014. + }
  1015. +# endif
  1016. +# elif defined(MAP_ALIGNED)
  1017. + const size_t align = (sizeof(size_t) * 8) - (size_t)(__builtin_clzl(size - 1));
  1018. + void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_ALIGNED(align) : 0) | flags, -1, 0);
  1019. +# elif defined(MAP_ALIGN)
  1020. + caddr_t base = (_memory_huge_pages ? (caddr_t)(4 << 20) : 0);
  1021. + void* ptr = mmap(base, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_ALIGN : 0) | flags, -1, 0);
  1022. +# else
  1023. + void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
  1024. +# endif
  1025. + if ((ptr == MAP_FAILED) || !ptr) {
  1026. + if (_memory_config.map_fail_callback) {
  1027. + if (_memory_config.map_fail_callback(size + padding))
  1028. + return _rpmalloc_mmap_os(size, offset);
  1029. + } else if (errno != ENOMEM) {
  1030. + rpmalloc_assert((ptr != MAP_FAILED) && ptr, "Failed to map virtual memory block");
  1031. + }
  1032. + return 0;
  1033. + }
  1034. +#endif
  1035. + _rpmalloc_stat_add(&_mapped_pages_os, (int32_t)((size + padding) >> _memory_page_size_shift));
  1036. + if (padding) {
  1037. + size_t final_padding = padding - ((uintptr_t)ptr & ~_memory_span_mask);
  1038. + rpmalloc_assert(final_padding <= _memory_span_size, "Internal failure in padding");
  1039. + rpmalloc_assert(final_padding <= padding, "Internal failure in padding");
  1040. + rpmalloc_assert(!(final_padding % 8), "Internal failure in padding");
  1041. + ptr = pointer_offset(ptr, final_padding);
  1042. + *offset = final_padding >> 3;
  1043. + }
  1044. + rpmalloc_assert((size < _memory_span_size) || !((uintptr_t)ptr & ~_memory_span_mask), "Internal failure in padding");
  1045. + return ptr;
  1046. +}
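+ // Illustrative example of the padding/offset bookkeeping above (assuming a 64KiB span
+ // size): if the OS returns 0x10003000, the pointer is bumped by final_padding =
+ // 0x10000 - 0x3000 = 0xD000 to the span-aligned 0x10010000, and *offset stores
+ // 0xD000 >> 3 so the distance back to the raw mapping start fits the 32-bit
+ // align_offset field; _rpmalloc_unmap_os undoes the shift when releasing.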
  1047. +
  1048. +//! Default implementation to unmap pages from virtual memory
  1049. +static void
  1050. +_rpmalloc_unmap_os(void* address, size_t size, size_t offset, size_t release) {
  1051. + rpmalloc_assert(release || (offset == 0), "Invalid unmap size");
  1052. + rpmalloc_assert(!release || (release >= _memory_page_size), "Invalid unmap size");
  1053. + rpmalloc_assert(size >= _memory_page_size, "Invalid unmap size");
  1054. + if (release && offset) {
  1055. + offset <<= 3;
  1056. + address = pointer_offset(address, -(int32_t)offset);
  1057. + if ((release >= _memory_span_size) && (_memory_span_size > _memory_map_granularity)) {
  1058. + //Padding is always one span size
  1059. + release += _memory_span_size;
  1060. + }
  1061. + }
  1062. +#if !DISABLE_UNMAP
  1063. +#if PLATFORM_WINDOWS
  1064. + if (!VirtualFree(address, release ? 0 : size, release ? MEM_RELEASE : MEM_DECOMMIT)) {
  1065. + rpmalloc_assert(0, "Failed to unmap virtual memory block");
  1066. + }
  1067. +#else
  1068. + if (release) {
  1069. + if (munmap(address, release)) {
  1070. + rpmalloc_assert(0, "Failed to unmap virtual memory block");
  1071. + }
  1072. + } else {
  1073. +#if defined(MADV_FREE_REUSABLE)
  1074. + int ret;
  1075. + while ((ret = madvise(address, size, MADV_FREE_REUSABLE)) == -1 && (errno == EAGAIN))
  1076. + errno = 0;
  1077. + if ((ret == -1) && (errno != 0)) {
  1078. +#elif defined(MADV_DONTNEED)
  1079. + if (madvise(address, size, MADV_DONTNEED)) {
  1080. +#elif defined(MADV_PAGEOUT)
  1081. + if (madvise(address, size, MADV_PAGEOUT)) {
  1082. +#elif defined(MADV_FREE)
  1083. + if (madvise(address, size, MADV_FREE)) {
  1084. +#else
  1085. + if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
  1086. +#endif
  1087. + rpmalloc_assert(0, "Failed to madvise virtual memory block as free");
  1088. + }
  1089. + }
  1090. +#endif
  1091. +#endif
  1092. + if (release)
  1093. + _rpmalloc_stat_sub(&_mapped_pages_os, release >> _memory_page_size_shift);
  1094. +}
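+ // The offset <<= 3 above reverses the >> 3 encoding from _rpmalloc_mmap_os, and the
+ // extra span added to 'release' covers the alignment padding that was mapped but never
+ // handed out to the caller.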
  1095. +
  1096. +static void
  1097. +_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count);
  1098. +
  1099. +//! Use global reserved spans to fulfill a memory map request (reserve size must be checked by caller)
  1100. +static span_t*
  1101. +_rpmalloc_global_get_reserved_spans(size_t span_count) {
  1102. + span_t* span = _memory_global_reserve;
  1103. + _rpmalloc_span_mark_as_subspan_unless_master(_memory_global_reserve_master, span, span_count);
  1104. + _memory_global_reserve_count -= span_count;
  1105. + if (_memory_global_reserve_count)
  1106. + _memory_global_reserve = (span_t*)pointer_offset(span, span_count << _memory_span_size_shift);
  1107. + else
  1108. + _memory_global_reserve = 0;
  1109. + return span;
  1110. +}
  1111. +
  1112. +//! Store the given spans as global reserve (must only be called from within new heap allocation, not thread safe)
  1113. +static void
  1114. +_rpmalloc_global_set_reserved_spans(span_t* master, span_t* reserve, size_t reserve_span_count) {
  1115. + _memory_global_reserve_master = master;
  1116. + _memory_global_reserve_count = reserve_span_count;
  1117. + _memory_global_reserve = reserve;
  1118. +}
  1119. +
  1120. +
  1121. +////////////
  1122. +///
  1123. +/// Span linked list management
  1124. +///
  1125. +//////
  1126. +
  1127. +//! Add a span to double linked list at the head
  1128. +static void
  1129. +_rpmalloc_span_double_link_list_add(span_t** head, span_t* span) {
  1130. + if (*head)
  1131. + (*head)->prev = span;
  1132. + span->next = *head;
  1133. + *head = span;
  1134. +}
  1135. +
  1136. +//! Pop head span from double linked list
  1137. +static void
  1138. +_rpmalloc_span_double_link_list_pop_head(span_t** head, span_t* span) {
  1139. + rpmalloc_assert(*head == span, "Linked list corrupted");
  1140. + span = *head;
  1141. + *head = span->next;
  1142. +}
  1143. +
  1144. +//! Remove a span from double linked list
  1145. +static void
  1146. +_rpmalloc_span_double_link_list_remove(span_t** head, span_t* span) {
  1147. + rpmalloc_assert(*head, "Linked list corrupted");
  1148. + if (*head == span) {
  1149. + *head = span->next;
  1150. + } else {
  1151. + span_t* next_span = span->next;
  1152. + span_t* prev_span = span->prev;
  1153. + prev_span->next = next_span;
  1154. + if (EXPECTED(next_span != 0))
  1155. + next_span->prev = prev_span;
  1156. + }
  1157. +}
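+ // These lists are intrusive and head-biased: only non-head nodes keep a valid prev
+ // pointer (add() never writes the new head's prev), which is why remove() special-cases
+ // the head instead of reading span->prev unconditionally.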
  1158. +
  1159. +
  1160. +////////////
  1161. +///
  1162. +/// Span control
  1163. +///
  1164. +//////
  1165. +
  1166. +static void
  1167. +_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span);
  1168. +
  1169. +static void
  1170. +_rpmalloc_heap_finalize(heap_t* heap);
  1171. +
  1172. +static void
  1173. +_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count);
  1174. +
  1175. +//! Declare the span to be a subspan and store distance from master span and span count
  1176. +static void
  1177. +_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count) {
  1178. + rpmalloc_assert((subspan != master) || (subspan->flags & SPAN_FLAG_MASTER), "Span master pointer and/or flag mismatch");
  1179. + if (subspan != master) {
  1180. + subspan->flags = SPAN_FLAG_SUBSPAN;
  1181. + subspan->offset_from_master = (uint32_t)((uintptr_t)pointer_diff(subspan, master) >> _memory_span_size_shift);
  1182. + subspan->align_offset = 0;
  1183. + }
  1184. + subspan->span_count = (uint32_t)span_count;
  1185. +}
  1186. +
  1187. +//! Use reserved spans to fulfill a memory map request (reserve size must be checked by caller)
  1188. +static span_t*
  1189. +_rpmalloc_span_map_from_reserve(heap_t* heap, size_t span_count) {
  1190. + //Update the heap span reserve
  1191. + span_t* span = heap->span_reserve;
  1192. + heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
  1193. + heap->spans_reserved -= (uint32_t)span_count;
  1194. +
  1195. + _rpmalloc_span_mark_as_subspan_unless_master(heap->span_reserve_master, span, span_count);
  1196. + if (span_count <= LARGE_CLASS_COUNT)
  1197. + _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_from_reserved);
  1198. +
  1199. + return span;
  1200. +}
  1201. +
  1202. +//! Get the aligned number of spans to map in based on wanted count, configured mapping granularity and the page size
  1203. +static size_t
  1204. +_rpmalloc_span_align_count(size_t span_count) {
  1205. + size_t request_count = (span_count > _memory_span_map_count) ? span_count : _memory_span_map_count;
  1206. + if ((_memory_page_size > _memory_span_size) && ((request_count * _memory_span_size) % _memory_page_size))
  1207. + request_count += _memory_span_map_count - (request_count % _memory_span_map_count);
  1208. + return request_count;
  1209. +}
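+ // Example (assuming the default span map count of 64): a request for 3 spans becomes a
+ // 64-span mapping; with huge pages, where the page size exceeds the span size, the count
+ // is further padded in units of the map count so the mapping covers whole pages.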
  1210. +
  1211. +//! Setup a newly mapped span
  1212. +static void
  1213. +_rpmalloc_span_initialize(span_t* span, size_t total_span_count, size_t span_count, size_t align_offset) {
  1214. + span->total_spans = (uint32_t)total_span_count;
  1215. + span->span_count = (uint32_t)span_count;
  1216. + span->align_offset = (uint32_t)align_offset;
  1217. + span->flags = SPAN_FLAG_MASTER;
  1218. + atomic_store32(&span->remaining_spans, (int32_t)total_span_count);
  1219. +}
  1220. +
  1221. +static void
  1222. +_rpmalloc_span_unmap(span_t* span);
  1223. +
  1224. +//! Map an aligned set of spans, taking configured mapping granularity and the page size into account
  1225. +static span_t*
  1226. +_rpmalloc_span_map_aligned_count(heap_t* heap, size_t span_count) {
  1227. + //If we already have some, but not enough, reserved spans, release those to heap cache and map a new
  1228. + //full set of spans. Otherwise we would waste memory if page size > span size (huge pages)
  1229. + size_t aligned_span_count = _rpmalloc_span_align_count(span_count);
  1230. + size_t align_offset = 0;
  1231. + span_t* span = (span_t*)_rpmalloc_mmap(aligned_span_count * _memory_span_size, &align_offset);
  1232. + if (!span)
  1233. + return 0;
  1234. + _rpmalloc_span_initialize(span, aligned_span_count, span_count, align_offset);
  1235. + _rpmalloc_stat_inc(&_master_spans);
  1236. + if (span_count <= LARGE_CLASS_COUNT)
  1237. + _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_map_calls);
  1238. + if (aligned_span_count > span_count) {
  1239. + span_t* reserved_spans = (span_t*)pointer_offset(span, span_count * _memory_span_size);
  1240. + size_t reserved_count = aligned_span_count - span_count;
  1241. + if (heap->spans_reserved) {
  1242. + _rpmalloc_span_mark_as_subspan_unless_master(heap->span_reserve_master, heap->span_reserve, heap->spans_reserved);
  1243. + _rpmalloc_heap_cache_insert(heap, heap->span_reserve);
  1244. + }
  1245. + if (reserved_count > _memory_heap_reserve_count) {
  1246. + // If huge pages or eager span map count, the global reserve spin lock is held by the caller, _rpmalloc_span_map
  1247. + rpmalloc_assert(atomic_load32(&_memory_global_lock) == 1, "Global spin lock not held as expected");
  1248. + size_t remain_count = reserved_count - _memory_heap_reserve_count;
  1249. + reserved_count = _memory_heap_reserve_count;
  1250. + span_t* remain_span = (span_t*)pointer_offset(reserved_spans, reserved_count * _memory_span_size);
  1251. + if (_memory_global_reserve) {
  1252. + _rpmalloc_span_mark_as_subspan_unless_master(_memory_global_reserve_master, _memory_global_reserve, _memory_global_reserve_count);
  1253. + _rpmalloc_span_unmap(_memory_global_reserve);
  1254. + }
  1255. + _rpmalloc_global_set_reserved_spans(span, remain_span, remain_count);
  1256. + }
  1257. + _rpmalloc_heap_set_reserved_spans(heap, span, reserved_spans, reserved_count);
  1258. + }
  1259. + return span;
  1260. +}
  1261. +
  1262. +//! Map in memory pages for the given number of spans (or use previously reserved pages)
  1263. +static span_t*
  1264. +_rpmalloc_span_map(heap_t* heap, size_t span_count) {
  1265. + if (span_count <= heap->spans_reserved)
  1266. + return _rpmalloc_span_map_from_reserve(heap, span_count);
  1267. + span_t* span = 0;
  1268. + int use_global_reserve = (_memory_page_size > _memory_span_size) || (_memory_span_map_count > _memory_heap_reserve_count);
  1269. + if (use_global_reserve) {
  1270. + // If huge pages, make sure only one thread maps more memory to avoid bloat
  1271. + while (!atomic_cas32_acquire(&_memory_global_lock, 1, 0))
  1272. + _rpmalloc_spin();
  1273. + if (_memory_global_reserve_count >= span_count) {
  1274. + size_t reserve_count = (!heap->spans_reserved ? _memory_heap_reserve_count : span_count);
  1275. + if (_memory_global_reserve_count < reserve_count)
  1276. + reserve_count = _memory_global_reserve_count;
  1277. + span = _rpmalloc_global_get_reserved_spans(reserve_count);
  1278. + if (span) {
  1279. + if (reserve_count > span_count) {
  1280. + span_t* reserved_span = (span_t*)pointer_offset(span, span_count << _memory_span_size_shift);
  1281. + _rpmalloc_heap_set_reserved_spans(heap, _memory_global_reserve_master, reserved_span, reserve_count - span_count);
  1282. + }
  1283. + // Already marked as subspan in _rpmalloc_global_get_reserved_spans
  1284. + span->span_count = (uint32_t)span_count;
  1285. + }
  1286. + }
  1287. + }
  1288. + if (!span)
  1289. + span = _rpmalloc_span_map_aligned_count(heap, span_count);
  1290. + if (use_global_reserve)
  1291. + atomic_store32_release(&_memory_global_lock, 0);
  1292. + return span;
  1293. +}
  1294. +
  1295. +//! Unmap memory pages for the given number of spans (or mark as unused if no partial unmappings)
  1296. +static void
  1297. +_rpmalloc_span_unmap(span_t* span) {
  1298. + rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  1299. + rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  1300. +
  1301. + int is_master = !!(span->flags & SPAN_FLAG_MASTER);
  1302. + span_t* master = is_master ? span : ((span_t*)pointer_offset(span, -(intptr_t)((uintptr_t)span->offset_from_master * _memory_span_size)));
  1303. + rpmalloc_assert(is_master || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  1304. + rpmalloc_assert(master->flags & SPAN_FLAG_MASTER, "Span flag corrupted");
  1305. +
  1306. + size_t span_count = span->span_count;
  1307. + if (!is_master) {
  1308. + //Directly unmap subspans (unless huge pages, in which case we defer and unmap entire page range with master)
  1309. + rpmalloc_assert(span->align_offset == 0, "Span align offset corrupted");
  1310. + if (_memory_span_size >= _memory_page_size)
  1311. + _rpmalloc_unmap(span, span_count * _memory_span_size, 0, 0);
  1312. + } else {
  1313. + //Special double flag to denote an unmapped master
  1314. + //It must be kept in memory since the span header is still needed
  1315. + span->flags |= SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN | SPAN_FLAG_UNMAPPED_MASTER;
  1316. + _rpmalloc_stat_add(&_unmapped_master_spans, 1);
  1317. + }
  1318. +
  1319. + if (atomic_add32(&master->remaining_spans, -(int32_t)span_count) <= 0) {
  1320. + //Everything unmapped, unmap the master span with release flag to unmap the entire range of the super span
  1321. + rpmalloc_assert(!!(master->flags & SPAN_FLAG_MASTER) && !!(master->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  1322. + size_t unmap_count = master->span_count;
  1323. + if (_memory_span_size < _memory_page_size)
  1324. + unmap_count = master->total_spans;
  1325. + _rpmalloc_stat_sub(&_master_spans, 1);
  1326. + _rpmalloc_stat_sub(&_unmapped_master_spans, 1);
  1327. + _rpmalloc_unmap(master, unmap_count * _memory_span_size, master->align_offset, (size_t)master->total_spans * _memory_span_size);
  1328. + }
  1329. +}
  1330. +
  1331. +//! Move the span (used for small or medium allocations) to the heap thread cache
  1332. +static void
  1333. +_rpmalloc_span_release_to_cache(heap_t* heap, span_t* span) {
  1334. + rpmalloc_assert(heap == span->heap, "Span heap pointer corrupted");
  1335. + rpmalloc_assert(span->size_class < SIZE_CLASS_COUNT, "Invalid span size class");
  1336. + rpmalloc_assert(span->span_count == 1, "Invalid span count");
  1337. +#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
  1338. + atomic_decr32(&heap->span_use[0].current);
  1339. +#endif
  1340. + _rpmalloc_stat_dec(&heap->size_class_use[span->size_class].spans_current);
  1341. + if (!heap->finalize) {
  1342. + _rpmalloc_stat_inc(&heap->span_use[0].spans_to_cache);
  1343. + _rpmalloc_stat_inc(&heap->size_class_use[span->size_class].spans_to_cache);
  1344. + if (heap->size_class[span->size_class].cache)
  1345. + _rpmalloc_heap_cache_insert(heap, heap->size_class[span->size_class].cache);
  1346. + heap->size_class[span->size_class].cache = span;
  1347. + } else {
  1348. + _rpmalloc_span_unmap(span);
  1349. + }
  1350. +}
  1351. +
  1352. +//! Initialize a (partial) free list up to next system memory page, while reserving the first block
  1353. +//! as allocated, returning number of blocks in list
  1354. +static uint32_t
  1355. +free_list_partial_init(void** list, void** first_block, void* page_start, void* block_start, uint32_t block_count, uint32_t block_size) {
  1356. + rpmalloc_assert(block_count, "Internal failure");
  1357. + *first_block = block_start;
  1358. + if (block_count > 1) {
  1359. + void* free_block = pointer_offset(block_start, block_size);
  1360. + void* block_end = pointer_offset(block_start, (size_t)block_size * block_count);
  1361. + //If block size is less than half a memory page, bound init to next memory page boundary
  1362. + if (block_size < (_memory_page_size >> 1)) {
  1363. + void* page_end = pointer_offset(page_start, _memory_page_size);
  1364. + if (page_end < block_end)
  1365. + block_end = page_end;
  1366. + }
  1367. + *list = free_block;
  1368. + block_count = 2;
  1369. + void* next_block = pointer_offset(free_block, block_size);
  1370. + while (next_block < block_end) {
  1371. + *((void**)free_block) = next_block;
  1372. + free_block = next_block;
  1373. + ++block_count;
  1374. + next_block = pointer_offset(next_block, block_size);
  1375. + }
  1376. + *((void**)free_block) = 0;
  1377. + } else {
  1378. + *list = 0;
  1379. + }
  1380. + return block_count;
  1381. +}
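+ // The free list is intrusive: each free block holds the pointer to the next free block
+ // in its own first sizeof(void*) bytes, so no side storage is needed. Only about one
+ // page worth of blocks is linked eagerly; e.g. 32-byte blocks on 4KiB pages give roughly
+ // 128 initial links, and the remainder is linked lazily on later allocations.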
  1382. +
  1383. +//! Initialize an unused span (from cache or mapped) to be new active span, putting the initial free list in heap class free list
  1384. +static void*
  1385. +_rpmalloc_span_initialize_new(heap_t* heap, heap_size_class_t* heap_size_class, span_t* span, uint32_t class_idx) {
  1386. + rpmalloc_assert(span->span_count == 1, "Internal failure");
  1387. + size_class_t* size_class = _memory_size_class + class_idx;
  1388. + span->size_class = class_idx;
  1389. + span->heap = heap;
  1390. + span->flags &= ~SPAN_FLAG_ALIGNED_BLOCKS;
  1391. + span->block_size = size_class->block_size;
  1392. + span->block_count = size_class->block_count;
  1393. + span->free_list = 0;
  1394. + span->list_size = 0;
  1395. + atomic_store_ptr_release(&span->free_list_deferred, 0);
  1396. +
  1397. + //Setup free list. Only initialize one system page worth of free blocks in list
  1398. + void* block;
  1399. + span->free_list_limit = free_list_partial_init(&heap_size_class->free_list, &block,
  1400. + span, pointer_offset(span, SPAN_HEADER_SIZE), size_class->block_count, size_class->block_size);
  1401. + //Link span as partial if blocks remain to be initialized as free list, or full if fully initialized
  1402. + if (span->free_list_limit < span->block_count) {
  1403. + _rpmalloc_span_double_link_list_add(&heap_size_class->partial_span, span);
  1404. + span->used_count = span->free_list_limit;
  1405. + } else {
  1406. +#if RPMALLOC_FIRST_CLASS_HEAPS
  1407. + _rpmalloc_span_double_link_list_add(&heap->full_span[class_idx], span);
  1408. +#endif
  1409. + ++heap->full_span_count;
  1410. + span->used_count = span->block_count;
  1411. + }
  1412. + return block;
  1413. +}
  1414. +
  1415. +static void
  1416. +_rpmalloc_span_extract_free_list_deferred(span_t* span) {
  1417. + // We need acquire semantics on the exchange operation since we are interested in the list size
  1418. + // Refer to _rpmalloc_deallocate_defer_small_or_medium for further comments on this dependency
  1419. + do {
  1420. + span->free_list = atomic_exchange_ptr_acquire(&span->free_list_deferred, INVALID_POINTER);
  1421. + } while (span->free_list == INVALID_POINTER);
  1422. + span->used_count -= span->list_size;
  1423. + span->list_size = 0;
  1424. + atomic_store_ptr_release(&span->free_list_deferred, 0);
  1425. +}
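+ // INVALID_POINTER acts as a busy marker: swapping it into free_list_deferred makes
+ // concurrent deferred frees spin in their own exchange loop until the list has been
+ // taken over here and the slot is reset to null.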
  1426. +
  1427. +static int
  1428. +_rpmalloc_span_is_fully_utilized(span_t* span) {
  1429. + rpmalloc_assert(span->free_list_limit <= span->block_count, "Span free list corrupted");
  1430. + return !span->free_list && (span->free_list_limit >= span->block_count);
  1431. +}
  1432. +
  1433. +static int
  1434. +_rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list_head) {
  1435. + void* free_list = heap->size_class[iclass].free_list;
  1436. + span_t* class_span = (span_t*)((uintptr_t)free_list & _memory_span_mask);
  1437. + if (span == class_span) {
  1438. + // Adopt the heap class free list back into the span free list
  1439. + void* block = span->free_list;
  1440. + void* last_block = 0;
  1441. + while (block) {
  1442. + last_block = block;
  1443. + block = *((void**)block);
  1444. + }
  1445. + uint32_t free_count = 0;
  1446. + block = free_list;
  1447. + while (block) {
  1448. + ++free_count;
  1449. + block = *((void**)block);
  1450. + }
  1451. + if (last_block) {
  1452. + *((void**)last_block) = free_list;
  1453. + } else {
  1454. + span->free_list = free_list;
  1455. + }
  1456. + heap->size_class[iclass].free_list = 0;
  1457. + span->used_count -= free_count;
  1458. + }
  1459. + //If this assert triggers you have memory leaks
  1460. + rpmalloc_assert(span->list_size == span->used_count, "Memory leak detected");
  1461. + if (span->list_size == span->used_count) {
  1462. + _rpmalloc_stat_dec(&heap->span_use[0].current);
  1463. + _rpmalloc_stat_dec(&heap->size_class_use[iclass].spans_current);
  1464. + // This function only used for spans in double linked lists
  1465. + if (list_head)
  1466. + _rpmalloc_span_double_link_list_remove(list_head, span);
  1467. + _rpmalloc_span_unmap(span);
  1468. + return 1;
  1469. + }
  1470. + return 0;
  1471. +}
  1472. +
  1473. +
  1474. +////////////
  1475. +///
  1476. +/// Global cache
  1477. +///
  1478. +//////
  1479. +
  1480. +#if ENABLE_GLOBAL_CACHE
  1481. +
  1482. +//! Finalize a global cache
  1483. +static void
  1484. +_rpmalloc_global_cache_finalize(global_cache_t* cache) {
  1485. + while (!atomic_cas32_acquire(&cache->lock, 1, 0))
  1486. + _rpmalloc_spin();
  1487. +
  1488. + for (size_t ispan = 0; ispan < cache->count; ++ispan)
  1489. + _rpmalloc_span_unmap(cache->span[ispan]);
  1490. + cache->count = 0;
  1491. +
  1492. + while (cache->overflow) {
  1493. + span_t* span = cache->overflow;
  1494. + cache->overflow = span->next;
  1495. + _rpmalloc_span_unmap(span);
  1496. + }
  1497. +
  1498. + atomic_store32_release(&cache->lock, 0);
  1499. +}
  1500. +
  1501. +static void
  1502. +_rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t count) {
  1503. + const size_t cache_limit = (span_count == 1) ?
  1504. + GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE :
  1505. + GLOBAL_CACHE_MULTIPLIER * (MAX_THREAD_SPAN_LARGE_CACHE - (span_count >> 1));
  1506. +
  1507. + global_cache_t* cache = &_memory_span_cache[span_count - 1];
  1508. +
  1509. + size_t insert_count = count;
  1510. + while (!atomic_cas32_acquire(&cache->lock, 1, 0))
  1511. + _rpmalloc_spin();
  1512. +
  1513. +#if ENABLE_STATISTICS
  1514. + cache->insert_count += count;
  1515. +#endif
  1516. + if ((cache->count + insert_count) > cache_limit)
  1517. + insert_count = cache_limit - cache->count;
  1518. +
  1519. + memcpy(cache->span + cache->count, span, sizeof(span_t*) * insert_count);
  1520. + cache->count += (uint32_t)insert_count;
  1521. +
  1522. +#if ENABLE_UNLIMITED_CACHE
  1523. + while (insert_count < count) {
  1524. +#else
  1525. + // Enable unlimited cache if huge pages, or we will leak since it is unlikely that an entire huge page
  1526. + // will be unmapped, and we're unable to partially decommit a huge page
  1527. + while ((_memory_page_size > _memory_span_size) && (insert_count < count)) {
  1528. +#endif
  1529. + span_t* current_span = span[insert_count++];
  1530. + current_span->next = cache->overflow;
  1531. + cache->overflow = current_span;
  1532. + }
  1533. + atomic_store32_release(&cache->lock, 0);
  1534. +
  1535. + span_t* keep = 0;
  1536. + for (size_t ispan = insert_count; ispan < count; ++ispan) {
  1537. + span_t* current_span = span[ispan];
  1538. + // Keep master spans that have remaining subspans to avoid dangling them
  1539. + if ((current_span->flags & SPAN_FLAG_MASTER) &&
  1540. + (atomic_load32(&current_span->remaining_spans) > (int32_t)current_span->span_count)) {
  1541. + current_span->next = keep;
  1542. + keep = current_span;
  1543. + } else {
  1544. + _rpmalloc_span_unmap(current_span);
  1545. + }
  1546. + }
  1547. +
  1548. + if (keep) {
  1549. + while (!atomic_cas32_acquire(&cache->lock, 1, 0))
  1550. + _rpmalloc_spin();
  1551. +
  1552. + size_t islot = 0;
  1553. + while (keep) {
  1554. + for (; islot < cache->count; ++islot) {
  1555. + span_t* current_span = cache->span[islot];
  1556. + if (!(current_span->flags & SPAN_FLAG_MASTER) || ((current_span->flags & SPAN_FLAG_MASTER) &&
  1557. + (atomic_load32(&current_span->remaining_spans) <= (int32_t)current_span->span_count))) {
  1558. + _rpmalloc_span_unmap(current_span);
  1559. + cache->span[islot] = keep;
  1560. + break;
  1561. + }
  1562. + }
  1563. + if (islot == cache->count)
  1564. + break;
  1565. + keep = keep->next;
  1566. + }
  1567. +
  1568. + if (keep) {
  1569. + span_t* tail = keep;
  1570. + while (tail->next)
  1571. + tail = tail->next;
  1572. + tail->next = cache->overflow;
  1573. + cache->overflow = keep;
  1574. + }
  1575. +
  1576. + atomic_store32_release(&cache->lock, 0);
  1577. + }
  1578. +}
  1579. +
  1580. +static size_t
  1581. +_rpmalloc_global_cache_extract_spans(span_t** span, size_t span_count, size_t count) {
  1582. + global_cache_t* cache = &_memory_span_cache[span_count - 1];
  1583. +
  1584. + size_t extract_count = 0;
  1585. + while (!atomic_cas32_acquire(&cache->lock, 1, 0))
  1586. + _rpmalloc_spin();
  1587. +
  1588. +#if ENABLE_STATISTICS
  1589. + cache->extract_count += count;
  1590. +#endif
  1591. + size_t want = count - extract_count;
  1592. + if (want > cache->count)
  1593. + want = cache->count;
  1594. +
  1595. + memcpy(span + extract_count, cache->span + (cache->count - want), sizeof(span_t*) * want);
  1596. + cache->count -= (uint32_t)want;
  1597. + extract_count += want;
  1598. +
  1599. + while ((extract_count < count) && cache->overflow) {
  1600. + span_t* current_span = cache->overflow;
  1601. + span[extract_count++] = current_span;
  1602. + cache->overflow = current_span->next;
  1603. + }
  1604. +
  1605. +#if ENABLE_ASSERTS
  1606. + for (size_t ispan = 0; ispan < extract_count; ++ispan) {
  1607. + assert(span[ispan]->span_count == span_count);
  1608. + }
  1609. +#endif
  1610. +
  1611. + atomic_store32_release(&cache->lock, 0);
  1612. +
  1613. + return extract_count;
  1614. +}
  1615. +
  1616. +#endif
  1617. +
  1618. +////////////
  1619. +///
  1620. +/// Heap control
  1621. +///
  1622. +//////
  1623. +
  1624. +static void _rpmalloc_deallocate_huge(span_t*);
  1625. +
  1626. +//! Store the given spans as reserve in the given heap
  1627. +static void
  1628. +_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count) {
  1629. + heap->span_reserve_master = master;
  1630. + heap->span_reserve = reserve;
  1631. + heap->spans_reserved = (uint32_t)reserve_span_count;
  1632. +}
  1633. +
  1634. +//! Adopt the deferred span cache list, optionally extracting the first single span for immediate re-use
  1635. +static void
  1636. +_rpmalloc_heap_cache_adopt_deferred(heap_t* heap, span_t** single_span) {
  1637. + span_t* span = (span_t*)((void*)atomic_exchange_ptr_acquire(&heap->span_free_deferred, 0));
  1638. + while (span) {
  1639. + span_t* next_span = (span_t*)span->free_list;
  1640. + rpmalloc_assert(span->heap == heap, "Span heap pointer corrupted");
  1641. + if (EXPECTED(span->size_class < SIZE_CLASS_COUNT)) {
  1642. + rpmalloc_assert(heap->full_span_count, "Heap span counter corrupted");
  1643. + --heap->full_span_count;
  1644. + _rpmalloc_stat_dec(&heap->span_use[0].spans_deferred);
  1645. +#if RPMALLOC_FIRST_CLASS_HEAPS
  1646. + _rpmalloc_span_double_link_list_remove(&heap->full_span[span->size_class], span);
  1647. +#endif
  1648. + _rpmalloc_stat_dec(&heap->span_use[0].current);
  1649. + _rpmalloc_stat_dec(&heap->size_class_use[span->size_class].spans_current);
  1650. + if (single_span && !*single_span)
  1651. + *single_span = span;
  1652. + else
  1653. + _rpmalloc_heap_cache_insert(heap, span);
  1654. + } else {
  1655. + if (span->size_class == SIZE_CLASS_HUGE) {
  1656. + _rpmalloc_deallocate_huge(span);
  1657. + } else {
  1658. + rpmalloc_assert(span->size_class == SIZE_CLASS_LARGE, "Span size class invalid");
  1659. + rpmalloc_assert(heap->full_span_count, "Heap span counter corrupted");
  1660. + --heap->full_span_count;
  1661. +#if RPMALLOC_FIRST_CLASS_HEAPS
  1662. + _rpmalloc_span_double_link_list_remove(&heap->large_huge_span, span);
  1663. +#endif
  1664. + uint32_t idx = span->span_count - 1;
  1665. + _rpmalloc_stat_dec(&heap->span_use[idx].spans_deferred);
  1666. + _rpmalloc_stat_dec(&heap->span_use[idx].current);
  1667. + if (!idx && single_span && !*single_span)
  1668. + *single_span = span;
  1669. + else
  1670. + _rpmalloc_heap_cache_insert(heap, span);
  1671. + }
  1672. + }
  1673. + span = next_span;
  1674. + }
  1675. +}
  1676. +
  1677. +static void
  1678. +_rpmalloc_heap_unmap(heap_t* heap) {
  1679. + if (!heap->master_heap) {
  1680. + if ((heap->finalize > 1) && !atomic_load32(&heap->child_count)) {
  1681. + span_t* span = (span_t*)((uintptr_t)heap & _memory_span_mask);
  1682. + _rpmalloc_span_unmap(span);
  1683. + }
  1684. + } else {
  1685. + if (atomic_decr32(&heap->master_heap->child_count) == 0) {
  1686. + _rpmalloc_heap_unmap(heap->master_heap);
  1687. + }
  1688. + }
  1689. +}
  1690. +
  1691. +static void
  1692. +_rpmalloc_heap_global_finalize(heap_t* heap) {
  1693. + if (heap->finalize++ > 1) {
  1694. + --heap->finalize;
  1695. + return;
  1696. + }
  1697. +
  1698. + _rpmalloc_heap_finalize(heap);
  1699. +
  1700. +#if ENABLE_THREAD_CACHE
  1701. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  1702. + span_cache_t* span_cache;
  1703. + if (!iclass)
  1704. + span_cache = &heap->span_cache;
  1705. + else
  1706. + span_cache = (span_cache_t*)(heap->span_large_cache + (iclass - 1));
  1707. + for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
  1708. + _rpmalloc_span_unmap(span_cache->span[ispan]);
  1709. + span_cache->count = 0;
  1710. + }
  1711. +#endif
  1712. +
  1713. + if (heap->full_span_count) {
  1714. + --heap->finalize;
  1715. + return;
  1716. + }
  1717. +
  1718. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  1719. + if (heap->size_class[iclass].free_list || heap->size_class[iclass].partial_span) {
  1720. + --heap->finalize;
  1721. + return;
  1722. + }
  1723. + }
  1724. + //Heap is now completely free, unmap and remove from heap list
  1725. + size_t list_idx = (size_t)heap->id % HEAP_ARRAY_SIZE;
  1726. + heap_t* list_heap = _memory_heaps[list_idx];
  1727. + if (list_heap == heap) {
  1728. + _memory_heaps[list_idx] = heap->next_heap;
  1729. + } else {
  1730. + while (list_heap->next_heap != heap)
  1731. + list_heap = list_heap->next_heap;
  1732. + list_heap->next_heap = heap->next_heap;
  1733. + }
  1734. +
  1735. + _rpmalloc_heap_unmap(heap);
  1736. +}
  1737. +
  1738. +//! Insert a single span into thread heap cache, releasing to global cache if overflow
  1739. +static void
  1740. +_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
  1741. + if (UNEXPECTED(heap->finalize != 0)) {
  1742. + _rpmalloc_span_unmap(span);
  1743. + _rpmalloc_heap_global_finalize(heap);
  1744. + return;
  1745. + }
  1746. +#if ENABLE_THREAD_CACHE
  1747. + size_t span_count = span->span_count;
  1748. + _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_to_cache);
  1749. + if (span_count == 1) {
  1750. + span_cache_t* span_cache = &heap->span_cache;
  1751. + span_cache->span[span_cache->count++] = span;
  1752. + if (span_cache->count == MAX_THREAD_SPAN_CACHE) {
  1753. + const size_t remain_count = MAX_THREAD_SPAN_CACHE - THREAD_SPAN_CACHE_TRANSFER;
  1754. +#if ENABLE_GLOBAL_CACHE
  1755. + _rpmalloc_stat_add64(&heap->thread_to_global, THREAD_SPAN_CACHE_TRANSFER * _memory_span_size);
  1756. + _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_to_global, THREAD_SPAN_CACHE_TRANSFER);
  1757. + _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count, span_count, THREAD_SPAN_CACHE_TRANSFER);
  1758. +#else
  1759. + for (size_t ispan = 0; ispan < THREAD_SPAN_CACHE_TRANSFER; ++ispan)
  1760. + _rpmalloc_span_unmap(span_cache->span[remain_count + ispan]);
  1761. +#endif
  1762. + span_cache->count = remain_count;
  1763. + }
  1764. + } else {
  1765. + size_t cache_idx = span_count - 2;
  1766. + span_large_cache_t* span_cache = heap->span_large_cache + cache_idx;
  1767. + span_cache->span[span_cache->count++] = span;
  1768. + const size_t cache_limit = (MAX_THREAD_SPAN_LARGE_CACHE - (span_count >> 1));
  1769. + if (span_cache->count == cache_limit) {
  1770. + const size_t transfer_limit = 2 + (cache_limit >> 2);
  1771. + const size_t transfer_count = (THREAD_SPAN_LARGE_CACHE_TRANSFER <= transfer_limit ? THREAD_SPAN_LARGE_CACHE_TRANSFER : transfer_limit);
  1772. + const size_t remain_count = cache_limit - transfer_count;
  1773. +#if ENABLE_GLOBAL_CACHE
  1774. + _rpmalloc_stat_add64(&heap->thread_to_global, transfer_count * span_count * _memory_span_size);
  1775. + _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_to_global, transfer_count);
  1776. + _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count, span_count, transfer_count);
  1777. +#else
  1778. + for (size_t ispan = 0; ispan < transfer_count; ++ispan)
  1779. + _rpmalloc_span_unmap(span_cache->span[remain_count + ispan]);
  1780. +#endif
  1781. + span_cache->count = remain_count;
  1782. + }
  1783. + }
  1784. +#else
  1785. + (void)sizeof(heap);
  1786. + _rpmalloc_span_unmap(span);
  1787. +#endif
  1788. +}
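+ // Cache sizing in the insert above: once the single-span cache reaches
+ // MAX_THREAD_SPAN_CACHE entries, THREAD_SPAN_CACHE_TRANSFER of them are pushed to the
+ // global cache (or unmapped when the global cache is compiled out); the large-span
+ // caches do the same with a limit that shrinks as the span count grows.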
  1789. +
  1790. +//! Extract the given number of spans from the different cache levels
  1791. +static span_t*
  1792. +_rpmalloc_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
  1793. + span_t* span = 0;
  1794. +#if ENABLE_THREAD_CACHE
  1795. + span_cache_t* span_cache;
  1796. + if (span_count == 1)
  1797. + span_cache = &heap->span_cache;
  1798. + else
  1799. + span_cache = (span_cache_t*)(heap->span_large_cache + (span_count - 2));
  1800. + if (span_cache->count) {
  1801. + _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_from_cache);
  1802. + return span_cache->span[--span_cache->count];
  1803. + }
  1804. +#endif
  1805. + return span;
  1806. +}
  1807. +
  1808. +static span_t*
  1809. +_rpmalloc_heap_thread_cache_deferred_extract(heap_t* heap, size_t span_count) {
  1810. + span_t* span = 0;
  1811. + if (span_count == 1) {
  1812. + _rpmalloc_heap_cache_adopt_deferred(heap, &span);
  1813. + } else {
  1814. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  1815. + span = _rpmalloc_heap_thread_cache_extract(heap, span_count);
  1816. + }
  1817. + return span;
  1818. +}
  1819. +
  1820. +static span_t*
  1821. +_rpmalloc_heap_reserved_extract(heap_t* heap, size_t span_count) {
  1822. + if (heap->spans_reserved >= span_count)
  1823. + return _rpmalloc_span_map(heap, span_count);
  1824. + return 0;
  1825. +}
  1826. +
  1827. +//! Extract a span from the global cache
  1828. +static span_t*
  1829. +_rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
  1830. +#if ENABLE_GLOBAL_CACHE
  1831. +#if ENABLE_THREAD_CACHE
  1832. + span_cache_t* span_cache;
  1833. + size_t wanted_count;
  1834. + if (span_count == 1) {
  1835. + span_cache = &heap->span_cache;
  1836. + wanted_count = THREAD_SPAN_CACHE_TRANSFER;
  1837. + } else {
  1838. + span_cache = (span_cache_t*)(heap->span_large_cache + (span_count - 2));
  1839. + wanted_count = THREAD_SPAN_LARGE_CACHE_TRANSFER;
  1840. + }
  1841. + span_cache->count = _rpmalloc_global_cache_extract_spans(span_cache->span, span_count, wanted_count);
  1842. + if (span_cache->count) {
  1843. + _rpmalloc_stat_add64(&heap->global_to_thread, span_count * span_cache->count * _memory_span_size);
  1844. + _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_from_global, span_cache->count);
  1845. + return span_cache->span[--span_cache->count];
  1846. + }
  1847. +#else
  1848. + span_t* span = 0;
  1849. + size_t count = _rpmalloc_global_cache_extract_spans(&span, span_count, 1);
  1850. + if (count) {
  1851. + _rpmalloc_stat_add64(&heap->global_to_thread, span_count * count * _memory_span_size);
  1852. + _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_from_global, count);
  1853. + return span;
  1854. + }
  1855. +#endif
  1856. +#endif
  1857. + (void)sizeof(heap);
  1858. + (void)sizeof(span_count);
  1859. + return 0;
  1860. +}
  1861. +
  1862. +static void
  1863. +_rpmalloc_inc_span_statistics(heap_t* heap, size_t span_count, uint32_t class_idx) {
  1864. + (void)sizeof(heap);
  1865. + (void)sizeof(span_count);
  1866. + (void)sizeof(class_idx);
  1867. +#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
  1868. + uint32_t idx = (uint32_t)span_count - 1;
  1869. + uint32_t current_count = (uint32_t)atomic_incr32(&heap->span_use[idx].current);
  1870. + if (current_count > (uint32_t)atomic_load32(&heap->span_use[idx].high))
  1871. + atomic_store32(&heap->span_use[idx].high, (int32_t)current_count);
  1872. + _rpmalloc_stat_add_peak(&heap->size_class_use[class_idx].spans_current, 1, heap->size_class_use[class_idx].spans_peak);
  1873. +#endif
  1874. +}
  1875. +
  1876. +//! Get a span from one of the cache levels (thread cache, reserved, global cache) or fallback to mapping more memory
  1877. +static span_t*
  1878. +_rpmalloc_heap_extract_new_span(heap_t* heap, heap_size_class_t* heap_size_class, size_t span_count, uint32_t class_idx) {
  1879. + span_t* span;
  1880. +#if ENABLE_THREAD_CACHE
  1881. + if (heap_size_class && heap_size_class->cache) {
  1882. + span = heap_size_class->cache;
  1883. + heap_size_class->cache = (heap->span_cache.count ? heap->span_cache.span[--heap->span_cache.count] : 0);
  1884. + _rpmalloc_inc_span_statistics(heap, span_count, class_idx);
  1885. + return span;
  1886. + }
  1887. +#endif
  1888. + (void)sizeof(class_idx);
  1889. + // Allow 50% overhead to increase cache hits
  1890. + size_t base_span_count = span_count;
  1891. + size_t limit_span_count = (span_count > 2) ? (span_count + (span_count >> 1)) : span_count;
  1892. + if (limit_span_count > LARGE_CLASS_COUNT)
  1893. + limit_span_count = LARGE_CLASS_COUNT;
  1894. + do {
  1895. + span = _rpmalloc_heap_thread_cache_extract(heap, span_count);
  1896. + if (EXPECTED(span != 0)) {
  1897. + _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);
  1898. + _rpmalloc_inc_span_statistics(heap, span_count, class_idx);
  1899. + return span;
  1900. + }
  1901. + span = _rpmalloc_heap_thread_cache_deferred_extract(heap, span_count);
  1902. + if (EXPECTED(span != 0)) {
  1903. + _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);
  1904. + _rpmalloc_inc_span_statistics(heap, span_count, class_idx);
  1905. + return span;
  1906. + }
  1907. + span = _rpmalloc_heap_reserved_extract(heap, span_count);
  1908. + if (EXPECTED(span != 0)) {
  1909. + _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_reserved);
  1910. + _rpmalloc_inc_span_statistics(heap, span_count, class_idx);
  1911. + return span;
  1912. + }
  1913. + span = _rpmalloc_heap_global_cache_extract(heap, span_count);
  1914. + if (EXPECTED(span != 0)) {
  1915. + _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);
  1916. + _rpmalloc_inc_span_statistics(heap, span_count, class_idx);
  1917. + return span;
  1918. + }
  1919. + ++span_count;
  1920. + } while (span_count <= limit_span_count);
  1921. + //Final fallback, map in more virtual memory
  1922. + span = _rpmalloc_span_map(heap, base_span_count);
  1923. + _rpmalloc_inc_span_statistics(heap, base_span_count, class_idx);
  1924. + _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_map_calls);
  1925. + return span;
  1926. +}
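+ // For example, a request for 4 spans will also accept a cached 5- or 6-span entry (the
+ // 50% overhead noted above) before falling back to mapping exactly 4 spans from the OS.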
  1927. +
  1928. +static void
  1929. +_rpmalloc_heap_initialize(heap_t* heap) {
  1930. + memset(heap, 0, sizeof(heap_t));
  1931. + //Get a new heap ID
  1932. + heap->id = 1 + atomic_incr32(&_memory_heap_id);
  1933. +
  1934. + //Link in heap in heap ID map
  1935. + size_t list_idx = (size_t)heap->id % HEAP_ARRAY_SIZE;
  1936. + heap->next_heap = _memory_heaps[list_idx];
  1937. + _memory_heaps[list_idx] = heap;
  1938. +}
  1939. +
  1940. +static void
  1941. +_rpmalloc_heap_orphan(heap_t* heap, int first_class) {
  1942. + heap->owner_thread = (uintptr_t)-1;
  1943. +#if RPMALLOC_FIRST_CLASS_HEAPS
  1944. + heap_t** heap_list = (first_class ? &_memory_first_class_orphan_heaps : &_memory_orphan_heaps);
  1945. +#else
  1946. + (void)sizeof(first_class);
  1947. + heap_t** heap_list = &_memory_orphan_heaps;
  1948. +#endif
  1949. + heap->next_orphan = *heap_list;
  1950. + *heap_list = heap;
  1951. +}
  1952. +
  1953. +//! Allocate a new heap from newly mapped memory pages
  1954. +static heap_t*
  1955. +_rpmalloc_heap_allocate_new(void) {
  1956. + // Map in pages for 16 heaps. If the page size is greater than the required size for this, map a page and
  1957. + // use the first part for heaps and the remaining part for spans for allocations. Adds a lot of complexity,
  1958. + // but saves a lot of memory on systems where page size > 64 spans (4MiB)
  1959. + size_t heap_size = sizeof(heap_t);
  1960. + size_t aligned_heap_size = 16 * ((heap_size + 15) / 16);
  1961. + size_t request_heap_count = 16;
  1962. + size_t heap_span_count = ((aligned_heap_size * request_heap_count) + sizeof(span_t) + _memory_span_size - 1) / _memory_span_size;
  1963. + size_t block_size = _memory_span_size * heap_span_count;
  1964. + size_t span_count = heap_span_count;
  1965. + span_t* span = 0;
  1966. + // If there are global reserved spans, use these first
  1967. + if (_memory_global_reserve_count >= heap_span_count) {
  1968. + span = _rpmalloc_global_get_reserved_spans(heap_span_count);
  1969. + }
  1970. + if (!span) {
  1971. + if (_memory_page_size > block_size) {
  1972. + span_count = _memory_page_size / _memory_span_size;
  1973. + block_size = _memory_page_size;
  1974. + // If using huge pages, make sure to grab enough heaps to avoid reallocating a huge page just to serve new heaps
  1975. + size_t possible_heap_count = (block_size - sizeof(span_t)) / aligned_heap_size;
  1976. + if (possible_heap_count >= (request_heap_count * 16))
  1977. + request_heap_count *= 16;
  1978. + else if (possible_heap_count < request_heap_count)
  1979. + request_heap_count = possible_heap_count;
  1980. + heap_span_count = ((aligned_heap_size * request_heap_count) + sizeof(span_t) + _memory_span_size - 1) / _memory_span_size;
  1981. + }
  1982. +
  1983. + size_t align_offset = 0;
  1984. + span = (span_t*)_rpmalloc_mmap(block_size, &align_offset);
  1985. + if (!span)
  1986. + return 0;
  1987. +
  1988. + // Master span will contain the heaps
  1989. + _rpmalloc_stat_inc(&_master_spans);
  1990. + _rpmalloc_span_initialize(span, span_count, heap_span_count, align_offset);
  1991. + }
  1992. +
  1993. + size_t remain_size = _memory_span_size - sizeof(span_t);
  1994. + heap_t* heap = (heap_t*)pointer_offset(span, sizeof(span_t));
  1995. + _rpmalloc_heap_initialize(heap);
  1996. +
  1997. + // Put extra heaps as orphans
  1998. + size_t num_heaps = remain_size / aligned_heap_size;
  1999. + if (num_heaps < request_heap_count)
  2000. + num_heaps = request_heap_count;
  2001. + atomic_store32(&heap->child_count, (int32_t)num_heaps - 1);
  2002. + heap_t* extra_heap = (heap_t*)pointer_offset(heap, aligned_heap_size);
  2003. + while (num_heaps > 1) {
  2004. + _rpmalloc_heap_initialize(extra_heap);
  2005. + extra_heap->master_heap = heap;
  2006. + _rpmalloc_heap_orphan(extra_heap, 1);
  2007. + extra_heap = (heap_t*)pointer_offset(extra_heap, aligned_heap_size);
  2008. + --num_heaps;
  2009. + }
  2010. +
  2011. + if (span_count > heap_span_count) {
  2012. + // Cap reserved spans
  2013. + size_t remain_count = span_count - heap_span_count;
  2014. + size_t reserve_count = (remain_count > _memory_heap_reserve_count ? _memory_heap_reserve_count : remain_count);
  2015. + span_t* remain_span = (span_t*)pointer_offset(span, heap_span_count * _memory_span_size);
  2016. + _rpmalloc_heap_set_reserved_spans(heap, span, remain_span, reserve_count);
  2017. +
  2018. + if (remain_count > reserve_count) {
  2019. + // Set to global reserved spans
  2020. + remain_span = (span_t*)pointer_offset(remain_span, reserve_count * _memory_span_size);
  2021. + reserve_count = remain_count - reserve_count;
  2022. + _rpmalloc_global_set_reserved_spans(span, remain_span, reserve_count);
  2023. + }
  2024. + }
  2025. +
  2026. + return heap;
  2027. +}
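+ // The sizing math above packs the requested heap_t structures, rounded up to 16-byte
+ // multiples, into as few spans as possible behind a span_t header; the extra heaps are
+ // orphaned for later threads to adopt, and any spans mapped beyond heap_span_count are
+ // split between this heap's reserve and the global reserve.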
  2028. +
  2029. +static heap_t*
  2030. +_rpmalloc_heap_extract_orphan(heap_t** heap_list) {
  2031. + heap_t* heap = *heap_list;
  2032. + *heap_list = (heap ? heap->next_orphan : 0);
  2033. + return heap;
  2034. +}
  2035. +
  2036. +//! Allocate a new heap, potentially reusing a previously orphaned heap
  2037. +static heap_t*
  2038. +_rpmalloc_heap_allocate(int first_class) {
  2039. + heap_t* heap = 0;
  2040. + while (!atomic_cas32_acquire(&_memory_global_lock, 1, 0))
  2041. + _rpmalloc_spin();
  2042. + if (first_class == 0)
  2043. + heap = _rpmalloc_heap_extract_orphan(&_memory_orphan_heaps);
  2044. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2045. + if (!heap)
  2046. + heap = _rpmalloc_heap_extract_orphan(&_memory_first_class_orphan_heaps);
  2047. +#endif
  2048. + if (!heap)
  2049. + heap = _rpmalloc_heap_allocate_new();
  2050. + atomic_store32_release(&_memory_global_lock, 0);
  2051. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  2052. + return heap;
  2053. +}
  2054. +
  2055. +static void
  2056. +_rpmalloc_heap_release(void* heapptr, int first_class, int release_cache) {
  2057. + heap_t* heap = (heap_t*)heapptr;
  2058. + if (!heap)
  2059. + return;
  2060. + //Release thread cache spans back to global cache
  2061. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  2062. + if (release_cache || heap->finalize) {
  2063. +#if ENABLE_THREAD_CACHE
  2064. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  2065. + span_cache_t* span_cache;
  2066. + if (!iclass)
  2067. + span_cache = &heap->span_cache;
  2068. + else
  2069. + span_cache = (span_cache_t*)(heap->span_large_cache + (iclass - 1));
  2070. + if (!span_cache->count)
  2071. + continue;
  2072. +#if ENABLE_GLOBAL_CACHE
  2073. + if (heap->finalize) {
  2074. + for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
  2075. + _rpmalloc_span_unmap(span_cache->span[ispan]);
  2076. + } else {
  2077. + _rpmalloc_stat_add64(&heap->thread_to_global, span_cache->count * (iclass + 1) * _memory_span_size);
  2078. + _rpmalloc_stat_add(&heap->span_use[iclass].spans_to_global, span_cache->count);
  2079. + _rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1, span_cache->count);
  2080. + }
  2081. +#else
  2082. + for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
  2083. + _rpmalloc_span_unmap(span_cache->span[ispan]);
  2084. +#endif
  2085. + span_cache->count = 0;
  2086. + }
  2087. +#endif
  2088. + }
  2089. +
  2090. + if (get_thread_heap_raw() == heap)
  2091. + set_thread_heap(0);
  2092. +
  2093. +#if ENABLE_STATISTICS
  2094. + atomic_decr32(&_memory_active_heaps);
  2095. + rpmalloc_assert(atomic_load32(&_memory_active_heaps) >= 0, "Still active heaps during finalization");
  2096. +#endif
  2097. +
  2098. + // If we are forcibly terminating with _exit the state of the
  2099. + // lock atomic is unknown and it's best to just go ahead and exit
  2100. + if (get_thread_id() != _rpmalloc_main_thread_id) {
  2101. + while (!atomic_cas32_acquire(&_memory_global_lock, 1, 0))
  2102. + _rpmalloc_spin();
  2103. + }
  2104. + _rpmalloc_heap_orphan(heap, first_class);
  2105. + atomic_store32_release(&_memory_global_lock, 0);
  2106. +}
  2107. +
  2108. +static void
  2109. +_rpmalloc_heap_release_raw(void* heapptr, int release_cache) {
  2110. + _rpmalloc_heap_release(heapptr, 0, release_cache);
  2111. +}
  2112. +
  2113. +static void
  2114. +_rpmalloc_heap_release_raw_fc(void* heapptr) {
  2115. + _rpmalloc_heap_release_raw(heapptr, 1);
  2116. +}
  2117. +
  2118. +static void
  2119. +_rpmalloc_heap_finalize(heap_t* heap) {
  2120. + if (heap->spans_reserved) {
  2121. + span_t* span = _rpmalloc_span_map(heap, heap->spans_reserved);
  2122. + _rpmalloc_span_unmap(span);
  2123. + heap->spans_reserved = 0;
  2124. + }
  2125. +
  2126. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  2127. +
  2128. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  2129. + if (heap->size_class[iclass].cache)
  2130. + _rpmalloc_span_unmap(heap->size_class[iclass].cache);
  2131. + heap->size_class[iclass].cache = 0;
  2132. + span_t* span = heap->size_class[iclass].partial_span;
  2133. + while (span) {
  2134. + span_t* next = span->next;
  2135. + _rpmalloc_span_finalize(heap, iclass, span, &heap->size_class[iclass].partial_span);
  2136. + span = next;
  2137. + }
  2138. + // If class still has a free list it must be a full span
  2139. + if (heap->size_class[iclass].free_list) {
  2140. + span_t* class_span = (span_t*)((uintptr_t)heap->size_class[iclass].free_list & _memory_span_mask);
  2141. + span_t** list = 0;
  2142. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2143. + list = &heap->full_span[iclass];
  2144. +#endif
  2145. + --heap->full_span_count;
  2146. + if (!_rpmalloc_span_finalize(heap, iclass, class_span, list)) {
  2147. + if (list)
  2148. + _rpmalloc_span_double_link_list_remove(list, class_span);
  2149. + _rpmalloc_span_double_link_list_add(&heap->size_class[iclass].partial_span, class_span);
  2150. + }
  2151. + }
  2152. + }
  2153. +
  2154. +#if ENABLE_THREAD_CACHE
  2155. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  2156. + span_cache_t* span_cache;
  2157. + if (!iclass)
  2158. + span_cache = &heap->span_cache;
  2159. + else
  2160. + span_cache = (span_cache_t*)(heap->span_large_cache + (iclass - 1));
  2161. + for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
  2162. + _rpmalloc_span_unmap(span_cache->span[ispan]);
  2163. + span_cache->count = 0;
  2164. + }
  2165. +#endif
  2166. + rpmalloc_assert(!atomic_load_ptr(&heap->span_free_deferred), "Heaps still active during finalization");
  2167. +}
  2168. +
  2169. +
  2170. +////////////
  2171. +///
  2172. +/// Allocation entry points
  2173. +///
  2174. +//////
  2175. +
  2176. +//! Pop first block from a free list
  2177. +static void*
  2178. +free_list_pop(void** list) {
  2179. + void* block = *list;
  2180. + *list = *((void**)block);
  2181. + return block;
  2182. +}
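+ // Since every free block stores the next pointer in its first word (see
+ // free_list_partial_init), popping the head is a single load plus a store.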
  2183. +
  2184. +//! Allocate a small/medium sized memory block from the given heap
  2185. +static void*
  2186. +_rpmalloc_allocate_from_heap_fallback(heap_t* heap, heap_size_class_t* heap_size_class, uint32_t class_idx) {
  2187. + span_t* span = heap_size_class->partial_span;
  2188. + if (EXPECTED(span != 0)) {
  2189. + rpmalloc_assert(span->block_count == _memory_size_class[span->size_class].block_count, "Span block count corrupted");
  2190. + rpmalloc_assert(!_rpmalloc_span_is_fully_utilized(span), "Internal failure");
  2191. + void* block;
  2192. + if (span->free_list) {
  2193. + //Span local free list is not empty, swap to size class free list
  2194. + block = free_list_pop(&span->free_list);
  2195. + heap_size_class->free_list = span->free_list;
  2196. + span->free_list = 0;
  2197. + } else {
  2198. + //If the span did not fully initialize the free list, link up another page's worth of blocks
  2199. + void* block_start = pointer_offset(span, SPAN_HEADER_SIZE + ((size_t)span->free_list_limit * span->block_size));
  2200. + span->free_list_limit += free_list_partial_init(&heap_size_class->free_list, &block,
  2201. + (void*)((uintptr_t)block_start & ~(_memory_page_size - 1)), block_start,
  2202. + span->block_count - span->free_list_limit, span->block_size);
  2203. + }
  2204. + rpmalloc_assert(span->free_list_limit <= span->block_count, "Span block count corrupted");
  2205. + span->used_count = span->free_list_limit;
  2206. +
  2207. + //Swap in deferred free list if present
  2208. + if (atomic_load_ptr(&span->free_list_deferred))
  2209. + _rpmalloc_span_extract_free_list_deferred(span);
  2210. +
  2211. + //If the span is still not fully utilized, keep it in the partial list and return the block early
  2212. + if (!_rpmalloc_span_is_fully_utilized(span))
  2213. + return block;
  2214. +
  2215. + //The span is fully utilized, unlink from partial list and add to fully utilized list
  2216. + _rpmalloc_span_double_link_list_pop_head(&heap_size_class->partial_span, span);
  2217. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2218. + _rpmalloc_span_double_link_list_add(&heap->full_span[class_idx], span);
  2219. +#endif
  2220. + ++heap->full_span_count;
  2221. + return block;
  2222. + }
  2223. +
  2224. + //Find a span in one of the cache levels
  2225. + span = _rpmalloc_heap_extract_new_span(heap, heap_size_class, 1, class_idx);
  2226. + if (EXPECTED(span != 0)) {
  2227. + //Mark span as owned by this heap and set base data, return first block
  2228. + return _rpmalloc_span_initialize_new(heap, heap_size_class, span, class_idx);
  2229. + }
  2230. +
  2231. + return 0;
  2232. +}
  2233. +
  2234. +//! Allocate a small sized memory block from the given heap
  2235. +static void*
  2236. +_rpmalloc_allocate_small(heap_t* heap, size_t size) {
  2237. + rpmalloc_assert(heap, "No thread heap");
  2238. + //Small sizes have unique size classes
  2239. + const uint32_t class_idx = (uint32_t)((size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT);
  2240. + heap_size_class_t* heap_size_class = heap->size_class + class_idx;
  2241. + _rpmalloc_stat_inc_alloc(heap, class_idx);
  2242. + if (EXPECTED(heap_size_class->free_list != 0))
  2243. + return free_list_pop(&heap_size_class->free_list);
  2244. + return _rpmalloc_allocate_from_heap_fallback(heap, heap_size_class, class_idx);
  2245. +}
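+ // Worked example (assuming the usual 16-byte SMALL_GRANULARITY): a 24-byte request maps
+ // to class_idx (24 + 15) >> 4 = 2, the 32-byte class, and in the common case is served
+ // by a single free_list_pop from that class's free list.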
  2246. +
  2247. +//! Allocate a medium sized memory block from the given heap
  2248. +static void*
  2249. +_rpmalloc_allocate_medium(heap_t* heap, size_t size) {
  2250. + rpmalloc_assert(heap, "No thread heap");
  2251. + //Calculate the size class index and do a dependent lookup of the final class index (in case of merged classes)
  2252. + const uint32_t base_idx = (uint32_t)(SMALL_CLASS_COUNT + ((size - (SMALL_SIZE_LIMIT + 1)) >> MEDIUM_GRANULARITY_SHIFT));
  2253. + const uint32_t class_idx = _memory_size_class[base_idx].class_idx;
  2254. + heap_size_class_t* heap_size_class = heap->size_class + class_idx;
  2255. + _rpmalloc_stat_inc_alloc(heap, class_idx);
  2256. + if (EXPECTED(heap_size_class->free_list != 0))
  2257. + return free_list_pop(&heap_size_class->free_list);
  2258. + return _rpmalloc_allocate_from_heap_fallback(heap, heap_size_class, class_idx);
  2259. +}
  2260. +
  2261. +//! Allocate a large sized memory block from the given heap
  2262. +static void*
  2263. +_rpmalloc_allocate_large(heap_t* heap, size_t size) {
  2264. + rpmalloc_assert(heap, "No thread heap");
  2265. + //Calculate number of needed max sized spans (including header)
  2266. + //Since this function is never called if size > LARGE_SIZE_LIMIT
  2267. + //the span_count is guaranteed to be <= LARGE_CLASS_COUNT
  2268. + size += SPAN_HEADER_SIZE;
  2269. + size_t span_count = size >> _memory_span_size_shift;
  2270. + if (size & (_memory_span_size - 1))
  2271. + ++span_count;
  2272. +
  2273. + //Find a span in one of the cache levels
  2274. + span_t* span = _rpmalloc_heap_extract_new_span(heap, 0, span_count, SIZE_CLASS_LARGE);
  2275. + if (!span)
  2276. + return span;
  2277. +
  2278. + //Mark span as owned by this heap and set base data
  2279. + rpmalloc_assert(span->span_count >= span_count, "Internal failure");
  2280. + span->size_class = SIZE_CLASS_LARGE;
  2281. + span->heap = heap;
  2282. +
  2283. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2284. + _rpmalloc_span_double_link_list_add(&heap->large_huge_span, span);
  2285. +#endif
  2286. + ++heap->full_span_count;
  2287. +
  2288. + return pointer_offset(span, SPAN_HEADER_SIZE);
  2289. +}
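The span count is a plain round-up division of (size + header) by the span size. A standalone sketch, assuming a 64 KiB span and a 128-byte header purely for illustration:

#include <stdio.h>

#define DEMO_SPAN_SIZE        (64 * 1024)  /* assumed span size */
#define DEMO_SPAN_SIZE_SHIFT  16
#define DEMO_SPAN_HEADER_SIZE 128          /* assumed header size */

/* Round (size + header) up to whole spans, mirroring the code above. */
static size_t
demo_span_count(size_t size) {
	size += DEMO_SPAN_HEADER_SIZE;
	size_t span_count = size >> DEMO_SPAN_SIZE_SHIFT;
	if (size & (DEMO_SPAN_SIZE - 1))
		++span_count;
	return span_count;
}

int main(void) {
	printf("%zu\n", demo_span_count(100 * 1024));       /* -> 2 spans */
	printf("%zu\n", demo_span_count(64 * 1024 - 128));  /* -> 1 span (fits exactly) */
	printf("%zu\n", demo_span_count(64 * 1024));        /* -> 2 spans (header pushes it over) */
	return 0;
}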
  2290. +
  2291. +//! Allocate a huge block by mapping memory pages directly
  2292. +static void*
  2293. +_rpmalloc_allocate_huge(heap_t* heap, size_t size) {
  2294. + rpmalloc_assert(heap, "No thread heap");
  2295. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  2296. + size += SPAN_HEADER_SIZE;
  2297. + size_t num_pages = size >> _memory_page_size_shift;
  2298. + if (size & (_memory_page_size - 1))
  2299. + ++num_pages;
  2300. + size_t align_offset = 0;
  2301. + span_t* span = (span_t*)_rpmalloc_mmap(num_pages * _memory_page_size, &align_offset);
  2302. + if (!span)
  2303. + return span;
  2304. +
  2305. + //Store page count in span_count
  2306. + span->size_class = SIZE_CLASS_HUGE;
  2307. + span->span_count = (uint32_t)num_pages;
  2308. + span->align_offset = (uint32_t)align_offset;
  2309. + span->heap = heap;
  2310. + _rpmalloc_stat_add_peak(&_huge_pages_current, num_pages, _huge_pages_peak);
  2311. +
  2312. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2313. + _rpmalloc_span_double_link_list_add(&heap->large_huge_span, span);
  2314. +#endif
  2315. + ++heap->full_span_count;
  2316. +
  2317. + return pointer_offset(span, SPAN_HEADER_SIZE);
  2318. +}
  2319. +
  2320. +//! Allocate a block of the given size
  2321. +static void*
  2322. +_rpmalloc_allocate(heap_t* heap, size_t size) {
  2323. + _rpmalloc_stat_add64(&_allocation_counter, 1);
  2324. + if (EXPECTED(size <= SMALL_SIZE_LIMIT))
  2325. + return _rpmalloc_allocate_small(heap, size);
  2326. + else if (size <= _memory_medium_size_limit)
  2327. + return _rpmalloc_allocate_medium(heap, size);
  2328. + else if (size <= LARGE_SIZE_LIMIT)
  2329. + return _rpmalloc_allocate_large(heap, size);
  2330. + return _rpmalloc_allocate_huge(heap, size);
  2331. +}
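So the allocator is a three-way (plus huge) dispatch on the request size. The thresholds in this standalone sketch are placeholders chosen for illustration, not the values computed during initialization:

#include <stdio.h>

/* Placeholder limits for illustration only; the real limits come from the
 * size-class setup in rpmalloc_initialize_config(). */
enum { DEMO_SMALL_LIMIT = 1024, DEMO_MEDIUM_LIMIT = 32 * 1024, DEMO_LARGE_LIMIT = 4 * 1024 * 1024 };

static const char*
demo_classify(size_t size) {
	if (size <= DEMO_SMALL_LIMIT)  return "small";
	if (size <= DEMO_MEDIUM_LIMIT) return "medium";
	if (size <= DEMO_LARGE_LIMIT)  return "large";
	return "huge";
}

int main(void) {
	size_t sizes[] = {64, 4096, 256 * 1024, 16 * 1024 * 1024};
	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("%zu bytes -> %s path\n", sizes[i], demo_classify(sizes[i]));
	return 0;
}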
  2332. +
  2333. +static void*
  2334. +_rpmalloc_aligned_allocate(heap_t* heap, size_t alignment, size_t size) {
  2335. + if (alignment <= SMALL_GRANULARITY)
  2336. + return _rpmalloc_allocate(heap, size);
  2337. +
  2338. +#if ENABLE_VALIDATE_ARGS
  2339. + if ((size + alignment) < size) {
  2340. + errno = EINVAL;
  2341. + return 0;
  2342. + }
  2343. + if (alignment & (alignment - 1)) {
  2344. + errno = EINVAL;
  2345. + return 0;
  2346. + }
  2347. +#endif
  2348. +
  2349. + if ((alignment <= SPAN_HEADER_SIZE) && (size < _memory_medium_size_limit)) {
  2350. + // If alignment is less than or equal to the span header size (which is a power of two),
  2351. + // and the size rounded up to a multiple of the span header size does not exceed size + alignment,
  2352. + // then the natural alignment of blocks already provides the requested alignment
  2353. + size_t multiple_size = size ? (size + (SPAN_HEADER_SIZE - 1)) & ~(uintptr_t)(SPAN_HEADER_SIZE - 1) : SPAN_HEADER_SIZE;
  2354. + rpmalloc_assert(!(multiple_size % SPAN_HEADER_SIZE), "Failed alignment calculation");
  2355. + if (multiple_size <= (size + alignment))
  2356. + return _rpmalloc_allocate(heap, multiple_size);
  2357. + }
  2358. +
  2359. + void* ptr = 0;
  2360. + size_t align_mask = alignment - 1;
  2361. + if (alignment <= _memory_page_size) {
  2362. + ptr = _rpmalloc_allocate(heap, size + alignment);
  2363. + if ((uintptr_t)ptr & align_mask) {
  2364. + ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
  2365. + //Mark as having aligned blocks
  2366. + span_t* span = (span_t*)((uintptr_t)ptr & _memory_span_mask);
  2367. + span->flags |= SPAN_FLAG_ALIGNED_BLOCKS;
  2368. + }
  2369. + return ptr;
  2370. + }
  2371. +
  2372. + // Fall back to mapping new pages for this request. Since pointers passed
  2373. + // to rpfree must be able to reach the start of the span by masking the
  2374. + // address with the span size, the aligned pointer returned from this
  2375. + // function must lie within one span size of the start of the mapped area.
  2376. + // In the worst case this requires us to loop and map pages until we get a
  2377. + // suitable address. It also means we can never align to the span size or
  2378. + // greater, since the span header would push the aligned pointer more than
  2379. + // one span size away from the span start (and the pointer mask would then
  2380. + // yield an invalid span start on free)
  2381. + if (alignment & align_mask) {
  2382. + errno = EINVAL;
  2383. + return 0;
  2384. + }
  2385. + if (alignment >= _memory_span_size) {
  2386. + errno = EINVAL;
  2387. + return 0;
  2388. + }
  2389. +
  2390. + size_t extra_pages = alignment / _memory_page_size;
  2391. +
  2392. + // Since each span has a header, we will at least need one extra memory page
  2393. + size_t num_pages = 1 + (size / _memory_page_size);
  2394. + if (size & (_memory_page_size - 1))
  2395. + ++num_pages;
  2396. +
  2397. + if (extra_pages > num_pages)
  2398. + num_pages = 1 + extra_pages;
  2399. +
  2400. + size_t original_pages = num_pages;
  2401. + size_t limit_pages = (_memory_span_size / _memory_page_size) * 2;
  2402. + if (limit_pages < (original_pages * 2))
  2403. + limit_pages = original_pages * 2;
  2404. +
  2405. + size_t mapped_size, align_offset;
  2406. + span_t* span;
  2407. +
  2408. +retry:
  2409. + align_offset = 0;
  2410. + mapped_size = num_pages * _memory_page_size;
  2411. +
  2412. + span = (span_t*)_rpmalloc_mmap(mapped_size, &align_offset);
  2413. + if (!span) {
  2414. + errno = ENOMEM;
  2415. + return 0;
  2416. + }
  2417. + ptr = pointer_offset(span, SPAN_HEADER_SIZE);
  2418. +
  2419. + if ((uintptr_t)ptr & align_mask)
  2420. + ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
  2421. +
  2422. + if (((size_t)pointer_diff(ptr, span) >= _memory_span_size) ||
  2423. + (pointer_offset(ptr, size) > pointer_offset(span, mapped_size)) ||
  2424. + (((uintptr_t)ptr & _memory_span_mask) != (uintptr_t)span)) {
  2425. + _rpmalloc_unmap(span, mapped_size, align_offset, mapped_size);
  2426. + ++num_pages;
  2427. + if (num_pages > limit_pages) {
  2428. + errno = EINVAL;
  2429. + return 0;
  2430. + }
  2431. + goto retry;
  2432. + }
  2433. +
  2434. + //Store page count in span_count
  2435. + span->size_class = SIZE_CLASS_HUGE;
  2436. + span->span_count = (uint32_t)num_pages;
  2437. + span->align_offset = (uint32_t)align_offset;
  2438. + span->heap = heap;
  2439. + _rpmalloc_stat_add_peak(&_huge_pages_current, num_pages, _huge_pages_peak);
  2440. +
  2441. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2442. + _rpmalloc_span_double_link_list_add(&heap->large_huge_span, span);
  2443. +#endif
  2444. + ++heap->full_span_count;
  2445. +
  2446. + _rpmalloc_stat_add64(&_allocation_counter, 1);
  2447. +
  2448. + return ptr;
  2449. +}
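For alignments up to the page size the code simply over-allocates by `alignment` bytes and bumps the pointer up to the next aligned address; the SPAN_FLAG_ALIGNED_BLOCKS flag later lets the free path re-derive the true block start. The bump itself is the usual mask-and-add, sketched standalone below (uses plain malloc/free purely for the demo):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Standalone illustration of the mask-and-add bump used above. */
static void*
demo_align_up(void* ptr, size_t alignment) {
	uintptr_t align_mask = alignment - 1;  /* alignment must be a power of two */
	if ((uintptr_t)ptr & align_mask)
		ptr = (void*)(((uintptr_t)ptr & ~align_mask) + alignment);
	return ptr;
}

int main(void) {
	/* Over-allocate by the alignment so the bumped pointer stays in bounds. */
	size_t size = 100, alignment = 256;
	void* raw = malloc(size + alignment);
	if (!raw)
		return 1;
	void* aligned = demo_align_up(raw, alignment);
	printf("raw %p -> aligned %p (mod %zu = %zu)\n",
	       raw, aligned, alignment, (size_t)((uintptr_t)aligned & (alignment - 1)));
	free(raw);
	return 0;
}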
  2450. +
  2451. +
  2452. +////////////
  2453. +///
  2454. +/// Deallocation entry points
  2455. +///
  2456. +//////
  2457. +
  2458. +//! Deallocate the given small/medium memory block in the current thread local heap
  2459. +static void
  2460. +_rpmalloc_deallocate_direct_small_or_medium(span_t* span, void* block) {
  2461. + heap_t* heap = span->heap;
  2462. + rpmalloc_assert(heap->owner_thread == get_thread_id() || !heap->owner_thread || heap->finalize, "Internal failure");
  2463. + //Add block to free list
  2464. + if (UNEXPECTED(_rpmalloc_span_is_fully_utilized(span))) {
  2465. + span->used_count = span->block_count;
  2466. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2467. + _rpmalloc_span_double_link_list_remove(&heap->full_span[span->size_class], span);
  2468. +#endif
  2469. + _rpmalloc_span_double_link_list_add(&heap->size_class[span->size_class].partial_span, span);
  2470. + --heap->full_span_count;
  2471. + }
  2472. + *((void**)block) = span->free_list;
  2473. + --span->used_count;
  2474. + span->free_list = block;
  2475. + if (UNEXPECTED(span->used_count == span->list_size)) {
  2476. + _rpmalloc_span_double_link_list_remove(&heap->size_class[span->size_class].partial_span, span);
  2477. + _rpmalloc_span_release_to_cache(heap, span);
  2478. + }
  2479. +}
  2480. +
  2481. +static void
  2482. +_rpmalloc_deallocate_defer_free_span(heap_t* heap, span_t* span) {
  2483. + if (span->size_class != SIZE_CLASS_HUGE)
  2484. + _rpmalloc_stat_inc(&heap->span_use[span->span_count - 1].spans_deferred);
  2485. + //This list does not need ABA protection, no mutable side state
  2486. + do {
  2487. + span->free_list = (void*)atomic_load_ptr(&heap->span_free_deferred);
  2488. + } while (!atomic_cas_ptr(&heap->span_free_deferred, span, span->free_list));
  2489. +}
  2490. +
  2491. +//! Put the block in the deferred free list of the owning span
  2492. +static void
  2493. +_rpmalloc_deallocate_defer_small_or_medium(span_t* span, void* block) {
  2494. + // The memory ordering here is a bit tricky. To avoid ABA protection of the
  2495. + // deferred free list, the list head and the list size must never desynchronize:
  2496. + // taking the head (the exchange below) needs acquire semantics so the observed
  2497. + // list_size is valid, and publishing the new head needs release semantics
  2498. + void* free_list;
  2499. + do {
  2500. + free_list = atomic_exchange_ptr_acquire(&span->free_list_deferred, INVALID_POINTER);
  2501. + } while (free_list == INVALID_POINTER);
  2502. + *((void**)block) = free_list;
  2503. + uint32_t free_count = ++span->list_size;
  2504. + atomic_store_ptr_release(&span->free_list_deferred, block);
  2505. + if (free_count == span->block_count) {
  2506. + // Span was completely freed by this block. Due to the INVALID_POINTER spin lock
  2507. + // no other thread can reach this state simultaneously on this span.
  2508. + // Safe to move to owner heap deferred cache
  2509. + _rpmalloc_deallocate_defer_free_span(span->heap, span);
  2510. + }
  2511. +}
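The exchange with INVALID_POINTER acts as a tiny spin lock: whichever thread swaps the sentinel in owns both the list head and the list_size counter until the release store publishes the new head. A minimal standalone sketch of the same pattern with C11 atomics (the names and the sentinel value are illustrative, not the patch's own):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration of the sentinel/spin pattern; not the patch's own code. */
#define DEMO_SENTINEL ((void*)(uintptr_t)1)   /* never a real block address */

static _Atomic(void*) demo_list_head;
static uint32_t demo_list_size;               /* protected by holding the sentinel */

static void
demo_defer_push(void* block) {
	void* head;
	/* Acquire: swap in the sentinel; spin while another thread holds it. */
	do {
		head = atomic_exchange_explicit(&demo_list_head, DEMO_SENTINEL, memory_order_acquire);
	} while (head == DEMO_SENTINEL);
	*((void**)block) = head;                  /* link block in front of old head */
	++demo_list_size;                         /* safe: we own the list right now */
	/* Release: publish the new head and drop the "lock". */
	atomic_store_explicit(&demo_list_head, block, memory_order_release);
}

int main(void) {
	static union { void* next; char raw[32]; } blocks[3];
	for (int i = 0; i < 3; ++i)
		demo_defer_push(&blocks[i]);
	printf("deferred blocks: %u\n", (unsigned)demo_list_size);
	return 0;
}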
  2512. +
  2513. +static void
  2514. +_rpmalloc_deallocate_small_or_medium(span_t* span, void* p) {
  2515. + _rpmalloc_stat_inc_free(span->heap, span->size_class);
  2516. + if (span->flags & SPAN_FLAG_ALIGNED_BLOCKS) {
  2517. + //Realign pointer to block start
  2518. + void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
  2519. + uint32_t block_offset = (uint32_t)pointer_diff(p, blocks_start);
  2520. + p = pointer_offset(p, -(int32_t)(block_offset % span->block_size));
  2521. + }
  2522. + //Check if block belongs to this heap or if deallocation should be deferred
  2523. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2524. + int defer = (span->heap->owner_thread && (span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2525. +#else
  2526. + int defer = ((span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2527. +#endif
  2528. + if (!defer)
  2529. + _rpmalloc_deallocate_direct_small_or_medium(span, p);
  2530. + else
  2531. + _rpmalloc_deallocate_defer_small_or_medium(span, p);
  2532. +}
  2533. +
  2534. +//! Deallocate the given large memory block to the current heap
  2535. +static void
  2536. +_rpmalloc_deallocate_large(span_t* span) {
  2537. + rpmalloc_assert(span->size_class == SIZE_CLASS_LARGE, "Bad span size class");
  2538. + rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  2539. + rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
  2540. + //We must always defer (unless finalizing) if from another heap since we cannot touch the list or counters of another heap
  2541. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2542. + int defer = (span->heap->owner_thread && (span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2543. +#else
  2544. + int defer = ((span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2545. +#endif
  2546. + if (defer) {
  2547. + _rpmalloc_deallocate_defer_free_span(span->heap, span);
  2548. + return;
  2549. + }
  2550. + rpmalloc_assert(span->heap->full_span_count, "Heap span counter corrupted");
  2551. + --span->heap->full_span_count;
  2552. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2553. + _rpmalloc_span_double_link_list_remove(&span->heap->large_huge_span, span);
  2554. +#endif
  2555. +#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
  2556. + //Decrease counter
  2557. + size_t idx = span->span_count - 1;
  2558. + atomic_decr32(&span->heap->span_use[idx].current);
  2559. +#endif
  2560. + heap_t* heap = span->heap;
  2561. + rpmalloc_assert(heap, "No thread heap");
  2562. +#if ENABLE_THREAD_CACHE
  2563. + const int set_as_reserved = ((span->span_count > 1) && (heap->span_cache.count == 0) && !heap->finalize && !heap->spans_reserved);
  2564. +#else
  2565. + const int set_as_reserved = ((span->span_count > 1) && !heap->finalize && !heap->spans_reserved);
  2566. +#endif
  2567. + if (set_as_reserved) {
  2568. + heap->span_reserve = span;
  2569. + heap->spans_reserved = span->span_count;
  2570. + if (span->flags & SPAN_FLAG_MASTER) {
  2571. + heap->span_reserve_master = span;
  2572. + } else { //SPAN_FLAG_SUBSPAN
  2573. + span_t* master = (span_t*)pointer_offset(span, -(intptr_t)((size_t)span->offset_from_master * _memory_span_size));
  2574. + heap->span_reserve_master = master;
  2575. + rpmalloc_assert(master->flags & SPAN_FLAG_MASTER, "Span flag corrupted");
  2576. + rpmalloc_assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count, "Master span count corrupted");
  2577. + }
  2578. + _rpmalloc_stat_inc(&heap->span_use[idx].spans_to_reserved);
  2579. + } else {
  2580. + //Insert into cache list
  2581. + _rpmalloc_heap_cache_insert(heap, span);
  2582. + }
  2583. +}
  2584. +
  2585. +//! Deallocate the given huge span
  2586. +static void
  2587. +_rpmalloc_deallocate_huge(span_t* span) {
  2588. + rpmalloc_assert(span->heap, "No span heap");
  2589. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2590. + int defer = (span->heap->owner_thread && (span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2591. +#else
  2592. + int defer = ((span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
  2593. +#endif
  2594. + if (defer) {
  2595. + _rpmalloc_deallocate_defer_free_span(span->heap, span);
  2596. + return;
  2597. + }
  2598. + rpmalloc_assert(span->heap->full_span_count, "Heap span counter corrupted");
  2599. + --span->heap->full_span_count;
  2600. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2601. + _rpmalloc_span_double_link_list_remove(&span->heap->large_huge_span, span);
  2602. +#endif
  2603. +
  2604. + //Oversized allocation, page count is stored in span_count
  2605. + size_t num_pages = span->span_count;
  2606. + _rpmalloc_unmap(span, num_pages * _memory_page_size, span->align_offset, num_pages * _memory_page_size);
  2607. + _rpmalloc_stat_sub(&_huge_pages_current, num_pages);
  2608. +}
  2609. +
  2610. +//! Deallocate the given block
  2611. +static void
  2612. +_rpmalloc_deallocate(void* p) {
  2613. + _rpmalloc_stat_add64(&_deallocation_counter, 1);
  2614. + //Grab the span (always at start of span, using span alignment)
  2615. + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
  2616. + if (UNEXPECTED(!span))
  2617. + return;
  2618. + if (EXPECTED(span->size_class < SIZE_CLASS_COUNT))
  2619. + _rpmalloc_deallocate_small_or_medium(span, p);
  2620. + else if (span->size_class == SIZE_CLASS_LARGE)
  2621. + _rpmalloc_deallocate_large(span);
  2622. + else
  2623. + _rpmalloc_deallocate_huge(span);
  2624. +}
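Every deallocation starts by recovering the owning span: spans are span-size aligned, so masking the low bits off the block address lands on the span header (and a NULL pointer maps to a NULL span, which is filtered out above). A standalone sketch of the mask arithmetic, assuming a 64 KiB span size and a made-up address:

#include <stdio.h>
#include <stdint.h>

int main(void) {
	const uint64_t span_size = 64 * 1024;         /* assumed span size */
	const uint64_t span_mask = ~(span_size - 1);  /* same form as _memory_span_mask */
	/* A made-up block address inside a span that starts at 0x7f0000020000. */
	uint64_t block = 0x7f0000020000ull + 4096 + 48;
	uint64_t span  = block & span_mask;
	printf("block %#llx -> span header at %#llx\n",
	       (unsigned long long)block, (unsigned long long)span);
	return 0;
}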
  2625. +
  2626. +////////////
  2627. +///
  2628. +/// Reallocation entry points
  2629. +///
  2630. +//////
  2631. +
  2632. +static size_t
  2633. +_rpmalloc_usable_size(void* p);
  2634. +
  2635. +//! Reallocate the given block to the given size
  2636. +static void*
  2637. +_rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigned int flags) {
  2638. + if (p) {
  2639. + //Grab the span using guaranteed span alignment
  2640. + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
  2641. + if (EXPECTED(span->size_class < SIZE_CLASS_COUNT)) {
  2642. + //Small/medium sized block
  2643. + rpmalloc_assert(span->span_count == 1, "Span counter corrupted");
  2644. + void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
  2645. + uint32_t block_offset = (uint32_t)pointer_diff(p, blocks_start);
  2646. + uint32_t block_idx = block_offset / span->block_size;
  2647. + void* block = pointer_offset(blocks_start, (size_t)block_idx * span->block_size);
  2648. + if (!oldsize)
  2649. + oldsize = (size_t)((ptrdiff_t)span->block_size - pointer_diff(p, block));
  2650. + if ((size_t)span->block_size >= size) {
  2651. + //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
  2652. + if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
  2653. + memmove(block, p, oldsize);
  2654. + return block;
  2655. + }
  2656. + } else if (span->size_class == SIZE_CLASS_LARGE) {
  2657. + //Large block
  2658. + size_t total_size = size + SPAN_HEADER_SIZE;
  2659. + size_t num_spans = total_size >> _memory_span_size_shift;
  2660. + if (total_size & (_memory_span_size - 1))
  2661. + ++num_spans;
  2662. + size_t current_spans = span->span_count;
  2663. + void* block = pointer_offset(span, SPAN_HEADER_SIZE);
  2664. + if (!oldsize)
  2665. + oldsize = (current_spans * _memory_span_size) - (size_t)pointer_diff(p, block) - SPAN_HEADER_SIZE;
  2666. + if ((current_spans >= num_spans) && (total_size >= (oldsize / 2))) {
  2667. + //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
  2668. + if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
  2669. + memmove(block, p, oldsize);
  2670. + return block;
  2671. + }
  2672. + } else {
  2673. + //Oversized block
  2674. + size_t total_size = size + SPAN_HEADER_SIZE;
  2675. + size_t num_pages = total_size >> _memory_page_size_shift;
  2676. + if (total_size & (_memory_page_size - 1))
  2677. + ++num_pages;
  2678. + //Page count is stored in span_count
  2679. + size_t current_pages = span->span_count;
  2680. + void* block = pointer_offset(span, SPAN_HEADER_SIZE);
  2681. + if (!oldsize)
  2682. + oldsize = (current_pages * _memory_page_size) - (size_t)pointer_diff(p, block) - SPAN_HEADER_SIZE;
  2683. + if ((current_pages >= num_pages) && (num_pages >= (current_pages / 2))) {
  2684. + //Still fits in block, never mind trying to save memory, but preserve data if alignment changed
  2685. + if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
  2686. + memmove(block, p, oldsize);
  2687. + return block;
  2688. + }
  2689. + }
  2690. + } else {
  2691. + oldsize = 0;
  2692. + }
  2693. +
  2694. + if (!!(flags & RPMALLOC_GROW_OR_FAIL))
  2695. + return 0;
  2696. +
  2697. + //Size is greater than block size, need to allocate a new block and deallocate the old
  2698. + //Avoid hysteresis by overallocating if the increase is small (below ~37.5%)
  2699. + size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);
  2700. + size_t new_size = (size > lower_bound) ? size : ((size > oldsize) ? lower_bound : size);
  2701. + void* block = _rpmalloc_allocate(heap, new_size);
  2702. + if (p && block) {
  2703. + if (!(flags & RPMALLOC_NO_PRESERVE))
  2704. + memcpy(block, p, oldsize < new_size ? oldsize : new_size);
  2705. + _rpmalloc_deallocate(p);
  2706. + }
  2707. +
  2708. + return block;
  2709. +}
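When the block really has to move, the new size is bumped to at least ~1.375x the old size so a sequence of slightly growing reallocs does not copy every time. A standalone check of that policy (the demo_ name is illustrative):

#include <stdio.h>

/* Illustrative mirror of the growth policy above: grow to at least
 * oldsize + oldsize/4 + oldsize/8 unless the caller asked for even more. */
static size_t
demo_grow_size(size_t oldsize, size_t requested) {
	size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);
	return (requested > lower_bound) ? requested
	                                 : ((requested > oldsize) ? lower_bound : requested);
}

int main(void) {
	printf("%zu\n", demo_grow_size(1000, 1100));  /* small growth -> 1375 */
	printf("%zu\n", demo_grow_size(1000, 5000));  /* big growth   -> 5000 */
	printf("%zu\n", demo_grow_size(1000, 800));   /* shrink       -> 800  */
	return 0;
}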
  2710. +
  2711. +static void*
  2712. +_rpmalloc_aligned_reallocate(heap_t* heap, void* ptr, size_t alignment, size_t size, size_t oldsize,
  2713. + unsigned int flags) {
  2714. + if (alignment <= SMALL_GRANULARITY)
  2715. + return _rpmalloc_reallocate(heap, ptr, size, oldsize, flags);
  2716. +
  2717. + int no_alloc = !!(flags & RPMALLOC_GROW_OR_FAIL);
  2718. + size_t usablesize = (ptr ? _rpmalloc_usable_size(ptr) : 0);
  2719. + if ((usablesize >= size) && !((uintptr_t)ptr & (alignment - 1))) {
  2720. + if (no_alloc || (size >= (usablesize / 2)))
  2721. + return ptr;
  2722. + }
  2723. + // Aligned alloc marks span as having aligned blocks
  2724. + void* block = (!no_alloc ? _rpmalloc_aligned_allocate(heap, alignment, size) : 0);
  2725. + if (EXPECTED(block != 0)) {
  2726. + if (!(flags & RPMALLOC_NO_PRESERVE) && ptr) {
  2727. + if (!oldsize)
  2728. + oldsize = usablesize;
  2729. + memcpy(block, ptr, oldsize < size ? oldsize : size);
  2730. + }
  2731. + _rpmalloc_deallocate(ptr);
  2732. + }
  2733. + return block;
  2734. +}
  2735. +
  2736. +
  2737. +////////////
  2738. +///
  2739. +/// Initialization, finalization and utility
  2740. +///
  2741. +//////
  2742. +
  2743. +//! Get the usable size of the given block
  2744. +static size_t
  2745. +_rpmalloc_usable_size(void* p) {
  2746. + //Grab the span using guaranteed span alignment
  2747. + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
  2748. + if (span->size_class < SIZE_CLASS_COUNT) {
  2749. + //Small/medium block
  2750. + void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
  2751. + return span->block_size - ((size_t)pointer_diff(p, blocks_start) % span->block_size);
  2752. + }
  2753. + if (span->size_class == SIZE_CLASS_LARGE) {
  2754. + //Large block
  2755. + size_t current_spans = span->span_count;
  2756. + return (current_spans * _memory_span_size) - (size_t)pointer_diff(p, span);
  2757. + }
  2758. + //Oversized block, page count is stored in span_count
  2759. + size_t current_pages = span->span_count;
  2760. + return (current_pages * _memory_page_size) - (size_t)pointer_diff(p, span);
  2761. +}
  2762. +
  2763. +//! Adjust and optimize the size class properties for the given class
  2764. +static void
  2765. +_rpmalloc_adjust_size_class(size_t iclass) {
  2766. + size_t block_size = _memory_size_class[iclass].block_size;
  2767. + size_t block_count = (_memory_span_size - SPAN_HEADER_SIZE) / block_size;
  2768. +
  2769. + _memory_size_class[iclass].block_count = (uint16_t)block_count;
  2770. + _memory_size_class[iclass].class_idx = (uint16_t)iclass;
  2771. +
  2772. + //Check if previous size classes can be merged
  2773. + if (iclass >= SMALL_CLASS_COUNT) {
  2774. + size_t prevclass = iclass;
  2775. + while (prevclass > 0) {
  2776. + --prevclass;
  2777. + //A previous class can be merged into this one if both hold the same number of blocks per span
  2778. + if (_memory_size_class[prevclass].block_count == _memory_size_class[iclass].block_count)
  2779. + memcpy(_memory_size_class + prevclass, _memory_size_class + iclass, sizeof(_memory_size_class[iclass]));
  2780. + else
  2781. + break;
  2782. + }
  2783. + }
  2784. +}
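Merging works backwards from each class: any smaller class that ends up holding the same number of blocks per span cannot pack blocks more densely anyway, so it is redirected to the larger class and the two share spans. A toy standalone version of the rule (the class table and span payload here are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy version of the merge rule: if two classes fit the same number of
 * blocks into a span, point the smaller one at the larger block size. */
typedef struct { uint32_t block_size; uint16_t block_count; uint16_t class_idx; } demo_class_t;

int main(void) {
	enum { DEMO_SPAN_PAYLOAD = 16 * 1024 };  /* assumed usable bytes per span */
	demo_class_t classes[] = { {1792, 0, 0}, {1920, 0, 1}, {2048, 0, 2} };
	size_t count = sizeof(classes) / sizeof(classes[0]);
	for (size_t i = 0; i < count; ++i) {
		classes[i].block_count = (uint16_t)(DEMO_SPAN_PAYLOAD / classes[i].block_size);
		classes[i].class_idx = (uint16_t)i;
	}
	/* Merge backwards, like _rpmalloc_adjust_size_class() does. */
	for (size_t i = 1; i < count; ++i) {
		size_t prev = i;
		while (prev > 0) {
			--prev;
			if (classes[prev].block_count == classes[i].block_count)
				memcpy(&classes[prev], &classes[i], sizeof(classes[i]));
			else
				break;
		}
	}
	for (size_t i = 0; i < count; ++i)
		printf("class %zu: block_size %u, %u blocks/span, uses class %u\n",
		       i, (unsigned)classes[i].block_size,
		       (unsigned)classes[i].block_count, (unsigned)classes[i].class_idx);
	return 0;
}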
  2785. +
  2786. +//! Initialize the allocator and setup global data
  2787. +extern inline int
  2788. +rpmalloc_initialize(void) {
  2789. + if (_rpmalloc_initialized) {
  2790. + rpmalloc_thread_initialize();
  2791. + return 0;
  2792. + }
  2793. + return rpmalloc_initialize_config(0);
  2794. +}
  2795. +
  2796. +static int
  2797. +rpmalloc_initialize_config(const rpmalloc_config_t* config) {
  2798. + if (_rpmalloc_initialized) {
  2799. + rpmalloc_thread_initialize();
  2800. + return 0;
  2801. + }
  2802. + _rpmalloc_initialized = 1;
  2803. +
  2804. + if (config)
  2805. + memcpy(&_memory_config, config, sizeof(rpmalloc_config_t));
  2806. + else
  2807. + memset(&_memory_config, 0, sizeof(rpmalloc_config_t));
  2808. +
  2809. + if (!_memory_config.memory_map || !_memory_config.memory_unmap) {
  2810. + _memory_config.memory_map = _rpmalloc_mmap_os;
  2811. + _memory_config.memory_unmap = _rpmalloc_unmap_os;
  2812. + }
  2813. +
  2814. +#if PLATFORM_WINDOWS
  2815. + SYSTEM_INFO system_info;
  2816. + memset(&system_info, 0, sizeof(system_info));
  2817. + GetSystemInfo(&system_info);
  2818. + _memory_map_granularity = system_info.dwAllocationGranularity;
  2819. +#else
  2820. + _memory_map_granularity = (size_t)sysconf(_SC_PAGESIZE);
  2821. +#endif
  2822. +
  2823. +#if RPMALLOC_CONFIGURABLE
  2824. + _memory_page_size = _memory_config.page_size;
  2825. +#else
  2826. + _memory_page_size = 0;
  2827. +#endif
  2828. + _memory_huge_pages = 0;
  2829. + if (!_memory_page_size) {
  2830. +#if PLATFORM_WINDOWS
  2831. + _memory_page_size = system_info.dwPageSize;
  2832. +#else
  2833. + _memory_page_size = _memory_map_granularity;
  2834. + if (_memory_config.enable_huge_pages) {
  2835. +#if defined(__linux__)
  2836. + size_t huge_page_size = 0;
  2837. + FILE* meminfo = fopen("/proc/meminfo", "r");
  2838. + if (meminfo) {
  2839. + char line[128];
  2840. + while (!huge_page_size && fgets(line, sizeof(line) - 1, meminfo)) {
  2841. + line[sizeof(line) - 1] = 0;
  2842. + if (strstr(line, "Hugepagesize:"))
  2843. + huge_page_size = (size_t)strtol(line + 13, 0, 10) * 1024;
  2844. + }
  2845. + fclose(meminfo);
  2846. + }
  2847. + if (huge_page_size) {
  2848. + _memory_huge_pages = 1;
  2849. + _memory_page_size = huge_page_size;
  2850. + _memory_map_granularity = huge_page_size;
  2851. + }
  2852. +#elif defined(__FreeBSD__)
  2853. + int rc;
  2854. + size_t sz = sizeof(rc);
  2855. +
  2856. + if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
  2857. + _memory_huge_pages = 1;
  2858. + _memory_page_size = 2 * 1024 * 1024;
  2859. + _memory_map_granularity = _memory_page_size;
  2860. + }
  2861. +#elif defined(__APPLE__) || defined(__NetBSD__)
  2862. + _memory_huge_pages = 1;
  2863. + _memory_page_size = 2 * 1024 * 1024;
  2864. + _memory_map_granularity = _memory_page_size;
  2865. +#endif
  2866. + }
  2867. +#endif
  2868. + } else {
  2869. + if (_memory_config.enable_huge_pages)
  2870. + _memory_huge_pages = 1;
  2871. + }
  2872. +
  2873. +#if PLATFORM_WINDOWS
  2874. + if (_memory_config.enable_huge_pages) {
  2875. + HANDLE token = 0;
  2876. + size_t large_page_minimum = GetLargePageMinimum();
  2877. + if (large_page_minimum)
  2878. + OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
  2879. + if (token) {
  2880. + LUID luid;
  2881. + if (LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &luid)) {
  2882. + TOKEN_PRIVILEGES token_privileges;
  2883. + memset(&token_privileges, 0, sizeof(token_privileges));
  2884. + token_privileges.PrivilegeCount = 1;
  2885. + token_privileges.Privileges[0].Luid = luid;
  2886. + token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  2887. + if (AdjustTokenPrivileges(token, FALSE, &token_privileges, 0, 0, 0)) {
  2888. + if (GetLastError() == ERROR_SUCCESS)
  2889. + _memory_huge_pages = 1;
  2890. + }
  2891. + }
  2892. + CloseHandle(token);
  2893. + }
  2894. + if (_memory_huge_pages) {
  2895. + if (large_page_minimum > _memory_page_size)
  2896. + _memory_page_size = large_page_minimum;
  2897. + if (large_page_minimum > _memory_map_granularity)
  2898. + _memory_map_granularity = large_page_minimum;
  2899. + }
  2900. + }
  2901. +#endif
  2902. +
  2903. + size_t min_span_size = 256;
  2904. + size_t max_page_size;
  2905. +#if UINTPTR_MAX > 0xFFFFFFFF
  2906. + max_page_size = 4096ULL * 1024ULL * 1024ULL;
  2907. +#else
  2908. + max_page_size = 4 * 1024 * 1024;
  2909. +#endif
  2910. + if (_memory_page_size < min_span_size)
  2911. + _memory_page_size = min_span_size;
  2912. + if (_memory_page_size > max_page_size)
  2913. + _memory_page_size = max_page_size;
  2914. + _memory_page_size_shift = 0;
  2915. + size_t page_size_bit = _memory_page_size;
  2916. + while (page_size_bit != 1) {
  2917. + ++_memory_page_size_shift;
  2918. + page_size_bit >>= 1;
  2919. + }
  2920. + _memory_page_size = ((size_t)1 << _memory_page_size_shift);
  2921. +
  2922. +#if RPMALLOC_CONFIGURABLE
  2923. + if (!_memory_config.span_size) {
  2924. + _memory_span_size = _memory_default_span_size;
  2925. + _memory_span_size_shift = _memory_default_span_size_shift;
  2926. + _memory_span_mask = _memory_default_span_mask;
  2927. + } else {
  2928. + size_t span_size = _memory_config.span_size;
  2929. + if (span_size > (256 * 1024))
  2930. + span_size = (256 * 1024);
  2931. + _memory_span_size = 4096;
  2932. + _memory_span_size_shift = 12;
  2933. + while (_memory_span_size < span_size) {
  2934. + _memory_span_size <<= 1;
  2935. + ++_memory_span_size_shift;
  2936. + }
  2937. + _memory_span_mask = ~(uintptr_t)(_memory_span_size - 1);
  2938. + }
  2939. +#endif
  2940. +
  2941. + _memory_span_map_count = ( _memory_config.span_map_count ? _memory_config.span_map_count : DEFAULT_SPAN_MAP_COUNT);
  2942. + if ((_memory_span_size * _memory_span_map_count) < _memory_page_size)
  2943. + _memory_span_map_count = (_memory_page_size / _memory_span_size);
  2944. + if ((_memory_page_size >= _memory_span_size) && ((_memory_span_map_count * _memory_span_size) % _memory_page_size))
  2945. + _memory_span_map_count = (_memory_page_size / _memory_span_size);
  2946. + _memory_heap_reserve_count = (_memory_span_map_count > DEFAULT_SPAN_MAP_COUNT) ? DEFAULT_SPAN_MAP_COUNT : _memory_span_map_count;
  2947. +
  2948. + _memory_config.page_size = _memory_page_size;
  2949. + _memory_config.span_size = _memory_span_size;
  2950. + _memory_config.span_map_count = _memory_span_map_count;
  2951. + _memory_config.enable_huge_pages = _memory_huge_pages;
  2952. +
  2953. +#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || defined(__TINYC__)
  2954. + if (pthread_key_create(&_memory_thread_heap, _rpmalloc_heap_release_raw_fc))
  2955. + return -1;
  2956. +#endif
  2957. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  2958. + fls_key = FlsAlloc(&_rpmalloc_thread_destructor);
  2959. +#endif
  2960. +
  2961. + //Setup all small and medium size classes
  2962. + size_t iclass = 0;
  2963. + _memory_size_class[iclass].block_size = SMALL_GRANULARITY;
  2964. + _rpmalloc_adjust_size_class(iclass);
  2965. + for (iclass = 1; iclass < SMALL_CLASS_COUNT; ++iclass) {
  2966. + size_t size = iclass * SMALL_GRANULARITY;
  2967. + _memory_size_class[iclass].block_size = (uint32_t)size;
  2968. + _rpmalloc_adjust_size_class(iclass);
  2969. + }
  2970. + //At least two blocks per span, then fall back to large allocations
  2971. + _memory_medium_size_limit = (_memory_span_size - SPAN_HEADER_SIZE) >> 1;
  2972. + if (_memory_medium_size_limit > MEDIUM_SIZE_LIMIT)
  2973. + _memory_medium_size_limit = MEDIUM_SIZE_LIMIT;
  2974. + for (iclass = 0; iclass < MEDIUM_CLASS_COUNT; ++iclass) {
  2975. + size_t size = SMALL_SIZE_LIMIT + ((iclass + 1) * MEDIUM_GRANULARITY);
  2976. + if (size > _memory_medium_size_limit)
  2977. + break;
  2978. + _memory_size_class[SMALL_CLASS_COUNT + iclass].block_size = (uint32_t)size;
  2979. + _rpmalloc_adjust_size_class(SMALL_CLASS_COUNT + iclass);
  2980. + }
  2981. +
  2982. + _memory_orphan_heaps = 0;
  2983. +#if RPMALLOC_FIRST_CLASS_HEAPS
  2984. + _memory_first_class_orphan_heaps = 0;
  2985. +#endif
  2986. +#if ENABLE_STATISTICS
  2987. + atomic_store32(&_memory_active_heaps, 0);
  2988. + atomic_store32(&_mapped_pages, 0);
  2989. + _mapped_pages_peak = 0;
  2990. + atomic_store32(&_master_spans, 0);
  2991. + atomic_store32(&_mapped_total, 0);
  2992. + atomic_store32(&_unmapped_total, 0);
  2993. + atomic_store32(&_mapped_pages_os, 0);
  2994. + atomic_store32(&_huge_pages_current, 0);
  2995. + _huge_pages_peak = 0;
  2996. +#endif
  2997. + memset(_memory_heaps, 0, sizeof(_memory_heaps));
  2998. + atomic_store32_release(&_memory_global_lock, 0);
  2999. +
  3000. + //Initialize this thread
  3001. + rpmalloc_thread_initialize();
  3002. + return 0;
  3003. +}
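The page-size handling above clamps the value and then derives the shift by repeated halving, which effectively rounds a non-power-of-two page size down to the nearest power of two. A standalone check of that rounding (illustrative; assumes the input is at least 1, as the clamping guarantees):

#include <stdio.h>

/* Same idea as the initialization code: count how many times the value can
 * be halved, which rounds a size down to the nearest power of two.
 * Illustrative only; requires value >= 1. */
static size_t
demo_floor_pow2(size_t value, size_t* shift_out) {
	size_t shift = 0;
	size_t bit = value;
	while (bit != 1) {
		++shift;
		bit >>= 1;
	}
	if (shift_out)
		*shift_out = shift;
	return (size_t)1 << shift;
}

int main(void) {
	size_t values[] = {4096, 10240, 65536};
	for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) {
		size_t shift;
		size_t rounded = demo_floor_pow2(values[i], &shift);
		printf("%zu -> %zu (shift %zu)\n", values[i], rounded, shift);
	}
	return 0;
}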
  3004. +
  3005. +//! Finalize the allocator
  3006. +void
  3007. +rpmalloc_finalize(void) {
  3008. + rpmalloc_thread_finalize(1);
  3009. + //rpmalloc_dump_statistics(stdout);
  3010. +
  3011. + if (_memory_global_reserve) {
  3012. + atomic_add32(&_memory_global_reserve_master->remaining_spans, -(int32_t)_memory_global_reserve_count);
  3013. + _memory_global_reserve_master = 0;
  3014. + _memory_global_reserve_count = 0;
  3015. + _memory_global_reserve = 0;
  3016. + }
  3017. + atomic_store32_release(&_memory_global_lock, 0);
  3018. +
  3019. + //Free all thread caches and fully free spans
  3020. + for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
  3021. + heap_t* heap = _memory_heaps[list_idx];
  3022. + while (heap) {
  3023. + heap_t* next_heap = heap->next_heap;
  3024. + heap->finalize = 1;
  3025. + _rpmalloc_heap_global_finalize(heap);
  3026. + heap = next_heap;
  3027. + }
  3028. + }
  3029. +
  3030. +#if ENABLE_GLOBAL_CACHE
  3031. + //Free global caches
  3032. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass)
  3033. + _rpmalloc_global_cache_finalize(&_memory_span_cache[iclass]);
  3034. +#endif
  3035. +
  3036. +#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
  3037. + pthread_key_delete(_memory_thread_heap);
  3038. +#endif
  3039. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  3040. + FlsFree(fls_key);
  3041. + fls_key = 0;
  3042. +#endif
  3043. +#if ENABLE_STATISTICS
  3044. + //If you hit these asserts you probably have memory leaks (perhaps global scope data doing dynamic allocations) or double frees in your code
  3045. + rpmalloc_assert(atomic_load32(&_mapped_pages) == 0, "Memory leak detected");
  3046. + rpmalloc_assert(atomic_load32(&_mapped_pages_os) == 0, "Memory leak detected");
  3047. +#endif
  3048. +
  3049. + _rpmalloc_initialized = 0;
  3050. +}
  3051. +
  3052. +//! Initialize thread, assign heap
  3053. +extern inline void
  3054. +rpmalloc_thread_initialize(void) {
  3055. + if (!get_thread_heap_raw()) {
  3056. + heap_t* heap = _rpmalloc_heap_allocate(0);
  3057. + if (heap) {
  3058. + _rpmalloc_stat_inc(&_memory_active_heaps);
  3059. + set_thread_heap(heap);
  3060. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  3061. + FlsSetValue(fls_key, heap);
  3062. +#endif
  3063. + }
  3064. + }
  3065. +}
  3066. +
  3067. +//! Finalize thread, orphan heap
  3068. +void
  3069. +rpmalloc_thread_finalize(int release_caches) {
  3070. + heap_t* heap = get_thread_heap_raw();
  3071. + if (heap)
  3072. + _rpmalloc_heap_release_raw(heap, release_caches);
  3073. + set_thread_heap(0);
  3074. +#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
  3075. + FlsSetValue(fls_key, 0);
  3076. +#endif
  3077. +}
  3078. +
  3079. +int
  3080. +rpmalloc_is_thread_initialized(void) {
  3081. + return (get_thread_heap_raw() != 0) ? 1 : 0;
  3082. +}
  3083. +
  3084. +const rpmalloc_config_t*
  3085. +rpmalloc_config(void) {
  3086. + return &_memory_config;
  3087. +}
  3088. +
  3089. +// Extern interface
  3090. +
  3091. +extern inline RPMALLOC_ALLOCATOR void*
  3092. +rpmalloc(size_t size) {
  3093. +#if ENABLE_VALIDATE_ARGS
  3094. + if (size >= MAX_ALLOC_SIZE) {
  3095. + errno = EINVAL;
  3096. + return 0;
  3097. + }
  3098. +#endif
  3099. + heap_t* heap = get_thread_heap();
  3100. + return _rpmalloc_allocate(heap, size);
  3101. +}
  3102. +
  3103. +extern inline void
  3104. +rpfree(void* ptr) {
  3105. + _rpmalloc_deallocate(ptr);
  3106. +}
  3107. +
  3108. +extern inline RPMALLOC_ALLOCATOR void*
  3109. +rpcalloc(size_t num, size_t size) {
  3110. + size_t total;
  3111. +#if ENABLE_VALIDATE_ARGS
  3112. +#if PLATFORM_WINDOWS
  3113. + int err = SizeTMult(num, size, &total);
  3114. + if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
  3115. + errno = EINVAL;
  3116. + return 0;
  3117. + }
  3118. +#else
  3119. + int err = __builtin_umull_overflow(num, size, &total);
  3120. + if (err || (total >= MAX_ALLOC_SIZE)) {
  3121. + errno = EINVAL;
  3122. + return 0;
  3123. + }
  3124. +#endif
  3125. +#else
  3126. + total = num * size;
  3127. +#endif
  3128. + heap_t* heap = get_thread_heap();
  3129. + void* block = _rpmalloc_allocate(heap, total);
  3130. + if (block)
  3131. + memset(block, 0, total);
  3132. + return block;
  3133. +}
  3134. +
  3135. +extern inline RPMALLOC_ALLOCATOR void*
  3136. +rprealloc(void* ptr, size_t size) {
  3137. +#if ENABLE_VALIDATE_ARGS
  3138. + if (size >= MAX_ALLOC_SIZE) {
  3139. + errno = EINVAL;
  3140. + return ptr;
  3141. + }
  3142. +#endif
  3143. + heap_t* heap = get_thread_heap();
  3144. + return _rpmalloc_reallocate(heap, ptr, size, 0, 0);
  3145. +}
  3146. +
  3147. +extern RPMALLOC_ALLOCATOR void*
  3148. +rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize,
  3149. + unsigned int flags) {
  3150. +#if ENABLE_VALIDATE_ARGS
  3151. + if ((size + alignment < size) || (alignment > _memory_page_size)) {
  3152. + errno = EINVAL;
  3153. + return 0;
  3154. + }
  3155. +#endif
  3156. + heap_t* heap = get_thread_heap();
  3157. + return _rpmalloc_aligned_reallocate(heap, ptr, alignment, size, oldsize, flags);
  3158. +}
  3159. +
  3160. +extern RPMALLOC_ALLOCATOR void*
  3161. +rpaligned_alloc(size_t alignment, size_t size) {
  3162. + heap_t* heap = get_thread_heap();
  3163. + return _rpmalloc_aligned_allocate(heap, alignment, size);
  3164. +}
  3165. +
  3166. +extern inline RPMALLOC_ALLOCATOR void*
  3167. +rpaligned_calloc(size_t alignment, size_t num, size_t size) {
  3168. + size_t total;
  3169. +#if ENABLE_VALIDATE_ARGS
  3170. +#if PLATFORM_WINDOWS
  3171. + int err = SizeTMult(num, size, &total);
  3172. + if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
  3173. + errno = EINVAL;
  3174. + return 0;
  3175. + }
  3176. +#else
  3177. + int err = __builtin_umull_overflow(num, size, &total);
  3178. + if (err || (total >= MAX_ALLOC_SIZE)) {
  3179. + errno = EINVAL;
  3180. + return 0;
  3181. + }
  3182. +#endif
  3183. +#else
  3184. + total = num * size;
  3185. +#endif
  3186. + void* block = rpaligned_alloc(alignment, total);
  3187. + if (block)
  3188. + memset(block, 0, total);
  3189. + return block;
  3190. +}
  3191. +
  3192. +extern inline RPMALLOC_ALLOCATOR void*
  3193. +rpmemalign(size_t alignment, size_t size) {
  3194. + return rpaligned_alloc(alignment, size);
  3195. +}
  3196. +
  3197. +extern inline int
  3198. +rpposix_memalign(void **memptr, size_t alignment, size_t size) {
  3199. + if (memptr)
  3200. + *memptr = rpaligned_alloc(alignment, size);
  3201. + else
  3202. + return EINVAL;
  3203. + return *memptr ? 0 : ENOMEM;
  3204. +}
  3205. +
  3206. +extern inline size_t
  3207. +rpmalloc_usable_size(void* ptr) {
  3208. + return (ptr ? _rpmalloc_usable_size(ptr) : 0);
  3209. +}
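Taken together, the extern entry points mirror the standard malloc family. A minimal usage sketch of the public API as declared in rpmalloc.h (illustration only; when built into musl the glue layer presumably drives initialization, so the explicit calls are shown just for completeness):

#include <stdio.h>
#include "rpmalloc.h"

int main(void) {
	rpmalloc_initialize();                      /* set up global data and this thread */

	void* p = rpmalloc(100);
	printf("usable size of a 100-byte request: %zu\n", rpmalloc_usable_size(p));
	rpfree(p);

	void* q = 0;
	if (rpposix_memalign(&q, 256, 1000) == 0) { /* 256-byte aligned block */
		printf("aligned block at %p\n", q);
		rpfree(q);
	}

	rpmalloc_finalize();
	return 0;
}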
  3210. +
  3211. +extern inline void
  3212. +rpmalloc_thread_collect(void) {
  3213. +}
  3214. +
  3215. +void
  3216. +rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
  3217. + memset(stats, 0, sizeof(rpmalloc_thread_statistics_t));
  3218. + heap_t* heap = get_thread_heap_raw();
  3219. + if (!heap)
  3220. + return;
  3221. +
  3222. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  3223. + size_class_t* size_class = _memory_size_class + iclass;
  3224. + span_t* span = heap->size_class[iclass].partial_span;
  3225. + while (span) {
  3226. + size_t free_count = span->list_size;
  3227. + size_t block_count = size_class->block_count;
  3228. + if (span->free_list_limit < block_count)
  3229. + block_count = span->free_list_limit;
  3230. + free_count += (block_count - span->used_count);
  3231. + stats->sizecache += free_count * size_class->block_size;
  3232. + span = span->next;
  3233. + }
  3234. + }
  3235. +
  3236. +#if ENABLE_THREAD_CACHE
  3237. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3238. + span_cache_t* span_cache;
  3239. + if (!iclass)
  3240. + span_cache = &heap->span_cache;
  3241. + else
  3242. + span_cache = (span_cache_t*)(heap->span_large_cache + (iclass - 1));
  3243. + stats->spancache += span_cache->count * (iclass + 1) * _memory_span_size;
  3244. + }
  3245. +#endif
  3246. +
  3247. + span_t* deferred = (span_t*)atomic_load_ptr(&heap->span_free_deferred);
  3248. + while (deferred) {
  3249. + if (deferred->size_class != SIZE_CLASS_HUGE)
  3250. + stats->spancache += (size_t)deferred->span_count * _memory_span_size;
  3251. + deferred = (span_t*)deferred->free_list;
  3252. + }
  3253. +
  3254. +#if ENABLE_STATISTICS
  3255. + stats->thread_to_global = (size_t)atomic_load64(&heap->thread_to_global);
  3256. + stats->global_to_thread = (size_t)atomic_load64(&heap->global_to_thread);
  3257. +
  3258. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3259. + stats->span_use[iclass].current = (size_t)atomic_load32(&heap->span_use[iclass].current);
  3260. + stats->span_use[iclass].peak = (size_t)atomic_load32(&heap->span_use[iclass].high);
  3261. + stats->span_use[iclass].to_global = (size_t)atomic_load32(&heap->span_use[iclass].spans_to_global);
  3262. + stats->span_use[iclass].from_global = (size_t)atomic_load32(&heap->span_use[iclass].spans_from_global);
  3263. + stats->span_use[iclass].to_cache = (size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache);
  3264. + stats->span_use[iclass].from_cache = (size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache);
  3265. + stats->span_use[iclass].to_reserved = (size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved);
  3266. + stats->span_use[iclass].from_reserved = (size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved);
  3267. + stats->span_use[iclass].map_calls = (size_t)atomic_load32(&heap->span_use[iclass].spans_map_calls);
  3268. + }
  3269. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  3270. + stats->size_use[iclass].alloc_current = (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_current);
  3271. + stats->size_use[iclass].alloc_peak = (size_t)heap->size_class_use[iclass].alloc_peak;
  3272. + stats->size_use[iclass].alloc_total = (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_total);
  3273. + stats->size_use[iclass].free_total = (size_t)atomic_load32(&heap->size_class_use[iclass].free_total);
  3274. + stats->size_use[iclass].spans_to_cache = (size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache);
  3275. + stats->size_use[iclass].spans_from_cache = (size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache);
  3276. + stats->size_use[iclass].spans_from_reserved = (size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_reserved);
  3277. + stats->size_use[iclass].map_calls = (size_t)atomic_load32(&heap->size_class_use[iclass].spans_map_calls);
  3278. + }
  3279. +#endif
  3280. +}
  3281. +
  3282. +void
  3283. +rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
  3284. + memset(stats, 0, sizeof(rpmalloc_global_statistics_t));
  3285. +#if ENABLE_STATISTICS
  3286. + stats->mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
  3287. + stats->mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
  3288. + stats->mapped_total = (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
  3289. + stats->unmapped_total = (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
  3290. + stats->huge_alloc = (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;
  3291. + stats->huge_alloc_peak = (size_t)_huge_pages_peak * _memory_page_size;
  3292. +#endif
  3293. +#if ENABLE_GLOBAL_CACHE
  3294. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass)
  3295. + stats->cached += _memory_span_cache[iclass].count * (iclass + 1) * _memory_span_size;
  3296. +#endif
  3297. +}
  3298. +
  3299. +#if ENABLE_STATISTICS
  3300. +
  3301. +static void
  3302. +_memory_heap_dump_statistics(heap_t* heap, void* file) {
  3303. + fprintf(file, "Heap %d stats:\n", heap->id);
  3304. + fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize BlkCount SpansCur SpansPeak PeakAllocMiB ToCacheMiB FromCacheMiB FromReserveMiB MmapCalls\n");
  3305. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  3306. + if (!atomic_load32(&heap->size_class_use[iclass].alloc_total))
  3307. + continue;
  3308. + fprintf(file, "%3u: %10u %10u %10u %10u %8u %8u %8d %9d %13zu %11zu %12zu %14zu %9u\n", (uint32_t)iclass,
  3309. + atomic_load32(&heap->size_class_use[iclass].alloc_current),
  3310. + heap->size_class_use[iclass].alloc_peak,
  3311. + atomic_load32(&heap->size_class_use[iclass].alloc_total),
  3312. + atomic_load32(&heap->size_class_use[iclass].free_total),
  3313. + _memory_size_class[iclass].block_size,
  3314. + _memory_size_class[iclass].block_count,
  3315. + atomic_load32(&heap->size_class_use[iclass].spans_current),
  3316. + heap->size_class_use[iclass].spans_peak,
  3317. + ((size_t)heap->size_class_use[iclass].alloc_peak * (size_t)_memory_size_class[iclass].block_size) / (size_t)(1024 * 1024),
  3318. + ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache) * _memory_span_size) / (size_t)(1024 * 1024),
  3319. + ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache) * _memory_span_size) / (size_t)(1024 * 1024),
  3320. + ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_reserved) * _memory_span_size) / (size_t)(1024 * 1024),
  3321. + atomic_load32(&heap->size_class_use[iclass].spans_map_calls));
  3322. + }
  3323. + fprintf(file, "Spans Current Peak Deferred PeakMiB Cached ToCacheMiB FromCacheMiB ToReserveMiB FromReserveMiB ToGlobalMiB FromGlobalMiB MmapCalls\n");
  3324. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3325. + if (!atomic_load32(&heap->span_use[iclass].high) && !atomic_load32(&heap->span_use[iclass].spans_map_calls))
  3326. + continue;
  3327. + fprintf(file, "%4u: %8d %8u %8u %8zu %7u %11zu %12zu %12zu %14zu %11zu %13zu %10u\n", (uint32_t)(iclass + 1),
  3328. + atomic_load32(&heap->span_use[iclass].current),
  3329. + atomic_load32(&heap->span_use[iclass].high),
  3330. + atomic_load32(&heap->span_use[iclass].spans_deferred),
  3331. + ((size_t)atomic_load32(&heap->span_use[iclass].high) * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
  3332. +#if ENABLE_THREAD_CACHE
  3333. + (unsigned int)(!iclass ? heap->span_cache.count : heap->span_large_cache[iclass - 1].count),
  3334. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache) * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
  3335. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache) * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
  3336. +#else
  3337. + 0, (size_t)0, (size_t)0,
  3338. +#endif
  3339. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved) * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
  3340. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved) * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
  3341. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_global) * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
  3342. + ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_global) * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
  3343. + atomic_load32(&heap->span_use[iclass].spans_map_calls));
  3344. + }
  3345. + fprintf(file, "Full spans: %zu\n", heap->full_span_count);
  3346. + fprintf(file, "ThreadToGlobalMiB GlobalToThreadMiB\n");
  3347. + fprintf(file, "%17zu %17zu\n", (size_t)atomic_load64(&heap->thread_to_global) / (size_t)(1024 * 1024), (size_t)atomic_load64(&heap->global_to_thread) / (size_t)(1024 * 1024));
  3348. +}
  3349. +
  3350. +#endif
  3351. +
  3352. +void
  3353. +rpmalloc_dump_statistics(void* file) {
  3354. +#if ENABLE_STATISTICS
  3355. + for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
  3356. + heap_t* heap = _memory_heaps[list_idx];
  3357. + while (heap) {
  3358. + int need_dump = 0;
  3359. + for (size_t iclass = 0; !need_dump && (iclass < SIZE_CLASS_COUNT); ++iclass) {
  3360. + if (!atomic_load32(&heap->size_class_use[iclass].alloc_total)) {
  3361. + rpmalloc_assert(!atomic_load32(&heap->size_class_use[iclass].free_total), "Heap statistics counter mismatch");
  3362. + rpmalloc_assert(!atomic_load32(&heap->size_class_use[iclass].spans_map_calls), "Heap statistics counter mismatch");
  3363. + continue;
  3364. + }
  3365. + need_dump = 1;
  3366. + }
  3367. + for (size_t iclass = 0; !need_dump && (iclass < LARGE_CLASS_COUNT); ++iclass) {
  3368. + if (!atomic_load32(&heap->span_use[iclass].high) && !atomic_load32(&heap->span_use[iclass].spans_map_calls))
  3369. + continue;
  3370. + need_dump = 1;
  3371. + }
  3372. + if (need_dump)
  3373. + _memory_heap_dump_statistics(heap, file);
  3374. + heap = heap->next_heap;
  3375. + }
  3376. + }
  3377. + fprintf(file, "Global stats:\n");
  3378. + size_t huge_current = (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;
  3379. + size_t huge_peak = (size_t)_huge_pages_peak * _memory_page_size;
  3380. + fprintf(file, "HugeCurrentMiB HugePeakMiB\n");
  3381. + fprintf(file, "%14zu %11zu\n", huge_current / (size_t)(1024 * 1024), huge_peak / (size_t)(1024 * 1024));
  3382. +
  3383. + fprintf(file, "GlobalCacheMiB\n");
  3384. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3385. + global_cache_t* cache = _memory_span_cache + iclass;
  3386. + size_t global_cache = (size_t)cache->count * (iclass + 1) * _memory_span_size;
  3387. +
  3388. + size_t global_overflow_cache = 0;
  3389. + span_t* span = cache->overflow;
  3390. + while (span) {
  3391. + global_overflow_cache += (iclass + 1) * _memory_span_size;
  3392. + span = span->next;
  3393. + }
  3394. + if (global_cache || global_overflow_cache || cache->insert_count || cache->extract_count)
  3395. + fprintf(file, "%4zu: %8zuMiB (%8zuMiB overflow) %14zu insert %14zu extract\n", iclass + 1, global_cache / (size_t)(1024 * 1024), global_overflow_cache / (size_t)(1024 * 1024), cache->insert_count, cache->extract_count);
  3396. + }
  3397. +
  3398. + size_t mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
  3399. + size_t mapped_os = (size_t)atomic_load32(&_mapped_pages_os) * _memory_page_size;
  3400. + size_t mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
  3401. + size_t mapped_total = (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
  3402. + size_t unmapped_total = (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
  3403. + fprintf(file, "MappedMiB MappedOSMiB MappedPeakMiB MappedTotalMiB UnmappedTotalMiB\n");
  3404. + fprintf(file, "%9zu %11zu %13zu %14zu %16zu\n",
  3405. + mapped / (size_t)(1024 * 1024),
  3406. + mapped_os / (size_t)(1024 * 1024),
  3407. + mapped_peak / (size_t)(1024 * 1024),
  3408. + mapped_total / (size_t)(1024 * 1024),
  3409. + unmapped_total / (size_t)(1024 * 1024));
  3410. +
  3411. + fprintf(file, "\n");
  3412. +#if 0
  3413. + int64_t allocated = atomic_load64(&_allocation_counter);
  3414. + int64_t deallocated = atomic_load64(&_deallocation_counter);
  3415. + fprintf(file, "Allocation count: %lli\n", allocated);
  3416. + fprintf(file, "Deallocation count: %lli\n", deallocated);
  3417. + fprintf(file, "Current allocations: %lli\n", (allocated - deallocated));
  3418. + fprintf(file, "Master spans: %d\n", atomic_load32(&_master_spans));
  3419. + fprintf(file, "Dangling master spans: %d\n", atomic_load32(&_unmapped_master_spans));
  3420. +#endif
  3421. +#endif
  3422. + (void)sizeof(file);
  3423. +}
  3424. +
  3425. +#if RPMALLOC_FIRST_CLASS_HEAPS
  3426. +
  3427. +extern inline rpmalloc_heap_t*
  3428. +rpmalloc_heap_acquire(void) {
  3429. + // Must be a pristine heap from newly mapped memory pages, or else memory blocks
  3430. + // could already be allocated from the heap, and those would (wrongly) be released
  3431. + // when the heap is cleared with rpmalloc_heap_free_all(). Heaps from the dedicated
  3432. + // orphan list are also guaranteed to be pristine and can be used.
  3433. + heap_t* heap = _rpmalloc_heap_allocate(1);
  3434. + heap->owner_thread = 0;
  3435. + _rpmalloc_stat_inc(&_memory_active_heaps);
  3436. + return heap;
  3437. +}
  3438. +
  3439. +extern inline void
  3440. +rpmalloc_heap_release(rpmalloc_heap_t* heap) {
  3441. + if (heap)
  3442. + _rpmalloc_heap_release(heap, 1, 1);
  3443. +}
  3444. +
  3445. +extern inline RPMALLOC_ALLOCATOR void*
  3446. +rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) {
  3447. +#if ENABLE_VALIDATE_ARGS
  3448. + if (size >= MAX_ALLOC_SIZE) {
  3449. + errno = EINVAL;
  3450. + return 0;
  3451. + }
  3452. +#endif
  3453. + return _rpmalloc_allocate(heap, size);
  3454. +}
  3455. +
  3456. +extern inline RPMALLOC_ALLOCATOR void*
  3457. +rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) {
  3458. +#if ENABLE_VALIDATE_ARGS
  3459. + if (size >= MAX_ALLOC_SIZE) {
  3460. + errno = EINVAL;
  3461. + return 0;
  3462. + }
  3463. +#endif
  3464. + return _rpmalloc_aligned_allocate(heap, alignment, size);
  3465. +}
  3466. +
  3467. +extern inline RPMALLOC_ALLOCATOR void*
  3468. +rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) {
  3469. + return rpmalloc_heap_aligned_calloc(heap, 0, num, size);
  3470. +}
  3471. +
  3472. +extern inline RPMALLOC_ALLOCATOR void*
  3473. +rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) {
  3474. + size_t total;
  3475. +#if ENABLE_VALIDATE_ARGS
  3476. +#if PLATFORM_WINDOWS
  3477. + int err = SizeTMult(num, size, &total);
  3478. + if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
  3479. + errno = EINVAL;
  3480. + return 0;
  3481. + }
  3482. +#else
  3483. + int err = __builtin_umull_overflow(num, size, &total);
  3484. + if (err || (total >= MAX_ALLOC_SIZE)) {
  3485. + errno = EINVAL;
  3486. + return 0;
  3487. + }
  3488. +#endif
  3489. +#else
  3490. + total = num * size;
  3491. +#endif
  3492. + void* block = _rpmalloc_aligned_allocate(heap, alignment, total);
  3493. + if (block)
  3494. + memset(block, 0, total);
  3495. + return block;
  3496. +}
  3497. +
  3498. +extern inline RPMALLOC_ALLOCATOR void*
  3499. +rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned int flags) {
  3500. +#if ENABLE_VALIDATE_ARGS
  3501. + if (size >= MAX_ALLOC_SIZE) {
  3502. + errno = EINVAL;
  3503. + return ptr;
  3504. + }
  3505. +#endif
  3506. + return _rpmalloc_reallocate(heap, ptr, size, 0, flags);
  3507. +}
  3508. +
  3509. +extern inline RPMALLOC_ALLOCATOR void*
  3510. +rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
  3511. +#if ENABLE_VALIDATE_ARGS
  3512. + if ((size + alignment < size) || (alignment > _memory_page_size)) {
  3513. + errno = EINVAL;
  3514. + return 0;
  3515. + }
  3516. +#endif
  3517. + return _rpmalloc_aligned_reallocate(heap, ptr, alignment, size, 0, flags);
  3518. +}
  3519. +
  3520. +extern inline void
  3521. +rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr) {
  3522. + (void)sizeof(heap);
  3523. + _rpmalloc_deallocate(ptr);
  3524. +}
  3525. +
  3526. +extern inline void
  3527. +rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
  3528. + span_t* span;
  3529. + span_t* next_span;
  3530. +
  3531. + _rpmalloc_heap_cache_adopt_deferred(heap, 0);
  3532. +
  3533. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  3534. + span = heap->size_class[iclass].partial_span;
  3535. + while (span) {
  3536. + next_span = span->next;
  3537. + _rpmalloc_heap_cache_insert(heap, span);
  3538. + span = next_span;
  3539. + }
  3540. + heap->size_class[iclass].partial_span = 0;
  3541. + span = heap->full_span[iclass];
  3542. + while (span) {
  3543. + next_span = span->next;
  3544. + _rpmalloc_heap_cache_insert(heap, span);
  3545. + span = next_span;
  3546. + }
  3547. + }
  3548. + memset(heap->size_class, 0, sizeof(heap->size_class));
  3549. + memset(heap->full_span, 0, sizeof(heap->full_span));
  3550. +
  3551. + span = heap->large_huge_span;
  3552. + while (span) {
  3553. + next_span = span->next;
  3554. + if (UNEXPECTED(span->size_class == SIZE_CLASS_HUGE))
  3555. + _rpmalloc_deallocate_huge(span);
  3556. + else
  3557. + _rpmalloc_heap_cache_insert(heap, span);
  3558. + span = next_span;
  3559. + }
  3560. + heap->large_huge_span = 0;
  3561. + heap->full_span_count = 0;
  3562. +
  3563. +#if ENABLE_THREAD_CACHE
  3564. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3565. + span_cache_t* span_cache;
  3566. + if (!iclass)
  3567. + span_cache = &heap->span_cache;
  3568. + else
  3569. + span_cache = (span_cache_t*)(heap->span_large_cache + (iclass - 1));
  3570. + if (!span_cache->count)
  3571. + continue;
  3572. +#if ENABLE_GLOBAL_CACHE
  3573. + _rpmalloc_stat_add64(&heap->thread_to_global, span_cache->count * (iclass + 1) * _memory_span_size);
  3574. + _rpmalloc_stat_add(&heap->span_use[iclass].spans_to_global, span_cache->count);
  3575. + _rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1, span_cache->count);
  3576. +#else
  3577. + for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
  3578. + _rpmalloc_span_unmap(span_cache->span[ispan]);
  3579. +#endif
  3580. + span_cache->count = 0;
  3581. + }
  3582. +#endif
  3583. +
  3584. +#if ENABLE_STATISTICS
  3585. + for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
  3586. + atomic_store32(&heap->size_class_use[iclass].alloc_current, 0);
  3587. + atomic_store32(&heap->size_class_use[iclass].spans_current, 0);
  3588. + }
  3589. + for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
  3590. + atomic_store32(&heap->span_use[iclass].current, 0);
  3591. + }
  3592. +#endif
  3593. +}
  3594. +
  3595. +extern inline void
  3596. +rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap) {
  3597. + heap_t* prev_heap = get_thread_heap_raw();
  3598. + if (prev_heap != heap) {
  3599. + set_thread_heap(heap);
  3600. + if (prev_heap)
  3601. + rpmalloc_heap_release(prev_heap);
  3602. + }
  3603. +}
  3604. +
  3605. +#endif
  3606. +
  3607. +#include "glue.h"
  3608. diff --git a/src/malloc/rpmalloc/rpmalloc.h b/src/malloc/rpmalloc/rpmalloc.h
  3609. new file mode 100644
  3610. index 00000000..de00e30f
  3611. --- /dev/null
  3612. +++ b/src/malloc/rpmalloc/rpmalloc.h
  3613. @@ -0,0 +1,362 @@
  3614. +/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
  3615. + *
  3616. + * This library provides a cross-platform lock free thread caching malloc implementation in C11.
  3617. + * The latest source code is always available at
  3618. + *
  3619. + * https://github.com/mjansson/rpmalloc
  3620. + *
  3621. + * This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
  3622. + *
  3623. + */
  3624. +
  3625. +#pragma once
  3626. +
  3627. +#include <stddef.h>
  3628. +
  3629. +#ifdef __cplusplus
  3630. +extern "C" {
  3631. +#endif
  3632. +
  3633. +#if defined(__clang__) || defined(__GNUC__)
  3634. +# define RPMALLOC_EXPORT static
  3635. +# define RPMALLOC_ALLOCATOR
  3636. +# if (defined(__clang_major__) && (__clang_major__ < 4)) || (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD)
  3637. +# define RPMALLOC_ATTRIB_MALLOC
  3638. +# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
  3639. +# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
  3640. +# else
  3641. +# define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
  3642. +# define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
  3643. +# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
  3644. +# endif
  3645. +# define RPMALLOC_CDECL
  3646. +#elif defined(_MSC_VER)
  3647. +# define RPMALLOC_EXPORT
  3648. +# define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
  3649. +# define RPMALLOC_ATTRIB_MALLOC
  3650. +# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
  3651. +# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
  3652. +# define RPMALLOC_CDECL __cdecl
  3653. +#else
  3654. +# define RPMALLOC_EXPORT
  3655. +# define RPMALLOC_ALLOCATOR
  3656. +# define RPMALLOC_ATTRIB_MALLOC
  3657. +# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
  3658. +# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
  3659. +# define RPMALLOC_CDECL
  3660. +#endif
  3661. +
  3662. +//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce
3663. +// a very small overhead due to some size calculations not being compile-time constants
  3664. +#ifndef RPMALLOC_CONFIGURABLE
  3665. +#define RPMALLOC_CONFIGURABLE 0
  3666. +#endif
  3667. +
  3668. +//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* functions).
  3669. +// Will introduce a very small overhead to track fully allocated spans in heaps
  3670. +#ifndef RPMALLOC_FIRST_CLASS_HEAPS
  3671. +#define RPMALLOC_FIRST_CLASS_HEAPS 0
  3672. +#endif
  3673. +
  3674. +//! Flag to rpaligned_realloc to not preserve content in reallocation
  3675. +#define RPMALLOC_NO_PRESERVE 1
3676. +//! Flag to rpaligned_realloc to fail and return a null pointer if the grow cannot be done in-place,
3677. +// in which case the original pointer is still valid (just like a call to realloc which fails to allocate
  3678. +// a new block).
  3679. +#define RPMALLOC_GROW_OR_FAIL 2
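
For illustration only, a minimal sketch of how RPMALLOC_GROW_OR_FAIL is typically combined with a manual fallback, assuming a standalone rpmalloc build where the rp* entry points are callable (in this musl integration they are static/internal). The helper name grow_in_place_or_move and the 16-byte alignment are hypothetical, not part of the patch:

#include <string.h>
#include "rpmalloc.h"

/* Try to grow `block` (currently `old_size` bytes, 16-byte aligned) in place;
   fall back to allocate + copy + free when it cannot grow without moving. */
static void* grow_in_place_or_move(void* block, size_t old_size, size_t new_size) {
	/* With RPMALLOC_GROW_OR_FAIL the call returns a null pointer instead of
	   moving the block, and the original block stays valid. */
	void* grown = rpaligned_realloc(block, 16, new_size, old_size, RPMALLOC_GROW_OR_FAIL);
	if (grown)
		return grown;
	void* moved = rpaligned_alloc(16, new_size);
	if (!moved)
		return 0;
	memcpy(moved, block, old_size);
	rpfree(block);
	return moved;
}
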
  3680. +
  3681. +typedef struct rpmalloc_global_statistics_t {
  3682. + //! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
  3683. + size_t mapped;
  3684. + //! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
  3685. + size_t mapped_peak;
  3686. + //! Current amount of memory in global caches for small and medium sizes (<32KiB)
  3687. + size_t cached;
3688. + //! Current amount of memory allocated in huge allocations, i.e. larger than LARGE_SIZE_LIMIT, which is 2MiB by default (only if ENABLE_STATISTICS=1)
  3689. + size_t huge_alloc;
3690. + //! Peak amount of memory allocated in huge allocations, i.e. larger than LARGE_SIZE_LIMIT, which is 2MiB by default (only if ENABLE_STATISTICS=1)
  3691. + size_t huge_alloc_peak;
  3692. + //! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
  3693. + size_t mapped_total;
  3694. + //! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
  3695. + size_t unmapped_total;
  3696. +} rpmalloc_global_statistics_t;
  3697. +
  3698. +typedef struct rpmalloc_thread_statistics_t {
  3699. + //! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
  3700. + size_t sizecache;
  3701. + //! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
  3702. + size_t spancache;
  3703. + //! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
  3704. + size_t thread_to_global;
  3705. + //! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
  3706. + size_t global_to_thread;
  3707. + //! Per span count statistics (only if ENABLE_STATISTICS=1)
  3708. + struct {
  3709. + //! Currently used number of spans
  3710. + size_t current;
  3711. + //! High water mark of spans used
  3712. + size_t peak;
  3713. + //! Number of spans transitioned to global cache
  3714. + size_t to_global;
  3715. + //! Number of spans transitioned from global cache
  3716. + size_t from_global;
  3717. + //! Number of spans transitioned to thread cache
  3718. + size_t to_cache;
  3719. + //! Number of spans transitioned from thread cache
  3720. + size_t from_cache;
  3721. + //! Number of spans transitioned to reserved state
  3722. + size_t to_reserved;
  3723. + //! Number of spans transitioned from reserved state
  3724. + size_t from_reserved;
  3725. + //! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
  3726. + size_t map_calls;
  3727. + } span_use[64];
  3728. + //! Per size class statistics (only if ENABLE_STATISTICS=1)
  3729. + struct {
  3730. + //! Current number of allocations
  3731. + size_t alloc_current;
  3732. + //! Peak number of allocations
  3733. + size_t alloc_peak;
  3734. + //! Total number of allocations
  3735. + size_t alloc_total;
  3736. + //! Total number of frees
  3737. + size_t free_total;
  3738. + //! Number of spans transitioned to cache
  3739. + size_t spans_to_cache;
  3740. + //! Number of spans transitioned from cache
  3741. + size_t spans_from_cache;
  3742. + //! Number of spans transitioned from reserved state
  3743. + size_t spans_from_reserved;
  3744. + //! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
  3745. + size_t map_calls;
  3746. + } size_use[128];
  3747. +} rpmalloc_thread_statistics_t;
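
As a usage sketch of these two statistics structures, the snippet below queries and prints a few counters via rpmalloc_global_statistics()/rpmalloc_thread_statistics() (declared further down in this header). The helper name report_memory_usage is hypothetical; most fields stay zero unless the allocator is built with ENABLE_STATISTICS=1, as the comments above note:

#include <stdio.h>
#include "rpmalloc.h"

static void report_memory_usage(void) {
	rpmalloc_global_statistics_t global = {0};
	rpmalloc_thread_statistics_t thread = {0};
	rpmalloc_global_statistics(&global);
	rpmalloc_thread_statistics(&thread);
	/* mapped/mapped_peak require ENABLE_STATISTICS=1; the cache sizes do not. */
	printf("mapped: %zu bytes (peak %zu)\n", global.mapped, global.mapped_peak);
	printf("global cache: %zu bytes\n", global.cached);
	printf("thread caches: %zu + %zu bytes\n", thread.sizecache, thread.spancache);
}
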
  3748. +
  3749. +typedef struct rpmalloc_config_t {
  3750. + //! Map memory pages for the given number of bytes. The returned address MUST be
  3751. + // aligned to the rpmalloc span size, which will always be a power of two.
  3752. + // Optionally the function can store an alignment offset in the offset variable
  3753. + // in case it performs alignment and the returned pointer is offset from the
  3754. + // actual start of the memory region due to this alignment. The alignment offset
  3755. + // will be passed to the memory unmap function. The alignment offset MUST NOT be
3756. + // larger than 65535 (storable in a uint16_t); if it is, you must use natural
  3757. + // alignment to shift it into 16 bits. If you set a memory_map function, you
  3758. + // must also set a memory_unmap function or else the default implementation will
  3759. + // be used for both. This function must be thread safe, it can be called by
  3760. + // multiple threads simultaneously.
  3761. + void* (*memory_map)(size_t size, size_t* offset);
  3762. + //! Unmap the memory pages starting at address and spanning the given number of bytes.
  3763. + // If release is set to non-zero, the unmap is for an entire span range as returned by
3764. + // a previous call to memory_map, and the entire range should be released. The
  3765. + // release argument holds the size of the entire span range. If release is set to 0,
  3766. + // the unmap is a partial decommit of a subset of the mapped memory range.
  3767. + // If you set a memory_unmap function, you must also set a memory_map function or
  3768. + // else the default implementation will be used for both. This function must be thread
  3769. + // safe, it can be called by multiple threads simultaneously.
  3770. + void (*memory_unmap)(void* address, size_t size, size_t offset, size_t release);
  3771. + //! Called when an assert fails, if asserts are enabled. Will use the standard assert()
  3772. + // if this is not set.
  3773. + void (*error_callback)(const char* message);
  3774. + //! Called when a call to map memory pages fails (out of memory). If this callback is
  3775. + // not set or returns zero the library will return a null pointer in the allocation
  3776. + // call. If this callback returns non-zero the map call will be retried. The argument
  3777. + // passed is the number of bytes that was requested in the map call. Only used if
  3778. + // the default system memory map function is used (memory_map callback is not set).
  3779. + int (*map_fail_callback)(size_t size);
  3780. + //! Size of memory pages. The page size MUST be a power of two. All memory mapping
  3781. + // requests to memory_map will be made with size set to a multiple of the page size.
  3782. + // Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
  3783. + size_t page_size;
  3784. + //! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
  3785. + // range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
  3786. + // is defined to 1.
  3787. + size_t span_size;
  3788. + //! Number of spans to map at each request to map new virtual memory blocks. This can
  3789. + // be used to minimize the system call overhead at the cost of virtual memory address
  3790. + // space. The extra mapped pages will not be written until actually used, so physical
  3791. + // committed memory should not be affected in the default implementation. Will be
  3792. + // aligned to a multiple of spans that match memory page size in case of huge pages.
  3793. + size_t span_map_count;
  3794. + //! Enable use of large/huge pages. If this flag is set to non-zero and page size is
  3795. + // zero, the allocator will try to enable huge pages and auto detect the configuration.
  3796. + // If this is set to non-zero and page_size is also non-zero, the allocator will
  3797. + // assume huge pages have been configured and enabled prior to initializing the
  3798. + // allocator.
  3799. + // For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
  3800. + // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
  3801. + int enable_huge_pages;
  3802. + int unused;
  3803. +} rpmalloc_config_t;
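
To make the memory_map/memory_unmap contract above concrete, here is a minimal sketch of a custom mapping hook passed to rpmalloc_initialize_config(). It is illustrative only: the names my_memory_map, my_memory_unmap, init_with_custom_mapping and MY_SPAN_SIZE are hypothetical, it assumes the default 64KiB span size and a Linux mmap backend, and it skips partial decommits entirely:

#include <stdint.h>
#include <sys/mman.h>
#include "rpmalloc.h"

/* Span size assumed to be the 64KiB default; a real hook would keep this in
   sync with the configured span size. */
#define MY_SPAN_SIZE ((size_t)65536)

static void* my_memory_map(size_t size, size_t* offset) {
	/* Over-map by one span so the start can be rounded up to span alignment;
	   the rounding distance is reported back through *offset (< 65536, so it
	   fits the 16-bit limit described above). */
	void* ptr = mmap(0, size + MY_SPAN_SIZE, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED)
		return 0;
	uintptr_t addr = (uintptr_t)ptr;
	uintptr_t aligned = (addr + (MY_SPAN_SIZE - 1)) & ~(uintptr_t)(MY_SPAN_SIZE - 1);
	*offset = (size_t)(aligned - addr);
	return (void*)aligned;
}

static void my_memory_unmap(void* address, size_t size, size_t offset, size_t release) {
	(void)size;
	if (release) {
		/* Full release: undo the alignment offset and drop the whole mapping,
		   including the extra span that was over-mapped. */
		munmap((char*)address - offset, release + MY_SPAN_SIZE);
	}
	/* release == 0 is a partial decommit; this sketch simply skips it and
	   leaves the pages committed. */
}

static int init_with_custom_mapping(void) {
	rpmalloc_config_t config = {0};
	config.memory_map = my_memory_map;
	config.memory_unmap = my_memory_unmap;
	return rpmalloc_initialize_config(&config);
}
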
  3804. +
  3805. +//! Initialize allocator with default configuration
  3806. +RPMALLOC_EXPORT int
  3807. +rpmalloc_initialize(void);
  3808. +
  3809. +//! Initialize allocator with given configuration
  3810. +RPMALLOC_EXPORT int
  3811. +rpmalloc_initialize_config(const rpmalloc_config_t* config);
  3812. +
  3813. +//! Get allocator configuration
  3814. +RPMALLOC_EXPORT const rpmalloc_config_t*
  3815. +rpmalloc_config(void);
  3816. +
  3817. +//! Finalize allocator
  3818. +RPMALLOC_EXPORT void
  3819. +rpmalloc_finalize(void);
  3820. +
  3821. +//! Initialize allocator for calling thread
  3822. +RPMALLOC_EXPORT void
  3823. +rpmalloc_thread_initialize(void);
  3824. +
  3825. +//! Finalize allocator for calling thread
  3826. +RPMALLOC_EXPORT void
  3827. +rpmalloc_thread_finalize(int release_caches);
  3828. +
  3829. +//! Perform deferred deallocations pending for the calling thread heap
  3830. +RPMALLOC_EXPORT void
  3831. +rpmalloc_thread_collect(void);
  3832. +
  3833. +//! Query if allocator is initialized for calling thread
  3834. +RPMALLOC_EXPORT int
  3835. +rpmalloc_is_thread_initialized(void);
  3836. +
  3837. +//! Get per-thread statistics
  3838. +RPMALLOC_EXPORT void
  3839. +rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats);
  3840. +
  3841. +//! Get global statistics
  3842. +RPMALLOC_EXPORT void
  3843. +rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats);
  3844. +
3845. +//! Dump all statistics in human-readable format to the given file (should be a FILE*)
  3846. +RPMALLOC_EXPORT void
  3847. +rpmalloc_dump_statistics(void* file);
  3848. +
  3849. +//! Allocate a memory block of at least the given size
  3850. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3851. +rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
  3852. +
  3853. +//! Free the given memory block
  3854. +RPMALLOC_EXPORT void
  3855. +rpfree(void* ptr);
  3856. +
  3857. +//! Allocate a memory block of at least the given size and zero initialize it
  3858. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3859. +rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
  3860. +
  3861. +//! Reallocate the given block to at least the given size
  3862. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3863. +rprealloc(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
  3864. +
  3865. +//! Reallocate the given block to at least the given size and alignment,
  3866. +// with optional control flags (see RPMALLOC_NO_PRESERVE).
  3867. +// Alignment must be a power of two and a multiple of sizeof(void*),
  3868. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3869. +// internals is that this must also be strictly less than the span size (default 64KiB)
  3870. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3871. +rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
  3872. +
  3873. +//! Allocate a memory block of at least the given size and alignment.
  3874. +// Alignment must be a power of two and a multiple of sizeof(void*),
  3875. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3876. +// internals is that this must also be strictly less than the span size (default 64KiB)
  3877. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3878. +rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
  3879. +
  3880. +//! Allocate a memory block of at least the given size and alignment, and zero initialize it.
  3881. +// Alignment must be a power of two and a multiple of sizeof(void*),
  3882. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3883. +// internals is that this must also be strictly less than the span size (default 64KiB)
  3884. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3885. +rpaligned_calloc(size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
  3886. +
  3887. +//! Allocate a memory block of at least the given size and alignment.
  3888. +// Alignment must be a power of two and a multiple of sizeof(void*),
  3889. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3890. +// internals is that this must also be strictly less than the span size (default 64KiB)
  3891. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3892. +rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
  3893. +
3894. +//! Allocate a memory block of at least the given size and alignment (posix_memalign semantics: the block is stored in memptr and zero is returned on success).
  3895. +// Alignment must be a power of two and a multiple of sizeof(void*),
  3896. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3897. +// internals is that this must also be strictly less than the span size (default 64KiB)
  3898. +RPMALLOC_EXPORT int
  3899. +rpposix_memalign(void** memptr, size_t alignment, size_t size);
  3900. +
  3901. +//! Query the usable size of the given memory block (from given pointer to the end of block)
  3902. +RPMALLOC_EXPORT size_t
  3903. +rpmalloc_usable_size(void* ptr);
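
A short end-to-end sketch of the basic allocation API declared above, assuming a standalone rpmalloc build where the rp* entry points are exported (in this patch they are internal to libc and reached through the normal malloc interface instead):

#include <stdio.h>
#include <string.h>
#include "rpmalloc.h"

int main(void) {
	if (rpmalloc_initialize() != 0)
		return 1;
	/* Threads other than the initializing one would bracket their use with
	   rpmalloc_thread_initialize() / rpmalloc_thread_finalize(1). */
	char* buffer = rpmalloc(100);
	if (!buffer)
		return 1;
	/* The usable size may exceed the request because of size-class rounding. */
	printf("asked for 100, usable %zu bytes\n", rpmalloc_usable_size(buffer));
	buffer = rprealloc(buffer, 4096);
	if (buffer) {
		strcpy(buffer, "hello");
		rpfree(buffer);
	}
	/* 256-byte alignment: a power of two, a multiple of sizeof(void*), below page size. */
	void* aligned = rpaligned_alloc(256, 1024);
	if (aligned)
		rpfree(aligned);
	rpmalloc_finalize();
	return 0;
}
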
  3904. +
  3905. +#if RPMALLOC_FIRST_CLASS_HEAPS
  3906. +
  3907. +//! Heap type
  3908. +typedef struct heap_t rpmalloc_heap_t;
  3909. +
  3910. +//! Acquire a new heap. Will reuse existing released heaps or allocate memory for a new heap
3911. +// if none is available. The heap API is implemented with the strict assumption that only a single
3912. +// thread will call heap functions for a given heap at any given time; no heap functions are thread safe.
  3913. +RPMALLOC_EXPORT rpmalloc_heap_t*
  3914. +rpmalloc_heap_acquire(void);
  3915. +
  3916. +//! Release a heap (does NOT free the memory allocated by the heap, use rpmalloc_heap_free_all before destroying the heap).
  3917. +// Releasing a heap will enable it to be reused by other threads. Safe to pass a null pointer.
  3918. +RPMALLOC_EXPORT void
  3919. +rpmalloc_heap_release(rpmalloc_heap_t* heap);
  3920. +
  3921. +//! Allocate a memory block of at least the given size using the given heap.
  3922. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3923. +rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
  3924. +
  3925. +//! Allocate a memory block of at least the given size using the given heap. The returned
  3926. +// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
  3927. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3928. +// internals is that this must also be strictly less than the span size (default 64KiB).
  3929. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3930. +rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
  3931. +
  3932. +//! Allocate a memory block of at least the given size using the given heap and zero initialize it.
  3933. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3934. +rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
  3935. +
  3936. +//! Allocate a memory block of at least the given size using the given heap and zero initialize it. The returned
  3937. +// block will have the requested alignment. Alignment must either be zero, or a power of two and a multiple of sizeof(void*),
  3938. +// and should ideally be less than memory page size. A caveat of rpmalloc
  3939. +// internals is that this must also be strictly less than the span size (default 64KiB).
  3940. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3941. +rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
  3942. +
  3943. +//! Reallocate the given block to at least the given size. The memory block MUST be allocated
  3944. +// by the same heap given to this function.
  3945. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3946. +rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
  3947. +
  3948. +//! Reallocate the given block to at least the given size. The memory block MUST be allocated
  3949. +// by the same heap given to this function. The returned block will have the requested alignment.
  3950. +// Alignment must be either zero, or a power of two and a multiple of sizeof(void*), and should ideally be
  3951. +// less than memory page size. A caveat of rpmalloc internals is that this must also be strictly less than
  3952. +// the span size (default 64KiB).
  3953. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
  3954. +rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
  3955. +
  3956. +//! Free the given memory block from the given heap. The memory block MUST be allocated
  3957. +// by the same heap given to this function.
  3958. +RPMALLOC_EXPORT void
  3959. +rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr);
  3960. +
  3961. +//! Free all memory allocated by the heap
  3962. +RPMALLOC_EXPORT void
  3963. +rpmalloc_heap_free_all(rpmalloc_heap_t* heap);
  3964. +
3965. +//! Set the given heap as the current heap for the calling thread. A heap MUST only be the current heap
3966. +// for a single thread; a heap can never be shared between multiple threads. The previous
  3967. +// current heap for the calling thread is released to be reused by other threads.
  3968. +RPMALLOC_EXPORT void
  3969. +rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap);
  3970. +
  3971. +#endif
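
For illustration, a minimal sketch of the intended acquire/alloc/free_all/release pattern for the first-class heap API declared above, assuming a build with RPMALLOC_FIRST_CLASS_HEAPS=1; the helper name heap_scratch_demo is hypothetical:

#include "rpmalloc.h"

static void heap_scratch_demo(void) {
	rpmalloc_heap_t* heap = rpmalloc_heap_acquire();
	/* Fill the private heap with short-lived blocks... */
	for (int i = 0; i < 1000; ++i)
		(void)rpmalloc_heap_alloc(heap, 128);
	/* ...then drop them all at once instead of freeing one by one. */
	rpmalloc_heap_free_all(heap);
	/* free_all must come before release: releasing does not free outstanding blocks. */
	rpmalloc_heap_release(heap);
}
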
  3972. +
  3973. +#ifdef __cplusplus
  3974. +}
  3975. +#endif
  3976. diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
  3977. index 6f187ee8..4a917914 100644
  3978. --- a/src/thread/pthread_create.c
  3979. +++ b/src/thread/pthread_create.c
  3980. @@ -7,6 +7,9 @@
  3981. #include <string.h>
  3982. #include <stddef.h>
  3983. +hidden void __malloc_thread_init();
  3984. +hidden void __malloc_thread_finalize();
  3985. +
  3986. static void dummy_0()
  3987. {
  3988. }
  3989. @@ -69,6 +72,8 @@ _Noreturn void __pthread_exit(void *result)
  3990. __pthread_tsd_run_dtors();
  3991. + __malloc_thread_finalize();
  3992. +
  3993. __block_app_sigs(&set);
  3994. /* This atomic potentially competes with a concurrent pthread_detach
  3995. @@ -200,6 +205,7 @@ static int start(void *p)
  3996. }
  3997. }
  3998. __syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
  3999. + __malloc_thread_init();
  4000. __pthread_exit(args->start_func(args->start_arg));
  4001. return 0;
  4002. }
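
The two hidden hooks wired into thread start and exit above are expected to come from glue.h, which rpmalloc.c includes at its end; its contents are not shown in this excerpt. A plausible minimal shape, assuming the hooks simply forward to rpmalloc's per-thread calls (sketch only, the actual glue.h in this patch may differ):

hidden void __malloc_thread_init(void)
{
	rpmalloc_thread_initialize();
}

hidden void __malloc_thread_finalize(void)
{
	/* Return the exiting thread's caches to the global cache. */
	rpmalloc_thread_finalize(1);
}
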
  4003. --
  4004. 2.35.2