/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef js_HashTable_h
#define js_HashTable_h

#include "mozilla/Alignment.h"
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Casting.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/Opaque.h"
#include "mozilla/PodOperations.h"
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"
#include "mozilla/UniquePtr.h"

#include "js/Utility.h"

namespace js {

class TempAllocPolicy;
template <class> struct DefaultHasher;
template <class, class> class HashMapEntry;
namespace detail {
    template <class T> class HashTableEntry;
    template <class T, class HashPolicy, class AllocPolicy> class HashTable;
} // namespace detail

/*****************************************************************************/

// The "generation" of a hash table is an opaque value indicating the state of
// modification of the hash table through its lifetime. If the generation of
// a hash table compares equal at times T1 and T2, then lookups in the hash
// table, pointers to (or into) hash table entries, etc. at time T1 are valid
// at time T2. If the generation compares unequal, these computations are all
// invalid and must be performed again to be used.
//
// Generations are meaningfully comparable only with respect to a single hash
// table. It's always nonsensical to compare the generation of distinct hash
// tables H1 and H2.
using Generation = mozilla::Opaque<uint64_t>;
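//
// For example, a caller can cache a table's generation to decide whether a
// previously obtained pointer is still safe to dereference (illustrative
// sketch; |HM|, |h|, and |maybeMutate| are hypothetical):
//
//   typedef HashMap<int, char> HM;
//   Generation g = h.generation();   // h is an initialized HM
//   HM::Ptr p = h.lookup(3);
//   maybeMutate(h);                  // may add or remove entries
//   if (h.generation() != g)
//     p = h.lookup(3);               // p may be stale; look it up again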
// A JS-friendly, STL-like container providing a hash-based map from keys to
// values. In particular, HashMap calls constructors and destructors of all
// objects added so non-PODs may be used safely.
//
// Key/Value requirements:
//  - movable, destructible, assignable
// HashPolicy requirements:
//  - see Hash Policy section below
// AllocPolicy:
//  - see jsalloc.h
//
// Note:
// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
//   called by HashMap must not call back into the same HashMap object.
// - Due to the lack of exception handling, the user must call |init()|.
template <class Key,
          class Value,
          class HashPolicy = DefaultHasher<Key>,
          class AllocPolicy = TempAllocPolicy>
class HashMap
{
    typedef HashMapEntry<Key, Value> TableEntry;

    struct MapHashPolicy : HashPolicy
    {
        using Base = HashPolicy;
        typedef Key KeyType;
        static const Key& getKey(TableEntry& e) { return e.key(); }
        static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); }
    };

    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef TableEntry Entry;

    // HashMap construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashMap and check the return value.
    explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the map. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   if (HM::Ptr p = h.lookup(3)) {
    //     const HM::Entry& e = *p; // p acts like a pointer to Entry
    //     assert(p->key == 3);     // Entry contains the key
    //     char val = p->value;     // and value
    //   }
    //
    // Also see the definition of Ptr in HashTable above (with T = Entry).
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup& l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the map.
    Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   HM::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3, 'a'))
    //       return false;
    //   }
    //   const HM::Entry& e = *p; // p acts like a pointer to Entry
    //   assert(p->key == 3);     // Entry contains the key
    //   char val = p->value;     // and value
    //
    // Also see the definition of AddPtr in HashTable above (with T = Entry).
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //   HM::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     call_that_may_mutate_h();
    //     if (!h.relookupOrAdd(p, 3, 'a'))
    //       return false;
    //   }
    //   const HM::Entry& e = *p;
    //   assert(p->key == 3);
    //   char val = p->value;
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup& l) const {
        return impl.lookupForAdd(l);
    }

    template<typename KeyInput, typename ValueInput>
    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
        return impl.add(p,
                        mozilla::Forward<KeyInput>(k),
                        mozilla::Forward<ValueInput>(v));
    }

    template<typename KeyInput>
    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) {
        return impl.add(p, mozilla::Forward<KeyInput>(k), Value());
    }

    template<typename KeyInput, typename ValueInput>
    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
        return impl.relookupOrAdd(p, k,
                                  mozilla::Forward<KeyInput>(k),
                                  mozilla::Forward<ValueInput>(v));
    }

    // |all()| returns a Range containing |count()| elements. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
    //     char c = r.front().value();
    //
    // Also see the definition of Range in HashTable above (with T = Entry).
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashMap<int,char> HM;
    //   HM s;
    //   for (HM::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front().value() == 'l')
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable above (with T = Entry).
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all the entries and release all internal buffers. The map must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the map.
    uint32_t count() const { return impl.count(); }

    // Total number of allocated entry slots in the dynamic table. Note: resize
    // will happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashMap.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    Generation generation() const {
        return impl.generation();
    }

    /************************************************** Shorthand operations */

    bool has(const Lookup& l) const {
        return impl.lookup(l).found();
    }

    // Overwrite existing value with v. Return false on OOM.
    template<typename KeyInput, typename ValueInput>
    MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
        AddPtr p = lookupForAdd(k);
        if (p) {
            p->value() = mozilla::Forward<ValueInput>(v);
            return true;
        }
        return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
    }

    // Like put, but assert that the given key is not already present.
    template<typename KeyInput, typename ValueInput>
    MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
        return impl.putNew(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
    }

    // Only call this to populate an empty map after reserving space with init().
    template<typename KeyInput, typename ValueInput>
    void putNewInfallible(KeyInput&& k, ValueInput&& v) {
        impl.putNewInfallible(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
    }

    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on OOM.
    Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
        AddPtr p = lookupForAdd(k);
        if (p)
            return p;
        bool ok = add(p, k, defaultValue);
        MOZ_ASSERT_IF(!ok, !p); // p is left false-y on OOM.
        (void)ok;
        return p;
    }

    // Remove if present.
    void remove(const Lookup& l) {
        if (Ptr p = lookup(l))
            remove(p);
    }
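
    // The shorthand operations above compose as follows (illustrative sketch,
    // assuming |HM| is HashMap<int,char> and |h| is an initialized HM):
    //
    //   if (!h.put(3, 'a'))                      // insert or overwrite
    //     return false;                          // OOM
    //   if (h.has(3))
    //     h.remove(3);
    //   HM::Ptr p = h.lookupWithDefault(4, 'b'); // adds (4,'b') if missing
    //   if (!p)
    //     return false;                          // OOM while adding the default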
    // Infallibly rekey one entry, if necessary.
    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Key& old_key, const Key& new_key) {
        if (old_key != new_key)
            rekeyAs(old_key, new_key, new_key);
    }

    // Infallibly rekey one entry if present, and return whether that happened.
    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) {
        if (Ptr p = lookup(old_lookup)) {
            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
            return true;
        }
        return false;
    }

    // HashMap is movable
    HashMap(HashMap&& rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashMap&& rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashMap is not copyable or assignable
    HashMap(const HashMap& hm) = delete;
    HashMap& operator=(const HashMap& hm) = delete;

    friend class Impl::Enum;
};

/*****************************************************************************/

// A JS-friendly, STL-like container providing a hash-based set of values. In
// particular, HashSet calls constructors and destructors of all objects added
// so non-PODs may be used safely.
//
// T requirements:
//  - movable, destructible, assignable
// HashPolicy requirements:
//  - see Hash Policy section below
// AllocPolicy:
//  - see jsalloc.h
//
// Note:
// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
//   HashSet must not call back into the same HashSet object.
// - Due to the lack of exception handling, the user must call |init()|.
template <class T,
          class HashPolicy = DefaultHasher<T>,
          class AllocPolicy = TempAllocPolicy>
class HashSet
{
    struct SetOps : HashPolicy
    {
        using Base = HashPolicy;
        typedef T KeyType;
        static const KeyType& getKey(const T& t) { return t; }
        static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
    };

    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef T Entry;

    // HashSet construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashSet and check the return value.
    explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the set. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   if (HS::Ptr p = h.lookup(3)) {
    //     assert(*p == 3); // p acts like a pointer to int
    //   }
    //
    // Also see the definition of Ptr in HashTable above.
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup& l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the set.
    Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   HS::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3))
    //       return false;
    //   }
    //   assert(*p == 3); // p acts like a pointer to int
    //
    // Also see the definition of AddPtr in HashTable above.
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //   HS::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     call_that_may_mutate_h();
    //     if (!h.relookupOrAdd(p, 3, 3))
    //       return false;
    //   }
    //   assert(*p == 3);
    //
    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
    // entry |t|, where the caller ensures match(l,t).
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup& l) const { return impl.lookupForAdd(l); }

    template <typename U>
    MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
        return impl.add(p, mozilla::Forward<U>(u));
    }

    template <typename U>
    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
        return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
    }

    // |all()| returns a Range containing |count()| elements:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
    //     int i = r.front();
    //
    // Also see the definition of Range in HashTable above.
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashSet<int> HS;
    //   HS s;
    //   for (HS::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front() == 42)
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable above.
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all the entries and release all internal buffers. The set must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the set.
    uint32_t count() const { return impl.count(); }

    // Total number of allocated entry slots in the dynamic table. Note: resize
    // will happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashSet.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    Generation generation() const {
        return impl.generation();
    }

    /************************************************** Shorthand operations */

    bool has(const Lookup& l) const {
        return impl.lookup(l).found();
    }

    // Add |u| if it is not present already. Return false on OOM.
    template <typename U>
    MOZ_MUST_USE bool put(U&& u) {
        AddPtr p = lookupForAdd(u);
        return p ? true : add(p, mozilla::Forward<U>(u));
    }

    // Like put, but assert that the given key is not already present.
    template <typename U>
    MOZ_MUST_USE bool putNew(U&& u) {
        return impl.putNew(u, mozilla::Forward<U>(u));
    }

    template <typename U>
    MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
        return impl.putNew(l, mozilla::Forward<U>(u));
    }

    // Only call this to populate an empty set after reserving space with init().
    template <typename U>
    void putNewInfallible(const Lookup& l, U&& u) {
        impl.putNewInfallible(l, mozilla::Forward<U>(u));
    }

    void remove(const Lookup& l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Infallibly rekey one entry, if present.
    // Requires template parameters T and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
        if (old_value != new_value)
            rekeyAs(old_value, new_value, new_value);
    }

    // Infallibly rekey one entry if present, and return whether that happened.
    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
        if (Ptr p = lookup(old_lookup)) {
            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
            return true;
        }
        return false;
    }

    // Infallibly replace the current key at |p| with an equivalent key.
    // Specifically, both HashPolicy::hash and HashPolicy::match must return
    // identical results for the new and old key when applied against all
    // possible matching values.
    void replaceKey(Ptr p, const T& new_value) {
        MOZ_ASSERT(p.found());
        MOZ_ASSERT(*p != new_value);
        MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
        MOZ_ASSERT(HashPolicy::match(*p, new_value));
        const_cast<T&>(*p) = new_value;
    }

    // HashSet is movable
    HashSet(HashSet&& rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashSet&& rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashSet is not copyable or assignable
    HashSet(const HashSet& hs) = delete;
    HashSet& operator=(const HashSet& hs) = delete;

    friend class Impl::Enum;
};

/*****************************************************************************/

// Hash Policy
//
// A hash policy P for a hash table with key-type Key must provide:
//  - a type |P::Lookup| to use to lookup table entries;
//  - a static member function |P::hash| with signature
//
//      static js::HashNumber hash(Lookup)
//
//    to use to hash the lookup type; and
//  - a static member function |P::match| with signature
//
//      static bool match(Key, Lookup)
//
//    to use to test equality of key and lookup values.
//
// Normally, Lookup = Key. In general, though, different values and types of
// values can be used to lookup and store. If a Lookup value |l| is != to the
// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
//
//   js::HashSet<Key, P>::AddPtr p = h.lookup(l);
//   if (!p) {
//     assert(P::match(k, l)); // must hold
//     h.add(p, k);
//   }
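//
// A complete policy might look like the following illustrative sketch, where
// |EntityId| and its 32-bit |id()| accessor are hypothetical:
//
//   struct EntityIdHasher
//   {
//     typedef uint32_t Lookup;                 // look up by raw id
//     static js::HashNumber hash(Lookup id) {
//       return id;                             // HashTable scrambles this further
//     }
//     static bool match(const EntityId& key, Lookup id) {
//       return key.id() == id;
//     }
//     static void rekey(EntityId& k, const EntityId& newKey) {
//       k = newKey;
//     }
//   };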
// Pointer hashing policy that strips the lowest zeroBits when calculating the
// hash to improve key distribution.
template <typename Key, size_t zeroBits>
struct PointerHasher
{
    typedef Key Lookup;
    static HashNumber hash(const Lookup& l) {
        size_t word = reinterpret_cast<size_t>(l) >> zeroBits;
        static_assert(sizeof(HashNumber) == 4,
                      "subsequent code assumes a four-byte hash");
#if JS_BITS_PER_WORD == 32
        return HashNumber(word);
#else
        static_assert(sizeof(word) == 8,
                      "unexpected word size, new hashing strategy required to "
                      "properly incorporate all bits");
        return HashNumber((word >> 32) ^ word);
#endif
    }
    static bool match(const Key& k, const Lookup& l) {
        return k == l;
    }
    static void rekey(Key& k, const Key& newKey) {
        k = newKey;
    }
};

// Default hash policy: just use the 'lookup' value. This of course only
// works if the lookup value is integral. HashTable applies ScrambleHashCode to
// the result of the 'hash' which means that it is 'ok' if the lookup value is
// not well distributed over the HashNumber domain.
template <class Key>
struct DefaultHasher
{
    typedef Key Lookup;
    static HashNumber hash(const Lookup& l) {
        // Hash if can implicitly cast to hash number type.
        return l;
    }
    static bool match(const Key& k, const Lookup& l) {
        // Use builtin or overloaded operator==.
        return k == l;
    }
    static void rekey(Key& k, const Key& newKey) {
        k = newKey;
    }
};
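
// With an integral key the default policy is picked up implicitly, e.g.
// (illustrative):
//
//   js::HashMap<uint32_t, const char*> names;  // uses DefaultHasher<uint32_t>
//   js::HashSet<JSObject*> objects;            // uses the pointer specialization below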
// Specialize hashing policy for pointer types. It assumes that the type is
// at least word-aligned. For types with smaller size use PointerHasher.
template <class T>
struct DefaultHasher<T*> : PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>
{};

// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
// raw pointer to PointerHasher.
template <class T, class D>
struct DefaultHasher<mozilla::UniquePtr<T, D>>
{
    using Lookup = mozilla::UniquePtr<T, D>;
    using PtrHasher = PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>;

    static HashNumber hash(const Lookup& l) {
        return PtrHasher::hash(l.get());
    }
    static bool match(const mozilla::UniquePtr<T, D>& k, const Lookup& l) {
        return PtrHasher::match(k.get(), l.get());
    }
    static void rekey(mozilla::UniquePtr<T, D>& k, mozilla::UniquePtr<T, D>&& newKey) {
        k = mozilla::Move(newKey);
    }
};

// For doubles, we can xor the two uint32s.
template <>
struct DefaultHasher<double>
{
    typedef double Lookup;
    static HashNumber hash(double d) {
        static_assert(sizeof(HashNumber) == 4,
                      "subsequent code assumes a four-byte hash");
        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
        return HashNumber(u ^ (u >> 32));
    }
    static bool match(double lhs, double rhs) {
        return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
    }
};

template <>
struct DefaultHasher<float>
{
    typedef float Lookup;
    static HashNumber hash(float f) {
        static_assert(sizeof(HashNumber) == 4,
                      "subsequent code assumes a four-byte hash");
        return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
    }
    static bool match(float lhs, float rhs) {
        return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
    }
};

// A hash policy that compares C strings.
struct CStringHasher
{
    typedef const char* Lookup;
    static js::HashNumber hash(Lookup l) {
        return mozilla::HashString(l);
    }
    static bool match(const char* key, Lookup lookup) {
        return strcmp(key, lookup) == 0;
    }
};

// Fallible hashing interface.
//
// Most of the time generating a hash code is infallible so this class provides
// default methods that always succeed. Specialize this class for your own hash
// policy to provide fallible hashing.
//
// This is used by MovableCellHasher to handle the fact that generating a unique
// ID for a cell pointer may fail due to OOM.
template <typename HashPolicy>
struct FallibleHashMethods
{
    // Return true if a hashcode is already available for its argument. Once
    // this returns true for a specific argument it must continue to do so.
    template <typename Lookup> static bool hasHash(Lookup&& l) { return true; }

    // Fallible method to ensure a hashcode exists for its argument and create
    // one if not. Returns false on error, e.g. out of memory.
    template <typename Lookup> static bool ensureHash(Lookup&& l) { return true; }
};
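
// An illustrative sketch of such a specialization, where |MyPolicy| and the
// |hasUniqueId()|/|ensureUniqueId()| accessors are hypothetical:
//
//   template <>
//   struct FallibleHashMethods<MyPolicy>
//   {
//     template <typename Lookup> static bool hasHash(Lookup&& l) {
//       return l->hasUniqueId();
//     }
//     template <typename Lookup> static bool ensureHash(Lookup&& l) {
//       return l->ensureUniqueId();  // may fail on OOM
//     }
//   };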
template <typename HashPolicy, typename Lookup>
static bool
HasHash(Lookup&& l) {
    return FallibleHashMethods<typename HashPolicy::Base>::hasHash(mozilla::Forward<Lookup>(l));
}

template <typename HashPolicy, typename Lookup>
static bool
EnsureHash(Lookup&& l) {
    return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(mozilla::Forward<Lookup>(l));
}

/*****************************************************************************/

// Both HashMap and HashSet are implemented by a single HashTable that is even
// more heavily parameterized than the other two. This leaves HashTable gnarly
// and extremely coupled to HashMap and HashSet; thus code should not use
// HashTable directly.
template <class Key, class Value>
class HashMapEntry
{
    Key key_;
    Value value_;

    template <class, class, class> friend class detail::HashTable;
    template <class> friend class detail::HashTableEntry;
    template <class, class, class, class> friend class HashMap;

  public:
    template<typename KeyInput, typename ValueInput>
    HashMapEntry(KeyInput&& k, ValueInput&& v)
      : key_(mozilla::Forward<KeyInput>(k)),
        value_(mozilla::Forward<ValueInput>(v))
    {}

    HashMapEntry(HashMapEntry&& rhs)
      : key_(mozilla::Move(rhs.key_)),
        value_(mozilla::Move(rhs.value_))
    {}

    void operator=(HashMapEntry&& rhs) {
        key_ = mozilla::Move(rhs.key_);
        value_ = mozilla::Move(rhs.value_);
    }

    typedef Key KeyType;
    typedef Value ValueType;

    const Key& key() const { return key_; }
    Key& mutableKey() { return key_; }
    const Value& value() const { return value_; }
    Value& value() { return value_; }

  private:
    HashMapEntry(const HashMapEntry&) = delete;
    void operator=(const HashMapEntry&) = delete;
};

} // namespace js

namespace mozilla {

template <typename T>
struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};

template <typename K, typename V>
struct IsPod<js::HashMapEntry<K, V> >
  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
{};

} // namespace mozilla

namespace js {
namespace detail {

template <class T, class HashPolicy, class AllocPolicy>
class HashTable;

template <class T>
class HashTableEntry
{
    template <class, class, class> friend class HashTable;
    typedef typename mozilla::RemoveConst<T>::Type NonConstT;

    HashNumber keyHash;
    mozilla::AlignedStorage2<NonConstT> mem;

    static const HashNumber sFreeKey = 0;
    static const HashNumber sRemovedKey = 1;
    static const HashNumber sCollisionBit = 1;

    static bool isLiveHash(HashNumber hash)
    {
        return hash > sRemovedKey;
    }

    HashTableEntry(const HashTableEntry&) = delete;
    void operator=(const HashTableEntry&) = delete;
    ~HashTableEntry() = delete;

    void destroyStoredT() {
        mem.addr()->~T();
        MOZ_MAKE_MEM_UNDEFINED(mem.addr(), sizeof(*mem.addr()));
    }

  public:
    // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.

    void destroyIfLive() {
        if (isLive())
            destroyStoredT();
    }

    void destroy() {
        MOZ_ASSERT(isLive());
        destroyStoredT();
    }

    void swap(HashTableEntry* other) {
        if (this == other)
            return;
        MOZ_ASSERT(isLive());
        if (other->isLive()) {
            mozilla::Swap(*mem.addr(), *other->mem.addr());
        } else {
            *other->mem.addr() = mozilla::Move(*mem.addr());
            destroy();
        }
        mozilla::Swap(keyHash, other->keyHash);
    }

    T& get() { MOZ_ASSERT(isLive()); return *mem.addr(); }
    NonConstT& getMutable() { MOZ_ASSERT(isLive()); return *mem.addr(); }

    bool isFree() const { return keyHash == sFreeKey; }
    void clearLive() {
        MOZ_ASSERT(isLive());
        keyHash = sFreeKey;
        destroyStoredT();
    }
    void clear() {
        if (isLive())
            destroyStoredT();
        MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
        keyHash = sFreeKey;
    }
    bool isRemoved() const { return keyHash == sRemovedKey; }
    void removeLive() {
        MOZ_ASSERT(isLive());
        keyHash = sRemovedKey;
        destroyStoredT();
    }
    bool isLive() const { return isLiveHash(keyHash); }
    void setCollision() { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; }
    void unsetCollision() { keyHash &= ~sCollisionBit; }
    bool hasCollision() const { return keyHash & sCollisionBit; }
    bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
    HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }

    template <typename... Args>
    void setLive(HashNumber hn, Args&&... args)
    {
        MOZ_ASSERT(!isLive());
        keyHash = hn;
        new(mem.addr()) T(mozilla::Forward<Args>(args)...);
        MOZ_ASSERT(isLive());
    }
};

template <class T, class HashPolicy, class AllocPolicy>
class HashTable : private AllocPolicy
{
    friend class mozilla::ReentrancyGuard;

    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
    typedef typename HashPolicy::KeyType Key;
    typedef typename HashPolicy::Lookup Lookup;

  public:
    typedef HashTableEntry<T> Entry;

    // A nullable pointer to a hash table element. A Ptr |p| can be tested
    // either explicitly |if (p.found()) p->...| or using boolean conversion
    // |if (p) p->...|. Ptr objects must not be used after any mutating hash
    // table operations unless |generation()| is tested.
    class Ptr
    {
        friend class HashTable;

        Entry* entry_;
#ifdef JS_DEBUG
        const HashTable* table_;
        Generation generation;
#endif

      protected:
        Ptr(Entry& entry, const HashTable& tableArg)
          : entry_(&entry)
#ifdef JS_DEBUG
          , table_(&tableArg)
          , generation(tableArg.generation())
#endif
        {}

      public:
        Ptr()
          : entry_(nullptr)
#ifdef JS_DEBUG
          , table_(nullptr)
          , generation(0)
#endif
        {}

        bool isValid() const {
            return !entry_;
        }

        bool found() const {
            if (isValid())
                return false;
#ifdef JS_DEBUG
            MOZ_ASSERT(generation == table_->generation());
#endif
            return entry_->isLive();
        }

        explicit operator bool() const {
            return found();
        }

        bool operator==(const Ptr& rhs) const {
            MOZ_ASSERT(found() && rhs.found());
            return entry_ == rhs.entry_;
        }

        bool operator!=(const Ptr& rhs) const {
#ifdef JS_DEBUG
            MOZ_ASSERT(generation == table_->generation());
#endif
            return !(*this == rhs);
        }

        T& operator*() const {
#ifdef JS_DEBUG
            MOZ_ASSERT(found());
            MOZ_ASSERT(generation == table_->generation());
#endif
            return entry_->get();
        }

        T* operator->() const {
#ifdef JS_DEBUG
            MOZ_ASSERT(found());
            MOZ_ASSERT(generation == table_->generation());
#endif
            return &entry_->get();
        }
    };

    // A Ptr that can be used to add a key after a failed lookup.
    class AddPtr : public Ptr
    {
        friend class HashTable;

        HashNumber keyHash;
#ifdef JS_DEBUG
        uint64_t mutationCount;
#endif

        AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
          : Ptr(entry, tableArg)
          , keyHash(hn)
#ifdef JS_DEBUG
          , mutationCount(tableArg.mutationCount)
#endif
        {}

      public:
        AddPtr() : keyHash(0) {}
    };

    // A collection of hash table entries. The collection is enumerated by
    // calling |front()| followed by |popFront()| as long as |!empty()|. As
    // with Ptr/AddPtr, Range objects must not be used after any mutating hash
    // table operation unless the |generation()| is tested.
    class Range
    {
      protected:
        friend class HashTable;

        Range(const HashTable& tableArg, Entry* c, Entry* e)
          : cur(c)
          , end(e)
#ifdef JS_DEBUG
          , table_(&tableArg)
          , mutationCount(tableArg.mutationCount)
          , generation(tableArg.generation())
          , validEntry(true)
#endif
        {
            while (cur < end && !cur->isLive())
                ++cur;
        }

        Entry* cur;
        Entry* end;
#ifdef JS_DEBUG
        const HashTable* table_;
        uint64_t mutationCount;
        Generation generation;
        bool validEntry;
#endif

      public:
        Range()
          : cur(nullptr)
          , end(nullptr)
#ifdef JS_DEBUG
          , table_(nullptr)
          , mutationCount(0)
          , generation(0)
          , validEntry(false)
#endif
        {}

        bool empty() const {
#ifdef JS_DEBUG
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
#endif
            return cur == end;
        }

        T& front() const {
            MOZ_ASSERT(!empty());
#ifdef JS_DEBUG
            MOZ_ASSERT(validEntry);
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
#endif
            return cur->get();
        }

        void popFront() {
            MOZ_ASSERT(!empty());
#ifdef JS_DEBUG
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
#endif
            while (++cur < end && !cur->isLive())
                continue;
#ifdef JS_DEBUG
            validEntry = true;
#endif
        }
    };

    // A Range whose lifetime delimits a mutating enumeration of a hash table.
    // Since rehashing when elements were removed during enumeration would be
    // bad, it is postponed until the Enum is destructed. Since the Enum's
    // destructor touches the hash table, the user must ensure that the hash
    // table is still alive when the destructor runs.
    class Enum : public Range
    {
        friend class HashTable;

        HashTable& table_;
        bool rekeyed;
        bool removed;

        /* Not copyable. */
        Enum(const Enum&) = delete;
        void operator=(const Enum&) = delete;

      public:
        template<class Map> explicit
        Enum(Map& map) : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}

        // Removes the |front()| element from the table, leaving |front()|
        // invalid until the next call to |popFront()|. For example:
        //
        //   HashSet<int> s;
        //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
        //     if (e.front() == 42)
        //       e.removeFront();
        void removeFront() {
            table_.remove(*this->cur);
            removed = true;
#ifdef JS_DEBUG
            this->validEntry = false;
            this->mutationCount = table_.mutationCount;
#endif
        }

        NonConstT& mutableFront() {
            MOZ_ASSERT(!this->empty());
#ifdef JS_DEBUG
            MOZ_ASSERT(this->validEntry);
            MOZ_ASSERT(this->generation == this->Range::table_->generation());
            MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
#endif
            return this->cur->getMutable();
        }

        // Removes the |front()| element and re-inserts it into the table with
        // a new key at the new Lookup position. |front()| is invalid after
        // this operation until the next call to |popFront()|.
        void rekeyFront(const Lookup& l, const Key& k) {
            MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
            Ptr p(*this->cur, table_);
            table_.rekeyWithoutRehash(p, l, k);
            rekeyed = true;
#ifdef JS_DEBUG
            this->validEntry = false;
            this->mutationCount = table_.mutationCount;
#endif
        }

        void rekeyFront(const Key& k) {
            rekeyFront(k, k);
        }

        // Potentially rehashes the table.
        ~Enum() {
            if (rekeyed) {
                table_.gen++;
                table_.checkOverRemoved();
            }
            if (removed)
                table_.compactIfUnderloaded();
        }
    };

    // HashTable is movable
    HashTable(HashTable&& rhs)
      : AllocPolicy(rhs)
    {
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
    void operator=(HashTable&& rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        if (table)
            destroyTable(*this, table, capacity());
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }

  private:
    // HashTable is not copyable or assignable
    HashTable(const HashTable&) = delete;
    void operator=(const HashTable&) = delete;

  private:
    static const size_t CAP_BITS = 30;

  public:
    uint64_t    gen:56;          // entry storage generation number
    uint64_t    hashShift:8;     // multiplicative hash shift
    Entry*      table;           // entry storage
    uint32_t    entryCount;      // number of entries in table
    uint32_t    removedCount;    // removed entry sentinels in table

#ifdef JS_DEBUG
    uint64_t     mutationCount;
    mutable bool mEntered;
    // Note that some updates to these stats are not thread-safe. See the
    // comment on the three-argument overloading of HashTable::lookup().
    mutable struct Stats
    {
        uint32_t searches;       // total number of table searches
        uint32_t steps;          // hash chain links traversed
        uint32_t hits;           // searches that found key
        uint32_t misses;         // searches that didn't find key
        uint32_t addOverRemoved; // adds that recycled a removed entry
        uint32_t removes;        // calls to remove
        uint32_t removeFrees;    // calls to remove that freed the entry
        uint32_t grows;          // table expansions
        uint32_t shrinks;        // table contractions
        uint32_t compresses;     // table compressions
        uint32_t rehashes;       // tombstone decontaminations
    } stats;
#   define METER(x) x
#else
#   define METER(x)
#endif

    // The default initial capacity is 32 (enough to hold 16 elements), but it
    // can be as low as 4.
    static const unsigned sMinCapacityLog2 = 2;
    static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
    static const unsigned sMaxInit     = JS_BIT(CAP_BITS - 1);
    static const unsigned sMaxCapacity = JS_BIT(CAP_BITS);
    static const unsigned sHashBits    = mozilla::tl::BitSize<HashNumber>::value;

    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
    // math we implement it as a ratio of integers.
    static const uint8_t sAlphaDenominator = 4;
    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
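    // For example, with the ratios above a table of capacity 32 is considered
    // overloaded once entryCount + removedCount reaches 32 * 3/4 = 24, and a
    // table above the minimum capacity is considered underloaded once
    // entryCount drops to 32 * 1/4 = 8 or below.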
    static const HashNumber sFreeKey = Entry::sFreeKey;
    static const HashNumber sRemovedKey = Entry::sRemovedKey;
    static const HashNumber sCollisionBit = Entry::sCollisionBit;

    void setTableSizeLog2(unsigned sizeLog2)
    {
        hashShift = sHashBits - sizeLog2;
    }

    static bool isLiveHash(HashNumber hash)
    {
        return Entry::isLiveHash(hash);
    }

    static HashNumber prepareHash(const Lookup& l)
    {
        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));

        // Avoid reserved hash codes.
        if (!isLiveHash(keyHash))
            keyHash -= (sRemovedKey + 1);
        return keyHash & ~sCollisionBit;
    }

    enum FailureBehavior { DontReportFailure = false, ReportFailure = true };

    static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
                              FailureBehavior reportFailure = ReportFailure)
    {
        static_assert(sFreeKey == 0,
                      "newly-calloc'd tables have to be considered empty");
        if (reportFailure)
            return alloc.template pod_calloc<Entry>(capacity);

        return alloc.template maybe_pod_calloc<Entry>(capacity);
    }

    static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
    {
        static_assert(sFreeKey == 0,
                      "newly-calloc'd tables have to be considered empty");
        return alloc.template maybe_pod_calloc<Entry>(capacity);
    }

    static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
    {
        Entry* end = oldTable + capacity;
        for (Entry* e = oldTable; e < end; ++e)
            e->destroyIfLive();
        alloc.free_(oldTable);
    }

  public:
    explicit HashTable(AllocPolicy ap)
      : AllocPolicy(ap)
      , gen(0)
      , hashShift(sHashBits)
      , table(nullptr)
      , entryCount(0)
      , removedCount(0)
#ifdef JS_DEBUG
      , mutationCount(0)
      , mEntered(false)
#endif
    {}

    MOZ_MUST_USE bool init(uint32_t length)
    {
        MOZ_ASSERT(!initialized());

        // Reject all lengths whose initial computed capacity would exceed
        // sMaxCapacity. Round that maximum length down to the nearest power
        // of two for speedier code.
        if (MOZ_UNLIKELY(length > sMaxInit)) {
            this->reportAllocOverflow();
            return false;
        }

        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
                      "multiplication in numerator below could overflow");
        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
                      "numerator calculation below could potentially overflow");

        // Compute the smallest capacity allowing |length| elements to be
        // inserted without rehashing: ceil(length / max-alpha). (Ceiling
        // integral division: <http://stackoverflow.com/a/2745086>.)
        uint32_t newCapacity =
            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
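        // For example, the default init length of 16 gives
        // (16*4 + 2) / 3 = 22, which the power-of-two rounding below turns
        // into a capacity of 32, matching the comment on the default initial
        // capacity above.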
        if (newCapacity < sMinCapacity)
            newCapacity = sMinCapacity;

        // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
        uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
        while (roundUp < newCapacity) {
            roundUp <<= 1;
            ++roundUpLog2;
        }

        newCapacity = roundUp;
        MOZ_ASSERT(newCapacity >= length);
        MOZ_ASSERT(newCapacity <= sMaxCapacity);

        table = createTable(*this, newCapacity);
        if (!table)
            return false;

        setTableSizeLog2(roundUpLog2);
        METER(memset(&stats, 0, sizeof(stats)));
        return true;
    }

    bool initialized() const
    {
        return !!table;
    }

    ~HashTable()
    {
        if (table)
            destroyTable(*this, table, capacity());
    }

  private:
    HashNumber hash1(HashNumber hash0) const
    {
        return hash0 >> hashShift;
    }

    struct DoubleHash
    {
        HashNumber h2;
        HashNumber sizeMask;
    };

    DoubleHash hash2(HashNumber curKeyHash) const
    {
        unsigned sizeLog2 = sHashBits - hashShift;
        DoubleHash dh = {
            ((curKeyHash << sizeLog2) >> hashShift) | 1,
            (HashNumber(1) << sizeLog2) - 1
        };
        return dh;
    }

    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
    {
        return (h1 - dh.h2) & dh.sizeMask;
    }
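
    // For example, hash1() takes the top sizeLog2 bits of the scrambled key
    // hash as the starting slot, and each collision steps backwards by the
    // odd stride dh.h2, masked to the table size: with capacity 16
    // (sizeMask 15), h1 = 5, and h2 = 3, the probe sequence is 5, 2, 15, 12,
    // ... Because the stride is odd and the capacity a power of two, the
    // sequence visits every slot before repeating.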
  1173. bool overloaded()
  1174. {
  1175. static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
  1176. "multiplication below could overflow");
  1177. return entryCount + removedCount >=
  1178. capacity() * sMaxAlphaNumerator / sAlphaDenominator;
  1179. }
  1180. // Would the table be underloaded if it had the given capacity and entryCount?
  1181. static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
  1182. {
  1183. static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
  1184. "multiplication below could overflow");
  1185. return capacity > sMinCapacity &&
  1186. entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
  1187. }
  1188. bool underloaded()
  1189. {
  1190. return wouldBeUnderloaded(capacity(), entryCount);
  1191. }
  1192. static bool match(Entry& e, const Lookup& l)
  1193. {
  1194. return HashPolicy::match(HashPolicy::getKey(e.get()), l);
  1195. }
  1196. // Warning: in order for readonlyThreadsafeLookup() to be safe this
  1197. // function must not modify the table in any way when |collisionBit| is 0.
  1198. // (The use of the METER() macro to increment stats violates this
  1199. // restriction but we will live with that for now because it's enabled so
  1200. // rarely.)
  1201. Entry& lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const
  1202. {
  1203. MOZ_ASSERT(isLiveHash(keyHash));
  1204. MOZ_ASSERT(!(keyHash & sCollisionBit));
  1205. MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
  1206. MOZ_ASSERT(table);
  1207. METER(stats.searches++);
  1208. // Compute the primary hash address.
  1209. HashNumber h1 = hash1(keyHash);
  1210. Entry* entry = &table[h1];
  1211. // Miss: return space for a new entry.
  1212. if (entry->isFree()) {
  1213. METER(stats.misses++);
  1214. return *entry;
  1215. }
  1216. // Hit: return entry.
  1217. if (entry->matchHash(keyHash) && match(*entry, l)) {
  1218. METER(stats.hits++);
  1219. return *entry;
  1220. }
  1221. // Collision: double hash.
  1222. DoubleHash dh = hash2(keyHash);
  1223. // Save the first removed entry pointer so we can recycle later.
  1224. Entry* firstRemoved = nullptr;
  1225. while (true) {
  1226. if (MOZ_UNLIKELY(entry->isRemoved())) {
  1227. if (!firstRemoved)
  1228. firstRemoved = entry;
  1229. } else {
  1230. if (collisionBit == sCollisionBit)
  1231. entry->setCollision();
  1232. }
  1233. METER(stats.steps++);
  1234. h1 = applyDoubleHash(h1, dh);
  1235. entry = &table[h1];
  1236. if (entry->isFree()) {
  1237. METER(stats.misses++);
  1238. return firstRemoved ? *firstRemoved : *entry;
  1239. }
  1240. if (entry->matchHash(keyHash) && match(*entry, l)) {
  1241. METER(stats.hits++);
  1242. return *entry;
  1243. }
  1244. }
  1245. }
  1246. // This is a copy of lookup hardcoded to the assumptions:
  1247. // 1. the lookup is a lookupForAdd
  1248. // 2. the key, whose |keyHash| has been passed is not in the table,
  1249. // 3. no entries have been removed from the table.
  1250. // This specialized search avoids the need for recovering lookup values
  1251. // from entries, which allows more flexible Lookup/Key types.
    Entry& findFreeEntry(HashNumber keyHash)
    {
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // We assume 'keyHash' has already been distributed.

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry* entry = &table[h1];

        // Miss: return space for a new entry.
        if (!entry->isLive()) {
            METER(stats.misses++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        while (true) {
            MOZ_ASSERT(!entry->isRemoved());
            entry->setCollision();

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (!entry->isLive()) {
                METER(stats.misses++);
                return *entry;
            }
        }
    }

    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
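    // Grow or shrink the table by a factor of 2^deltaLog2 (deltaLog2 may be
    // negative), allocating fresh entry storage and moving every live entry
    // into it. Removed entries (tombstones) are dropped in the process.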
    RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
    {
        // Look, but don't touch, until we succeed in getting new entry store.
        Entry* oldTable = table;
        uint32_t oldCap = capacity();
        uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
        uint32_t newCapacity = JS_BIT(newLog2);
        if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
            if (reportFailure)
                this->reportAllocOverflow();
            return RehashFailed;
        }

        Entry* newTable = createTable(*this, newCapacity, reportFailure);
        if (!newTable)
            return RehashFailed;

        // We can't fail from here on, so update table parameters.
        setTableSizeLog2(newLog2);
        removedCount = 0;
        gen++;
        table = newTable;

        // Copy only live entries, leaving removed ones behind.
        Entry* end = oldTable + oldCap;
        for (Entry* src = oldTable; src < end; ++src) {
            if (src->isLive()) {
                HashNumber hn = src->getKeyHash();
                findFreeEntry(hn).setLive(
                    hn, mozilla::Move(const_cast<typename Entry::NonConstT&>(src->get())));
                src->destroy();
            }
        }

        // All entries have been destroyed, no need to destroyTable.
        this->free_(oldTable);
        return Rehashed;
    }

    bool shouldCompressTable()
    {
        // Compress if a quarter or more of all entries are removed.
        return removedCount >= (capacity() >> 2);
    }
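    // If the table has exceeded its maximum load factor, rebuild it: when at
    // least a quarter of the entries are tombstones, rebuild at the same
    // capacity (which discards the tombstones); otherwise double the capacity.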
    RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
    {
        if (!overloaded())
            return NotOverloaded;

        int deltaLog2;
        if (shouldCompressTable()) {
            METER(stats.compresses++);
            deltaLog2 = 0;
        } else {
            METER(stats.grows++);
            deltaLog2 = 1;
        }

        return changeTableSize(deltaLog2, reportFailure);
    }

    // Infallibly rehash the table if we are overloaded with removals.
    void checkOverRemoved()
    {
        if (overloaded()) {
            if (checkOverloaded(DontReportFailure) == RehashFailed)
                rehashTableInPlace();
        }
    }
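    // Remove an entry. An entry on a collision path (its collision bit is set)
    // must be left behind as a tombstone so later double-hash probes can still
    // reach entries stored past it; an entry with no collision can be freed
    // outright.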
    void remove(Entry& e)
    {
        MOZ_ASSERT(table);
        METER(stats.removes++);

        if (e.hasCollision()) {
            e.removeLive();
            removedCount++;
        } else {
            METER(stats.removeFrees++);
            e.clearLive();
        }
        entryCount--;
#ifdef JS_DEBUG
        mutationCount++;
#endif
    }
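    // Shrink the table by one log2 step if it has become underloaded. A failed
    // shrink is simply ignored; the table just stays larger than it needs to be.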
    void checkUnderloaded()
    {
        if (underloaded()) {
            METER(stats.shrinks++);
            (void) changeTableSize(-1, DontReportFailure);
        }
    }
    // Resize the table down to the largest capacity which doesn't underload the
    // table. Since we call checkUnderloaded() on every remove, you only need
    // to call this after a bulk removal of items done without calling remove().
    void compactIfUnderloaded()
    {
        int32_t resizeLog2 = 0;
        uint32_t newCapacity = capacity();
        while (wouldBeUnderloaded(newCapacity, entryCount)) {
            newCapacity = newCapacity >> 1;
            resizeLog2--;
        }

        if (resizeLog2 != 0)
            (void) changeTableSize(resizeLog2, DontReportFailure);
    }
    // This is equivalent to changeTableSize(0), i.e. rehashing at the current
    // capacity, but without requiring a second table. We do this by recycling
    // the collision bits to tell us if the element is already inserted or still
    // waiting to be inserted. Since already-inserted elements win any conflicts,
    // we get the same table as we would have gotten through random insertion
    // order.
    void rehashTableInPlace()
    {
        METER(stats.rehashes++);
        removedCount = 0;
        for (size_t i = 0; i < capacity(); ++i)
            table[i].unsetCollision();

        for (size_t i = 0; i < capacity();) {
            Entry* src = &table[i];

            if (!src->isLive() || src->hasCollision()) {
                ++i;
                continue;
            }

            HashNumber keyHash = src->getKeyHash();
            HashNumber h1 = hash1(keyHash);
            DoubleHash dh = hash2(keyHash);
            Entry* tgt = &table[h1];
            while (true) {
                if (!tgt->hasCollision()) {
                    src->swap(tgt);
                    tgt->setCollision();
                    break;
                }

                h1 = applyDoubleHash(h1, dh);
                tgt = &table[h1];
            }
        }

        // TODO: this algorithm leaves collision bits on *all* elements, even if
        // they are on no collision path. We have the option of setting the
        // collision bits correctly on a subsequent pass or skipping the rehash
        // unless we are totally filled with tombstones: benchmark to find out
        // which approach is best.
    }
    // Note: |l| may be a reference to a piece of |args|, so this function
    // must take care not to use |l| after moving |args|.
    //
    // Prefer to use putNewInfallible; this function does not check
    // invariants.
    template <typename... Args>
    void putNewInfallibleInternal(const Lookup& l, Args&&... args)
    {
        MOZ_ASSERT(table);

        HashNumber keyHash = prepareHash(l);
        Entry* entry = &findFreeEntry(keyHash);
        MOZ_ASSERT(entry);

        if (entry->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            keyHash |= sCollisionBit;
        }

        entry->setLive(keyHash, mozilla::Forward<Args>(args)...);
        entryCount++;
#ifdef JS_DEBUG
        mutationCount++;
#endif
    }
  public:
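    // Reset the table to its initial empty state. Every entry is destroyed and
    // the counters are reset, but the entry storage itself is kept.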
    void clear()
    {
        Entry* end = table + capacity();
        for (Entry* e = table; e < end; ++e)
            e->clear();
        removedCount = 0;
        entryCount = 0;
#ifdef JS_DEBUG
        mutationCount++;
#endif
    }
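    // Destroy all entries and release the entry storage entirely; the table
    // must be set up again before it can be used.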
    void finish()
    {
#ifdef JS_DEBUG
        MOZ_ASSERT(!mEntered);
#endif
        if (!table)
            return;

        destroyTable(*this, table, capacity());
        table = nullptr;
        gen++;
        entryCount = 0;
        removedCount = 0;
#ifdef JS_DEBUG
        mutationCount++;
#endif
    }

    Range all() const
    {
        MOZ_ASSERT(table);
        return Range(*this, table, table + capacity());
    }

    bool empty() const
    {
        MOZ_ASSERT(table);
        return !entryCount;
    }

    uint32_t count() const
    {
        MOZ_ASSERT(table);
        return entryCount;
    }

    uint32_t capacity() const
    {
        MOZ_ASSERT(table);
        return JS_BIT(sHashBits - hashShift);
    }

    Generation generation() const
    {
        MOZ_ASSERT(table);
        return Generation(gen);
    }

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(table);
    }

    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
    }

    Ptr lookup(const Lookup& l) const
    {
        mozilla::ReentrancyGuard g(*this);
        if (!HasHash<HashPolicy>(l))
            return Ptr();
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }
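    // Like lookup(), but without taking the reentrancy guard. Because the
    // collision bit passed to the internal lookup is 0, the search never
    // mutates the table, which is what makes concurrent read-only lookups safe.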
    Ptr readonlyThreadsafeLookup(const Lookup& l) const
    {
        if (!HasHash<HashPolicy>(l))
            return Ptr();
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }

    AddPtr lookupForAdd(const Lookup& l) const
    {
        mozilla::ReentrancyGuard g(*this);
        if (!EnsureHash<HashPolicy>(l))
            return AddPtr();
        HashNumber keyHash = prepareHash(l);
        Entry& entry = lookup(l, keyHash, sCollisionBit);
        AddPtr p(entry, *this, keyHash);
        return p;
    }
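    // Illustrative sketch (not part of the original documentation) of the
    // lookupForAdd()/add() protocol, assuming a set-like table |t| whose entry
    // type is constructible from a key |k| of the Lookup type:
    //
    //   auto p = t.lookupForAdd(k);
    //   if (!p.found()) {
    //       if (!t.add(p, k))      // may fail on OOM
    //           return false;
    //   }
    //   // |p| now refers to the existing or newly added entry.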
    template <typename... Args>
    MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
    {
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(table);
        MOZ_ASSERT(!p.found());
        MOZ_ASSERT(!(p.keyHash & sCollisionBit));

        // Check for error from ensureHash() here.
        if (p.isValid())
            return false;

        // Changing an entry from removed to live does not affect whether we
        // are overloaded and can be handled separately.
        if (p.entry_->isRemoved()) {
            if (!this->checkSimulatedOOM())
                return false;
            METER(stats.addOverRemoved++);
            removedCount--;
            p.keyHash |= sCollisionBit;
        } else {
            // Preserve the validity of |p.entry_|.
            RebuildStatus status = checkOverloaded();
            if (status == RehashFailed)
                return false;
            if (status == NotOverloaded && !this->checkSimulatedOOM())
                return false;
            if (status == Rehashed)
                p.entry_ = &findFreeEntry(p.keyHash);
        }

        p.entry_->setLive(p.keyHash, mozilla::Forward<Args>(args)...);
        entryCount++;
#ifdef JS_DEBUG
        mutationCount++;
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        return true;
    }
    // Note: |l| may be a reference to a piece of |args|, so this function
    // must take care not to use |l| after moving |args|.
    template <typename... Args>
    void putNewInfallible(const Lookup& l, Args&&... args)
    {
        MOZ_ASSERT(!lookup(l).found());
        mozilla::ReentrancyGuard g(*this);
        putNewInfallibleInternal(l, mozilla::Forward<Args>(args)...);
    }
    // Note: |l| may alias arguments in |args|, so this function must take
    // care not to use |l| after moving |args|.
    template <typename... Args>
    MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
    {
        if (!this->checkSimulatedOOM())
            return false;

        if (!EnsureHash<HashPolicy>(l))
            return false;

        if (checkOverloaded() == RehashFailed)
            return false;

        putNewInfallible(l, mozilla::Forward<Args>(args)...);
        return true;
    }
    // Note: |l| may be a reference to a piece of |args|, so this function
    // must take care not to use |l| after moving |args|.
    template <typename... Args>
    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
    {
        // Check for error from ensureHash() here.
        if (p.isValid())
            return false;

#ifdef JS_DEBUG
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        {
            mozilla::ReentrancyGuard g(*this);
            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
        }
        return p.found() || add(p, mozilla::Forward<Args>(args)...);
    }
    void remove(Ptr p)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        remove(*p.entry_);
        checkUnderloaded();
    }
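    // Change the key of the entry at |p| to |k| (with |l| the lookup value
    // corresponding to |k|): the stored value is moved out, the old entry is
    // removed, and the value is re-inserted under the new key. No rebuild is
    // performed here; rekeyAndMaybeRehash() additionally rehashes in place if
    // the table has accumulated too many removed entries.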
    void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        typename HashTableEntry<T>::NonConstT t(mozilla::Move(*p));
        HashPolicy::setKey(t, const_cast<Key&>(k));
        remove(*p.entry_);
        putNewInfallibleInternal(l, mozilla::Move(t));
    }

    void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
    {
        rekeyWithoutRehash(p, l, k);
        checkOverRemoved();
    }

#undef METER
};

} // namespace detail
} // namespace js

#endif /* js_HashTable_h */