/*
 * Copyright (c) Contributors to the Open 3D Engine Project.
 * For complete copyright and license terms please see the LICENSE at the root of this distribution.
 *
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 */

#include "RHITestFixture.h"

#include <Atom/RHI/PoolAllocator.h>
#include <Atom/RHI/FreeListAllocator.h>
#include <AzCore/Math/Random.h>
#include <AzCore/std/time.h>
#include <AzCore/UnitTest/UnitTest.h>

#define PRINTF(...) do { UnitTest::ColoredPrintf(UnitTest::COLOR_GREEN, "[ ] "); UnitTest::ColoredPrintf(UnitTest::COLOR_YELLOW, __VA_ARGS__); } while(0)
namespace UnitTest
{
    using namespace AZ;

    class AllocatorTest
        : public RHITestFixture
    {
    public:
        AllocatorTest()
            : RHITestFixture()
        {}

        static size_t GetBytesToBlocks(size_t bytes, size_t alignment)
        {
            return size_t((bytes + alignment - 1) / alignment);
        }
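        // For illustration: GetBytesToBlocks(300, 256) returns 2, because the division
        // rounds up to whole blocks, so a 300 byte request spans two 256 byte blocks.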

        struct TestDescriptor
        {
            bool m_useVisualizer = false;
            RHI::Allocator* m_allocator = nullptr;
            size_t m_byteCountMax = 0;
            size_t m_byteAlignmentBase = 256;
            size_t m_gcLatency = 3;
            size_t m_iterations = 10000;
            size_t m_allocationSizeMin = 1;
            size_t m_allocationSizeMax = 8 * 1024;
            AZ::u64 m_addressBase = 0;
        };

        void run(const TestDescriptor& descriptor)
        {
            struct Block
            {
                Block() : m_used{}, m_gcIteration{} {}
                AZ::u16 m_used;
                AZ::u16 m_gcIteration;
            };

            struct Allocation
            {
                RHI::VirtualAddress address;
                size_t size;
            };

            const size_t BlockCount = descriptor.m_byteCountMax / descriptor.m_byteAlignmentBase;

            AZStd::vector<Block> m_usedBlocks(BlockCount);
            AZStd::vector<Allocation> currentAllocations;
            AZStd::vector<Allocation> retiredAllocationsCurrent;
            AZStd::vector<Allocation> retiredAllocationsPrevious;

            const size_t AllocationSizeRange = descriptor.m_allocationSizeMax - descriptor.m_allocationSizeMin;

            AZStd::string outputString;
            outputString.resize(BlockCount);

            AZ::SimpleLcgRandom random(AZStd::GetTimeNowMicroSecond());

            /**
             * Does a bunch of random add / remove iterations, tracking garbage collection
             * and block usage. It will assert if the allocator attempts to stomp on another
             * allocation that is marked used.
             */
            for (size_t iteration = 0; iteration < descriptor.m_iterations; ++iteration)
            {
                bool doPrint = false;
                // ~52% chance of doing an add (rolls 0-51 out of 100). Biased towards adds so we fill up the allocator.
                if ((random.GetRandom() % 100) <= 51)
                {
                    const size_t AllocationSize = (AllocationSizeRange ? (random.GetRandom() % AllocationSizeRange) : 0) + descriptor.m_allocationSizeMin;
                    const size_t AllocationBlockCount = GetBytesToBlocks(AllocationSize, descriptor.m_byteAlignmentBase);

                    RHI::VirtualAddress address = descriptor.m_allocator->Allocate(AllocationSize, descriptor.m_byteAlignmentBase);

                    // Allocator has space. Record the allocation.
                    if (address.IsValid())
                    {
                        ASSERT_TRUE(address.m_ptr % descriptor.m_byteAlignmentBase == 0);

                        size_t addressOffset = address.m_ptr - descriptor.m_addressBase;
                        for (size_t currentBlock = 0; currentBlock < AllocationBlockCount; ++currentBlock)
                        {
                            size_t blockIndex = GetBytesToBlocks(addressOffset, descriptor.m_byteAlignmentBase) + currentBlock;
                            ASSERT_TRUE(m_usedBlocks[blockIndex].m_used == 0);
                            m_usedBlocks[blockIndex].m_used = 1;
                        }

                        currentAllocations.push_back(Allocation{ address, AllocationSize });
                        doPrint = true;
                    }
                    else
                    {
                        ASSERT_TRUE(currentAllocations.size() || retiredAllocationsCurrent.size());
                    }
                }
                else if (currentAllocations.size())
                {
                    // pick a random allocation to remove
                    size_t allocationIndex = random.GetRandom() % currentAllocations.size();

                    Allocation allocation = currentAllocations[allocationIndex];
                    currentAllocations.erase(currentAllocations.begin() + allocationIndex);

                    descriptor.m_allocator->DeAllocate(allocation.address);
                    retiredAllocationsCurrent.push_back(allocation);
                    doPrint = true;
                }

                if ((random.GetRandom() % 4) == 0)
                {
                    descriptor.m_allocator->GarbageCollect();
                    doPrint = true;

                    retiredAllocationsPrevious.clear();
                    for (Allocation retiredAllocation : retiredAllocationsCurrent)
                    {
                        size_t addressOffset = retiredAllocation.address.m_ptr - descriptor.m_addressBase;
                        size_t blockIndex = addressOffset / descriptor.m_byteAlignmentBase;

                        const size_t AllocationBlockCount = GetBytesToBlocks(retiredAllocation.size, descriptor.m_byteAlignmentBase);

                        // Just use the root block as the bookmarked item.
                        ASSERT_TRUE(m_usedBlocks[blockIndex].m_used == 1);

                        m_usedBlocks[blockIndex].m_gcIteration++;
                        if (m_usedBlocks[blockIndex].m_gcIteration > descriptor.m_gcLatency)
                        {
                            for (size_t currentBlock = 0; currentBlock < AllocationBlockCount; ++currentBlock)
                            {
                                size_t subBlockIndex = GetBytesToBlocks(addressOffset, descriptor.m_byteAlignmentBase) + currentBlock;
                                m_usedBlocks[subBlockIndex].m_used = 0;
                                m_usedBlocks[subBlockIndex].m_gcIteration = 0;
                            }
                        }
                        else
                        {
                            retiredAllocationsPrevious.push_back(retiredAllocation);
                        }
                    }
                    retiredAllocationsCurrent.swap(retiredAllocationsPrevious);

                    if (descriptor.m_useVisualizer)
                    {
                        PRINTF("GC...\n");
                    }
                }

                ASSERT_TRUE((retiredAllocationsCurrent.size() + currentAllocations.size()) == descriptor.m_allocator->GetAllocationCount());

                if (doPrint)
                {
                    for (size_t i = 0; i < BlockCount; ++i)
                    {
                        outputString[i] = m_usedBlocks[i].m_used ? 'x' : '-';
                    }

                    if (descriptor.m_useVisualizer)
                    {
                        PRINTF("%s\n", outputString.c_str());
                    }
                }
            }
        }
    };

    TEST_F(AllocatorTest, PoolAllocator)
    {
        RHI::PoolAllocator::Descriptor descriptor;
        descriptor.m_elementSize = 128;
        descriptor.m_capacityInBytes = 128 * descriptor.m_elementSize;
        descriptor.m_garbageCollectLatency = 2;
        descriptor.m_addressBase = RHI::VirtualAddress::CreateFromPointer((void*)0xffffffffeeee0100);

        RHI::PoolAllocator allocator;
        allocator.Init(descriptor);

        AllocatorTest::TestDescriptor testDescriptor;
        testDescriptor.m_byteCountMax = descriptor.m_capacityInBytes;
        testDescriptor.m_byteAlignmentBase = 128;
        testDescriptor.m_gcLatency = descriptor.m_garbageCollectLatency;
        testDescriptor.m_allocator = &allocator;
        testDescriptor.m_useVisualizer = false;
        testDescriptor.m_iterations = 10000;
        testDescriptor.m_allocationSizeMin = descriptor.m_elementSize;
        testDescriptor.m_allocationSizeMax = descriptor.m_elementSize;
        testDescriptor.m_addressBase = descriptor.m_addressBase.m_ptr;

        run(testDescriptor);
    }

    TEST_F(AllocatorTest, FirstFitAllocator)
    {
        RHI::FreeListAllocator::Descriptor descriptor;
        descriptor.m_capacityInBytes = 64 * 1024;
        descriptor.m_alignmentInBytes = 256;
        descriptor.m_garbageCollectLatency = 2;
        descriptor.m_addressBase = RHI::VirtualAddress::CreateFromPointer((void*)0xffffffffeeee0100);
        descriptor.m_policy = RHI::FreeListAllocatorPolicy::FirstFit;

        RHI::FreeListAllocator allocator;
        allocator.Init(descriptor);

        AllocatorTest::TestDescriptor testDescriptor;
        testDescriptor.m_byteCountMax = descriptor.m_capacityInBytes;
        testDescriptor.m_byteAlignmentBase = descriptor.m_alignmentInBytes;
        testDescriptor.m_gcLatency = descriptor.m_garbageCollectLatency;
        testDescriptor.m_allocator = &allocator;
        testDescriptor.m_useVisualizer = false;
        testDescriptor.m_iterations = 10000;
        testDescriptor.m_addressBase = descriptor.m_addressBase.m_ptr;

        run(testDescriptor);

        descriptor.m_garbageCollectLatency = 0;
        descriptor.m_addressBase = RHI::VirtualAddress::CreateFromOffset(1024);
        allocator.Init(descriptor);

        testDescriptor.m_gcLatency = descriptor.m_garbageCollectLatency;
        testDescriptor.m_addressBase = descriptor.m_addressBase.m_ptr;

        run(testDescriptor);
    }

    TEST_F(AllocatorTest, BestFitAllocator)
    {
        RHI::FreeListAllocator::Descriptor descriptor;
        descriptor.m_capacityInBytes = 64 * 1024;
        descriptor.m_alignmentInBytes = 256;
        descriptor.m_garbageCollectLatency = 2;
        descriptor.m_addressBase = RHI::VirtualAddress::CreateFromPointer((void*)0xffffffffeeee0100);
        descriptor.m_policy = RHI::FreeListAllocatorPolicy::BestFit;

        RHI::FreeListAllocator allocator;
        allocator.Init(descriptor);

        AllocatorTest::TestDescriptor testDescriptor;
        testDescriptor.m_byteCountMax = descriptor.m_capacityInBytes;
        testDescriptor.m_byteAlignmentBase = descriptor.m_alignmentInBytes;
        testDescriptor.m_gcLatency = descriptor.m_garbageCollectLatency;
        testDescriptor.m_allocator = &allocator;
        testDescriptor.m_useVisualizer = false;
        testDescriptor.m_iterations = 10000;
        testDescriptor.m_addressBase = descriptor.m_addressBase.m_ptr;

        run(testDescriptor);

        descriptor.m_garbageCollectLatency = 0;
        descriptor.m_addressBase = RHI::VirtualAddress::CreateFromOffset(1024);
        allocator.Init(descriptor);

        testDescriptor.m_gcLatency = descriptor.m_garbageCollectLatency;
        testDescriptor.m_addressBase = descriptor.m_addressBase.m_ptr;

        run(testDescriptor);
    }

    TEST_F(AllocatorTest, FreeListFragmentation)
    {
        // There are several ways to measure fragmentation, with varying degrees of accuracy
        // (at varying cost). The free list fragmentation computation uses a relatively simple
        // scheme: one minus the ratio of the largest free block size to the total available
        // free capacity.
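        // For example, if the free space consists of a 256 byte block and a 512 byte block,
        // the largest free block covers 512 of the 768 free bytes, so the reported
        // fragmentation is 1 - 512/768 = 1/3. That is exactly the case exercised below.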

        // Create an allocator featuring 4 contiguous 256 byte blocks
        RHI::FreeListAllocator::Descriptor descriptor;
        descriptor.m_capacityInBytes = 1024;
        descriptor.m_alignmentInBytes = 256;
        descriptor.m_policy = RHI::FreeListAllocatorPolicy::FirstFit;

        RHI::FreeListAllocator allocator;
        allocator.Init(descriptor);

        // An allocator without any allocations reports 0 fragmentation
        ASSERT_EQ(allocator.ComputeFragmentation(), 0.f);

        auto address0 = allocator.Allocate(256, 0);

        // After allocating a single block as above, the remaining memory in the allocator remains
        // contiguous, so fragmentation remains 0
        ASSERT_EQ(allocator.ComputeFragmentation(), 0.f);

        allocator.Allocate(256, 0);

        // Same after the second allocation. The free memory is one large block at the end
        ASSERT_EQ(allocator.ComputeFragmentation(), 0.f);

        allocator.DeAllocate(address0);
        allocator.GarbageCollect();

        // Now, we have two free blocks. The large block represents 2/3rds of the available free space,
        // so we expect 1/3 to be the reported fragmentation.
        ASSERT_NEAR(allocator.ComputeFragmentation(), 1.f / 3, 1e-6f);

        allocator.Allocate(512, 0);

        // We've now occupied the last two blocks, so we once again expect 0 fragmentation
        ASSERT_EQ(allocator.ComputeFragmentation(), 0.f);
    }
}