Gaia-ECS v0.9.3
A simple and powerful entity component system
Loading...
Searching...
No Matches
chunk_allocator.h
1#pragma once
2#include "gaia/config/config.h"
3
4#include <cinttypes>
5#include <cstdint>
6
7#include "gaia/cnt/fwd_llist.h"
8#include "gaia/cnt/sarray.h"
9#include "gaia/core/bit_utils.h"
10#include "gaia/core/dyn_singleton.h"
11#include "gaia/core/utility.h"
12#include "gaia/mem/mem_alloc.h"
13#include "gaia/util/logging.h"
14
15namespace gaia {
16 namespace ecs {
17 static constexpr uint32_t MinMemoryBlockSize = 1024 * 8;
19 static constexpr uint32_t MaxMemoryBlockSize = MinMemoryBlockSize * 4;
21 static constexpr uint32_t MemoryBlockUsableOffset = sizeof(uintptr_t);
22
23 constexpr uint16_t mem_block_size(uint32_t sizeType) {
24 constexpr uint16_t sizes[] = {MinMemoryBlockSize, MinMemoryBlockSize * 2, MaxMemoryBlockSize};
25 return sizes[sizeType];
26 }
27
28 constexpr uint8_t mem_block_size_type(uint32_t sizeBytes) {
29 // Ceil division by smallest block size
30 const uint32_t blocks = (sizeBytes + MinMemoryBlockSize - 1) / MinMemoryBlockSize;
31 return blocks > 2 ? 2 : static_cast<uint8_t>(blocks - 1);
32 }
33
34#if GAIA_ECS_CHUNK_ALLOCATOR
		//! Usage statistics for all pages of a single block size type.
		struct GAIA_API ChunkAllocatorPageStats final {
			//! Total allocated memory in bytes
			uint64_t mem_total;
			//! Memory currently in use in bytes
			uint64_t mem_used;
			//! Total number of allocated pages
			uint32_t num_pages;
			//! Number of pages that still have free capacity
			uint32_t num_pages_free;
		};
45
		//! Aggregated allocator statistics, one entry per block size type
		//! (indexed by the value returned from mem_block_size_type).
		struct GAIA_API ChunkAllocatorStats final {
			ChunkAllocatorPageStats stats[3];
		};
49
		namespace detail {
			class ChunkAllocatorImpl;
		}
		//! Public allocator type: a lazily-created singleton wrapping the implementation.
		using ChunkAllocator = core::dyn_singleton<detail::ChunkAllocatorImpl>;
54
55 namespace detail {
56
57 struct MemoryPageHeader {
59 void* m_data;
60
61 MemoryPageHeader(void* ptr): m_data(ptr) {}
62 };
63
			//! A page of memory carved into up to NBlocks fixed-size blocks of one size type.
			//! Freed blocks are recycled through an implicit free-list: each block's "next"
			//! index is packed into a small bit array so the page header stays tiny.
			struct MemoryPage: MemoryPageHeader, cnt::fwd_llist_base<MemoryPage> {
				//! Number of blocks a single page can serve
				static constexpr uint16_t NBlocks = 48;
				//! Bits needed to encode any valid block index
				static constexpr uint16_t NBlocks_Bits = (uint16_t)core::count_bits(NBlocks);
				//! Sentinel written as the "next" index of the last entry in the free-list
				static constexpr uint32_t InvalidBlockId = NBlocks + 1;
				//! Bytes needed to store NBlocks indices of NBlocks_Bits bits each (rounded up)
				static constexpr uint32_t BlockArrayBytes = ((uint32_t)NBlocks_Bits * (uint32_t)NBlocks + 7) / 8;
				using BlockArray = cnt::sarray<uint8_t, BlockArrayBytes>;
				using BitView = core::bit_view<NBlocks_Bits>;

				//! Packed per-block "next free" indices (storage for the implicit free-list)
				BlockArray m_blocks;

				//! Block size type of this page (index into mem_block_size)
				uint32_t m_sizeType : 2;
				//! High-water mark: number of blocks ever carved off this page
				uint32_t m_blockCnt : NBlocks_Bits;
				//! Number of blocks currently handed out
				uint32_t m_usedBlocks : NBlocks_Bits;
				//! Head of the implicit free-list (only meaningful while m_freeBlocks > 0)
				uint32_t m_nextFreeBlock : NBlocks_Bits;
				//! Number of previously freed blocks available for recycling
				uint32_t m_freeBlocks : NBlocks_Bits;
				// uint32_t m_unused : 6;

				MemoryPage(void* ptr, uint8_t sizeType):
						MemoryPageHeader(ptr), m_sizeType(sizeType), m_blockCnt(0), m_usedBlocks(0), m_nextFreeBlock(0),
						m_freeBlocks(0) {
					// One cacheline long on x86. The point is for this to be as small as possible
					static_assert(sizeof(MemoryPage) <= 64);
				}

				//! Stores \p value as the free-list "next" index of block \p blockIdx.
				void write_block_idx(uint32_t blockIdx, uint32_t value) {
					const uint32_t bitPosition = blockIdx * NBlocks_Bits;

					GAIA_ASSERT(bitPosition < NBlocks * NBlocks_Bits);
					GAIA_ASSERT(value <= InvalidBlockId);

					BitView{{(uint8_t*)m_blocks.data(), BlockArrayBytes}}.set(bitPosition, (uint8_t)value);
				}

				//! Reads the free-list "next" index stored for block \p blockIdx.
				uint8_t read_block_idx(uint32_t blockIdx) const {
					const uint32_t bitPosition = blockIdx * NBlocks_Bits;

					GAIA_ASSERT(bitPosition < NBlocks * NBlocks_Bits);

					return BitView{{(uint8_t*)m_blocks.data(), BlockArrayBytes}}.get(bitPosition);
				}

				//! Allocates one block and returns a pointer to its usable memory.
				//! The first MemoryBlockUsableOffset bytes of the raw block store a
				//! back-pointer to this page so free() can locate the owner from the
				//! block address alone; the returned pointer is past that header.
				GAIA_NODISCARD void* alloc_block() {
					auto StoreBlockAddress = [&](uint32_t index) {
						// Encode info about chunk's page in the memory block.
						// The actual pointer returned is offset by MemoryBlockUsableOffset bytes
						uint8_t* pMemoryBlock = (uint8_t*)m_data + (index * mem_block_size(m_sizeType));
						GAIA_ASSERT((uintptr_t)pMemoryBlock % sizeof(uintptr_t) == 0);
						mem::unaligned_ref<uintptr_t>{pMemoryBlock} = (uintptr_t)this;
						return (void*)(pMemoryBlock + MemoryBlockUsableOffset);
					};

					// We don't want to go out of range for new blocks
					GAIA_ASSERT(!full() && "Trying to allocate too many blocks!");

					// No recycled blocks available -> carve a fresh block off the end of the page
					if (m_freeBlocks == 0U) {
						const auto index = m_blockCnt;
						++m_usedBlocks;
						++m_blockCnt;
						write_block_idx(index, index);

						return StoreBlockAddress(index);
					}

					GAIA_ASSERT(m_nextFreeBlock < m_blockCnt && "Block allocator recycle list broken!");

					// Pop the head of the implicit free-list
					++m_usedBlocks;
					--m_freeBlocks;

					const auto index = m_nextFreeBlock;
					m_nextFreeBlock = read_block_idx(m_nextFreeBlock);

					return StoreBlockAddress(index);
				}

				//! Returns a block previously handed out by alloc_block() to this page
				//! by pushing its index onto the implicit free-list.
				void free_block(void* pBlock) {
					GAIA_ASSERT(pBlock != nullptr);
					GAIA_ASSERT(m_usedBlocks > 0);
					GAIA_ASSERT(m_freeBlocks <= NBlocks);

					// Offset the chunk memory so we get the real block address
					const auto* pMemoryBlock = (uint8_t*)pBlock - MemoryBlockUsableOffset;
					const auto blckAddr = (uintptr_t)pMemoryBlock;
					GAIA_ASSERT(blckAddr % sizeof(uintptr_t) == 0);
					const auto dataAddr = (uintptr_t)m_data;
					const auto blockSize = (uintptr_t)mem_block_size(m_sizeType);
					const auto pageSize = blockSize * NBlocks;
					GAIA_ASSERT(blckAddr >= dataAddr);
					GAIA_ASSERT(blckAddr < dataAddr + pageSize);
					GAIA_ASSERT((blckAddr - dataAddr) % blockSize == 0);
					const auto blockIdx = (uint32_t)((blckAddr - dataAddr) / blockSize);
					GAIA_ASSERT(blockIdx < m_blockCnt);

					// Update our implicit list
					if (m_freeBlocks == 0U)
						write_block_idx(blockIdx, InvalidBlockId);
					else
						write_block_idx(blockIdx, m_nextFreeBlock);
					m_nextFreeBlock = blockIdx;

					++m_freeBlocks;
					--m_usedBlocks;
				}

				//! Number of blocks currently handed out from this page.
				GAIA_NODISCARD uint32_t used_blocks_cnt() const {
					return m_usedBlocks;
				}

				//! True when no more blocks can be allocated from this page.
				GAIA_NODISCARD bool full() const {
					return used_blocks_cnt() >= NBlocks;
				}

				//! True when no blocks are currently handed out from this page.
				GAIA_NODISCARD bool empty() const {
					return used_blocks_cnt() == 0;
				}
			};
186
187 struct MemoryPageContainer {
189 cnt::fwd_llist<MemoryPage> pagesFree;
191 cnt::fwd_llist<MemoryPage> pagesFull;
192
193 GAIA_NODISCARD bool empty() const {
194 return pagesFree.empty() && pagesFull.empty();
195 }
196 };
197
			//! Allocator serving fixed-size memory blocks from pages, one page container
			//! per block size type. Instantiated only through the ChunkAllocator
			//! dyn_singleton wrapper (constructor is private, wrapper is a friend).
			class ChunkAllocatorImpl {
				friend ::gaia::ecs::ChunkAllocator;

				//! Page containers, one per block size type (see mem_block_size_type)
				MemoryPageContainer m_pages[3];

				//! Set by done(); when true, empty pages are released eagerly on free()
				//! and the allocator deletes itself once the last page is gone.
				bool m_isDone = false;

			private:
				ChunkAllocatorImpl() = default;

				//! Final teardown: releases all empty pages and reports any leaked blocks.
				void on_delete() {
					flush(true);

					// Make sure there are no leaks
					auto memStats = stats();
					for (const auto& s: memStats.stats) {
						if (s.mem_total != 0) {
							GAIA_ASSERT2(false, "ECS leaking memory");
							GAIA_LOG_W("ECS leaking memory!");
							diag();
						}
					}
				}

			public:
				~ChunkAllocatorImpl() {
					on_delete();
				}

				// Non-copyable, non-movable: the singleton's address is encoded into
				// every live memory page. (Parameter name "world" looks like a
				// copy-paste leftover from another class.)
				ChunkAllocatorImpl(ChunkAllocatorImpl&& world) = delete;
				ChunkAllocatorImpl(const ChunkAllocatorImpl& world) = delete;
				ChunkAllocatorImpl& operator=(ChunkAllocatorImpl&&) = delete;
				ChunkAllocatorImpl& operator=(const ChunkAllocatorImpl&) = delete;

				//! Allocates a memory block for the requested number of bytes.
				//! NOTE(review): each block's first MemoryBlockUsableOffset bytes hold the
				//! page back-pointer, so the usable payload is mem_block_size - offset;
				//! confirm callers account for the offset when sizing their requests.
				void* alloc(uint32_t bytesWanted) {
					GAIA_ASSERT(bytesWanted <= MaxMemoryBlockSize);

					void* pBlock = nullptr;
					const auto sizeType = mem_block_size_type(bytesWanted);
					auto& container = m_pages[sizeType];

					// Free list contains only pages with available capacity.
					auto* pPage = container.pagesFree.first;
					GAIA_ASSERT(pPage == nullptr || !pPage->full());
					if (pPage == nullptr) {
						// Allocate a new page if no free page was found
						pPage = alloc_page(sizeType);
						container.pagesFree.link(pPage);
					}

					// Allocate a new chunk of memory
					pBlock = pPage->alloc_block();

					// Handle full pages
					if (pPage->full()) {
						// Remove the page from the open list
						container.pagesFree.unlink(pPage);
						// Move our page to the full list
						container.pagesFull.link(pPage);
					}

					return pBlock;
				}

				GAIA_CLANG_WARNING_PUSH()
				// Memory is aligned so we can silence this warning
				GAIA_CLANG_WARNING_DISABLE("-Wcast-align")

				//! Returns a block previously obtained from alloc() back to its page.
				void free(void* pBlock) {
					// Decode the page from the address (alloc_block stored a back-pointer
					// right before the usable part of the block)
					const auto pageAddr = *(uintptr_t*)((uint8_t*)pBlock - MemoryBlockUsableOffset);
					GAIA_ASSERT(pageAddr % sizeof(uintptr_t) == 0);
					auto* pPage = (MemoryPage*)pageAddr;
					// Capture fullness BEFORE freeing: it tells us which list holds the page
					const bool wasFull = pPage->full();

					auto& container = m_pages[pPage->m_sizeType];

#if GAIA_ASSERT_ENABLED
					if (wasFull) {
						const auto res = container.pagesFull.has(pPage);
						GAIA_ASSERT(res && "Memory page couldn't be found among full pages");
					} else {
						const auto res = container.pagesFree.has(pPage);
						GAIA_ASSERT(res && "Memory page couldn't be found among free pages");
					}
#endif

					// Free the chunk
					pPage->free_block(pBlock);

					// Update lists
					if (wasFull) {
						// Our page is no longer full
						container.pagesFull.unlink(pPage);
						// Move our page to the open list
						container.pagesFree.link(pPage);
					}

					// Special handling for the allocator signaled to destroy itself
					if (m_isDone) {
						// Remove the page right away
						if (pPage->empty()) {
							GAIA_ASSERT(!container.pagesFree.empty());
							container.pagesFree.unlink(pPage);
							free_page(pPage);
						}

						// May "delete this"; must be the last thing this call does
						try_delete_this();
					}
				}

				GAIA_CLANG_WARNING_POP()

				//! Returns usage statistics for every block size type.
				ChunkAllocatorStats stats() const {
					ChunkAllocatorStats stats;
					stats.stats[0] = page_stats(0);
					stats.stats[1] = page_stats(1);
					stats.stats[2] = page_stats(2);
					return stats;
				}

				//! Releases empty pages back to the system.
				//! \param releaseAll When false, the first empty page per container is
				//!                   kept "warm" so the next alloc() avoids a page allocation.
				void flush(bool releaseAll = false) {
					auto flushPages = [releaseAll](MemoryPageContainer& container) {
						bool keptWarmPage = false;
						for (auto it = container.pagesFree.begin(); it != container.pagesFree.end();) {
							auto* pPage = &(*it);
							// Advance before a potential unlink invalidates the iterator
							++it;

							// Skip non-empty pages
							if (!pPage->empty())
								continue;
							if (!releaseAll && !keptWarmPage) {
								keptWarmPage = true;
								continue;
							}

							container.pagesFree.unlink(pPage);
							free_page(pPage);
						}
					};

					for (auto& c: m_pages)
						flushPages(c);
				}

				//! Logs human-readable statistics for each block size type.
				void diag() const {
					auto diagPage = [](const ChunkAllocatorPageStats& stats, uint32_t sizeType) {
						GAIA_LOG_N("ChunkAllocator %uK stats", mem_block_size(sizeType) / 1024);
						GAIA_LOG_N(" Allocated: %" PRIu64 " B", stats.mem_total);
						GAIA_LOG_N(" Used: %" PRIu64 " B", stats.mem_used);
						GAIA_LOG_N(" Overhead: %" PRIu64 " B", stats.mem_total - stats.mem_used);
						GAIA_LOG_N(
								" Utilization: %.1f%%",
								stats.mem_total ? 100.0 * ((double)stats.mem_used / (double)stats.mem_total) : 0);
						GAIA_LOG_N(" Pages: %u", stats.num_pages);
						GAIA_LOG_N(" Free pages: %u", stats.num_pages_free);
					};

					auto memStats = stats();
					diagPage(memStats.stats[0], 0);
					diagPage(memStats.stats[1], 1);
					diagPage(memStats.stats[2], 2);
				}

			private:
				//! Tag strings passed to the tracked allocation helpers
				static constexpr const char* s_strChunkAlloc_Chunk = "Chunk";
				static constexpr const char* s_strChunkAlloc_MemPage = "MemoryPage";

				//! Allocates a new page for the given size type: a 16-byte-aligned data
				//! buffer plus a separately allocated MemoryPage header tracking it.
				static MemoryPage* alloc_page(uint8_t sizeType) {
					const uint32_t size = mem_block_size(sizeType) * MemoryPage::NBlocks;
					auto* pPageData = mem::AllocHelper::alloc_alig<uint8_t>(s_strChunkAlloc_Chunk, 16U, size);
					auto* pMemoryPage = mem::AllocHelper::alloc<MemoryPage>(s_strChunkAlloc_MemPage);
					return new (pMemoryPage) MemoryPage(pPageData, sizeType);
				}

				//! Destroys a page, releasing both its data buffer and its header.
				static void free_page(MemoryPage* pMemoryPage) {
					GAIA_ASSERT(pMemoryPage != nullptr);

					mem::AllocHelper::free_alig(s_strChunkAlloc_Chunk, pMemoryPage->m_data);
					pMemoryPage->~MemoryPage();
					mem::AllocHelper::free(s_strChunkAlloc_MemPage, pMemoryPage);
				}

				//! Signals the allocator to tear itself down once all blocks are freed.
				void done() {
					m_isDone = true;
				}

				//! Deletes the allocator when no pages remain in any container.
				//! NOTE: performs "delete this" - the caller must not touch the object afterwards.
				void try_delete_this() {
					// When there is nothing left, delete the allocator
					bool allEmpty = true;
					for (const auto& c: m_pages)
						allEmpty = allEmpty && c.empty();
					if (allEmpty)
						delete this;
				}

				//! Collects usage statistics for a single block size type.
				ChunkAllocatorPageStats page_stats(uint32_t sizeType) const {
					ChunkAllocatorPageStats stats{};
					const auto& container = m_pages[sizeType];
					const auto blockSize = (size_t)mem_block_size(sizeType);

					stats.num_pages = (uint32_t)container.pagesFree.size() + (uint32_t)container.pagesFull.size();
					stats.num_pages_free = (uint32_t)container.pagesFree.size();
					stats.mem_total = stats.num_pages * blockSize * MemoryPage::NBlocks;
					// Full pages contribute their entire capacity...
					stats.mem_used = container.pagesFull.size() * blockSize * MemoryPage::NBlocks;

					// ...open pages contribute only the blocks actually handed out
					const auto& pagesFree = container.pagesFree;
					for (const auto& page: pagesFree)
						stats.mem_used += page.used_blocks_cnt() * blockSize;
					return stats;
				}
			};
419 } // namespace detail
420
421#endif
422
423 } // namespace ecs
424} // namespace gaia
Checks if endianness was detected correctly at compile-time.
Definition bitset.h:9