Gaia-ECS v0.9.3
A simple and powerful entity component system
Loading...
Searching...
No Matches
chunk_allocator.h
1#pragma once
2#include "gaia/config/config.h"
3
4#include <cinttypes>
5#include <cstdint>
6
7#include "gaia/cnt/fwd_llist.h"
8#include "gaia/cnt/sarray.h"
9#include "gaia/core/bit_utils.h"
10#include "gaia/core/dyn_singleton.h"
11#include "gaia/core/utility.h"
12#include "gaia/mem/mem_alloc.h"
13#include "gaia/util/logging.h"
14
15namespace gaia {
16 namespace ecs {
17 static constexpr uint32_t MinMemoryBlockSize = 1024 * 8;
19 static constexpr uint32_t MaxMemoryBlockSize = MinMemoryBlockSize * 4;
21 static constexpr uint32_t MemoryBlockUsableOffset = sizeof(uintptr_t);
22
23 constexpr uint16_t mem_block_size(uint32_t sizeType) {
24 constexpr uint16_t sizes[] = {MinMemoryBlockSize, MinMemoryBlockSize * 2, MaxMemoryBlockSize};
25 return sizes[sizeType];
26 }
27
28 constexpr uint8_t mem_block_size_type(uint32_t sizeBytes) {
29 // Ceil division by smallest block size
30 const uint32_t blocks = (sizeBytes + MinMemoryBlockSize) / MinMemoryBlockSize;
31 return blocks > 2 ? 2 : static_cast<uint8_t>(blocks - 1);
32 }
33
34#if GAIA_ECS_CHUNK_ALLOCATOR
		//! Statistics for one block-size pool of the chunk allocator.
		struct GAIA_API ChunkAllocatorPageStats final {
			//! Total memory allocated from the system, in bytes
			uint64_t mem_total;
			//! Memory accounted to blocks in use, in bytes (full pages counted in full)
			uint64_t mem_used;
			//! Total number of allocated pages
			uint32_t num_pages;
			//! Number of pages that still have free blocks
			uint32_t num_pages_free;
		};
45
		//! Aggregated chunk allocator statistics, one entry per block size type.
		struct GAIA_API ChunkAllocatorStats final {
			//! Stats for each block size type (index 0 = 8K, 1 = 16K, 2 = 32K pool)
			ChunkAllocatorPageStats stats[3];
		};
49
		namespace detail {
			class ChunkAllocatorImpl;
		}
		//! Chunk allocator exposed as a lazily-constructed singleton wrapper
		using ChunkAllocator = core::dyn_singleton<detail::ChunkAllocatorImpl>;
54
55 namespace detail {
56
58 class ChunkAllocatorImpl {
59 friend ::gaia::ecs::ChunkAllocator;
60
61 struct MemoryPageHeader {
63 void* m_data;
64
65 MemoryPageHeader(void* ptr): m_data(ptr) {}
66 };
67
68 struct MemoryPage: MemoryPageHeader, cnt::fwd_llist_base<MemoryPage> {
69 static constexpr uint16_t NBlocks = 48;
70 static constexpr uint16_t NBlocks_Bits = (uint16_t)core::count_bits(NBlocks);
71 static constexpr uint32_t InvalidBlockId = NBlocks + 1;
72 static constexpr uint32_t BlockArrayBytes = ((uint32_t)NBlocks_Bits * (uint32_t)NBlocks + 7) / 8;
73 using BlockArray = cnt::sarray<uint8_t, BlockArrayBytes>;
74 using BitView = core::bit_view<NBlocks_Bits>;
75
77 BlockArray m_blocks;
78
80 uint32_t m_sizeType : 2;
82 uint32_t m_blockCnt : NBlocks_Bits;
84 uint32_t m_usedBlocks : NBlocks_Bits;
86 uint32_t m_nextFreeBlock : NBlocks_Bits;
88 uint32_t m_freeBlocks : NBlocks_Bits;
90 // uint32_t m_unused : 6;
91
92 MemoryPage(void* ptr, uint8_t sizeType):
93 MemoryPageHeader(ptr), m_sizeType(sizeType), m_blockCnt(0), m_usedBlocks(0), m_nextFreeBlock(0),
94 m_freeBlocks(0) {
95 // One cacheline long on x86. The point is for this to be as small as possible
96 static_assert(sizeof(MemoryPage) <= 64);
97 }
98
99 void write_block_idx(uint32_t blockIdx, uint32_t value) {
100 const uint32_t bitPosition = blockIdx * NBlocks_Bits;
101
102 GAIA_ASSERT(bitPosition < NBlocks * NBlocks_Bits);
103 GAIA_ASSERT(value <= InvalidBlockId);
104
105 BitView{{(uint8_t*)m_blocks.data(), BlockArrayBytes}}.set(bitPosition, (uint8_t)value);
106 }
107
108 uint8_t read_block_idx(uint32_t blockIdx) const {
109 const uint32_t bitPosition = blockIdx * NBlocks_Bits;
110
111 GAIA_ASSERT(bitPosition < NBlocks * NBlocks_Bits);
112
113 return BitView{{(uint8_t*)m_blocks.data(), BlockArrayBytes}}.get(bitPosition);
114 }
115
116 GAIA_NODISCARD void* alloc_block() {
117 auto StoreBlockAddress = [&](uint32_t index) {
118 // Encode info about chunk's page in the memory block.
119 // The actual pointer returned is offset by MemoryBlockUsableOffset bytes
120 uint8_t* pMemoryBlock = (uint8_t*)m_data + (index * mem_block_size(m_sizeType));
121 GAIA_ASSERT((uintptr_t)pMemoryBlock % sizeof(uintptr_t) == 0);
122 mem::unaligned_ref<uintptr_t>{pMemoryBlock} = (uintptr_t)this;
123 return (void*)(pMemoryBlock + MemoryBlockUsableOffset);
124 };
125
126 // We don't want to go out of range for new blocks
127 GAIA_ASSERT(!full() && "Trying to allocate too many blocks!");
128
129 if (m_freeBlocks == 0U) {
130 const auto index = m_blockCnt;
131 ++m_usedBlocks;
132 ++m_blockCnt;
133 write_block_idx(index, index);
134
135 return StoreBlockAddress(index);
136 }
137
138 GAIA_ASSERT(m_nextFreeBlock < m_blockCnt && "Block allocator recycle list broken!");
139
140 ++m_usedBlocks;
141 --m_freeBlocks;
142
143 const auto index = m_nextFreeBlock;
144 m_nextFreeBlock = read_block_idx(m_nextFreeBlock);
145
146 return StoreBlockAddress(index);
147 }
148
149 void free_block(void* pBlock) {
150 GAIA_ASSERT(m_usedBlocks > 0);
151 GAIA_ASSERT(m_freeBlocks <= NBlocks);
152
153 // Offset the chunk memory so we get the real block address
154 const auto* pMemoryBlock = (uint8_t*)pBlock - MemoryBlockUsableOffset;
155 const auto blckAddr = (uintptr_t)pMemoryBlock;
156 GAIA_ASSERT(blckAddr % sizeof(uintptr_t) == 0);
157 const auto dataAddr = (uintptr_t)m_data;
158 const auto blockIdx = (uint32_t)((blckAddr - dataAddr) / mem_block_size(m_sizeType));
159
160 // Update our implicit list
161 if (m_freeBlocks == 0U)
162 write_block_idx(blockIdx, InvalidBlockId);
163 else
164 write_block_idx(blockIdx, m_nextFreeBlock);
165 m_nextFreeBlock = blockIdx;
166
167 ++m_freeBlocks;
168 --m_usedBlocks;
169 }
170
171 GAIA_NODISCARD uint32_t used_blocks_cnt() const {
172 return m_usedBlocks;
173 }
174
175 GAIA_NODISCARD bool full() const {
176 return used_blocks_cnt() >= NBlocks;
177 }
178
179 GAIA_NODISCARD bool empty() const {
180 return used_blocks_cnt() == 0;
181 }
182 };
183
184 struct MemoryPageContainer {
186 cnt::fwd_llist<MemoryPage> pagesFree;
188 cnt::fwd_llist<MemoryPage> pagesFull;
189
190 GAIA_NODISCARD bool empty() const {
191 return pagesFree.empty() && pagesFull.empty();
192 }
193 };
194
196 MemoryPageContainer m_pages[3];
197
199 bool m_isDone = false;
200
201 private:
202 ChunkAllocatorImpl() = default;
203
204 void on_delete() {
205 flush();
206
207 // Make sure there are no leaks
208 auto memStats = stats();
209 for (const auto& s: memStats.stats) {
210 if (s.mem_total != 0) {
211 GAIA_ASSERT2(false, "ECS leaking memory");
212 GAIA_LOG_W("ECS leaking memory!");
213 diag();
214 }
215 }
216 }
217
218 public:
219 ~ChunkAllocatorImpl() {
220 on_delete();
221 }
222
223 ChunkAllocatorImpl(ChunkAllocatorImpl&& world) = delete;
224 ChunkAllocatorImpl(const ChunkAllocatorImpl& world) = delete;
225 ChunkAllocatorImpl& operator=(ChunkAllocatorImpl&&) = delete;
226 ChunkAllocatorImpl& operator=(const ChunkAllocatorImpl&) = delete;
227
229 void* alloc(uint32_t bytesWanted) {
230 GAIA_ASSERT(bytesWanted <= MaxMemoryBlockSize);
231
232 void* pBlock = nullptr;
233 MemoryPage* pPage = nullptr;
234
235 const auto sizeType = mem_block_size_type(bytesWanted);
236 auto& container = m_pages[sizeType];
237
238 // Find first page with available space
239 for (auto& page: container.pagesFree) {
240 if (page.full())
241 continue;
242 pPage = &page;
243 break;
244 }
245 if (pPage == nullptr) {
246 // Allocate a new page if no free page was found
247 pPage = alloc_page(sizeType);
248 container.pagesFree.link(pPage);
249 }
250
251 // Allocate a new chunk of memory
252 pBlock = pPage->alloc_block();
253
254 // Handle full pages
255 if (pPage->full()) {
256 // Remove the page from the open list
257 container.pagesFree.unlink(pPage);
258 // Move our page to the full list
259 container.pagesFull.link(pPage);
260 }
261
262 return pBlock;
263 }
264
265 GAIA_CLANG_WARNING_PUSH()
266 // Memory is aligned so we can silence this warning
267 GAIA_CLANG_WARNING_DISABLE("-Wcast-align")
268
270 void free(void* pBlock) {
271 // Decode the page from the address
272 const auto pageAddr = *(uintptr_t*)((uint8_t*)pBlock - MemoryBlockUsableOffset);
273 GAIA_ASSERT(pageAddr % sizeof(uintptr_t) == 0);
274 auto* pPage = (MemoryPage*)pageAddr;
275 const bool wasFull = pPage->full();
276
277 auto& container = m_pages[pPage->m_sizeType];
278
279 #if GAIA_ASSERT_ENABLED
280 if (wasFull) {
281 const auto res = container.pagesFull.has(pPage);
282 GAIA_ASSERT(res && "Memory page couldn't be found among full pages");
283 } else {
284 const auto res = container.pagesFree.has(pPage);
285 GAIA_ASSERT(res && "Memory page couldn't be found among free pages");
286 }
287 #endif
288
289 // Free the chunk
290 pPage->free_block(pBlock);
291
292 // Update lists
293 if (wasFull) {
294 // Our page is no longer full
295 container.pagesFull.unlink(pPage);
296 // Move our page to the open list
297 container.pagesFree.link(pPage);
298 }
299
300 // Special handling for the allocator signaled to destroy itself
301 if (m_isDone) {
302 // Remove the page right away
303 if (pPage->empty()) {
304 GAIA_ASSERT(!container.pagesFree.empty());
305 container.pagesFree.unlink(pPage);
306 }
307
308 try_delete_this();
309 }
310 }
311
312 GAIA_CLANG_WARNING_POP()
313
314
315 ChunkAllocatorStats stats() const {
316 ChunkAllocatorStats stats;
317 stats.stats[0] = page_stats(0);
318 stats.stats[1] = page_stats(1);
319 stats.stats[2] = page_stats(2);
320 return stats;
321 }
322
324 void flush() {
325 auto flushPages = [](MemoryPageContainer& container) {
326 for (auto it = container.pagesFree.begin(); it != container.pagesFree.end();) {
327 auto* pPage = &(*it);
328 ++it;
329
330 // Skip non-empty pages
331 if (!pPage->empty())
332 continue;
333
334 container.pagesFree.unlink(pPage);
335 free_page(pPage);
336 }
337 };
338
339 for (auto& c: m_pages)
340 flushPages(c);
341 }
342
344 void diag() const {
345 auto diagPage = [](const ChunkAllocatorPageStats& stats, uint32_t sizeType) {
346 GAIA_LOG_N("ChunkAllocator %uK stats", mem_block_size(sizeType) / 1024);
347 GAIA_LOG_N(" Allocated: %" PRIu64 " B", stats.mem_total);
348 GAIA_LOG_N(" Used: %" PRIu64 " B", stats.mem_total - stats.mem_used);
349 GAIA_LOG_N(" Overhead: %" PRIu64 " B", stats.mem_used);
350 GAIA_LOG_N(
351 " Utilization: %.1f%%",
352 stats.mem_total ? 100.0 * ((double)stats.mem_used / (double)stats.mem_total) : 0);
353 GAIA_LOG_N(" Pages: %u", stats.num_pages);
354 GAIA_LOG_N(" Free pages: %u", stats.num_pages_free);
355 };
356
357 auto memStats = stats();
358 diagPage(memStats.stats[0], 0);
359 diagPage(memStats.stats[1], 1);
360 diagPage(memStats.stats[2], 2);
361 }
362
363 private:
364 static constexpr const char* s_strChunkAlloc_Chunk = "Chunk";
365 static constexpr const char* s_strChunkAlloc_MemPage = "MemoryPage";
366
367 static MemoryPage* alloc_page(uint8_t sizeType) {
368 const uint32_t size = mem_block_size(sizeType) * MemoryPage::NBlocks;
369 auto* pPageData = mem::AllocHelper::alloc_alig<uint8_t>(s_strChunkAlloc_Chunk, 16U, size);
370 auto* pMemoryPage = mem::AllocHelper::alloc<MemoryPage>(s_strChunkAlloc_MemPage);
371 return new (pMemoryPage) MemoryPage(pPageData, sizeType);
372 }
373
374 static void free_page(MemoryPage* pMemoryPage) {
375 GAIA_ASSERT(pMemoryPage != nullptr);
376
377 mem::AllocHelper::free_alig(s_strChunkAlloc_Chunk, pMemoryPage->m_data);
378 pMemoryPage->~MemoryPage();
379 mem::AllocHelper::free(s_strChunkAlloc_MemPage, pMemoryPage);
380 }
381
382 void done() {
383 m_isDone = true;
384 }
385
386 void try_delete_this() {
387 // When there is nothing left, delete the allocator
388 bool allEmpty = true;
389 for (const auto& c: m_pages)
390 allEmpty = allEmpty && c.empty();
391 if (allEmpty)
392 delete this;
393 }
394
395 ChunkAllocatorPageStats page_stats(uint32_t sizeType) const {
396 ChunkAllocatorPageStats stats{};
397 const auto& container = m_pages[sizeType];
398
399 stats.num_pages = (uint32_t)container.pagesFree.size() + (uint32_t)container.pagesFull.size();
400 stats.num_pages_free = (uint32_t)container.pagesFree.size();
401 stats.mem_total = stats.num_pages * (size_t)mem_block_size(sizeType) * MemoryPage::NBlocks;
402 stats.mem_used = container.pagesFull.size() * (size_t)mem_block_size(sizeType) * MemoryPage::NBlocks;
403
404 const auto& pagesFree = container.pagesFree;
405 for (const auto& page: pagesFree)
406 stats.mem_used += page.used_blocks_cnt() * (size_t)MaxMemoryBlockSize;
407 return stats;
408 }
409 };
410 } // namespace detail
411
412#endif
413
414 } // namespace ecs
415} // namespace gaia
Checks if endianness was detected correctly at compile-time.
Definition bitset.h:9