Broom 1.0.0
A thread-local C++ Garbage Collector
allocator-inl.h
#pragma once

#include <algorithm>

#include "src/allocator.h"
#include "src/globals.h"
#include "src/macros.h"

namespace broom {

FreeListAllocator::~FreeListAllocator() {
  // Unmap all of our spaces.
  for (const MemorySpace& space : spaces_) {
    UnmapMemorySpace(space);
  }
  for (const MemorySpace& space : large_spaces_) {
    UnmapMemorySpace(space);
  }
}

// Creates a new MemorySpace for use in the allocator. This method can be quite
// slow, so calling it should be the last resort in any allocation strategy.
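//
// For illustration only (the threshold value here is assumed, not taken from
// the sources): with LargeSpaceThreshold() == 64 KiB, a request for 128 KiB
// maps a dedicated large space rounded up to a multiple of the large space
// size, while a request for 512 bytes maps a fresh regular space of the fixed
// regular-space size.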
bool FreeListAllocator::Grow(size_t requested_size,
                             MemorySpace** space_to_update) {
  size_t size_to_grow_by;
  // Figure out what space we need to allocate in.
  if (requested_size >= LargeSpaceThreshold()) {
    // If we are allocating in the large space, round up the size to match an
    // expected multiple of the large space size. (large_space_size_ is an
    // assumed name for that multiple.)
    size_to_grow_by =
        ((requested_size + large_space_size_ - 1) / large_space_size_) *
        large_space_size_;
  } else {
    // space_size_ is an assumed name for the fixed regular-space size.
    BASSERT_LT(requested_size, space_size_,
               "requested_size >= space_size: {} >= {}", requested_size,
               space_size_);
    size_to_grow_by = space_size_;
  }
  // Get the new space and ensure its validity.
  MemorySpace space = MapMemorySpace(size_to_grow_by);
  if (!space.IsValid()) BROOM_UNLIKELY return false;
  BASSERT_LE(size_to_grow_by,
             std::numeric_limits<decltype(stats_.total_mapped_bytes)>::max() -
                 stats_.total_mapped_bytes,
             "total_mapped_bytes would overflow: {}",
             stats_.total_mapped_bytes);
  stats_.total_mapped_bytes += size_to_grow_by;
  if (requested_size >= LargeSpaceThreshold()) {
    large_spaces_.emplace_back(space);
    // Update our currently used large space.
    current_large_space_ = &large_spaces_[large_spaces_.size() - 1];
    *space_to_update = current_large_space_;
    return true;
  } else {
    spaces_.emplace_back(space);
    // Update our currently used regular space.
    current_space_ = &spaces_[spaces_.size() - 1];
    *space_to_update = current_space_;
    return true;
  }
  abort();
}

#ifdef BROOM_DEBUG_SLOW
void FreeListAllocator::PrintList(std::string prefix,
                                  const FreeList& free_list) const {
  std::println("{}", prefix);
  for (const auto& space : free_list) {
    std::println("  {:#x} .. {:#x}", space.Start(), space.End());
  }
}

bool FreeListAllocator::VerifyLists() const {
  absl::btree_set<UintPtr> verifier_front;
  absl::btree_set<UintPtr> verifier_back;
  for (const auto& space : free_list_) {
    if (verifier_front.contains(space.Start()) ||
        verifier_back.contains(space.End())) {
      PrintList("Regular space FreeList:", free_list_);
      PrintList("Large space FreeList:", large_free_list_);
      return false;
    }
    verifier_front.insert(space.Start());
    verifier_back.insert(space.End());
  }

  for (const auto& space : large_free_list_) {
    if (verifier_front.contains(space.Start()) ||
        verifier_back.contains(space.End())) {
      PrintList("Regular space FreeList:", free_list_);
      PrintList("Large space FreeList:", large_free_list_);
      return false;
    }
    verifier_front.insert(space.Start());
    verifier_back.insert(space.End());
  }
  return true;
}
#endif

// Splits a B-tree node into two parts:
// - [start, start + requested_size)
// - [start + requested_size, end)
// The first split is used to fulfill an allocation request of requested_size
// and returned to the caller, while the second split is re-inserted into the
// B-tree as the remaining free space.
//
// Additionally, the metadata is updated as follows:
// - metadata @ (start + requested_size) is first zeroed, and then its size is
//   updated to match the new size. The zeroing is a necessary step because we
//   don't know what kind of data was present there before, and we don't want
//   to accidentally have unexpected bits set in our metadata;
//
// - metadata @ end has its size updated to match the remaining size. No
//   zeroing is needed here because we know that metadata was already in this
//   location, and that it must have been disposed by virtue of it already
//   being in our B-tree.
//
// The real metadata of the first split is not updated, as the allocation is
// not yet considered fulfilled. Doing so is not the job of this method.
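//
// A worked example with illustrative addresses: splitting the free region
// [0x1000, 0x3000) for requested_size == 0x800 hands [0x1000, 0x1800) back to
// the caller and re-inserts [0x1800, 0x3000) into the B-tree; the metadata at
// 0x1800 is zeroed and re-sized, and the back metadata just below 0x3000 has
// its size updated to the remaining 0x1800 bytes.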
void* FreeListAllocator::SplitFreeListEntry(FreeList& free_list,
                                            FreeList::iterator& pos,
                                            size_t requested_size) {
  // First split.
  void* allocated_ptr = pos->StartPtr();
  BDEBUG_LOG("ALLOCATOR: {:#x} .. {:#x} -> {:#x} .. {:#x}\n", pos->Start(),
             pos->End(), pos->Start(), pos->Start() + requested_size);
  auto node = free_list.extract(pos);
  node.value().IncrementStart(requested_size);
  // Second split.
  UintPtr new_ptr = node.value().Start();
  size_t new_size = node.value().Size();
  BDEBUG_LOG("  | {:#x} .. {:#x}\n", node.value().Start(), node.value().End());
  free_list.insert(std::move(node));
  BASSERT_SLOW(VerifyLists(), "Failed to verify lists");
  // Update the metadata.
  {
    // Front metadata of the second split. ("AllocatorMetadata" is an assumed
    // name for the per-region metadata record described in allocator.h.)
    auto* metadata = SafeCast<AllocatorMetadata*>(new_ptr);
    metadata->ZeroBytes();
    metadata->SetSize(new_size);
  }
  {
    // Back metadata of the second split.
    auto* metadata =
        SafeCast<AllocatorMetadata*>(new_ptr + new_size - kBackMetadataSize);
    metadata->SetSize(new_size);
  }
  return allocated_ptr;
}

// Returns a pointer to the start of a memory region that has enough space to
// hold requested_size bytes, or a null pointer if no such region exists. The
// method will first attempt to find an exact match. Failing that, it will
// return the next best match. It also handles empty trees and trees with only
// one region in them.
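//
// For example (sizes illustrative): with free regions of sizes {32, 64, 128},
// a request for 64 returns the 64-byte region whole and erases it from the
// tree, while a request for 48 splits the 64-byte region via
// SplitFreeListEntry, returning its first 48 bytes and re-inserting the
// remainder.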
void* FreeListAllocator::TryAllocateExactOrFindNext(FreeList& free_list,
                                                    FreeList::iterator& pos,
                                                    size_t requested_size) {
  // Since we are trying to extract a region from the tree, first ensure that
  // the tree is valid.
  BASSERT_SLOW(VerifyLists(), "Failed to verify lists");
  // If there are no entries, simply return a null pointer.
  if (pos == free_list.end()) return nullptr;
  if (pos->Size() == requested_size) {
    // Found an exact match; return that and remove it from our tree.
    void* allocated_ptr = pos->StartPtr();
    BDEBUG_LOG("ALLOCATOR: {:#x} .. {:#x}\n", pos->Start(), pos->End());
    free_list.erase(pos);
    return allocated_ptr;
  }
  // If we don't have an exact match, then this is the exact element we should
  // return, but we need to split it in the free list into the allocated
  // portion and free portion.
  BASSERT_GE(pos->Size(), requested_size, "Expected {} < {}", pos->Size(),
             requested_size);
  return SplitFreeListEntry(free_list, pos, requested_size);
}

void* FreeListAllocator::TryAllocateFromFreeListImpl(FreeList& free_list,
                                                     size_t requested_size) {
  // Early exit for no entries.
  if (free_list.size() == 0) return nullptr;
  auto cmp = [](const MemoryRegion& region, size_t requested_size) {
    return region.Size() < requested_size;
  };
  auto it =
      std::lower_bound(free_list.begin(), free_list.end(), requested_size, cmp);
  return TryAllocateExactOrFindNext(free_list, it, requested_size);
}

void* FreeListAllocator::TryAllocateFromFreeList(size_t requested_size) {
  return TryAllocateFromFreeListImpl(free_list_, requested_size);
}

void* FreeListAllocator::TryAllocateFromLargeFreeList(size_t requested_size) {
  return TryAllocateFromFreeListImpl(large_free_list_, requested_size);
}

// Ensures that the current space we're using is filled and that any remaining
// space is added to the corresponding free list.
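//
// For example (constants illustrative): if kTotalMetadataSize + kPointerSize
// comes to 24 bytes and only 16 bytes remain between Top() and End(), the
// leftover can never satisfy an allocation and the space is simply filled;
// with 64 bytes remaining, the region [Top(), End()) is recorded in the free
// list before filling.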
void FreeListAllocator::FillSpaceAndAddToFreeList(FreeList* list,
                                                  MemorySpace* space) {
  // We need at least enough space to fit one pointer and our metadata. Storing
  // anything smaller than that is pointless.
  size_t remaining_bytes = space->RemainingBytes();
  if (remaining_bytes < kTotalMetadataSize + kPointerSize) {
    space->Fill();
    return;
  }
  // We are now sure we can fit this into our free list. Update the metadata in
  // the corresponding locations to maintain consistency and then add it to the
  // correct free list.
  auto* front_metadata = SafeCast<AllocatorMetadata*>(space->Top());
  auto* back_metadata =
      SafeCast<AllocatorMetadata*>(space->End() - kBackMetadataSize);
  BASSERT(!front_metadata->IsCurrentlyAllocated(),
          "Front metadata should not be currently allocated");
  BASSERT(!back_metadata->IsCurrentlyAllocated(),
          "Back metadata should not be currently allocated");
  front_metadata->SetSize(remaining_bytes);
  back_metadata->SetSize(remaining_bytes);

  // Add it to our free list.
  list->emplace(space->Top(), space->End());
  space->Fill();
}

// Attempt to allocate a pointer that can fit requested_size bytes. We still
// don't fill in the metadata of the allocation here. The pointer returned is
// to the start of the region, i.e. to the start of the front metadata of the
// allocation.
//
// The strategy is to first try to allocate from our B-trees rather than use up
// new bytes in our latest allocated space or attempt growing the space.
// Failing that, we try to bump the top pointer in our latest memory space. If
// that also fails, we map a new space altogether.
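//
// Sketched as pseudocode (using the names defined in this file), the fallback
// chain is roughly:
//
//   result = TryAllocateFrom[Large]FreeList(requested_size);  // 1. reuse
//   if (!result) result = current space->Allocate(size);      // 2. bump
//   if (!result) Grow(size, ...), then Allocate(size) again;  // 3. map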
void* FreeListAllocator::AllocateRawPointer(size_t requested_size) {
  // correct_space/correct_list are assumed names for the locals tracking which
  // space and free list this allocation should use.
  MemorySpace** correct_space = nullptr;
  FreeList* correct_list = nullptr;
  // Determine which space this allocation belongs to and attempt to allocate
  // from our B-trees.
  if (requested_size >= LargeSpaceThreshold()) {
    void* result = TryAllocateFromLargeFreeList(requested_size);
    if (result != nullptr) return result;
    correct_space = &current_large_space_;
    correct_list = &large_free_list_;
  } else {
    void* result = TryAllocateFromFreeList(requested_size);
    if (result != nullptr) return result;
    correct_space = &current_space_;
    correct_list = &free_list_;
  }

  // We couldn't find a suitable region in a B-tree; attempt to bump the
  // pointer in our latest space.
  UintPtr maybe_ptr = *correct_space != nullptr
                          ? (*correct_space)->Allocate(requested_size)
                          : kNullUintPtr;
  if (maybe_ptr != kNullUintPtr) {
    return SafeCast<void*>(maybe_ptr);
  }
  // There wasn't enough room in the current space. Bump the top pointer to the
  // end of the space and add the region to the corresponding B-tree. Then, try
  // to map a new space.
  if (*correct_space != nullptr) {
    FillSpaceAndAddToFreeList(correct_list, *correct_space);
  }
  if (!Grow(requested_size, correct_space)) {
    return nullptr;
  }
  BENSURE_NE(*correct_space, nullptr, "correct_space shouldn't be NULL");
  // This should never fail, as we've just finished successfully mapping a
  // space which can fit the amount of bytes requested.
  maybe_ptr = (*correct_space)->Allocate(requested_size);
  BENSURE_NE(maybe_ptr, kNullUintPtr, "Expected a successful allocation");
  BDEBUG_LOG("ALLOCATOR: ALLOCATE {:#x} .. {:#x}\n", maybe_ptr,
             maybe_ptr + requested_size);
  return SafeCast<void*>(maybe_ptr);
}

// Returns a pointer that user data will be stored into (right after the front
// metadata). The method will first attempt to allocate enough space to fit all
// the data we need to store in there, and then update the front and back
// metadata with the correct values.
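//
// The layout implied by the metadata constants in allocator.h is roughly:
//
//   [ front metadata | user data ...             | back metadata ]
//   ^ base           ^ base + kFrontMetadataSize ^ base + real_size
//                                                       - kBackMetadataSize
//
// and the pointer returned to the caller is base + kFrontMetadataSize.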
void* FreeListAllocator::Allocate(size_t requested_size) {
  // Guard very large allocation sizes we cannot handle.
  if (requested_size > MaximumAllocationSize()) BROOM_UNLIKELY
    return nullptr;
  // Compute the real size.
  size_t real_size = GetRealAllocationSize(requested_size);
  UintPtr definitely_pointer =
      SafeCast<UintPtr>(AllocateRawPointer(real_size));
  if (definitely_pointer == kNullUintPtr) return nullptr;
  // Update our metadata.
  {
    auto* metadata = SafeCast<AllocatorMetadata*>(definitely_pointer);
    metadata->MarkAllocated(real_size);
  }
  {
    auto* metadata = SafeCast<AllocatorMetadata*>(
        definitely_pointer + real_size - kBackMetadataSize);
    metadata->MarkAllocated(real_size);
  }
  BASSERT((definitely_pointer + kFrontMetadataSize) % kPointerSize == 0,
          "The allocated pointer ({:#x}) must be pointer aligned.",
          definitely_pointer + kFrontMetadataSize);
  BASSERT_LE(stats_.allocated_bytes,
             std::numeric_limits<decltype(stats_.allocated_bytes)>::max() -
                 real_size,
             "allocated_bytes would overflow = {}", stats_.allocated_bytes);
  BASSERT_GE(stats_.free_bytes, real_size, "free_bytes would underflow = {}",
             stats_.free_bytes);
  // Update our statistics.
  stats_.allocated_bytes += real_size;
  stats_.free_bytes -= real_size;
  // Return the pointer where the real data will be stored.
  return SafeCast<void*>(definitely_pointer + kFrontMetadataSize);
}

// Thin wrappers that dispatch to the shared implementation with the correct
// B-tree.
void FreeListAllocator::AddOrCoalesceFreeList(const MemoryRegion current,
                                              const MemoryRegion previous,
                                              const MemoryRegion next,
                                              const MemoryRegion coalesced) {
  AddOrCoalesceFreeListImpl(free_list_, current, previous, next, coalesced);
}

void FreeListAllocator::AddOrCoalesceLargeFreeList(
    const MemoryRegion current, const MemoryRegion previous,
    const MemoryRegion next, const MemoryRegion coalesced) {
  AddOrCoalesceFreeListImpl(large_free_list_, current, previous, next,
                            coalesced);
}

// Adds an entry to the correct B-tree or coalesces any entries that can be
// coalesced with the region we are attempting to add into the tree. We don't
// need to check if the regions can be coalesced, as we will have done so
// during pointer disposal by manipulating the corresponding allocator
// metadata. Here, we are simply maintaining the B-tree state. There are four
// different cases we need to handle:
// - We can't coalesce the newly available free region, in which case we will
//   simply add it into the corresponding B-tree.
//
// - We are able to coalesce with the region directly following the current
//   region, requiring us to update its start pointer (thereby changing its
//   size) and causing rebalancing in the B-tree.
//
// - We are able to coalesce with the region directly before the current
//   region, requiring us to update its size, rebalancing the B-tree in the
//   process.
//
// - We can coalesce with both the region directly before and following the
//   current region. That case is slightly more complicated than the other
//   three. We handle it by growing the region directly before the current
//   region and erasing the node in the B-tree corresponding to the region
//   directly following the current region. This action also causes tree
//   rebalancing.
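//
// A worked example of the last case, with illustrative addresses: freeing
// [0x2000, 0x3000) while [0x1000, 0x2000) and [0x3000, 0x4000) are already
// free yields coalesced == [0x1000, 0x4000). The node for [0x1000, 0x2000) is
// extracted, grown to end at 0x4000, and re-inserted; the node for
// [0x3000, 0x4000) is then erased from the tree.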
void FreeListAllocator::AddOrCoalesceFreeListImpl(FreeList& free_list,
                                                  const MemoryRegion current,
                                                  const MemoryRegion previous,
                                                  const MemoryRegion next,
                                                  const MemoryRegion coalesced) {
  // Helpers.
  UintPtr start_current = current.Start();
  UintPtr end_current = current.End();
  UintPtr start_previous = previous.Start();
  UintPtr end_previous = previous.End();
  UintPtr start_next = next.Start();
  UintPtr end_next = next.End();
  UintPtr start_coalesced = coalesced.Start();
  UintPtr end_coalesced = coalesced.End();

  // We should never be called with a current region being null.
  BASSERT_NE(start_current, kNullUintPtr, "start_current is NULL");
  BASSERT_NE(end_current, kNullUintPtr, "end_current is NULL");
  if (start_current == start_coalesced && end_current == end_coalesced) {
    // We didn't coalesce anything, just add the new entry.
    BDEBUG_LOG("ALLOCATOR: FreeList ++ {:#x} .. {:#x}\n", start_current,
               end_current);
    free_list.emplace(start_current, end_current);
  } else if (start_current == start_coalesced) {
    // Coalesced the next region. Fetch the next node and update it.
    BASSERT_NE(start_next, kNullUintPtr, "start_next is NULL");
    BASSERT_NE(end_next, kNullUintPtr, "end_next is NULL");
    const MemoryRegion region_to_find(start_next, end_next);
    FreeList::iterator it = FindExactRegion(free_list, region_to_find);
    // [start_current, end_current) ++ [start_next, end_next)
    //     -> [start_current, end_next)
    BASSERT_NE(it, free_list.end(), "Couldn't find {:#x} .. {:#x}", start_next,
               end_next);
    auto node = free_list.extract(it);
    // SetStart/SetEnd are assumed names for MemoryRegion's boundary mutators.
    node.value().SetStart(start_current);
    BDEBUG_LOG(
        "ALLOCATOR: Coalesce({:#x} .. {:#x}, {:#x} .. {:#x}) -> {:#x} .. "
        "{:#x}\n",
        start_current, end_current, start_next, end_next, node.value().Start(),
        node.value().End());
    free_list.insert(std::move(node));
  } else if (end_current == end_coalesced) {
    // Coalesced with the previous region. Update that node with the correct
    // information.
    BASSERT_NE(start_previous, kNullUintPtr, "start_previous is NULL");
    BASSERT_NE(end_previous, kNullUintPtr, "end_previous is NULL");
    const MemoryRegion region_to_find(start_previous, end_previous);
    FreeList::iterator it = FindExactRegion(free_list, region_to_find);
    // [start_previous, end_previous) ++ [start_current, end_current)
    //     -> [start_previous, end_current)
    BASSERT_NE(it, free_list.end(), "Couldn't find {:#x} .. {:#x}",
               start_previous, end_previous);
    auto node = free_list.extract(it);
    node.value().SetEnd(end_current);
    BDEBUG_LOG(
        "ALLOCATOR: Coalesce({:#x} .. {:#x}, {:#x} .. {:#x}) -> {:#x} .. "
        "{:#x}\n",
        start_previous, end_previous, start_current, end_current,
        node.value().Start(), node.value().End());
    free_list.insert(std::move(node));
  } else {
    // Coalesced both. Need to squash the two nodes into one.
    BASSERT_NE(start_next, kNullUintPtr, "start_next is NULL");
    BASSERT_NE(end_next, kNullUintPtr, "end_next is NULL");
    BASSERT_NE(start_previous, kNullUintPtr, "start_previous is NULL");
    BASSERT_NE(end_previous, kNullUintPtr, "end_previous is NULL");
    FreeList::iterator it_previous =
        FindExactRegion(free_list, MemoryRegion(start_previous, end_previous));
    if (BROOM_DEBUG_BOOL) {
      BROOM_UNUSED FreeList::iterator it_next =
          FindExactRegion(free_list, MemoryRegion(start_next, end_next));
      BASSERT_NE(it_next, free_list.end(),
                 "Couldn't find next range: {:#x} .. {:#x}", start_next,
                 end_next);
    }
    // [start_previous, end_previous) ++
    //     [start_current, end_current) ++
    //     [start_next, end_next)
    //     -> [start_previous, end_next)
    BASSERT_NE(it_previous, free_list.end(),
               "Couldn't find previous range: {:#x} .. {:#x}", start_previous,
               end_previous);
    auto node = free_list.extract(it_previous);
    node.value().SetEnd(end_next);
    BDEBUG_LOG(
        "ALLOCATOR: Coalesce({:#x} .. {:#x}, {:#x} .. {:#x}, {:#x} .. {:#x}) "
        "->"
        " {:#x} .. {:#x}, remove: {:#x}, {:#x}\n",
        start_previous, end_previous, start_current, end_current, start_next,
        end_next, node.value().Start(), node.value().End(), start_next,
        end_next);
    free_list.insert(std::move(node));
    // Erase the next region as we have coalesced it into the previous one.
    free_list.erase(
        FindExactRegion(free_list, MemoryRegion(start_next, end_next)));
  }
  BASSERT_SLOW(VerifyLists(), "Failed to verify lists");
}

// The main meat of the disposal for FreeListAllocator. The strategy for
// disposal is to first find the space which the pointer belongs to, so that we
// have a start and end of the space we can check. We then check for two
// things:
// - Can the memory region immediately preceding the region described by the
//   pointer we are currently disposing be coalesced with the current one;
//
// - Ditto for the region immediately following it.
//
// After we determine that a region can be coalesced, we attempt to update its
// metadata to do so and keep track of the start and end of the coalesced
// region. Once we have checked both the previous and next region and updated
// any metadata that needed updating, we call into a helper method to update
// our B-tree state.
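//
// For example (addresses illustrative): disposing [0x2000, 0x2800) when the
// back metadata just below 0x2000 describes an already-disposed neighbour
// [0x1800, 0x2000) grows that neighbour's front metadata to cover the freed
// bytes, grows our back metadata to match, and hands [0x1800, 0x2800) to the
// B-tree update below as the coalesced region.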
void FreeListAllocator::DisposeRawPointer(void* definitely_pointer) {
  if (definitely_pointer == nullptr) return;
  BASSERT_SLOW(VerifyLists(), "Failed to verify lists");
  UintPtr ptr_addr = SafeCast<UintPtr>(definitely_pointer);
  UintPtr space_start = kNullUintPtr;
  UintPtr space_end = kNullUintPtr;
  // Look through our large spaces to find the pointer.
  for (const MemorySpace& space : large_spaces_) {
    if (space.Start() <= ptr_addr && ptr_addr < space.End()) {
      space_start = space.Start();
      space_end = space.End();
      break;
    }
  }
  const bool in_large_space = space_start != kNullUintPtr;
  if (space_start == kNullUintPtr) {
    // If we haven't found the pointer in one of our large spaces, we know that
    // regular spaces are always sized equally, so we can simply mask the
    // pointer to the start of our space.
    space_start = ptr_addr & SpaceMask(space_size_);
    space_end = space_start + space_size_;
  }
  BASSERT_NE(space_start, kNullUintPtr, "Expected to find a space");
  // Get our allocator metadata. We need both the top and bottom to attempt to
  // coalesce the free list.
  UintPtr to_dispose = ptr_addr - kFrontMetadataSize;
  auto* metadata = SafeCast<AllocatorMetadata*>(to_dispose);
  size_t current_size = metadata->Size();
  BASSERT_GE(stats_.allocated_bytes, current_size,
             "allocated_bytes would underflow = {}", stats_.allocated_bytes);
  BASSERT_LE(stats_.free_bytes,
             std::numeric_limits<decltype(stats_.free_bytes)>::max() -
                 current_size,
             "free_bytes would overflow = {}", stats_.free_bytes);
  // Update our statistics.
  stats_.allocated_bytes -= current_size;
  stats_.free_bytes += current_size;
  auto* metadata_bottom = SafeCast<AllocatorMetadata*>(
      to_dispose + current_size - kBackMetadataSize);
  BDEBUG_LOG("ALLOCATOR: Dispose {:#x} .. {:#x}\n", to_dispose,
             to_dispose + current_size);
  // Dispose both the top and bottom metadata of the current allocation.
  metadata->MarkDisposed();
  metadata_bottom->MarkDisposed();
  // Helpers.
  UintPtr start_current = to_dispose;
  UintPtr end_current = to_dispose + current_size;
  UintPtr start_previous = kNullUintPtr;
  UintPtr end_previous = kNullUintPtr;
  UintPtr start_next = kNullUintPtr;
  UintPtr end_next = kNullUintPtr;
  UintPtr start_coalesced = start_current;
  UintPtr end_coalesced = end_current;

  if (to_dispose > space_start) {
    // We aren't at the start of the space, therefore there must be a previous
    // region and we can check its metadata.
    auto* previous =
        SafeCast<AllocatorMetadata*>(to_dispose - kBackMetadataSize);
    BASSERT(previous->IsValid(), "Expected {:#x} to be valid",
            SafeCast<UintPtr>(previous));
    auto* previous_top =
        SafeCast<AllocatorMetadata*>(to_dispose - previous->Size());
    // Keep track of the previous region's start and end.
    start_previous = to_dispose - previous->Size();
    end_previous = to_dispose;
    // Attempt to coalesce the current disposed allocation into the previous
    // one.
    if (!previous->IsCurrentlyAllocated()) {
      BDEBUG_LOG("ALLOCATOR: Attempt coalesce with previous: {:#x} .. {:#x}\n",
                 start_previous, end_previous);
      // Attempt to grow the allocation that was maybe disposed before our
      // current allocation. If this succeeds, we can proceed to grow the
      // bottom metadata as well.
      if (previous_top->AttemptGrow(current_size)) {
        // We managed to grow the top metadata. Now grow the bottom of the
        // current allocation. This should always succeed.
        BENSURE(metadata_bottom->AttemptGrow(previous->Size()),
                "Failed to grow metadata_bottom");
        // Update the start of our coalesced region.
        start_coalesced = start_previous;
        metadata->ZeroBytes();
        metadata = previous_top;
      }
    }
  }

  // If our current region doesn't end at the end of the space, we must either
  // have a next region, or zeroes.
  if (end_current < space_end) {
    // Now attempt to coalesce the current allocation with the next allocation.
    auto* next = SafeCast<AllocatorMetadata*>(end_current);
    // Effectively checks if the metadata exists, or if we're hitting zeroes.
    if (next->IsValid()) {
      auto* next_bottom = SafeCast<AllocatorMetadata*>(
          SafeCast<UintPtr>(next) + next->Size() -
          kBackMetadataSize);
      // Keep track of the start and end of the next region.
      start_next = end_current;
      end_next = start_next + next->Size();
      // We need to check if it's initialised because we might be at the end of
      // our currently allocated area, and we rely on that being zeroed.
      if (!next->IsCurrentlyAllocated()) {
        BDEBUG_LOG("ALLOCATOR: Attempt coalesce with next: {:#x} .. {:#x}\n",
                   start_next, end_next);
        BASSERT_LE(SafeCast<UintPtr>(next_bottom), space_end,
                   "Expected next_bottom to be less than or equal to the "
                   "space end: {:#x} > {:#x}",
                   SafeCast<UintPtr>(next_bottom), space_end);
        // Attempt to grow the current allocation.
        if (metadata->AttemptGrow(next->Size())) {
          BENSURE(next_bottom->AttemptGrow(metadata_bottom->Size()),
                  "Failed to grow next_bottom");
          // Update the end of our coalesced region.
          end_coalesced = end_next;
          metadata_bottom->ZeroBytes();
          metadata_bottom = next_bottom;
        }
      }
    }
  }

  // Finally, coalesce the regions in our B-trees.
  const MemoryRegion current(start_current, end_current);
  const MemoryRegion previous_region(start_previous, end_previous);
  const MemoryRegion next_region(start_next, end_next);
  const MemoryRegion coalesced(start_coalesced, end_coalesced);
  if (in_large_space) {
    AddOrCoalesceLargeFreeList(current, previous_region, next_region,
                               coalesced);
  } else {
    AddOrCoalesceFreeList(current, previous_region, next_region, coalesced);
  }
}

void FreeListAllocator::Dispose(void* definitely_uintptr) {
  DisposeRawPointer(definitely_uintptr);
}

// A helper function to figure out a base pointer (the one returned by our
// allocation method) from a possible inner pointer into an object. The GC
// needs this to operate during scanning. Our strategy here is to simply
// iterate through the spaces in order to determine which space contains our
// pointer, and then loop through the space looking for our pointer. Since our
// metadata is structured in a way where the front metadata holds the exact
// size of the current region, we can simply iterate region by region.
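//
// For example (addresses illustrative): given an inner pointer 0x1450 into a
// live region whose front metadata sits at 0x1400, the walk stops at that
// region and returns 0x1400 + kFrontMetadataSize. A pointer into a disposed
// region, or into the metadata itself, yields nullptr instead.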
const void* FreeListAllocator::GetBasePointerOfMaybeInnerPointer(
    const void* maybe_inner) const {
  if (maybe_inner == nullptr) return nullptr;
  UintPtr ptr_addr = SafeCast<UintPtr>(maybe_inner);
  UintPtr space_start = kNullUintPtr;
  UintPtr space_end = kNullUintPtr;
  // Find the space which contains our pointer.
  for (const MemorySpace& space : spaces_) {
    if (space.Start() <= ptr_addr && ptr_addr < space.End()) {
      space_start = space.Start();
      space_end = space.End();
      break;
    }
  }
  if (space_start == kNullUintPtr) {
    for (const MemorySpace& space : large_spaces_) {
      if (space.Start() <= ptr_addr && ptr_addr < space.End()) {
        space_start = space.Start();
        space_end = space.End();
        break;
      }
    }
  }
  // No space, meaning this isn't a pointer allocated by this allocator.
  if (space_start == kNullUintPtr) return nullptr;
  const void* base_pointer = nullptr;
  size_t size_to_add = 0;
  // Now iterate through the regions, looking for the region that our pointer
  // is a part of.
  for (UintPtr current = space_start; current < space_end;
       current += size_to_add) {
    auto* metadata = SafeCast<const AllocatorMetadata*>(current);
    size_t current_chunk_size = metadata->Size();
    // We scanned a value that just happens to pretend to be a pointer into our
    // space, but we've obviously gone past the end of our currently allocated
    // space.
    if (current_chunk_size == 0) {
      return nullptr;
    }
    size_to_add = current_chunk_size;
    // Keep walking until we reach the region that contains our pointer.
    if (ptr_addr >= current + current_chunk_size) continue;
    // This is a pointer somewhere into the metadata, so it shouldn't have been
    // a managed pointer actually used by the application.
    if (ptr_addr < current + kFrontMetadataSize) {
      return nullptr;
    }
    if (metadata->IsCurrentlyAllocated()) {
      // We've found our pointer.
      base_pointer = SafeCast<const void*>(current + kFrontMetadataSize);
      break;
    } else {
      // This pointer has already been freed, so there's no base pointer for
      // it. Simply return null.
      //
      // This early bailout isn't strictly necessary and is done for
      // performance reasons.
      return nullptr;
    }
  }
  return base_pointer;
}
}  // namespace broom
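
A minimal usage sketch of the interface implemented above (not part of the
Broom sources; the default construction of FreeListAllocator and the header
path are assumptions):

  #include "src/allocator.h"

  int main() {
    broom::FreeListAllocator allocator;  // hypothetical default construction
    // Allocate room for 64 bytes of user data; the returned pointer sits just
    // past the front metadata of the region.
    void* p = allocator.Allocate(64);
    if (p == nullptr) return 1;
    // During GC scanning, a pointer into the middle of a live object resolves
    // back to the pointer originally returned by Allocate().
    const void* inner = static_cast<const char*>(p) + 10;
    const void* base = allocator.GetBasePointerOfMaybeInnerPointer(inner);
    // Dispose coalesces the region back into the allocator's free lists.
    allocator.Dispose(p);
    return base == p ? 0 : 1;
  }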