#ifndef GTIRB_ALLOCATOR_H
#define GTIRB_ALLOCATOR_H

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>
#ifndef GTIRB_WRAP_UTILS_IN_NAMESPACE
#define GTIRB_DEPRECATED_UTILS                                                 \
  [[deprecated("Define GTIRB_WRAP_UTILS_IN_NAMESPACE and access via the "      \
               "gtirb namespace to suppress this error.")]]
#else
#define GTIRB_DEPRECATED_UTILS
#endif

namespace gtirb {
namespace allocator {
/// Returns true if Value is a (nonzero) power of two.
GTIRB_DEPRECATED_UTILS constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}
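// For example, isPowerOf2_64(64) is true while isPowerOf2_64(0) and
// isPowerOf2_64(96) are false: a power of two has exactly one bit set, so
// subtracting one and masking clears the value to zero.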
84 "Alignment is not a power of two!");
86 assert((uintptr_t)
Addr + Alignment - 1 >= (uintptr_t)
Addr);
88 return (((uintptr_t)
Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
/// Returns the number of bytes by which Ptr must be advanced to reach the
/// given Alignment.
GTIRB_DEPRECATED_UTILS inline size_t alignmentAdjustment(const void* Ptr,
                                                         size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}
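// Worked example: with Alignment == 8, an address of 0x1003 rounds up to
// 0x1008, so alignAddr(Addr, 8) == 0x1008 and alignmentAdjustment(Addr, 8)
// == 5; an already-aligned 0x1008 maps to itself with adjustment 0.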
/// Allocate memory in an ever-growing pool: allocations bump a pointer
/// through fixed-size slabs, and everything is freed at once when the
/// allocator is destroyed.
template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
class GTIRB_DEPRECATED_UTILS BumpPtrAllocatorImpl {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl&& Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    // Leave the moved-from allocator empty so its destructor frees nothing.
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }
  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }
  BumpPtrAllocatorImpl& operator=(BumpPtrAllocatorImpl&& RHS) {
    // Release our own memory before taking over RHS's.
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }
  /// Allocate Size bytes of memory at the specified Alignment.
  void* Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;

    // Fast path: the current slab has enough room left.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char* AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      return AlignedPtr;
    }

    // If the request is larger than the threshold, give it its own slab.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void* NewSlab = std::malloc(PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char* AlignedPtr = (char*)AlignedAddr;
      return AlignedPtr;
    }

    // Otherwise, start a new standard-size slab and allocate from it.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char* AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    return AlignedPtr;
  }
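  // Worked example with the defaults: the first Allocate(100, 8) starts a
  // 4096-byte slab and bumps CurPtr by 100; a following Allocate(5000, 8)
  // has a padded size of 5007, which exceeds the 4096-byte threshold, so it
  // gets its own custom slab and leaves the current slab untouched.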
  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T* Allocate(size_t Num = 1) {
    return static_cast<T*>(Allocate(Num * sizeof(T), alignof(T)));
  }
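  // Note that this returns raw, uninitialized storage; callers construct
  // objects in place, e.g. (Alloc is an illustrative allocator instance):
  //   int* P = Alloc.Allocate<int>(4); // room for four ints, not yet ints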
  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto& PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }
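  // For example, an allocator holding two default 4096-byte slabs and one
  // 10000-byte custom slab reports 4096 + 4096 + 10000 = 18192 bytes here,
  // regardless of how many of those bytes are actually in use.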
private:
  /// The next free byte in the current slab, and one past the slab's end.
  char* CurPtr = nullptr;
  char* End = nullptr;

  /// The standard-size slabs allocated so far.
  std::vector<void*> Slabs;

  /// Oversized allocations, each paired with its allocated size.
  std::vector<std::pair<void*, size_t>> CustomSizedSlabs;

  /// How many bytes we've allocated, used so that we can compute how much
  /// space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under a
  /// sanitizer.
  size_t RedZoneSize = 1;
  static size_t computeSlabSize(size_t SlabIdx) {
    // Scale the allocated slab size based on the number of slabs allocated
    // so far: double it every 128 slabs to reduce allocation frequency,
    // saturating at a 2^30 multiplier.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }
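  // With the default 4096-byte SlabSize, slabs 0-127 are 4096 bytes each,
  // slabs 128-255 are 8192 bytes, slabs 256-383 are 16384 bytes, and so on.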
  /// Allocate a new slab and move the bump pointers over to it.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void* NewSlab = std::malloc(AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char*)(NewSlab);
    End = ((char*)NewSlab) + AllocatedSlabSize;
  }
  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(std::vector<void*>::iterator I,
                       std::vector<void*>::iterator E) {
    for (; I != E; ++I) {
      std::free(*I);
    }
  }
  /// Deallocate all memory for custom-sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto& PtrAndSize : CustomSizedSlabs) {
      std::free(PtrAndSize.first);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator, which just uses the default template
/// parameters.
using BumpPtrAllocator = BumpPtrAllocatorImpl<>;
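// A minimal usage sketch (names are illustrative):
//   BumpPtrAllocator Alloc;
//   void* Raw = Alloc.Allocate(128, 16);  // 128 bytes, 16-byte aligned
//   char* Buf = Alloc.Allocate<char>(64); // typed, uninitialized storage
//   // Everything is released at once when Alloc is destroyed.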
/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated, so that it can call their destructors.
template <typename T> class GTIRB_DEPRECATED_UTILS SpecificBumpPtrAllocator {
public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator&& Old)
      : Allocator(std::move(Old.Allocator)) {}

  ~SpecificBumpPtrAllocator() { DestroyAll(); }
  SpecificBumpPtrAllocator& operator=(SpecificBumpPtrAllocator&& RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }
  /// Allocate space for an array of objects without constructing them.
  T* Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
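  // As above, this hands back raw storage; a typical pattern is
  //   SpecificBumpPtrAllocator<Widget> Alloc;         // Widget is illustrative
  //   Widget* W = new (Alloc.Allocate()) Widget();    // construct in place
  // and Widget's destructor then runs when Alloc is destroyed.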
  /// Abandon the underlying allocator's bookkeeping without freeing its
  /// memory or running destructors, deliberately leaking it. (The enclosing
  /// method's name is elided from this excerpt; ForgetAllocations is assumed.)
  void ForgetAllocations() {
    Allocator.Slabs.clear();
    Allocator.CustomSizedSlabs.clear();
  }
  /// Call the destructor of each allocated object.
  void DestroyAll() {
    auto DestroyElements = [](char* Begin, char* End) {
      assert(Begin == (char*)alignAddr(Begin, alignof(T)));
      // Walk the range in sizeof(T) strides, destroying each object; the
      // bound guards against a partially used tail.
      for (char* Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T*>(Ptr)->~T();
    };
    // Destroy objects in the standard slabs; the last slab is only filled
    // up to CurPtr.
    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char* Begin = (char*)alignAddr(*I, alignof(T));
      char* End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char*)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }
    // Then destroy objects living in the custom-sized slabs.
    for (auto& PtrAndSize : Allocator.CustomSizedSlabs) {
      void* Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char*)alignAddr(Ptr, alignof(T)), (char*)Ptr + Size);
    }
  }

private:
  BumpPtrAllocator Allocator;
};
} // namespace allocator

#ifdef GTIRB_WRAP_UTILS_IN_NAMESPACE
using allocator::SpecificBumpPtrAllocator;
#endif // GTIRB_WRAP_UTILS_IN_NAMESPACE

} // namespace gtirb
#ifndef GTIRB_WRAP_UTILS_IN_NAMESPACE
using gtirb::allocator::SpecificBumpPtrAllocator;
#endif // GTIRB_WRAP_UTILS_IN_NAMESPACE
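// Defining GTIRB_WRAP_UTILS_IN_NAMESPACE (e.g. on the compiler command line)
// confines these utilities to the gtirb namespace and silences the
// deprecation attribute; otherwise the names are also injected at global
// scope, marked deprecated, for backwards compatibility.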
/// Placement new that draws storage from a BumpPtrAllocatorImpl.
template <size_t SlabSize, size_t SizeThreshold>
void* operator new(size_t Size, gtirb::allocator::BumpPtrAllocatorImpl<
                                    SlabSize, SizeThreshold>& Allocator) {
  // The alignment argument is elided from this excerpt;
  // alignof(std::max_align_t) is assumed here as a conservative stand-in.
  return Allocator.Allocate(Size, alignof(std::max_align_t));
}
/// Matching placement delete: memory is reclaimed only when the allocator
/// itself is destroyed, so this is a no-op.
template <size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void*, gtirb::allocator::BumpPtrAllocatorImpl<SlabSize, SizeThreshold>&) {}
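// Usage sketch (Widget is an illustrative type, not part of this header):
//   gtirb::allocator::BumpPtrAllocator Alloc;
//   Widget* W = new (Alloc) Widget();
//   // W's memory is reclaimed when Alloc is destroyed; ~Widget() is not
//   // run unless a SpecificBumpPtrAllocator is used instead.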
#endif // GTIRB_ALLOCATOR_H