GTIRB v2.2.0
GrammaTech Intermediate Representation for Binaries: C++ API
Allocator.hpp
//===- Allocator.hpp --------------------------------------------*- C++ -*-===//
//
// Copyright (C) 2020 GrammaTech, Inc.
//
// This code is licensed under the MIT license. See the LICENSE file in the
// project root for license terms.
//
// This project is sponsored by the Office of Naval Research, One Liberty
// Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
// N68335-17-C-0700. The content of the information does not necessarily
// reflect the position or policy of the Government and no official
// endorsement should be inferred.
//
//===- Additional License Information ---------------------------------------===//
//
// This file was initially written for the LLVM Compiler Infrastructure
// project where it is distributed under the University of Illinois Open Source
// License. See the LICENSE file in the project root for license terms.
//
//===----------------------------------------------------------------------===//
#ifndef GTIRB_ALLOCATOR_H
#define GTIRB_ALLOCATOR_H

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

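// Unless GTIRB_WRAP_UTILS_IN_NAMESPACE is defined, the utilities in this file
// are marked deprecated; the attribute message tells clients how to opt in to
// the gtirb-namespaced names instead.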
#ifndef GTIRB_WRAP_UTILS_IN_NAMESPACE
#define GTIRB_DEPRECATED_UTILS                                                 \
  [[deprecated("Define GTIRB_WRAP_UTILS_IN_NAMESPACE and access via the "      \
               "gtirb namespace to suppress this error.")]]
#else
#define GTIRB_DEPRECATED_UTILS
#endif

namespace gtirb {

// We want clients to use the names in the gtirb namespace, so we exclude
// the allocator namespace when generating documentation.
namespace allocator {

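/// Returns the next power of two (in 64 bits) that is strictly greater than
/// A. Returns zero on overflow. For example, NextPowerOf2(4) returns 8.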
GTIRB_DEPRECATED_UTILS inline uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}

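/// Return true if the argument is a power of two greater than zero (64-bit
/// edition).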
GTIRB_DEPRECATED_UTILS constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}

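/// Aligns Addr to the specified Alignment bytes, rounding up. Alignment
/// should be a power of two. This method rounds up, so alignAddr(7, 4) == 8
/// and alignAddr(8, 4) == 8.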
GTIRB_DEPRECATED_UTILS inline uintptr_t alignAddr(const void* Addr,
                                                  size_t Alignment) {
  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
         "Alignment is not a power of two!");

  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);

  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}

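/// Returns the necessary adjustment for aligning Ptr to Alignment bytes,
/// rounding up.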
GTIRB_DEPRECATED_UTILS inline size_t alignmentAdjustment(const void* Ptr,
                                                         size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}

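/// Allocate memory in an ever-growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of
/// memory where every allocation is found by merely allocating the next N
/// bytes in the slab, or the next N bytes in the next slab.
///
/// Allocations larger than SizeThreshold are placed in their own slab.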
template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
class GTIRB_DEPRECATED_UTILS BumpPtrAllocatorImpl {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl&& Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl& operator=(BumpPtrAllocatorImpl&& RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

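  /// Allocate space at the specified alignment.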
  void* Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char* AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void* NewSlab = std::malloc(PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char* AlignedPtr = (char*)AlignedAddr;
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char* AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    return AlignedPtr;
  }

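  /// Allocate space for a sequence of objects without constructing them.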
  template <typename T> T* Allocate(size_t Num = 1) {
    return static_cast<T*>(Allocate(Num * sizeof(T), alignof(T)));
  }

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void*, size_t) {}

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto& PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }

private:
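  /// The current pointer into the current slab. This points to the next free
  /// byte in the slab.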
  char* CurPtr = nullptr;

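  /// The end of the current slab.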
  char* End = nullptr;

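  /// The slabs allocated so far.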
  std::vector<void*> Slabs;

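  /// Custom-sized slabs allocated for too-large allocation requests.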
  std::vector<std::pair<void*, size_t>> CustomSizedSlabs;

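  /// How many bytes we've allocated. Used so that we can compute how much
  /// space was wasted.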
  size_t BytesAllocated = 0;

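  /// The number of bytes to put between allocations as a red zone (for
  /// example, when running under a memory sanitizer).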
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(size_t SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

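  /// Allocate a new slab and move the bump pointers over into the new slab,
  /// modifying CurPtr and End.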
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void* NewSlab = std::malloc(AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char*)(NewSlab);
    End = ((char*)NewSlab) + AllocatedSlabSize;
  }

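  /// Deallocate a sequence of slabs.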
  void DeallocateSlabs(std::vector<void*>::iterator I,
                       std::vector<void*>::iterator E) {
    for (; I != E; ++I) {
      std::free(*I);
    }
  }

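  /// Deallocate all memory for custom-sized slabs.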
  void DeallocateCustomSizedSlabs() {
    for (auto& PtrAndSize : CustomSizedSlabs) {
      std::free(PtrAndSize.first);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

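/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated. This allows calling the destructor of each allocated object
/// when the allocator is destroyed.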
template <typename T> class GTIRB_DEPRECATED_UTILS SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }

  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator&& Old)
      : Allocator(std::move(Old.Allocator)) {}

  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator& operator=(SpecificBumpPtrAllocator&& RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

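  /// Allocate space for an array of objects without constructing them.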
  T* Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }

  // Forget all allocations without destroying the allocated objects. This is
  // useful when object destruction is unnecessary (e.g., the memory is
  // released at program shutdown time).
  void ForgetAllocations() {
    Allocator.Slabs.clear();
    Allocator.CustomSizedSlabs.clear();
  }

private:
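  /// Call the destructor of each allocated object; the slabs are walked in
  /// sizeof(T) strides, which is why allocations may not have red zones
  /// between them.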
  void DestroyAll() {
    auto DestroyElements = [](char* Begin, char* End) {
      assert(Begin == (char*)alignAddr(Begin, alignof(T)));
      for (char* Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T*>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char* Begin = (char*)alignAddr(*I, alignof(T));
      char* End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char*)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto& PtrAndSize : Allocator.CustomSizedSlabs) {
      void* Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char*)alignAddr(Ptr, alignof(T)), (char*)Ptr + Size);
    }
  }
};

} // namespace allocator

#ifdef GTIRB_WRAP_UTILS_IN_NAMESPACE

using allocator::alignAddr;
using allocator::alignmentAdjustment;
using allocator::BumpPtrAllocator;
using allocator::BumpPtrAllocatorImpl;
using allocator::isPowerOf2_64;
using allocator::NextPowerOf2;
using allocator::SpecificBumpPtrAllocator;

#endif // GTIRB_WRAP_UTILS_IN_NAMESPACE

} // namespace gtirb

#ifndef GTIRB_WRAP_UTILS_IN_NAMESPACE

using gtirb::allocator::alignAddr;
using gtirb::allocator::alignmentAdjustment;
using gtirb::allocator::BumpPtrAllocator;
using gtirb::allocator::BumpPtrAllocatorImpl;
using gtirb::allocator::isPowerOf2_64;
using gtirb::allocator::NextPowerOf2;
using gtirb::allocator::SpecificBumpPtrAllocator;

#endif // GTIRB_WRAP_UTILS_IN_NAMESPACE

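// A new-expression of the form `new (Allocator) T(...)` resolves to this
// placement operator new and draws its storage from the given bump allocator.
// The local struct S bounds the alignment request by the alignment of the
// most-aligned fundamental types via offsetof(S, x).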
template <size_t SlabSize, size_t SizeThreshold>
void* operator new(size_t Size, gtirb::allocator::BumpPtrAllocatorImpl<
                                    SlabSize, SizeThreshold>& Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void* P;
    } x;
  };
  return Allocator.Allocate(
      Size,
      std::min((size_t)gtirb::allocator::NextPowerOf2(Size), offsetof(S, x)));
}

template <size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void*, gtirb::allocator::BumpPtrAllocatorImpl<SlabSize, SizeThreshold>&) {}

#endif // GTIRB_ALLOCATOR_H
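
For reference, a minimal usage sketch follows. It is not part of the header; the include path gtirb/Allocator.hpp and the Point type are assumptions for illustration, and GTIRB_WRAP_UTILS_IN_NAMESPACE is defined so the names are reached through the gtirb namespace.

// Usage sketch only. The install path `gtirb/Allocator.hpp` and the Point
// type are assumptions for illustration.
#define GTIRB_WRAP_UTILS_IN_NAMESPACE
#include <gtirb/Allocator.hpp>
#include <new>

struct Point {
  int X, Y;
};

int main() {
  // Placement-new draws storage from the pool; there is no per-object free.
  gtirb::BumpPtrAllocator Pool;
  Point* P = new (Pool) Point{1, 2};
  (void)P; // All slabs are released when Pool is destroyed.

  // SpecificBumpPtrAllocator<T> additionally runs ~T() over its slabs when
  // it is destroyed.
  gtirb::SpecificBumpPtrAllocator<Point> Points;
  Point* Q = Points.Allocate(10); // Uninitialized storage for 10 Points.
  new (Q) Point{3, 4};            // Construct the first element in place.
  return 0;
}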