GTIRB v2.1.0
GrammaTech Intermediate Representation for Binaries: C++ API
Allocator.hpp
//===- Allocator.hpp --------------------------------------------*- C++ -*-===//
//
// Copyright (C) 2020 GrammaTech, Inc.
//
// This code is licensed under the MIT license. See the LICENSE file in the
// project root for license terms.
//
// This project is sponsored by the Office of Naval Research, One Liberty
// Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
// N68335-17-C-0700. The content of the information does not necessarily
// reflect the position or policy of the Government and no official
// endorsement should be inferred.
//
//===- Additional License Information --------------------------------------===//
//
// This file was initially written for the LLVM Compiler Infrastructure
// project where it is distributed under the University of Illinois Open Source
// License. See the LICENSE file in the project root for license terms.
//
//===----------------------------------------------------------------------===//
#ifndef GTIRB_ALLOCATOR_H
#define GTIRB_ALLOCATOR_H

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

/// Returns the next power of two (in 64 bits) that is strictly greater
/// than A. Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}
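// Illustrative only (not part of the original header): the shift-or cascade
// smears the highest set bit into every lower bit, so adding one rounds up.
//   NextPowerOf2(5) == 8      (0b101 -> 0b111, then +1)
//   NextPowerOf2(8) == 16     (exact powers of two round up to the next one)
//   NextPowerOf2(0) == 1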

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}

/// Aligns Addr to Alignment bytes, rounding up. Alignment must be a non-zero
/// power of two.
inline uintptr_t alignAddr(const void* Addr, size_t Alignment) {
  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
         "Alignment is not a power of two!");

  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);

  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}

/// Returns the adjustment needed to align Ptr to Alignment bytes, rounding up.
inline size_t alignmentAdjustment(const void* Ptr, size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}

/// Allocates memory by carving allocations out of large slabs obtained with
/// malloc, as if by bump-pointer; individual allocations are never freed, and
/// all memory is released when the allocator is destroyed.
template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl&& Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl& operator=(BumpPtrAllocatorImpl&& RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Allocate space at the specified alignment.
  void* Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
    //#if LLVM_ADDRESS_SANITIZER_BUILD
    //  // Add trailing bytes as a "red zone" under ASan.
    //  SizeToAllocate += RedZoneSize;
    //#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char* AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating from
      // here will point to the allocation of the entire slab.
      // __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      // __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void* NewSlab = std::malloc(PaddedSize);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      // __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char* AlignedPtr = (char*)AlignedAddr;
      // __msan_allocated_memory(AlignedPtr, Size);
      // __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char* AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    // __msan_allocated_memory(AlignedPtr, Size);
    // __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
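  // Illustrative only (not part of the original header): with the default
  // SlabSize/SizeThreshold of 4096, a request that fits in the current slab
  // just bumps CurPtr; a request whose padded size exceeds 4096 bytes (for
  // example, Allocate(8192, 8)) gets its own custom-sized slab; anything else
  // triggers StartNewSlab() and is carved from the fresh slab.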

  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T* Allocate(size_t Num = 1) {
    return static_cast<T*>(Allocate(Num * sizeof(T), alignof(T)));
  }

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void*, size_t) {
    // __asan_poison_memory_region(Ptr, Size);
  }

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto& PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }

private:
  /// The current pointer into the current slab; points at the next free byte.
  char* CurPtr = nullptr;

  /// The end of the current slab.
  char* End = nullptr;

  /// The slabs allocated so far.
  std::vector<void*> Slabs;

  /// Custom-sized slabs allocated for requests too large for a normal slab.
  std::vector<std::pair<void*, size_t>> CustomSizedSlabs;

  /// How many bytes we've allocated, used to compute how much space is wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running with a
  /// sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(size_t SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }
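  // Illustrative only (not part of the original header): with the default
  // SlabSize of 4096, slabs 0-127 are 4 KiB each, slabs 128-255 are 8 KiB,
  // slabs 256-383 are 16 KiB, and so on, saturating at SlabSize * 2^30.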

  /// Allocate a new slab and move the bump pointers over into it, modifying
  /// CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void* NewSlab = std::malloc(AllocatedSlabSize);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    // __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char*)(NewSlab);
    End = ((char*)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(std::vector<void*>::iterator I,
                       std::vector<void*>::iterator E) {
    for (; I != E; ++I) {
      std::free(*I);
    }
  }

  /// Deallocate all memory for custom-sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto& PtrAndSize : CustomSizedSlabs) {
      std::free(PtrAndSize.first);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator, which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

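// Illustrative usage sketch (not part of the original header):
//
//   BumpPtrAllocator A;
//   void* Raw = A.Allocate(128, 16); // 128 bytes, 16-byte aligned
//   int* Ints = A.Allocate<int>(10); // uninitialized storage for 10 ints
//   // There is no per-allocation free; everything is released when A dies.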
/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated, so that destructors can be run in DestroyAll() and when the
/// allocator itself is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator&& Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator& operator=(SpecificBumpPtrAllocator&& RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Allocate space for an array of objects without constructing them.
  T* Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }

  /// Deliberately leak the allocations so that destructors are never run,
  /// for example when the objects must outlive the allocator (at static
  /// program shutdown time).
  void ForgetAllocations() {
    Allocator.Slabs.clear();
    Allocator.CustomSizedSlabs.clear();
  }

private:
  /// Call the destructor of each allocated object.
  void DestroyAll() {
    auto DestroyElements = [](char* Begin, char* End) {
      assert(Begin == (char*)alignAddr(Begin, alignof(T)));
      for (char* Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T*>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char* Begin = (char*)alignAddr(*I, alignof(T));
      char* End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char*)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto& PtrAndSize : Allocator.CustomSizedSlabs) {
      void* Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char*)alignAddr(Ptr, alignof(T)), (char*)Ptr + Size);
    }
  }
};
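// Illustrative usage sketch (not part of the original header); Node is a
// hypothetical element type:
//
//   struct Node { int Value; };
//   SpecificBumpPtrAllocator<Node> NodeAlloc;
//   Node* N = new (NodeAlloc.Allocate()) Node{42}; // placement-construct
//   // ~SpecificBumpPtrAllocator() runs ~Node() on every slab element.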

/// A placement operator new for constructing objects directly in memory
/// obtained from a BumpPtrAllocatorImpl, e.g. new (Allocator) T(...).
template <size_t SlabSize, size_t SizeThreshold>
void* operator new(size_t Size,
                   BumpPtrAllocatorImpl<SlabSize, SizeThreshold>& Allocator) {
  // offsetof(S, x) is the strictest fundamental alignment; the allocation is
  // aligned to the smaller of that and NextPowerOf2(Size).
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void* P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)NextPowerOf2(Size), offsetof(S, x)));
}

// The companion placement delete; invoked only if a constructor throws during
// a placement new into a BumpPtrAllocatorImpl.
template <size_t SlabSize, size_t SizeThreshold>
void operator delete(void*, BumpPtrAllocatorImpl<SlabSize, SizeThreshold>&) {}
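// Illustrative usage sketch (not part of the original header):
//
//   BumpPtrAllocator A;
//   auto* V = new (A) std::vector<int>(4, 0); // constructed in A's memory
//   V->~vector(); // run the destructor manually; the memory is not freed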

#endif // GTIRB_ALLOCATOR_H