/home/arjun/llvm-project/llvm/include/llvm/Support/Allocator.h
Line | Count | Source
1 | | //===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | /// \file |
9 | | /// |
10 | | /// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms |
11 | | /// to the LLVM "Allocator" concept and is similar to MallocAllocator, but |
12 | | /// objects cannot be deallocated. Their lifetime is tied to the lifetime of the |
13 | | /// allocator. |
14 | | /// |
15 | | //===----------------------------------------------------------------------===// |
16 | | |
17 | | #ifndef LLVM_SUPPORT_ALLOCATOR_H |
18 | | #define LLVM_SUPPORT_ALLOCATOR_H |
19 | | |
20 | | #include "llvm/ADT/Optional.h" |
21 | | #include "llvm/ADT/SmallVector.h" |
22 | | #include "llvm/Support/Alignment.h" |
23 | | #include "llvm/Support/AllocatorBase.h" |
24 | | #include "llvm/Support/Compiler.h" |
25 | | #include "llvm/Support/ErrorHandling.h" |
26 | | #include "llvm/Support/MathExtras.h" |
27 | | #include "llvm/Support/MemAlloc.h" |
28 | | #include <algorithm> |
29 | | #include <cassert> |
30 | | #include <cstddef> |
31 | | #include <cstdint> |
32 | | #include <cstdlib> |
33 | | #include <iterator> |
34 | | #include <type_traits> |
35 | | #include <utility> |
36 | | |
37 | | namespace llvm { |
38 | | |
39 | | namespace detail { |
40 | | |
41 | | // We call out to an external function to actually print the message as the |
42 | | // printing code uses Allocator.h in its implementation. |
43 | | void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated, |
44 | | size_t TotalMemory); |
45 | | |
46 | | } // end namespace detail |
47 | | |
48 | | /// Allocate memory in an ever-growing pool, as if by bump-pointer.
49 | | /// |
50 | | /// This isn't strictly a bump-pointer allocator as it uses backing slabs of |
51 | | /// memory rather than relying on a boundless contiguous heap. However, it has |
52 | | /// bump-pointer semantics in that it is a monotonically growing pool of memory |
53 | | /// where every allocation is found by merely allocating the next N bytes in |
54 | | /// the slab, or the next N bytes in the next slab. |
55 | | /// |
56 | | /// Note that this also has a threshold for forcing allocations above a certain |
57 | | /// size into their own slab. |
58 | | /// |
59 | | /// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator |
60 | | /// object, which wraps malloc, to allocate memory, but it can be changed to |
61 | | /// use a custom allocator. |
62 | | /// |
63 | | /// GrowthDelay specifies how many slabs are allocated before the allocator
64 | | /// doubles the size of subsequent slabs.
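 | | ///
 | | /// For illustration, a minimal usage sketch (hypothetical client code, using
 | | /// the BumpPtrAllocator typedef defined further below):
 | | /// \code
 | | ///   BumpPtrAllocator Alloc;
 | | ///   int *Ints = Alloc.Allocate<int>(16);        // space for 16 ints
 | | ///   void *Raw = Alloc.Allocate(256, Align(16)); // 256 bytes, 16-aligned
 | | ///   Alloc.Reset(); // invalidates every allocation at once
 | | /// \endcode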
65 | | template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096, |
66 | | size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128> |
67 | | class BumpPtrAllocatorImpl |
68 | | : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize, |
69 | | SizeThreshold, GrowthDelay>> { |
70 | | public: |
71 | | static_assert(SizeThreshold <= SlabSize, |
72 | | "The SizeThreshold must be at most the SlabSize to ensure " |
73 | | "that objects larger than a slab go into their own memory " |
74 | | "allocation."); |
75 | | static_assert(GrowthDelay > 0, |
76 | | "GrowthDelay must be at least 1, which already increases the "
77 | | "slab size after each allocated slab.");
78 | | |
79 | 4 | BumpPtrAllocatorImpl() = default; |
80 | | |
81 | | template <typename T> |
82 | | BumpPtrAllocatorImpl(T &&Allocator) |
83 | | : Allocator(std::forward<T &&>(Allocator)) {} |
84 | | |
85 | | // Manually implement a move constructor as we must clear the old allocator's |
86 | | // slabs as a matter of correctness. |
87 | | BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old) |
88 | | : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)), |
89 | | CustomSizedSlabs(std::move(Old.CustomSizedSlabs)), |
90 | | BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize), |
91 | | Allocator(std::move(Old.Allocator)) { |
92 | | Old.CurPtr = Old.End = nullptr; |
93 | | Old.BytesAllocated = 0; |
94 | | Old.Slabs.clear(); |
95 | | Old.CustomSizedSlabs.clear(); |
96 | | } |
97 | | |
98 | 4 | ~BumpPtrAllocatorImpl() { |
99 | 4 | DeallocateSlabs(Slabs.begin(), Slabs.end()); |
100 | 4 | DeallocateCustomSizedSlabs(); |
101 | 4 | } |
102 | | |
103 | | BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) { |
104 | | DeallocateSlabs(Slabs.begin(), Slabs.end()); |
105 | | DeallocateCustomSizedSlabs(); |
106 | | |
107 | | CurPtr = RHS.CurPtr; |
108 | | End = RHS.End; |
109 | | BytesAllocated = RHS.BytesAllocated; |
110 | | RedZoneSize = RHS.RedZoneSize; |
111 | | Slabs = std::move(RHS.Slabs); |
112 | | CustomSizedSlabs = std::move(RHS.CustomSizedSlabs); |
113 | | Allocator = std::move(RHS.Allocator); |
114 | | |
115 | | RHS.CurPtr = RHS.End = nullptr; |
116 | | RHS.BytesAllocated = 0; |
117 | | RHS.Slabs.clear(); |
118 | | RHS.CustomSizedSlabs.clear(); |
119 | | return *this; |
120 | | } |
121 | | |
122 | | /// Deallocate all but the current slab and reset the current pointer |
123 | | /// to the beginning of it, freeing all memory allocated so far. |
124 | 0 | void Reset() { |
125 | 0 | // Deallocate all but the first slab, and deallocate all custom-sized slabs. |
126 | 0 | DeallocateCustomSizedSlabs(); |
127 | 0 | CustomSizedSlabs.clear(); |
128 | 0 |
129 | 0 | if (Slabs.empty()) |
130 | 0 | return; |
131 | 0 | |
132 | 0 | // Reset the state. |
133 | 0 | BytesAllocated = 0; |
134 | 0 | CurPtr = (char *)Slabs.front(); |
135 | 0 | End = CurPtr + SlabSize; |
136 | 0 |
137 | 0 | __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0)); |
138 | 0 | DeallocateSlabs(std::next(Slabs.begin()), Slabs.end()); |
139 | 0 | Slabs.erase(std::next(Slabs.begin()), Slabs.end()); |
140 | 0 | } |
141 | | |
142 | | /// Allocate space at the specified alignment. |
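 | | ///
 | | /// For illustration: on the fast path this merely bumps CurPtr. Assuming,
 | | /// purely hypothetically, that CurPtr is already 8-byte aligned and the
 | | /// current slab has 100 bytes left, Allocate(24, Align(8)) returns the old
 | | /// CurPtr and advances it by 24 bytes (plus any sanitizer red zone). If the
 | | /// request does not fit in the current slab, a padded size above
 | | /// SizeThreshold gets a dedicated custom-sized slab; otherwise a new
 | | /// regular slab is started.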
143 | | LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * |
144 | 0 | Allocate(size_t Size, Align Alignment) { |
145 | 0 | // Keep track of how many bytes we've allocated. |
146 | 0 | BytesAllocated += Size; |
147 | 0 |
148 | 0 | size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment); |
149 | 0 | assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow"); |
150 | 0 |
151 | 0 | size_t SizeToAllocate = Size; |
152 | | #if LLVM_ADDRESS_SANITIZER_BUILD |
153 | | // Add trailing bytes as a "red zone" under ASan. |
154 | | SizeToAllocate += RedZoneSize; |
155 | | #endif |
156 | |
157 | 0 | // Check if we have enough space. |
158 | 0 | if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) { |
159 | 0 | char *AlignedPtr = CurPtr + Adjustment; |
160 | 0 | CurPtr = AlignedPtr + SizeToAllocate; |
161 | 0 | // Update the allocation point of this memory block in MemorySanitizer. |
162 | 0 | // Without this, MemorySanitizer messages for values originated from here |
163 | 0 | // will point to the allocation of the entire slab. |
164 | 0 | __msan_allocated_memory(AlignedPtr, Size); |
165 | 0 | // Similarly, tell ASan about this space. |
166 | 0 | __asan_unpoison_memory_region(AlignedPtr, Size); |
167 | 0 | return AlignedPtr; |
168 | 0 | } |
169 | 0 | |
170 | 0 | // If Size is really big, allocate a separate slab for it. |
171 | 0 | size_t PaddedSize = SizeToAllocate + Alignment.value() - 1; |
172 | 0 | if (PaddedSize > SizeThreshold) { |
173 | 0 | void *NewSlab = Allocator.Allocate(PaddedSize, alignof(std::max_align_t)); |
174 | 0 | // We own the new slab and don't want anyone reading anything other than
175 | 0 | // pieces returned from this method. So poison the whole slab. |
176 | 0 | __asan_poison_memory_region(NewSlab, PaddedSize); |
177 | 0 | CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize)); |
178 | 0 |
179 | 0 | uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment); |
180 | 0 | assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize); |
181 | 0 | char *AlignedPtr = (char*)AlignedAddr; |
182 | 0 | __msan_allocated_memory(AlignedPtr, Size); |
183 | 0 | __asan_unpoison_memory_region(AlignedPtr, Size); |
184 | 0 | return AlignedPtr; |
185 | 0 | } |
186 | 0 | |
187 | 0 | // Otherwise, start a new slab and try again. |
188 | 0 | StartNewSlab(); |
189 | 0 | uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment); |
190 | 0 | assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End && |
191 | 0 | "Unable to allocate memory!"); |
192 | 0 | char *AlignedPtr = (char*)AlignedAddr; |
193 | 0 | CurPtr = AlignedPtr + SizeToAllocate; |
194 | 0 | __msan_allocated_memory(AlignedPtr, Size); |
195 | 0 | __asan_unpoison_memory_region(AlignedPtr, Size); |
196 | 0 | return AlignedPtr; |
197 | 0 | } |
198 | | |
199 | | inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * |
200 | 0 | Allocate(size_t Size, size_t Alignment) { |
201 | 0 | assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead."); |
202 | 0 | return Allocate(Size, Align(Alignment)); |
203 | 0 | } |
204 | | |
205 | | // Pull in base class overloads. |
206 | | using AllocatorBase<BumpPtrAllocatorImpl>::Allocate; |
207 | | |
208 | | // Bump pointer allocators are expected to never free their storage, and
209 | | // clients expect pointers to remain valid for non-dereferencing uses even |
210 | | // after deallocation. |
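 | | // For illustration: after Deallocate(P, Size, ...), comparing P against
 | | // other pointers remains well-defined, but reads through P are flagged
 | | // under ASan because the region is re-poisoned.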
211 | 0 | void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) { |
212 | 0 | __asan_poison_memory_region(Ptr, Size); |
213 | 0 | } |
214 | | |
215 | | // Pull in base class overloads. |
216 | | using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate; |
217 | | |
218 | | size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); } |
219 | | |
220 | | /// \return An index uniquely and reproducibly identifying |
221 | | /// an input pointer \p Ptr in the given allocator. |
222 | | /// The returned value is negative iff the object is inside a custom-sized
223 | | /// slab.
224 | | /// Returns an empty optional if the pointer is not found in the allocator. |
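 | | ///
 | | /// For illustration (hypothetical values): a pointer 16 bytes into the
 | | /// first regular slab yields 16; a pointer 16 bytes into the first
 | | /// custom-sized slab yields -1 - 16 = -17; a pointer owned by a different
 | | /// allocator yields an empty optional.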
225 | | llvm::Optional<int64_t> identifyObject(const void *Ptr) { |
226 | | const char *P = static_cast<const char *>(Ptr); |
227 | | int64_t InSlabIdx = 0; |
228 | | for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) { |
229 | | const char *S = static_cast<const char *>(Slabs[Idx]); |
230 | | if (P >= S && P < S + computeSlabSize(Idx)) |
231 | | return InSlabIdx + static_cast<int64_t>(P - S); |
232 | | InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx)); |
233 | | } |
234 | | |
235 | | // Use a negative index to denote custom-sized slabs.
236 | | int64_t InCustomSizedSlabIdx = -1; |
237 | | for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) { |
238 | | const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first); |
239 | | size_t Size = CustomSizedSlabs[Idx].second; |
240 | | if (P >= S && P < S + Size) |
241 | | return InCustomSizedSlabIdx - static_cast<int64_t>(P - S); |
242 | | InCustomSizedSlabIdx -= static_cast<int64_t>(Size); |
243 | | } |
244 | | return None; |
245 | | } |
246 | | |
247 | | /// A wrapper around identifyObject that additionally asserts that |
248 | | /// the object is indeed within the allocator. |
249 | | /// \return An index uniquely and reproducibly identifying |
250 | | /// an input pointer \p Ptr in the given allocator. |
251 | | int64_t identifyKnownObject(const void *Ptr) { |
252 | | Optional<int64_t> Out = identifyObject(Ptr); |
253 | | assert(Out && "Wrong allocator used"); |
254 | | return *Out; |
255 | | } |
256 | | |
257 | | /// A wrapper around identifyKnownObject. Accepts type information |
258 | | /// about the object and produces a smaller identifier by relying on |
259 | | /// alignment information. Note that subclasses may have different
260 | | /// alignment, so the base-most class should be passed as the template
261 | | /// parameter to obtain correct results. For that reason, automatic template
262 | | /// parameter deduction is disabled.
263 | | /// \return An index uniquely and reproducibly identifying |
264 | | /// an input pointer \p Ptr in the given allocator. This identifier is |
265 | | /// different from the ones produced by identifyObject and |
266 | | /// identifyAlignedObject. |
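 | | ///
 | | /// For illustration: with a hypothetical base class B where alignof(B) == 8,
 | | /// objects allocated back-to-back at offsets 0, 8, 16, ... map to the
 | | /// compact identifiers 0, 1, 2, ...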
267 | | template <typename T> |
268 | | int64_t identifyKnownAlignedObject(const void *Ptr) { |
269 | | int64_t Out = identifyKnownObject(Ptr); |
270 | | assert(Out % alignof(T) == 0 && "Wrong alignment information"); |
271 | | return Out / alignof(T); |
272 | | } |
273 | | |
274 | | size_t getTotalMemory() const { |
275 | | size_t TotalMemory = 0; |
276 | | for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I) |
277 | | TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I)); |
278 | | for (auto &PtrAndSize : CustomSizedSlabs) |
279 | | TotalMemory += PtrAndSize.second; |
280 | | return TotalMemory; |
281 | | } |
282 | | |
283 | | size_t getBytesAllocated() const { return BytesAllocated; } |
284 | | |
285 | | void setRedZoneSize(size_t NewSize) { |
286 | | RedZoneSize = NewSize; |
287 | | } |
288 | | |
289 | | void PrintStats() const { |
290 | | detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated, |
291 | | getTotalMemory()); |
292 | | } |
293 | | |
294 | | private: |
295 | | /// The current pointer into the current slab. |
296 | | /// |
297 | | /// This points to the next free byte in the slab. |
298 | | char *CurPtr = nullptr; |
299 | | |
300 | | /// The end of the current slab. |
301 | | char *End = nullptr; |
302 | | |
303 | | /// The slabs allocated so far. |
304 | | SmallVector<void *, 4> Slabs; |
305 | | |
306 | | /// Custom-sized slabs allocated for too-large allocation requests. |
307 | | SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs; |
308 | | |
309 | | /// How many bytes we've allocated. |
310 | | /// |
311 | | /// Used so that we can compute how much space was wasted. |
312 | | size_t BytesAllocated = 0; |
313 | | |
314 | | /// The number of bytes to put between allocations when running under |
315 | | /// a sanitizer. |
316 | | size_t RedZoneSize = 1; |
317 | | |
318 | | /// The allocator instance we use to get slabs of memory. |
319 | | AllocatorT Allocator; |
320 | | |
321 | 0 | static size_t computeSlabSize(unsigned SlabIdx) { |
322 | 0 | // Scale the actual allocated slab size based on the number of slabs |
323 | 0 | // allocated. Every GrowthDelay slabs allocated, we double |
324 | 0 | // the allocated size to reduce allocation frequency, but saturate at |
325 | 0 | // multiplying the slab size by 2^30. |
326 | 0 | return SlabSize * |
327 | 0 | ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay)); |
328 | 0 | } |
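 | | // For illustration, with the default SlabSize = 4096 and GrowthDelay = 128:
 | | //   slabs   0..127 -> 4096 bytes each
 | | //   slabs 128..255 -> 8192 bytes each
 | | //   slabs 256..383 -> 16384 bytes each, and so on,
 | | // with the doubling saturating at a factor of 2^30.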
329 | | |
330 | | /// Allocate a new slab and move the bump pointers over into the new |
331 | | /// slab, modifying CurPtr and End. |
332 | 0 | void StartNewSlab() { |
333 | 0 | size_t AllocatedSlabSize = computeSlabSize(Slabs.size()); |
334 | 0 |
335 | 0 | void *NewSlab = |
336 | 0 | Allocator.Allocate(AllocatedSlabSize, alignof(std::max_align_t)); |
337 | 0 | // We own the new slab and don't want anyone reading anything other than |
338 | 0 | // pieces returned from this method. So poison the whole slab. |
339 | 0 | __asan_poison_memory_region(NewSlab, AllocatedSlabSize); |
340 | 0 |
341 | 0 | Slabs.push_back(NewSlab); |
342 | 0 | CurPtr = (char *)(NewSlab); |
343 | 0 | End = ((char *)NewSlab) + AllocatedSlabSize; |
344 | 0 | } |
345 | | |
346 | | /// Deallocate a sequence of slabs. |
347 | | void DeallocateSlabs(SmallVectorImpl<void *>::iterator I, |
348 | 4 | SmallVectorImpl<void *>::iterator E) { |
349 | 4 | for (; I != E; ++I) { |
350 | 0 | size_t AllocatedSlabSize = |
351 | 0 | computeSlabSize(std::distance(Slabs.begin(), I)); |
352 | 0 | Allocator.Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t)); |
353 | 0 | } |
354 | 4 | } |
355 | | |
356 | | /// Deallocate all memory for custom sized slabs. |
357 | 4 | void DeallocateCustomSizedSlabs() { |
358 | 4 | for (auto &PtrAndSize : CustomSizedSlabs) { |
359 | 0 | void *Ptr = PtrAndSize.first; |
360 | 0 | size_t Size = PtrAndSize.second; |
361 | 0 | Allocator.Deallocate(Ptr, Size, alignof(std::max_align_t)); |
362 | 0 | } |
363 | 4 | } |
364 | | |
365 | | template <typename T> friend class SpecificBumpPtrAllocator; |
366 | | }; |
367 | | |
368 | | /// The standard BumpPtrAllocator which just uses the default template |
369 | | /// parameters. |
370 | | typedef BumpPtrAllocatorImpl<> BumpPtrAllocator; |
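 | |
 | | // For illustration, a hypothetical instantiation that uses 16 KiB slabs
 | | // while keeping the remaining defaults:
 | | //
 | | //   llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator, 16384> BigSlabAlloc;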
371 | | |
372 | | /// A BumpPtrAllocator that allows only elements of a specific type to be |
373 | | /// allocated. |
374 | | /// |
375 | | /// This allows calling the destructor in DestroyAll() and when the allocator is |
376 | | /// destroyed. |
377 | | template <typename T> class SpecificBumpPtrAllocator { |
378 | | BumpPtrAllocator Allocator; |
379 | | |
380 | | public: |
381 | | SpecificBumpPtrAllocator() { |
382 | | // Because SpecificBumpPtrAllocator walks the memory to call destructors, |
383 | | // it can't have red zones between allocations. |
384 | | Allocator.setRedZoneSize(0); |
385 | | } |
386 | | SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old) |
387 | | : Allocator(std::move(Old.Allocator)) {} |
388 | | ~SpecificBumpPtrAllocator() { DestroyAll(); } |
389 | | |
390 | | SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) { |
391 | | Allocator = std::move(RHS.Allocator); |
392 | | return *this; |
393 | | } |
394 | | |
395 | | /// Call the destructor of each allocated object, deallocate all but the
396 | | /// current slab, and reset the current pointer to the beginning of it,
397 | | /// freeing all memory allocated so far.
398 | | void DestroyAll() { |
399 | | auto DestroyElements = [](char *Begin, char *End) { |
400 | | assert(Begin == (char *)alignAddr(Begin, Align::Of<T>())); |
401 | | for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T)) |
402 | | reinterpret_cast<T *>(Ptr)->~T(); |
403 | | }; |
404 | | |
405 | | for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E; |
406 | | ++I) { |
407 | | size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize( |
408 | | std::distance(Allocator.Slabs.begin(), I)); |
409 | | char *Begin = (char *)alignAddr(*I, Align::Of<T>()); |
410 | | char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr |
411 | | : (char *)*I + AllocatedSlabSize; |
412 | | |
413 | | DestroyElements(Begin, End); |
414 | | } |
415 | | |
416 | | for (auto &PtrAndSize : Allocator.CustomSizedSlabs) { |
417 | | void *Ptr = PtrAndSize.first; |
418 | | size_t Size = PtrAndSize.second; |
419 | | DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()), |
420 | | (char *)Ptr + Size); |
421 | | } |
422 | | |
423 | | Allocator.Reset(); |
424 | | } |
425 | | |
426 | | /// Allocate space for an array of objects without constructing them. |
427 | | T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); } |
428 | | }; |
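 | |
 | | // For illustration, a sketch with a hypothetical type Widget:
 | |
 | | //   llvm::SpecificBumpPtrAllocator<Widget> Alloc;
 | | //   Widget *W = new (Alloc.Allocate()) Widget(); // placement-construct
 | | //   Alloc.DestroyAll(); // runs ~Widget() on every allocated object; this
 | | //                       // also happens in the allocator's destructor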
429 | | |
430 | | } // end namespace llvm |
431 | | |
432 | | template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold, |
433 | | size_t GrowthDelay> |
434 | | void * |
435 | | operator new(size_t Size, |
436 | | llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold, |
437 | 0 | GrowthDelay> &Allocator) { |
438 | 0 | return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size), |
439 | 0 | alignof(std::max_align_t))); |
440 | 0 | } |
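 | |
 | | // For illustration: this overload enables placement-new syntax directly on
 | | // the allocator (Node is a hypothetical type):
 | |
 | | //   llvm::BumpPtrAllocator Alloc;
 | | //   Node *N = new (Alloc) Node();
 | |
 | | // The matching operator delete below is a no-op, so for non-trivially
 | | // destructible types the destructor must be invoked manually.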
441 | | |
442 | | template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold, |
443 | | size_t GrowthDelay> |
444 | | void operator delete(void *, |
445 | | llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, |
446 | | SizeThreshold, GrowthDelay> &) { |
447 | | } |
448 | | |
449 | | #endif // LLVM_SUPPORT_ALLOCATOR_H |