alloc/raw_vec/mod.rs
#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
#![cfg_attr(test, allow(dead_code))]

// Note: This module is also included in the alloctests crate using #[path] to
// run the tests. See the comment there for an explanation why this is the case.

use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, Alignment, NonNull, Unique};
use core::{cmp, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;

#[cfg(test)]
mod tests;

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
#[cfg_attr(not(panic = "immediate-abort"), inline(never))]
#[track_caller]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}

enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    #[cfg(not(no_global_oom_handling))]
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}

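// `UsizeNoHighBit` is a `usize` whose high bit is never set, i.e. a value in
// `0..=isize::MAX`: it encodes the allocation-size limit in the type itself
// and, being a niche type, leaves room for layout optimizations.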
type Cap = core::num::niche_types::UsizeNoHighBit;

const ZERO_CAP: Cap = unsafe { Cap::new_unchecked(0) };

/// `Cap(cap)`, except if `T` is a ZST then `ZERO_CAP`.
///
/// # Safety
///
/// `cap` must be <= `isize::MAX`.
unsafe fn new_cap<T>(cap: usize) -> Cap {
    if T::IS_ZST { ZERO_CAP } else { unsafe { Cap::new_unchecked(cap) } }
}

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than `isize::MAX` bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to expose the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
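///
/// # Examples
///
/// An illustrative sketch (not a doctest, and not an API guarantee) of how a
/// `Vec`-like `push` could sit on top of `RawVec`; `buf`, `len`, and `push` are
/// hypothetical names used only for this example:
///
/// ```ignore (illustrative-sketch-of-internal-api)
/// fn push(buf: &mut RawVec<u8>, len: &mut usize, value: u8) {
///     if *len == buf.capacity() {
///         // `grow_one` requires `len == capacity`, which was just checked.
///         buf.grow_one();
///     }
///     unsafe { buf.ptr().add(*len).write(value) };
///     *len += 1;
/// }
/// ```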
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    inner: RawVecInner<A>,
    _marker: PhantomData<T>,
}

/// Like a `RawVec`, but only generic over the allocator, not the type.
///
/// As such, all the methods need the layout passed-in as a parameter.
///
/// Having this separation reduces the amount of code we need to monomorphize,
/// as most operations don't need the actual type, just its layout.
#[allow(missing_debug_implementations)]
struct RawVecInner<A: Allocator = Global> {
    ptr: Unique<u8>,
    /// Never used for ZSTs; it's `capacity()`'s responsibility to return `usize::MAX` in that case.
    ///
    /// # Safety
    ///
    /// `cap` must be in the `0..=isize::MAX` range.
    cap: Cap,
    alloc: A,
}

impl<T> RawVec<T, Global> {
    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub(crate) const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// Non-fallible version of `try_with_capacity`.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    #[track_caller]
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self { inner: RawVecInner::with_capacity(capacity, T::LAYOUT), _marker: PhantomData }
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    #[track_caller]
    pub(crate) fn with_capacity_zeroed(capacity: usize) -> Self {
        Self {
            inner: RawVecInner::with_capacity_zeroed_in(capacity, Global, T::LAYOUT),
            _marker: PhantomData,
        }
    }
}

impl RawVecInner<Global> {
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    #[track_caller]
    fn with_capacity(capacity: usize, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global, elem_layout) {
            Ok(res) => res,
            Err(err) => handle_error(err),
        }
    }
}

// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
//   to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
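// Concretely: 1-byte elements (e.g. `u8`) start at capacity 8, anything up to
// 1 KiB (e.g. `u64`) starts at 4, and larger elements start at 1.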
const fn min_non_zero_cap(size: usize) -> usize {
    if size == 1 {
        8
    } else if size <= 1024 {
        4
    } else {
        1
    }
}

impl<T, A: Allocator> RawVec<T, A> {
    #[cfg(not(no_global_oom_handling))]
    pub(crate) const MIN_NON_ZERO_CAP: usize = min_non_zero_cap(size_of::<T>());

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[inline]
    pub(crate) const fn new_in(alloc: A) -> Self {
        // Check assumption made in `current_memory`
        const { assert!(T::LAYOUT.size() % T::LAYOUT.align() == 0) };
        Self { inner: RawVecInner::new_in(alloc, Alignment::of::<T>()), _marker: PhantomData }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self {
            inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
            _marker: PhantomData,
        }
    }

    /// Like `try_with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[inline]
    pub(crate) fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        match RawVecInner::try_with_capacity_in(capacity, alloc, T::LAYOUT) {
            Ok(inner) => Ok(Self { inner, _marker: PhantomData }),
            Err(e) => Err(e),
        }
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    pub(crate) fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self {
            inner: RawVecInner::with_capacity_zeroed_in(capacity, alloc, T::LAYOUT),
            _marker: PhantomData,
        }
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
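    ///
    /// # Examples
    ///
    /// An illustrative sketch (not a doctest) of the pattern a `Vec::into_boxed_slice`-style
    /// conversion could use: shrink so that the requested capacity equals `len`, then hand
    /// the buffer to `Box`. `buf` and `len` are hypothetical names for this example:
    ///
    /// ```ignore (illustrative-sketch-of-internal-api)
    /// buf.shrink_to_fit(len);
    /// let slice: Box<[MaybeUninit<u32>]> = unsafe { buf.into_box(len) };
    /// ```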
    pub(crate) unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = ptr::slice_from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::from_raw_in(slice, ptr::read(&me.inner.alloc))
        }
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types (this is only a concern on
    /// 32-bit systems). For ZSTs, `capacity` is ignored.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline]
    pub(crate) unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        // SAFETY: Precondition passed to the caller
        unsafe {
            let ptr = ptr.cast();
            let capacity = new_cap::<T>(capacity);
            Self {
                inner: RawVecInner::from_raw_parts_in(ptr, capacity, alloc),
                _marker: PhantomData,
            }
        }
    }

    /// A convenience method for hoisting the non-null precondition out of [`RawVec::from_raw_parts_in`].
    ///
    /// # Safety
    ///
    /// See [`RawVec::from_raw_parts_in`].
    #[inline]
    pub(crate) unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
        // SAFETY: Precondition passed to the caller
        unsafe {
            let ptr = ptr.cast();
            let capacity = new_cap::<T>(capacity);
            Self { inner: RawVecInner::from_nonnull_in(ptr, capacity, alloc), _marker: PhantomData }
        }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub(crate) const fn ptr(&self) -> *mut T {
        self.inner.ptr()
    }

    #[inline]
    pub(crate) const fn non_null(&self) -> NonNull<T> {
        self.inner.non_null()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline]
    pub(crate) const fn capacity(&self) -> usize {
        self.inner.capacity(size_of::<T>())
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    #[inline]
    pub(crate) fn allocator(&self) -> &A {
        self.inner.allocator()
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
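    ///
    /// # Examples
    ///
    /// An illustrative sketch (not a doctest) of an `extend`-like bulk push: reserve once
    /// up front, then write the new elements and bump the length. `buf`, `len`, and `items`
    /// are hypothetical names for this example:
    ///
    /// ```ignore (illustrative-sketch-of-internal-api)
    /// fn extend_from_slice(buf: &mut RawVec<u32>, len: &mut usize, items: &[u32]) {
    ///     buf.reserve(*len, items.len());
    ///     unsafe {
    ///         let dst = buf.ptr().add(*len);
    ///         core::ptr::copy_nonoverlapping(items.as_ptr(), dst, items.len());
    ///     }
    ///     *len += items.len();
    /// }
    /// ```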
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    pub(crate) fn reserve(&mut self, len: usize, additional: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.reserve(len, additional, T::LAYOUT) }
    }

    /// A specialized version of `self.reserve(len, 1)` which requires the
    /// caller to ensure `len == self.capacity()`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(never)]
    #[track_caller]
    pub(crate) fn grow_one(&mut self) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.grow_one(T::LAYOUT) }
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub(crate) fn try_reserve(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.try_reserve(len, additional, T::LAYOUT) }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[track_caller]
    pub(crate) fn reserve_exact(&mut self, len: usize, additional: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.reserve_exact(len, additional, T::LAYOUT) }
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub(crate) fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.try_reserve_exact(len, additional, T::LAYOUT) }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[track_caller]
    #[inline]
    pub(crate) fn shrink_to_fit(&mut self, cap: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.shrink_to_fit(cap, T::LAYOUT) }
    }
}

unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        // SAFETY: We are in a Drop impl, self.inner will not be used again.
        unsafe { self.inner.deallocate(T::LAYOUT) }
    }
}

impl<A: Allocator> RawVecInner<A> {
    #[inline]
    const fn new_in(alloc: A, align: Alignment) -> Self {
        let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero()));
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr, cap: ZERO_CAP, alloc }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) {
            Ok(this) => {
                unsafe {
                    // Make it more obvious that a subsequent Vec::reserve(capacity) will not allocate.
                    hint::assert_unchecked(!this.needs_to_grow(0, capacity, elem_layout));
                }
                this
            }
            Err(err) => handle_error(err),
        }
    }

    #[inline]
    fn try_with_capacity_in(
        capacity: usize,
        alloc: A,
        elem_layout: Layout,
    ) -> Result<Self, TryReserveError> {
        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout)
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
            Ok(res) => res,
            Err(err) => handle_error(err),
        }
    }

    fn try_allocate_in(
        capacity: usize,
        init: AllocInit,
        alloc: A,
        elem_layout: Layout,
    ) -> Result<Self, TryReserveError> {
        // We avoid `unwrap_or_else` here because it bloats the amount of
        // LLVM IR generated.
        let layout = match layout_array(capacity, elem_layout) {
            Ok(layout) => layout,
            Err(_) => return Err(CapacityOverflow.into()),
        };

        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if layout.size() == 0 {
            return Ok(Self::new_in(alloc, elem_layout.alignment()));
        }

        let result = match init {
            AllocInit::Uninitialized => alloc.allocate(layout),
            #[cfg(not(no_global_oom_handling))]
            AllocInit::Zeroed => alloc.allocate_zeroed(layout),
        };
        let ptr = match result {
            Ok(ptr) => ptr,
            Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
        };

        // Allocators currently return a `NonNull<[u8]>` whose length
        // matches the size requested. If that ever changes, the capacity
        // here should change to `ptr.len() / size_of::<T>()`.
        Ok(Self {
            ptr: Unique::from(ptr.cast()),
            cap: unsafe { Cap::new_unchecked(capacity) },
            alloc,
        })
    }

    #[inline]
    unsafe fn from_raw_parts_in(ptr: *mut u8, cap: Cap, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
    }

    #[inline]
    unsafe fn from_nonnull_in(ptr: NonNull<u8>, cap: Cap, alloc: A) -> Self {
        Self { ptr: Unique::from(ptr), cap, alloc }
    }

    #[inline]
    const fn ptr<T>(&self) -> *mut T {
        self.non_null::<T>().as_ptr()
    }

    #[inline]
    const fn non_null<T>(&self) -> NonNull<T> {
        self.ptr.cast().as_non_null_ptr()
    }

    #[inline]
    const fn capacity(&self, elem_size: usize) -> usize {
        if elem_size == 0 { usize::MAX } else { self.cap.as_inner() }
    }

    #[inline]
    fn allocator(&self) -> &A {
        &self.alloc
    }

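    /// Returns a pointer to, and the layout of, the current allocation, or `None` if
    /// nothing has been allocated (zero-sized elements or zero capacity).
    ///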
    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[inline]
    unsafe fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
        if elem_layout.size() == 0 || self.cap.as_inner() == 0 {
            None
        } else {
            // We could use Layout::array here which ensures the absence of isize and usize overflows
            // and could hypothetically handle differences between stride and size, but this memory
            // has already been allocated so we know it can't overflow and currently Rust does not
            // support such types. So we can do better by skipping some checks and avoiding an unwrap.
            unsafe {
                let alloc_size = elem_layout.size().unchecked_mul(self.cap.as_inner());
                let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
                Some((self.ptr.into(), layout))
            }
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    unsafe fn reserve(&mut self, len: usize, additional: usize, elem_layout: Layout) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        unsafe fn do_reserve_and_handle<A: Allocator>(
            slf: &mut RawVecInner<A>,
            len: usize,
            additional: usize,
            elem_layout: Layout,
        ) {
            // SAFETY: Precondition passed to caller
            if let Err(err) = unsafe { slf.grow_amortized(len, additional, elem_layout) } {
                handle_error(err);
            }
        }

        if self.needs_to_grow(len, additional, elem_layout) {
            unsafe {
                do_reserve_and_handle(self, len, additional, elem_layout);
            }
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    unsafe fn grow_one(&mut self, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Err(err) = unsafe { self.grow_amortized(self.cap.as_inner(), 1, elem_layout) } {
            handle_error(err);
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    unsafe fn try_reserve(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional, elem_layout) {
            // SAFETY: Precondition passed to caller
            unsafe {
                self.grow_amortized(len, additional, elem_layout)?;
            }
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed
            hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    #[track_caller]
    unsafe fn reserve_exact(&mut self, len: usize, additional: usize, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Err(err) = unsafe { self.try_reserve_exact(len, additional, elem_layout) } {
            handle_error(err);
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    unsafe fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional, elem_layout) {
            // SAFETY: Precondition passed to caller
            unsafe {
                self.grow_exact(len, additional, elem_layout)?;
            }
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed
            hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - `cap` must be less than or equal to `self.capacity(elem_layout.size())`
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[track_caller]
    unsafe fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) {
        if let Err(err) = unsafe { self.shrink(cap, elem_layout) } {
            handle_error(err);
        }
    }

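    /// Returns whether `len + additional` elements would exceed the current capacity.
    ///
    /// The `wrapping_sub` means an out-of-range `len > capacity` does not panic here;
    /// as documented on `reserve`, such a call may simply fail to allocate the
    /// requested space.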
    #[inline]
    fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool {
        additional > self.capacity(elem_layout.size()).wrapping_sub(len)
    }

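    /// Sets `ptr` and `cap` from a freshly (re)allocated block. The caller must ensure
    /// `cap` is at most `isize::MAX`, since it is stored via `Cap::new_unchecked`.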
    #[inline]
    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / size_of::<T>()`.
        self.ptr = Unique::from(ptr.cast());
        self.cap = unsafe { Cap::new_unchecked(cap) };
    }

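    /// Grows the allocation to hold at least `len + additional` elements, using the
    /// amortized policy: the new capacity is the larger of double the current capacity
    /// and the exact requirement, but never less than `min_non_zero_cap` for this
    /// element size.
    ///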
    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - The sum of `len` and `additional` must be greater than or equal to
    ///   `self.capacity(elem_layout.size())`
    unsafe fn grow_amortized(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if elem_layout.size() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
        let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);

        let new_layout = layout_array(cap, elem_layout)?;

        // SAFETY:
        // - For the `current_memory` call: Precondition passed to caller
        // - For the `finish_grow` call: Precondition passed to caller
        //   + `current_memory` does the right thing
        let ptr =
            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };

        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
        unsafe { self.set_ptr_and_cap(ptr, cap) };
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - The sum of `len` and `additional` must be greater than or equal to
    ///   `self.capacity(elem_layout.size())`
    unsafe fn grow_exact(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if elem_layout.size() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = layout_array(cap, elem_layout)?;

        // SAFETY:
        // - For the `current_memory` call: Precondition passed to caller
        // - For the `finish_grow` call: Precondition passed to caller
        //   + `current_memory` does the right thing
        let ptr =
            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };
        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
        unsafe {
            self.set_ptr_and_cap(ptr, cap);
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - `cap` must be less than or equal to `self.capacity(elem_layout.size())`
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    unsafe fn shrink(&mut self, cap: usize, elem_layout: Layout) -> Result<(), TryReserveError> {
        assert!(cap <= self.capacity(elem_layout.size()), "Tried to shrink to a larger capacity");
        // SAFETY: Just checked this isn't trying to grow
        unsafe { self.shrink_unchecked(cap, elem_layout) }
    }

    /// `shrink`, but without the capacity check.
    ///
    /// This is split out so that `shrink` can inline the check, since it
    /// optimizes out in things like `shrink_to_fit`, without needing to
    /// also inline all this code, as doing that ends up failing the
    /// `vec-shrink-panic` codegen test when `shrink_to_fit` ends up being too
    /// big for LLVM to be willing to inline.
    ///
    /// # Safety
    /// `cap <= self.capacity()`
    #[cfg(not(no_global_oom_handling))]
    unsafe fn shrink_unchecked(
        &mut self,
        cap: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        // SAFETY: Precondition passed to caller
        let (ptr, layout) = if let Some(mem) = unsafe { self.current_memory(elem_layout) } {
            mem
        } else {
            return Ok(());
        };

        // If shrinking to 0, deallocate the buffer. We don't reach this point
        // for the T::IS_ZST case since current_memory() will have returned
        // None.
        if cap == 0 {
            unsafe { self.alloc.deallocate(ptr, layout) };
            self.ptr =
                unsafe { Unique::new_unchecked(ptr::without_provenance_mut(elem_layout.align())) };
            self.cap = ZERO_CAP;
        } else {
            let ptr = unsafe {
                // Layout cannot overflow here because it would have
                // overflowed earlier when capacity was larger.
                let new_size = elem_layout.size().unchecked_mul(cap);
                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
                self.alloc
                    .shrink(ptr, layout, new_layout)
                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
            };
            // SAFETY: if the allocation is valid, then the capacity is too
            unsafe {
                self.set_ptr_and_cap(ptr, cap);
            }
        }
        Ok(())
    }

    /// # Safety
    ///
    /// This function deallocates the owned allocation, but does not update `ptr` or `cap`
    /// afterwards, so it does not protect against double-free or use-after-free. Essentially,
    /// do not do anything with `self` after this function returns.
    /// Ideally this function would take `self` by move, but it cannot because it exists to be
    /// called from a `Drop` impl.
    unsafe fn deallocate(&mut self, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Some((ptr, layout)) = unsafe { self.current_memory(elem_layout) } {
            unsafe {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}

/// # Safety
/// If `current_memory` matches `Some((ptr, old_layout))`:
/// - `ptr` must denote a block of memory *currently allocated* via `alloc`
/// - `old_layout` must *fit* that block of memory
/// - `new_layout` must have the same alignment as `old_layout`
/// - `new_layout.size()` must be greater than or equal to `old_layout.size()`
/// If `current_memory` is `None`, this function is safe.
// not marked inline(never) since we want optimizers to be able to observe the specifics of this
// function, see tests/codegen-llvm/vec-reserve-extend.rs.
#[cold]
unsafe fn finish_grow<A>(
    new_layout: Layout,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            hint::assert_unchecked(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}

// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[cold]
#[optimize(size)]
#[track_caller]
fn handle_error(e: TryReserveError) -> ! {
    match e.kind() {
        CapacityOverflow => capacity_overflow(),
        AllocError { layout, .. } => handle_alloc_error(layout),
    }
}

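/// Computes the `Layout` of `cap` elements whose individual layout is `elem_layout`
/// (i.e. the layout of the backing array), mapping any overflow to `CapacityOverflow`.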
#[inline]
fn layout_array(cap: usize, elem_layout: Layout) -> Result<Layout, TryReserveError> {
    elem_layout.repeat(cap).map(|(layout, _pad)| layout).map_err(|_| CapacityOverflow.into())
}