From c9cbe7e7eb37ec06a9c76a6b9ca4d342ff5a1128 Mon Sep 17 00:00:00 2001
From: Nicholas Nethercote
Date: Wed, 20 May 2020 20:45:05 +1000
Subject: [PATCH] Rename some identifiers in `RawVec` and `libarena`.

- Use `len` more consistently for the number of elements in a vector, because that's the usual name.
- Use `additional` more consistently for the number of elements we want to add, because that's what `Vec::reserve()` uses.
- Use `cap` consistently rather than `capacity`.
- Plus a few other tweaks.

This increases consistency and conciseness.
---
 src/liballoc/raw_vec.rs   | 81 ++++++++++++++++-----------------------
 src/liballoc/vec.rs       |  8 ++--
 src/librustc_arena/lib.rs | 52 ++++++++++++-------------
 3 files changed, 62 insertions(+), 79 deletions(-)

diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 8a22f267bf2..805dbfe2775 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -235,13 +235,13 @@ impl RawVec {
         }
     }
 
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
-    /// enough capacity, will reallocate enough space plus comfortable slack
-    /// space to get amortized `O(1)` behavior. Will limit this behavior
-    /// if it would needlessly cause itself to panic.
+    /// Ensures that the buffer contains at least enough space to hold `len +
+    /// additional` elements. If it doesn't already have enough capacity, will
+    /// reallocate enough space plus comfortable slack space to get amortized
+    /// `O(1)` behavior. Will limit this behavior if it would needlessly cause
+    /// itself to panic.
     ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
+    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
     /// the requested space. This is not really unsafe, but the unsafe
     /// code *you* write that relies on the behavior of this function may break.
     ///
@@ -287,8 +287,8 @@ impl RawVec {
     /// # vector.push_all(&[1, 3, 5, 7, 9]);
     /// # }
     /// ```
-    pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.try_reserve(used_capacity, needed_extra_capacity) {
+    pub fn reserve(&mut self, len: usize, additional: usize) {
+        match self.try_reserve(len, additional) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
@@ -296,28 +296,23 @@ impl RawVec {
     }
 
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow_amortized(used_capacity, needed_extra_capacity)
+    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(len, additional) {
+            self.grow_amortized(len, additional)
         } else {
             Ok(())
         }
     }
 
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
-    /// will reallocate the minimum possible amount of memory necessary.
-    /// Generally this will be exactly the amount of memory necessary,
-    /// but in principle the allocator is free to give back more than what
-    /// we asked for.
+    /// Ensures that the buffer contains at least enough space to hold `len +
+    /// additional` elements. If it doesn't already, will reallocate the
+    /// minimum possible amount of memory necessary. Generally this will be
+    /// exactly the amount of memory necessary, but in principle the allocator
+    /// is free to give back more than we asked for.
     ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
+    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe code
+    /// *you* write that relies on the behavior of this function may break.
     ///
     /// # Panics
     ///
@@ -328,8 +323,8 @@ impl RawVec {
     /// # Aborts
     ///
     /// Aborts on OOM.
-    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
+    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+        match self.try_reserve_exact(len, additional) {
             Err(CapacityOverflow) => capacity_overflow(),
             Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
@@ -339,14 +334,10 @@ impl RawVec {
     /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
     pub fn try_reserve_exact(
         &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
+        len: usize,
+        additional: usize,
     ) -> Result<(), TryReserveError> {
-        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
-            self.grow_exact(used_capacity, needed_extra_capacity)
-        } else {
-            Ok(())
-        }
+        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
     }
 
     /// Shrinks the allocation down to the specified amount. If the given amount
@@ -371,8 +362,8 @@ impl RawVec {
     /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
     /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
-    fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
+    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+        additional > self.capacity().wrapping_sub(len)
     }
 
     fn capacity_from_bytes(excess: usize) -> usize {
@@ -392,13 +383,10 @@ impl RawVec {
     // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
-    fn grow_amortized(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
+    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
         // This is ensured by the calling contexts.
-        debug_assert!(needed_extra_capacity > 0);
+        debug_assert!(additional > 0);
+
         if mem::size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when `elem_size` is
             // 0, getting to here necessarily means the `RawVec` is overfull.
@@ -406,8 +394,7 @@
         }
 
         // Nothing we can really do about these checks, sadly.
-        let required_cap =
-            used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
 
         // This guarantees exponential growth. The doubling cannot overflow
         // because `cap <= isize::MAX` and the type of `cap` is `usize`.
@@ -440,18 +427,14 @@ impl RawVec {
     // The constraints on this method are much the same as those on
     // `grow_amortized`, but this method is usually instantiated less often so
     // it's less critical.
-    fn grow_exact(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
+    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
         if mem::size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when the type size is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow);
         }
 
-        let cap = used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
         let new_layout = Layout::array::<T>(cap);
 
         // `finish_grow` is non-generic over `T`.
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index af943ecfd48..2226737757b 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -2977,12 +2977,12 @@ impl Drain<'_, T> {
     }
 
     /// Makes room for inserting more elements before the tail.
-    unsafe fn move_tail(&mut self, extra_capacity: usize) {
+    unsafe fn move_tail(&mut self, additional: usize) {
         let vec = self.vec.as_mut();
-        let used_capacity = self.tail_start + self.tail_len;
-        vec.buf.reserve(used_capacity, extra_capacity);
+        let len = self.tail_start + self.tail_len;
+        vec.buf.reserve(len, additional);
 
-        let new_tail_start = self.tail_start + extra_capacity;
+        let new_tail_start = self.tail_start + additional;
         let src = vec.as_ptr().add(self.tail_start);
         let dst = vec.as_mut_ptr().add(new_tail_start);
         ptr::copy(src, dst, self.tail_len);
diff --git a/src/librustc_arena/lib.rs b/src/librustc_arena/lib.rs
index fbf68a5ca35..4da336f8e28 100644
--- a/src/librustc_arena/lib.rs
+++ b/src/librustc_arena/lib.rs
@@ -146,18 +146,18 @@ impl TypedArena {
     }
 
     #[inline]
-    fn can_allocate(&self, len: usize) -> bool {
-        let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
-        let at_least_bytes = len.checked_mul(mem::size_of::<T>()).unwrap();
-        available_capacity_bytes >= at_least_bytes
+    fn can_allocate(&self, additional: usize) -> bool {
+        let available_bytes = self.end.get() as usize - self.ptr.get() as usize;
+        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
+        available_bytes >= additional_bytes
     }
 
     /// Ensures there's enough space in the current chunk to fit `len` objects.
     #[inline]
-    fn ensure_capacity(&self, len: usize) {
-        if !self.can_allocate(len) {
-            self.grow(len);
-            debug_assert!(self.can_allocate(len));
+    fn ensure_capacity(&self, additional: usize) {
+        if !self.can_allocate(additional) {
+            self.grow(additional);
+            debug_assert!(self.can_allocate(additional));
         }
     }
 
@@ -214,13 +214,13 @@ impl TypedArena {
     /// Grows the arena.
     #[inline(never)]
     #[cold]
-    fn grow(&self, n: usize) {
+    fn grow(&self, additional: usize) {
         unsafe {
             // We need the element size to convert chunk sizes (ranging from
             // PAGE to HUGE_PAGE bytes) to element counts.
             let elem_size = cmp::max(1, mem::size_of::<T>());
             let mut chunks = self.chunks.borrow_mut();
-            let mut new_capacity;
+            let mut new_cap;
             if let Some(last_chunk) = chunks.last_mut() {
                 let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                 last_chunk.entries = used_bytes / mem::size_of::<T>();
@@ -228,17 +228,17 @@
                 // If the previous chunk's capacity is less than HUGE_PAGE
                 // bytes, then this chunk will be least double the previous
                 // chunk's size.
-                new_capacity = last_chunk.storage.capacity();
-                if new_capacity < HUGE_PAGE / elem_size {
-                    new_capacity = new_capacity.checked_mul(2).unwrap();
+                new_cap = last_chunk.storage.capacity();
+                if new_cap < HUGE_PAGE / elem_size {
+                    new_cap = new_cap.checked_mul(2).unwrap();
                 }
             } else {
-                new_capacity = PAGE / elem_size;
+                new_cap = PAGE / elem_size;
             }
-            // Also ensure that this chunk can fit `n`.
-            new_capacity = cmp::max(n, new_capacity);
+            // Also ensure that this chunk can fit `additional`.
+            new_cap = cmp::max(additional, new_cap);
 
-            let chunk = TypedArenaChunk::<T>::new(new_capacity);
+            let chunk = TypedArenaChunk::<T>::new(new_cap);
             self.ptr.set(chunk.start());
             self.end.set(chunk.end());
             chunks.push(chunk);
@@ -342,10 +342,10 @@ impl DroplessArena {
 
     #[inline(never)]
     #[cold]
-    fn grow(&self, needed_bytes: usize) {
+    fn grow(&self, additional: usize) {
         unsafe {
             let mut chunks = self.chunks.borrow_mut();
-            let mut new_capacity;
+            let mut new_cap;
             if let Some(last_chunk) = chunks.last_mut() {
                 // There is no need to update `last_chunk.entries` because that
                 // field isn't used by `DroplessArena`.
@@ -353,17 +353,17 @@
                 // If the previous chunk's capacity is less than HUGE_PAGE
                 // bytes, then this chunk will be least double the previous
                 // chunk's size.
-                new_capacity = last_chunk.storage.capacity();
-                if new_capacity < HUGE_PAGE {
-                    new_capacity = new_capacity.checked_mul(2).unwrap();
+                new_cap = last_chunk.storage.capacity();
+                if new_cap < HUGE_PAGE {
+                    new_cap = new_cap.checked_mul(2).unwrap();
                 }
             } else {
-                new_capacity = PAGE;
+                new_cap = PAGE;
             }
-            // Also ensure that this chunk can fit `needed_bytes`.
-            new_capacity = cmp::max(needed_bytes, new_capacity);
+            // Also ensure that this chunk can fit `additional`.
+            new_cap = cmp::max(additional, new_cap);
 
-            let chunk = TypedArenaChunk::<u8>::new(new_capacity);
+            let chunk = TypedArenaChunk::<u8>::new(new_cap);
             self.ptr.set(chunk.start());
             self.end.set(chunk.end());
             chunks.push(chunk);