x.py fmt after previous deignore

Mark Rousskov 2019-12-24 17:38:22 -05:00
parent 48291a9dda
commit 6891388e66
42 changed files with 9617 additions and 9775 deletions
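Note: the diffs below are the result of running the in-tree formatter (`./x.py fmt`) over files that the previous commit removed from rustfmt's ignore list, so the hunks are style-only changes. As a minimal sketch of the layout rustfmt enforces here, using a hypothetical function that does not appear in the diff: `where` clauses move onto their own lines with one bound per line and a trailing comma, and associated-type bindings gain spaces around `=` (`Item = T` instead of `Item=T`):

use std::fmt::Debug;

// Hypothetical example, not taken from the diff: written in the post-`x.py fmt`
// style, with each `where` bound on its own line and `Item = T` spaced.
fn print_enumerated<I, T>(iter: I)
where
    I: Iterator<Item = T>,
    T: Debug,
{
    for (i, item) in iter.enumerate() {
        println!("{}: {:?}", i, item);
    }
}

fn main() {
    // Before formatting, the same signature would typically have read:
    // fn print_enumerated<I, T>(iter: I) where I: Iterator<Item=T>, T: Debug {
    print_enumerated(["a", "b", "c"].iter());
}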

File diff suppressed because it is too large


@ -1,13 +1,15 @@
// ignore-tidy-filelength
use crate::cmp::{self, Ordering};
use crate::ops::{Add, Try};
use super::super::LoopState;
use super::super::{Chain, Cycle, Copied, Cloned, Enumerate, Filter, FilterMap, Fuse};
use super::super::{Flatten, FlatMap};
use super::super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev};
use super::super::{Zip, Sum, Product, FromIterator};
use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
use super::super::{FlatMap, Flatten};
use super::super::{FromIterator, Product, Sum, Zip};
use super::super::{Inspect, Map, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile};
fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {}
fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
/// An interface for dealing with iterators.
///
@ -20,71 +22,71 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(
on(
_Self="[std::ops::Range<Idx>; 1]",
label="if you meant to iterate between two values, remove the square brackets",
note="`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \
without the brackets: `start..end`"
_Self = "[std::ops::Range<Idx>; 1]",
label = "if you meant to iterate between two values, remove the square brackets",
note = "`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \
without the brackets: `start..end`"
),
on(
_Self="[std::ops::RangeFrom<Idx>; 1]",
label="if you meant to iterate from a value onwards, remove the square brackets",
note="`[start..]` is an array of one `RangeFrom`; you might have meant to have a \
_Self = "[std::ops::RangeFrom<Idx>; 1]",
label = "if you meant to iterate from a value onwards, remove the square brackets",
note = "`[start..]` is an array of one `RangeFrom`; you might have meant to have a \
`RangeFrom` without the brackets: `start..`, keeping in mind that iterating over an \
unbounded iterator will run forever unless you `break` or `return` from within the \
loop"
),
on(
_Self="[std::ops::RangeTo<Idx>; 1]",
label="if you meant to iterate until a value, remove the square brackets and add a \
starting value",
note="`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \
`Range` without the brackets: `0..end`"
_Self = "[std::ops::RangeTo<Idx>; 1]",
label = "if you meant to iterate until a value, remove the square brackets and add a \
starting value",
note = "`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \
`Range` without the brackets: `0..end`"
),
on(
_Self="[std::ops::RangeInclusive<Idx>; 1]",
label="if you meant to iterate between two values, remove the square brackets",
note="`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \
_Self = "[std::ops::RangeInclusive<Idx>; 1]",
label = "if you meant to iterate between two values, remove the square brackets",
note = "`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \
`RangeInclusive` without the brackets: `start..=end`"
),
on(
_Self="[std::ops::RangeToInclusive<Idx>; 1]",
label="if you meant to iterate until a value (including it), remove the square brackets \
and add a starting value",
note="`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \
bounded `RangeInclusive` without the brackets: `0..=end`"
_Self = "[std::ops::RangeToInclusive<Idx>; 1]",
label = "if you meant to iterate until a value (including it), remove the square brackets \
and add a starting value",
note = "`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \
bounded `RangeInclusive` without the brackets: `0..=end`"
),
on(
_Self="std::ops::RangeTo<Idx>",
label="if you meant to iterate until a value, add a starting value",
note="`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
_Self = "std::ops::RangeTo<Idx>",
label = "if you meant to iterate until a value, add a starting value",
note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
bounded `Range`: `0..end`"
),
on(
_Self="std::ops::RangeToInclusive<Idx>",
label="if you meant to iterate until a value (including it), add a starting value",
note="`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
_Self = "std::ops::RangeToInclusive<Idx>",
label = "if you meant to iterate until a value (including it), add a starting value",
note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
to have a bounded `RangeInclusive`: `0..=end`"
),
on(
_Self="&str",
label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
_Self = "&str",
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
_Self="std::string::String",
label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
_Self = "std::string::String",
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
_Self="[]",
label="borrow the array with `&` or call `.iter()` on it to iterate over it",
note="arrays are not iterators, but slices like the following are: `&[1, 2, 3]`"
_Self = "[]",
label = "borrow the array with `&` or call `.iter()` on it to iterate over it",
note = "arrays are not iterators, but slices like the following are: `&[1, 2, 3]`"
),
on(
_Self="{integral}",
note="if you want to iterate between `start` until a value `end`, use the exclusive range \
_Self = "{integral}",
note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
syntax `start..end` or the inclusive range syntax `start..=end`"
),
label="`{Self}` is not an iterator",
message="`{Self}` is not an iterator"
label = "`{Self}` is not an iterator",
message = "`{Self}` is not an iterator"
)]
#[doc(spotlight)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
@ -197,7 +199,9 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn size_hint(&self) -> (usize, Option<usize>) { (0, None) }
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
/// Consumes the iterator, counting the number of iterations and returning it.
///
@ -236,7 +240,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn count(self) -> usize where Self: Sized {
fn count(self) -> usize
where
Self: Sized,
{
#[inline]
fn add1<T>(count: usize, _: T) -> usize {
// Might overflow.
@ -267,7 +274,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn last(self) -> Option<Self::Item> where Self: Sized {
fn last(self) -> Option<Self::Item>
where
Self: Sized,
{
#[inline]
fn some<T>(_: Option<T>, x: T) -> Option<T> {
Some(x)
@ -321,7 +331,9 @@ pub trait Iterator {
#[stable(feature = "rust1", since = "1.0.0")]
fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
for x in self {
if n == 0 { return Some(x) }
if n == 0 {
return Some(x);
}
n -= 1;
}
None
@ -373,7 +385,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
fn step_by(self, step: usize) -> StepBy<Self> where Self: Sized {
fn step_by(self, step: usize) -> StepBy<Self>
where
Self: Sized,
{
StepBy::new(self, step)
}
@ -443,8 +458,10 @@ pub trait Iterator {
/// [`OsStr`]: ../../std/ffi/struct.OsStr.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where
Self: Sized, U: IntoIterator<Item=Self::Item>,
fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator<Item = Self::Item>,
{
Chain::new(self, other.into_iter())
}
@ -521,8 +538,10 @@ pub trait Iterator {
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where
Self: Sized, U: IntoIterator
fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator,
{
Zip::new(self, other.into_iter())
}
@ -578,8 +597,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn map<B, F>(self, f: F) -> Map<Self, F> where
Self: Sized, F: FnMut(Self::Item) -> B,
fn map<B, F>(self, f: F) -> Map<Self, F>
where
Self: Sized,
F: FnMut(Self::Item) -> B,
{
Map::new(self, f)
}
@ -621,8 +642,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_for_each", since = "1.21.0")]
fn for_each<F>(self, f: F) where
Self: Sized, F: FnMut(Self::Item),
fn for_each<F>(self, f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
#[inline]
fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
@ -694,8 +717,10 @@ pub trait Iterator {
/// of these layers.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn filter<P>(self, predicate: P) -> Filter<Self, P> where
Self: Sized, P: FnMut(&Self::Item) -> bool,
fn filter<P>(self, predicate: P) -> Filter<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
Filter::new(self, predicate)
}
@ -751,8 +776,10 @@ pub trait Iterator {
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where
Self: Sized, F: FnMut(Self::Item) -> Option<B>,
fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F>
where
Self: Sized,
F: FnMut(Self::Item) -> Option<B>,
{
FilterMap::new(self, f)
}
@ -797,7 +824,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn enumerate(self) -> Enumerate<Self> where Self: Sized {
fn enumerate(self) -> Enumerate<Self>
where
Self: Sized,
{
Enumerate::new(self)
}
@ -843,7 +873,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn peekable(self) -> Peekable<Self> where Self: Sized {
fn peekable(self) -> Peekable<Self>
where
Self: Sized,
{
Peekable::new(self)
}
@ -904,8 +937,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where
Self: Sized, P: FnMut(&Self::Item) -> bool,
fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
SkipWhile::new(self, predicate)
}
@ -983,8 +1018,10 @@ pub trait Iterator {
/// the iteration should stop, but wasn't placed back into the iterator.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where
Self: Sized, P: FnMut(&Self::Item) -> bool,
fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
TakeWhile::new(self, predicate)
}
@ -1008,7 +1045,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn skip(self, n: usize) -> Skip<Self> where Self: Sized {
fn skip(self, n: usize) -> Skip<Self>
where
Self: Sized,
{
Skip::new(self, n)
}
@ -1040,7 +1080,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn take(self, n: usize) -> Take<Self> where Self: Sized, {
fn take(self, n: usize) -> Take<Self>
where
Self: Sized,
{
Take::new(self, n)
}
@ -1084,7 +1127,9 @@ pub trait Iterator {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option<B>,
where
Self: Sized,
F: FnMut(&mut St, Self::Item) -> Option<B>,
{
Scan::new(self, initial_state, f)
}
@ -1122,7 +1167,10 @@ pub trait Iterator {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U,
where
Self: Sized,
U: IntoIterator,
F: FnMut(Self::Item) -> U,
{
FlatMap::new(self, f)
}
@ -1191,7 +1239,10 @@ pub trait Iterator {
#[inline]
#[stable(feature = "iterator_flatten", since = "1.29.0")]
fn flatten(self) -> Flatten<Self>
where Self: Sized, Self::Item: IntoIterator {
where
Self: Sized,
Self::Item: IntoIterator,
{
Flatten::new(self)
}
@ -1251,7 +1302,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn fuse(self) -> Fuse<Self> where Self: Sized {
fn fuse(self) -> Fuse<Self>
where
Self: Sized,
{
Fuse::new(self)
}
@ -1332,8 +1386,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn inspect<F>(self, f: F) -> Inspect<Self, F> where
Self: Sized, F: FnMut(&Self::Item),
fn inspect<F>(self, f: F) -> Inspect<Self, F>
where
Self: Sized,
F: FnMut(&Self::Item),
{
Inspect::new(self, f)
}
@ -1375,7 +1431,12 @@ pub trait Iterator {
/// assert_eq!(iter.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self where Self: Sized { self }
fn by_ref(&mut self) -> &mut Self
where
Self: Sized,
{
self
}
/// Transforms an iterator into a collection.
///
@ -1490,7 +1551,10 @@ pub trait Iterator {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized {
fn collect<B: FromIterator<Self::Item>>(self) -> B
where
Self: Sized,
{
FromIterator::from_iter(self)
}
@ -1520,10 +1584,11 @@ pub trait Iterator {
/// assert_eq!(odd, vec![1, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn partition<B, F>(self, f: F) -> (B, B) where
fn partition<B, F>(self, f: F) -> (B, B)
where
Self: Sized,
B: Default + Extend<Self::Item>,
F: FnMut(&Self::Item) -> bool
F: FnMut(&Self::Item) -> bool,
{
#[inline]
fn extend<'a, T, B: Extend<T>>(
@ -1597,9 +1662,7 @@ pub trait Iterator {
}
#[inline]
fn is_true<T>(
predicate: &mut impl FnMut(&T) -> bool
) -> impl FnMut(&&mut T) -> bool + '_ {
fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ {
move |x| predicate(&**x)
}
@ -1702,8 +1765,11 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_try_fold", since = "1.27.0")]
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where
Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Ok = B>,
{
let mut accum = init;
while let Some(x) = self.next() {
@ -1741,8 +1807,11 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_try_fold", since = "1.27.0")]
fn try_for_each<F, R>(&mut self, f: F) -> R where
Self: Sized, F: FnMut(Self::Item) -> R, R: Try<Ok=()>
fn try_for_each<F, R>(&mut self, f: F) -> R
where
Self: Sized,
F: FnMut(Self::Item) -> R,
R: Try<Ok = ()>,
{
#[inline]
fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
@ -1821,8 +1890,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn fold<B, F>(mut self, init: B, f: F) -> B where
Self: Sized, F: FnMut(B, Self::Item) -> B,
fn fold<B, F>(mut self, init: B, f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
#[inline]
fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
@ -1871,14 +1942,15 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn all<F>(&mut self, f: F) -> bool where
Self: Sized, F: FnMut(Self::Item) -> bool
fn all<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> LoopState<(), ()> {
move |(), x| {
if f(x) { LoopState::Continue(()) }
else { LoopState::Break(()) }
if f(x) { LoopState::Continue(()) } else { LoopState::Break(()) }
}
}
self.try_fold((), check(f)) == LoopState::Continue(())
@ -1923,15 +1995,15 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn any<F>(&mut self, f: F) -> bool where
fn any<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> LoopState<(), ()> {
move |(), x| {
if f(x) { LoopState::Break(()) }
else { LoopState::Continue(()) }
if f(x) { LoopState::Break(()) } else { LoopState::Continue(()) }
}
}
@ -1982,17 +2054,17 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn find<P>(&mut self, predicate: P) -> Option<Self::Item> where
fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
#[inline]
fn check<T>(
mut predicate: impl FnMut(&T) -> bool
mut predicate: impl FnMut(&T) -> bool,
) -> impl FnMut((), T) -> LoopState<(), T> {
move |(), x| {
if predicate(&x) { LoopState::Break(x) }
else { LoopState::Continue(()) }
if predicate(&x) { LoopState::Break(x) } else { LoopState::Continue(()) }
}
}
@ -2016,7 +2088,8 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_find_map", since = "1.30.0")]
fn find_map<B, F>(&mut self, f: F) -> Option<B> where
fn find_map<B, F>(&mut self, f: F) -> Option<B>
where
Self: Sized,
F: FnMut(Self::Item) -> Option<B>,
{
@ -2087,7 +2160,8 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn position<P>(&mut self, predicate: P) -> Option<usize> where
fn position<P>(&mut self, predicate: P) -> Option<usize>
where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
@ -2097,8 +2171,7 @@ pub trait Iterator {
) -> impl FnMut(usize, T) -> LoopState<usize, usize> {
// The addition might panic on overflow
move |i, x| {
if predicate(x) { LoopState::Break(i) }
else { LoopState::Continue(Add::add(i, 1)) }
if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(Add::add(i, 1)) }
}
}
@ -2145,9 +2218,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn rposition<P>(&mut self, predicate: P) -> Option<usize> where
fn rposition<P>(&mut self, predicate: P) -> Option<usize>
where
P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator
Self: Sized + ExactSizeIterator + DoubleEndedIterator,
{
// No need for an overflow check here, because `ExactSizeIterator`
// implies that the number of elements fits into a `usize`.
@ -2157,8 +2231,7 @@ pub trait Iterator {
) -> impl FnMut(usize, T) -> LoopState<usize, usize> {
move |i, x| {
let i = i - 1;
if predicate(x) { LoopState::Break(i) }
else { LoopState::Continue(i) }
if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(i) }
}
}
@ -2186,7 +2259,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn max(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord
fn max(self) -> Option<Self::Item>
where
Self: Sized,
Self::Item: Ord,
{
self.max_by(Ord::cmp)
}
@ -2211,7 +2287,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn min(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord
fn min(self) -> Option<Self::Item>
where
Self: Sized,
Self::Item: Ord,
{
self.min_by(Ord::cmp)
}
@ -2233,7 +2312,9 @@ pub trait Iterator {
#[inline]
#[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
where Self: Sized, F: FnMut(&Self::Item) -> B,
where
Self: Sized,
F: FnMut(&Self::Item) -> B,
{
#[inline]
fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
@ -2266,7 +2347,9 @@ pub trait Iterator {
#[inline]
#[stable(feature = "iter_max_by", since = "1.15.0")]
fn max_by<F>(self, compare: F) -> Option<Self::Item>
where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering,
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
#[inline]
fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
@ -2293,7 +2376,9 @@ pub trait Iterator {
#[inline]
#[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
where Self: Sized, F: FnMut(&Self::Item) -> B,
where
Self: Sized,
F: FnMut(&Self::Item) -> B,
{
#[inline]
fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
@ -2326,7 +2411,9 @@ pub trait Iterator {
#[inline]
#[stable(feature = "iter_min_by", since = "1.15.0")]
fn min_by<F>(self, compare: F) -> Option<Self::Item>
where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering,
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
#[inline]
fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
@ -2336,7 +2423,6 @@ pub trait Iterator {
fold1(self, fold(compare))
}
/// Reverses an iterator's direction.
///
/// Usually, iterators iterate from left to right. After using `rev()`,
@ -2362,7 +2448,10 @@ pub trait Iterator {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator {
fn rev(self) -> Rev<Self>
where
Self: Sized + DoubleEndedIterator,
{
Rev::new(self)
}
@ -2389,10 +2478,11 @@ pub trait Iterator {
/// assert_eq!(right, [2, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where
fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
where
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
Self: Sized + Iterator<Item=(A, B)>,
Self: Sized + Iterator<Item = (A, B)>,
{
fn extend<'a, A, B>(
ts: &'a mut impl Extend<A>,
@ -2434,7 +2524,9 @@ pub trait Iterator {
/// ```
#[stable(feature = "iter_copied", since = "1.36.0")]
fn copied<'a, T: 'a>(self) -> Copied<Self>
where Self: Sized + Iterator<Item=&'a T>, T: Copy
where
Self: Sized + Iterator<Item = &'a T>,
T: Copy,
{
Copied::new(self)
}
@ -2463,7 +2555,9 @@ pub trait Iterator {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn cloned<'a, T: 'a>(self) -> Cloned<Self>
where Self: Sized + Iterator<Item=&'a T>, T: Clone
where
Self: Sized + Iterator<Item = &'a T>,
T: Clone,
{
Cloned::new(self)
}
@ -2495,7 +2589,10 @@ pub trait Iterator {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
fn cycle(self) -> Cycle<Self> where Self: Sized + Clone {
fn cycle(self) -> Cycle<Self>
where
Self: Sized + Clone,
{
Cycle::new(self)
}
@ -2523,8 +2620,9 @@ pub trait Iterator {
/// ```
#[stable(feature = "iter_arith", since = "1.11.0")]
fn sum<S>(self) -> S
where Self: Sized,
S: Sum<Self::Item>,
where
Self: Sized,
S: Sum<Self::Item>,
{
Sum::sum(self)
}
@ -2551,8 +2649,9 @@ pub trait Iterator {
/// ```
#[stable(feature = "iter_arith", since = "1.11.0")]
fn product<P>(self) -> P
where Self: Sized,
P: Product<Self::Item>,
where
Self: Sized,
P: Product<Self::Item>,
{
Product::product(self)
}
@ -2609,11 +2708,13 @@ pub trait Iterator {
loop {
let x = match self.next() {
None => if other.next().is_none() {
return Ordering::Equal
} else {
return Ordering::Less
},
None => {
if other.next().is_none() {
return Ordering::Equal;
} else {
return Ordering::Less;
}
}
Some(val) => val,
};
@ -2692,11 +2793,13 @@ pub trait Iterator {
loop {
let x = match self.next() {
None => if other.next().is_none() {
return Some(Ordering::Equal)
} else {
return Some(Ordering::Less)
},
None => {
if other.next().is_none() {
return Some(Ordering::Equal);
} else {
return Some(Ordering::Less);
}
}
Some(val) => val,
};
@ -2782,7 +2885,8 @@ pub trait Iterator {
/// assert_eq!([1].iter().ne([1, 2].iter()), true);
/// ```
#[stable(feature = "iter_order", since = "1.5.0")]
fn ne<I>(self, other: I) -> bool where
fn ne<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialEq<I::Item>,
Self: Sized,
@ -2801,7 +2905,8 @@ pub trait Iterator {
/// assert_eq!([1, 2].iter().lt([1].iter()), false);
/// ```
#[stable(feature = "iter_order", since = "1.5.0")]
fn lt<I>(self, other: I) -> bool where
fn lt<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialOrd<I::Item>,
Self: Sized,
@ -2820,7 +2925,8 @@ pub trait Iterator {
/// assert_eq!([1, 2].iter().le([1].iter()), false);
/// ```
#[stable(feature = "iter_order", since = "1.5.0")]
fn le<I>(self, other: I) -> bool where
fn le<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialOrd<I::Item>,
Self: Sized,
@ -2842,7 +2948,8 @@ pub trait Iterator {
/// assert_eq!([1, 2].iter().gt([1].iter()), true);
/// ```
#[stable(feature = "iter_order", since = "1.5.0")]
fn gt<I>(self, other: I) -> bool where
fn gt<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialOrd<I::Item>,
Self: Sized,
@ -2861,7 +2968,8 @@ pub trait Iterator {
/// assert_eq!([1, 2].iter().ge([1].iter()), true);
/// ```
#[stable(feature = "iter_order", since = "1.5.0")]
fn ge<I>(self, other: I) -> bool where
fn ge<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialOrd<I::Item>,
Self: Sized,
@ -2925,7 +3033,7 @@ pub trait Iterator {
fn is_sorted_by<F>(mut self, mut compare: F) -> bool
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>
F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
{
let mut last = match self.next() {
Some(e) => e,
@ -2965,7 +3073,7 @@ pub trait Iterator {
where
Self: Sized,
F: FnMut(Self::Item) -> K,
K: PartialOrd
K: PartialOrd,
{
self.map(f).is_sorted()
}
@ -2974,9 +3082,9 @@ pub trait Iterator {
/// Fold an iterator without having to provide an initial value.
#[inline]
fn fold1<I, F>(mut it: I, f: F) -> Option<I::Item>
where
I: Iterator,
F: FnMut(I::Item, I::Item) -> I::Item,
where
I: Iterator,
F: FnMut(I::Item, I::Item) -> I::Item,
{
// start with the first element as our selection. This avoids
// having to use `Option`s inside the loop, translating to a
@ -2988,8 +3096,12 @@ fn fold1<I, F>(mut it: I, f: F) -> Option<I::Item>
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for &mut I {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> { (**self).next() }
fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
(**self).nth(n)
}

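The hunks above reformat the provided methods of `Iterator` (`size_hint`, `count`, `try_fold`, `rposition`, and so on) without changing behavior. For reference, a small self-contained usage example of two of the touched methods, using only the stable standard-library API:

fn main() {
    let data = [1i32, 2, 3, 4];

    // `try_fold` threads an accumulator through the iterator and short-circuits
    // on the first `None`; here it performs an overflow-checked sum.
    let sum: Option<i32> = data.iter().try_fold(0i32, |acc, &x| acc.checked_add(x));
    assert_eq!(sum, Some(10));

    // `rposition` searches from the back but still reports the index from the
    // front; it requires `DoubleEndedIterator + ExactSizeIterator`, which slice
    // iterators implement.
    let last_even = data.iter().rposition(|&x| x % 2 == 0);
    assert_eq!(last_even, Some(3));

    println!("sum = {:?}, last even at index {:?}", sum, last_even);
}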

@ -42,8 +42,6 @@ impl From<Option<(usize, usize)>> for Step {
}
}
// ignore-tidy-linelength
// FIXME(Manishearth) these tests focus on single-character searching (CharSearcher)
// and on next()/next_match(), not next_reject(). This is because
// the memchr changes make next_match() for single chars complex, but next_reject()

File diff suppressed because it is too large


@ -1,13 +1,13 @@
//! Error Reporting for Anonymous Region Lifetime Errors
//! where both the regions are anonymous.
use crate::hir::Node;
use crate::hir::{Expr, ExprKind::Closure};
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
use crate::infer::lexical_region_resolve::RegionResolutionError::SubSupConflict;
use crate::infer::SubregionOrigin;
use crate::ty::RegionKind;
use crate::hir::{Expr, ExprKind::Closure};
use crate::hir::Node;
use crate::util::common::ErrorReported;
use crate::infer::lexical_region_resolve::RegionResolutionError::SubSupConflict;
impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
/// Print the error message for lifetime errors when binding escapes a closure.
@ -36,69 +36,75 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
/// ...because it cannot outlive this closure
/// ```
pub(super) fn try_report_outlives_closure(&self) -> Option<ErrorReported> {
if let Some(SubSupConflict(_,
origin,
ref sub_origin,
_,
ref sup_origin,
sup_region)) = self.error {
if let Some(SubSupConflict(_, origin, ref sub_origin, _, ref sup_origin, sup_region)) =
self.error
{
// #45983: when trying to assign the contents of an argument to a binding outside of a
// closure, provide a specific message pointing this out.
if let (&SubregionOrigin::BindingTypeIsNotValidAtDecl(ref external_span),
&RegionKind::ReFree(ref free_region)) = (&sub_origin, sup_region) {
if let (
&SubregionOrigin::BindingTypeIsNotValidAtDecl(ref external_span),
&RegionKind::ReFree(ref free_region),
) = (&sub_origin, sup_region)
{
let hir = &self.tcx().hir();
if let Some(hir_id) = hir.as_local_hir_id(free_region.scope) {
if let Node::Expr(Expr {
kind: Closure(_, _, _, closure_span, None),
..
}) = hir.get(hir_id) {
if let Node::Expr(Expr { kind: Closure(_, _, _, closure_span, None), .. }) =
hir.get(hir_id)
{
let sup_sp = sup_origin.span();
let origin_sp = origin.span();
let mut err = self.tcx().sess.struct_span_err(
sup_sp,
"borrowed data cannot be stored outside of its closure");
"borrowed data cannot be stored outside of its closure",
);
err.span_label(sup_sp, "cannot be stored outside of its closure");
if origin_sp == sup_sp || origin_sp.contains(sup_sp) {
// // sup_sp == origin.span():
//
// let mut x = None;
// ----- borrowed data cannot be stored into here...
// with_int(|y| x = Some(y));
// --- ^ cannot be stored outside of its closure
// |
// ...because it cannot outlive this closure
//
// // origin.contains(&sup_sp):
//
// let mut f: Option<&u32> = None;
// ----- borrowed data cannot be stored into here...
// closure_expecting_bound(|x: &'x u32| {
// ------------ ... because it cannot outlive this closure
// f = Some(x);
// ^ cannot be stored outside of its closure
err.span_label(*external_span,
"borrowed data cannot be stored into here...");
err.span_label(*closure_span,
"...because it cannot outlive this closure");
// // sup_sp == origin.span():
//
// let mut x = None;
// ----- borrowed data cannot be stored into here...
// with_int(|y| x = Some(y));
// --- ^ cannot be stored outside of its closure
// |
// ...because it cannot outlive this closure
//
// // origin.contains(&sup_sp):
//
// let mut f: Option<&u32> = None;
// ----- borrowed data cannot be stored into here...
// closure_expecting_bound(|x: &'x u32| {
// ------------ ... because it cannot outlive this closure
// f = Some(x);
// ^ cannot be stored outside of its closure
err.span_label(
*external_span,
"borrowed data cannot be stored into here...",
);
err.span_label(
*closure_span,
"...because it cannot outlive this closure",
);
} else {
// FIXME: the wording for this case could be much improved
//
// let mut lines_to_use: Vec<&CrateId> = Vec::new();
// - cannot infer an appropriate lifetime...
// let push_id = |installed_id: &CrateId| {
// ------- ------------------------ borrowed data cannot outlive this closure
// |
// ...so that variable is valid at time of its declaration
// lines_to_use.push(installed_id);
// ^^^^^^^^^^^^ cannot be stored outside of its closure
err.span_label(origin_sp,
"cannot infer an appropriate lifetime...");
err.span_label(*external_span,
"...so that variable is valid at time of its \
declaration");
err.span_label(*closure_span,
"borrowed data cannot outlive this closure");
// FIXME: the wording for this case could be much improved
//
// let mut lines_to_use: Vec<&CrateId> = Vec::new();
// - cannot infer an appropriate lifetime...
// let push_id = |installed_id: &CrateId| {
// ------- ------------------------ borrowed data cannot outlive this closure
// |
// ...so that variable is valid at time of its declaration
// lines_to_use.push(installed_id);
// ^^^^^^^^^^^^ cannot be stored outside of its closure
err.span_label(origin_sp, "cannot infer an appropriate lifetime...");
err.span_label(
*external_span,
"...so that variable is valid at time of its \
declaration",
);
err.span_label(
*closure_span,
"borrowed data cannot outlive this closure",
);
}
err.emit();
return Some(ErrorReported);


@ -23,26 +23,27 @@ pub use self::LintSource::*;
use rustc_data_structures::sync;
use crate::hir;
use crate::hir::def_id::{CrateNum, LOCAL_CRATE};
use crate::hir::intravisit;
use crate::hir;
use crate::lint::builtin::BuiltinLintDiagnostics;
use crate::session::{Session, DiagnosticMessageId};
use crate::ty::TyCtxt;
use crate::session::{DiagnosticMessageId, Session};
use crate::ty::query::Providers;
use crate::ty::TyCtxt;
use crate::util::nodemap::NodeMap;
use errors::{DiagnosticBuilder, DiagnosticId};
use syntax::ast;
use syntax::source_map::{MultiSpan, ExpnKind, DesugaringKind};
use syntax::source_map::{DesugaringKind, ExpnKind, MultiSpan};
use syntax::symbol::Symbol;
use syntax_pos::hygiene::MacroKind;
use syntax_pos::Span;
pub use crate::lint::context::{LateContext, EarlyContext, LintContext, LintStore,
check_crate, check_ast_crate, late_lint_mod, CheckLintNameResult,
BufferedEarlyLint,};
pub use crate::lint::context::{
check_ast_crate, check_crate, late_lint_mod, BufferedEarlyLint, CheckLintNameResult,
EarlyContext, LateContext, LintContext, LintStore,
};
pub use rustc_session::lint::{Lint, LintId, Level, FutureIncompatibleInfo};
pub use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintId};
/// Declares a static `LintArray` and return it as an expression.
#[macro_export]
@ -351,8 +352,8 @@ macro_rules! declare_combined_early_lint_pass {
/// A lint pass boxed up as a trait object.
pub type EarlyLintPassObject = Box<dyn EarlyLintPass + sync::Send + sync::Sync + 'static>;
pub type LateLintPassObject = Box<dyn for<'a, 'tcx> LateLintPass<'a, 'tcx> + sync::Send
+ sync::Sync + 'static>;
pub type LateLintPassObject =
Box<dyn for<'a, 'tcx> LateLintPass<'a, 'tcx> + sync::Send + sync::Sync + 'static>;
/// How a lint level was set.
#[derive(Clone, Copy, PartialEq, Eq, HashStable)]
@ -371,11 +372,11 @@ pub enum LintSource {
pub type LevelSource = (Level, LintSource);
pub mod builtin;
pub mod internal;
mod context;
pub mod internal;
mod levels;
pub use self::levels::{LintLevelSets, LintLevelMap};
pub use self::levels::{LintLevelMap, LintLevelSets};
#[derive(Default)]
pub struct LintBuffer {
@ -383,18 +384,20 @@ pub struct LintBuffer {
}
impl LintBuffer {
pub fn add_lint(&mut self,
lint: &'static Lint,
id: ast::NodeId,
sp: MultiSpan,
msg: &str,
diagnostic: BuiltinLintDiagnostics) {
pub fn add_lint(
&mut self,
lint: &'static Lint,
id: ast::NodeId,
sp: MultiSpan,
msg: &str,
diagnostic: BuiltinLintDiagnostics,
) {
let early_lint = BufferedEarlyLint {
lint_id: LintId::of(lint),
ast_id: id,
span: sp,
msg: msg.to_string(),
diagnostic
diagnostic,
};
let arr = self.map.entry(id).or_default();
if !arr.contains(&early_lint) {
@ -428,22 +431,20 @@ impl LintBuffer {
}
}
pub fn struct_lint_level<'a>(sess: &'a Session,
lint: &'static Lint,
level: Level,
src: LintSource,
span: Option<MultiSpan>,
msg: &str)
-> DiagnosticBuilder<'a>
{
pub fn struct_lint_level<'a>(
sess: &'a Session,
lint: &'static Lint,
level: Level,
src: LintSource,
span: Option<MultiSpan>,
msg: &str,
) -> DiagnosticBuilder<'a> {
let mut err = match (level, span) {
(Level::Allow, _) => return sess.diagnostic().struct_dummy(),
(Level::Warn, Some(span)) => sess.struct_span_warn(span, msg),
(Level::Warn, None) => sess.struct_warn(msg),
(Level::Deny, Some(span)) |
(Level::Forbid, Some(span)) => sess.struct_span_err(span, msg),
(Level::Deny, None) |
(Level::Forbid, None) => sess.struct_err(msg),
(Level::Deny, Some(span)) | (Level::Forbid, Some(span)) => sess.struct_span_err(span, msg),
(Level::Deny, None) | (Level::Forbid, None) => sess.struct_err(msg),
};
// Check for future incompatibility lints and issue a stronger warning.
@ -475,7 +476,8 @@ pub fn struct_lint_level<'a>(sess: &'a Session,
sess.diag_note_once(
&mut err,
DiagnosticMessageId::from(lint),
&format!("`#[{}({})]` on by default", level.as_str(), name));
&format!("`#[{}({})]` on by default", level.as_str(), name),
);
}
LintSource::CommandLine(lint_flag_val) => {
let flag = match level {
@ -489,29 +491,43 @@ pub fn struct_lint_level<'a>(sess: &'a Session,
sess.diag_note_once(
&mut err,
DiagnosticMessageId::from(lint),
&format!("requested on the command line with `{} {}`",
flag, hyphen_case_lint_name));
&format!(
"requested on the command line with `{} {}`",
flag, hyphen_case_lint_name
),
);
} else {
let hyphen_case_flag_val = lint_flag_val.as_str().replace("_", "-");
sess.diag_note_once(
&mut err,
DiagnosticMessageId::from(lint),
&format!("`{} {}` implied by `{} {}`",
flag, hyphen_case_lint_name, flag,
hyphen_case_flag_val));
&format!(
"`{} {}` implied by `{} {}`",
flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
),
);
}
}
LintSource::Node(lint_attr_name, src, reason) => {
if let Some(rationale) = reason {
err.note(&rationale.as_str());
}
sess.diag_span_note_once(&mut err, DiagnosticMessageId::from(lint),
src, "lint level defined here");
sess.diag_span_note_once(
&mut err,
DiagnosticMessageId::from(lint),
src,
"lint level defined here",
);
if lint_attr_name.as_str() != name {
let level_str = level.as_str();
sess.diag_note_once(&mut err, DiagnosticMessageId::from(lint),
&format!("`#[{}({})]` implied by `#[{}({})]`",
level_str, name, level_str, lint_attr_name));
sess.diag_note_once(
&mut err,
DiagnosticMessageId::from(lint),
&format!(
"`#[{}({})]` implied by `#[{}({})]`",
level_str, name, level_str, lint_attr_name
),
);
}
}
}
@ -519,8 +535,7 @@ pub fn struct_lint_level<'a>(sess: &'a Session,
err.code(DiagnosticId::Lint(name));
if let Some(future_incompatible) = future_incompatible {
const STANDARD_MESSAGE: &str =
"this was previously accepted by the compiler but is being phased out; \
const STANDARD_MESSAGE: &str = "this was previously accepted by the compiler but is being phased out; \
it will become a hard error";
let explanation = if lint_id == LintId::of(builtin::UNSTABLE_NAME_COLLISIONS) {
@ -536,13 +551,12 @@ pub fn struct_lint_level<'a>(sess: &'a Session,
} else {
format!("{} in a future release!", STANDARD_MESSAGE)
};
let citation = format!("for more information, see {}",
future_incompatible.reference);
let citation = format!("for more information, see {}", future_incompatible.reference);
err.warn(&explanation);
err.note(&citation);
}
return err
return err;
}
pub fn maybe_lint_level_root(tcx: TyCtxt<'_>, id: hir::HirId) -> bool {
@ -563,7 +577,7 @@ fn lint_levels(tcx: TyCtxt<'_>, cnum: CrateNum) -> &LintLevelMap {
let push = builder.levels.push(&krate.attrs, &store);
builder.levels.register_id(hir::CRATE_HIR_ID);
for macro_def in krate.exported_macros {
builder.levels.register_id(macro_def.hir_id);
builder.levels.register_id(macro_def.hir_id);
}
intravisit::walk_crate(&mut builder, krate);
builder.levels.pop(push);
@ -578,11 +592,9 @@ struct LintLevelMapBuilder<'a, 'tcx> {
}
impl LintLevelMapBuilder<'_, '_> {
fn with_lint_attrs<F>(&mut self,
id: hir::HirId,
attrs: &[ast::Attribute],
f: F)
where F: FnOnce(&mut Self)
fn with_lint_attrs<F>(&mut self, id: hir::HirId, attrs: &[ast::Attribute], f: F)
where
F: FnOnce(&mut Self),
{
let push = self.levels.push(attrs, self.store);
if push.changed {
@ -628,10 +640,12 @@ impl intravisit::Visitor<'tcx> for LintLevelMapBuilder<'_, 'tcx> {
})
}
fn visit_variant(&mut self,
v: &'tcx hir::Variant<'tcx>,
g: &'tcx hir::Generics,
item_id: hir::HirId) {
fn visit_variant(
&mut self,
v: &'tcx hir::Variant<'tcx>,
g: &'tcx hir::Generics,
item_id: hir::HirId,
) {
self.with_lint_attrs(v.id, &v.attrs, |builder| {
intravisit::walk_variant(builder, v, g, item_id);
})

File diff suppressed because it is too large


@ -5,73 +5,72 @@
#[allow(dead_code)]
pub mod auto_trait;
mod chalk_fulfill;
pub mod codegen;
mod coherence;
pub mod error_reporting;
mod engine;
pub mod error_reporting;
mod fulfill;
mod project;
mod object_safety;
mod on_unimplemented;
mod project;
pub mod query;
mod select;
mod specialize;
mod structural_impls;
pub mod codegen;
mod util;
pub mod query;
use chalk_engine;
use crate::hir;
use crate::hir::def_id::DefId;
use crate::infer::{InferCtxt, SuppressRegionErrors};
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::{InferCtxt, SuppressRegionErrors};
use crate::middle::region;
use crate::mir::interpret::ErrorHandled;
use crate::ty::error::{ExpectedFound, TypeError};
use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::{self, AdtKind, GenericParamDefKind, List, ToPredicate, Ty, TyCtxt};
use crate::util::common::ErrorReported;
use chalk_engine;
use rustc_macros::HashStable;
use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate};
use crate::ty::error::{ExpectedFound, TypeError};
use crate::ty::fold::{TypeFolder, TypeFoldable, TypeVisitor};
use crate::util::common::ErrorReported;
use std::fmt::Debug;
use std::rc::Rc;
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
pub use self::Vtable::*;
pub use self::ObligationCauseCode::*;
pub use self::SelectionError::*;
pub use self::Vtable::*;
pub use self::coherence::{add_placeholder_note, orphan_check, overlapping_impls};
pub use self::coherence::{OrphanCheckErr, OverlapResult};
pub use self::engine::{TraitEngine, TraitEngineExt};
pub use self::fulfill::{FulfillmentContext, PendingPredicateObligation};
pub use self::object_safety::MethodViolationCode;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::on_unimplemented::{OnUnimplementedDirective, OnUnimplementedNote};
pub use self::project::MismatchedProjectionTypes;
pub use self::project::{normalize, normalize_projection_type, poly_project_and_unify_type};
pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, Reveal, Normalized};
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::on_unimplemented::{OnUnimplementedDirective, OnUnimplementedNote};
pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
pub use self::project::{Normalized, ProjectionCache, ProjectionCacheSnapshot, Reveal};
pub use self::select::{EvaluationCache, SelectionCache, SelectionContext};
pub use self::select::{EvaluationResult, IntercrateAmbiguityCause, OverflowError};
pub use self::specialize::{OverlapError, specialization_graph, translate_substs};
pub use self::specialize::find_associated_item;
pub use self::specialize::specialization_graph::FutureCompatOverlapError;
pub use self::specialize::specialization_graph::FutureCompatOverlapErrorKind;
pub use self::engine::{TraitEngine, TraitEngineExt};
pub use self::specialize::{specialization_graph, translate_substs, OverlapError};
pub use self::util::{elaborate_predicates, elaborate_trait_ref, elaborate_trait_refs};
pub use self::util::{
supertraits, supertrait_def_ids, transitive_bounds, Supertraits, SupertraitDefIds,
};
pub use self::util::{expand_trait_aliases, TraitAliasExpander};
pub use self::util::{
supertrait_def_ids, supertraits, transitive_bounds, SupertraitDefIds, Supertraits,
};
pub use self::chalk_fulfill::{
CanonicalGoal as ChalkCanonicalGoal,
FulfillmentContext as ChalkFulfillmentContext
CanonicalGoal as ChalkCanonicalGoal, FulfillmentContext as ChalkFulfillmentContext,
};
pub use self::ObligationCauseCode::*;
pub use self::FulfillmentErrorCode::*;
pub use self::ObligationCauseCode::*;
pub use self::SelectionError::*;
pub use self::Vtable::*;
@ -79,7 +78,7 @@ pub use self::Vtable::*;
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum IntercrateMode {
Issue43355,
Fixed
Fixed,
}
/// The mode that trait queries run in.
@ -140,19 +139,19 @@ pub struct ObligationCause<'tcx> {
/// information.
pub body_id: hir::HirId,
pub code: ObligationCauseCode<'tcx>
pub code: ObligationCauseCode<'tcx>,
}
impl<'tcx> ObligationCause<'tcx> {
pub fn span(&self, tcx: TyCtxt<'tcx>) -> Span {
match self.code {
ObligationCauseCode::CompareImplMethodObligation { .. } |
ObligationCauseCode::MainFunctionType |
ObligationCauseCode::StartFunctionType => {
tcx.sess.source_map().def_span(self.span)
}
ObligationCauseCode::MatchExpressionArm(
box MatchExpressionArmCause { arm_span, .. }) => arm_span,
ObligationCauseCode::CompareImplMethodObligation { .. }
| ObligationCauseCode::MainFunctionType
| ObligationCauseCode::StartFunctionType => tcx.sess.source_map().def_span(self.span),
ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
arm_span,
..
}) => arm_span,
_ => self.span,
}
}
@ -189,7 +188,10 @@ pub enum ObligationCauseCode<'tcx> {
ObjectCastObligation(/* Object type */ Ty<'tcx>),
/// Obligation incurred due to a coercion.
Coercion { source: Ty<'tcx>, target: Ty<'tcx> },
Coercion {
source: Ty<'tcx>,
target: Ty<'tcx>,
},
/// Various cases where expressions must be `Sized` / `Copy` / etc.
/// `L = X` implies that `L` is `Sized`.
@ -211,7 +213,10 @@ pub enum ObligationCauseCode<'tcx> {
RepeatVec(bool),
/// Types of fields (other than the last, except for packed structs) in a struct must be sized.
FieldSized { adt_kind: AdtKind, last: bool },
FieldSized {
adt_kind: AdtKind,
last: bool,
},
/// Constant expressions must be sized.
ConstSized,
@ -245,7 +250,10 @@ pub enum ObligationCauseCode<'tcx> {
MatchExpressionArm(Box<MatchExpressionArmCause<'tcx>>),
/// Computing common supertype in the pattern guard for the arms of a match expression
MatchExpressionArmPattern { span: Span, ty: Ty<'tcx> },
MatchExpressionArmPattern {
span: Span,
ty: Ty<'tcx>,
},
/// Constants in patterns must have `Structural` type.
ConstPatternStructural,
@ -322,7 +330,7 @@ pub struct DerivedObligationCause<'tcx> {
parent_trait_ref: ty::PolyTraitRef<'tcx>,
/// The parent trait had this cause.
parent_code: Rc<ObligationCauseCode<'tcx>>
parent_code: Rc<ObligationCauseCode<'tcx>>,
}
pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
@ -415,7 +423,7 @@ impl<'tcx> GoalKind<'tcx> {
Some(p) => p.into_goal(),
None => GoalKind::Quantified(
QuantifierKind::Universal,
domain_goal.map_bound(|p| tcx.mk_goal(p.into_goal()))
domain_goal.map_bound(|p| tcx.mk_goal(p.into_goal())),
),
}
}
@ -474,10 +482,7 @@ pub struct Environment<'tcx> {
impl Environment<'tcx> {
pub fn with<G>(self, goal: G) -> InEnvironment<'tcx, G> {
InEnvironment {
environment: self,
goal,
}
InEnvironment { environment: self, goal }
}
}
@ -490,12 +495,14 @@ pub struct InEnvironment<'tcx, G> {
pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
#[derive(Clone,Debug,TypeFoldable)]
#[derive(Clone, Debug, TypeFoldable)]
pub enum SelectionError<'tcx> {
Unimplemented,
OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
ty::PolyTraitRef<'tcx>,
ty::error::TypeError<'tcx>),
OutputTypeParameterMismatch(
ty::PolyTraitRef<'tcx>,
ty::PolyTraitRef<'tcx>,
ty::error::TypeError<'tcx>,
),
TraitNotObjectSafe(DefId),
ConstEvalFailure(ErrorHandled),
Overflow,
@ -514,8 +521,7 @@ pub struct FulfillmentError<'tcx> {
pub enum FulfillmentErrorCode<'tcx> {
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
CodeSubtypeError(ExpectedFound<Ty<'tcx>>,
TypeError<'tcx>), // always comes from a SubtypePredicate
CodeSubtypeError(ExpectedFound<Ty<'tcx>>, TypeError<'tcx>), // always comes from a SubtypePredicate
CodeAmbiguity,
}
@ -617,7 +623,7 @@ pub enum Vtable<'tcx, N> {
pub struct VtableImplData<'tcx, N> {
pub impl_def_id: DefId,
pub substs: SubstsRef<'tcx>,
pub nested: Vec<N>
pub nested: Vec<N>,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
@ -626,7 +632,7 @@ pub struct VtableGeneratorData<'tcx, N> {
pub substs: SubstsRef<'tcx>,
/// Nested obligations. This can be non-empty if the generator
/// signature contains associated types.
pub nested: Vec<N>
pub nested: Vec<N>,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
@ -635,18 +641,18 @@ pub struct VtableClosureData<'tcx, N> {
pub substs: SubstsRef<'tcx>,
/// Nested obligations. This can be non-empty if the closure
/// signature contains associated types.
pub nested: Vec<N>
pub nested: Vec<N>,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
pub struct VtableAutoImplData<N> {
pub trait_def_id: DefId,
pub nested: Vec<N>
pub nested: Vec<N>,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
pub struct VtableBuiltinData<N> {
pub nested: Vec<N>
pub nested: Vec<N>,
}
/// A vtable for some object-safe trait `Foo` automatically derived
@ -667,7 +673,7 @@ pub struct VtableObjectData<'tcx, N> {
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
pub struct VtableFnPointerData<'tcx, N> {
pub fn_ty: Ty<'tcx>,
pub nested: Vec<N>
pub nested: Vec<N>,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable, TypeFoldable)]
@ -698,14 +704,13 @@ pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
def_id: DefId,
span: Span,
) -> bool {
debug!("type_known_to_meet_bound_modulo_regions(ty={:?}, bound={:?})",
ty,
infcx.tcx.def_path_str(def_id));
debug!(
"type_known_to_meet_bound_modulo_regions(ty={:?}, bound={:?})",
ty,
infcx.tcx.def_path_str(def_id)
);
let trait_ref = ty::TraitRef {
def_id,
substs: infcx.tcx.mk_substs_trait(ty, &[]),
};
let trait_ref = ty::TraitRef { def_id, substs: infcx.tcx.mk_substs_trait(ty, &[]) };
let obligation = Obligation {
param_env,
cause: ObligationCause::misc(span, hir::DUMMY_HIR_ID),
@ -714,8 +719,12 @@ pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
};
let result = infcx.predicate_must_hold_modulo_regions(&obligation);
debug!("type_known_to_meet_ty={:?} bound={} => {:?}",
ty, infcx.tcx.def_path_str(def_id), result);
debug!(
"type_known_to_meet_ty={:?} bound={} => {:?}",
ty,
infcx.tcx.def_path_str(def_id),
result
);
if result && (ty.has_infer_types() || ty.has_closure_types()) {
// Because of inference "guessing", selection can sometimes claim
@ -740,16 +749,20 @@ pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
// assume it is move; linear is always ok.
match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => {
debug!("type_known_to_meet_bound_modulo_regions: ty={:?} bound={} success",
ty,
infcx.tcx.def_path_str(def_id));
debug!(
"type_known_to_meet_bound_modulo_regions: ty={:?} bound={} success",
ty,
infcx.tcx.def_path_str(def_id)
);
true
}
Err(e) => {
debug!("type_known_to_meet_bound_modulo_regions: ty={:?} bound={} errors={:?}",
ty,
infcx.tcx.def_path_str(def_id),
e);
debug!(
"type_known_to_meet_bound_modulo_regions: ty={:?} bound={} errors={:?}",
ty,
infcx.tcx.def_path_str(def_id),
e
);
false
}
}
@ -767,9 +780,7 @@ fn do_normalize_predicates<'tcx>(
) -> Result<Vec<ty::Predicate<'tcx>>, ErrorReported> {
debug!(
"do_normalize_predicates(predicates={:?}, region_context={:?}, cause={:?})",
predicates,
region_context,
cause,
predicates, region_context, cause,
);
let span = cause.span;
tcx.infer_ctxt().enter(|infcx| {
@ -787,19 +798,14 @@ fn do_normalize_predicates<'tcx>(
// them here too, and we will remove this function when
// we move over to lazy normalization *anyway*.
let fulfill_cx = FulfillmentContext::new_ignoring_regions();
let predicates = match fully_normalize(
&infcx,
fulfill_cx,
cause,
elaborated_env,
&predicates,
) {
Ok(predicates) => predicates,
Err(errors) => {
infcx.report_fulfillment_errors(&errors, None, false);
return Err(ErrorReported)
}
};
let predicates =
match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, &predicates) {
Ok(predicates) => predicates,
Err(errors) => {
infcx.report_fulfillment_errors(&errors, None, false);
return Err(ErrorReported);
}
};
debug!("do_normalize_predictes: normalized predicates = {:?}", predicates);
@ -827,7 +833,7 @@ fn do_normalize_predicates<'tcx>(
// unconstrained variable, and it seems better not to ICE,
// all things considered.
tcx.sess.span_err(span, &fixup_err.to_string());
return Err(ErrorReported)
return Err(ErrorReported);
}
};
if predicates.has_local_value() {
@ -862,20 +868,20 @@ pub fn normalize_param_env_or_error<'tcx>(
// and errors will get reported then; so after typeck we
// can be sure that no errors should occur.
debug!("normalize_param_env_or_error(region_context={:?}, unnormalized_env={:?}, cause={:?})",
region_context, unnormalized_env, cause);
debug!(
"normalize_param_env_or_error(region_context={:?}, unnormalized_env={:?}, cause={:?})",
region_context, unnormalized_env, cause
);
let mut predicates: Vec<_> =
util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.to_vec())
.collect();
util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.to_vec()).collect();
debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
predicates);
debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates);
let elaborated_env = ty::ParamEnv::new(
tcx.intern_predicates(&predicates),
unnormalized_env.reveal,
unnormalized_env.def_id
unnormalized_env.def_id,
);
// HACK: we are trying to normalize the param-env inside *itself*. The problem is that
@ -896,25 +902,31 @@ pub fn normalize_param_env_or_error<'tcx>(
//
// This works fairly well because trait matching does not actually care about param-env
// TypeOutlives predicates - these are normally used by regionck.
let outlives_predicates: Vec<_> = predicates.drain_filter(|predicate| {
match predicate {
let outlives_predicates: Vec<_> = predicates
.drain_filter(|predicate| match predicate {
ty::Predicate::TypeOutlives(..) => true,
_ => false
}
}).collect();
_ => false,
})
.collect();
debug!("normalize_param_env_or_error: predicates=(non-outlives={:?}, outlives={:?})",
predicates, outlives_predicates);
let non_outlives_predicates =
match do_normalize_predicates(tcx, region_context, cause.clone(),
elaborated_env, predicates) {
Ok(predicates) => predicates,
// An unnormalized env is better than nothing.
Err(ErrorReported) => {
debug!("normalize_param_env_or_error: errored resolving non-outlives predicates");
return elaborated_env
}
};
debug!(
"normalize_param_env_or_error: predicates=(non-outlives={:?}, outlives={:?})",
predicates, outlives_predicates
);
let non_outlives_predicates = match do_normalize_predicates(
tcx,
region_context,
cause.clone(),
elaborated_env,
predicates,
) {
Ok(predicates) => predicates,
// An unnormalized env is better than nothing.
Err(ErrorReported) => {
debug!("normalize_param_env_or_error: errored resolving non-outlives predicates");
return elaborated_env;
}
};
debug!("normalize_param_env_or_error: non-outlives predicates={:?}", non_outlives_predicates);
@ -923,21 +935,22 @@ pub fn normalize_param_env_or_error<'tcx>(
// predicates here anyway. Keeping them here anyway because it seems safer.
let outlives_env: Vec<_> =
non_outlives_predicates.iter().chain(&outlives_predicates).cloned().collect();
let outlives_env = ty::ParamEnv::new(
tcx.intern_predicates(&outlives_env),
unnormalized_env.reveal,
None
);
let outlives_predicates =
match do_normalize_predicates(tcx, region_context, cause,
outlives_env, outlives_predicates) {
Ok(predicates) => predicates,
// An unnormalized env is better than nothing.
Err(ErrorReported) => {
debug!("normalize_param_env_or_error: errored resolving outlives predicates");
return elaborated_env
}
};
let outlives_env =
ty::ParamEnv::new(tcx.intern_predicates(&outlives_env), unnormalized_env.reveal, None);
let outlives_predicates = match do_normalize_predicates(
tcx,
region_context,
cause,
outlives_env,
outlives_predicates,
) {
Ok(predicates) => predicates,
// An unnormalized env is better than nothing.
Err(ErrorReported) => {
debug!("normalize_param_env_or_error: errored resolving outlives predicates");
return elaborated_env;
}
};
debug!("normalize_param_env_or_error: outlives predicates={:?}", outlives_predicates);
let mut predicates = non_outlives_predicates;
@ -946,7 +959,7 @@ pub fn normalize_param_env_or_error<'tcx>(
ty::ParamEnv::new(
tcx.intern_predicates(&predicates),
unnormalized_env.reveal,
unnormalized_env.def_id
unnormalized_env.def_id,
)
}
@ -964,9 +977,10 @@ where
let selcx = &mut SelectionContext::new(infcx);
let Normalized { value: normalized_value, obligations } =
project::normalize(selcx, param_env, cause, value);
debug!("fully_normalize: normalized_value={:?} obligations={:?}",
normalized_value,
obligations);
debug!(
"fully_normalize: normalized_value={:?} obligations={:?}",
normalized_value, obligations
);
for obligation in obligations {
fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
}
@ -987,8 +1001,7 @@ fn normalize_and_test_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
predicates: Vec<ty::Predicate<'tcx>>,
) -> bool {
debug!("normalize_and_test_predicates(predicates={:?})",
predicates);
debug!("normalize_and_test_predicates(predicates={:?})", predicates);
let result = tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::reveal_all();
@ -1007,8 +1020,7 @@ fn normalize_and_test_predicates<'tcx>(
fulfill_cx.select_all_or_error(&infcx).is_ok()
});
debug!("normalize_and_test_predicates(predicates={:?}) = {:?}",
predicates, result);
debug!("normalize_and_test_predicates(predicates={:?}) = {:?}", predicates, result);
result
}
@ -1016,14 +1028,12 @@ fn substitute_normalize_and_test_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
key: (DefId, SubstsRef<'tcx>),
) -> bool {
debug!("substitute_normalize_and_test_predicates(key={:?})",
key);
debug!("substitute_normalize_and_test_predicates(key={:?})", key);
let predicates = tcx.predicates_of(key.0).instantiate(tcx, key.1).predicates;
let result = normalize_and_test_predicates(tcx, predicates);
debug!("substitute_normalize_and_test_predicates(key={:?}) = {:?}",
key, result);
debug!("substitute_normalize_and_test_predicates(key={:?}) = {:?}", key, result);
result
}
@ -1036,100 +1046,98 @@ fn vtable_methods<'tcx>(
) -> &'tcx [Option<(DefId, SubstsRef<'tcx>)>] {
debug!("vtable_methods({:?})", trait_ref);
tcx.arena.alloc_from_iter(
supertraits(tcx, trait_ref).flat_map(move |trait_ref| {
let trait_methods = tcx.associated_items(trait_ref.def_id())
.filter(|item| item.kind == ty::AssocKind::Method);
tcx.arena.alloc_from_iter(supertraits(tcx, trait_ref).flat_map(move |trait_ref| {
let trait_methods = tcx
.associated_items(trait_ref.def_id())
.filter(|item| item.kind == ty::AssocKind::Method);
// Now list each method's DefId and InternalSubsts (for within its trait).
// If the method can never be called from this object, produce None.
trait_methods.map(move |trait_method| {
debug!("vtable_methods: trait_method={:?}", trait_method);
let def_id = trait_method.def_id;
// Now list each method's DefId and InternalSubsts (for within its trait).
// If the method can never be called from this object, produce None.
trait_methods.map(move |trait_method| {
debug!("vtable_methods: trait_method={:?}", trait_method);
let def_id = trait_method.def_id;
// Some methods cannot be called on an object; skip those.
if !tcx.is_vtable_safe_method(trait_ref.def_id(), &trait_method) {
debug!("vtable_methods: not vtable safe");
return None;
}
// Some methods cannot be called on an object; skip those.
if !tcx.is_vtable_safe_method(trait_ref.def_id(), &trait_method) {
debug!("vtable_methods: not vtable safe");
return None;
}
// The method may have some early-bound lifetimes; add regions for those.
let substs = trait_ref.map_bound(|trait_ref|
InternalSubsts::for_item(tcx, def_id, |param, _|
match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
GenericParamDefKind::Type { .. } |
GenericParamDefKind::Const => {
trait_ref.substs[param.index as usize]
}
}
)
);
// The method may have some early-bound lifetimes; add regions for those.
let substs = trait_ref.map_bound(|trait_ref| {
InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => {
trait_ref.substs[param.index as usize]
}
})
});
// The trait type may have higher-ranked lifetimes in it;
// erase them if they appear, so that we get the type
// at some particular call site.
let substs = tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&substs
);
// The trait type may have higher-ranked lifetimes in it;
// erase them if they appear, so that we get the type
// at some particular call site.
let substs =
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &substs);
// It's possible that the method relies on where-clauses that
// do not hold for this particular set of type parameters.
// Note that this method could then never be called, so we
// do not want to try and codegen it, in that case (see #23435).
let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
if !normalize_and_test_predicates(tcx, predicates.predicates) {
debug!("vtable_methods: predicates do not hold");
return None;
}
// It's possible that the method relies on where-clauses that
// do not hold for this particular set of type parameters.
// Note that this method could then never be called, so we
// do not want to try and codegen it, in that case (see #23435).
let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
if !normalize_and_test_predicates(tcx, predicates.predicates) {
debug!("vtable_methods: predicates do not hold");
return None;
}
Some((def_id, substs))
})
Some((def_id, substs))
})
)
}))
}
impl<'tcx, O> Obligation<'tcx, O> {
pub fn new(cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
predicate: O)
-> Obligation<'tcx, O>
{
pub fn new(
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
predicate: O,
) -> Obligation<'tcx, O> {
Obligation { cause, param_env, recursion_depth: 0, predicate }
}
fn with_depth(cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
predicate: O)
-> Obligation<'tcx, O>
{
fn with_depth(
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
predicate: O,
) -> Obligation<'tcx, O> {
Obligation { cause, param_env, recursion_depth, predicate }
}
pub fn misc(span: Span,
body_id: hir::HirId,
param_env: ty::ParamEnv<'tcx>,
trait_ref: O)
-> Obligation<'tcx, O> {
pub fn misc(
span: Span,
body_id: hir::HirId,
param_env: ty::ParamEnv<'tcx>,
trait_ref: O,
) -> Obligation<'tcx, O> {
Obligation::new(ObligationCause::misc(span, body_id), param_env, trait_ref)
}
pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
Obligation { cause: self.cause.clone(),
param_env: self.param_env,
recursion_depth: self.recursion_depth,
predicate: value }
pub fn with<P>(&self, value: P) -> Obligation<'tcx, P> {
Obligation {
cause: self.cause.clone(),
param_env: self.param_env,
recursion_depth: self.recursion_depth,
predicate: value,
}
}
}
impl<'tcx> ObligationCause<'tcx> {
#[inline]
pub fn new(span: Span,
body_id: hir::HirId,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
pub fn new(
span: Span,
body_id: hir::HirId,
code: ObligationCauseCode<'tcx>,
) -> ObligationCause<'tcx> {
ObligationCause { span, body_id, code }
}
@ -1157,7 +1165,10 @@ impl<'tcx, N> Vtable<'tcx, N> {
}
}
pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M>
where
F: FnMut(N) -> M,
{
match self {
VtableImpl(i) => VtableImpl(VtableImplData {
impl_def_id: i.impl_def_id,
@ -1165,9 +1176,9 @@ impl<'tcx, N> Vtable<'tcx, N> {
nested: i.nested.into_iter().map(f).collect(),
}),
VtableParam(n) => VtableParam(n.into_iter().map(f).collect()),
VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData {
nested: i.nested.into_iter().map(f).collect(),
}),
VtableBuiltin(i) => {
VtableBuiltin(VtableBuiltinData { nested: i.nested.into_iter().map(f).collect() })
}
VtableObject(o) => VtableObject(VtableObjectData {
upcast_trait_ref: o.upcast_trait_ref,
vtable_base: o.vtable_base,
@ -1201,10 +1212,10 @@ impl<'tcx, N> Vtable<'tcx, N> {
}
impl<'tcx> FulfillmentError<'tcx> {
fn new(obligation: PredicateObligation<'tcx>,
code: FulfillmentErrorCode<'tcx>)
-> FulfillmentError<'tcx>
{
fn new(
obligation: PredicateObligation<'tcx>,
code: FulfillmentErrorCode<'tcx>,
) -> FulfillmentError<'tcx> {
FulfillmentError { obligation: obligation, code: code, points_at_arg_span: false }
}
}


@ -1,41 +1,38 @@
use crate::ty::query::Providers;
use crate::hir::def_id::DefId;
use crate::hir;
use crate::ty::TyCtxt;
use syntax_pos::symbol::Symbol;
use rustc_target::spec::abi::Abi;
use crate::hir::def_id::DefId;
use crate::hir::map::blocks::FnLikeNode;
use crate::ty::query::Providers;
use crate::ty::TyCtxt;
use rustc_target::spec::abi::Abi;
use syntax::attr;
use syntax_pos::symbol::Symbol;
impl<'tcx> TyCtxt<'tcx> {
/// Whether the `def_id` counts as const fn in your current crate, considering all active
/// feature gates
pub fn is_const_fn(self, def_id: DefId) -> bool {
self.is_const_fn_raw(def_id) && match self.is_unstable_const_fn(def_id) {
Some(feature_name) => {
// has a `rustc_const_unstable` attribute, check whether the user enabled the
// corresponding feature gate.
self.features()
.declared_lib_features
.iter()
.any(|&(sym, _)| sym == feature_name)
},
// functions without const stability are either stable user written
// const fn or the user is using feature gates and we thus don't
// care what they do
None => true,
}
self.is_const_fn_raw(def_id)
&& match self.is_unstable_const_fn(def_id) {
Some(feature_name) => {
// has a `rustc_const_unstable` attribute, check whether the user enabled the
// corresponding feature gate.
self.features()
.declared_lib_features
.iter()
.any(|&(sym, _)| sym == feature_name)
}
// functions without const stability are either stable user written
// const fn or the user is using feature gates and we thus don't
// care what they do
None => true,
}
}
/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
pub fn is_unstable_const_fn(self, def_id: DefId) -> Option<Symbol> {
if self.is_const_fn_raw(def_id) {
let const_stab = self.lookup_const_stability(def_id)?;
if const_stab.level.is_unstable() {
Some(const_stab.feature)
} else {
None
}
if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None }
} else {
None
}
@ -54,29 +51,31 @@ impl<'tcx> TyCtxt<'tcx> {
match self.lookup_const_stability(def_id) {
// `rustc_const_unstable` functions don't need to conform.
Some(&attr::ConstStability { ref level, .. }) if level.is_unstable() => false,
None => if let Some(stab) = self.lookup_stability(def_id) {
if stab.level.is_stable() {
self.sess.span_err(
self.def_span(def_id),
"stable const functions must have either `rustc_const_stable` or \
None => {
if let Some(stab) = self.lookup_stability(def_id) {
if stab.level.is_stable() {
self.sess.span_err(
self.def_span(def_id),
"stable const functions must have either `rustc_const_stable` or \
`rustc_const_unstable` attribute",
);
// While we errored above, because we don't know if we need to conform, we
// err on the "safe" side and require min_const_fn.
true
);
// While we errored above, because we don't know if we need to conform, we
// err on the "safe" side and require min_const_fn.
true
} else {
// Unstable functions need not conform to min_const_fn.
false
}
} else {
// Unstable functions need not conform to min_const_fn.
false
// Internal functions are forced to conform to min_const_fn.
// Annotate the internal function with a const stability attribute if
// you need to use unstable features.
// Note: this is an arbitrary choice that does not affect stability or const
// safety or anything, it just changes whether we need to annotate some
// internal functions with `rustc_const_stable` or with `rustc_const_unstable`
true
}
} else {
// Internal functions are forced to conform to min_const_fn.
// Annotate the internal function with a const stability attribute if
// you need to use unstable features.
// Note: this is an arbitrary choice that does not affect stability or const
// safety or anything, it just changes whether we need to annotate some
// internal functions with `rustc_const_stable` or with `rustc_const_unstable`
true
},
}
// Everything else needs to conform, because it would be callable from
// other `min_const_fn` functions.
_ => true,
@ -88,23 +87,25 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
pub fn provide(providers: &mut Providers<'_>) {
/// Const evaluability whitelist is here to check evaluability at the
/// top level beforehand.
fn is_const_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> Option<bool> {
match tcx.fn_sig(def_id).abi() {
Abi::RustIntrinsic |
Abi::PlatformIntrinsic => Some(tcx.lookup_const_stability(def_id).is_some()),
_ => None
Abi::RustIntrinsic | Abi::PlatformIntrinsic => {
Some(tcx.lookup_const_stability(def_id).is_some())
}
_ => None,
}
}
/// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether
/// said intrinsic is on the whitelist for being const callable.
fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let hir_id = tcx.hir().as_local_hir_id(def_id)
.expect("Non-local call to local provider is_const_fn");
let hir_id = tcx
.hir()
.as_local_hir_id(def_id)
.expect("Non-local call to local provider is_const_fn");
let node = tcx.hir().get(hir_id);
@ -120,27 +121,30 @@ pub fn provide(providers: &mut Providers<'_>) {
}
fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
tcx.is_const_fn(def_id) && match tcx.lookup_const_stability(def_id) {
Some(stab) => {
if cfg!(debug_assertions) && stab.promotable {
let sig = tcx.fn_sig(def_id);
assert_eq!(
sig.unsafety(),
hir::Unsafety::Normal,
"don't mark const unsafe fns as promotable",
// https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
);
tcx.is_const_fn(def_id)
&& match tcx.lookup_const_stability(def_id) {
Some(stab) => {
if cfg!(debug_assertions) && stab.promotable {
let sig = tcx.fn_sig(def_id);
assert_eq!(
sig.unsafety(),
hir::Unsafety::Normal,
"don't mark const unsafe fns as promotable",
// https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
);
}
stab.promotable
}
stab.promotable
},
None => false,
}
None => false,
}
}
fn const_fn_is_allowed_fn_ptr(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
tcx.is_const_fn(def_id) &&
tcx.lookup_const_stability(def_id)
.map(|stab| stab.allow_const_fn_ptr).unwrap_or(false)
tcx.is_const_fn(def_id)
&& tcx
.lookup_const_stability(def_id)
.map(|stab| stab.allow_const_fn_ptr)
.unwrap_or(false)
}
*providers = Providers {

File diff suppressed because it is too large.


@ -1,13 +1,13 @@
use crate::hir;
use crate::hir::def_id::DefId;
use crate::infer::InferCtxt;
use crate::ty::subst::SubstsRef;
use crate::middle::lang_items;
use crate::traits::{self, AssocTypeBoundData};
use crate::ty::subst::SubstsRef;
use crate::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
use std::iter::once;
use syntax::symbol::{kw, Ident};
use syntax_pos::Span;
use crate::middle::lang_items;
/// Returns the set of obligations needed to make `ty` well-formed.
/// If `ty` contains unresolved inference variables, this may include
@ -22,14 +22,7 @@ pub fn obligations<'a, 'tcx>(
ty: Ty<'tcx>,
span: Span,
) -> Option<Vec<traits::PredicateObligation<'tcx>>> {
let mut wf = WfPredicates {
infcx,
param_env,
body_id,
span,
out: vec![],
item: None,
};
let mut wf = WfPredicates { infcx, param_env, body_id, span, out: vec![], item: None };
if wf.compute(ty) {
debug!("wf::obligations({:?}, body_id={:?}) = {:?}", ty, body_id, wf.out);
let result = wf.normalize();
@ -71,8 +64,7 @@ pub fn predicate_obligations<'a, 'tcx>(
ty::Predicate::Trait(ref t) => {
wf.compute_trait_ref(&t.skip_binder().trait_ref, Elaborate::None); // (*)
}
ty::Predicate::RegionOutlives(..) => {
}
ty::Predicate::RegionOutlives(..) => {}
ty::Predicate::TypeOutlives(ref t) => {
wf.compute(t.skip_binder().0);
}
@ -84,10 +76,8 @@ pub fn predicate_obligations<'a, 'tcx>(
ty::Predicate::WellFormed(t) => {
wf.compute(t);
}
ty::Predicate::ObjectSafe(_) => {
}
ty::Predicate::ClosureKind(..) => {
}
ty::Predicate::ObjectSafe(_) => {}
ty::Predicate::ClosureKind(..) => {}
ty::Predicate::Subtype(ref data) => {
wf.compute(data.skip_binder().a); // (*)
wf.compute(data.skip_binder().b); // (*)
@ -152,14 +142,15 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
let cause = self.cause(traits::MiscObligation);
let infcx = &mut self.infcx;
let param_env = self.param_env;
self.out.iter()
.inspect(|pred| assert!(!pred.has_escaping_bound_vars()))
.flat_map(|pred| {
let mut selcx = traits::SelectionContext::new(infcx);
let pred = traits::normalize(&mut selcx, param_env, cause.clone(), pred);
once(pred.value).chain(pred.obligations)
})
.collect()
self.out
.iter()
.inspect(|pred| assert!(!pred.has_escaping_bound_vars()))
.flat_map(|pred| {
let mut selcx = traits::SelectionContext::new(infcx);
let pred = traits::normalize(&mut selcx, param_env, cause.clone(), pred);
once(pred.value).chain(pred.obligations)
})
.collect()
}
/// Pushes the obligations required for `trait_ref` to be WF into `self.out`.
@ -171,154 +162,163 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
let param_env = self.param_env;
let item = &self.item;
let extend_cause_with_original_assoc_item_obligation = |
cause: &mut traits::ObligationCause<'_>,
pred: &ty::Predicate<'_>,
trait_assoc_items: ty::AssocItemsIterator<'_>,
| {
let trait_item = tcx.hir().as_local_hir_id(trait_ref.def_id).and_then(|trait_id| {
tcx.hir().find(trait_id)
});
let (trait_name, trait_generics) = match trait_item {
Some(hir::Node::Item(hir::Item {
ident,
kind: hir::ItemKind::Trait(.., generics, _, _),
..
})) |
Some(hir::Node::Item(hir::Item {
ident,
kind: hir::ItemKind::TraitAlias(generics, _),
..
})) => (Some(ident), Some(generics)),
_ => (None, None),
};
let extend_cause_with_original_assoc_item_obligation =
|cause: &mut traits::ObligationCause<'_>,
pred: &ty::Predicate<'_>,
trait_assoc_items: ty::AssocItemsIterator<'_>| {
let trait_item = tcx
.hir()
.as_local_hir_id(trait_ref.def_id)
.and_then(|trait_id| tcx.hir().find(trait_id));
let (trait_name, trait_generics) = match trait_item {
Some(hir::Node::Item(hir::Item {
ident,
kind: hir::ItemKind::Trait(.., generics, _, _),
..
}))
| Some(hir::Node::Item(hir::Item {
ident,
kind: hir::ItemKind::TraitAlias(generics, _),
..
})) => (Some(ident), Some(generics)),
_ => (None, None),
};
let item_span = item.map(|i| tcx.sess.source_map().def_span(i.span));
match pred {
ty::Predicate::Projection(proj) => {
// The obligation comes not from the current `impl` nor the `trait` being
// implemented, but rather from a "second order" obligation, like in
// `src/test/ui/associated-types/point-at-type-on-obligation-failure.rs`:
//
// error[E0271]: type mismatch resolving `<Foo2 as Bar2>::Ok == ()`
// --> $DIR/point-at-type-on-obligation-failure.rs:13:5
// |
// LL | type Ok;
// | -- associated type defined here
// ...
// LL | impl Bar for Foo {
// | ---------------- in this `impl` item
// LL | type Ok = ();
// | ^^^^^^^^^^^^^ expected `u32`, found `()`
// |
// = note: expected type `u32`
// found type `()`
//
// FIXME: we would want to point a span to all places that contributed to this
// obligation. In the case above, it should be closer to:
//
// error[E0271]: type mismatch resolving `<Foo2 as Bar2>::Ok == ()`
// --> $DIR/point-at-type-on-obligation-failure.rs:13:5
// |
// LL | type Ok;
// | -- associated type defined here
// LL | type Sibling: Bar2<Ok=Self::Ok>;
// | -------------------------------- obligation set here
// ...
// LL | impl Bar for Foo {
// | ---------------- in this `impl` item
// LL | type Ok = ();
// | ^^^^^^^^^^^^^ expected `u32`, found `()`
// ...
// LL | impl Bar2 for Foo2 {
// | ---------------- in this `impl` item
// LL | type Ok = u32;
// | -------------- obligation set here
// |
// = note: expected type `u32`
// found type `()`
if let Some(hir::ItemKind::Impl(.., impl_items)) = item.map(|i| &i.kind) {
let trait_assoc_item = tcx.associated_item(proj.projection_def_id());
if let Some(impl_item) = impl_items.iter().filter(|item| {
item.ident == trait_assoc_item.ident
}).next() {
cause.span = impl_item.span;
cause.code = traits::AssocTypeBound(Box::new(AssocTypeBoundData {
impl_span: item_span,
original: trait_assoc_item.ident.span,
bounds: vec![],
}));
}
}
}
ty::Predicate::Trait(proj) => {
// An associated item obligation born out of the `trait` failed to be met.
// Point at the `impl` that failed the obligation, the associated item that
// needed to meet the obligation, and the definition of that associated item,
// which should hold the obligation in most cases. An example can be seen in
// `src/test/ui/associated-types/point-at-type-on-obligation-failure-2.rs`:
//
// error[E0277]: the trait bound `bool: Bar` is not satisfied
// --> $DIR/point-at-type-on-obligation-failure-2.rs:8:5
// |
// LL | type Assoc: Bar;
// | ----- associated type defined here
// ...
// LL | impl Foo for () {
// | --------------- in this `impl` item
// LL | type Assoc = bool;
// | ^^^^^^^^^^^^^^^^^^ the trait `Bar` is not implemented for `bool`
//
// If the obligation comes from the where clause in the `trait`, we point at it:
//
// error[E0277]: the trait bound `bool: Bar` is not satisfied
// --> $DIR/point-at-type-on-obligation-failure-2.rs:8:5
// |
// | trait Foo where <Self as Foo>>::Assoc: Bar {
// | -------------------------- restricted in this bound
// LL | type Assoc;
// | ----- associated type defined here
// ...
// LL | impl Foo for () {
// | --------------- in this `impl` item
// LL | type Assoc = bool;
// | ^^^^^^^^^^^^^^^^^^ the trait `Bar` is not implemented for `bool`
if let (
ty::Projection(ty::ProjectionTy { item_def_id, .. }),
Some(hir::ItemKind::Impl(.., impl_items)),
) = (&proj.skip_binder().self_ty().kind, item.map(|i| &i.kind)) {
if let Some((impl_item, trait_assoc_item)) = trait_assoc_items
.filter(|i| i.def_id == *item_def_id)
.next()
.and_then(|trait_assoc_item| impl_items.iter()
.filter(|i| i.ident == trait_assoc_item.ident)
let item_span = item.map(|i| tcx.sess.source_map().def_span(i.span));
match pred {
ty::Predicate::Projection(proj) => {
// The obligation comes not from the current `impl` nor the `trait` being
// implemented, but rather from a "second order" obligation, like in
// `src/test/ui/associated-types/point-at-type-on-obligation-failure.rs`:
//
// error[E0271]: type mismatch resolving `<Foo2 as Bar2>::Ok == ()`
// --> $DIR/point-at-type-on-obligation-failure.rs:13:5
// |
// LL | type Ok;
// | -- associated type defined here
// ...
// LL | impl Bar for Foo {
// | ---------------- in this `impl` item
// LL | type Ok = ();
// | ^^^^^^^^^^^^^ expected `u32`, found `()`
// |
// = note: expected type `u32`
// found type `()`
//
// FIXME: we would want to point a span to all places that contributed to this
// obligation. In the case above, it should be closer to:
//
// error[E0271]: type mismatch resolving `<Foo2 as Bar2>::Ok == ()`
// --> $DIR/point-at-type-on-obligation-failure.rs:13:5
// |
// LL | type Ok;
// | -- associated type defined here
// LL | type Sibling: Bar2<Ok=Self::Ok>;
// | -------------------------------- obligation set here
// ...
// LL | impl Bar for Foo {
// | ---------------- in this `impl` item
// LL | type Ok = ();
// | ^^^^^^^^^^^^^ expected `u32`, found `()`
// ...
// LL | impl Bar2 for Foo2 {
// | ---------------- in this `impl` item
// LL | type Ok = u32;
// | -------------- obligation set here
// |
// = note: expected type `u32`
// found type `()`
if let Some(hir::ItemKind::Impl(.., impl_items)) = item.map(|i| &i.kind) {
let trait_assoc_item = tcx.associated_item(proj.projection_def_id());
if let Some(impl_item) = impl_items
.iter()
.filter(|item| item.ident == trait_assoc_item.ident)
.next()
.map(|impl_item| (impl_item, trait_assoc_item)))
{
let bounds = trait_generics.map(|generics| get_generic_bound_spans(
&generics,
trait_name,
trait_assoc_item.ident,
)).unwrap_or_else(Vec::new);
cause.span = impl_item.span;
cause.code = traits::AssocTypeBound(Box::new(AssocTypeBoundData {
impl_span: item_span,
original: trait_assoc_item.ident.span,
bounds,
}));
{
cause.span = impl_item.span;
cause.code = traits::AssocTypeBound(Box::new(AssocTypeBoundData {
impl_span: item_span,
original: trait_assoc_item.ident.span,
bounds: vec![],
}));
}
}
}
ty::Predicate::Trait(proj) => {
// An associated item obligation born out of the `trait` failed to be met.
// Point at the `impl` that failed the obligation, the associated item that
// needed to meet the obligation, and the definition of that associated item,
// which should hold the obligation in most cases. An example can be seen in
// `src/test/ui/associated-types/point-at-type-on-obligation-failure-2.rs`:
//
// error[E0277]: the trait bound `bool: Bar` is not satisfied
// --> $DIR/point-at-type-on-obligation-failure-2.rs:8:5
// |
// LL | type Assoc: Bar;
// | ----- associated type defined here
// ...
// LL | impl Foo for () {
// | --------------- in this `impl` item
// LL | type Assoc = bool;
// | ^^^^^^^^^^^^^^^^^^ the trait `Bar` is not implemented for `bool`
//
// If the obligation comes from the where clause in the `trait`, we point at it:
//
// error[E0277]: the trait bound `bool: Bar` is not satisfied
// --> $DIR/point-at-type-on-obligation-failure-2.rs:8:5
// |
// | trait Foo where <Self as Foo>>::Assoc: Bar {
// | -------------------------- restricted in this bound
// LL | type Assoc;
// | ----- associated type defined here
// ...
// LL | impl Foo for () {
// | --------------- in this `impl` item
// LL | type Assoc = bool;
// | ^^^^^^^^^^^^^^^^^^ the trait `Bar` is not implemented for `bool`
if let (
ty::Projection(ty::ProjectionTy { item_def_id, .. }),
Some(hir::ItemKind::Impl(.., impl_items)),
) = (&proj.skip_binder().self_ty().kind, item.map(|i| &i.kind))
{
if let Some((impl_item, trait_assoc_item)) = trait_assoc_items
.filter(|i| i.def_id == *item_def_id)
.next()
.and_then(|trait_assoc_item| {
impl_items
.iter()
.filter(|i| i.ident == trait_assoc_item.ident)
.next()
.map(|impl_item| (impl_item, trait_assoc_item))
})
{
let bounds = trait_generics
.map(|generics| {
get_generic_bound_spans(
&generics,
trait_name,
trait_assoc_item.ident,
)
})
.unwrap_or_else(Vec::new);
cause.span = impl_item.span;
cause.code = traits::AssocTypeBound(Box::new(AssocTypeBoundData {
impl_span: item_span,
original: trait_assoc_item.ident.span,
bounds,
}));
}
}
}
_ => {}
}
_ => {}
}
};
};
if let Elaborate::All = elaborate {
let trait_assoc_items = tcx.associated_items(trait_ref.def_id);
let predicates = obligations.iter()
.map(|obligation| obligation.predicate.clone())
.collect();
let predicates =
obligations.iter().map(|obligation| obligation.predicate.clone()).collect();
let implied_obligations = traits::elaborate_predicates(tcx, predicates);
let implied_obligations = implied_obligations.map(|pred| {
let mut cause = cause.clone();
@ -334,13 +334,9 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
self.out.extend(obligations);
self.out.extend(trait_ref.substs.types()
.filter(|ty| !ty.has_escaping_bound_vars())
.map(|ty| traits::Obligation::new(
cause.clone(),
param_env,
ty::Predicate::WellFormed(ty),
)));
self.out.extend(trait_ref.substs.types().filter(|ty| !ty.has_escaping_bound_vars()).map(
|ty| traits::Obligation::new(cause.clone(), param_env, ty::Predicate::WellFormed(ty)),
));
}
/// Pushes the obligations required for `trait_ref::Item` to be WF
@ -368,9 +364,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
let predicate = ty::Predicate::ConstEvaluatable(def_id, substs);
let cause = self.cause(traits::MiscObligation);
self.out.push(traits::Obligation::new(cause,
self.param_env,
predicate));
self.out.push(traits::Obligation::new(cause, self.param_env, predicate));
}
}
@ -394,19 +388,19 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
let param_env = self.param_env;
while let Some(ty) = subtys.next() {
match ty.kind {
ty::Bool |
ty::Char |
ty::Int(..) |
ty::Uint(..) |
ty::Float(..) |
ty::Error |
ty::Str |
ty::GeneratorWitness(..) |
ty::Never |
ty::Param(_) |
ty::Bound(..) |
ty::Placeholder(..) |
ty::Foreign(..) => {
ty::Bool
| ty::Char
| ty::Int(..)
| ty::Uint(..)
| ty::Float(..)
| ty::Error
| ty::Str
| ty::GeneratorWitness(..)
| ty::Never
| ty::Param(_)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Foreign(..) => {
// WfScalar, WfParameter, etc
}
@ -453,13 +447,13 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// WfReference
if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() {
let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
self.out.push(
traits::Obligation::new(
cause,
param_env,
ty::Predicate::TypeOutlives(
ty::Binder::dummy(
ty::OutlivesPredicate(rty, r)))));
self.out.push(traits::Obligation::new(
cause,
param_env,
ty::Predicate::TypeOutlives(ty::Binder::dummy(ty::OutlivesPredicate(
rty, r,
))),
));
}
}
@ -537,20 +531,18 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// obligations that don't refer to Self and
// checking those
let defer_to_coercion =
self.infcx.tcx.features().object_safe_for_dispatch;
let defer_to_coercion = self.infcx.tcx.features().object_safe_for_dispatch;
if !defer_to_coercion {
let cause = self.cause(traits::MiscObligation);
let component_traits =
data.auto_traits().chain(data.principal_def_id());
self.out.extend(
component_traits.map(|did| traits::Obligation::new(
let component_traits = data.auto_traits().chain(data.principal_def_id());
self.out.extend(component_traits.map(|did| {
traits::Obligation::new(
cause.clone(),
param_env,
ty::Predicate::ObjectSafe(did)
))
);
ty::Predicate::ObjectSafe(did),
)
}));
}
}
@ -569,16 +561,22 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// is satisfied to ensure termination.)
ty::Infer(_) => {
let ty = self.infcx.shallow_resolve(ty);
if let ty::Infer(_) = ty.kind { // not yet resolved...
if ty == ty0 { // ...this is the type we started from! no progress.
if let ty::Infer(_) = ty.kind {
// not yet resolved...
if ty == ty0 {
// ...this is the type we started from! no progress.
return false;
}
let cause = self.cause(traits::MiscObligation);
self.out.push( // ...not the type we started from, so we made progress.
traits::Obligation::new(cause,
self.param_env,
ty::Predicate::WellFormed(ty)));
self.out.push(
// ...not the type we started from, so we made progress.
traits::Obligation::new(
cause,
self.param_env,
ty::Predicate::WellFormed(ty),
),
);
} else {
// Yes, resolved, proceed with the
// result. Should never return false because
@ -593,27 +591,27 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
return true;
}
fn nominal_obligations(&mut self,
def_id: DefId,
substs: SubstsRef<'tcx>)
-> Vec<traits::PredicateObligation<'tcx>>
{
let predicates =
self.infcx.tcx.predicates_of(def_id)
.instantiate(self.infcx.tcx, substs);
fn nominal_obligations(
&mut self,
def_id: DefId,
substs: SubstsRef<'tcx>,
) -> Vec<traits::PredicateObligation<'tcx>> {
let predicates = self.infcx.tcx.predicates_of(def_id).instantiate(self.infcx.tcx, substs);
let cause = self.cause(traits::ItemObligation(def_id));
predicates.predicates
.into_iter()
.map(|pred| traits::Obligation::new(cause.clone(),
self.param_env,
pred))
.filter(|pred| !pred.has_escaping_bound_vars())
.collect()
predicates
.predicates
.into_iter()
.map(|pred| traits::Obligation::new(cause.clone(), self.param_env, pred))
.filter(|pred| !pred.has_escaping_bound_vars())
.collect()
}
fn from_object_ty(&mut self, ty: Ty<'tcx>,
data: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
region: ty::Region<'tcx>) {
fn from_object_ty(
&mut self,
ty: Ty<'tcx>,
data: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
region: ty::Region<'tcx>,
) {
// Imagine a type like this:
//
// trait Foo { }
@ -646,19 +644,20 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
// Note: in fact we only permit builtin traits, not `Bar<'d>`, I
// am looking forward to the future here.
if !data.has_escaping_bound_vars() && !region.has_escaping_bound_vars() {
let implicit_bounds =
object_region_bounds(self.infcx.tcx, data);
let implicit_bounds = object_region_bounds(self.infcx.tcx, data);
let explicit_bound = region;
self.out.reserve(implicit_bounds.len());
for implicit_bound in implicit_bounds {
let cause = self.cause(traits::ObjectTypeBound(ty, explicit_bound));
let outlives = ty::Binder::dummy(
ty::OutlivesPredicate(explicit_bound, implicit_bound));
self.out.push(traits::Obligation::new(cause,
self.param_env,
outlives.to_predicate()));
let outlives =
ty::Binder::dummy(ty::OutlivesPredicate(explicit_bound, implicit_bound));
self.out.push(traits::Obligation::new(
cause,
self.param_env,
outlives.to_predicate(),
));
}
}
}
@ -679,13 +678,16 @@ pub fn object_region_bounds<'tcx>(
// a placeholder type.
let open_ty = tcx.mk_ty_infer(ty::FreshTy(0));
let predicates = existential_predicates.iter().filter_map(|predicate| {
if let ty::ExistentialPredicate::Projection(_) = *predicate.skip_binder() {
None
} else {
Some(predicate.with_self_ty(tcx, open_ty))
}
}).collect();
let predicates = existential_predicates
.iter()
.filter_map(|predicate| {
if let ty::ExistentialPredicate::Projection(_) = *predicate.skip_binder() {
None
} else {
Some(predicate.with_self_ty(tcx, open_ty))
}
})
.collect();
tcx.required_region_bounds(open_ty, predicates)
}


@ -2,38 +2,38 @@ use crate::attributes;
use crate::back::bytecode;
use crate::back::lto::ThinBuffer;
use crate::base;
use crate::common;
use crate::consts;
use crate::context::{get_reloc_model, is_pie_binary};
use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
use crate::llvm_util;
use crate::ModuleLlvm;
use crate::type_::Type;
use crate::context::{is_pie_binary, get_reloc_model};
use crate::common;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
use log::debug;
use rustc::bug;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler};
use rustc_codegen_ssa::traits::*;
use rustc::session::config::{self, OutputType, Passes, Lto, Sanitizer, SwitchWithOptPath};
use rustc::session::config::{self, Lto, OutputType, Passes, Sanitizer, SwitchWithOptPath};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_codegen_ssa::{RLIB_BYTECODE_EXTENSION, ModuleCodegen, CompiledModule};
use rustc::util::common::time_ext;
use rustc_fs_util::{path_to_c_string, link_or_copy};
use rustc_codegen_ssa::back::write::{run_assembler, CodegenContext, ModuleConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, RLIB_BYTECODE_EXTENSION};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{Handler, FatalError};
use log::debug;
use rustc_errors::{FatalError, Handler};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use libc::{c_char, c_int, c_uint, c_void, size_t};
use std::ffi::CString;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::slice;
use std::str;
use std::sync::Arc;
use std::slice;
use libc::{c_int, c_uint, c_void, c_char, size_t};
pub const RELOC_MODEL_ARGS : [(&str, llvm::RelocMode); 7] = [
pub const RELOC_MODEL_ARGS: [(&str, llvm::RelocMode); 7] = [
("pic", llvm::RelocMode::PIC),
("static", llvm::RelocMode::Static),
("default", llvm::RelocMode::Default),
@ -50,7 +50,7 @@ pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[
("large", llvm::CodeModel::Large),
];
pub const TLS_MODEL_ARGS : [(&str, llvm::ThreadLocalMode); 4] = [
pub const TLS_MODEL_ARGS: [(&str, llvm::ThreadLocalMode); 4] = [
("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic),
("local-dynamic", llvm::ThreadLocalMode::LocalDynamic),
("initial-exec", llvm::ThreadLocalMode::InitialExec),
@ -65,12 +65,13 @@ pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
}
pub fn write_output_file(
handler: &rustc_errors::Handler,
target: &'ll llvm::TargetMachine,
pm: &llvm::PassManager<'ll>,
m: &'ll llvm::Module,
output: &Path,
file_type: llvm::FileType) -> Result<(), FatalError> {
handler: &rustc_errors::Handler,
target: &'ll llvm::TargetMachine,
pm: &llvm::PassManager<'ll>,
m: &'ll llvm::Module,
output: &Path,
file_type: llvm::FileType,
) -> Result<(), FatalError> {
unsafe {
let output_c = path_to_c_string(output);
let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
@ -85,9 +86,8 @@ pub fn create_informational_target_machine(
sess: &Session,
find_features: bool,
) -> &'static mut llvm::TargetMachine {
target_machine_factory(sess, config::OptLevel::No, find_features)().unwrap_or_else(|err| {
llvm_err(sess.diagnostic(), &err).raise()
})
target_machine_factory(sess, config::OptLevel::No, find_features)()
.unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
}
pub fn create_target_machine(
@ -95,13 +95,12 @@ pub fn create_target_machine(
find_features: bool,
) -> &'static mut llvm::TargetMachine {
target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
.unwrap_or_else(|err| {
llvm_err(tcx.sess.diagnostic(), &err).raise()
})
.unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
}
pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize)
{
pub fn to_llvm_opt_settings(
cfg: config::OptLevel,
) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
use self::config::OptLevel::*;
match cfg {
No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
@ -116,9 +115,11 @@ pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, ll
// If find_features is true this won't access `sess.crate_types` by assuming
// that `is_pie_binary` is false. When we discover LLVM target features
// `sess.crate_types` is uninitialized so we cannot access it.
pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_features: bool)
-> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync>
{
pub fn target_machine_factory(
sess: &Session,
optlvl: config::OptLevel,
find_features: bool,
) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
let reloc_model = get_reloc_model(sess);
let (opt_level, _) = to_llvm_opt_settings(optlvl);
@ -127,22 +128,18 @@ pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_fea
let ffunction_sections = sess.target.target.options.function_sections;
let fdata_sections = ffunction_sections;
let code_model_arg = sess.opts.cg.code_model.as_ref().or(
sess.target.target.options.code_model.as_ref(),
);
let code_model_arg =
sess.opts.cg.code_model.as_ref().or(sess.target.target.options.code_model.as_ref());
let code_model = match code_model_arg {
Some(s) => {
match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid code model",
code_model_arg));
sess.abort_if_errors();
bug!();
}
Some(s) => match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid code model", code_model_arg));
sess.abort_if_errors();
bug!();
}
}
},
None => llvm::CodeModel::None,
};
@ -152,9 +149,9 @@ pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_fea
// On the wasm target once the `atomics` feature is enabled that means that
// we're no longer single-threaded, or otherwise we don't want LLVM to
// lower atomic operations to single-threaded operations.
if singlethread &&
sess.target.target.llvm_target.contains("wasm32") &&
features.iter().any(|s| *s == "+atomics")
if singlethread
&& sess.target.target.llvm_target.contains("wasm32")
&& features.iter().any(|s| *s == "+atomics")
{
singlethread = false;
}
@ -173,7 +170,10 @@ pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_fea
Arc::new(move || {
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(), cpu.as_ptr(), features.as_ptr(), abi.as_ptr(),
triple.as_ptr(),
cpu.as_ptr(),
features.as_ptr(),
abi.as_ptr(),
code_model,
reloc_model,
opt_level,
@ -190,8 +190,7 @@ pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_fea
};
tm.ok_or_else(|| {
format!("Could not create LLVM TargetMachine for triple: {}",
triple.to_str().unwrap())
format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
})
})
}
@ -199,10 +198,10 @@ pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_fea
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
name: &str,
) {
if !cgcx.save_temps {
return
return;
}
unsafe {
let ext = format!("{}.bc", name);
@ -220,9 +219,11 @@ pub struct DiagnosticHandlers<'a> {
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
pub fn new(
cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context,
) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
@ -243,17 +244,17 @@ impl<'a> Drop for DiagnosticHandlers<'a> {
}
}
unsafe extern "C" fn report_inline_asm(cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
unsafe extern "C" fn report_inline_asm(
cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint,
) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
if user.is_null() {
return
return;
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
@ -265,15 +266,13 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
return;
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
report_inline_asm(cgcx, &llvm::twine_to_string(inline.message), inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
@ -283,33 +282,35 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
diag_handler.note_without_error(&format!(
"optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message
));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
})
.expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
llvm::diagnostic::UnknownDiagnostic(..) => {}
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
pub(crate) unsafe fn optimize(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
@ -376,11 +377,15 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
llvm::LLVMAddAnalysisPasses(tm, fpm);
llvm::LLVMAddAnalysisPasses(tm, mpm);
let opt_level = to_llvm_opt_settings(opt_level).0;
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
let prepare_for_thin_lto = cgcx.lto == Lto::Thin
|| cgcx.lto == Lto::ThinLocal
|| (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
b,
extra_passes.as_ptr(),
extra_passes.len() as size_t,
);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
@ -401,13 +406,17 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
diag_handler.err(
"The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
-C passes=name-anon-globals to the compiler command line.",
);
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
bug!(
"We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen."
);
}
}
}
@ -417,19 +426,19 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
time_ext(
config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| llvm::LLVMRustRunFunctionPassManager(fpm, llmod),
);
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
time_ext(
config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| llvm::LLVMRunPassManager(mpm, llmod),
);
}
// Deallocate managers that we're now done with
@ -439,9 +448,7 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
Ok(())
}
unsafe fn add_sanitizer_passes(config: &ModuleConfig,
passes: &mut Vec<&'static mut llvm::Pass>) {
unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
let sanitizer = match &config.sanitizer {
None => return,
Some(s) => s,
@ -464,12 +471,12 @@ unsafe fn add_sanitizer_passes(config: &ModuleConfig,
}
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
pub(crate) unsafe fn codegen(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
@ -491,11 +498,14 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
unsafe fn with_codegen<'ll, F, R>(
tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F,
) -> R
where
F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMAddAnalysisPasses(tm, cpm);
@ -519,7 +529,6 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
@ -552,88 +561,103 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
embed_bitcode(cgcx, llcx, llmod, None);
}
time_ext(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()),
time_ext(
config.time_passes,
&format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_ir");
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
if config.emit_ir {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_ir");
let out =
cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
extern "C" fn demangle_callback(input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
extern "C" fn demangle_callback(
input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t,
) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
};
let output = unsafe {
slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
};
let mut cursor = io::Cursor::new(output);
let output = unsafe {
slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
};
let mut cursor = io::Cursor::new(output);
let demangled = match rustc_demangle::try_demangle(input) {
Ok(d) => d,
Err(_) => return 0,
};
let demangled = match rustc_demangle::try_demangle(input) {
Ok(d) => d,
Err(_) => return 0,
};
if let Err(_) = write!(cursor, "{:#}", demangled) {
// Possible only if provided buffer is not big enough
return 0;
if let Err(_) = write!(cursor, "{:#}", demangled) {
// Possible only if provided buffer is not big enough
return 0;
}
cursor.position() as size_t
}
cursor.position() as size_t
let result =
llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
result.into_result().map_err(|()| {
let msg = format!("failed to write LLVM IR to {}", out.display());
llvm_err(diag_handler, &msg)
})?;
}
let result =
llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
result.into_result().map_err(|()| {
let msg = format!("failed to write LLVM IR to {}", out.display());
llvm_err(diag_handler, &msg)
})?;
}
if config.emit_asm || asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_asm");
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
if config.emit_asm || asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_asm");
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
// We can't use the same module for asm and binary output, because that triggers
// various errors like invalid IR or broken binaries, so we might have to clone the
// module to produce the asm output
let llmod = if config.emit_obj {
llvm::LLVMCloneModule(llmod)
} else {
llmod
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &path,
llvm::FileType::AssemblyFile)
})?;
}
if write_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_obj");
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
llvm::FileType::ObjectFile)
})?;
} else if asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_asm_to_obj");
let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
if !config.emit_asm && !cgcx.save_temps {
drop(fs::remove_file(&assembly));
// We can't use the same module for asm and binary output, because that triggers
// various errors like invalid IR or broken binaries, so we might have to clone the
// module to produce the asm output
let llmod = if config.emit_obj { llvm::LLVMCloneModule(llmod) } else { llmod };
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(
diag_handler,
tm,
cpm,
llmod,
&path,
llvm::FileType::AssemblyFile,
)
})?;
}
}
Ok(())
})?;
if write_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_obj");
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(
diag_handler,
tm,
cpm,
llmod,
&obj_out,
llvm::FileType::ObjectFile,
)
})?;
} else if asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_asm_to_obj");
let assembly =
cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
if !config.emit_asm && !cgcx.save_temps {
drop(fs::remove_file(&assembly));
}
}
Ok(())
},
)?;
if copy_bc_to_obj {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
@ -651,10 +675,12 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
drop(handlers);
}
Ok(module.into_compiled_module(config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames))
Ok(module.into_compiled_module(
config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames,
))
}
/// Embed the bitcode of an LLVM module in the LLVM module itself.
@ -675,10 +701,12 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
///
/// Basically all of this is us attempting to follow in the footsteps of clang
/// on iOS. See #35968 for lots more info.
unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
unsafe fn embed_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>,
) {
let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
@ -687,14 +715,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
);
llvm::LLVMSetInitializer(llglobal, llconst);
let is_apple = cgcx.opts.target_triple.triple().contains("-ios") ||
cgcx.opts.target_triple.triple().contains("-darwin");
let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
|| cgcx.opts.target_triple.triple().contains("-darwin");
let section = if is_apple {
"__LLVM,__bitcode\0"
} else {
".llvmbc\0"
};
let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@ -706,28 +730,26 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
"rustc.embedded.cmdline\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
let section = if is_apple {
"__LLVM,__cmdline\0"
} else {
".llvmcmd\0"
};
let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
}
pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
config: &ModuleConfig,
opt_level: llvm::CodeGenOptLevel,
prepare_for_thin_lto: bool,
f: &mut dyn FnMut(&llvm::PassManagerBuilder)) {
pub unsafe fn with_llvm_pmb(
llmod: &llvm::Module,
config: &ModuleConfig,
opt_level: llvm::CodeGenOptLevel,
prepare_for_thin_lto: bool,
f: &mut dyn FnMut(&llvm::PassManagerBuilder),
) {
use std::ptr;
// Create the PassManagerBuilder for LLVM. We configure it with
// reasonable defaults and prepare it to actually populate the pass
// manager.
let builder = llvm::LLVMPassManagerBuilderCreate();
let opt_size = config.opt_size.map(|x| to_llvm_opt_settings(x).1)
.unwrap_or(llvm::CodeGenOptSizeNone);
let opt_size =
config.opt_size.map(|x| to_llvm_opt_settings(x).1).unwrap_or(llvm::CodeGenOptSizeNone);
let inline_threshold = config.inline_threshold;
let pgo_gen_path = match config.pgo_gen {
@ -740,14 +762,13 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
Some(CString::new(format!("{}", path.display())).unwrap())
}
SwitchWithOptPath::Disabled => {
None
}
SwitchWithOptPath::Disabled => None,
};
let pgo_use_path = config.pgo_use.as_ref().map(|path_buf| {
CString::new(path_buf.to_string_lossy().as_bytes()).unwrap()
});
let pgo_use_path = config
.pgo_use
.as_ref()
.map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap());
llvm::LLVMRustConfigurePassManagerBuilder(
builder,
@ -794,9 +815,7 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
(llvm::CodeGenOptLevel::Default, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
}
(llvm::CodeGenOptLevel::Other, ..) => {
bug!("CodeGenOptLevel::Other selected")
}
(llvm::CodeGenOptLevel::Other, ..) => bug!("CodeGenOptLevel::Other selected"),
}
f(builder);
@ -811,36 +830,28 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
fn create_msvc_imps(
cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module
llmod: &llvm::Module,
) {
if !cgcx.msvc_imps_needed {
return
return;
}
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on x86. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g., no extra
// underscores added in front).
let prefix = if cgcx.target_arch == "x86" {
"\x01__imp__"
} else {
"\x01__imp_"
};
let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
unsafe {
let i8p_ty = Type::i8p_llcx(llcx);
let globals = base::iter_globals(llmod)
.filter(|&val| {
llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage &&
llvm::LLVMIsDeclaration(val) == 0
llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
&& llvm::LLVMIsDeclaration(val) == 0
})
.filter_map(|val| {
// Exclude some symbols that we know are not Rust symbols.
let name = llvm::get_value_name(val);
if ignored(name) {
None
} else {
Some((val, name))
}
if ignored(name) { None } else { Some((val, name)) }
})
.map(move |(val, name)| {
let mut imp_name = prefix.as_bytes().to_vec();
@ -851,9 +862,7 @@ fn create_msvc_imps(
.collect::<Vec<_>>();
for (imp_name, val) in globals {
let imp = llvm::LLVMAddGlobal(llmod,
i8p_ty,
imp_name.as_ptr().cast());
let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
}


@ -1,27 +1,26 @@
use crate::llvm::{self, SetUnnamedAddr, True};
use crate::debuginfo;
use crate::common::CodegenCx;
use crate::base;
use crate::common::CodegenCx;
use crate::debuginfo;
use crate::llvm::{self, SetUnnamedAddr, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::c_uint;
use rustc::hir::def_id::DefId;
use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint,
Pointer, ErrorHandled};
use rustc::mir::mono::MonoItem;
use rustc::hir::Node;
use rustc_target::abi::HasDataLayout;
use rustc::ty::{self, Ty, Instance};
use rustc_codegen_ssa::traits::*;
use syntax::symbol::{Symbol, sym};
use syntax_pos::Span;
use rustc::{bug, span_bug};
use log::debug;
use rustc::hir::def_id::DefId;
use rustc::hir::Node;
use rustc::mir::interpret::{read_target_uint, Allocation, ConstValue, ErrorHandled, Pointer};
use rustc::mir::mono::MonoItem;
use rustc::ty::{self, Instance, Ty};
use rustc::{bug, span_bug};
use rustc_codegen_ssa::traits::*;
use rustc_target::abi::HasDataLayout;
use syntax::symbol::{sym, Symbol};
use syntax_pos::Span;
use rustc::ty::layout::{self, Size, Align, LayoutOf};
use rustc::ty::layout::{self, Align, LayoutOf, Size};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
use rustc::hir::{self, CodegenFnAttrFlags, CodegenFnAttrs};
use std::ffi::CStr;
@ -51,14 +50,13 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
// affect interpreter execution (we inspect the result after interpreter execution),
// and we properly interpret the relocation as a relocation pointer offset.
alloc.inspect_with_undef_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
as u64;
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0
},
cx.type_i8p()
&layout::Scalar { value: layout::Primitive::Pointer, valid_range: 0..=!0 },
cx.type_i8p(),
));
next_offset = offset + pointer_size;
}
@ -84,19 +82,13 @@ pub fn codegen_static_initializer(
let static_ = cx.tcx.const_eval_poly(def_id)?;
let alloc = match static_.val {
ty::ConstKind::Value(ConstValue::ByRef {
alloc, offset,
}) if offset.bytes() == 0 => {
alloc
},
ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) if offset.bytes() == 0 => alloc,
_ => bug!("static const eval returned {:#?}", static_),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: Align) {
fn set_global_alignment(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
@ -118,7 +110,7 @@ fn check_and_apply_linkage(
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: Symbol,
span: Span
span: Span,
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
let sym = sym.as_str();
@ -134,7 +126,9 @@ fn check_and_apply_linkage(
cx.layout_of(mt.ty).llvm_type(cx)
} else {
cx.sess().span_fatal(
span, "must have type `*const T` or `*mut T` due to `#[linkage]` attribute")
span,
"must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
)
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
@ -149,7 +143,7 @@ fn check_and_apply_linkage(
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
});
llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
@ -164,16 +158,12 @@ fn check_and_apply_linkage(
}
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstPointerCast(val, ty)
}
unsafe { llvm::LLVMConstPointerCast(val, ty) }
}
impl CodegenCx<'ll, 'tcx> {
crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstBitCast(val, ty)
}
unsafe { llvm::LLVMConstBitCast(val, ty) }
}
crate fn static_addr_of_mut(
@ -186,13 +176,12 @@ impl CodegenCx<'ll, 'tcx> {
let gv = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = self.define_global(&name[..],
self.val_ty(cv)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
let gv = self.define_global(&name[..], self.val_ty(cv)).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
}
_ => self.define_private_global(self.val_ty(cv)),
};
llvm::LLVMSetInitializer(gv, cv);
@ -208,13 +197,14 @@ impl CodegenCx<'ll, 'tcx> {
return g;
}
let defined_in_current_codegen_unit = self.codegen_unit
.items()
.contains_key(&MonoItem::Static(def_id));
assert!(!defined_in_current_codegen_unit,
"consts::get_static() should always hit the cache for \
let defined_in_current_codegen_unit =
self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
assert!(
!defined_in_current_codegen_unit,
"consts::get_static() should always hit the cache for \
statics defined in the same CGU, but did not for `{:?}`",
def_id);
def_id
);
let ty = instance.ty(self.tcx);
let sym = self.tcx.symbol_name(instance).name;
@ -222,12 +212,9 @@ impl CodegenCx<'ll, 'tcx> {
debug!("get_static: sym={} instance={:?}", sym, instance);
let g = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) {
let llty = self.layout_of(ty).llvm_type(self);
let (g, attrs) = match self.tcx.hir().get(id) {
Node::Item(&hir::Item {
attrs, span, kind: hir::ItemKind::Static(..), ..
}) => {
Node::Item(&hir::Item { attrs, span, kind: hir::ItemKind::Static(..), .. }) => {
let sym_str = sym.as_str();
if let Some(g) = self.get_declared_value(&sym_str) {
if self.val_ty(g) != self.type_ptr_to(llty) {
@ -247,13 +234,16 @@ impl CodegenCx<'ll, 'tcx> {
}
Node::ForeignItem(&hir::ForeignItem {
ref attrs, span, kind: hir::ForeignItemKind::Static(..), ..
ref attrs,
span,
kind: hir::ForeignItemKind::Static(..),
..
}) => {
let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
(check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), &**attrs)
}
item => bug!("get_static: expected static, found {:?}", item)
item => bug!("get_static: expected static, found {:?}", item),
};
debug!("get_static: sym={} attrs={:?}", sym, attrs);
@ -283,8 +273,7 @@ impl CodegenCx<'ll, 'tcx> {
llvm::set_thread_local_mode(g, self.tls_model);
}
let needs_dll_storage_attr =
self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the attrs. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin based LTO is enabled.
@ -292,9 +281,11 @@ impl CodegenCx<'ll, 'tcx> {
// If this assertion triggers, there's something wrong with commandline
// argument validation.
debug_assert!(!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
self.tcx.sess.target.target.options.is_like_msvc &&
self.tcx.sess.opts.cg.prefer_dynamic));
debug_assert!(
!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
&& self.tcx.sess.target.target.options.is_like_msvc
&& self.tcx.sess.opts.cg.prefer_dynamic)
);
if needs_dll_storage_attr {
// This item is external but not foreign, i.e., it originates from an external Rust
@ -329,12 +320,7 @@ impl CodegenCx<'ll, 'tcx> {
}
impl StaticMethods for CodegenCx<'ll, 'tcx> {
fn static_addr_of(
&self,
cv: &'ll Value,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
if let Some(&gv) = self.const_globals.borrow().get(&cv) {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
@ -354,11 +340,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
gv
}
fn codegen_static(
&self,
def_id: DefId,
is_mutable: bool,
) {
fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
unsafe {
let attrs = self.tcx.codegen_fn_attrs(def_id);
@ -395,7 +377,11 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
let visibility = llvm::LLVMRustGetVisibility(g);
let new_g = llvm::LLVMRustGetOrInsertGlobal(
self.llmod, name.as_ptr().cast(), name.len(), val_llty);
self.llmod,
name.as_ptr().cast(),
name.len(),
val_llty,
);
llvm::LLVMRustSetLinkage(new_g, linkage);
llvm::LLVMRustSetVisibility(new_g, visibility);
@ -464,7 +450,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
alloc.inspect_with_undef_and_ptr_outside_interpreter(0..alloc.len())
alloc
.inspect_with_undef_and_ptr_outside_interpreter(0..alloc.len())
.iter()
.all(|b| *b == 0)
};
@ -477,7 +464,6 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
}
}
// Wasm statics with custom link sections get special treatment as they
// go into custom sections of the wasm executable.
if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
@ -492,8 +478,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state (not
// as part of the interpreter execution).
let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(
0..alloc.len());
let bytes =
alloc.inspect_with_undef_and_ptr_outside_interpreter(0..alloc.len());
let alloc = llvm::LLVMMDStringInContext(
self.llcx,
bytes.as_ptr().cast(),

File diff suppressed because it is too large


@ -13,58 +13,80 @@
//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use crate::back::write::{
OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm,
submit_post_lto_module_to_llvm,
start_async_codegen, submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm,
OngoingCodegen,
};
use crate::common::{RealPredicate, TypeKind, IntPredicate};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use rustc::hir;
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::middle::cstore::EncodedMetadata;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{CodegenUnitNameBuilder, CodegenUnit, MonoItem};
use rustc::ty::{self, Ty, TyCtxt, Instance};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry, set_time_depth, time_depth};
use rustc::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc::session::config::{self, EntryFnType, Lto};
use rustc::session::Session;
use rustc::ty::layout::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc::ty::query::Providers;
use rustc::ty::{self, Instance, Ty, TyCtxt};
use rustc::util::common::{print_time_passes_entry, set_time_depth, time, time_depth};
use rustc::util::nodemap::FxHashMap;
use rustc_codegen_utils::{check_for_rustc_errors_attr, symbol_names_test};
use rustc_index::vec::Idx;
use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr};
use rustc_session::cgu_reuse_tracker::CguReuse;
use syntax::attr;
use syntax_pos::Span;
use std::cmp;
use std::ops::{Deref, DerefMut};
use std::time::{Instant, Duration};
use std::time::{Duration, Instant};
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
signed: bool)
-> IntPredicate {
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
match op {
hir::BinOpKind::Eq => IntPredicate::IntEQ,
hir::BinOpKind::Ne => IntPredicate::IntNE,
hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
op => {
bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
found {:?}",
op)
hir::BinOpKind::Lt => {
if signed {
IntPredicate::IntSLT
} else {
IntPredicate::IntULT
}
}
hir::BinOpKind::Le => {
if signed {
IntPredicate::IntSLE
} else {
IntPredicate::IntULE
}
}
hir::BinOpKind::Gt => {
if signed {
IntPredicate::IntSGT
} else {
IntPredicate::IntUGT
}
}
hir::BinOpKind::Ge => {
if signed {
IntPredicate::IntSGE
} else {
IntPredicate::IntUGE
}
}
op => bug!(
"comparison_op_to_icmp_predicate: expected comparison operator, \
found {:?}",
op
),
}
}
@ -77,9 +99,11 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
hir::BinOpKind::Gt => RealPredicate::RealOGT,
hir::BinOpKind::Ge => RealPredicate::RealOGE,
op => {
bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
bug!(
"comparison_op_to_fcmp_predicate: expected comparison operator, \
found {:?}",
op);
op
);
}
}
}
@ -97,7 +121,7 @@ pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let cmp = bin_op_to_fcmp_predicate(op);
let cmp = bx.fcmp(cmp, lhs, rhs);
return bx.sext(cmp, ret_ty);
},
}
ty::Uint(_) => false,
ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
@ -136,17 +160,13 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
.field(cx, FAT_PTR_EXTRA);
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)).field(cx, FAT_PTR_EXTRA);
cx.const_ptrcast(
meth::get_vtable(cx, source, data.principal()),
cx.backend_type(vtable_ptr),
)
}
_ => bug!(
"unsized_info: invalid unsizing {:?} -> {:?}",
source, target
),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
@ -159,12 +179,9 @@ pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
) -> (Bx::Value, Bx::Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.kind, &dst_ty.kind) {
(&ty::Ref(_, a, _),
&ty::Ref(_, b, _)) |
(&ty::Ref(_, a, _),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
(&ty::Ref(_, a, _), &ty::Ref(_, b, _))
| (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
@ -193,8 +210,10 @@ pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
// FIXME(eddyb) move these out of this `match` arm, so they're always
// applied, uniformly, no matter the source/destination types.
(bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
(
bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)),
)
}
_ => bug!("unsize_thin_ptr: called on bad types"),
}
@ -210,9 +229,9 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
match (&src_ty.kind, &dst_ty.kind) {
(&ty::Ref(..), &ty::Ref(..)) |
(&ty::Ref(..), &ty::RawPtr(..)) |
(&ty::RawPtr(..), &ty::RawPtr(..)) => {
(&ty::Ref(..), &ty::Ref(..))
| (&ty::Ref(..), &ty::RawPtr(..))
| (&ty::RawPtr(..), &ty::RawPtr(..)) => {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
@ -224,10 +243,8 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
(bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bx, base, src_ty, dst_ty)
}
OperandValue::Ref(..) => bug!()
OperandValue::Immediate(base) => unsize_thin_ptr(bx, base, src_ty, dst_ty),
OperandValue::Ref(..) => bug!(),
};
OperandValue::Pair(base, info).store(bx, dst);
}
@ -244,18 +261,21 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align,
src_f.layout, MemFlags::empty());
memcpy_ty(
bx,
dst_f.llval,
dst_f.align,
src_f.llval,
src_f.align,
src_f.layout,
MemFlags::empty(),
);
} else {
coerce_unsized_into(bx, src_f, dst_f);
}
}
}
_ => bug!(
"coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty,
),
_ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
}
}
@ -313,11 +333,7 @@ pub fn from_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
val: Bx::Value,
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
bx.zext(val, bx.cx().type_i8())
} else {
val
}
if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { val }
}
pub fn to_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
@ -375,7 +391,7 @@ pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
/// users main function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(cx: &'a Bx::CodegenCx) {
let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
Some((def_id, _)) => { (def_id, cx.tcx().def_span(def_id)) },
Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
None => return,
};
@ -393,7 +409,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(cx: &'
match et {
Some(EntryFnType::Main) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, true),
Some(EntryFnType::Start) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, false),
None => {} // Do nothing.
None => {} // Do nothing.
}
fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
@ -417,15 +433,14 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(cx: &'
// late-bound regions, since late-bound
// regions must appear in the argument
// listing.
let main_ret_ty = cx.tcx().erase_regions(
&main_ret_ty.no_bound_vars().unwrap(),
);
let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap());
if cx.get_defined_value("main").is_some() {
// FIXME: We should be smart and show a better diagnostic here.
cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
.emit();
cx.sess()
.struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
.emit();
cx.sess().abort_if_errors();
bug!();
}
@ -449,10 +464,13 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(cx: &'
ty::ParamEnv::reveal_all(),
start_def_id,
cx.tcx().intern_substs(&[main_ret_ty.into()]),
).unwrap()
)
.unwrap(),
);
(start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
arg_argc, arg_argv])
(
start_fn,
vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
)
} else {
debug!("using user-defined start fn");
(rust_main, vec![arg_argc, arg_argv])
@ -467,9 +485,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(cx: &'
/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
bx: &mut Bx
) -> (Bx::Value, Bx::Value)
{
bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
if cx.sess().target.target.options.main_needs_argc_argv {
// Params from native `main()` used as args for rust start function
let param_argc = bx.get_param(0);
@ -496,8 +513,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
check_for_rustc_errors_attr(tcx);
// Skip crate items and just output metadata in -Z no-codegen mode.
if tcx.sess.opts.debugging_opts.no_codegen ||
!tcx.sess.opts.output_types.should_codegen() {
if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
let ongoing_codegen = start_async_codegen(backend, tcx, metadata, 1);
ongoing_codegen.codegen_finished(tcx);
@ -538,28 +554,21 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
// linkage, then it's already got an allocator shim and we'll be using that
// one instead. If nothing exists then it's our job to generate the
// allocator!
let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE)
.iter()
.any(|(_, list)| {
use rustc::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
use rustc::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let allocator_module = if any_dynamic_crate {
None
} else if let Some(kind) = tcx.allocator_kind() {
let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("allocator")).to_string();
let llmod_id =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
let mut modules = backend.new_metadata(tcx, &llmod_id);
time(tcx.sess, "write allocator module", || {
backend.codegen_allocator(tcx, &mut modules, kind)
});
Some(ModuleCodegen {
name: llmod_id,
module_llvm: modules,
kind: ModuleKind::Allocator,
})
Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
} else {
None
};
@ -570,13 +579,15 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
if need_metadata_module {
// Codegen the encoded metadata.
let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("metadata")).to_string();
let metadata_cgu_name =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
time(tcx.sess, "write compressed metadata", || {
backend.write_compressed_metadata(tcx, &ongoing_codegen.metadata,
&mut metadata_llvm_module);
backend.write_compressed_metadata(
tcx,
&ongoing_codegen.metadata,
&mut metadata_llvm_module,
);
});
let metadata_module = ModuleCodegen {
@ -612,19 +623,26 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
false
}
CguReuse::PreLto => {
submit_pre_lto_module_to_llvm(&backend, tcx, &ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
submit_pre_lto_module_to_llvm(
&backend,
tcx,
&ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
},
);
true
}
CguReuse::PostLto => {
submit_post_lto_module_to_llvm(&backend, &ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
submit_post_lto_module_to_llvm(
&backend,
&ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
},
);
true
}
};
@ -636,9 +654,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
// -Ztime-passes output manually.
let time_depth = time_depth();
set_time_depth(time_depth + 1);
print_time_passes_entry(tcx.sess.time_passes(),
"codegen to LLVM IR",
total_codegen_time);
print_time_passes_entry(tcx.sess.time_passes(), "codegen to LLVM IR", total_codegen_time);
set_time_depth(time_depth);
::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
@ -699,13 +715,9 @@ impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
}
fn assert_and_save_dep_graph(tcx: TyCtxt<'_>) {
time(tcx.sess,
"assert dep graph",
|| ::rustc_incremental::assert_dep_graph(tcx));
time(tcx.sess, "assert dep graph", || ::rustc_incremental::assert_dep_graph(tcx));
time(tcx.sess,
"serialize dep graph",
|| ::rustc_incremental::save_dep_graph(tcx));
time(tcx.sess, "serialize dep graph", || ::rustc_incremental::save_dep_graph(tcx));
}
impl CrateInfo {
@ -765,7 +777,8 @@ impl CrateInfo {
// No need to look for lang items that are whitelisted and don't
// actually need to exist.
let missing = missing.iter()
let missing = missing
.iter()
.cloned()
.filter(|&l| !weak_lang_items::whitelisted(tcx, l))
.collect();
@ -812,15 +825,15 @@ pub fn provide_both(providers: &mut Providers<'_>) {
providers.dllimport_foreign_items = |tcx, krate| {
let module_map = tcx.foreign_modules(krate);
let module_map = module_map.iter()
.map(|lib| (lib.def_id, lib))
.collect::<FxHashMap<_, _>>();
let module_map =
module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();
let dllimports = tcx.native_libraries(krate)
let dllimports = tcx
.native_libraries(krate)
.iter()
.filter(|lib| {
if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
return false
return false;
}
let cfg = match lib.cfg {
Some(ref cfg) => cfg,
@ -835,21 +848,20 @@ pub fn provide_both(providers: &mut Providers<'_>) {
tcx.arena.alloc(dllimports)
};
providers.is_dllimport_foreign_item = |tcx, def_id| {
tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
};
providers.is_dllimport_foreign_item =
|tcx, def_id| tcx.dllimport_foreign_items(def_id.krate).contains(&def_id);
}
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
if !tcx.dep_graph.is_fully_enabled() {
return CguReuse::No
return CguReuse::No;
}
let work_product_id = &cgu.work_product_id();
if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No
return CguReuse::No;
}
// Try to mark the CGU as green. If it we can do so, it means that nothing
@ -859,17 +871,15 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
// know that later). If we are not doing LTO, there is only one optimized
// version of each module, so we re-use that.
let dep_node = cgu.codegen_dep_node(tcx);
assert!(!tcx.dep_graph.dep_node_exists(&dep_node),
assert!(
!tcx.dep_graph.dep_node_exists(&dep_node),
"CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
cgu.name());
cgu.name()
);
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
// We can re-use either the pre- or the post-thinlto state
if tcx.sess.lto() != Lto::No {
CguReuse::PreLto
} else {
CguReuse::PostLto
}
if tcx.sess.lto() != Lto::No { CguReuse::PreLto } else { CguReuse::PostLto }
} else {
CguReuse::No
}


@ -1,15 +1,15 @@
use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
use rustc::ty::{self, Instance, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use rustc::ty::layout::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx};
use rustc::ty::{self, Instance, Ty};
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
@ -27,31 +27,14 @@ pub struct PlaceRef<'tcx, V> {
}
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(
llval: V,
layout: TyLayout<'tcx>,
) -> PlaceRef<'tcx, V> {
pub fn new_sized(llval: V, layout: TyLayout<'tcx>) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
llval,
llextra: None,
layout,
align: layout.align.abi
}
PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
pub fn new_sized_aligned(
llval: V,
layout: TyLayout<'tcx>,
align: Align,
) -> PlaceRef<'tcx, V> {
pub fn new_sized_aligned(llval: V, layout: TyLayout<'tcx>, align: Align) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
llval,
llextra: None,
layout,
align
}
PlaceRef { llval, llextra: None, layout, align }
}
fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -60,12 +43,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
layout: TyLayout<'tcx>,
) -> PlaceRef<'tcx, V> {
assert!(!bx.cx().type_has_metadata(layout.ty));
PlaceRef {
llval,
llextra: None,
layout,
align: layout.align.abi
}
PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
// FIXME(eddyb) pass something else for the name so no work is done
@ -92,10 +70,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
Self::alloca(bx, ptr_layout)
}
pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
&self,
cx: &Cx
) -> V {
pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert_eq!(count, 0);
@ -112,7 +87,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
/// Access a field, at a point when the value's case is known.
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self, bx: &mut Bx,
self,
bx: &mut Bx,
ix: usize,
) -> Self {
let field = self.layout.field(bx.cx(), ix);
@ -133,11 +109,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
PlaceRef {
// HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
llextra: if bx.cx().type_has_metadata(field.ty) {
self.llextra
} else {
None
},
llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
}
@ -149,8 +121,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// * packed struct - there is no alignment padding
match field.ty.kind {
_ if self.llextra.is_none() => {
debug!("unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, self.llval);
debug!(
"unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, self.llval
);
return simple();
}
_ if !field.is_unsized() => return simple(),
@ -222,7 +196,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &mut Bx,
cast_to: Ty<'tcx>
cast_to: Ty<'tcx>,
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
if self.layout.abi.is_uninhabited() {
@ -230,7 +204,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
layout::Variants::Single { index } => {
let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
let discr_val = self
.layout
.ty
.discriminant_for_variant(bx.cx().tcx(), index)
.map_or(index.as_u32() as u128, |discr| discr.val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
@ -252,7 +229,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// let LLVM interpret the `i1` as signed, because
// then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
_ => false
_ => false,
};
bx.intcast(encoded_discr.immediate(), cast_to, signed)
}
@ -330,7 +307,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
variant_index: VariantIdx
variant_index: VariantIdx,
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
@ -353,20 +330,19 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx.store(
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.llval,
ptr.align);
ptr.align,
);
}
layout::Variants::Multiple {
discr_kind: layout::DiscriminantKind::Niche {
dataful_variant,
ref niche_variants,
niche_start,
},
discr_kind:
layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
discr_index,
..
} => {
if variant_index != dataful_variant {
if bx.cx().sess().target.target.arch == "arm" ||
bx.cx().sess().target.target.arch == "aarch64" {
if bx.cx().sess().target.target.arch == "arm"
|| bx.cx().sess().target.target.arch == "aarch64"
{
// FIXME(#34427): as workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let fill_byte = bx.cx().const_u8(0);
@ -377,8 +353,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
let niche = self.project_field(bx, discr_index);
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128)
.wrapping_add(niche_start);
let niche_value = (niche_value as u128).wrapping_add(niche_start);
// FIXME(eddyb): check the actual primitive type here.
let niche_llval = if niche_value == 0 {
// HACK(eddyb): using `c_null` as it works on all types.
@ -395,7 +370,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
llindex: V
llindex: V,
) -> Self {
// Statically compute the offset if we can, otherwise just use the element size,
// as this will yield the lowest alignment.
@ -417,7 +392,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
variant_index: VariantIdx
variant_index: VariantIdx,
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
@ -442,17 +417,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
bx: &mut Bx,
place_ref: &mir::PlaceRef<'_, 'tcx>
place_ref: &mir::PlaceRef<'_, 'tcx>,
) -> PlaceRef<'tcx, Bx::Value> {
debug!("codegen_place(place_ref={:?})", place_ref);
let cx = self.cx;
let tcx = self.cx.tcx();
let result = match place_ref {
mir::PlaceRef {
base: mir::PlaceBase::Local(index),
projection: [],
} => {
mir::PlaceRef { base: mir::PlaceBase::Local(index), projection: [] } => {
match self.locals[*index] {
LocalRef::Place(place) => {
return place;
@ -466,11 +438,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
mir::PlaceRef {
base: mir::PlaceBase::Static(box mir::Static {
ty,
kind: mir::StaticKind::Promoted(promoted, substs),
def_id,
}),
base:
mir::PlaceBase::Static(box mir::Static {
ty,
kind: mir::StaticKind::Promoted(promoted, substs),
def_id,
}),
projection: [],
} => {
let instance = Instance::new(*def_id, self.monomorphize(substs));
@ -478,10 +451,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match bx.tcx().const_eval_promoted(instance, *promoted) {
Ok(val) => match val.val {
ty::ConstKind::Value(mir::interpret::ConstValue::ByRef {
alloc, offset
}) => {
bx.cx().from_const_alloc(layout, alloc, offset)
}
alloc,
offset,
}) => bx.cx().from_const_alloc(layout, alloc, offset),
_ => bug!("promoteds should have an allocation: {:?}", val),
},
Err(_) => {
@ -492,19 +464,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.abort();
// We still have to return a place but it doesn't matter,
// this code is unreachable.
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(bx.cx().backend_type(layout))
);
let llval =
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout)));
PlaceRef::new_sized(llval, layout)
}
}
}
mir::PlaceRef {
base: mir::PlaceBase::Static(box mir::Static {
ty,
kind: mir::StaticKind::Static,
def_id,
}),
base:
mir::PlaceBase::Static(box mir::Static {
ty,
kind: mir::StaticKind::Static,
def_id,
}),
projection: [],
} => {
// NB: The layout of a static may be unsized as is the case when working
@ -512,26 +484,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let layout = cx.layout_of(self.monomorphize(&ty));
let static_ = bx.get_static(*def_id);
PlaceRef::new_thin_place(bx, static_, layout)
},
mir::PlaceRef {
base,
projection: [proj_base @ .., mir::ProjectionElem::Deref],
} => {
// Load the pointer from its location.
self.codegen_consume(bx, &mir::PlaceRef {
base,
projection: proj_base,
}).deref(bx.cx())
}
mir::PlaceRef {
base,
projection: [proj_base @ .., elem],
} => {
mir::PlaceRef { base, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
// Load the pointer from its location.
self.codegen_consume(bx, &mir::PlaceRef { base, projection: proj_base })
.deref(bx.cx())
}
mir::PlaceRef { base, projection: [proj_base @ .., elem] } => {
// FIXME turn this recursion into iteration
let cg_base = self.codegen_place(bx, &mir::PlaceRef {
base,
projection: proj_base,
});
let cg_base =
self.codegen_place(bx, &mir::PlaceRef { base, projection: proj_base });
match elem {
mir::ProjectionElem::Deref => bug!(),
@ -539,50 +501,54 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cg_base.project_field(bx, field.index())
}
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(
mir::Place::from(*index)
);
let index = &mir::Operand::Copy(mir::Place::from(*index));
let index = self.codegen_operand(bx, index);
let llindex = index.immediate();
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
mir::ProjectionElem::ConstantIndex {
offset,
from_end: false,
min_length: _,
} => {
let lloffset = bx.cx().const_usize(*offset as u64);
cg_base.project_index(bx, lloffset)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
mir::ProjectionElem::ConstantIndex {
offset,
from_end: true,
min_length: _,
} => {
let lloffset = bx.cx().const_usize(*offset as u64);
let lllen = cg_base.len(bx.cx());
let llindex = bx.sub(lllen, lloffset);
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::Subslice { from, to, from_end } => {
let mut subslice = cg_base.project_index(bx,
bx.cx().const_usize(*from as u64));
let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
.projection_ty(tcx, elem).ty;
let mut subslice =
cg_base.project_index(bx, bx.cx().const_usize(*from as u64));
let projected_ty =
PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
assert!(from_end, "slice subslices should be `from_end`");
subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
bx.cx().const_usize((*from as u64) + (*to as u64))));
subslice.llextra = Some(bx.sub(
cg_base.llextra.unwrap(),
bx.cx().const_usize((*from as u64) + (*to as u64)),
));
}
// Cast the place pointer type to the new
// array or slice type (`*[%_; new_len]`).
subslice.llval = bx.pointercast(subslice.llval,
bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
subslice.llval = bx.pointercast(
subslice.llval,
bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
);
subslice
}
mir::ProjectionElem::Downcast(_, v) => {
cg_base.project_downcast(bx, *v)
}
mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, *v),
}
}
};
@ -592,12 +558,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
let place_ty = mir::Place::ty_from(
place_ref.base,
place_ref.projection,
*self.mir,
tcx,
);
let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, *self.mir, tcx);
self.monomorphize(&place_ty.ty)
}
}


@ -1,13 +1,13 @@
use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::map::{DefPathData, DisambiguatedDefPathData};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Instance};
use rustc::ty::print::{Printer, Print};
use rustc::ty::subst::{GenericArg, Subst, GenericArgKind};
use rustc::ty::print::{Print, Printer};
use rustc::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_target::spec::abi::Abi;
use syntax::ast::{IntTy, UintTy, FloatTy};
use syntax::ast::{FloatTy, IntTy, UintTy};
use std::fmt::Write;
use std::ops::Range;
@ -19,8 +19,7 @@ pub(super) fn mangle(
) -> String {
let def_id = instance.def_id();
// FIXME(eddyb) this should ideally not be needed.
let substs =
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let substs = tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
let prefix = "_R";
let mut cx = SymbolMangler {
@ -36,12 +35,7 @@ pub(super) fn mangle(
out: String::from(prefix),
};
cx = if instance.is_vtable_shim() {
cx.path_append_ns(
|cx| cx.print_def_path(def_id, substs),
'S',
0,
"",
).unwrap()
cx.path_append_ns(|cx| cx.print_def_path(def_id, substs), 'S', 0, "").unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
@ -183,9 +177,10 @@ impl SymbolMangler<'tcx> {
fn in_binder<T>(
mut self,
value: &ty::Binder<T>,
print_value: impl FnOnce(Self, &T) -> Result<Self, !>
print_value: impl FnOnce(Self, &T) -> Result<Self, !>,
) -> Result<Self, !>
where T: TypeFoldable<'tcx>
where
T: TypeFoldable<'tcx>,
{
let regions = if value.has_late_bound_regions() {
self.tcx.collect_referenced_late_bound_regions(value)
@ -196,16 +191,20 @@ impl SymbolMangler<'tcx> {
let mut lifetime_depths =
self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
let lifetimes = regions.into_iter().map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
},
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
}).max().map_or(0, |max| max + 1);
let lifetimes = regions
.into_iter()
.map(|br| {
match br {
ty::BrAnon(i) => {
// FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`.
assert_ne!(i, 0);
i - 1
}
_ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
}
})
.max()
.map_or(0, |max| max + 1);
self.push_opt_integer_62("G", lifetimes as u64);
lifetime_depths.end += lifetimes;
@ -263,8 +262,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
let key = self.tcx.def_key(impl_def_id);
let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
let mut param_env = self.tcx.param_env(impl_def_id)
.with_reveal_all();
let mut param_env = self.tcx.param_env(impl_def_id).with_reveal_all();
if !substs.is_empty() {
param_env = param_env.subst(self.tcx, substs);
}
@ -272,8 +270,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
match &mut impl_trait_ref {
Some(impl_trait_ref) => {
assert_eq!(impl_trait_ref.self_ty(), self_ty);
*impl_trait_ref =
self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
*impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
self_ty = impl_trait_ref.self_ty();
}
None => {
@ -289,10 +286,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
)
}
fn print_region(
mut self,
region: ty::Region<'_>,
) -> Result<Self::Region, Self::Error> {
fn print_region(mut self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
let i = match *region {
// Erased lifetimes use the index 0, for a
// shorter mangling of `L_`.
@ -318,10 +312,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
Ok(self)
}
fn print_type(
mut self,
ty: Ty<'tcx>,
) -> Result<Self::Type, Self::Error> {
fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
// Basic types, never cached (single-character).
let basic_type = match ty.kind {
ty::Bool => "b",
@ -345,8 +336,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
ty::Never => "z",
// Placeholders (should be demangled as `_`).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => "p",
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => "p",
_ => "",
};
@ -362,14 +352,15 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
match ty.kind {
// Basic types, handled above.
ty::Bool | ty::Char | ty::Str |
ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Never => unreachable!(),
ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => {
unreachable!()
}
ty::Tuple(_) if ty.is_unit() => unreachable!(),
// Placeholders, also handled as part of basic types.
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) |
ty::Infer(_) | ty::Error => unreachable!(),
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => {
unreachable!()
}
ty::Ref(r, ty, mutbl) => {
self.push(match mutbl {
@ -409,13 +400,13 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
}
// Mangle all nominal types as paths.
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs) |
ty::FnDef(def_id, substs) |
ty::Opaque(def_id, substs) |
ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) |
ty::Closure(def_id, substs) |
ty::Generator(def_id, substs, _) => {
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
| ty::FnDef(def_id, substs)
| ty::Opaque(def_id, substs)
| ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
| ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs })
| ty::Closure(def_id, substs)
| ty::Generator(def_id, substs, _) => {
self = self.print_def_path(def_id, substs)?;
}
ty::Foreign(def_id) => {
@ -460,9 +451,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
self = r.print(self)?;
}
ty::GeneratorWitness(_) => {
bug!("symbol_names: unexpected `GeneratorWitness`")
}
ty::GeneratorWitness(_) => bug!("symbol_names: unexpected `GeneratorWitness`"),
}
// Only cache types that do not refer to an enclosing
@ -502,10 +491,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
Ok(self)
}
fn print_const(
mut self,
ct: &'tcx ty::Const<'tcx>,
) -> Result<Self::Const, Self::Error> {
fn print_const(mut self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) {
return self.print_backref(i);
}
@ -514,8 +500,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
match ct.ty.kind {
ty::Uint(_) => {}
_ => {
bug!("symbol_names: unsupported constant of type `{}` ({:?})",
ct.ty, ct);
bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct);
}
}
self = ct.ty.print(self)?;
@ -539,10 +524,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
Ok(self)
}
fn path_crate(
mut self,
cnum: CrateNum,
) -> Result<Self::Path, Self::Error> {
fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
self.push("C");
let fingerprint = self.tcx.crate_disambiguator(cnum).to_fingerprint();
self.push_disambiguator(fingerprint.to_smaller_hash());
@ -612,7 +594,7 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
print_prefix,
ns,
disambiguated_data.disambiguator as u64,
name.as_ref().map_or("", |s| &s[..])
name.as_ref().map_or("", |s| &s[..]),
)
}
fn path_generic_args(
@ -621,17 +603,13 @@ impl Printer<'tcx> for SymbolMangler<'tcx> {
args: &[GenericArg<'tcx>],
) -> Result<Self::Path, Self::Error> {
// Don't print any regions if they're all erased.
let print_regions = args.iter().any(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
}
let print_regions = args.iter().any(|arg| match arg.unpack() {
GenericArgKind::Lifetime(r) => *r != ty::ReErased,
_ => false,
});
let args = args.iter().cloned().filter(|arg| {
match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
}
let args = args.iter().cloned().filter(|arg| match arg.unpack() {
GenericArgKind::Lifetime(_) => print_regions,
_ => true,
});
if args.clone().next().is_none() {

File diff suppressed because it is too large


@ -1,14 +1,14 @@
use rustc::hir;
use rustc::hir::Node;
use rustc::mir::{self, ClearCrossCrate, Local, LocalInfo, Location, ReadOnlyBodyAndCache};
use rustc::mir::{Mutability, Place, PlaceRef, PlaceBase, ProjectionElem};
use rustc::mir::{Mutability, Place, PlaceBase, PlaceRef, ProjectionElem};
use rustc::ty::{self, Ty, TyCtxt};
use rustc_index::vec::Idx;
use syntax_pos::Span;
use syntax_pos::symbol::kw;
use syntax_pos::Span;
use crate::borrow_check::MirBorrowckCtxt;
use crate::borrow_check::diagnostics::BorrowedContentSource;
use crate::borrow_check::MirBorrowckCtxt;
use crate::util::collect_writes::FindAssignments;
use rustc_errors::Applicability;
@ -42,16 +42,12 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
debug!("report_mutability_error: access_place_desc={:?}", access_place_desc);
match the_place_err {
PlaceRef {
base: PlaceBase::Local(local),
projection: [],
} => {
PlaceRef { base: PlaceBase::Local(local), projection: [] } => {
item_msg = format!("`{}`", access_place_desc.unwrap());
if access_place.as_local().is_some() {
reason = ", as it is not declared as mutable".to_string();
} else {
let name = self.local_names[*local]
.expect("immutable unnamed local");
let name = self.local_names[*local].expect("immutable unnamed local");
reason = format!(", as `{}` is not declared as mutable", name);
}
}
@ -61,12 +57,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
Place::ty_from(
&the_place_err.base,
proj_base,
*self.body,
self.infcx.tcx
).ty));
Place::ty_from(&the_place_err.base, proj_base, *self.body, self.infcx.tcx).ty
));
item_msg = format!("`{}`", access_place_desc.unwrap());
if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
@ -77,17 +69,15 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
}
PlaceRef {
base: &PlaceBase::Local(local),
projection: [ProjectionElem::Deref],
} if self.body.local_decls[local].is_ref_for_guard() => {
PlaceRef { base: &PlaceBase::Local(local), projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_ref_for_guard() =>
{
item_msg = format!("`{}`", access_place_desc.unwrap());
reason = ", as it is immutable for the pattern guard".to_string();
}
PlaceRef {
base: &PlaceBase::Local(local),
projection: [ProjectionElem::Deref],
} if self.body.local_decls[local].is_ref_to_static() => {
PlaceRef { base: &PlaceBase::Local(local), projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_ref_to_static() =>
{
if access_place.projection.len() == 1 {
item_msg = format!("immutable static item `{}`", access_place_desc.unwrap());
reason = String::new();
@ -102,13 +92,11 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
}
}
PlaceRef {
base: _,
projection: [proj_base @ .., ProjectionElem::Deref],
} => {
if the_place_err.base == &PlaceBase::Local(Local::new(1)) &&
proj_base.is_empty() &&
!self.upvars.is_empty() {
PlaceRef { base: _, projection: [proj_base @ .., ProjectionElem::Deref] } => {
if the_place_err.base == &PlaceBase::Local(Local::new(1))
&& proj_base.is_empty()
&& !self.upvars.is_empty()
{
item_msg = format!("`{}`", access_place_desc.unwrap());
debug_assert!(self.body.local_decls[Local::new(1)].ty.is_region_ptr());
debug_assert!(is_closure_or_generator(
@ -121,12 +109,11 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
.ty
));
reason =
if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
", as it is a captured variable in a `Fn` closure".to_string()
} else {
", as `Fn` closures cannot mutate their captured variables".to_string()
}
reason = if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
", as it is a captured variable in a `Fn` closure".to_string()
} else {
", as `Fn` closures cannot mutate their captured variables".to_string()
}
} else {
let source = self.borrowed_content_source(PlaceRef {
base: the_place_err.base,
@ -149,26 +136,13 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
}
PlaceRef {
base: PlaceBase::Static(_),
..
PlaceRef { base: PlaceBase::Static(_), .. }
| PlaceRef { base: _, projection: [.., ProjectionElem::Index(_)] }
| PlaceRef { base: _, projection: [.., ProjectionElem::ConstantIndex { .. }] }
| PlaceRef { base: _, projection: [.., ProjectionElem::Subslice { .. }] }
| PlaceRef { base: _, projection: [.., ProjectionElem::Downcast(..)] } => {
bug!("Unexpected immutable place.")
}
| PlaceRef {
base: _,
projection: [.., ProjectionElem::Index(_)],
}
| PlaceRef {
base: _,
projection: [.., ProjectionElem::ConstantIndex { .. }],
}
| PlaceRef {
base: _,
projection: [.., ProjectionElem::Subslice { .. }],
}
| PlaceRef {
base: _,
projection: [.., ProjectionElem::Downcast(..)],
} => bug!("Unexpected immutable place."),
}
debug!("report_mutability_error: item_msg={:?}, reason={:?}", item_msg, reason);
@ -191,18 +165,14 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let borrow_spans = self.borrow_spans(span, location);
let borrow_span = borrow_spans.args_or_use();
err = self.cannot_borrow_path_as_mutable_because(
borrow_span,
&item_msg,
&reason,
);
err = self.cannot_borrow_path_as_mutable_because(borrow_span, &item_msg, &reason);
borrow_spans.var_span_label(
&mut err,
format!(
"mutable borrow occurs due to use of `{}` in closure",
// always Some() if the message is printed.
self.describe_place(access_place.as_ref()).unwrap_or_default(),
)
),
);
borrow_span
}
@ -219,11 +189,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// after the field access).
PlaceRef {
base,
projection: [proj_base @ ..,
ProjectionElem::Deref,
ProjectionElem::Field(field, _),
ProjectionElem::Deref,
],
projection:
[proj_base @ .., ProjectionElem::Deref, ProjectionElem::Field(field, _), ProjectionElem::Deref],
} => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
@ -239,49 +206,50 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
Applicability::MaybeIncorrect,
);
}
},
}
// Suggest removing a `&mut` from the use of a mutable reference.
PlaceRef {
base: PlaceBase::Local(local),
projection: [],
} if {
self.body.local_decls.get(*local).map(|local_decl| {
if let LocalInfo::User(ClearCrossCrate::Set(
mir::BindingForm::ImplicitSelf(kind)
)) = local_decl.local_info {
// Check if the user variable is a `&mut self` and we can therefore
// suggest removing the `&mut`.
//
// Deliberately fall into this case for all implicit self types,
// so that we don't fall in to the next case with them.
kind == mir::ImplicitSelfKind::MutRef
} else if Some(kw::SelfLower) == self.local_names[*local] {
// Otherwise, check if the name is the self kewyord - in which case
// we have an explicit self. Do the same thing in this case and check
// for a `self: &mut Self` to suggest removing the `&mut`.
if let ty::Ref(
_, _, hir::Mutability::Mut
) = local_decl.ty.kind {
true
} else {
false
}
} else {
false
}
}).unwrap_or(false)
} => {
PlaceRef { base: PlaceBase::Local(local), projection: [] }
if {
self.body
.local_decls
.get(*local)
.map(|local_decl| {
if let LocalInfo::User(ClearCrossCrate::Set(
mir::BindingForm::ImplicitSelf(kind),
)) = local_decl.local_info
{
// Check if the user variable is a `&mut self` and we can therefore
// suggest removing the `&mut`.
//
// Deliberately fall into this case for all implicit self types,
// so that we don't fall in to the next case with them.
kind == mir::ImplicitSelfKind::MutRef
} else if Some(kw::SelfLower) == self.local_names[*local] {
// Otherwise, check if the name is the self kewyord - in which case
// we have an explicit self. Do the same thing in this case and check
// for a `self: &mut Self` to suggest removing the `&mut`.
if let ty::Ref(_, _, hir::Mutability::Mut) = local_decl.ty.kind {
true
} else {
false
}
} else {
false
}
})
.unwrap_or(false)
} =>
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_label(span, "try removing `&mut` here");
},
}
// We want to suggest users use `let mut` for local (user
// variable) mutations...
PlaceRef {
base: PlaceBase::Local(local),
projection: [],
} if self.body.local_decls[*local].can_be_made_mutable() => {
PlaceRef { base: PlaceBase::Local(local), projection: [] }
if self.body.local_decls[*local].can_be_made_mutable() =>
{
// ... but it doesn't make sense to suggest it on
// variables that are `ref x`, `ref mut x`, `&self`,
// or `&mut self` (such variables are simply not
@ -310,8 +278,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
err.span_label(span, format!("cannot {ACT}", ACT = act));
let upvar_hir_id = self.upvars[upvar_index.index()].var_hir_id;
if let Some(Node::Binding(pat)) = self.infcx.tcx.hir().find(upvar_hir_id)
{
if let Some(Node::Binding(pat)) = self.infcx.tcx.hir().find(upvar_hir_id) {
if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,
_,
@ -332,10 +299,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// complete hack to approximate old AST-borrowck
// diagnostic: if the span starts with a mutable borrow of
// a local variable, then just suggest the user remove it.
PlaceRef {
base: PlaceBase::Local(_),
projection: [],
} if {
PlaceRef { base: PlaceBase::Local(_), projection: [] }
if {
if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
snippet.starts_with("&mut ")
} else {
@ -347,10 +312,9 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
err.span_label(span, "try removing `&mut` here");
}
PlaceRef {
base: PlaceBase::Local(local),
projection: [ProjectionElem::Deref],
} if self.body.local_decls[*local].is_ref_for_guard() => {
PlaceRef { base: PlaceBase::Local(local), projection: [ProjectionElem::Deref] }
if self.body.local_decls[*local].is_ref_for_guard() =>
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.note(
"variables bound in patterns are immutable until the end of the pattern guard",
@ -362,10 +326,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
//
// FIXME: can this case be generalized to work for an
// arbitrary base for the projection?
PlaceRef {
base: PlaceBase::Local(local),
projection: [ProjectionElem::Deref],
} if self.body.local_decls[*local].is_user_variable() =>
PlaceRef { base: PlaceBase::Local(local), projection: [ProjectionElem::Deref] }
if self.body.local_decls[*local].is_user_variable() =>
{
let local_decl = &self.body.local_decls[*local];
let suggestion = match local_decl.local_info {
@ -449,41 +411,32 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
PlaceRef {
base,
projection: [ProjectionElem::Deref],
// FIXME document what is this 1 magic number about
} if *base == PlaceBase::Local(Local::new(1)) &&
!self.upvars.is_empty() =>
{
// FIXME document what is this 1 magic number about
} if *base == PlaceBase::Local(Local::new(1)) && !self.upvars.is_empty() => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_help(
self.body.span,
"consider changing this to accept closures that implement `FnMut`"
"consider changing this to accept closures that implement `FnMut`",
);
}
PlaceRef {
base: _,
projection: [.., ProjectionElem::Deref],
} => {
PlaceRef { base: _, projection: [.., ProjectionElem::Deref] } => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
match opt_source {
Some(BorrowedContentSource::OverloadedDeref(ty)) => {
err.help(
&format!(
"trait `DerefMut` is required to modify through a dereference, \
err.help(&format!(
"trait `DerefMut` is required to modify through a dereference, \
but it is not implemented for `{}`",
ty,
),
);
},
ty,
));
}
Some(BorrowedContentSource::OverloadedIndex(ty)) => {
err.help(
&format!(
"trait `IndexMut` is required to modify indexed content, \
err.help(&format!(
"trait `IndexMut` is required to modify indexed content, \
but it is not implemented for `{}`",
ty,
),
);
ty,
));
}
_ => (),
}
@ -503,17 +456,20 @@ fn suggest_ampmut_self<'tcx>(
local_decl: &mir::LocalDecl<'tcx>,
) -> (Span, String) {
let sp = local_decl.source_info.span;
(sp, match tcx.sess.source_map().span_to_snippet(sp) {
Ok(snippet) => {
let lt_pos = snippet.find('\'');
if let Some(lt_pos) = lt_pos {
format!("&{}mut self", &snippet[lt_pos..snippet.len() - 4])
} else {
"&mut self".to_string()
(
sp,
match tcx.sess.source_map().span_to_snippet(sp) {
Ok(snippet) => {
let lt_pos = snippet.find('\'');
if let Some(lt_pos) = lt_pos {
format!("&{}mut self", &snippet[lt_pos..snippet.len() - 4])
} else {
"&mut self".to_string()
}
}
}
_ => "&mut self".to_string()
})
_ => "&mut self".to_string(),
},
)
}
// When we want to suggest a user change a local variable to be a `&mut`, there
@ -542,10 +498,9 @@ fn suggest_ampmut<'tcx>(
if !locations.is_empty() {
let assignment_rhs_span = body.source_info(locations[0]).span;
if let Ok(src) = tcx.sess.source_map().span_to_snippet(assignment_rhs_span) {
if let (true, Some(ws_pos)) = (
src.starts_with("&'"),
src.find(|c: char| -> bool { c.is_whitespace() }),
) {
if let (true, Some(ws_pos)) =
(src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
{
let lt_name = &src[1..ws_pos];
let ty = &src[ws_pos..];
return (assignment_rhs_span, format!("&{} mut {}", lt_name, ty));
@ -567,10 +522,9 @@ fn suggest_ampmut<'tcx>(
};
if let Ok(src) = tcx.sess.source_map().span_to_snippet(highlight_span) {
if let (true, Some(ws_pos)) = (
src.starts_with("&'"),
src.find(|c: char| -> bool { c.is_whitespace() }),
) {
if let (true, Some(ws_pos)) =
(src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
{
let lt_name = &src[1..ws_pos];
let ty = &src[ws_pos..];
return (highlight_span, format!("&{} mut{}", lt_name, ty));
@ -579,12 +533,14 @@ fn suggest_ampmut<'tcx>(
let ty_mut = local_decl.ty.builtin_deref(true).unwrap();
assert_eq!(ty_mut.mutbl, hir::Mutability::Not);
(highlight_span,
if local_decl.ty.is_region_ptr() {
format!("&mut {}", ty_mut.ty)
} else {
format!("*mut {}", ty_mut.ty)
})
(
highlight_span,
if local_decl.ty.is_region_ptr() {
format!("&mut {}", ty_mut.ty)
} else {
format!("*mut {}", ty_mut.ty)
},
)
}
fn is_closure_or_generator(ty: Ty<'_>) -> bool {
@ -613,10 +569,11 @@ fn annotate_struct_field(
// Now we're dealing with the actual struct that we're going to suggest a change to,
// we can expect a field that is an immutable reference to a type.
if let hir::Node::Field(field) = node {
if let hir::TyKind::Rptr(lifetime, hir::MutTy {
mutbl: hir::Mutability::Not,
ref ty
}) = field.ty.kind {
if let hir::TyKind::Rptr(
lifetime,
hir::MutTy { mutbl: hir::Mutability::Not, ref ty },
) = field.ty.kind
{
// Get the snippets in two parts - the named lifetime (if there is one) and
// type being referenced, that way we can reconstruct the snippet without loss
// of detail.
@ -629,10 +586,7 @@ fn annotate_struct_field(
return Some((
field.ty.span,
format!(
"&{}mut {}",
lifetime_snippet, &*type_snippet,
),
format!("&{}mut {}", lifetime_snippet, &*type_snippet,),
));
}
}
@ -645,9 +599,7 @@ fn annotate_struct_field(
/// If possible, suggest replacing `ref` with `ref mut`.
fn suggest_ref_mut(tcx: TyCtxt<'_>, binding_span: Span) -> Option<String> {
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).ok()?;
if hi_src.starts_with("ref")
&& hi_src["ref".len()..].starts_with(rustc_lexer::is_whitespace)
{
if hi_src.starts_with("ref") && hi_src["ref".len()..].starts_with(rustc_lexer::is_whitespace) {
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else {

File diff suppressed because it is too large

@ -46,25 +46,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
scope: Option<region::Scope>,
expr: Expr<'tcx>,
) -> BlockAnd<Rvalue<'tcx>> {
debug!(
"expr_as_rvalue(block={:?}, scope={:?}, expr={:?})",
block, scope, expr
);
debug!("expr_as_rvalue(block={:?}, scope={:?}, expr={:?})", block, scope, expr);
let this = self;
let expr_span = expr.span;
let source_info = this.source_info(expr_span);
match expr.kind {
ExprKind::Scope {
region_scope,
lint_level,
value,
} => {
ExprKind::Scope { region_scope, lint_level, value } => {
let region_scope = (region_scope, source_info);
this.in_scope(region_scope, lint_level, |this| {
this.as_rvalue(block, scope, value)
})
this.in_scope(region_scope, lint_level, |this| this.as_rvalue(block, scope, value))
}
ExprKind::Repeat { value, count } => {
let value_operand = unpack!(block = this.as_operand(block, scope, value));
@ -106,35 +97,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// The `Box<T>` temporary created here is not a part of the HIR,
// and therefore is not considered during generator OIBIT
// determination. See the comment about `box` at `yield_in_scope`.
let result = this
.local_decls
.push(LocalDecl::new_internal(expr.ty, expr_span));
let result = this.local_decls.push(LocalDecl::new_internal(expr.ty, expr_span));
this.cfg.push(
block,
Statement {
source_info,
kind: StatementKind::StorageLive(result),
},
Statement { source_info, kind: StatementKind::StorageLive(result) },
);
if let Some(scope) = scope {
// schedule a shallow free of that memory, lest we unwind:
this.schedule_drop_storage_and_value(
expr_span,
scope,
result,
);
this.schedule_drop_storage_and_value(expr_span, scope, result);
}
// malloc some memory of suitable type (thus far, uninitialized):
let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty);
this.cfg
.push_assign(block, source_info, &Place::from(result), box_);
this.cfg.push_assign(block, source_info, &Place::from(result), box_);
// initialize the box contents:
unpack!(
block = this.into(
&this.hir.tcx().mk_place_deref(Place::from(result)),
block, value
block,
value
)
);
block.and(Rvalue::Use(Operand::Move(Place::from(result))))
@ -193,12 +175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block.and(Rvalue::Aggregate(box AggregateKind::Tuple, fields))
}
ExprKind::Closure {
closure_id,
substs,
upvars,
movability,
} => {
ExprKind::Closure { closure_id, substs, upvars, movability } => {
// see (*) above
let operands: Vec<_> = upvars
.into_iter()
@ -225,9 +202,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
match upvar.kind {
ExprKind::Borrow {
borrow_kind:
BorrowKind::Mut {
allow_two_phase_borrow: false,
},
BorrowKind::Mut { allow_two_phase_borrow: false },
arg,
} => unpack!(
block = this.limit_capture_mutability(
@ -238,7 +213,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
}
}).collect();
})
.collect();
let result = match substs {
UpvarSubsts::Generator(substs) => {
// We implicitly set the discriminant to 0. See
@ -261,11 +237,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
this.cfg.terminate(
block,
source_info,
TerminatorKind::Yield {
value: value,
resume: resume,
drop: cleanup,
},
TerminatorKind::Yield { value: value, resume: resume, drop: cleanup },
);
resume.and(this.unit_rvalue())
}
@ -414,29 +386,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let this = self;
let source_info = this.source_info(upvar_span);
let temp = this
.local_decls
.push(LocalDecl::new_temp(upvar_ty, upvar_span));
let temp = this.local_decls.push(LocalDecl::new_temp(upvar_ty, upvar_span));
this.cfg.push(
block,
Statement {
source_info,
kind: StatementKind::StorageLive(temp),
},
);
this.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
let arg_place = unpack!(block = this.as_place(block, arg));
let mutability = match arg_place.as_ref() {
PlaceRef {
base: &PlaceBase::Local(local),
projection: &[],
} => this.local_decls[local].mutability,
PlaceRef {
base: &PlaceBase::Local(local),
projection: &[ProjectionElem::Deref],
} => {
PlaceRef { base: &PlaceBase::Local(local), projection: &[] } => {
this.local_decls[local].mutability
}
PlaceRef { base: &PlaceBase::Local(local), projection: &[ProjectionElem::Deref] } => {
debug_assert!(
this.local_decls[local].is_ref_for_guard(),
"Unexpected capture place",
@ -449,16 +409,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
| PlaceRef {
ref base,
projection: &[
ref proj_base @ ..,
ProjectionElem::Field(upvar_index, _),
ProjectionElem::Deref
],
projection:
&[ref proj_base @ .., ProjectionElem::Field(upvar_index, _), ProjectionElem::Deref],
} => {
let place = PlaceRef {
base,
projection: proj_base,
};
let place = PlaceRef { base, projection: proj_base };
// Not projected from the implicit `self` in a closure.
debug_assert!(
@ -480,9 +434,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let borrow_kind = match mutability {
Mutability::Not => BorrowKind::Unique,
Mutability::Mut => BorrowKind::Mut {
allow_two_phase_borrow: false,
},
Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
};
this.cfg.push_assign(
@ -496,11 +448,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// anything because no values with a destructor can be created in
// a constant at this time, even if the type may need dropping.
if let Some(temp_lifetime) = temp_lifetime {
this.schedule_drop_storage_and_value(
upvar_span,
temp_lifetime,
temp,
);
this.schedule_drop_storage_and_value(upvar_span, temp_lifetime, temp);
}
block.and(Operand::Move(Place::from(temp)))


@ -11,13 +11,13 @@ use crate::build::{BlockAnd, BlockAndExtension, Builder};
use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode};
use crate::hair::{self, *};
use rustc::hir::HirId;
use rustc::mir::*;
use rustc::middle::region;
use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty};
use rustc::mir::*;
use rustc::ty::layout::VariantIdx;
use rustc_index::bit_set::BitSet;
use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use smallvec::{SmallVec, smallvec};
use rustc_index::bit_set::BitSet;
use smallvec::{smallvec, SmallVec};
use syntax::ast::Name;
use syntax_pos::Span;
@ -139,9 +139,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
        // create binding start block to link them by false edges
let candidate_count = arms.iter().map(|c| c.top_pats_hack().len()).sum::<usize>();
let pre_binding_blocks: Vec<_> = (0..candidate_count)
.map(|_| self.cfg.start_new_block())
.collect();
let pre_binding_blocks: Vec<_> =
(0..candidate_count).map(|_| self.cfg.start_new_block()).collect();
let mut match_has_guard = false;
@ -155,29 +154,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.map(|arm| {
let arm_has_guard = arm.guard.is_some();
match_has_guard |= arm_has_guard;
let arm_candidates: Vec<_> = arm.top_pats_hack()
let arm_candidates: Vec<_> = arm
.top_pats_hack()
.iter()
.zip(candidate_pre_binding_blocks.by_ref())
.map(
|(pattern, pre_binding_block)| {
Candidate {
span: pattern.span,
match_pairs: smallvec![
MatchPair::new(scrutinee_place.clone(), pattern),
],
bindings: vec![],
ascriptions: vec![],
otherwise_block: if arm_has_guard {
Some(self.cfg.start_new_block())
} else {
None
},
pre_binding_block: *pre_binding_block,
next_candidate_pre_binding_block:
next_candidate_pre_binding_blocks.next().copied(),
}
.map(|(pattern, pre_binding_block)| Candidate {
span: pattern.span,
match_pairs: smallvec![MatchPair::new(scrutinee_place.clone(), pattern),],
bindings: vec![],
ascriptions: vec![],
otherwise_block: if arm_has_guard {
Some(self.cfg.start_new_block())
} else {
None
},
)
pre_binding_block: *pre_binding_block,
next_candidate_pre_binding_block: next_candidate_pre_binding_blocks
.next()
.copied(),
})
.collect();
(arm, arm_candidates)
})
@ -226,50 +221,53 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Step 5. Create everything else: the guards and the arms.
let match_scope = self.scopes.topmost();
let arm_end_blocks: Vec<_> = arm_candidates.into_iter().map(|(arm, mut candidates)| {
let arm_source_info = self.source_info(arm.span);
let arm_scope = (arm.scope, arm_source_info);
self.in_scope(arm_scope, arm.lint_level, |this| {
let body = this.hir.mirror(arm.body.clone());
let scope = this.declare_bindings(
None,
arm.span,
&arm.top_pats_hack()[0],
ArmHasGuard(arm.guard.is_some()),
Some((Some(&scrutinee_place), scrutinee_span)),
);
let arm_block;
if candidates.len() == 1 {
arm_block = this.bind_and_guard_matched_candidate(
candidates.pop().unwrap(),
arm.guard.clone(),
&fake_borrow_temps,
scrutinee_span,
match_scope,
let arm_end_blocks: Vec<_> = arm_candidates
.into_iter()
.map(|(arm, mut candidates)| {
let arm_source_info = self.source_info(arm.span);
let arm_scope = (arm.scope, arm_source_info);
self.in_scope(arm_scope, arm.lint_level, |this| {
let body = this.hir.mirror(arm.body.clone());
let scope = this.declare_bindings(
None,
arm.span,
&arm.top_pats_hack()[0],
ArmHasGuard(arm.guard.is_some()),
Some((Some(&scrutinee_place), scrutinee_span)),
);
} else {
arm_block = this.cfg.start_new_block();
for candidate in candidates {
this.clear_top_scope(arm.scope);
let binding_end = this.bind_and_guard_matched_candidate(
candidate,
let arm_block;
if candidates.len() == 1 {
arm_block = this.bind_and_guard_matched_candidate(
candidates.pop().unwrap(),
arm.guard.clone(),
&fake_borrow_temps,
scrutinee_span,
match_scope,
);
this.cfg.goto(binding_end, source_info, arm_block);
} else {
arm_block = this.cfg.start_new_block();
for candidate in candidates {
this.clear_top_scope(arm.scope);
let binding_end = this.bind_and_guard_matched_candidate(
candidate,
arm.guard.clone(),
&fake_borrow_temps,
scrutinee_span,
match_scope,
);
this.cfg.goto(binding_end, source_info, arm_block);
}
}
}
if let Some(source_scope) = scope {
this.source_scope = source_scope;
}
if let Some(source_scope) = scope {
this.source_scope = source_scope;
}
this.into(destination, arm_block, body)
this.into(destination, arm_block, body)
})
})
}).collect();
.collect();
// all the arm blocks will rejoin here
let end_block = self.cfg.start_new_block();
@ -291,12 +289,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) -> BlockAnd<()> {
match *irrefutable_pat.kind {
// Optimize the case of `let x = ...` to write directly into `x`
PatKind::Binding {
mode: BindingMode::ByValue,
var,
subpattern: None,
..
} => {
PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => {
let place =
self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard);
unpack!(block = self.into(&place, block, initializer));
@ -318,20 +311,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// dubious way, so it may be that the test is kind of
// broken.
PatKind::AscribeUserType {
subpattern: Pat {
kind: box PatKind::Binding {
mode: BindingMode::ByValue,
var,
subpattern: None,
subpattern:
Pat {
kind:
box PatKind::Binding {
mode: BindingMode::ByValue,
var,
subpattern: None,
..
},
..
},
..
},
ascription: hair::pattern::Ascription {
user_ty: pat_ascription_ty,
variance: _,
user_ty_span,
},
ascription:
hair::pattern::Ascription { user_ty: pat_ascription_ty, variance: _, user_ty_span },
} => {
let place =
self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard);
@ -353,10 +345,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Statement {
source_info: ty_source_info,
kind: StatementKind::AscribeUserType(
box(
place,
user_ty,
),
box (place, user_ty),
// We always use invariant as the variance here. This is because the
// variance field from the ascription refers to the variance to use
// when applying the type to the value being matched, but this
@ -504,13 +493,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) -> Place<'tcx> {
let local_id = self.var_local_id(var, for_guard);
let source_info = self.source_info(span);
self.cfg.push(
block,
Statement {
source_info,
kind: StatementKind::StorageLive(local_id),
},
);
self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
let region_scope = self.hir.region_scope_tree.var_scope(var.local_id);
self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
Place::from(local_id)
@ -519,12 +502,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
pub fn schedule_drop_for_binding(&mut self, var: HirId, span: Span, for_guard: ForGuard) {
let local_id = self.var_local_id(var, for_guard);
let region_scope = self.hir.region_scope_tree.var_scope(var.local_id);
self.schedule_drop(
span,
region_scope,
local_id,
DropKind::Value,
);
self.schedule_drop(span, region_scope, local_id, DropKind::Value);
}
pub(super) fn visit_bindings(
@ -544,31 +522,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) {
debug!("visit_bindings: pattern={:?} pattern_user_ty={:?}", pattern, pattern_user_ty);
match *pattern.kind {
PatKind::Binding {
mutability,
name,
mode,
var,
ty,
ref subpattern,
..
} => {
PatKind::Binding { mutability, name, mode, var, ty, ref subpattern, .. } => {
f(self, mutability, name, mode, var, pattern.span, ty, pattern_user_ty.clone());
if let Some(subpattern) = subpattern.as_ref() {
self.visit_bindings(subpattern, pattern_user_ty, f);
}
}
PatKind::Array {
ref prefix,
ref slice,
ref suffix,
}
| PatKind::Slice {
ref prefix,
ref slice,
ref suffix,
} => {
PatKind::Array { ref prefix, ref slice, ref suffix }
| PatKind::Slice { ref prefix, ref slice, ref suffix } => {
let from = u32::try_from(prefix.len()).unwrap();
let to = u32::try_from(suffix.len()).unwrap();
for subpattern in prefix {
@ -590,11 +552,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
PatKind::AscribeUserType {
ref subpattern,
ascription: hair::pattern::Ascription {
ref user_ty,
user_ty_span,
variance: _,
},
ascription: hair::pattern::Ascription { ref user_ty, user_ty_span, variance: _ },
} => {
// This corresponds to something like
//
@ -627,8 +585,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
PatKind::Variant { adt_def, substs: _, variant_index, ref subpatterns } => {
for subpattern in subpatterns {
let subpattern_user_ty = pattern_user_ty.clone().variant(
adt_def, variant_index, subpattern.field);
let subpattern_user_ty =
pattern_user_ty.clone().variant(adt_def, variant_index, subpattern.field);
self.visit_bindings(&subpattern.pattern, subpattern_user_ty, f);
}
}
@ -736,10 +694,7 @@ enum TestKind<'tcx> {
Range(PatRange<'tcx>),
/// Test length of the slice is equal to len
Len {
len: u64,
op: BinOp,
},
Len { len: u64, op: BinOp },
}
#[derive(Debug)]
@ -789,10 +744,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) {
debug!(
"matched_candidate(span={:?}, candidates={:?}, start_block={:?}, otherwise_block={:?})",
span,
candidates,
start_block,
otherwise_block,
span, candidates, start_block, otherwise_block,
);
// Start by simplifying candidates. Once this process is complete, all
@ -805,22 +757,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// The candidates are sorted by priority. Check to see whether the
// higher priority candidates (and hence at the front of the slice)
// have satisfied all their match pairs.
let fully_matched = candidates
.iter()
.take_while(|c| c.match_pairs.is_empty())
.count();
debug!(
"match_candidates: {:?} candidates fully matched",
fully_matched
);
let fully_matched = candidates.iter().take_while(|c| c.match_pairs.is_empty()).count();
debug!("match_candidates: {:?} candidates fully matched", fully_matched);
let (matched_candidates, unmatched_candidates) = candidates.split_at_mut(fully_matched);
let block: BasicBlock = if !matched_candidates.is_empty() {
let otherwise_block = self.select_matched_candidates(
matched_candidates,
start_block,
fake_borrows,
);
let otherwise_block =
self.select_matched_candidates(matched_candidates, start_block, fake_borrows);
if let Some(last_otherwise_block) = otherwise_block {
last_otherwise_block
@ -848,13 +791,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
// Test for the remaining candidates.
self.test_candidates(
span,
unmatched_candidates,
block,
otherwise_block,
fake_borrows,
);
self.test_candidates(span, unmatched_candidates, block, otherwise_block, fake_borrows);
}
/// Link up matched candidates. For example, if we have something like
@ -903,8 +840,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// _ => 3,
// }
if let Some(fake_borrows) = fake_borrows {
for Binding { source, .. }
in matched_candidates.iter().flat_map(|candidate| &candidate.bindings)
for Binding { source, .. } in
matched_candidates.iter().flat_map(|candidate| &candidate.bindings)
{
if let Some(i) =
source.projection.iter().rposition(|elem| *elem == ProjectionElem::Deref)
@ -924,8 +861,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.position(|c| c.otherwise_block.is_none())
.unwrap_or(matched_candidates.len() - 1);
let (reachable_candidates, unreachable_candidates)
= matched_candidates.split_at_mut(fully_matched_with_guard + 1);
let (reachable_candidates, unreachable_candidates) =
matched_candidates.split_at_mut(fully_matched_with_guard + 1);
let first_candidate = &reachable_candidates[0];
let first_prebinding_block = first_candidate.pre_binding_block;
@ -967,7 +904,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
let last_candidate = reachable_candidates.last().unwrap();
if let Some(otherwise) = last_candidate.otherwise_block {
let source_info = self.source_info(last_candidate.span);
@ -1114,11 +1050,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// may want to add cases based on the candidates that are
// available
match test.kind {
TestKind::SwitchInt {
switch_ty,
ref mut options,
ref mut indices,
} => {
TestKind::SwitchInt { switch_ty, ref mut options, ref mut indices } => {
for candidate in candidates.iter() {
if !self.add_cases_to_switch(
&match_place,
@ -1131,10 +1063,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
}
TestKind::Switch {
adt_def: _,
ref mut variants,
} => {
TestKind::Switch { adt_def: _, ref mut variants } => {
for candidate in candidates.iter() {
if !self.add_variants_to_switch(&match_place, candidate, variants) {
break;
@ -1145,18 +1074,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
        // Insert a Shallow borrow of any place that is switched on.
fake_borrows.as_mut().map(|fb| {
fb.insert(match_place.clone())
});
fake_borrows.as_mut().map(|fb| fb.insert(match_place.clone()));
// perform the test, branching to one of N blocks. For each of
// those N possible outcomes, create a (initially empty)
// vector of candidates. Those are the candidates that still
// apply if the test has that particular outcome.
debug!(
"match_candidates: test={:?} match_pair={:?}",
test, match_pair
);
debug!("match_candidates: test={:?} match_pair={:?}", test, match_pair);
let mut target_candidates: Vec<Vec<&mut Candidate<'pat, 'tcx>>> = vec![];
target_candidates.resize_with(test.targets(), Default::default);
@ -1201,38 +1125,36 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
otherwise_block = Some(remainder_start.unwrap());
};
target_candidates.into_iter().map(|mut candidates| {
if candidates.len() != 0 {
let candidate_start = &mut None;
this.match_candidates(
span,
candidate_start,
otherwise_block,
&mut *candidates,
fake_borrows,
);
candidate_start.unwrap()
} else {
*otherwise_block.get_or_insert_with(|| {
let unreachable = this.cfg.start_new_block();
let source_info = this.source_info(span);
this.cfg.terminate(
unreachable,
source_info,
TerminatorKind::Unreachable,
target_candidates
.into_iter()
.map(|mut candidates| {
if candidates.len() != 0 {
let candidate_start = &mut None;
this.match_candidates(
span,
candidate_start,
otherwise_block,
&mut *candidates,
fake_borrows,
);
unreachable
})
}
}).collect()
candidate_start.unwrap()
} else {
*otherwise_block.get_or_insert_with(|| {
let unreachable = this.cfg.start_new_block();
let source_info = this.source_info(span);
this.cfg.terminate(
unreachable,
source_info,
TerminatorKind::Unreachable,
);
unreachable
})
}
})
.collect()
};
self.perform_test(
block,
&match_place,
&test,
make_target_blocks,
);
self.perform_test(block, &match_place, &test, make_target_blocks);
}
// Determine the fake borrows that are needed to ensure that the place
@ -1249,8 +1171,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut all_fake_borrows = Vec::with_capacity(fake_borrows.len());
// Insert a Shallow borrow of the prefixes of any fake borrows.
for place in fake_borrows
{
for place in fake_borrows {
let mut cursor = place.projection.as_ref();
while let [proj_base @ .., elem] = cursor {
cursor = proj_base;
@ -1259,10 +1180,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Insert a shallow borrow after a deref. For other
// projections the borrow of prefix_cursor will
// conflict with any mutation of base.
all_fake_borrows.push(PlaceRef {
base: &place.base,
projection: proj_base,
});
all_fake_borrows.push(PlaceRef { base: &place.base, projection: proj_base });
}
}
@ -1275,21 +1193,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
debug!("add_fake_borrows all_fake_borrows = {:?}", all_fake_borrows);
all_fake_borrows.into_iter().map(|matched_place| {
let fake_borrow_deref_ty = Place::ty_from(
matched_place.base,
matched_place.projection,
&self.local_decls,
tcx,
)
.ty;
let fake_borrow_ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
let fake_borrow_temp = self.local_decls.push(
LocalDecl::new_temp(fake_borrow_ty, temp_span)
);
all_fake_borrows
.into_iter()
.map(|matched_place| {
let fake_borrow_deref_ty = Place::ty_from(
matched_place.base,
matched_place.projection,
&self.local_decls,
tcx,
)
.ty;
let fake_borrow_ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
let fake_borrow_temp =
self.local_decls.push(LocalDecl::new_temp(fake_borrow_ty, temp_span));
(matched_place, fake_borrow_temp)
}).collect()
(matched_place, fake_borrow_temp)
})
.collect()
}
}
@ -1424,10 +1344,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if let Some(guard) = guard {
let tcx = self.hir.tcx();
self.bind_matched_candidate_for_guard(
block,
&candidate.bindings,
);
self.bind_matched_candidate_for_guard(block, &candidate.bindings);
let guard_frame = GuardFrame {
locals: candidate
.bindings
@ -1449,12 +1366,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
projection: tcx.intern_place_elems(place.projection),
},
);
self.cfg.push_assign(
block,
scrutinee_source_info,
&Place::from(*temp),
borrow,
);
self.cfg.push_assign(block, scrutinee_source_info, &Place::from(*temp), borrow);
}
// the block to branch to if the guard fails; if there is no
@ -1464,13 +1376,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
};
let source_info = self.source_info(guard.span);
let guard_end = self.source_info(tcx.sess.source_map().end_point(guard.span));
let (post_guard_block, otherwise_post_guard_block)
= self.test_bool(block, guard, source_info);
let (post_guard_block, otherwise_post_guard_block) =
self.test_bool(block, guard, source_info);
let guard_frame = self.guard_context.pop().unwrap();
debug!(
"Exiting guard building context with locals: {:?}",
guard_frame
);
debug!("Exiting guard building context with locals: {:?}", guard_frame);
for &(_, temp) in fake_borrows {
let cause = FakeReadCause::ForMatchGuard;
@ -1520,10 +1429,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let cause = FakeReadCause::ForGuardBinding;
self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(local_id));
}
self.bind_matched_candidate_for_arm_body(
post_guard_block,
by_value_bindings,
);
self.bind_matched_candidate_for_arm_body(post_guard_block, by_value_bindings);
post_guard_block
} else {
@ -1544,25 +1450,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
debug!(
"adding user ascription at span {:?} of place {:?} and {:?}",
source_info.span,
ascription.source,
ascription.user_ty,
source_info.span, ascription.source, ascription.user_ty,
);
let user_ty = ascription.user_ty.clone().user_ty(
&mut self.canonical_user_type_annotations,
ascription.source.ty(&self.local_decls, self.hir.tcx()).ty,
source_info.span
source_info.span,
);
self.cfg.push(
block,
Statement {
source_info,
kind: StatementKind::AscribeUserType(
box(
ascription.source.clone(),
user_ty,
),
box (ascription.source.clone(), user_ty),
ascription.variance,
),
},
@ -1570,11 +1471,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
fn bind_matched_candidate_for_guard(
&mut self,
block: BasicBlock,
bindings: &[Binding<'tcx>],
) {
fn bind_matched_candidate_for_guard(&mut self, block: BasicBlock, bindings: &[Binding<'tcx>]) {
debug!("bind_matched_candidate_for_guard(block={:?}, bindings={:?})", block, bindings);
// Assign each of the bindings. Since we are binding for a
@ -1593,8 +1490,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
match binding.binding_mode {
BindingMode::ByValue => {
let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, binding.source.clone());
self.cfg
.push_assign(block, source_info, &ref_for_guard, rvalue);
self.cfg.push_assign(block, source_info, &ref_for_guard, rvalue);
}
BindingMode::ByRef(borrow_kind) => {
let value_for_arm = self.storage_live_binding(
@ -1605,11 +1501,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
let rvalue = Rvalue::Ref(re_erased, borrow_kind, binding.source.clone());
self.cfg
.push_assign(block, source_info, &value_for_arm, rvalue);
self.cfg.push_assign(block, source_info, &value_for_arm, rvalue);
let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, value_for_arm);
self.cfg
.push_assign(block, source_info, &ref_for_guard, rvalue);
self.cfg.push_assign(block, source_info, &ref_for_guard, rvalue);
}
}
}
@ -1619,7 +1513,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
block: BasicBlock,
bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
) where 'tcx: 'b {
) where
'tcx: 'b,
{
debug!("bind_matched_candidate_for_arm_body(block={:?})", block);
let re_erased = self.hir.tcx().lifetimes.re_erased;
@ -1667,10 +1563,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
let tcx = self.hir.tcx();
let debug_source_info = SourceInfo {
span: source_info.span,
scope: visibility_scope,
};
let debug_source_info = SourceInfo { span: source_info.span, scope: visibility_scope };
let binding_mode = match mode {
BindingMode::ByValue => ty::BindingMode::BindByValue(mutability.into()),
BindingMode::ByRef(_) => ty::BindingMode::BindByReference(mutability.into()),
@ -1683,18 +1576,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
source_info,
internal: false,
is_block_tail: None,
local_info: LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
VarBindingForm {
binding_mode,
// hypothetically, `visit_bindings` could try to unzip
// an outermost hir::Ty as we descend, matching up
// idents in pat; but complex w/ unclear UI payoff.
// Instead, just abandon providing diagnostic info.
opt_ty_info: None,
opt_match_place,
pat_span,
},
))),
local_info: LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
binding_mode,
// hypothetically, `visit_bindings` could try to unzip
// an outermost hir::Ty as we descend, matching up
// idents in pat; but complex w/ unclear UI payoff.
// Instead, just abandon providing diagnostic info.
opt_ty_info: None,
opt_match_place,
pat_span,
}))),
};
let for_arm_body = self.local_decls.push(local);
self.var_debug_info.push(VarDebugInfo {
@ -1719,10 +1610,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
source_info: debug_source_info,
place: ref_for_guard.into(),
});
LocalsForNode::ForGuard {
ref_for_guard,
for_arm_body,
}
LocalsForNode::ForGuard { ref_for_guard, for_arm_body }
} else {
LocalsForNode::One(for_arm_body)
};


@ -1,22 +1,22 @@
use crate::build;
use crate::build::scope::DropKind;
use crate::hair::cx::Cx;
use crate::hair::{LintLevel, BindingMode, PatKind};
use crate::hair::{BindingMode, LintLevel, PatKind};
use crate::transform::MirSource;
use crate::util as mir_util;
use rustc::hir;
use rustc::hir::{Node, GeneratorKind};
use rustc::hir::def_id::DefId;
use rustc::hir::{GeneratorKind, Node};
use rustc::middle::lang_items;
use rustc::middle::region;
use rustc::mir::*;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::Subst;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::HirIdMap;
use rustc_target::spec::PanicStrategy;
use rustc_index::vec::{IndexVec, Idx};
use std::u32;
use rustc_index::vec::{Idx, IndexVec};
use rustc_target::spec::abi::Abi;
use rustc_target::spec::PanicStrategy;
use std::u32;
use syntax::attr::{self, UnwindAttr};
use syntax::symbol::kw;
use syntax_pos::Span;
@ -30,40 +30,27 @@ pub fn mir_build(tcx: TyCtxt<'_>, def_id: DefId) -> BodyAndCache<'_> {
// Figure out what primary body this item has.
let (body_id, return_ty_span) = match tcx.hir().get(id) {
Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(_, decl, body_id, _, _), .. })
| Node::Item(
hir::Item {
kind: hir::ItemKind::Fn(hir::FnSig { decl, .. }, _, body_id),
..
}
)
| Node::ImplItem(
hir::ImplItem {
kind: hir::ImplItemKind::Method(hir::FnSig { decl, .. }, body_id),
..
}
)
| Node::TraitItem(
hir::TraitItem {
kind: hir::TraitItemKind::Method(
hir::FnSig { decl, .. },
hir::TraitMethod::Provided(body_id),
),
..
}
) => {
(*body_id, decl.output.span())
}
| Node::Item(hir::Item {
kind: hir::ItemKind::Fn(hir::FnSig { decl, .. }, _, body_id),
..
})
| Node::ImplItem(hir::ImplItem {
kind: hir::ImplItemKind::Method(hir::FnSig { decl, .. }, body_id),
..
})
| Node::TraitItem(hir::TraitItem {
kind:
hir::TraitItemKind::Method(hir::FnSig { decl, .. }, hir::TraitMethod::Provided(body_id)),
..
}) => (*body_id, decl.output.span()),
Node::Item(hir::Item { kind: hir::ItemKind::Static(ty, _, body_id), .. })
| Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, body_id), .. })
| Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(ty, body_id), .. })
| Node::TraitItem(
hir::TraitItem { kind: hir::TraitItemKind::Const(ty, Some(body_id)), .. }
) => {
(*body_id, ty.span)
}
Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => {
(*body, tcx.hir().span(*hir_id))
}
| Node::TraitItem(hir::TraitItem {
kind: hir::TraitItemKind::Const(ty, Some(body_id)),
..
}) => (*body_id, ty.span),
Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => (*body, tcx.hir().span(*hir_id)),
_ => span_bug!(tcx.hir().span(id), "can't build MIR for {:?}", def_id),
};
@ -100,61 +87,54 @@ pub fn mir_build(tcx: TyCtxt<'_>, def_id: DefId) -> BodyAndCache<'_> {
};
let body = tcx.hir().body(body_id);
let explicit_arguments =
body.params
.iter()
.enumerate()
.map(|(index, arg)| {
let owner_id = tcx.hir().body_owner(body_id);
let opt_ty_info;
let self_arg;
if let Some(ref fn_decl) = tcx.hir().fn_decl_by_hir_id(owner_id) {
opt_ty_info = fn_decl.inputs.get(index).map(|ty| ty.span);
self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
match fn_decl.implicit_self {
hir::ImplicitSelfKind::Imm => Some(ImplicitSelfKind::Imm),
hir::ImplicitSelfKind::Mut => Some(ImplicitSelfKind::Mut),
hir::ImplicitSelfKind::ImmRef => Some(ImplicitSelfKind::ImmRef),
hir::ImplicitSelfKind::MutRef => Some(ImplicitSelfKind::MutRef),
_ => None,
}
} else {
None
};
} else {
opt_ty_info = None;
self_arg = None;
let explicit_arguments = body.params.iter().enumerate().map(|(index, arg)| {
let owner_id = tcx.hir().body_owner(body_id);
let opt_ty_info;
let self_arg;
if let Some(ref fn_decl) = tcx.hir().fn_decl_by_hir_id(owner_id) {
opt_ty_info = fn_decl.inputs.get(index).map(|ty| ty.span);
self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
match fn_decl.implicit_self {
hir::ImplicitSelfKind::Imm => Some(ImplicitSelfKind::Imm),
hir::ImplicitSelfKind::Mut => Some(ImplicitSelfKind::Mut),
hir::ImplicitSelfKind::ImmRef => Some(ImplicitSelfKind::ImmRef),
hir::ImplicitSelfKind::MutRef => Some(ImplicitSelfKind::MutRef),
_ => None,
}
} else {
None
};
} else {
opt_ty_info = None;
self_arg = None;
}
// C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
// (as it's created inside the body itself, not passed in from outside).
let ty = if fn_sig.c_variadic && index == fn_sig.inputs().len() {
let va_list_did = tcx.require_lang_item(
lang_items::VaListTypeLangItem,
Some(arg.span),
);
let region = tcx.mk_region(ty::ReScope(region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite
}));
// C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
// (as it's created inside the body itself, not passed in from outside).
let ty = if fn_sig.c_variadic && index == fn_sig.inputs().len() {
let va_list_did =
tcx.require_lang_item(lang_items::VaListTypeLangItem, Some(arg.span));
let region = tcx.mk_region(ty::ReScope(region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite,
}));
tcx.type_of(va_list_did).subst(tcx, &[region.into()])
} else {
fn_sig.inputs()[index]
};
tcx.type_of(va_list_did).subst(tcx, &[region.into()])
} else {
fn_sig.inputs()[index]
};
ArgInfo(ty, opt_ty_info, Some(&arg), self_arg)
});
ArgInfo(ty, opt_ty_info, Some(&arg), self_arg)
});
let arguments = implicit_argument.into_iter().chain(explicit_arguments);
let (yield_ty, return_ty) = if body.generator_kind.is_some() {
let gen_sig = match ty.kind {
ty::Generator(gen_def_id, gen_substs, ..) =>
gen_substs.as_generator().sig(gen_def_id, tcx),
_ =>
span_bug!(tcx.hir().span(id),
"generator w/o generator type: {:?}", ty),
ty::Generator(gen_def_id, gen_substs, ..) => {
gen_substs.as_generator().sig(gen_def_id, tcx)
}
_ => span_bug!(tcx.hir().span(id), "generator w/o generator type: {:?}", ty),
};
(Some(gen_sig.yield_ty), gen_sig.return_ty)
} else {
@ -191,8 +171,7 @@ pub fn mir_build(tcx: TyCtxt<'_>, def_id: DefId) -> BodyAndCache<'_> {
build::construct_const(cx, body_id, return_ty, return_ty_span)
};
mir_util::dump_mir(tcx, None, "mir_map", &0,
MirSource::item(def_id), &body, |_, _| Ok(()) );
mir_util::dump_mir(tcx, None, "mir_map", &0, MirSource::item(def_id), &body, |_, _| Ok(()));
lints::check(tcx, &body, def_id);
@ -214,7 +193,7 @@ fn liberated_closure_env_ty(
let (closure_def_id, closure_substs) = match closure_ty.kind {
ty::Closure(closure_def_id, closure_substs) => (closure_def_id, closure_substs),
_ => bug!("closure expr does not have closure type: {:?}", closure_ty)
_ => bug!("closure expr does not have closure type: {:?}", closure_ty),
};
let closure_env_ty = tcx.closure_env_ty(closure_def_id, closure_substs).unwrap();
@ -232,7 +211,7 @@ pub enum BlockFrame {
Statement {
/// If true, then statement discards result from evaluating
/// the expression (such as examples 1 and 2 above).
ignores_expr_result: bool
ignores_expr_result: bool,
},
/// Evaluation is currently within the tail expression of a block.
@ -243,7 +222,7 @@ pub enum BlockFrame {
/// the result of evaluating the block's tail expression.
///
/// Example: `let _ = { STMT_1; EXPR };`
tail_result_is_ignored: bool
tail_result_is_ignored: bool,
},
/// Generic mark meaning that the block occurred as a subexpression
@ -258,19 +237,17 @@ impl BlockFrame {
match *self {
BlockFrame::TailExpr { .. } => true,
BlockFrame::Statement { .. } |
BlockFrame::SubExpr => false,
BlockFrame::Statement { .. } | BlockFrame::SubExpr => false,
}
}
fn is_statement(&self) -> bool {
match *self {
BlockFrame::Statement { .. } => true,
BlockFrame::TailExpr { .. } |
BlockFrame::SubExpr => false,
BlockFrame::TailExpr { .. } | BlockFrame::SubExpr => false,
}
}
}
}
#[derive(Debug)]
struct BlockContext(Vec<BlockFrame>);
@ -348,9 +325,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
impl BlockContext {
fn new() -> Self { BlockContext(vec![]) }
fn push(&mut self, bf: BlockFrame) { self.0.push(bf); }
fn pop(&mut self) -> Option<BlockFrame> { self.0.pop() }
fn new() -> Self {
BlockContext(vec![])
}
fn push(&mut self, bf: BlockFrame) {
self.0.push(bf);
}
fn pop(&mut self) -> Option<BlockFrame> {
self.0.pop()
}
/// Traverses the frames on the `BlockContext`, searching for either
/// the first block-tail expression frame with no intervening
@ -367,8 +350,9 @@ impl BlockContext {
match bf {
BlockFrame::SubExpr => continue,
BlockFrame::Statement { .. } => break,
&BlockFrame::TailExpr { tail_result_is_ignored } =>
return Some(BlockTailInfo { tail_result_is_ignored })
&BlockFrame::TailExpr { tail_result_is_ignored } => {
return Some(BlockTailInfo { tail_result_is_ignored });
}
}
}
@ -390,8 +374,8 @@ impl BlockContext {
Some(BlockFrame::SubExpr) => false,
// otherwise: use accumulated is_ignored state.
Some(BlockFrame::TailExpr { tail_result_is_ignored: ignored }) |
Some(BlockFrame::Statement { ignores_expr_result: ignored }) => *ignored,
Some(BlockFrame::TailExpr { tail_result_is_ignored: ignored })
| Some(BlockFrame::Statement { ignores_expr_result: ignored }) => *ignored,
}
}
}
@ -422,9 +406,7 @@ struct GuardFrameLocal {
impl GuardFrameLocal {
fn new(id: hir::HirId, _binding_mode: BindingMode) -> Self {
GuardFrameLocal {
id: id,
}
GuardFrameLocal { id: id }
}
}
@ -457,13 +439,18 @@ enum ForGuard {
impl LocalsForNode {
fn local_id(&self, for_guard: ForGuard) -> Local {
match (self, for_guard) {
(&LocalsForNode::One(local_id), ForGuard::OutsideGuard) |
(&LocalsForNode::ForGuard { ref_for_guard: local_id, .. }, ForGuard::RefWithinGuard) |
(&LocalsForNode::ForGuard { for_arm_body: local_id, .. }, ForGuard::OutsideGuard) =>
local_id,
(&LocalsForNode::One(local_id), ForGuard::OutsideGuard)
| (
&LocalsForNode::ForGuard { ref_for_guard: local_id, .. },
ForGuard::RefWithinGuard,
)
| (&LocalsForNode::ForGuard { for_arm_body: local_id, .. }, ForGuard::OutsideGuard) => {
local_id
}
(&LocalsForNode::One(_), ForGuard::RefWithinGuard) =>
bug!("anything with one local should never be within a guard."),
(&LocalsForNode::One(_), ForGuard::RefWithinGuard) => {
bug!("anything with one local should never be within a guard.")
}
}
}
}
@ -503,20 +490,16 @@ impl BlockAndExtension for BasicBlock {
/// Update a block pointer and return the value.
/// Use it like `let x = unpack!(block = self.foo(block, foo))`.
macro_rules! unpack {
($x:ident = $c:expr) => {
{
let BlockAnd(b, v) = $c;
$x = b;
v
}
};
($x:ident = $c:expr) => {{
let BlockAnd(b, v) = $c;
$x = b;
v
}};
($c:expr) => {
{
let BlockAnd(b, ()) = $c;
b
}
};
($c:expr) => {{
let BlockAnd(b, ()) = $c;
b
}};
}
fn should_abort_on_panic(tcx: TyCtxt<'_>, fn_def_id: DefId, _abi: Abi) -> bool {
@ -525,10 +508,14 @@ fn should_abort_on_panic(tcx: TyCtxt<'_>, fn_def_id: DefId, _abi: Abi) -> bool {
let unwind_attr = attr::find_unwind_attr(Some(tcx.sess.diagnostic()), attrs);
// We never unwind, so it's not relevant to stop an unwind.
if tcx.sess.panic_strategy() != PanicStrategy::Unwind { return false; }
if tcx.sess.panic_strategy() != PanicStrategy::Unwind {
return false;
}
// We cannot add landing pads, so don't add one.
if tcx.sess.no_landing_pads() { return false; }
if tcx.sess.no_landing_pads() {
return false;
}
// This is a special case: some functions have a C abi but are meant to
// unwind anyway. Don't stop them.
@ -555,7 +542,7 @@ fn construct_fn<'a, 'tcx, A>(
body: &'tcx hir::Body<'tcx>,
) -> Body<'tcx>
where
A: Iterator<Item=ArgInfo<'tcx>>
A: Iterator<Item = ArgInfo<'tcx>>,
{
let arguments: Vec<_> = arguments.collect();
@ -565,55 +552,63 @@ where
let fn_def_id = tcx_hir.local_def_id(fn_id);
let mut builder = Builder::new(hir,
let mut builder = Builder::new(
hir,
span,
arguments.len(),
safety,
return_ty,
return_ty_span,
body.generator_kind);
body.generator_kind,
);
let call_site_scope = region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite
};
let arg_scope = region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::Arguments
};
let call_site_scope =
region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite };
let arg_scope =
region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments };
let mut block = START_BLOCK;
let source_info = builder.source_info(span);
let call_site_s = (call_site_scope, source_info);
unpack!(block = builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
if should_abort_on_panic(tcx, fn_def_id, abi) {
builder.schedule_abort();
}
unpack!(
block = builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
if should_abort_on_panic(tcx, fn_def_id, abi) {
builder.schedule_abort();
}
let arg_scope_s = (arg_scope, source_info);
// `return_block` is called when we evaluate a `return` expression, so
// we just use `START_BLOCK` here.
unpack!(block = builder.in_breakable_scope(
None,
START_BLOCK,
Place::return_place(),
|builder| {
builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
builder.args_and_body(block, fn_def_id, &arguments, arg_scope, &body.value)
})
},
));
// Attribute epilogue to function's closing brace
let fn_end = span.shrink_to_hi();
let source_info = builder.source_info(fn_end);
let return_block = builder.return_block();
builder.cfg.goto(block, source_info, return_block);
builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
// Attribute any unreachable codepaths to the function's closing brace
if let Some(unreachable_block) = builder.cached_unreachable_block {
builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable);
}
return_block.unit()
}));
let arg_scope_s = (arg_scope, source_info);
// `return_block` is called when we evaluate a `return` expression, so
// we just use `START_BLOCK` here.
unpack!(
block = builder.in_breakable_scope(
None,
START_BLOCK,
Place::return_place(),
|builder| {
builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
builder.args_and_body(
block,
fn_def_id,
&arguments,
arg_scope,
&body.value,
)
})
},
)
);
// Attribute epilogue to function's closing brace
let fn_end = span.shrink_to_hi();
let source_info = builder.source_info(fn_end);
let return_block = builder.return_block();
builder.cfg.goto(block, source_info, return_block);
builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
// Attribute any unreachable codepaths to the function's closing brace
if let Some(unreachable_block) = builder.cached_unreachable_block {
builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable);
}
return_block.unit()
})
);
assert_eq!(block, builder.return_block());
let mut spread_arg = None;
@ -621,8 +616,7 @@ where
// RustCall pseudo-ABI untuples the last argument.
spread_arg = Some(Local::new(arguments.len()));
}
info!("fn_id {:?} has attrs {:?}", fn_def_id,
tcx.get_attrs(fn_def_id));
info!("fn_id {:?} has attrs {:?}", fn_def_id, tcx.get_attrs(fn_def_id));
let mut body = builder.finish();
body.spread_arg = spread_arg;
@ -638,15 +632,7 @@ fn construct_const<'a, 'tcx>(
let tcx = hir.tcx();
let owner_id = tcx.hir().body_owner(body_id);
let span = tcx.hir().span(owner_id);
let mut builder = Builder::new(
hir,
span,
0,
Safety::Safe,
const_ty,
const_ty_span,
None,
);
let mut builder = Builder::new(hir, span, 0, Safety::Safe, const_ty, const_ty_span, None);
let mut block = START_BLOCK;
let ast_expr = &tcx.hir().body(body_id).value;
@ -662,17 +648,13 @@ fn construct_const<'a, 'tcx>(
// Constants may be match expressions in which case an unreachable block may
// be created, so terminate it properly.
if let Some(unreachable_block) = builder.cached_unreachable_block {
builder.cfg.terminate(unreachable_block, source_info,
TerminatorKind::Unreachable);
builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable);
}
builder.finish()
}
fn construct_error<'a, 'tcx>(
hir: Cx<'a, 'tcx>,
body_id: hir::BodyId
) -> Body<'tcx> {
fn construct_error<'a, 'tcx>(hir: Cx<'a, 'tcx>, body_id: hir::BodyId) -> Body<'tcx> {
let owner_id = hir.tcx().hir().body_owner(body_id);
let span = hir.tcx().hir().span(owner_id);
let ty = hir.tcx().types.err;
@ -683,14 +665,15 @@ fn construct_error<'a, 'tcx>(
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
fn new(hir: Cx<'a, 'tcx>,
span: Span,
arg_count: usize,
safety: Safety,
return_ty: Ty<'tcx>,
return_span: Span,
generator_kind: Option<GeneratorKind>)
-> Builder<'a, 'tcx> {
fn new(
hir: Cx<'a, 'tcx>,
span: Span,
arg_count: usize,
safety: Safety,
return_ty: Ty<'tcx>,
return_span: Span,
generator_kind: Option<GeneratorKind>,
) -> Builder<'a, 'tcx> {
let lint_level = LintLevel::Explicit(hir.root_lint_level);
let mut builder = Builder {
hir,
@ -722,7 +705,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
assert_eq!(
builder.new_source_scope(span, lint_level, Some(safety)),
OUTERMOST_SOURCE_SCOPE);
OUTERMOST_SOURCE_SCOPE
);
builder.source_scopes[OUTERMOST_SOURCE_SCOPE].parent_scope = None;
builder
@ -744,23 +728,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.var_debug_info,
self.fn_span,
self.hir.control_flow_destroyed(),
self.generator_kind
self.generator_kind,
)
}
fn args_and_body(&mut self,
mut block: BasicBlock,
fn_def_id: DefId,
arguments: &[ArgInfo<'tcx>],
argument_scope: region::Scope,
ast_body: &'tcx hir::Expr)
-> BlockAnd<()>
{
fn args_and_body(
&mut self,
mut block: BasicBlock,
fn_def_id: DefId,
arguments: &[ArgInfo<'tcx>],
argument_scope: region::Scope,
ast_body: &'tcx hir::Expr,
) -> BlockAnd<()> {
// Allocate locals for the function arguments
for &ArgInfo(ty, _, arg_opt, _) in arguments.iter() {
let source_info = SourceInfo {
scope: OUTERMOST_SOURCE_SCOPE,
span: arg_opt.map_or(self.fn_span, |arg| arg.pat.span)
span: arg_opt.map_or(self.fn_span, |arg| arg.pat.span),
};
let arg_local = self.local_decls.push(LocalDecl {
mutability: Mutability::Mut,
@ -804,51 +788,54 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let (def_id, upvar_substs) = match closure_ty.kind {
ty::Closure(def_id, substs) => (def_id, ty::UpvarSubsts::Closure(substs)),
ty::Generator(def_id, substs, _) => (def_id, ty::UpvarSubsts::Generator(substs)),
_ => span_bug!(self.fn_span, "upvars with non-closure env ty {:?}", closure_ty)
_ => span_bug!(self.fn_span, "upvars with non-closure env ty {:?}", closure_ty),
};
let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);
let upvars_with_tys = upvars.iter().zip(upvar_tys);
self.upvar_mutbls = upvars_with_tys.enumerate().map(|(i, ((&var_id, &upvar_id), ty))| {
let capture = hir_tables.upvar_capture(upvar_id);
self.upvar_mutbls = upvars_with_tys
.enumerate()
.map(|(i, ((&var_id, &upvar_id), ty))| {
let capture = hir_tables.upvar_capture(upvar_id);
let mut mutability = Mutability::Not;
let mut name = kw::Invalid;
if let Some(Node::Binding(pat)) = tcx_hir.find(var_id) {
if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
name = ident.name;
match hir_tables.extract_binding_mode(tcx.sess, pat.hir_id, pat.span) {
Some(ty::BindByValue(hir::Mutability::Mut)) => {
mutability = Mutability::Mut;
let mut mutability = Mutability::Not;
let mut name = kw::Invalid;
if let Some(Node::Binding(pat)) = tcx_hir.find(var_id) {
if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
name = ident.name;
match hir_tables.extract_binding_mode(tcx.sess, pat.hir_id, pat.span) {
Some(ty::BindByValue(hir::Mutability::Mut)) => {
mutability = Mutability::Mut;
}
Some(_) => mutability = Mutability::Not,
_ => {}
}
Some(_) => mutability = Mutability::Not,
_ => {}
}
}
}
let mut projs = closure_env_projs.clone();
projs.push(ProjectionElem::Field(Field::new(i), ty));
match capture {
ty::UpvarCapture::ByValue => {}
ty::UpvarCapture::ByRef(..) => {
projs.push(ProjectionElem::Deref);
}
};
let mut projs = closure_env_projs.clone();
projs.push(ProjectionElem::Field(Field::new(i), ty));
match capture {
ty::UpvarCapture::ByValue => {}
ty::UpvarCapture::ByRef(..) => {
projs.push(ProjectionElem::Deref);
}
};
self.var_debug_info.push(VarDebugInfo {
name,
source_info: SourceInfo {
scope: OUTERMOST_SOURCE_SCOPE,
span: tcx_hir.span(var_id),
},
place: Place {
base: closure_env_arg.into(),
projection: tcx.intern_place_elems(&projs),
},
});
self.var_debug_info.push(VarDebugInfo {
name,
source_info: SourceInfo {
scope: OUTERMOST_SOURCE_SCOPE,
span: tcx_hir.span(var_id),
},
place: Place {
base: closure_env_arg.into(),
projection: tcx.intern_place_elems(&projs),
},
});
mutability
}).collect();
mutability
})
.collect();
}
let mut scope = None;
@ -862,7 +849,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Make sure we drop (parts of) the argument even when not matched on.
self.schedule_drop(
arg_opt.as_ref().map_or(ast_body.span, |arg| arg.pat.span),
argument_scope, local, DropKind::Value,
argument_scope,
local,
DropKind::Value,
);
if let Some(arg) = arg_opt {
@ -881,22 +870,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} => {
self.local_decls[local].mutability = mutability;
self.local_decls[local].source_info.scope = self.source_scope;
self.local_decls[local].local_info =
if let Some(kind) = self_binding {
LocalInfo::User(ClearCrossCrate::Set(
BindingForm::ImplicitSelf(*kind),
))
} else {
let binding_mode = ty::BindingMode::BindByValue(mutability.into());
LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
VarBindingForm {
binding_mode,
opt_ty_info,
opt_match_place: Some((Some(place.clone()), span)),
pat_span: span,
},
)))
};
self.local_decls[local].local_info = if let Some(kind) = self_binding {
LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(*kind)))
} else {
let binding_mode = ty::BindingMode::BindByValue(mutability.into());
LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
VarBindingForm {
binding_mode,
opt_ty_info,
opt_match_place: Some((Some(place.clone()), span)),
pat_span: span,
},
)))
};
self.var_indices.insert(var, LocalsForNode::One(local));
}
_ => {
@ -927,13 +913,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
arg_hir_id: hir::HirId,
original_source_scope: SourceScope,
pattern_span: Span
pattern_span: Span,
) {
let tcx = self.hir.tcx();
let current_root = tcx.maybe_lint_level_root_bounded(
arg_hir_id,
self.hir.root_lint_level
);
let current_root = tcx.maybe_lint_level_root_bounded(arg_hir_id, self.hir.root_lint_level);
let parent_root = tcx.maybe_lint_level_root_bounded(
self.source_scopes[original_source_scope]
.local_data
@ -943,11 +926,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.hir.root_lint_level,
);
if current_root != parent_root {
self.source_scope = self.new_source_scope(
pattern_span,
LintLevel::Explicit(current_root),
None
);
self.source_scope =
self.new_source_scope(pattern_span, LintLevel::Explicit(current_root), None);
}
}


@ -1,30 +1,31 @@
// Not in interpret to make sure we do not use private implementation details
use std::fmt;
use std::error::Error;
use std::borrow::{Borrow, Cow};
use std::hash::Hash;
use std::collections::hash_map::Entry;
use std::convert::TryInto;
use std::error::Error;
use std::fmt;
use std::hash::Hash;
use crate::interpret::eval_nullary_intrinsic;
use rustc::hir::def::DefKind;
use rustc::hir::def_id::DefId;
use rustc::mir::interpret::{ConstEvalErr, ErrorHandled, ScalarMaybeUndef};
use rustc::mir;
use rustc::ty::{self, Ty, TyCtxt, subst::Subst};
use rustc::ty::layout::{self, HasTyCtxt, LayoutOf, VariantIdx};
use rustc::mir::interpret::{ConstEvalErr, ErrorHandled, ScalarMaybeUndef};
use rustc::traits::Reveal;
use rustc::ty::layout::{self, HasTyCtxt, LayoutOf, VariantIdx};
use rustc::ty::{self, subst::Subst, Ty, TyCtxt};
use rustc_data_structures::fx::FxHashMap;
use crate::interpret::eval_nullary_intrinsic;
use syntax::{source_map::{Span, DUMMY_SP}, symbol::Symbol};
use syntax::{
source_map::{Span, DUMMY_SP},
symbol::Symbol,
};
use crate::interpret::{self,
PlaceTy, MPlaceTy, OpTy, ImmTy, Immediate, Scalar, Pointer,
RawConst, ConstValue, Machine,
InterpResult, InterpErrorInfo, GlobalId, InterpCx, StackPopCleanup, AssertMessage,
Allocation, AllocId, MemoryKind, Memory,
snapshot, RefTracking, intern_const_alloc_recursive,
use crate::interpret::{
self, intern_const_alloc_recursive, snapshot, AllocId, Allocation, AssertMessage, ConstValue,
GlobalId, ImmTy, Immediate, InterpCx, InterpErrorInfo, InterpResult, MPlaceTy, Machine, Memory,
MemoryKind, OpTy, PlaceTy, Pointer, RawConst, RefTracking, Scalar, StackPopCleanup,
};
/// Number of steps until the detector even starts doing anything.
@ -94,7 +95,7 @@ fn op_to_const<'tcx>(
let ptr = mplace.ptr.to_ptr().unwrap();
let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
ConstValue::ByRef { alloc, offset: ptr.offset }
},
}
// see comment on `let try_as_immediate` above
Err(ImmTy { imm: Immediate::Scalar(x), .. }) => match x {
ScalarMaybeUndef::Scalar(s) => ConstValue::Scalar(s),
@ -108,30 +109,23 @@ fn op_to_const<'tcx>(
let ptr = mplace.ptr.to_ptr().unwrap();
let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
ConstValue::ByRef { alloc, offset: ptr.offset }
},
}
},
Err(ImmTy { imm: Immediate::ScalarPair(a, b), .. }) => {
let (data, start) = match a.not_undef().unwrap() {
Scalar::Ptr(ptr) => (
ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id),
ptr.offset.bytes(),
),
Scalar::Ptr(ptr) => {
(ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id), ptr.offset.bytes())
}
Scalar::Raw { .. } => (
ecx.tcx.intern_const_alloc(Allocation::from_byte_aligned_bytes(
b"" as &[u8],
)),
ecx.tcx.intern_const_alloc(Allocation::from_byte_aligned_bytes(b"" as &[u8])),
0,
),
};
let len = b.to_machine_usize(&ecx.tcx.tcx).unwrap();
let start = start.try_into().unwrap();
let len: usize = len.try_into().unwrap();
ConstValue::Slice {
data,
start,
end: start + len,
}
},
ConstValue::Slice { data, start, end: start + len }
}
};
ecx.tcx.mk_const(ty::Const { val: ty::ConstKind::Value(val), ty: op.layout.ty })
}
@ -159,7 +153,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
let decl = body.local_decls.get(arg).expect("arg missing from local_decls");
let layout = ecx.layout_of(decl.ty.subst(tcx, cid.instance.substs))?;
assert!(layout.is_zst())
};
}
ecx.push_stack_frame(
cid.instance,
@ -196,11 +190,7 @@ impl fmt::Display for ConstEvalError {
use self::ConstEvalError::*;
match *self {
NeedsRfc(ref msg) => {
write!(
f,
"\"{}\" needs an rfc before being allowed inside constants",
msg
)
write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
}
ConstAccessesStatic => write!(f, "constant accesses static"),
}
@ -251,38 +241,32 @@ impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
#[inline(always)]
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>
where
K: Borrow<Q>,
{
FxHashMap::contains_key(self, k)
}
#[inline(always)]
fn insert(&mut self, k: K, v: V) -> Option<V>
{
fn insert(&mut self, k: K, v: V) -> Option<V> {
FxHashMap::insert(self, k, v)
}
#[inline(always)]
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>
where
K: Borrow<Q>,
{
FxHashMap::remove(self, k)
}
#[inline(always)]
fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
self.iter()
.filter_map(move |(k, v)| f(k, &*v))
.collect()
self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
}
#[inline(always)]
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E>
{
fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
match self.get(&k) {
Some(v) => Ok(v),
None => {
@ -293,12 +277,7 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
}
#[inline(always)]
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>
{
fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
match self.entry(k) {
Entry::Occupied(e) => Ok(e.into_mut()),
Entry::Vacant(e) => {
@ -347,7 +326,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock> // unwinding is not supported in consts
_unwind: Option<mir::BasicBlock>, // unwinding is not supported in consts
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
debug!("find_mir_or_eval_fn: {:?}", instance);
@ -387,10 +366,11 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
Ok(body) => *body,
Err(err) => {
if let err_unsup!(NoMirFor(ref path)) = err.kind {
return Err(
ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path))
.into(),
);
return Err(ConstEvalError::NeedsRfc(format!(
"calling extern function `{}`",
path
))
.into());
}
return Err(err);
}
@ -402,7 +382,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
fn_val: !,
_args: &[OpTy<'tcx>],
_ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>
_unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
match fn_val {}
}
@ -413,16 +393,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>
_unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
if ecx.emulate_intrinsic(span, instance, args, ret)? {
return Ok(());
}
// An intrinsic that we do not support
let intrinsic_name = ecx.tcx.item_name(instance.def_id());
Err(
ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", intrinsic_name)).into()
)
Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", intrinsic_name)).into())
}
fn assert_panic(
@ -450,22 +428,15 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
OverflowNeg => err_panic!(OverflowNeg),
DivisionByZero => err_panic!(DivisionByZero),
RemainderByZero => err_panic!(RemainderByZero),
ResumedAfterReturn(generator_kind)
=> err_panic!(ResumedAfterReturn(*generator_kind)),
ResumedAfterPanic(generator_kind)
=> err_panic!(ResumedAfterPanic(*generator_kind)),
ResumedAfterReturn(generator_kind) => err_panic!(ResumedAfterReturn(*generator_kind)),
ResumedAfterPanic(generator_kind) => err_panic!(ResumedAfterPanic(*generator_kind)),
Panic { .. } => bug!("`Panic` variant cannot occur in MIR"),
}
.into())
}
fn ptr_to_int(
_mem: &Memory<'mir, 'tcx, Self>,
_ptr: Pointer,
) -> InterpResult<'tcx, u64> {
Err(
ConstEvalError::NeedsRfc("pointer-to-integer cast".to_string()).into(),
)
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
Err(ConstEvalError::NeedsRfc("pointer-to-integer cast".to_string()).into())
}
fn binary_ptr_op(
@ -474,9 +445,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_left: ImmTy<'tcx>,
_right: ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
Err(
ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into(),
)
Err(ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
}
fn find_foreign_static(
@ -498,10 +467,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
}
#[inline(always)]
fn tag_static_base_pointer(
_memory_extra: &MemoryExtra,
_id: AllocId,
) -> Self::PointerTag {
fn tag_static_base_pointer(_memory_extra: &MemoryExtra, _id: AllocId) -> Self::PointerTag {
()
}
@ -509,9 +475,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_dest: PlaceTy<'tcx>,
) -> InterpResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(),
)
Err(ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into())
}
fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
@ -530,12 +494,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
}
let span = ecx.frame().span;
ecx.machine.loop_detector.observe_and_analyze(
*ecx.tcx,
span,
&ecx.memory,
&ecx.stack[..],
)
ecx.machine.loop_detector.observe_and_analyze(*ecx.tcx, span, &ecx.memory, &ecx.stack[..])
}
#[inline(always)]
@ -643,11 +602,7 @@ fn validate_and_turn_into_const<'tcx>(
let mplace = ecx.raw_const_to_mplace(constant)?;
let mut ref_tracking = RefTracking::new(mplace);
while let Some((mplace, path)) = ref_tracking.todo.pop() {
ecx.validate_operand(
mplace.into(),
path,
Some(&mut ref_tracking),
)?;
ecx.validate_operand(mplace.into(), path, Some(&mut ref_tracking))?;
}
// Now that we validated, turn this into a proper constant.
// Statics/promoteds are always `ByRef`, for the rest `op_to_const` decides
@ -693,7 +648,7 @@ pub fn const_eval_validated_provider<'tcx>(
// Promoteds should never be "too generic" when getting evaluated.
// They either don't get evaluated, or we are in a monomorphic context
assert!(key.value.promoted.is_none());
},
}
// deduplicate calls
other => return other,
}
@ -707,17 +662,14 @@ pub fn const_eval_validated_provider<'tcx>(
ty::FnDef(_, substs) => substs,
_ => bug!("intrinsic with type {:?}", ty),
};
return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs)
.map_err(|error| {
let span = tcx.def_span(def_id);
let error = ConstEvalErr { error: error.kind, stacktrace: vec![], span };
error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
})
return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
let span = tcx.def_span(def_id);
let error = ConstEvalErr { error: error.kind, stacktrace: vec![], span };
error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
});
}
tcx.const_eval_raw(key).and_then(|val| {
validate_and_turn_into_const(tcx, val, key)
})
tcx.const_eval_raw(key).and_then(|val| validate_and_turn_into_const(tcx, val, key))
}
pub fn const_eval_raw_provider<'tcx>(
@ -737,7 +689,7 @@ pub fn const_eval_raw_provider<'tcx>(
key.param_env.reveal = Reveal::UserFacing;
match tcx.const_eval_raw(key) {
// try again with reveal all as requested
Err(ErrorHandled::TooGeneric) => {},
Err(ErrorHandled::TooGeneric) => {}
// deduplicate calls
other => return other,
}
@ -770,72 +722,68 @@ pub fn const_eval_raw_provider<'tcx>(
);
let res = ecx.load_mir(cid.instance.def, cid.promoted);
res.and_then(
|body| eval_body_using_ecx(&mut ecx, cid, *body)
).and_then(|place| {
Ok(RawConst {
alloc_id: place.ptr.assert_ptr().alloc_id,
ty: place.layout.ty
res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, *body))
.and_then(|place| {
Ok(RawConst { alloc_id: place.ptr.assert_ptr().alloc_id, ty: place.layout.ty })
})
}).map_err(|error| {
let err = error_to_const_error(&ecx, error);
// errors in statics are always emitted as fatal errors
if is_static {
// Ensure that if the above error was either `TooGeneric` or `Reported`
// an error must be reported.
let v = err.report_as_error(ecx.tcx, "could not evaluate static initializer");
tcx.sess.delay_span_bug(
err.span,
&format!("static eval failure did not emit an error: {:#?}", v)
);
v
} else if def_id.is_local() {
// constant defined in this crate, we can figure out a lint level!
match tcx.def_kind(def_id) {
// constants never produce a hard error at the definition site. Anything else is
// a backwards compatibility hazard (and will break old versions of winapi for sure)
//
// note that validation may still cause a hard error on this very same constant,
// because any code that existed before validation could not have failed validation
// thus preventing such a hard error from being a backwards compatibility hazard
Some(DefKind::Const) | Some(DefKind::AssocConst) => {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
err.report_as_lint(
tcx.at(tcx.def_span(def_id)),
"any use of this value will cause an error",
hir_id,
Some(err.span),
)
},
// promoting runtime code is only allowed to error if it references broken constants
// any other kind of error will be reported to the user as a deny-by-default lint
_ => if let Some(p) = cid.promoted {
let span = tcx.promoted_mir(def_id)[p].span;
if let err_inval!(ReferencedConstant) = err.error {
err.report_as_error(
tcx.at(span),
"evaluation of constant expression failed",
)
} else {
.map_err(|error| {
let err = error_to_const_error(&ecx, error);
// errors in statics are always emitted as fatal errors
if is_static {
// Ensure that if the above error was either `TooGeneric` or `Reported`
// an error must be reported.
let v = err.report_as_error(ecx.tcx, "could not evaluate static initializer");
tcx.sess.delay_span_bug(
err.span,
&format!("static eval failure did not emit an error: {:#?}", v),
);
v
} else if def_id.is_local() {
// constant defined in this crate, we can figure out a lint level!
match tcx.def_kind(def_id) {
// constants never produce a hard error at the definition site. Anything else is
// a backwards compatibility hazard (and will break old versions of winapi for sure)
//
// note that validation may still cause a hard error on this very same constant,
// because any code that existed before validation could not have failed validation
// thus preventing such a hard error from being a backwards compatibility hazard
Some(DefKind::Const) | Some(DefKind::AssocConst) => {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
err.report_as_lint(
tcx.at(span),
"reaching this expression at runtime will panic or abort",
tcx.hir().as_local_hir_id(def_id).unwrap(),
tcx.at(tcx.def_span(def_id)),
"any use of this value will cause an error",
hir_id,
Some(err.span),
)
}
// anything else (array lengths, enum initializers, constant patterns) is reported
// as a hard error
} else {
err.report_as_error(
ecx.tcx,
"evaluation of constant value failed",
)
},
// promoting runtime code is only allowed to error if it references broken constants
// any other kind of error will be reported to the user as a deny-by-default lint
_ => {
if let Some(p) = cid.promoted {
let span = tcx.promoted_mir(def_id)[p].span;
if let err_inval!(ReferencedConstant) = err.error {
err.report_as_error(
tcx.at(span),
"evaluation of constant expression failed",
)
} else {
err.report_as_lint(
tcx.at(span),
"reaching this expression at runtime will panic or abort",
tcx.hir().as_local_hir_id(def_id).unwrap(),
Some(err.span),
)
}
// anything else (array lengths, enum initializers, constant patterns) is reported
// as a hard error
} else {
err.report_as_error(ecx.tcx, "evaluation of constant value failed")
}
}
}
} else {
// use of broken constant from other crate
err.report_as_error(ecx.tcx, "could not evaluate constant")
}
} else {
// use of broken constant from other crate
err.report_as_error(ecx.tcx, "could not evaluate constant")
}
})
})
}

View File

@ -7,21 +7,21 @@ use std::hash::Hash;
use rustc::mir;
use rustc::mir::interpret::truncate;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{
self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx, PrimitiveExt
self, Align, HasDataLayout, LayoutOf, PrimitiveExt, Size, TyLayout, VariantIdx,
};
use rustc::ty::TypeFoldable;
use rustc::ty::{self, Ty};
use rustc_macros::HashStable;
use super::{
GlobalId, AllocId, Allocation, Scalar, InterpResult, Pointer, PointerArithmetic,
InterpCx, Machine, AllocMap, AllocationExtra,
RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind, LocalValue,
AllocId, AllocMap, Allocation, AllocationExtra, GlobalId, ImmTy, Immediate, InterpCx,
InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer, PointerArithmetic,
RawConst, Scalar, ScalarMaybeUndef,
};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub struct MemPlace<Tag=(), Id=AllocId> {
pub struct MemPlace<Tag = (), Id = AllocId> {
/// A place may have an integral pointer for ZSTs, since it might
/// be turned back into a reference before ever being dereferenced.
/// However, it may never be undef.
@ -34,20 +34,17 @@ pub struct MemPlace<Tag=(), Id=AllocId> {
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag=(), Id=AllocId> {
pub enum Place<Tag = (), Id = AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Tag, Id>),
/// To support alloc-free locals, we are able to write directly to a local.
/// (Without that optimization, we'd just always be a `MemPlace`.)
Local {
frame: usize,
local: mir::Local,
},
Local { frame: usize, local: mir::Local },
}
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag=()> {
pub struct PlaceTy<'tcx, Tag = ()> {
place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyLayout<'tcx>,
}
@ -62,7 +59,7 @@ impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> {
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag=()> {
pub struct MPlaceTy<'tcx, Tag = ()> {
mplace: MemPlace<Tag>,
pub layout: TyLayout<'tcx>,
}
@ -78,10 +75,7 @@ impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> {
impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
PlaceTy {
place: Place::Ptr(mplace.mplace),
layout: mplace.layout
}
PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
}
}
@ -89,11 +83,7 @@ impl<Tag> MemPlace<Tag> {
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
pub fn replace_tag(self, new_tag: Tag) -> Self {
MemPlace {
ptr: self.ptr.erase_tag().with_tag(new_tag),
align: self.align,
meta: self.meta,
}
MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta }
}
#[inline]
@ -107,11 +97,7 @@ impl<Tag> MemPlace<Tag> {
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
MemPlace {
ptr,
align,
meta: None,
}
MemPlace { ptr, align, meta: None }
}
/// Produces a Place that will error if attempted to be read from or written to
@ -156,19 +142,16 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
MPlaceTy {
mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
layout.align.abi
layout.align.abi,
),
layout
layout,
}
}
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
pub fn replace_tag(self, new_tag: Tag) -> Self {
MPlaceTy {
mplace: self.mplace.replace_tag(new_tag),
layout: self.layout,
}
MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
}
#[inline]
@ -179,10 +162,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
layout: TyLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset(offset, meta, cx)?,
layout,
})
Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout })
}
#[inline]
@ -195,8 +175,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.kind {
ty::Slice(..) | ty::Str =>
return self.mplace.meta.unwrap().to_machine_usize(cx),
ty::Slice(..) | ty::Str => return self.mplace.meta.unwrap().to_machine_usize(cx),
_ => bug!("len not supported on unsized type {:?}", self.layout.ty),
}
} else {
@ -256,7 +235,6 @@ impl<Tag: ::std::fmt::Debug> Place<Tag> {
match self {
Place::Ptr(mplace) => mplace,
_ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),
}
}
}
@ -288,9 +266,8 @@ where
&self,
val: ImmTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type = val.layout.ty.builtin_deref(true)
.expect("`ref_to_mplace` called on non-ptr type")
.ty;
let pointee_type =
val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
let layout = self.layout_of(pointee_type)?;
let (ptr, meta) = match *val {
Immediate::Scalar(ptr) => (ptr.not_undef()?, None),
@ -347,7 +324,8 @@ where
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let (size, align) = self.size_and_align_of_mplace(place)?
let (size, align) = self
.size_and_align_of_mplace(place)?
.unwrap_or((place.layout.size, place.layout.align.abi));
assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
place.mplace.align = align; // maximally strict checking
@ -379,8 +357,9 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Not using the layout method because we want to compute on u64
let offset = match base.layout.fields {
layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
offsets[usize::try_from(field).unwrap()],
layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
offsets[usize::try_from(field).unwrap()]
}
layout::FieldPlacement::Array { stride, .. } => {
let len = base.len(self)?;
if field >= len {
@ -390,9 +369,13 @@ where
stride * field
}
layout::FieldPlacement::Union(count) => {
assert!(field < count as u64,
"Tried to access field {} of union {:#?} with {} fields",
field, base.layout, count);
assert!(
field < count as u64,
"Tried to access field {} of union {:#?} with {} fields",
field,
base.layout,
count
);
// Offset is always 0
Size::from_bytes(0)
}
@ -409,13 +392,14 @@ where
let align = match self.size_and_align_of(base.meta, field_layout)? {
Some((_, align)) => align,
None if offset == Size::ZERO =>
// An extern type at offset 0, we fall back to its static alignment.
// FIXME: Once we have made decisions for how to handle size and alignment
// of `extern type`, this should be adapted. It is just a temporary hack
// to get some code to work that probably ought to work.
field_layout.align.abi,
None =>
bug!("Cannot compute offset for extern type field at non-0 offset"),
// An extern type at offset 0, we fall back to its static alignment.
// FIXME: Once we have made decisions for how to handle size and alignment
// of `extern type`, this should be adapted. It is just a temporary hack
// to get some code to work that probably ought to work.
{
field_layout.align.abi
}
None => bug!("Cannot compute offset for extern type field at non-0 offset"),
};
(base.meta, offset.align_to(align))
} else {
@ -467,8 +451,7 @@ where
// Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout).
let from_offset = match base.layout.fields {
layout::FieldPlacement::Array { stride, .. } =>
stride * from,
layout::FieldPlacement::Array { stride, .. } => stride * from,
_ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};
@ -477,14 +460,12 @@ where
let (meta, ty) = match base.layout.ty.kind {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
ty::Array(inner, _) =>
(None, self.tcx.mk_array(inner, inner_len)),
ty::Array(inner, _) => (None, self.tcx.mk_array(inner, inner_len)),
ty::Slice(..) => {
let len = Scalar::from_uint(inner_len, self.pointer_size());
(Some(len), base.layout.ty)
}
_ =>
bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
_ => bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
};
let layout = self.layout_of(ty)?;
base.offset(from_offset, meta, layout, self)
@ -520,11 +501,7 @@ where
self.mplace_field(base, u64::try_from(n).unwrap())?
}
ConstantIndex {
offset,
min_length,
from_end,
} => {
ConstantIndex { offset, min_length, from_end } => {
let n = base.len(self)?;
if n < min_length as u64 {
// This can only be reached in ConstProp and non-rustc-MIR.
@ -542,8 +519,9 @@ where
self.mplace_field(base, index)?
}
Subslice { from, to, from_end } =>
self.mplace_subslice(base, u64::from(from), u64::from(to), from_end)?,
Subslice { from, to, from_end } => {
self.mplace_subslice(base, u64::from(from), u64::from(to), from_end)?
}
})
}
@ -569,8 +547,9 @@ where
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// Downcast just changes the layout
Ok(match base.place {
Place::Ptr(mplace) =>
self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
Place::Ptr(mplace) => {
self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into()
}
Place::Local { .. } => {
let layout = base.layout.for_variant(self, variant);
PlaceTy { layout, ..base }
@ -586,7 +565,7 @@ where
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.place_field(base, field.index() as u64)?,
Field(field, _) => self.place_field(base, field.index() as u64)?,
Downcast(_, variant) => self.place_downcast(base, variant)?,
Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
// For the other variants, we have to force an allocation.
@ -602,7 +581,7 @@ where
/// `eval_place` and `eval_place_to_op`.
pub(super) fn eval_static_to_mplace(
&self,
place_static: &mir::Static<'tcx>
place_static: &mir::Static<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::StaticKind;
@ -617,10 +596,7 @@ where
throw_inval!(TooGeneric);
}
self.const_eval_raw(GlobalId {
instance,
promoted: Some(promoted),
})?
self.const_eval_raw(GlobalId { instance, promoted: Some(promoted) })?
}
StaticKind::Static => {
@ -674,19 +650,14 @@ where
// bail out.
None => Place::null(&*self),
},
layout: self.layout_of(
self.subst_from_frame_and_normalize_erasing_regions(
self.frame().body.return_ty()
)
)?,
layout: self.layout_of(self.subst_from_frame_and_normalize_erasing_regions(
self.frame().body.return_ty(),
))?,
}
},
}
PlaceBase::Local(local) => PlaceTy {
// This works even for dead/uninitialized locals; we check further when writing
place: Place::Local {
frame: self.cur_frame(),
local: *local,
},
place: Place::Local { frame: self.cur_frame(), local: *local },
layout: self.layout_of_local(self.frame(), *local, None)?,
},
PlaceBase::Static(place_static) => self.eval_static_to_mplace(&place_static)?.into(),
@ -756,13 +727,19 @@ where
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
match src {
Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
assert_eq!(self.pointer_size(), dest.layout.size,
"Size mismatch when writing pointer"),
Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) =>
assert_eq!(Size::from_bytes(size.into()), dest.layout.size,
"Size mismatch when writing bits"),
Immediate::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size
Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) => assert_eq!(
self.pointer_size(),
dest.layout.size,
"Size mismatch when writing pointer"
),
Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) => {
assert_eq!(
Size::from_bytes(size.into()),
dest.layout.size,
"Size mismatch when writing bits"
)
}
Immediate::Scalar(ScalarMaybeUndef::Undef) => {} // undef can have any size
Immediate::ScalarPair(_, _) => {
// FIXME: Can we check anything here?
}
@ -785,7 +762,7 @@ where
mplace
}
}
},
}
Place::Ptr(mplace) => mplace, // already referring to memory
};
let dest = MPlaceTy { mplace, layout: dest.layout };
@ -808,8 +785,7 @@ where
// wrong type.
// Invalid places are a thing: the return place of a diverging function
let ptr = match self.check_mplace_access(dest, None)?
{
let ptr = match self.check_mplace_access(dest, None)? {
Some(ptr) => ptr,
None => return Ok(()), // zero-sized access
};
@ -821,12 +797,16 @@ where
match value {
Immediate::Scalar(scalar) => {
match dest.layout.abi {
layout::Abi::Scalar(_) => {}, // fine
_ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}",
dest.layout)
layout::Abi::Scalar(_) => {} // fine
_ => {
bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}", dest.layout)
}
}
self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
tcx, ptr, scalar, dest.layout.size
tcx,
ptr,
scalar,
dest.layout.size,
)
}
Immediate::ScalarPair(a_val, b_val) => {
@ -835,8 +815,10 @@ where
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
let (a, b) = match dest.layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
_ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout)
_ => bug!(
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout
),
};
let (a_size, b_size) = (a.size(self), b.size(self));
let b_offset = a_size.align_to(b.align(self).abi);
@ -846,12 +828,8 @@ where
// but that does not work: We could be a newtype around a pair, then the
// fields do not match the `ScalarPair` components.
self.memory
.get_raw_mut(ptr.alloc_id)?
.write_scalar(tcx, ptr, a_val, a_size)?;
self.memory
.get_raw_mut(b_ptr.alloc_id)?
.write_scalar(tcx, b_ptr, b_val, b_size)
self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(tcx, ptr, a_val, a_size)?;
self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(tcx, b_ptr, b_val, b_size)
}
}
}
@ -885,8 +863,12 @@ where
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
assert!(src.layout.details == dest.layout.details,
"Layout mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
assert!(
src.layout.details == dest.layout.details,
"Layout mismatch when copying!\nsrc: {:#?}\ndest: {:#?}",
src,
dest
);
// Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
let src = match self.try_read_immediate(src)? {
@ -906,15 +888,19 @@ where
// is being initialized!
let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
let size = size.unwrap_or_else(|| {
assert!(!dest.layout.is_unsized(),
"Cannot copy into already initialized unsized place");
assert!(
!dest.layout.is_unsized(),
"Cannot copy into already initialized unsized place"
);
dest.layout.size
});
assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
let src = self.check_mplace_access(src, Some(size))
let src = self
.check_mplace_access(src, Some(size))
.expect("places should be checked on creation");
let dest = self.check_mplace_access(dest, Some(size))
let dest = self
.check_mplace_access(dest, Some(size))
.expect("places should be checked on creation");
let (src_ptr, dest_ptr) = match (src, dest) {
(Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
@ -922,12 +908,7 @@ where
_ => bug!("The pointers should both be Some or both None"),
};
self.memory.copy(
src_ptr,
dest_ptr,
size,
/*nonoverlapping*/ true,
)
self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true)
}
/// Copies the data from an operand to a place. The layouts may disagree, but they must
@ -942,12 +923,18 @@ where
return self.copy_op(src, dest);
}
// We still require the sizes to match.
assert!(src.layout.size == dest.layout.size,
"Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
assert!(
src.layout.size == dest.layout.size,
"Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}",
src,
dest
);
// Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
// to avoid that here.
assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
"Cannot transmute unsized data");
assert!(
!src.layout.is_unsized() && !dest.layout.is_unsized(),
"Cannot transmute unsized data"
);
// The hard case is `ScalarPair`. `src` is already read from memory in this case,
// using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
@ -1006,7 +993,8 @@ where
// that has different alignment than the outer field.
// We also need to support unsized types, and hence cannot use `allocate`.
let local_layout = self.layout_of_local(&self.stack[frame], local, None)?;
let (size, align) = self.size_and_align_of(meta, local_layout)?
let (size, align) = self
.size_and_align_of(meta, local_layout)?
.expect("Cannot allocate for non-dyn-sized type");
let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
let mplace = MemPlace { ptr: ptr.into(), align, meta };
@ -1026,7 +1014,7 @@ where
Err(mplace) => (mplace, None), // this already was an indirect local
}
}
Place::Ptr(mplace) => (mplace, None)
Place::Ptr(mplace) => (mplace, None),
};
// Return with the original layout, so that the caller can go on
Ok((MPlaceTy { mplace, layout: place.layout }, size))
@ -1057,11 +1045,8 @@ where
) -> MPlaceTy<'tcx, M::PointerTag> {
let ptr = self.memory.allocate_static_bytes(str.as_bytes(), kind);
let meta = Scalar::from_uint(str.len() as u128, self.pointer_size());
let mplace = MemPlace {
ptr: ptr.into(),
align: Align::from_bytes(1).unwrap(),
meta: Some(meta),
};
let mplace =
MemPlace { ptr: ptr.into(), align: Align::from_bytes(1).unwrap(), meta: Some(meta) };
let layout = self.layout_of(self.tcx.mk_static_str()).unwrap();
MPlaceTy { mplace, layout }
@ -1072,7 +1057,6 @@ where
variant_index: VariantIdx,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
// Layout computation excludes uninhabited variants from consideration
// therefore there's no way to represent those variants in the given layout.
if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
@ -1105,11 +1089,8 @@ where
self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
}
layout::Variants::Multiple {
discr_kind: layout::DiscriminantKind::Niche {
dataful_variant,
ref niche_variants,
niche_start,
},
discr_kind:
layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
discr: ref discr_layout,
discr_index,
..
@ -1119,7 +1100,8 @@ where
if variant_index != dataful_variant {
let variants_start = niche_variants.start().as_u32();
let variant_index_relative = variant_index.as_u32()
let variant_index_relative = variant_index
.as_u32()
.checked_sub(variants_start)
.expect("overflow computing relative variant idx");
// We need to use machine arithmetic when taking into account `niche_start`:
@ -1156,8 +1138,10 @@ where
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
-> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
pub(super) fn unpack_dyn_trait(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable(); // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;
@ -1170,10 +1154,7 @@ where
assert_eq!(align, layout.align.abi);
}
let mplace = MPlaceTy {
mplace: MemPlace { meta: None, ..*mplace },
layout
};
let mplace = MPlaceTy { mplace: MemPlace { meta: None, ..*mplace }, layout };
Ok((instance, mplace))
}
}

View File

@ -176,32 +176,32 @@
use crate::monomorphize;
use rustc::hir::{self, CodegenFnAttrFlags};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::mir::interpret::{AllocId, ConstValue};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::{self, CodegenFnAttrFlags};
use rustc::middle::lang_items::{ExchangeMallocFnLangItem, StartFnLangItem};
use rustc::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc::ty::{self, TypeFoldable, Ty, TyCtxt, GenericParamDefKind, Instance};
use rustc::ty::print::obsolete::DefPathBasedNames;
use rustc::ty::adjustment::{CustomCoerceUnsized, PointerCast};
use rustc::session::config::EntryFnType;
use rustc::mir::{self, Location, PlaceBase, Static, StaticKind};
use rustc::mir::interpret::{AllocId, ConstValue};
use rustc::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
use rustc::mir::mono::{InstantiationMode, MonoItem};
use rustc::mir::visit::Visitor as MirVisitor;
use rustc::mir::mono::{MonoItem, InstantiationMode};
use rustc::mir::interpret::{Scalar, GlobalAlloc, ErrorHandled};
use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
use rustc::mir::{self, Location, PlaceBase, Static, StaticKind};
use rustc::session::config::EntryFnType;
use rustc::ty::adjustment::{CustomCoerceUnsized, PointerCast};
use rustc::ty::print::obsolete::DefPathBasedNames;
use rustc::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc::ty::{self, GenericParamDefKind, Instance, Ty, TyCtxt, TypeFoldable};
use rustc::util::common::time;
use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet};
use rustc_data_structures::sync::{par_iter, MTLock, MTRef, ParallelIterator};
use rustc_index::bit_set::GrowableBitSet;
use rustc_data_structures::sync::{MTRef, MTLock, ParallelIterator, par_iter};
use std::iter;
#[derive(PartialEq)]
pub enum MonoItemCollectionMode {
Eager,
Lazy
Lazy,
}
/// Maps every mono item to all mono items it references in its
@ -220,7 +220,6 @@ pub struct InliningMap<'tcx> {
}
impl<'tcx> InliningMap<'tcx> {
fn new() -> InliningMap<'tcx> {
InliningMap {
index: FxHashMap::default(),
@ -229,10 +228,9 @@ impl<'tcx> InliningMap<'tcx> {
}
}
fn record_accesses<I>(&mut self,
source: MonoItem<'tcx>,
new_targets: I)
where I: Iterator<Item=(MonoItem<'tcx>, bool)> + ExactSizeIterator
fn record_accesses<I>(&mut self, source: MonoItem<'tcx>, new_targets: I)
where
I: Iterator<Item = (MonoItem<'tcx>, bool)> + ExactSizeIterator,
{
assert!(!self.index.contains_key(&source));
@ -257,12 +255,11 @@ impl<'tcx> InliningMap<'tcx> {
// Internally iterate over all items referenced by `source` which will be
// made available for inlining.
pub fn with_inlining_candidates<F>(&self, source: MonoItem<'tcx>, mut f: F)
where F: FnMut(MonoItem<'tcx>)
where
F: FnMut(MonoItem<'tcx>),
{
if let Some(&(start_index, end_index)) = self.index.get(&source) {
for (i, candidate) in self.targets[start_index .. end_index]
.iter()
.enumerate() {
for (i, candidate) in self.targets[start_index..end_index].iter().enumerate() {
if self.inlines.contains(start_index + i) {
f(*candidate);
}
@ -272,10 +269,11 @@ impl<'tcx> InliningMap<'tcx> {
// Internally iterate over all items and the things each accesses.
pub fn iter_accesses<F>(&self, mut f: F)
where F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>])
where
F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>]),
{
for (&accessor, &(start_index, end_index)) in &self.index {
f(accessor, &self.targets[start_index .. end_index])
f(accessor, &self.targets[start_index..end_index])
}
}
}
@ -287,8 +285,7 @@ pub fn collect_crate_mono_items(
let _prof_timer = tcx.prof.generic_activity("monomorphization_collector");
let roots = time(tcx.sess, "collecting roots", || {
let _prof_timer = tcx.prof
.generic_activity("monomorphization_collector_root_collections");
let _prof_timer = tcx.prof.generic_activity("monomorphization_collector_root_collections");
collect_roots(tcx, mode)
});
@ -298,8 +295,7 @@ pub fn collect_crate_mono_items(
let mut inlining_map = MTLock::new(InliningMap::new());
{
let _prof_timer = tcx.prof
.generic_activity("monomorphization_collector_graph_walk");
let _prof_timer = tcx.prof.generic_activity("monomorphization_collector_graph_walk");
let visited: MTRef<'_, _> = &mut visited;
let inlining_map: MTRef<'_, _> = &mut inlining_map;
@ -307,11 +303,7 @@ pub fn collect_crate_mono_items(
time(tcx.sess, "collecting mono items", || {
par_iter(roots).for_each(|root| {
let mut recursion_depths = DefIdMap::default();
collect_items_rec(tcx,
root,
visited,
&mut recursion_depths,
inlining_map);
collect_items_rec(tcx, root, visited, &mut recursion_depths, inlining_map);
});
});
}
@ -330,12 +322,7 @@ fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionMode) -> Vec<MonoItem<
debug!("collect_roots: entry_fn = {:?}", entry_fn);
let mut visitor = RootCollector {
tcx,
mode,
entry_fn,
output: &mut roots,
};
let mut visitor = RootCollector { tcx, mode, entry_fn, output: &mut roots };
tcx.hir().krate().visit_all_item_likes(&mut visitor);
@ -388,9 +375,7 @@ fn collect_items_rec<'tcx>(
debug_assert!(should_monomorphize_locally(tcx, &instance));
// Keep track of the monomorphization recursion depth
recursion_depth_reset = Some(check_recursion_limit(tcx,
instance,
recursion_depths));
recursion_depth_reset = Some(check_recursion_limit(tcx, instance, recursion_depths));
check_type_length_limit(tcx, instance);
collect_neighbours(tcx, instance, &mut neighbors);
@ -423,10 +408,8 @@ fn record_accesses<'tcx>(
mono_item.instantiation_mode(tcx) == InstantiationMode::LocalCopy
};
let accesses = callees.into_iter()
.map(|mono_item| {
(*mono_item, is_inlining_candidate(mono_item))
});
let accesses =
callees.into_iter().map(|mono_item| (*mono_item, is_inlining_candidate(mono_item)));
inlining_map.lock_mut().record_accesses(caller, accesses);
}
@ -452,8 +435,7 @@ fn check_recursion_limit<'tcx>(
// more than the recursion limit is assumed to be causing an
// infinite expansion.
if recursion_depth > *tcx.sess.recursion_limit.get() {
let error = format!("reached the recursion limit while instantiating `{}`",
instance);
let error = format!("reached the recursion limit while instantiating `{}`", instance);
if let Some(hir_id) = tcx.hir().as_local_hir_id(def_id) {
tcx.sess.span_fatal(tcx.hir().span(hir_id), &error);
} else {
@ -498,18 +480,17 @@ fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
// Only use the shrunk version if it's really shorter.
// This also avoids the case where before and after slices overlap.
if shrunk.len() < s.len() {
shrunk
} else {
s
}
if shrunk.len() < s.len() { shrunk } else { s }
};
let msg = format!("reached the type-length limit while instantiating `{}`",
shrink(instance.to_string(), 32, 32));
let msg = format!(
"reached the type-length limit while instantiating `{}`",
shrink(instance.to_string(), 32, 32)
);
let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg);
diag.note(&format!(
"consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate",
type_length));
type_length
));
diag.emit();
tcx.sess.abort_if_errors();
}
@ -531,7 +512,9 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
// have to instantiate all methods of the trait being cast to, so we
// can build the appropriate vtable.
mir::Rvalue::Cast(
mir::CastKind::Pointer(PointerCast::Unsize), ref operand, target_ty
mir::CastKind::Pointer(PointerCast::Unsize),
ref operand,
target_ty,
) => {
let target_ty = self.tcx.subst_and_normalize_erasing_regions(
self.param_substs,
@ -544,21 +527,24 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
ty::ParamEnv::reveal_all(),
&source_ty,
);
let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.tcx,
source_ty,
target_ty);
let (source_ty, target_ty) =
find_vtable_types_for_unsizing(self.tcx, source_ty, target_ty);
// This could also be a different Unsize instruction, like
// from a fixed sized array to a slice. But we are only
// interested in things that produce a vtable.
if target_ty.is_trait() && !source_ty.is_trait() {
create_mono_items_for_vtable_methods(self.tcx,
target_ty,
source_ty,
self.output);
create_mono_items_for_vtable_methods(
self.tcx,
target_ty,
source_ty,
self.output,
);
}
}
mir::Rvalue::Cast(
mir::CastKind::Pointer(PointerCast::ReifyFnPointer), ref operand, _
mir::CastKind::Pointer(PointerCast::ReifyFnPointer),
ref operand,
_,
) => {
let fn_ty = operand.ty(self.body, self.tcx);
let fn_ty = self.tcx.subst_and_normalize_erasing_regions(
@ -569,7 +555,9 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
visit_fn_use(self.tcx, fn_ty, false, &mut self.output);
}
mir::Rvalue::Cast(
mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)), ref operand, _
mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
ref operand,
_,
) => {
let source_ty = operand.ty(self.body, self.tcx);
let source_ty = self.tcx.subst_and_normalize_erasing_regions(
@ -580,8 +568,11 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
match source_ty.kind {
ty::Closure(def_id, substs) => {
let instance = Instance::resolve_closure(
self.tcx, def_id,
substs, ty::ClosureKind::FnOnce);
self.tcx,
def_id,
substs,
ty::ClosureKind::FnOnce,
);
if should_monomorphize_locally(self.tcx, &instance) {
self.output.push(create_fn_mono_item(instance));
}
@ -614,9 +605,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
self.super_const(constant);
}
fn visit_terminator_kind(&mut self,
kind: &mir::TerminatorKind<'tcx>,
location: Location) {
fn visit_terminator_kind(&mut self, kind: &mir::TerminatorKind<'tcx>, location: Location) {
debug!("visiting terminator {:?} @ {:?}", kind, location);
let tcx = self.tcx;
@ -630,8 +619,8 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
);
visit_fn_use(self.tcx, callee_ty, true, &mut self.output);
}
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
mir::TerminatorKind::Drop { ref location, .. }
| mir::TerminatorKind::DropAndReplace { ref location, .. } => {
let ty = location.ty(self.body, self.tcx).ty;
let ty = tcx.subst_and_normalize_erasing_regions(
self.param_substs,
@ -640,26 +629,28 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
);
visit_drop_use(self.tcx, ty, true, self.output);
}
mir::TerminatorKind::Goto { .. } |
mir::TerminatorKind::SwitchInt { .. } |
mir::TerminatorKind::Resume |
mir::TerminatorKind::Abort |
mir::TerminatorKind::Return |
mir::TerminatorKind::Unreachable |
mir::TerminatorKind::Assert { .. } => {}
mir::TerminatorKind::GeneratorDrop |
mir::TerminatorKind::Yield { .. } |
mir::TerminatorKind::FalseEdges { .. } |
mir::TerminatorKind::FalseUnwind { .. } => bug!(),
mir::TerminatorKind::Goto { .. }
| mir::TerminatorKind::SwitchInt { .. }
| mir::TerminatorKind::Resume
| mir::TerminatorKind::Abort
| mir::TerminatorKind::Return
| mir::TerminatorKind::Unreachable
| mir::TerminatorKind::Assert { .. } => {}
mir::TerminatorKind::GeneratorDrop
| mir::TerminatorKind::Yield { .. }
| mir::TerminatorKind::FalseEdges { .. }
| mir::TerminatorKind::FalseUnwind { .. } => bug!(),
}
self.super_terminator_kind(kind, location);
}
fn visit_place_base(&mut self,
place_base: &mir::PlaceBase<'tcx>,
_context: mir::visit::PlaceContext,
location: Location) {
fn visit_place_base(
&mut self,
place_base: &mir::PlaceBase<'tcx>,
_context: mir::visit::PlaceContext,
location: Location,
) {
match place_base {
PlaceBase::Static(box Static { kind: StaticKind::Static, def_id, .. }) => {
debug!("visiting static {:?} @ {:?}", def_id, location);
@ -678,11 +669,11 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
let instance = Instance::new(*def_id, substs.subst(self.tcx, self.param_substs));
match self.tcx.const_eval_promoted(instance, *promoted) {
Ok(val) => collect_const(self.tcx, val, substs, self.output),
Err(ErrorHandled::Reported) => {},
Err(ErrorHandled::Reported) => {}
Err(ErrorHandled::TooGeneric) => {
let span = self.tcx.promoted_mir(*def_id)[*promoted].span;
span_bug!(span, "collection encountered polymorphic constant")
},
}
}
}
PlaceBase::Local(_) => {
@ -709,11 +700,8 @@ fn visit_fn_use<'tcx>(
output: &mut Vec<MonoItem<'tcx>>,
) {
if let ty::FnDef(def_id, substs) = ty.kind {
let resolver = if is_direct_call {
ty::Instance::resolve
} else {
ty::Instance::resolve_for_fn_ptr
};
let resolver =
if is_direct_call { ty::Instance::resolve } else { ty::Instance::resolve_for_fn_ptr };
let instance = resolver(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap();
visit_instance_use(tcx, instance, is_direct_call, output);
}
@ -727,12 +715,11 @@ fn visit_instance_use<'tcx>(
) {
debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call);
if !should_monomorphize_locally(tcx, &instance) {
return
return;
}
match instance.def {
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::Intrinsic(_) => {
ty::InstanceDef::Virtual(..) | ty::InstanceDef::Intrinsic(_) => {
if !is_direct_call {
bug!("{:?} being reified", instance);
}
@ -743,13 +730,13 @@ fn visit_instance_use<'tcx>(
output.push(create_fn_mono_item(instance));
}
}
ty::InstanceDef::DropGlue(_, Some(_)) |
ty::InstanceDef::VtableShim(..) |
ty::InstanceDef::ReifyShim(..) |
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Item(..) |
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::CloneShim(..) => {
ty::InstanceDef::DropGlue(_, Some(_))
| ty::InstanceDef::VtableShim(..)
| ty::InstanceDef::ReifyShim(..)
| ty::InstanceDef::ClosureOnceShim { .. }
| ty::InstanceDef::Item(..)
| ty::InstanceDef::FnPtrShim(..)
| ty::InstanceDef::CloneShim(..) => {
output.push(create_fn_mono_item(instance));
}
}
@ -761,14 +748,14 @@ fn visit_instance_use<'tcx>(
fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) -> bool {
let def_id = match instance.def {
ty::InstanceDef::Item(def_id) => def_id,
ty::InstanceDef::VtableShim(..) |
ty::InstanceDef::ReifyShim(..) |
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::Intrinsic(_) |
ty::InstanceDef::CloneShim(..) => return true
ty::InstanceDef::VtableShim(..)
| ty::InstanceDef::ReifyShim(..)
| ty::InstanceDef::ClosureOnceShim { .. }
| ty::InstanceDef::Virtual(..)
| ty::InstanceDef::FnPtrShim(..)
| ty::InstanceDef::DropGlue(..)
| ty::InstanceDef::Intrinsic(_)
| ty::InstanceDef::CloneShim(..) => return true,
};
if tcx.is_foreign_item(def_id) {
@ -781,8 +768,9 @@ fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx
return true;
}
if tcx.is_reachable_non_generic(def_id) ||
is_available_upstream_generic(tcx, def_id, instance.substs) {
if tcx.is_reachable_non_generic(def_id)
|| is_available_upstream_generic(tcx, def_id, instance.substs)
{
// We can link to the item in question, no instance needed
// in this crate.
return false;
@ -804,21 +792,21 @@ fn should_monomorphize_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx
// monomorphizations but always instantiate our own internal versions
// instead.
if !tcx.sess.opts.share_generics() {
return false
return false;
}
// If this instance has non-erasable parameters, it cannot be a shared
// monomorphization. Non-generic instances are already handled above
// by `is_reachable_non_generic()`.
if substs.non_erasable_generics().next().is_none() {
return false
return false;
}
// Take a look at the available monomorphizations listed in the metadata
// of upstream crates.
tcx.upstream_monomorphizations_for(def_id)
.map(|set| set.contains_key(substs))
.unwrap_or(false)
.map(|set| set.contains_key(substs))
.unwrap_or(false)
}
}
@ -886,43 +874,42 @@ fn find_vtable_types_for_unsizing<'tcx>(
};
match (&source_ty.kind, &target_ty.kind) {
(&ty::Ref(_, a, _),
&ty::Ref(_, b, _)) |
(&ty::Ref(_, a, _),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
(&ty::Ref(_, a, _), &ty::Ref(_, b, _))
| (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
ptr_vtable(a, b)
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
}
(&ty::Adt(source_adt_def, source_substs),
&ty::Adt(target_adt_def, target_substs)) => {
(&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
let kind =
monomorphize::custom_coerce_unsize_info(tcx, source_ty, target_ty);
let kind = monomorphize::custom_coerce_unsize_info(tcx, source_ty, target_ty);
let coerce_index = match kind {
CustomCoerceUnsized::Struct(i) => i
CustomCoerceUnsized::Struct(i) => i,
};
let source_fields = &source_adt_def.non_enum_variant().fields;
let target_fields = &target_adt_def.non_enum_variant().fields;
assert!(coerce_index < source_fields.len() &&
source_fields.len() == target_fields.len());
assert!(
coerce_index < source_fields.len() && source_fields.len() == target_fields.len()
);
find_vtable_types_for_unsizing(tcx,
find_vtable_types_for_unsizing(
tcx,
source_fields[coerce_index].ty(tcx, source_substs),
target_fields[coerce_index].ty(tcx, target_substs)
target_fields[coerce_index].ty(tcx, target_substs),
)
}
_ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
source_ty,
target_ty)
_ => bug!(
"find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
source_ty,
target_ty
),
}
}
@ -939,8 +926,12 @@ fn create_mono_items_for_vtable_methods<'tcx>(
impl_ty: Ty<'tcx>,
output: &mut Vec<MonoItem<'tcx>>,
) {
assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_bound_vars() &&
!impl_ty.needs_subst() && !impl_ty.has_escaping_bound_vars());
assert!(
!trait_ty.needs_subst()
&& !trait_ty.has_escaping_bound_vars()
&& !impl_ty.needs_subst()
&& !impl_ty.has_escaping_bound_vars()
);
if let ty::Dynamic(ref trait_ty, ..) = trait_ty.kind {
if let Some(principal) = trait_ty.principal() {
@ -949,12 +940,19 @@ fn create_mono_items_for_vtable_methods<'tcx>(
// Walk all methods of the trait, including those of its supertraits
let methods = tcx.vtable_methods(poly_trait_ref);
let methods = methods.iter().cloned().filter_map(|method| method)
.map(|(def_id, substs)| ty::Instance::resolve_for_vtable(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
substs).unwrap())
let methods = methods
.iter()
.cloned()
.filter_map(|method| method)
.map(|(def_id, substs)| {
ty::Instance::resolve_for_vtable(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
substs,
)
.unwrap()
})
.filter(|&instance| should_monomorphize_locally(tcx, &instance))
.map(|instance| create_fn_mono_item(instance));
output.extend(methods);
@ -979,33 +977,33 @@ struct RootCollector<'a, 'tcx> {
impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
fn visit_item(&mut self, item: &'v hir::Item<'v>) {
match item.kind {
hir::ItemKind::ExternCrate(..) |
hir::ItemKind::Use(..) |
hir::ItemKind::ForeignMod(..) |
hir::ItemKind::TyAlias(..) |
hir::ItemKind::Trait(..) |
hir::ItemKind::TraitAlias(..) |
hir::ItemKind::OpaqueTy(..) |
hir::ItemKind::Mod(..) => {
hir::ItemKind::ExternCrate(..)
| hir::ItemKind::Use(..)
| hir::ItemKind::ForeignMod(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Trait(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::Mod(..) => {
// Nothing to do, just keep recursing.
}
hir::ItemKind::Impl(..) => {
if self.mode == MonoItemCollectionMode::Eager {
create_mono_items_for_default_impls(self.tcx,
item,
self.output);
create_mono_items_for_default_impls(self.tcx, item, self.output);
}
}
hir::ItemKind::Enum(_, ref generics) |
hir::ItemKind::Struct(_, ref generics) |
hir::ItemKind::Union(_, ref generics) => {
hir::ItemKind::Enum(_, ref generics)
| hir::ItemKind::Struct(_, ref generics)
| hir::ItemKind::Union(_, ref generics) => {
if generics.params.is_empty() {
if self.mode == MonoItemCollectionMode::Eager {
let def_id = self.tcx.hir().local_def_id(item.hir_id);
debug!("RootCollector: ADT drop-glue for {}",
def_id_to_string(self.tcx, def_id));
debug!(
"RootCollector: ADT drop-glue for {}",
def_id_to_string(self.tcx, def_id)
);
let ty = Instance::new(def_id, InternalSubsts::empty()).ty(self.tcx);
visit_drop_use(self.tcx, ty, true, self.output);
@ -1013,15 +1011,15 @@ impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
}
}
hir::ItemKind::GlobalAsm(..) => {
debug!("RootCollector: ItemKind::GlobalAsm({})",
def_id_to_string(self.tcx,
self.tcx.hir().local_def_id(item.hir_id)));
debug!(
"RootCollector: ItemKind::GlobalAsm({})",
def_id_to_string(self.tcx, self.tcx.hir().local_def_id(item.hir_id))
);
self.output.push(MonoItem::GlobalAsm(item.hir_id));
}
hir::ItemKind::Static(..) => {
let def_id = self.tcx.hir().local_def_id(item.hir_id);
debug!("RootCollector: ItemKind::Static({})",
def_id_to_string(self.tcx, def_id));
debug!("RootCollector: ItemKind::Static({})", def_id_to_string(self.tcx, def_id));
self.output.push(MonoItem::Static(def_id));
}
hir::ItemKind::Const(..) => {
@ -1060,17 +1058,19 @@ impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
impl RootCollector<'_, 'v> {
fn is_root(&self, def_id: DefId) -> bool {
!item_requires_monomorphization(self.tcx, def_id) && match self.mode {
MonoItemCollectionMode::Eager => {
true
!item_requires_monomorphization(self.tcx, def_id)
&& match self.mode {
MonoItemCollectionMode::Eager => true,
MonoItemCollectionMode::Lazy => {
self.entry_fn.map(|(id, _)| id) == Some(def_id)
|| self.tcx.is_reachable_non_generic(def_id)
|| self
.tcx
.codegen_fn_attrs(def_id)
.flags
.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
}
}
MonoItemCollectionMode::Lazy => {
self.entry_fn.map(|(id, _)| id) == Some(def_id) ||
self.tcx.is_reachable_non_generic(def_id) ||
self.tcx.codegen_fn_attrs(def_id).flags.contains(
CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
}
}
}
/// If `def_id` represents a root, pushes it onto the list of
@ -1106,16 +1106,15 @@ impl RootCollector<'_, 'v> {
// late-bound regions, since late-bound
// regions must appear in the argument
// listing.
let main_ret_ty = self.tcx.erase_regions(
&main_ret_ty.no_bound_vars().unwrap(),
);
let main_ret_ty = self.tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap());
let start_instance = Instance::resolve(
self.tcx,
ty::ParamEnv::reveal_all(),
start_def_id,
self.tcx.intern_substs(&[main_ret_ty.into()])
).unwrap();
self.tcx.intern_substs(&[main_ret_ty.into()]),
)
.unwrap();
self.output.push(create_fn_mono_item(start_instance));
}
@ -1136,28 +1135,24 @@ fn create_mono_items_for_default_impls<'tcx>(
for param in &generics.params {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => {}
hir::GenericParamKind::Type { .. } |
hir::GenericParamKind::Const { .. } => {
return
hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => {
return;
}
}
}
let impl_def_id = tcx.hir().local_def_id(item.hir_id);
debug!("create_mono_items_for_default_impls(item={})",
def_id_to_string(tcx, impl_def_id));
debug!(
"create_mono_items_for_default_impls(item={})",
def_id_to_string(tcx, impl_def_id)
);
if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
let param_env = ty::ParamEnv::reveal_all();
let trait_ref = tcx.normalize_erasing_regions(
param_env,
trait_ref,
);
let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref);
let overridden_methods: FxHashSet<_> =
impl_item_refs.iter()
.map(|iiref| iiref.ident.modern())
.collect();
impl_item_refs.iter().map(|iiref| iiref.ident.modern()).collect();
for method in tcx.provided_trait_methods(trait_ref.def_id) {
if overridden_methods.contains(&method.ident.modern()) {
continue;
@ -1167,31 +1162,25 @@ fn create_mono_items_for_default_impls<'tcx>(
continue;
}
let substs = InternalSubsts::for_item(tcx, method.def_id, |param, _| {
match param.kind {
let substs =
InternalSubsts::for_item(tcx, method.def_id, |param, _| match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
GenericParamDefKind::Type { .. } |
GenericParamDefKind::Const => {
GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => {
trait_ref.substs[param.index as usize]
}
}
});
let instance = ty::Instance::resolve(tcx,
param_env,
method.def_id,
substs).unwrap();
});
let instance =
ty::Instance::resolve(tcx, param_env, method.def_id, substs).unwrap();
let mono_item = create_fn_mono_item(instance);
if mono_item.is_instantiable(tcx)
&& should_monomorphize_locally(tcx, &instance) {
if mono_item.is_instantiable(tcx) && should_monomorphize_locally(tcx, &instance)
{
output.push(mono_item);
}
}
}
}
_ => {
bug!()
}
_ => bug!(),
}
}
@ -1211,7 +1200,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut Vec<Mon
for &((), inner) in alloc.relocations().values() {
collect_miri(tcx, inner, output);
}
},
}
Some(GlobalAlloc::Function(fn_instance)) => {
if should_monomorphize_locally(tcx, &fn_instance) {
trace!("collecting {:?} with {:#?}", alloc_id, fn_instance);
@ -1231,12 +1220,8 @@ fn collect_neighbours<'tcx>(
debug!("collect_neighbours: {:?}", instance.def_id());
let body = tcx.instance_mir(instance.def);
MirNeighborCollector {
tcx,
body: &body,
output,
param_substs: instance.substs,
}.visit_body(body);
MirNeighborCollector { tcx, body: &body, output, param_substs: instance.substs }
.visit_body(body);
}
fn def_id_to_string(tcx: TyCtxt<'_>, def_id: DefId) -> String {
@ -1255,17 +1240,15 @@ fn collect_const<'tcx>(
debug!("visiting const {:?}", constant);
let param_env = ty::ParamEnv::reveal_all();
let substituted_constant = tcx.subst_and_normalize_erasing_regions(
param_substs,
param_env,
&constant,
);
let substituted_constant =
tcx.subst_and_normalize_erasing_regions(param_substs, param_env, &constant);
match substituted_constant.val {
ty::ConstKind::Value(ConstValue::Scalar(Scalar::Ptr(ptr))) =>
collect_miri(tcx, ptr.alloc_id, output),
ty::ConstKind::Value(ConstValue::Slice { data: alloc, start: _, end: _ }) |
ty::ConstKind::Value(ConstValue::ByRef { alloc, .. }) => {
ty::ConstKind::Value(ConstValue::Scalar(Scalar::Ptr(ptr))) => {
collect_miri(tcx, ptr.alloc_id, output)
}
ty::ConstKind::Value(ConstValue::Slice { data: alloc, start: _, end: _ })
| ty::ConstKind::Value(ConstValue::ByRef { alloc, .. }) => {
for &((), id) in alloc.relocations().values() {
collect_miri(tcx, id, output);
}
@ -1273,12 +1256,12 @@ fn collect_const<'tcx>(
ty::ConstKind::Unevaluated(def_id, substs) => {
match tcx.const_eval_resolve(param_env, def_id, substs, None) {
Ok(val) => collect_const(tcx, val, param_substs, output),
Err(ErrorHandled::Reported) => {},
Err(ErrorHandled::TooGeneric) => span_bug!(
tcx.def_span(def_id), "collection encountered polymorphic constant",
),
Err(ErrorHandled::Reported) => {}
Err(ErrorHandled::TooGeneric) => {
span_bug!(tcx.def_span(def_id), "collection encountered polymorphic constant",)
}
}
},
_ => {},
}
_ => {}
}
}


@ -6,11 +6,12 @@
// This pass is supposed to perform only simple checks not requiring name resolution
// or type checking or some other kind of complex analysis.
use std::mem;
use errors::{Applicability, FatalError};
use rustc::lint;
use rustc::session::Session;
use rustc_data_structures::fx::FxHashMap;
use rustc_parse::validate_attr;
use std::mem;
use syntax::ast::*;
use syntax::attr;
use syntax::expand::is_proc_macro_attr;
@ -20,7 +21,6 @@ use syntax::symbol::{kw, sym};
use syntax::visit::{self, Visitor};
use syntax::{span_err, struct_span_err, walk_list};
use syntax_pos::Span;
use errors::{Applicability, FatalError};
use rustc_error_codes::*;
@ -68,8 +68,9 @@ impl<'a> AstValidator<'a> {
AssocTyConstraintKind::Equality { .. } => {}
AssocTyConstraintKind::Bound { .. } => {
if self.is_assoc_ty_bound_banned {
self.err_handler().span_err(constraint.span,
"associated type bounds are not allowed within structs, enums, or unions"
self.err_handler().span_err(
constraint.span,
"associated type bounds are not allowed within structs, enums, or unions",
);
}
}
@ -125,9 +126,7 @@ impl<'a> AstValidator<'a> {
}
fn check_lifetime(&self, ident: Ident) {
let valid_names = [kw::UnderscoreLifetime,
kw::StaticLifetime,
kw::Invalid];
let valid_names = [kw::UnderscoreLifetime, kw::StaticLifetime, kw::Invalid];
if !valid_names.contains(&ident.name) && ident.without_first_quote().is_reserved() {
self.err_handler().span_err(ident.span, "lifetimes cannot use keyword names");
}
@ -142,13 +141,11 @@ impl<'a> AstValidator<'a> {
fn invalid_visibility(&self, vis: &Visibility, note: Option<&str>) {
if let VisibilityKind::Inherited = vis.node {
return
return;
}
let mut err = struct_span_err!(self.session,
vis.span,
E0449,
"unnecessary visibility qualifier");
let mut err =
struct_span_err!(self.session, vis.span, E0449, "unnecessary visibility qualifier");
if vis.node.is_pub() {
err.span_label(vis.span, "`pub` not permitted here because it's implied");
}
@ -161,10 +158,10 @@ impl<'a> AstValidator<'a> {
fn check_decl_no_pat(decl: &FnDecl, mut report_err: impl FnMut(Span, bool)) {
for Param { pat, .. } in &decl.inputs {
match pat.kind {
PatKind::Ident(BindingMode::ByValue(Mutability::Not), _, None) |
PatKind::Wild => {}
PatKind::Ident(BindingMode::ByValue(Mutability::Mut), _, None) =>
report_err(pat.span, true),
PatKind::Ident(BindingMode::ByValue(Mutability::Not), _, None) | PatKind::Wild => {}
PatKind::Ident(BindingMode::ByValue(Mutability::Mut), _, None) => {
report_err(pat.span, true)
}
_ => report_err(pat.span, false),
}
}
@ -174,26 +171,34 @@ impl<'a> AstValidator<'a> {
if asyncness.is_async() {
struct_span_err!(self.session, span, E0706, "trait fns cannot be declared `async`")
.note("`async` trait functions are not currently supported")
.note("consider using the `async-trait` crate: \
https://crates.io/crates/async-trait")
.note(
"consider using the `async-trait` crate: \
https://crates.io/crates/async-trait",
)
.emit();
}
}
fn check_trait_fn_not_const(&self, constness: Spanned<Constness>) {
if constness.node == Constness::Const {
struct_span_err!(self.session, constness.span, E0379,
"trait fns cannot be declared const")
.span_label(constness.span, "trait fns cannot be const")
.emit();
struct_span_err!(
self.session,
constness.span,
E0379,
"trait fns cannot be declared const"
)
.span_label(constness.span, "trait fns cannot be const")
.emit();
}
}
fn no_questions_in_bounds(&self, bounds: &GenericBounds, where_: &str, is_trait: bool) {
for bound in bounds {
if let GenericBound::Trait(ref poly, TraitBoundModifier::Maybe) = *bound {
let mut err = self.err_handler().struct_span_err(poly.span,
&format!("`?Trait` is not permitted in {}", where_));
let mut err = self.err_handler().struct_span_err(
poly.span,
&format!("`?Trait` is not permitted in {}", where_),
);
if is_trait {
let path_str = pprust::path_to_string(&poly.trait_ref.path);
err.note(&format!("traits are `?{}` by default", path_str));
@ -223,48 +228,61 @@ impl<'a> AstValidator<'a> {
ExprKind::Lit(..) | ExprKind::Err => {}
ExprKind::Path(..) if allow_paths => {}
ExprKind::Unary(UnOp::Neg, ref inner)
if match inner.kind { ExprKind::Lit(_) => true, _ => false } => {}
_ => self.err_handler().span_err(expr.span, "arbitrary expressions aren't allowed \
in patterns")
if match inner.kind {
ExprKind::Lit(_) => true,
_ => false,
} => {}
_ => self.err_handler().span_err(
expr.span,
"arbitrary expressions aren't allowed \
in patterns",
),
}
}
fn check_late_bound_lifetime_defs(&self, params: &[GenericParam]) {
// Check only lifetime parameters are present and that the lifetime
// parameters that are present have no bounds.
let non_lt_param_spans: Vec<_> = params.iter().filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
if !param.bounds.is_empty() {
let spans: Vec<_> = param.bounds.iter().map(|b| b.span()).collect();
self.err_handler()
.span_err(spans, "lifetime bounds cannot be used in this context");
let non_lt_param_spans: Vec<_> = params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => {
if !param.bounds.is_empty() {
let spans: Vec<_> = param.bounds.iter().map(|b| b.span()).collect();
self.err_handler()
.span_err(spans, "lifetime bounds cannot be used in this context");
}
None
}
None
}
_ => Some(param.ident.span),
}).collect();
_ => Some(param.ident.span),
})
.collect();
if !non_lt_param_spans.is_empty() {
self.err_handler().span_err(non_lt_param_spans,
"only lifetime parameters can be used in this context");
self.err_handler().span_err(
non_lt_param_spans,
"only lifetime parameters can be used in this context",
);
}
}
fn check_fn_decl(&self, fn_decl: &FnDecl) {
match &*fn_decl.inputs {
[Param { ty, span, .. }] => if let TyKind::CVarArgs = ty.kind {
self.err_handler()
.span_err(
[Param { ty, span, .. }] => {
if let TyKind::CVarArgs = ty.kind {
self.err_handler().span_err(
*span,
"C-variadic function must be declared with at least one named argument",
);
},
[ps @ .., _] => for Param { ty, span, .. } in ps {
if let TyKind::CVarArgs = ty.kind {
self.err_handler()
.span_err(
}
}
[ps @ .., _] => {
for Param { ty, span, .. } in ps {
if let TyKind::CVarArgs = ty.kind {
self.err_handler().span_err(
*span,
"`...` must be the last argument of a C-variadic function",
);
}
}
}
_ => {}
@ -278,16 +296,22 @@ impl<'a> AstValidator<'a> {
let arr = [sym::allow, sym::cfg, sym::cfg_attr, sym::deny, sym::forbid, sym::warn];
!arr.contains(&attr.name_or_empty()) && attr::is_builtin_attr(attr)
})
.for_each(|attr| if attr.is_doc_comment() {
self.err_handler().struct_span_err(
attr.span,
"documentation comments cannot be applied to function parameters"
)
.span_label(attr.span, "doc comments are not allowed here")
.emit();
} else {
self.err_handler().span_err(attr.span, "allow, cfg, cfg_attr, deny, \
forbid, and warn are the only allowed built-in attributes in function parameters")
.for_each(|attr| {
if attr.is_doc_comment() {
self.err_handler()
.struct_span_err(
attr.span,
"documentation comments cannot be applied to function parameters",
)
.span_label(attr.span, "doc comments are not allowed here")
.emit();
} else {
self.err_handler().span_err(
attr.span,
"allow, cfg, cfg_attr, deny, \
forbid, and warn are the only allowed built-in attributes in function parameters",
)
}
});
}
@ -348,14 +372,7 @@ enum GenericPosition {
fn validate_generics_order<'a>(
sess: &Session,
handler: &errors::Handler,
generics: impl Iterator<
Item = (
ParamKindOrd,
Option<&'a [GenericBound]>,
Span,
Option<String>
),
>,
generics: impl Iterator<Item = (ParamKindOrd, Option<&'a [GenericBound]>, Span, Option<String>)>,
pos: GenericPosition,
span: Span,
) {
@ -410,13 +427,15 @@ fn validate_generics_order<'a>(
};
for (param_ord, (max_param, spans)) in &out_of_order {
let mut err = handler.struct_span_err(spans.clone(),
let mut err = handler.struct_span_err(
spans.clone(),
&format!(
"{} {pos}s must be declared prior to {} {pos}s",
param_ord,
max_param,
pos = pos_str,
));
),
);
if let GenericPosition::Param = pos {
err.span_suggestion(
span,
@ -464,8 +483,13 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
TyKind::BareFn(ref bfty) => {
self.check_fn_decl(&bfty.decl);
Self::check_decl_no_pat(&bfty.decl, |span, _| {
struct_span_err!(self.session, span, E0561,
"patterns aren't allowed in function pointer types").emit();
struct_span_err!(
self.session,
span,
E0561,
"patterns aren't allowed in function pointer types"
)
.emit();
});
self.check_late_bound_lifetime_defs(&bfty.generic_params);
}
@ -474,8 +498,12 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
for bound in bounds {
if let GenericBound::Outlives(ref lifetime) = *bound {
if any_lifetime_bounds {
span_err!(self.session, lifetime.ident.span, E0226,
"only a single explicit lifetime bound is permitted");
span_err!(
self.session,
lifetime.ident.span,
E0226,
"only a single explicit lifetime bound is permitted"
);
break;
}
any_lifetime_bounds = true;
@ -486,7 +514,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
TyKind::ImplTrait(_, ref bounds) => {
if self.is_impl_trait_banned {
struct_span_err!(
self.session, ty.span, E0667,
self.session,
ty.span,
E0667,
"`impl Trait` is not allowed in path parameters"
)
.emit();
@ -494,7 +524,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if let Some(outer_impl_trait_sp) = self.outer_impl_trait {
struct_span_err!(
self.session, ty.span, E0666,
self.session,
ty.span,
E0666,
"nested `impl Trait` is not allowed"
)
.span_label(outer_impl_trait_sp, "outer `impl Trait`")
@ -502,8 +534,10 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
.emit();
}
if !bounds.iter()
.any(|b| if let GenericBound::Trait(..) = *b { true } else { false }) {
if !bounds
.iter()
.any(|b| if let GenericBound::Trait(..) = *b { true } else { false })
{
self.err_handler().span_err(ty.span, "at least one trait must be specified");
}
@ -527,7 +561,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
fn visit_item(&mut self, item: &'a Item) {
if item.attrs.iter().any(|attr| is_proc_macro_attr(attr) ) {
if item.attrs.iter().any(|attr| is_proc_macro_attr(attr)) {
self.has_proc_macro_decls = true;
}
@ -537,7 +571,8 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if let TyKind::Err = ty.kind {
self.err_handler()
.struct_span_err(item.span, "`impl Trait for .. {}` is an obsolete syntax")
.help("use `auto trait Trait {}` instead").emit();
.help("use `auto trait Trait {}` instead")
.emit();
}
if unsafety == Unsafety::Unsafe && polarity == ImplPolarity::Negative {
span_err!(self.session, item.span, E0198, "negative impls cannot be unsafe");
@ -551,8 +586,10 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
}
ItemKind::Impl(unsafety, polarity, defaultness, _, None, _, _) => {
self.invalid_visibility(&item.vis,
Some("place qualifiers on individual impl items instead"));
self.invalid_visibility(
&item.vis,
Some("place qualifiers on individual impl items instead"),
);
if unsafety == Unsafety::Unsafe {
span_err!(self.session, item.span, E0197, "inherent impls cannot be unsafe");
}
@ -562,7 +599,8 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if defaultness == Defaultness::Default {
self.err_handler()
.struct_span_err(item.span, "inherent impls cannot be default")
.note("only trait implementations may be annotated with default").emit();
.note("only trait implementations may be annotated with default")
.emit();
}
}
ItemKind::Fn(ref sig, ref generics, _) => {
@ -588,8 +626,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
// Reject C-variadic type unless the function is `unsafe extern "C"` semantically.
match sig.header.ext {
Extern::Explicit(StrLit { symbol_unescaped: sym::C, .. }) |
Extern::Implicit if sig.header.unsafety == Unsafety::Unsafe => {}
Extern::Explicit(StrLit { symbol_unescaped: sym::C, .. })
| Extern::Implicit
if sig.header.unsafety == Unsafety::Unsafe => {}
_ => self.check_c_varadic_type(&sig.decl),
}
}
@ -611,19 +650,31 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if is_auto == IsAuto::Yes {
// Auto traits cannot have generics, super traits nor contain items.
if !generics.params.is_empty() {
struct_span_err!(self.session, item.span, E0567,
struct_span_err!(
self.session,
item.span,
E0567,
"auto traits cannot have generic parameters"
).emit();
)
.emit();
}
if !bounds.is_empty() {
struct_span_err!(self.session, item.span, E0568,
struct_span_err!(
self.session,
item.span,
E0568,
"auto traits cannot have super traits"
).emit();
)
.emit();
}
if !trait_items.is_empty() {
struct_span_err!(self.session, item.span, E0380,
struct_span_err!(
self.session,
item.span,
E0380,
"auto traits cannot have methods or associated items"
).emit();
)
.emit();
}
}
self.no_questions_in_bounds(bounds, "supertraits", true);
@ -634,12 +685,11 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
ItemKind::Union(ref vdata, _) => {
if let VariantData::Tuple(..) | VariantData::Unit(..) = vdata {
self.err_handler().span_err(item.span,
"tuple and unit unions are not permitted");
self.err_handler()
.span_err(item.span, "tuple and unit unions are not permitted");
}
if vdata.fields().is_empty() {
self.err_handler().span_err(item.span,
"unions cannot have zero fields");
self.err_handler().span_err(item.span, "unions cannot have zero fields");
}
}
_ => {}
@ -653,9 +703,14 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
ForeignItemKind::Fn(ref decl, _) => {
self.check_fn_decl(decl);
Self::check_decl_no_pat(decl, |span, _| {
struct_span_err!(self.session, span, E0130,
"patterns aren't allowed in foreign function declarations")
.span_label(span, "pattern not allowed in foreign function").emit();
struct_span_err!(
self.session,
span,
E0130,
"patterns aren't allowed in foreign function declarations"
)
.span_label(span, "pattern not allowed in foreign function")
.emit();
});
}
ForeignItemKind::Static(..) | ForeignItemKind::Ty | ForeignItemKind::Macro(..) => {}
@ -673,11 +728,16 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.session,
self.err_handler(),
data.args.iter().map(|arg| {
(match arg {
GenericArg::Lifetime(..) => ParamKindOrd::Lifetime,
GenericArg::Type(..) => ParamKindOrd::Type,
GenericArg::Const(..) => ParamKindOrd::Const,
}, None, arg.span(), None)
(
match arg {
GenericArg::Lifetime(..) => ParamKindOrd::Lifetime,
GenericArg::Type(..) => ParamKindOrd::Type,
GenericArg::Const(..) => ParamKindOrd::Const,
},
None,
arg.span(),
None,
)
}),
GenericPosition::Arg,
generic_args.span(),
@ -686,8 +746,11 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
// Type bindings such as `Item = impl Debug` in `Iterator<Item = Debug>`
// are allowed to contain nested `impl Trait`.
self.with_impl_trait(None, |this| {
walk_list!(this, visit_assoc_ty_constraint_from_generic_args,
&data.constraints);
walk_list!(
this,
visit_assoc_ty_constraint_from_generic_args,
&data.constraints
);
});
}
GenericArgs::Parenthesized(ref data) => {
@ -790,10 +853,16 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.with_banned_assoc_ty_bound(|this| visit::walk_struct_def(this, s))
}
fn visit_enum_def(&mut self, enum_definition: &'a EnumDef,
generics: &'a Generics, item_id: NodeId, _: Span) {
self.with_banned_assoc_ty_bound(
|this| visit::walk_enum_def(this, enum_definition, generics, item_id))
fn visit_enum_def(
&mut self,
enum_definition: &'a EnumDef,
generics: &'a Generics,
item_id: NodeId,
_: Span,
) {
self.with_banned_assoc_ty_bound(|this| {
visit::walk_enum_def(this, enum_definition, generics, item_id)
})
}
fn visit_impl_item(&mut self, ii: &'a AssocItem) {
@ -827,14 +896,18 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if mut_ident {
self.lint_buffer.buffer_lint(
lint::builtin::PATTERNS_IN_FNS_WITHOUT_BODY,
ti.id, span,
"patterns aren't allowed in methods without bodies"
ti.id,
span,
"patterns aren't allowed in methods without bodies",
);
} else {
struct_span_err!(
self.session, span, E0642,
self.session,
span,
E0642,
"patterns aren't allowed in methods without bodies"
).emit();
)
.emit();
}
});
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,9 +1,9 @@
use crate::check::{FnCtxt, Expectation, Diverges, Needs};
use crate::check::coercion::CoerceMany;
use crate::check::{Diverges, Expectation, FnCtxt, Needs};
use rustc::hir::{self, ExprKind};
use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc::traits::ObligationCauseCode;
use rustc::traits::{IfExpressionCause, MatchExpressionArmCause, ObligationCause};
use rustc::traits::{ObligationCauseCode};
use rustc::ty::Ty;
use syntax_pos::Span;
@ -56,20 +56,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// rust-lang/rust#55810: Typecheck patterns first (via eager
// collection into `Vec`), so we get types for all bindings.
let all_arm_pats_diverge: Vec<_> = arms.iter().map(|arm| {
let mut all_pats_diverge = Diverges::WarnedAlways;
self.diverges.set(Diverges::Maybe);
self.check_pat_top(&arm.pat, discrim_ty, Some(discrim.span));
all_pats_diverge &= self.diverges.get();
let all_arm_pats_diverge: Vec<_> = arms
.iter()
.map(|arm| {
let mut all_pats_diverge = Diverges::WarnedAlways;
self.diverges.set(Diverges::Maybe);
self.check_pat_top(&arm.pat, discrim_ty, Some(discrim.span));
all_pats_diverge &= self.diverges.get();
// As discussed with @eddyb, this is for disabling unreachable_code
// warnings on patterns (they're now subsumed by unreachable_patterns
// warnings).
match all_pats_diverge {
Diverges::Maybe => Diverges::Maybe,
Diverges::Always { .. } | Diverges::WarnedAlways => Diverges::WarnedAlways,
}
}).collect();
// As discussed with @eddyb, this is for disabling unreachable_code
// warnings on patterns (they're now subsumed by unreachable_patterns
// warnings).
match all_pats_diverge {
Diverges::Maybe => Diverges::Maybe,
Diverges::Always { .. } | Diverges::WarnedAlways => Diverges::WarnedAlways,
}
})
.collect();
// Now typecheck the blocks.
//
@ -100,7 +103,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
CoerceMany::with_coercion_sites(coerce_first, arms)
};
let mut other_arms = vec![]; // used only for diagnostics
let mut other_arms = vec![]; // used only for diagnostics
let mut prior_arm_ty = None;
for (i, (arm, pats_diverge)) in arms.iter().zip(all_arm_pats_diverge).enumerate() {
if let Some(g) = &arm.guard {
@ -113,11 +116,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
self.diverges.set(pats_diverge);
let arm_ty = if source_if && if_no_else && i != 0 && self.if_fallback_coercion(
expr.span,
&arms[0].body,
&mut coercion,
) {
let arm_ty = if source_if
&& if_no_else
&& i != 0
&& self.if_fallback_coercion(expr.span, &arms[0].body, &mut coercion)
{
tcx.types.err
} else {
// Only call this if this is not an `if` expr with an expected type and no `else`
@ -147,15 +150,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// The reason for the first arm to fail is not that the match arms diverge,
// but rather that there's a prior obligation that doesn't hold.
0 => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)),
_ => (expr.span,
ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
_ => (
expr.span,
ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
arm_span,
source: match_src,
prior_arms: other_arms.clone(),
last_ty: prior_arm_ty.unwrap(),
discrim_hir_id: discrim.hir_id,
})
),
}),
),
};
let cause = self.cause(span, code);
coercion.coerce(self, &cause, &arm.body, arm_ty);
@ -177,8 +181,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
all_arms_diverge = Diverges::Always {
span: expr.span,
custom_note: Some(
"any code following this `match` expression is unreachable, as all arms diverge"
)
"any code following this `match` expression is unreachable, as all arms diverge",
),
};
}
@ -218,18 +222,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, span);
let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse);
let mut error = false;
coercion.coerce_forced_unit(self, &cause, &mut |err| {
if let Some((span, msg)) = &ret_reason {
err.span_label(*span, msg.as_str());
} else if let ExprKind::Block(block, _) = &then_expr.kind {
if let Some(expr) = &block.expr {
err.span_label(expr.span, "found here".to_string());
coercion.coerce_forced_unit(
self,
&cause,
&mut |err| {
if let Some((span, msg)) = &ret_reason {
err.span_label(*span, msg.as_str());
} else if let ExprKind::Block(block, _) = &then_expr.kind {
if let Some(expr) = &block.expr {
err.span_label(expr.span, "found here".to_string());
}
}
}
err.note("`if` expressions without `else` evaluate to `()`");
err.help("consider adding an `else` block that evaluates to the expected type");
error = true;
}, ret_reason.is_none());
err.note("`if` expressions without `else` evaluate to `()`");
err.help("consider adding an `else` block that evaluates to the expected type");
error = true;
},
ret_reason.is_none(),
);
error
}
@ -244,20 +253,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let node = hir.get(containing_id);
if let Block(block) = node {
// check that the body's parent is an fn
let parent = hir.get(
hir.get_parent_node(
hir.get_parent_node(block.hir_id),
),
);
if let (Some(expr), Item(hir::Item {
kind: hir::ItemKind::Fn(..), ..
})) = (&block.expr, parent) {
let parent = hir.get(hir.get_parent_node(hir.get_parent_node(block.hir_id)));
if let (Some(expr), Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })) =
(&block.expr, parent)
{
// check that the `if` expr without `else` is the fn body's expr
if expr.span == span {
return self.get_fn_decl(hir_id).map(|(fn_decl, _)| (
fn_decl.output.span(),
format!("expected `{}` because of this return type", fn_decl.output),
));
return self.get_fn_decl(hir_id).map(|(fn_decl, _)| {
(
fn_decl.output.span(),
format!("expected `{}` because of this return type", fn_decl.output),
)
});
}
}
}
@ -309,7 +316,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// possibly incorrect trailing `;` in the else arm
remove_semicolon = self.could_remove_semicolon(block, then_ty);
stmt.span
} else { // empty block; point at its entirety
} else {
// empty block; point at its entirety
// Avoid overlapping spans that aren't as readable:
// ```
// 2 | let x = if true {
@ -342,7 +350,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
else_expr.span
}
} else { // shouldn't happen unless the parser has done something weird
} else {
// shouldn't happen unless the parser has done something weird
else_expr.span
};
@ -354,20 +363,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// possibly incorrect trailing `;` in the else arm
remove_semicolon = remove_semicolon.or(self.could_remove_semicolon(block, else_ty));
stmt.span
} else { // empty block; point at its entirety
outer_sp = None; // same as in `error_sp`; cleanup output
} else {
// empty block; point at its entirety
outer_sp = None; // same as in `error_sp`; cleanup output
then_expr.span
}
} else { // shouldn't happen unless the parser has done something weird
} else {
// shouldn't happen unless the parser has done something weird
then_expr.span
};
// Finally construct the cause:
self.cause(error_sp, ObligationCauseCode::IfExpression(box IfExpressionCause {
then: then_sp,
outer: outer_sp,
semicolon: remove_semicolon,
}))
self.cause(
error_sp,
ObligationCauseCode::IfExpression(box IfExpressionCause {
then: then_sp,
outer: outer_sp,
semicolon: remove_semicolon,
}),
)
}
fn demand_discriminant_type(
@ -427,7 +441,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// (once introduced) is populated by the time we get here.
//
// See #44848.
let contains_ref_bindings = arms.iter()
let contains_ref_bindings = arms
.iter()
.filter_map(|a| a.pat.contains_explicit_ref_binding())
.max_by_key(|m| match *m {
hir::Mutability::Mut => 1,


@ -55,23 +55,23 @@ use errors::DiagnosticBuilder;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::ptr::P;
use rustc::infer::{Coercion, InferResult, InferOk};
use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc::infer::{Coercion, InferOk, InferResult};
use rustc::traits::{self, ObligationCause, ObligationCauseCode};
use rustc::ty::adjustment::{
Adjustment, Adjust, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast,
};
use rustc::ty::{self, TypeAndMut, Ty};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::error::TypeError;
use rustc::ty::fold::TypeFoldable;
use rustc::ty::relate::RelateResult;
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, Ty, TypeAndMut};
use rustc_target::spec::abi::Abi;
use smallvec::{smallvec, SmallVec};
use std::ops::Deref;
use syntax::feature_gate;
use syntax::symbol::sym;
use syntax_pos;
use rustc_target::spec::abi::Abi;
use rustc_error_codes::*;
@ -97,31 +97,32 @@ impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;
fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability,
to_mutbl: hir::Mutability)
-> RelateResult<'tcx, ()> {
fn coerce_mutbls<'tcx>(
from_mutbl: hir::Mutability,
to_mutbl: hir::Mutability,
) -> RelateResult<'tcx, ()> {
match (from_mutbl, to_mutbl) {
(hir::Mutability::Mut, hir::Mutability::Mut) |
(hir::Mutability::Not, hir::Mutability::Not) |
(hir::Mutability::Mut, hir::Mutability::Not) => Ok(()),
(hir::Mutability::Mut, hir::Mutability::Mut)
| (hir::Mutability::Not, hir::Mutability::Not)
| (hir::Mutability::Mut, hir::Mutability::Not) => Ok(()),
(hir::Mutability::Not, hir::Mutability::Mut) => Err(TypeError::Mutability),
}
}
fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> { vec![] }
fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> {
vec![]
}
fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
move |target| vec![Adjustment { kind, target }]
}
fn success<'tcx>(adj: Vec<Adjustment<'tcx>>,
target: Ty<'tcx>,
obligations: traits::PredicateObligations<'tcx>)
-> CoerceResult<'tcx> {
Ok(InferOk {
value: (adj, target),
obligations
})
fn success<'tcx>(
adj: Vec<Adjustment<'tcx>>,
target: Ty<'tcx>,
obligations: traits::PredicateObligations<'tcx>,
) -> CoerceResult<'tcx> {
Ok(InferOk { value: (adj, target), obligations })
}
impl<'f, 'tcx> Coerce<'f, 'tcx> {
@ -130,12 +131,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
cause: ObligationCause<'tcx>,
allow_two_phase: AllowTwoPhase,
) -> Self {
Coerce {
fcx,
cause,
allow_two_phase,
use_lub: false,
}
Coerce { fcx, cause, allow_two_phase, use_lub: false }
}
fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
@ -151,13 +147,12 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
}
/// Unify two types (using sub or lub) and produce a specific coercion.
fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F)
-> CoerceResult<'tcx>
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F) -> CoerceResult<'tcx>
where
F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
{
self.unify(&a, &b).and_then(|InferOk { value: ty, obligations }| {
success(f(ty), ty, obligations)
})
self.unify(&a, &b)
.and_then(|InferOk { value: ty, obligations }| success(f(ty), ty, obligations))
}
fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
@ -181,12 +176,10 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
return if self.shallow_resolve(b).is_ty_var() {
// Micro-optimization: no need for this if `b` is
// already resolved in some way.
let diverging_ty = self.next_diverging_ty_var(
TypeVariableOrigin {
kind: TypeVariableOriginKind::AdjustmentType,
span: self.cause.span,
},
);
let diverging_ty = self.next_diverging_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::AdjustmentType,
span: self.cause.span,
});
self.unify_and(&b, &diverging_ty, simple(Adjust::NeverToAny))
} else {
success(simple(Adjust::NeverToAny)(b), b, vec![])
@ -259,13 +252,13 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
r_b: ty::Region<'tcx>,
mt_b: TypeAndMut<'tcx>)
-> CoerceResult<'tcx>
{
fn coerce_borrowed_pointer(
&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
r_b: ty::Region<'tcx>,
mt_b: TypeAndMut<'tcx>,
) -> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b);
// If we have a parameter of type `&M T_a` and the value
@ -380,11 +373,13 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
}
r_borrow_var.unwrap()
};
let derefd_ty_a = self.tcx.mk_ref(r,
TypeAndMut {
ty: referent_ty,
mutbl: mt_b.mutbl, // [1] above
});
let derefd_ty_a = self.tcx.mk_ref(
r,
TypeAndMut {
ty: referent_ty,
mutbl: mt_b.mutbl, // [1] above
},
);
match self.unify(derefd_ty_a, b) {
Ok(ok) => {
found = Some(ok);
@ -429,8 +424,8 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
}
let needs = Needs::maybe_mut_place(mt_b.mutbl);
let InferOk { value: mut adjustments, obligations: o }
= autoderef.adjust_steps_as_infer_ok(self, needs);
let InferOk { value: mut adjustments, obligations: o } =
autoderef.adjust_steps_as_infer_ok(self, needs);
obligations.extend(o);
obligations.extend(autoderef.into_obligations());
@ -442,31 +437,28 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
};
let mutbl = match mt_b.mutbl {
hir::Mutability::Not => AutoBorrowMutability::Not,
hir::Mutability::Mut => AutoBorrowMutability::Mut {
allow_two_phase_borrow: self.allow_two_phase,
hir::Mutability::Mut => {
AutoBorrowMutability::Mut { allow_two_phase_borrow: self.allow_two_phase }
}
};
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
target: ty
target: ty,
});
debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}",
ty,
adjustments);
debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments);
success(adjustments, ty, obligations)
}
// &[T; n] or &mut [T; n] -> &[T]
// or &mut [T; n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self, source: Ty<'tcx>, target: Ty<'tcx>) -> CoerceResult<'tcx> {
debug!("coerce_unsized(source={:?}, target={:?})", source, target);
let traits = (self.tcx.lang_items().unsize_trait(),
self.tcx.lang_items().coerce_unsized_trait());
let traits =
(self.tcx.lang_items().unsize_trait(), self.tcx.lang_items().coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
@ -493,32 +485,28 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// implementation. If it happens that this coercion is a function argument,
// the reborrow in coerce_borrowed_ptr will pick it up.
allow_two_phase_borrow: AllowTwoPhase::No,
}
},
};
Some((Adjustment {
kind: Adjust::Deref(None),
target: ty_a
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
target: self.tcx.mk_ref(r_borrow, ty::TypeAndMut {
mutbl: mutbl_b,
ty: ty_a
})
}))
Some((
Adjustment { kind: Adjust::Deref(None), target: ty_a },
Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
target: self
.tcx
.mk_ref(r_borrow, ty::TypeAndMut { mutbl: mutbl_b, ty: ty_a }),
},
))
}
(&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
coerce_mutbls(mt_a, mt_b)?;
Some((Adjustment {
kind: Adjust::Deref(None),
target: ty_a
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)),
target: self.tcx.mk_ptr(ty::TypeAndMut {
mutbl: mt_b,
ty: ty_a
})
}))
Some((
Adjustment { kind: Adjust::Deref(None), target: ty_a },
Adjustment {
kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)),
target: self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mt_b, ty: ty_a }),
},
))
}
_ => None,
};
@ -534,15 +522,10 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
};
let coerce_target = self.next_ty_var(origin);
let mut coercion = self.unify_and(coerce_target, target, |target| {
let unsize = Adjustment {
kind: Adjust::Pointer(PointerCast::Unsize),
target
};
let unsize = Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target };
match reborrow {
None => vec![unsize],
Some((ref deref, ref autoref)) => {
vec![deref.clone(), autoref.clone(), unsize]
}
Some((ref deref, ref autoref)) => vec![deref.clone(), autoref.clone(), unsize],
}
})?;
@ -562,13 +545,14 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// and almost never more than 3. By using a SmallVec we avoid an
// allocation, at the (very small) cost of (occasionally) having to
// shift subsequent elements down when removing the front element.
let mut queue: SmallVec<[_; 4]> =
smallvec![self.tcx.predicate_for_trait_def(self.fcx.param_env,
cause,
coerce_unsized_did,
0,
coerce_source,
&[coerce_target.into()])];
let mut queue: SmallVec<[_; 4]> = smallvec![self.tcx.predicate_for_trait_def(
self.fcx.param_env,
cause,
coerce_unsized_did,
0,
coerce_source,
&[coerce_target.into()]
)];
let mut has_unsized_tuple_coercion = false;
@ -604,8 +588,9 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
let unsize_ty = trait_ref.skip_binder().input_types().nth(1).unwrap();
debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_ref);
match (&self_ty.kind, &unsize_ty.kind) {
(ty::Infer(ty::TyVar(v)),
ty::Dynamic(..)) if self.type_var_is_sized(*v) => {
(ty::Infer(ty::TyVar(v)), ty::Dynamic(..))
if self.type_var_is_sized(*v) =>
{
debug!("coerce_unsized: have sized infer {:?}", v);
coercion.obligations.push(obligation);
// `$0: Unsize<dyn Trait>` where we know that `$0: Sized`, try going
@ -637,9 +622,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
queue.extend(vtable.nested_obligations())
}
Ok(Some(vtable)) => queue.extend(vtable.nested_obligations()),
}
}
@ -656,19 +639,21 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
Ok(coercion)
}
fn coerce_from_safe_fn<F, G>(&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>,
to_unsafe: F,
normal: G)
-> CoerceResult<'tcx>
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
fn coerce_from_safe_fn<F, G>(
&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>,
to_unsafe: F,
normal: G,
) -> CoerceResult<'tcx>
where
F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
{
if let ty::FnPtr(fn_ty_b) = b.kind {
if let (hir::Unsafety::Normal, hir::Unsafety::Unsafe)
= (fn_ty_a.unsafety(), fn_ty_b.unsafety())
if let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) =
(fn_ty_a.unsafety(), fn_ty_b.unsafety())
{
let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
return self.unify_and(unsafe_a, b, to_unsafe);
@ -677,11 +662,12 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
self.unify_and(a, b, normal)
}
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
fn coerce_from_fn_pointer(
&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>,
) -> CoerceResult<'tcx> {
//! Attempts to coerce from the type of a Rust function item
//! into a closure or a `proc`.
//!
@ -689,14 +675,16 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
let b = self.shallow_resolve(b);
debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
self.coerce_from_safe_fn(a, fn_ty_a, b,
simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)), identity)
self.coerce_from_safe_fn(
a,
fn_ty_a,
b,
simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)),
identity,
)
}
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
//! Attempts to coerce from the type of a Rust function item
//! into a closure or a `proc`.
@ -707,9 +695,8 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
ty::FnPtr(_) => {
let a_sig = a.fn_sig(self.tcx);
// Intrinsics are not coercible to function pointers
if a_sig.abi() == Abi::RustIntrinsic ||
a_sig.abi() == Abi::PlatformIntrinsic {
return Err(TypeError::IntrinsicCast);
if a_sig.abi() == Abi::RustIntrinsic || a_sig.abi() == Abi::PlatformIntrinsic {
return Err(TypeError::IntrinsicCast);
}
let InferOk { value: a_sig, mut obligations } =
self.normalize_associated_types_in_as_infer_ok(self.cause.span, &a_sig);
@ -723,15 +710,15 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
vec![
Adjustment {
kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
target: a_fn_pointer
target: a_fn_pointer,
},
Adjustment {
kind: Adjust::Pointer(PointerCast::UnsafeFnPointer),
target: unsafe_ty
target: unsafe_ty,
},
]
},
simple(Adjust::Pointer(PointerCast::ReifyFnPointer))
simple(Adjust::Pointer(PointerCast::ReifyFnPointer)),
)?;
obligations.extend(o2);
@ -741,12 +728,13 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
}
}
fn coerce_closure_to_fn(&self,
a: Ty<'tcx>,
def_id_a: DefId,
substs_a: SubstsRef<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
fn coerce_closure_to_fn(
&self,
a: Ty<'tcx>,
def_id_a: DefId,
substs_a: SubstsRef<'tcx>,
b: Ty<'tcx>,
) -> CoerceResult<'tcx> {
//! Attempts to coerce from the type of a non-capturing closure
//! into a function pointer.
//!
@ -764,52 +752,46 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
let sig = self.closure_sig(def_id_a, substs_a);
let unsafety = fn_ty.unsafety();
let pointer_ty = self.tcx.coerce_closure_fn_ty(sig, unsafety);
debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})",
a, b, pointer_ty);
self.unify_and(pointer_ty, b, simple(
Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))
))
debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty);
self.unify_and(
pointer_ty,
b,
simple(Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))),
)
}
_ => self.unify_and(a, b, identity),
}
}
fn coerce_unsafe_ptr(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: hir::Mutability)
-> CoerceResult<'tcx> {
fn coerce_unsafe_ptr(
&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: hir::Mutability,
) -> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
let (is_ref, mt_a) = match a.kind {
ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
ty::RawPtr(mt) => (false, mt),
_ => return self.unify_and(a, b, identity)
_ => return self.unify_and(a, b, identity),
};
// Check that the types which they point at are compatible.
let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut {
mutbl: mutbl_b,
ty: mt_a.ty,
});
let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mutbl_b, ty: mt_a.ty });
coerce_mutbls(mt_a.mutbl, mutbl_b)?;
// Although references and unsafe ptrs have the same
// representation, we still register an Adjust::DerefRef so that
// regionck knows that the region for `a` must be valid here.
if is_ref {
self.unify_and(a_unsafe, b, |target| {
vec![Adjustment {
kind: Adjust::Deref(None),
target: mt_a.ty
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)),
target
}]
vec![
Adjustment { kind: Adjust::Deref(None), target: mt_a.ty },
Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)), target },
]
})
} else if mt_a.mutbl != mutbl_b {
self.unify_and(
a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer))
)
self.unify_and(a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer)))
} else {
self.unify_and(a_unsafe, b, identity)
}
@ -837,11 +819,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let (adjustments, _) = self.register_infer_ok_obligations(ok);
self.apply_adjustments(expr, adjustments);
Ok(if expr_ty.references_error() {
self.tcx.types.err
} else {
target
})
Ok(if expr_ty.references_error() { self.tcx.types.err } else { target })
}
/// Same as `try_coerce()`, but without side-effects.
@ -861,14 +839,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
///
/// This is really an internal helper. From outside the coercion
/// module, you should instantiate a `CoerceMany` instance.
fn try_find_coercion_lub<E>(&self,
cause: &ObligationCause<'tcx>,
exprs: &[E],
prev_ty: Ty<'tcx>,
new: &hir::Expr,
new_ty: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where E: AsCoercionSite
fn try_find_coercion_lub<E>(
&self,
cause: &ObligationCause<'tcx>,
exprs: &[E],
prev_ty: Ty<'tcx>,
new: &hir::Expr,
new_ty: Ty<'tcx>,
) -> RelateResult<'tcx, Ty<'tcx>>
where
E: AsCoercionSite,
{
let prev_ty = self.resolve_vars_with_obligations(prev_ty);
let new_ty = self.resolve_vars_with_obligations(new_ty);
@ -879,10 +859,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let (&ty::FnDef(..), &ty::FnDef(..)) = (&prev_ty.kind, &new_ty.kind) {
// Don't reify if the function types have a LUB, i.e., they
// are the same function and their parameters have a LUB.
let lub_ty = self.commit_if_ok(|_| {
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
}).map(|ok| self.register_infer_ok_obligations(ok));
let lub_ty = self
.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
.map(|ok| self.register_infer_ok_obligations(ok));
if lub_ty.is_ok() {
// We have a LUB of prev_ty and new_ty, just return it.
@ -894,20 +873,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let a_sig = self.normalize_associated_types_in(new.span, &a_sig);
let b_sig = new_ty.fn_sig(self.tcx);
let b_sig = self.normalize_associated_types_in(new.span, &b_sig);
let sig = self.at(cause, self.param_env)
.trace(prev_ty, new_ty)
.lub(&a_sig, &b_sig)
.map(|ok| self.register_infer_ok_obligations(ok))?;
let sig = self
.at(cause, self.param_env)
.trace(prev_ty, new_ty)
.lub(&a_sig, &b_sig)
.map(|ok| self.register_infer_ok_obligations(ok))?;
// Reify both sides and return the reified fn pointer type.
let fn_ptr = self.tcx.mk_fn_ptr(sig);
for expr in exprs.iter().map(|e| e.as_coercion_site()).chain(Some(new)) {
// The only adjustment that can produce an fn item is
// `NeverToAny`, so this should always be valid.
self.apply_adjustments(expr, vec![Adjustment {
kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
target: fn_ptr
}]);
self.apply_adjustments(
expr,
vec![Adjustment {
kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
target: fn_ptr,
}],
);
}
return Ok(fn_ptr);
}
@ -941,10 +924,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
for expr in exprs {
let expr = expr.as_coercion_site();
let noop = match self.tables.borrow().expr_adjustments(expr) {
&[
Adjustment { kind: Adjust::Deref(_), .. },
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }
] => {
&[Adjustment { kind: Adjust::Deref(_), .. }, Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }] =>
{
match self.node_ty(expr.hir_id).kind {
ty::Ref(_, _, mt_orig) => {
let mutbl_adj: hir::Mutability = mutbl_adj.into();
@ -961,10 +942,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
if !noop {
return self.commit_if_ok(|_|
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
).map(|ok| self.register_infer_ok_obligations(ok));
return self
.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
.map(|ok| self.register_infer_ok_obligations(ok));
}
}
@ -974,10 +954,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some(e) = first_error {
Err(e)
} else {
self.commit_if_ok(|_|
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
).map(|ok| self.register_infer_ok_obligations(ok))
self.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
.map(|ok| self.register_infer_ok_obligations(ok))
}
}
Ok(ok) => {
@ -1062,19 +1040,12 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
/// expected to pass each element in the slice to `coerce(...)` in
/// order. This is used with arrays in particular to avoid
/// needlessly cloning the slice.
pub fn with_coercion_sites(expected_ty: Ty<'tcx>,
coercion_sites: &'exprs [E])
-> Self {
pub fn with_coercion_sites(expected_ty: Ty<'tcx>, coercion_sites: &'exprs [E]) -> Self {
Self::make(expected_ty, Expressions::UpFront(coercion_sites))
}
fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self {
CoerceMany {
expected_ty,
final_ty: None,
expressions,
pushed: 0,
}
CoerceMany { expected_ty, final_ty: None, expressions, pushed: 0 }
}
/// Returns the "expected type" with which this coercion was
@ -1109,11 +1080,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expression: &'tcx hir::Expr,
expression_ty: Ty<'tcx>,
) {
self.coerce_inner(fcx,
cause,
Some(expression),
expression_ty,
None, false)
self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false)
}
/// Indicates that one of the inputs is a "forced unit". This
@ -1135,12 +1102,14 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
augment_error: &mut dyn FnMut(&mut DiagnosticBuilder<'_>),
label_unit_as_expected: bool,
) {
self.coerce_inner(fcx,
cause,
None,
fcx.tcx.mk_unit(),
Some(augment_error),
label_unit_as_expected)
self.coerce_inner(
fcx,
cause,
None,
fcx.tcx.mk_unit(),
Some(augment_error),
label_unit_as_expected,
)
}
/// The inner coercion "engine". If `expression` is `None`, this
@ -1212,11 +1181,11 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
// Another example is `break` with no argument expression.
assert!(expression_ty.is_unit(), "if let hack without unit type");
fcx.at(cause, fcx.param_env)
.eq_exp(label_expression_as_expected, expression_ty, self.merged_ty())
.map(|infer_ok| {
fcx.register_infer_ok_obligations(infer_ok);
expression_ty
})
.eq_exp(label_expression_as_expected, expression_ty, self.merged_ty())
.map(|infer_ok| {
fcx.register_infer_ok_obligations(infer_ok);
expression_ty
})
};
match result {
@ -1228,8 +1197,10 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
Expressions::UpFront(coercion_sites) => {
// if the user gave us an array to validate, check that we got
// the next expression in the list, as expected
assert_eq!(coercion_sites[self.pushed].as_coercion_site().hir_id,
e.hir_id);
assert_eq!(
coercion_sites[self.pushed].as_coercion_site().hir_id,
e.hir_id
);
}
}
self.pushed += 1;
@ -1255,8 +1226,11 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
match cause.code {
ObligationCauseCode::ReturnNoExpression => {
err = struct_span_err!(
fcx.tcx.sess, cause.span, E0069,
"`return;` in a function whose return type is not `()`");
fcx.tcx.sess,
cause.span,
E0069,
"`return;` in a function whose return type is not `()`"
);
err.span_label(cause.span, "return type is not `()`");
}
ObligationCauseCode::BlockTailExpression(blk_id) => {
@ -1273,7 +1247,14 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
}
ObligationCauseCode::ReturnValue(id) => {
err = self.report_return_mismatched_types(
cause, expected, found, coercion_error, fcx, id, None);
cause,
expected,
found,
coercion_error,
fcx,
id,
None,
);
}
_ => {
err = fcx.report_mismatched_types(cause, expected, found, coercion_error);
@ -1330,12 +1311,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
let parent_id = fcx.tcx.hir().get_parent_node(id);
let fn_decl = if let Some((expr, blk_id)) = expression {
pointing_at_return_type = fcx.suggest_mismatched_types_on_tail(
&mut err,
expr,
expected,
found,
cause.span,
blk_id,
&mut err, expr, expected, found, cause.span, blk_id,
);
let parent = fcx.tcx.hir().get(parent_id);
if let (Some(match_expr), true, false) = (
@ -1356,7 +1332,12 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
if let (Some((fn_decl, can_suggest)), _) = (fn_decl, pointing_at_return_type) {
if expression.is_none() {
pointing_at_return_type |= fcx.suggest_missing_return_type(
&mut err, &fn_decl, expected, found, can_suggest);
&mut err,
&fn_decl,
expected,
found,
can_suggest,
);
}
if !pointing_at_return_type {
return_sp = Some(fn_decl.output.span()); // `impl Trait` return type
@ -1403,7 +1384,8 @@ impl AsCoercionSite for P<hir::Expr> {
}
impl<'a, T> AsCoercionSite for &'a T
where T: AsCoercionSite
where
T: AsCoercionSite,
{
fn as_coercion_site(&self) -> &hir::Expr {
(**self).as_coercion_site()


@ -1,17 +1,17 @@
use super::{probe, MethodCallee};
use crate::astconv::AstConv;
use crate::check::{FnCtxt, PlaceOp, callee, Needs};
use crate::hir::GenericArg;
use crate::check::{callee, FnCtxt, Needs, PlaceOp};
use crate::hir::def_id::DefId;
use rustc::ty::subst::{Subst, SubstsRef};
use crate::hir::GenericArg;
use rustc::hir;
use rustc::infer::{self, InferOk};
use rustc::traits;
use rustc::ty::{self, Ty, GenericParamDefKind};
use rustc::ty::adjustment::{Adjustment, Adjust, OverloadedDeref, PointerCast};
use rustc::ty::adjustment::{Adjust, Adjustment, OverloadedDeref, PointerCast};
use rustc::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc::ty::fold::TypeFoldable;
use rustc::infer::{self, InferOk};
use rustc::hir;
use rustc::ty::subst::{Subst, SubstsRef};
use rustc::ty::{self, GenericParamDefKind, Ty};
use syntax_pos::Span;
use std::ops::Deref;
@ -47,9 +47,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> ConfirmResult<'tcx> {
debug!(
"confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})",
unadjusted_self_ty,
pick,
segment.args,
unadjusted_self_ty, pick, segment.args,
);
let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr);
@ -64,12 +62,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
self_expr: &'tcx hir::Expr,
call_expr: &'tcx hir::Expr,
) -> ConfirmContext<'a, 'tcx> {
ConfirmContext {
fcx,
span,
self_expr,
call_expr,
}
ConfirmContext { fcx, span, self_expr, call_expr }
}
fn confirm(
@ -125,11 +118,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
}
// Create the final `MethodCallee`.
let callee = MethodCallee {
def_id: pick.item.def_id,
substs: all_substs,
sig: method_sig,
};
let callee = MethodCallee { def_id: pick.item.def_id, substs: all_substs, sig: method_sig };
if let Some(hir::Mutability::Mut) = pick.autoref {
self.convert_place_derefs_to_mutable();
@ -167,32 +156,23 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
if let Some(mutbl) = pick.autoref {
let region = self.next_region_var(infer::Autoref(self.span));
target = self.tcx.mk_ref(region, ty::TypeAndMut {
mutbl,
ty: target
});
target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl, ty: target });
let mutbl = match mutbl {
hir::Mutability::Not => AutoBorrowMutability::Not,
hir::Mutability::Mut => AutoBorrowMutability::Mut {
// Method call receivers are the primary use case
// for two-phase borrows.
allow_two_phase_borrow: AllowTwoPhase::Yes,
}
},
};
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
target
});
adjustments
.push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)), target });
if let Some(unsize_target) = pick.unsize {
target = self.tcx.mk_ref(region, ty::TypeAndMut {
mutbl: mutbl.into(),
ty: unsize_target
});
adjustments.push(Adjustment {
kind: Adjust::Pointer(PointerCast::Unsize),
target
});
target = self
.tcx
.mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsize_target });
adjustments.push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target });
}
} else {
// No unsizing should be performed without autoref (at
@ -216,16 +196,19 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
///
/// Note that this substitution may include late-bound regions from the impl level. If so,
/// these are instantiated later in the `instantiate_method_sig` routine.
fn fresh_receiver_substs(&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>)
-> SubstsRef<'tcx> {
fn fresh_receiver_substs(
&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>,
) -> SubstsRef<'tcx> {
match pick.kind {
probe::InherentImplPick => {
let impl_def_id = pick.item.container.id();
assert!(self.tcx.impl_trait_ref(impl_def_id).is_none(),
"impl {:?} is not an inherent impl",
impl_def_id);
assert!(
self.tcx.impl_trait_ref(impl_def_id).is_none(),
"impl {:?} is not an inherent impl",
impl_def_id
);
self.impl_self_ty(self.span, impl_def_id).substs
}
@ -246,10 +229,10 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id);
let upcast_trait_ref =
this.replace_bound_vars_with_fresh_vars(&upcast_poly_trait_ref);
debug!("original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
original_poly_trait_ref,
upcast_trait_ref,
trait_def_id);
debug!(
"original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
original_poly_trait_ref, upcast_trait_ref, trait_def_id
);
upcast_trait_ref.substs
})
}
@ -286,22 +269,24 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
self.fcx
.autoderef(self.span, self_ty)
.include_raw_pointers()
.filter_map(|(ty, _)|
match ty.kind {
ty::Dynamic(ref data, ..) => {
Some(closure(self, ty, data.principal().unwrap_or_else(|| {
span_bug!(self.span, "calling trait method on empty object?")
})))
},
_ => None,
}
)
.filter_map(|(ty, _)| match ty.kind {
ty::Dynamic(ref data, ..) => Some(closure(
self,
ty,
data.principal().unwrap_or_else(|| {
span_bug!(self.span, "calling trait method on empty object?")
}),
)),
_ => None,
})
.next()
.unwrap_or_else(||
span_bug!(self.span,
"self-type `{}` for ObjectPick never dereferenced to an object",
self_ty)
)
.unwrap_or_else(|| {
span_bug!(
self.span,
"self-type `{}` for ObjectPick never dereferenced to an object",
self_ty
)
})
}
fn instantiate_method_substs(
@ -315,11 +300,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// variables.
let generics = self.tcx.generics_of(pick.item.def_id);
AstConv::check_generic_arg_count_for_call(
self.tcx,
self.span,
&generics,
&seg,
true, // `is_method_call`
self.tcx, self.span, &generics, &seg, true, // `is_method_call`
);
// Create subst for early-bound lifetime parameters, combining
@ -335,26 +316,18 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Provide the generic args, and whether types should be inferred.
|_| {
// The last argument of the returned tuple here is unimportant.
if let Some(ref data) = seg.args {
(Some(data), false)
} else {
(None, false)
}
if let Some(ref data) = seg.args { (Some(data), false) } else { (None, false) }
},
// Provide substitutions for parameters for which (valid) arguments have been provided.
|param, arg| {
match (&param.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
self.to_ty(ty).into()
}
(GenericParamDefKind::Const, GenericArg::Const(ct)) => {
self.to_const(&ct.value, self.tcx.type_of(param.def_id)).into()
}
_ => unreachable!(),
|param, arg| match (&param.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => self.to_ty(ty).into(),
(GenericParamDefKind::Const, GenericArg::Const(ct)) => {
self.to_const(&ct.value, self.tcx.type_of(param.def_id)).into()
}
_ => unreachable!(),
},
// Provide substitutions for parameters for which arguments are inferred.
|_, param, _| self.var_for_def(self.span, param),
@ -367,10 +340,12 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
self.register_predicates(obligations);
}
Err(_) => {
span_bug!(self.span,
"{} was a subtype of {} but now is not?",
self_ty,
method_self_ty);
span_bug!(
self.span,
"{} was a subtype of {} but now is not?",
self_ty,
method_self_ty
);
}
}
}
@ -378,20 +353,18 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// NOTE: this returns the *unnormalized* predicates and method sig. Because of
// inference guessing, the predicates and method signature can't be normalized
// until we unify the `Self` type.
fn instantiate_method_sig(&mut self,
pick: &probe::Pick<'tcx>,
all_substs: SubstsRef<'tcx>)
-> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) {
debug!("instantiate_method_sig(pick={:?}, all_substs={:?})",
pick,
all_substs);
fn instantiate_method_sig(
&mut self,
pick: &probe::Pick<'tcx>,
all_substs: SubstsRef<'tcx>,
) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) {
debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs);
// Instantiate the bounds on the method with the
// type/early-bound-regions substitutions performed. There can
// be no late-bound regions appearing here.
let def_id = pick.item.def_id;
let method_predicates = self.tcx.predicates_of(def_id)
.instantiate(self.tcx, all_substs);
let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_substs);
debug!("method_predicates after subst = {:?}", method_predicates);
@ -404,8 +377,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// `instantiate_type_scheme` can normalize associated types that
// may reference those regions.
let method_sig = self.replace_bound_vars_with_fresh_vars(&sig);
debug!("late-bound lifetimes from method instantiated, method_sig={:?}",
method_sig);
debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig);
let method_sig = method_sig.subst(self.tcx, all_substs);
debug!("type scheme substituted, method_sig={:?}", method_sig);
@ -413,17 +385,21 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
(method_sig, method_predicates)
}
fn add_obligations(&mut self,
fty: Ty<'tcx>,
all_substs: SubstsRef<'tcx>,
method_predicates: &ty::InstantiatedPredicates<'tcx>) {
debug!("add_obligations: fty={:?} all_substs={:?} method_predicates={:?}",
fty,
all_substs,
method_predicates);
fn add_obligations(
&mut self,
fty: Ty<'tcx>,
all_substs: SubstsRef<'tcx>,
method_predicates: &ty::InstantiatedPredicates<'tcx>,
) {
debug!(
"add_obligations: fty={:?} all_substs={:?} method_predicates={:?}",
fty, all_substs, method_predicates
);
self.add_obligations_for_parameters(traits::ObligationCause::misc(self.span, self.body_id),
method_predicates);
self.add_obligations_for_parameters(
traits::ObligationCause::misc(self.span, self.body_id),
method_predicates,
);
// this is a projection from a trait reference, so we have to
// make sure that the trait reference inputs are well-formed.
@ -447,9 +423,9 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
loop {
match exprs.last().unwrap().kind {
hir::ExprKind::Field(ref expr, _) |
hir::ExprKind::Index(ref expr, _) |
hir::ExprKind::Unary(hir::UnDeref, ref expr) => exprs.push(&expr),
hir::ExprKind::Field(ref expr, _)
| hir::ExprKind::Index(ref expr, _)
| hir::ExprKind::Unary(hir::UnDeref, ref expr) => exprs.push(&expr),
_ => break,
}
}
@ -467,10 +443,8 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Do not mutate adjustments in place, but rather take them,
// and replace them after mutating them, to avoid having the
// tables borrowed during (`deref_mut`) method resolution.
let previous_adjustments = self.tables
.borrow_mut()
.adjustments_mut()
.remove(expr.hir_id);
let previous_adjustments =
self.tables.borrow_mut().adjustments_mut().remove(expr.hir_id);
if let Some(mut adjustments) = previous_adjustments {
let needs = Needs::MutPlace;
for adjustment in &mut adjustments {
@ -478,10 +452,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
if let Some(ok) = self.try_overloaded_deref(expr.span, source, needs) {
let method = self.register_infer_ok_obligations(ok);
if let ty::Ref(region, _, mutbl) = method.sig.output().kind {
*deref = OverloadedDeref {
region,
mutbl,
};
*deref = OverloadedDeref { region, mutbl };
}
}
}
@ -494,44 +465,49 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
hir::ExprKind::Index(ref base_expr, ref index_expr) => {
let index_expr_ty = self.node_ty(index_expr.hir_id);
self.convert_place_op_to_mutable(
PlaceOp::Index, expr, base_expr, &[index_expr_ty]);
PlaceOp::Index,
expr,
base_expr,
&[index_expr_ty],
);
}
hir::ExprKind::Unary(hir::UnDeref, ref base_expr) => {
self.convert_place_op_to_mutable(
PlaceOp::Deref, expr, base_expr, &[]);
self.convert_place_op_to_mutable(PlaceOp::Deref, expr, base_expr, &[]);
}
_ => {}
}
}
}
fn convert_place_op_to_mutable(&self,
op: PlaceOp,
expr: &hir::Expr,
base_expr: &hir::Expr,
arg_tys: &[Ty<'tcx>])
{
debug!("convert_place_op_to_mutable({:?}, {:?}, {:?}, {:?})",
op, expr, base_expr, arg_tys);
fn convert_place_op_to_mutable(
&self,
op: PlaceOp,
expr: &hir::Expr,
base_expr: &hir::Expr,
arg_tys: &[Ty<'tcx>],
) {
debug!("convert_place_op_to_mutable({:?}, {:?}, {:?}, {:?})", op, expr, base_expr, arg_tys);
if !self.tables.borrow().is_method_call(expr) {
debug!("convert_place_op_to_mutable - builtin, nothing to do");
return
return;
}
let base_ty = self.tables.borrow().expr_adjustments(base_expr).last()
let base_ty = self
.tables
.borrow()
.expr_adjustments(base_expr)
.last()
.map_or_else(|| self.node_ty(expr.hir_id), |adj| adj.target);
let base_ty = self.resolve_vars_if_possible(&base_ty);
// Need to deref because overloaded place ops take self by-reference.
let base_ty = base_ty.builtin_deref(false)
.expect("place op takes something that is not a ref")
.ty;
let base_ty =
base_ty.builtin_deref(false).expect("place op takes something that is not a ref").ty;
let method = self.try_overloaded_place_op(
expr.span, base_ty, arg_tys, Needs::MutPlace, op);
let method = self.try_overloaded_place_op(expr.span, base_ty, arg_tys, Needs::MutPlace, op);
let method = match method {
Some(ok) => self.register_infer_ok_obligations(ok),
None => return self.tcx.sess.delay_span_bug(expr.span, "re-trying op failed")
None => return self.tcx.sess.delay_span_bug(expr.span, "re-trying op failed"),
};
debug!("convert_place_op_to_mutable: method={:?}", method);
self.write_method_call(expr.hir_id, method);
@ -545,10 +521,9 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Convert the autoref in the base expr to mutable with the correct
// region and mutability.
let base_expr_ty = self.node_ty(base_expr.hir_id);
if let Some(adjustments) = self.tables
.borrow_mut()
.adjustments_mut()
.get_mut(base_expr.hir_id) {
if let Some(adjustments) =
self.tables.borrow_mut().adjustments_mut().get_mut(base_expr.hir_id)
{
let mut source = base_expr_ty;
for adjustment in &mut adjustments[..] {
if let Adjust::Borrow(AutoBorrow::Ref(..)) = adjustment.kind {
@ -560,21 +535,19 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// deployment, conservatively omit
// overloaded operators.
allow_two_phase_borrow: AllowTwoPhase::No,
}
},
};
adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(region, mutbl));
adjustment.target = self.tcx.mk_ref(region, ty::TypeAndMut {
ty: source,
mutbl: mutbl.into(),
});
adjustment.target =
self.tcx.mk_ref(region, ty::TypeAndMut { ty: source, mutbl: mutbl.into() });
}
source = adjustment.target;
}
// If we have an autoref followed by unsizing at the end, fix the unsize target.
match adjustments[..] {
[.., Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), ref mut target }] => {
[.., Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. }, Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), ref mut target }] =>
{
*target = method.sig.inputs()[0];
}
_ => {}
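A minimal standalone illustration of the two-phase borrow case mentioned in the comments above (`allow_two_phase_borrow: AllowTwoPhase::Yes` for method-call receivers): the implicit `&mut` autoref of the receiver is a two-phase borrow, so the argument expression may still read the receiver while that borrow is pending. The adjacent hunk shows the conservative opposite choice (`AllowTwoPhase::No`) for overloaded place operators.

fn main() {
    let mut v = vec![1, 2, 3];
    // The receiver autoref `&mut v` for `push` is two-phase, so reading `v`
    // inside the argument is accepted.
    v.push(v.len());
    assert_eq!(v, [1, 2, 3, 3]);
}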
@ -585,27 +558,25 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
///////////////////////////////////////////////////////////////////////////
// MISCELLANY
fn predicates_require_illegal_sized_bound(&self,
predicates: &ty::InstantiatedPredicates<'tcx>)
-> bool {
fn predicates_require_illegal_sized_bound(
&self,
predicates: &ty::InstantiatedPredicates<'tcx>,
) -> bool {
let sized_def_id = match self.tcx.lang_items().sized_trait() {
Some(def_id) => def_id,
None => return false,
};
traits::elaborate_predicates(self.tcx, predicates.predicates.clone())
.filter_map(|predicate| {
match predicate {
ty::Predicate::Trait(trait_pred) if trait_pred.def_id() == sized_def_id =>
Some(trait_pred),
_ => None,
.filter_map(|predicate| match predicate {
ty::Predicate::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => {
Some(trait_pred)
}
_ => None,
})
.any(|trait_pred| {
match trait_pred.skip_binder().self_ty().kind {
ty::Dynamic(..) => true,
_ => false,
}
.any(|trait_pred| match trait_pred.skip_binder().self_ty().kind {
ty::Dynamic(..) => true,
_ => false,
})
}
@ -619,27 +590,31 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
}
}
fn upcast(&mut self,
source_trait_ref: ty::PolyTraitRef<'tcx>,
target_trait_def_id: DefId)
-> ty::PolyTraitRef<'tcx> {
let upcast_trait_refs = self.tcx
.upcast_choices(source_trait_ref.clone(), target_trait_def_id);
fn upcast(
&mut self,
source_trait_ref: ty::PolyTraitRef<'tcx>,
target_trait_def_id: DefId,
) -> ty::PolyTraitRef<'tcx> {
let upcast_trait_refs =
self.tcx.upcast_choices(source_trait_ref.clone(), target_trait_def_id);
// must be exactly one trait ref or we'd get an ambig error etc
if upcast_trait_refs.len() != 1 {
span_bug!(self.span,
"cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
source_trait_ref,
target_trait_def_id,
upcast_trait_refs);
span_bug!(
self.span,
"cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
source_trait_ref,
target_trait_def_id,
upcast_trait_refs
);
}
upcast_trait_refs.into_iter().next().unwrap()
}
fn replace_bound_vars_with_fresh_vars<T>(&self, value: &ty::Binder<T>) -> T
where T: TypeFoldable<'tcx>
where
T: TypeFoldable<'tcx>,
{
self.fcx.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, value).0
}

File diff suppressed because it is too large


@ -1,22 +1,22 @@
use crate::check::{Inherited, FnCtxt};
use crate::check::{FnCtxt, Inherited};
use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter};
use crate::hir::def_id::DefId;
use rustc::traits::{self, ObligationCause, ObligationCauseCode};
use rustc::ty::{self, Ty, TyCtxt, GenericParamDefKind, TypeFoldable, ToPredicate};
use rustc::ty::subst::{Subst, InternalSubsts};
use rustc::util::nodemap::{FxHashSet, FxHashMap};
use rustc::middle::lang_items;
use rustc::infer::opaque_types::may_define_opaque_type;
use rustc::middle::lang_items;
use rustc::traits::{self, ObligationCause, ObligationCauseCode};
use rustc::ty::subst::{InternalSubsts, Subst};
use rustc::ty::{self, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use errors::DiagnosticBuilder;
use syntax::ast;
use syntax::feature_gate;
use syntax_pos::Span;
use syntax::symbol::sym;
use errors::DiagnosticBuilder;
use syntax_pos::Span;
use rustc::hir::itemlikevisit::ParItemLikeVisitor;
use rustc::hir;
use rustc::hir::itemlikevisit::ParItemLikeVisitor;
use rustc_error_codes::*;
@ -73,9 +73,11 @@ pub fn check_item_well_formed(tcx: TyCtxt<'_>, def_id: DefId) {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
let item = tcx.hir().expect_item(hir_id);
debug!("check_item_well_formed(it.hir_id={:?}, it.name={})",
item.hir_id,
tcx.def_path_str(def_id));
debug!(
"check_item_well_formed(it.hir_id={:?}, it.name={})",
item.hir_id,
tcx.def_path_str(def_id)
);
match item.kind {
// Right now we check that every default trait implementation
@ -96,7 +98,8 @@ pub fn check_item_well_formed(tcx: TyCtxt<'_>, def_id: DefId) {
// won't be allowed unless there's an *explicit* implementation of `Send`
// for `T`
hir::ItemKind::Impl(_, _, defaultness, _, ref trait_ref, ref self_ty, _) => {
let is_auto = tcx.impl_trait_ref(tcx.hir().local_def_id(item.hir_id))
let is_auto = tcx
.impl_trait_ref(tcx.hir().local_def_id(item.hir_id))
.map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id));
let polarity = tcx.impl_polarity(def_id);
if let (hir::Defaultness::Default { .. }, true) = (defaultness, is_auto) {
@ -109,9 +112,13 @@ pub fn check_item_well_formed(tcx: TyCtxt<'_>, def_id: DefId) {
ty::ImplPolarity::Negative => {
// FIXME(#27579): what amount of WF checking do we need for neg impls?
if trait_ref.is_some() && !is_auto {
span_err!(tcx.sess, item.span, E0192,
"negative impls are only allowed for \
auto traits (e.g., `Send` and `Sync`)")
span_err!(
tcx.sess,
item.span,
E0192,
"negative impls are only allowed for \
auto traits (e.g., `Send` and `Sync`)"
)
}
}
ty::ImplPolarity::Reservation => {
@ -128,29 +135,25 @@ pub fn check_item_well_formed(tcx: TyCtxt<'_>, def_id: DefId) {
hir::ItemKind::Const(ref ty, ..) => {
check_item_type(tcx, item.hir_id, ty.span, false);
}
hir::ItemKind::ForeignMod(ref module) => for it in module.items.iter() {
if let hir::ForeignItemKind::Static(ref ty, ..) = it.kind {
check_item_type(tcx, it.hir_id, ty.span, true);
hir::ItemKind::ForeignMod(ref module) => {
for it in module.items.iter() {
if let hir::ForeignItemKind::Static(ref ty, ..) = it.kind {
check_item_type(tcx, it.hir_id, ty.span, true);
}
}
},
}
hir::ItemKind::Struct(ref struct_def, ref ast_generics) => {
check_type_defn(tcx, item, false, |fcx| {
vec![fcx.non_enum_variant(struct_def)]
});
check_type_defn(tcx, item, false, |fcx| vec![fcx.non_enum_variant(struct_def)]);
check_variances_for_type_defn(tcx, item, ast_generics);
}
hir::ItemKind::Union(ref struct_def, ref ast_generics) => {
check_type_defn(tcx, item, true, |fcx| {
vec![fcx.non_enum_variant(struct_def)]
});
check_type_defn(tcx, item, true, |fcx| vec![fcx.non_enum_variant(struct_def)]);
check_variances_for_type_defn(tcx, item, ast_generics);
}
hir::ItemKind::Enum(ref enum_def, ref ast_generics) => {
check_type_defn(tcx, item, true, |fcx| {
fcx.enum_variants(enum_def)
});
check_type_defn(tcx, item, true, |fcx| fcx.enum_variants(enum_def));
check_variances_for_type_defn(tcx, item, ast_generics);
}
@ -170,7 +173,7 @@ pub fn check_trait_item(tcx: TyCtxt<'_>, def_id: DefId) {
let method_sig = match trait_item.kind {
hir::TraitItemKind::Method(ref sig, _) => Some(sig),
_ => None
_ => None,
};
check_associated_item(tcx, trait_item.hir_id, trait_item.span, method_sig);
}
@ -181,7 +184,7 @@ pub fn check_impl_item(tcx: TyCtxt<'_>, def_id: DefId) {
let method_sig = match impl_item.kind {
hir::ImplItemKind::Method(ref sig, _) => Some(sig),
_ => None
_ => None,
};
check_associated_item(tcx, impl_item.hir_id, impl_item.span, method_sig);
@ -201,8 +204,9 @@ fn check_associated_item(
let (mut implied_bounds, self_ty) = match item.container {
ty::TraitContainer(_) => (vec![], fcx.tcx.types.self_param),
ty::ImplContainer(def_id) => (fcx.impl_implied_bounds(def_id, span),
fcx.tcx.type_of(def_id))
ty::ImplContainer(def_id) => {
(fcx.impl_implied_bounds(def_id, span), fcx.tcx.type_of(def_id))
}
};
match item.kind {
@ -214,8 +218,7 @@ fn check_associated_item(
ty::AssocKind::Method => {
let sig = fcx.tcx.fn_sig(item.def_id);
let sig = fcx.normalize_associated_types_in(span, &sig);
check_fn_or_method(tcx, fcx, span, sig,
item.def_id, &mut implied_bounds);
check_fn_or_method(tcx, fcx, span, sig, item.def_id, &mut implied_bounds);
let sig_if_method = sig_if_method.expect("bad signature for method");
check_method_receiver(fcx, sig_if_method, &item, self_ty);
}
@ -271,27 +274,20 @@ fn check_type_defn<'tcx, F>(
let ty = variant.fields.last().unwrap().ty;
let ty = fcx.tcx.erase_regions(&ty);
if ty.has_local_value() {
fcx_tcx.sess.delay_span_bug(
item.span, &format!("inference variables in {:?}", ty));
// Just treat an unresolved type expression as if it needs drop.
true
fcx_tcx
.sess
.delay_span_bug(item.span, &format!("inference variables in {:?}", ty));
// Just treat an unresolved type expression as if it needs drop.
true
} else {
ty.needs_drop(fcx_tcx, fcx_tcx.param_env(def_id))
}
}
};
let all_sized =
all_sized ||
variant.fields.is_empty() ||
needs_drop_copy();
let unsized_len = if all_sized {
0
} else {
1
};
for (idx, field) in variant.fields[..variant.fields.len() - unsized_len]
.iter()
.enumerate()
let all_sized = all_sized || variant.fields.is_empty() || needs_drop_copy();
let unsized_len = if all_sized { 0 } else { 1 };
for (idx, field) in
variant.fields[..variant.fields.len() - unsized_len].iter().enumerate()
{
let last = idx == variant.fields.len() - 1;
fcx.register_bound(
@ -305,16 +301,19 @@ fn check_type_defn<'tcx, F>(
Some(i) => i,
None => bug!(),
},
last
}
)
last,
},
),
);
}
// All field types must be well-formed.
for field in &variant.fields {
fcx.register_wf_obligation(field.ty, field.span,
ObligationCauseCode::MiscObligation)
fcx.register_wf_obligation(
field.ty,
field.span,
ObligationCauseCode::MiscObligation,
)
}
}
@ -338,7 +337,8 @@ fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
tcx.def_span(*associated_def_id),
E0714,
"marker traits cannot have associated items",
).emit();
)
.emit();
}
}
@ -354,18 +354,12 @@ fn check_item_fn(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
let sig = fcx.tcx.fn_sig(def_id);
let sig = fcx.normalize_associated_types_in(item.span, &sig);
let mut implied_bounds = vec![];
check_fn_or_method(tcx, fcx, item.span, sig,
def_id, &mut implied_bounds);
check_fn_or_method(tcx, fcx, item.span, sig, def_id, &mut implied_bounds);
implied_bounds
})
}
fn check_item_type(
tcx: TyCtxt<'_>,
item_id: hir::HirId,
ty_span: Span,
allow_foreign_ty: bool,
) {
fn check_item_type(tcx: TyCtxt<'_>, item_id: hir::HirId, ty_span: Span, allow_foreign_ty: bool) {
debug!("check_item_type: {:?}", item_id);
for_id(tcx, item_id, ty_span).with_fcx(|fcx, tcx| {
@ -411,10 +405,8 @@ fn check_impl<'tcx>(
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
let trait_ref = fcx.tcx.impl_trait_ref(item_def_id).unwrap();
let trait_ref = fcx.normalize_associated_types_in(
ast_trait_ref.path.span,
&trait_ref,
);
let trait_ref =
fcx.normalize_associated_types_in(ast_trait_ref.path.span, &trait_ref);
let obligations = ty::wf::trait_obligations(
fcx,
fcx.param_env,
@ -430,8 +422,11 @@ fn check_impl<'tcx>(
None => {
let self_ty = fcx.tcx.type_of(item_def_id);
let self_ty = fcx.normalize_associated_types_in(item.span, &self_ty);
fcx.register_wf_obligation(self_ty, ast_self_ty.span,
ObligationCauseCode::MiscObligation);
fcx.register_wf_obligation(
self_ty,
ast_self_ty.span,
ObligationCauseCode::MiscObligation,
);
}
}
@ -454,13 +449,11 @@ fn check_where_clauses<'tcx, 'fcx>(
let predicates = fcx.tcx.predicates_of(def_id);
let generics = tcx.generics_of(def_id);
let is_our_default = |def: &ty::GenericParamDef| {
match def.kind {
GenericParamDefKind::Type { has_default, .. } => {
has_default && def.index >= generics.parent_count as u32
}
_ => unreachable!()
let is_our_default = |def: &ty::GenericParamDef| match def.kind {
GenericParamDefKind::Type { has_default, .. } => {
has_default && def.index >= generics.parent_count as u32
}
_ => unreachable!(),
};
// Check that concrete defaults are well-formed. See test `type-check-defaults.rs`.
@ -477,8 +470,11 @@ fn check_where_clauses<'tcx, 'fcx>(
// parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
// be sure if it will error or not, as the user might always specify the other.
if !ty.needs_subst() {
fcx.register_wf_obligation(ty, fcx.tcx.def_span(param.def_id),
ObligationCauseCode::MiscObligation);
fcx.register_wf_obligation(
ty,
fcx.tcx.def_span(param.def_id),
ObligationCauseCode::MiscObligation,
);
}
}
}
@ -521,55 +517,62 @@ fn check_where_clauses<'tcx, 'fcx>(
});
// Now we build the substituted predicates.
let default_obligations = predicates.predicates.iter().flat_map(|&(pred, sp)| {
#[derive(Default)]
struct CountParams { params: FxHashSet<u32> }
impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
if let ty::Param(param) = t.kind {
self.params.insert(param.index);
let default_obligations = predicates
.predicates
.iter()
.flat_map(|&(pred, sp)| {
#[derive(Default)]
struct CountParams {
params: FxHashSet<u32>,
}
impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
if let ty::Param(param) = t.kind {
self.params.insert(param.index);
}
t.super_visit_with(self)
}
t.super_visit_with(self)
}
fn visit_region(&mut self, _: ty::Region<'tcx>) -> bool {
true
}
fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
if let ty::ConstKind::Param(param) = c.val {
self.params.insert(param.index);
fn visit_region(&mut self, _: ty::Region<'tcx>) -> bool {
true
}
fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
if let ty::ConstKind::Param(param) = c.val {
self.params.insert(param.index);
}
c.super_visit_with(self)
}
c.super_visit_with(self)
}
}
let mut param_count = CountParams::default();
let has_region = pred.visit_with(&mut param_count);
let substituted_pred = pred.subst(fcx.tcx, substs);
// Don't check non-defaulted params, dependent defaults (including lifetimes)
// or preds with multiple params.
if substituted_pred.references_error() || param_count.params.len() > 1 || has_region {
None
} else if predicates.predicates.iter().any(|&(p, _)| p == substituted_pred) {
// Avoid duplication of predicates that contain no parameters, for example.
None
} else {
Some((substituted_pred, sp))
}
}).map(|(pred, sp)| {
// Convert each of those into an obligation. So if you have
// something like `struct Foo<T: Copy = String>`, we would
// take that predicate `T: Copy`, substitute to `String: Copy`
// (actually that happens in the previous `flat_map` call),
// and then try to prove it (in this case, we'll fail).
//
// Note the subtle difference from how we handle `predicates`
// below: there, we are not trying to prove those predicates
// to be *true* but merely *well-formed*.
let pred = fcx.normalize_associated_types_in(sp, &pred);
let cause = traits::ObligationCause::new(sp, fcx.body_id, traits::ItemObligation(def_id));
traits::Obligation::new(cause, fcx.param_env, pred)
});
let mut param_count = CountParams::default();
let has_region = pred.visit_with(&mut param_count);
let substituted_pred = pred.subst(fcx.tcx, substs);
// Don't check non-defaulted params, dependent defaults (including lifetimes)
// or preds with multiple params.
if substituted_pred.references_error() || param_count.params.len() > 1 || has_region {
None
} else if predicates.predicates.iter().any(|&(p, _)| p == substituted_pred) {
// Avoid duplication of predicates that contain no parameters, for example.
None
} else {
Some((substituted_pred, sp))
}
})
.map(|(pred, sp)| {
// Convert each of those into an obligation. So if you have
// something like `struct Foo<T: Copy = String>`, we would
// take that predicate `T: Copy`, substitute to `String: Copy`
// (actually that happens in the previous `flat_map` call),
// and then try to prove it (in this case, we'll fail).
//
// Note the subtle difference from how we handle `predicates`
// below: there, we are not trying to prove those predicates
// to be *true* but merely *well-formed*.
let pred = fcx.normalize_associated_types_in(sp, &pred);
let cause =
traits::ObligationCause::new(sp, fcx.body_id, traits::ItemObligation(def_id));
traits::Obligation::new(cause, fcx.param_env, pred)
});
let mut predicates = predicates.instantiate_identity(fcx.tcx);
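For reference, a small self-contained example of the default-substitution check described in the comment above (the `struct Foo<T: Copy = String>` case): the default is substituted into the parameter's own bound, and the resulting predicate must be provable at the definition site.

// Accepted: the default `u32` satisfies the bound `T: Copy`.
struct WithGoodDefault<T: Copy = u32>(T);

// By contrast, `struct Foo<T: Copy = String>(T);` would be rejected here,
// since the substituted predicate `String: Copy` cannot be proven.

fn main() {
    println!("{}", WithGoodDefault(7u32).0);
}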
@ -580,14 +583,10 @@ fn check_where_clauses<'tcx, 'fcx>(
let predicates = fcx.normalize_associated_types_in(span, &predicates);
debug!("check_where_clauses: predicates={:?}", predicates.predicates);
let wf_obligations =
predicates.predicates
.iter()
.flat_map(|p| ty::wf::predicate_obligations(fcx,
fcx.param_env,
fcx.body_id,
p,
span));
let wf_obligations = predicates
.predicates
.iter()
.flat_map(|p| ty::wf::predicate_obligations(fcx, fcx.param_env, fcx.body_id, p, span));
for obligation in wf_obligations.chain(default_obligations) {
debug!("next obligation cause: {:?}", obligation.cause);
@ -681,13 +680,12 @@ fn check_opaque_types<'fcx, 'tcx>(
)
.emit();
}
}
},
ty::subst::GenericArgKind::Lifetime(region) => {
let param_span = tcx.def_span(param.def_id);
if let ty::ReStatic = region {
tcx
.sess
tcx.sess
.struct_span_err(
span,
"non-defining opaque type use \
@ -724,22 +722,18 @@ fn check_opaque_types<'fcx, 'tcx>(
)
.emit();
}
}
},
} // match subst
} // for (subst, param)
for (_, spans) in seen {
if spans.len() > 1 {
tcx
.sess
tcx.sess
.struct_span_err(
span,
"non-defining opaque type use \
in defining scope",
).
span_note(
spans,
"lifetime used multiple times",
)
.span_note(spans, "lifetime used multiple times")
.emit();
}
}
@ -756,10 +750,7 @@ fn check_opaque_types<'fcx, 'tcx>(
// type Foo<T: Bar> = impl Baz + 'static;
// fn foo<U: Bar>() -> Foo<U> { .. *}
let predicates = tcx.predicates_of(def_id);
trace!(
"check_opaque_types: may define, predicates={:#?}",
predicates,
);
trace!("check_opaque_types: may define, predicates={:#?}", predicates,);
for &(pred, _) in predicates.predicates {
let substituted_pred = pred.subst(fcx.tcx, substs);
// Avoid duplication of predicates that contain no parameters, for example.
@ -777,8 +768,7 @@ fn check_opaque_types<'fcx, 'tcx>(
substituted_predicates
}
const HELP_FOR_SELF_TYPE: &str =
"consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
`self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one \
of the previous types except `Self`)";
@ -804,18 +794,13 @@ fn check_method_receiver<'fcx, 'tcx>(
debug!("check_method_receiver: sig={:?}", sig);
let self_ty = fcx.normalize_associated_types_in(span, &self_ty);
let self_ty = fcx.tcx.liberate_late_bound_regions(
method.def_id,
&ty::Binder::bind(self_ty)
);
let self_ty = fcx.tcx.liberate_late_bound_regions(method.def_id, &ty::Binder::bind(self_ty));
let receiver_ty = sig.inputs()[0];
let receiver_ty = fcx.normalize_associated_types_in(span, &receiver_ty);
let receiver_ty = fcx.tcx.liberate_late_bound_regions(
method.def_id,
&ty::Binder::bind(receiver_ty)
);
let receiver_ty =
fcx.tcx.liberate_late_bound_regions(method.def_id, &ty::Binder::bind(receiver_ty));
if fcx.tcx.features().arbitrary_self_types {
if !receiver_is_valid(fcx, span, receiver_ty, self_ty, true) {
@ -851,8 +836,10 @@ fn e0307(fcx: &FnCtxt<'fcx, 'tcx>, span: Span, receiver_ty: Ty<'_>) {
fcx.tcx.sess.diagnostic(),
span,
E0307,
"invalid `self` parameter type: {:?}", receiver_ty,
).note("type of `self` must be `Self` or a type that dereferences to it")
"invalid `self` parameter type: {:?}",
receiver_ty,
)
.note("type of `self` must be `Self` or a type that dereferences to it")
.help(HELP_FOR_SELF_TYPE)
.emit();
}
@ -882,7 +869,7 @@ fn receiver_is_valid<'fcx, 'tcx>(
if let Some(mut err) = fcx.demand_eqtype_with_origin(&cause, self_ty, receiver_ty) {
err.emit();
}
return true
return true;
}
let mut autoderef = fcx.autoderef(span, receiver_ty);
@ -895,27 +882,26 @@ fn receiver_is_valid<'fcx, 'tcx>(
// The first type is `receiver_ty`, which we know is not equal to `self_ty`; skip it.
autoderef.next();
let receiver_trait_def_id = fcx.tcx.require_lang_item(
lang_items::ReceiverTraitLangItem,
None,
);
let receiver_trait_def_id = fcx.tcx.require_lang_item(lang_items::ReceiverTraitLangItem, None);
// Keep dereferencing `receiver_ty` until we get to `self_ty`.
loop {
if let Some((potential_self_ty, _)) = autoderef.next() {
debug!("receiver_is_valid: potential self type `{:?}` to match `{:?}`",
potential_self_ty, self_ty);
debug!(
"receiver_is_valid: potential self type `{:?}` to match `{:?}`",
potential_self_ty, self_ty
);
if can_eq_self(potential_self_ty) {
autoderef.finalize(fcx);
if let Some(mut err) = fcx.demand_eqtype_with_origin(
&cause, self_ty, potential_self_ty
) {
if let Some(mut err) =
fcx.demand_eqtype_with_origin(&cause, self_ty, potential_self_ty)
{
err.emit();
}
break
break;
} else {
// Without `feature(arbitrary_self_types)`, we require that each step in the
// deref chain implement `receiver`
@ -927,12 +913,11 @@ fn receiver_is_valid<'fcx, 'tcx>(
potential_self_ty,
)
{
return false
return false;
}
}
} else {
debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`",
receiver_ty, self_ty);
debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", receiver_ty, self_ty);
// If the receiver already has errors reported due to it, consider it valid to avoid
// unnecessary errors (#58712).
return receiver_ty.references_error();
@ -943,7 +928,7 @@ fn receiver_is_valid<'fcx, 'tcx>(
if !arbitrary_self_types_enabled
&& !receiver_is_implemented(fcx, receiver_trait_def_id, cause.clone(), receiver_ty)
{
return false
return false;
}
true
@ -955,22 +940,20 @@ fn receiver_is_implemented(
cause: ObligationCause<'tcx>,
receiver_ty: Ty<'tcx>,
) -> bool {
let trait_ref = ty::TraitRef{
let trait_ref = ty::TraitRef {
def_id: receiver_trait_def_id,
substs: fcx.tcx.mk_substs_trait(receiver_ty, &[]),
};
let obligation = traits::Obligation::new(
cause,
fcx.param_env,
trait_ref.to_predicate()
);
let obligation = traits::Obligation::new(cause, fcx.param_env, trait_ref.to_predicate());
if fcx.predicate_must_hold_modulo_regions(&obligation) {
true
} else {
debug!("receiver_is_implemented: type `{:?}` does not implement `Receiver` trait",
receiver_ty);
debug!(
"receiver_is_implemented: type `{:?}` does not implement `Receiver` trait",
receiver_ty
);
false
}
}
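As a standalone sketch of the receiver rule enforced above (without `feature(arbitrary_self_types)`, `self` must be `Self` or reach it through the `Deref`/`Receiver` chain, per `HELP_FOR_SELF_TYPE`); `Widget` and the method names are invented for the illustration:

use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;

struct Widget;

impl Widget {
    fn by_ref(&self) {}
    fn by_box(self: Box<Self>) {}
    fn by_rc(self: Rc<Self>) {}
    fn by_arc(self: Arc<Self>) {}
    fn by_pin(self: Pin<&mut Self>) {}
}

fn main() {
    Widget.by_ref();
    Box::new(Widget).by_box();
    Rc::new(Widget).by_rc();
    Arc::new(Widget).by_arc();
    let mut w = Widget;
    Pin::new(&mut w).by_pin();
}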
@ -990,18 +973,14 @@ fn check_variances_for_type_defn<'tcx>(
assert_eq!(ty_predicates.parent, None);
let variances = tcx.variances_of(item_def_id);
let mut constrained_parameters: FxHashSet<_> =
variances.iter().enumerate()
.filter(|&(_, &variance)| variance != ty::Bivariant)
.map(|(index, _)| Parameter(index as u32))
.collect();
let mut constrained_parameters: FxHashSet<_> = variances
.iter()
.enumerate()
.filter(|&(_, &variance)| variance != ty::Bivariant)
.map(|(index, _)| Parameter(index as u32))
.collect();
identify_constrained_generic_params(
tcx,
ty_predicates,
None,
&mut constrained_parameters,
);
identify_constrained_generic_params(tcx, ty_predicates, None, &mut constrained_parameters);
for (index, _) in variances.iter().enumerate() {
if constrained_parameters.contains(&Parameter(index as u32)) {
@ -1011,7 +990,7 @@ fn check_variances_for_type_defn<'tcx>(
let param = &hir_generics.params[index];
match param.name {
hir::ParamName::Error => { }
hir::ParamName::Error => {}
_ => report_bivariance(tcx, param.span, param.name.ident().name),
}
}
@ -1029,7 +1008,7 @@ fn report_bivariance(tcx: TyCtxt<'_>, span: Span, param_name: ast::Name) {
tcx.def_path_str(def_id),
)
} else {
format!( "consider removing `{}` or referring to it in a field", param_name)
format!("consider removing `{}` or referring to it in a field", param_name)
};
err.help(&msg);
err.emit();
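A compact example of the bivariance diagnostic issued above (E0392, "parameter `{}` is never used"): a type parameter that appears in no field is rejected, and `PhantomData` is the usual way to record it, which is also what the "referring to it in a field" help suggests.

use std::marker::PhantomData;

// `struct Slot<T>;` on its own would be rejected with E0392.
struct Slot<T> {
    _marker: PhantomData<T>, // mentions `T`, so the definition is accepted
}

fn main() {
    let _slot: Slot<u8> = Slot { _marker: PhantomData };
}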
@ -1041,10 +1020,7 @@ fn check_false_global_bounds(fcx: &FnCtxt<'_, '_>, span: Span, id: hir::HirId) {
let empty_env = ty::ParamEnv::empty();
let def_id = fcx.tcx.hir().local_def_id(id);
let predicates = fcx.tcx.predicates_of(def_id).predicates
.iter()
.map(|(p, _)| *p)
.collect();
let predicates = fcx.tcx.predicates_of(def_id).predicates.iter().map(|(p, _)| *p).collect();
// Check elaborated bounds.
let implied_obligations = traits::elaborate_predicates(fcx.tcx, predicates);
@ -1053,11 +1029,7 @@ fn check_false_global_bounds(fcx: &FnCtxt<'_, '_>, span: Span, id: hir::HirId) {
if pred.is_global() && !pred.has_late_bound_regions() {
let pred = fcx.normalize_associated_types_in(span, &pred);
let obligation = traits::Obligation::new(
traits::ObligationCause::new(
span,
id,
traits::TrivialBound,
),
traits::ObligationCause::new(span, id, traits::TrivialBound),
empty_env,
pred,
);
@ -1074,9 +1046,7 @@ pub struct CheckTypeWellFormedVisitor<'tcx> {
impl CheckTypeWellFormedVisitor<'tcx> {
pub fn new(tcx: TyCtxt<'tcx>) -> CheckTypeWellFormedVisitor<'tcx> {
CheckTypeWellFormedVisitor {
tcx,
}
CheckTypeWellFormedVisitor { tcx }
}
}
@ -1114,22 +1084,22 @@ struct AdtField<'tcx> {
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn non_enum_variant(&self, struct_def: &hir::VariantData<'_>) -> AdtVariant<'tcx> {
let fields = struct_def.fields().iter().map(|field| {
let field_ty = self.tcx.type_of(self.tcx.hir().local_def_id(field.hir_id));
let field_ty = self.normalize_associated_types_in(field.span,
&field_ty);
let field_ty = self.resolve_vars_if_possible(&field_ty);
debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty);
AdtField { ty: field_ty, span: field.span }
})
.collect();
let fields = struct_def
.fields()
.iter()
.map(|field| {
let field_ty = self.tcx.type_of(self.tcx.hir().local_def_id(field.hir_id));
let field_ty = self.normalize_associated_types_in(field.span, &field_ty);
let field_ty = self.resolve_vars_if_possible(&field_ty);
debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty);
AdtField { ty: field_ty, span: field.span }
})
.collect();
AdtVariant { fields }
}
fn enum_variants(&self, enum_def: &hir::EnumDef<'_>) -> Vec<AdtVariant<'tcx>> {
enum_def.variants.iter()
.map(|variant| self.non_enum_variant(&variant.data))
.collect()
enum_def.variants.iter().map(|variant| self.non_enum_variant(&variant.data)).collect()
}
fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec<Ty<'tcx>> {
@ -1151,13 +1121,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
fn error_392(
tcx: TyCtxt<'_>,
span: Span,
param_name: ast::Name,
) -> DiagnosticBuilder<'_> {
let mut err = struct_span_err!(tcx.sess, span, E0392,
"parameter `{}` is never used", param_name);
fn error_392(tcx: TyCtxt<'_>, span: Span, param_name: ast::Name) -> DiagnosticBuilder<'_> {
let mut err =
struct_span_err!(tcx.sess, span, E0392, "parameter `{}` is never used", param_name);
err.span_label(span, "unused parameter");
err
}


@ -1,16 +1,42 @@
use super::{ErrorCodes, LangString, Markdown, MarkdownHtml, IdMap, Ignore};
use super::plain_summary_line;
use super::{ErrorCodes, IdMap, Ignore, LangString, Markdown, MarkdownHtml};
use std::cell::RefCell;
use syntax::edition::{Edition, DEFAULT_EDITION};
#[test]
fn test_unique_id() {
let input = ["foo", "examples", "examples", "method.into_iter","examples",
"method.into_iter", "foo", "main", "search", "methods",
"examples", "method.into_iter", "assoc_type.Item", "assoc_type.Item"];
let expected = ["foo", "examples", "examples-1", "method.into_iter", "examples-2",
"method.into_iter-1", "foo-1", "main", "search", "methods",
"examples-3", "method.into_iter-2", "assoc_type.Item", "assoc_type.Item-1"];
let input = [
"foo",
"examples",
"examples",
"method.into_iter",
"examples",
"method.into_iter",
"foo",
"main",
"search",
"methods",
"examples",
"method.into_iter",
"assoc_type.Item",
"assoc_type.Item",
];
let expected = [
"foo",
"examples",
"examples-1",
"method.into_iter",
"examples-2",
"method.into_iter-1",
"foo-1",
"main",
"search",
"methods",
"examples-3",
"method.into_iter-2",
"assoc_type.Item",
"assoc_type.Item-1",
];
let map = RefCell::new(IdMap::new());
let test = || {
@ -25,96 +51,161 @@ fn test_unique_id() {
#[test]
fn test_lang_string_parse() {
fn t(s: &str,
should_panic: bool, no_run: bool, ignore: Ignore, rust: bool, test_harness: bool,
compile_fail: bool, allow_fail: bool, error_codes: Vec<String>,
edition: Option<Edition>) {
assert_eq!(LangString::parse(s, ErrorCodes::Yes, true), LangString {
should_panic,
no_run,
ignore,
rust,
test_harness,
compile_fail,
error_codes,
original: s.to_owned(),
allow_fail,
edition,
})
fn t(
s: &str,
should_panic: bool,
no_run: bool,
ignore: Ignore,
rust: bool,
test_harness: bool,
compile_fail: bool,
allow_fail: bool,
error_codes: Vec<String>,
edition: Option<Edition>,
) {
assert_eq!(
LangString::parse(s, ErrorCodes::Yes, true),
LangString {
should_panic,
no_run,
ignore,
rust,
test_harness,
compile_fail,
error_codes,
original: s.to_owned(),
allow_fail,
edition,
}
)
}
let ignore_foo = Ignore::Some(vec!("foo".to_string()));
let ignore_foo = Ignore::Some(vec!["foo".to_string()]);
fn v() -> Vec<String> {
Vec::new()
}
// ignore-tidy-linelength
// marker | should_panic | no_run | ignore | rust | test_harness
// | compile_fail | allow_fail | error_codes | edition
t("", false, false, Ignore::None, true, false, false, false, v(), None);
t("rust", false, false, Ignore::None, true, false, false, false, v(), None);
t("sh", false, false, Ignore::None, false, false, false, false, v(), None);
t("ignore", false, false, Ignore::All, true, false, false, false, v(), None);
t("ignore-foo", false, false, ignore_foo, true, false, false, false, v(), None);
t("should_panic", true, false, Ignore::None, true, false, false, false, v(), None);
t("no_run", false, true, Ignore::None, true, false, false, false, v(), None);
t("test_harness", false, false, Ignore::None, true, true, false, false, v(), None);
t("compile_fail", false, true, Ignore::None, true, false, true, false, v(), None);
t("allow_fail", false, false, Ignore::None, true, false, false, true, v(), None);
t("{.no_run .example}", false, true, Ignore::None, true, false, false, false, v(), None);
t("{.sh .should_panic}", true, false, Ignore::None, false, false, false, false, v(), None);
t("{.example .rust}", false, false, Ignore::None, true, false, false, false, v(), None);
t("{.test_harness .rust}", false, false, Ignore::None, true, true, false, false, v(), None);
t("text, no_run", false, true, Ignore::None, false, false, false, false, v(), None);
t("text,no_run", false, true, Ignore::None, false, false, false, false, v(), None);
t("edition2015", false, false, Ignore::None, true, false, false, false, v(), Some(Edition::Edition2015));
t("edition2018", false, false, Ignore::None, true, false, false, false, v(), Some(Edition::Edition2018));
t("", false, false, Ignore::None, true, false, false, false, v(), None);
t("rust", false, false, Ignore::None, true, false, false, false, v(), None);
t("sh", false, false, Ignore::None, false, false, false, false, v(), None);
t("ignore", false, false, Ignore::All, true, false, false, false, v(), None);
t("ignore-foo", false, false, ignore_foo, true, false, false, false, v(), None);
t("should_panic", true, false, Ignore::None, true, false, false, false, v(), None);
t("no_run", false, true, Ignore::None, true, false, false, false, v(), None);
t("test_harness", false, false, Ignore::None, true, true, false, false, v(), None);
t("compile_fail", false, true, Ignore::None, true, false, true, false, v(), None);
t("allow_fail", false, false, Ignore::None, true, false, false, true, v(), None);
t("{.no_run .example}", false, true, Ignore::None, true, false, false, false, v(), None);
t("{.sh .should_panic}", true, false, Ignore::None, false, false, false, false, v(), None);
t("{.example .rust}", false, false, Ignore::None, true, false, false, false, v(), None);
t("{.test_harness .rust}", false, false, Ignore::None, true, true, false, false, v(), None);
t("text, no_run", false, true, Ignore::None, false, false, false, false, v(), None);
t("text,no_run", false, true, Ignore::None, false, false, false, false, v(), None);
t(
"edition2015",
false,
false,
Ignore::None,
true,
false,
false,
false,
v(),
Some(Edition::Edition2015),
);
t(
"edition2018",
false,
false,
Ignore::None,
true,
false,
false,
false,
v(),
Some(Edition::Edition2018),
);
}
#[test]
fn test_header() {
fn t(input: &str, expect: &str) {
let mut map = IdMap::new();
let output = Markdown(
input, &[], &mut map, ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
let output =
Markdown(input, &[], &mut map, ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
assert_eq!(output, expect, "original: {}", input);
}
t("# Foo bar", "<h1 id=\"foo-bar\" class=\"section-header\">\
<a href=\"#foo-bar\">Foo bar</a></h1>");
t("## Foo-bar_baz qux", "<h2 id=\"foo-bar_baz-qux\" class=\"section-\
header\"><a href=\"#foo-bar_baz-qux\">Foo-bar_baz qux</a></h2>");
t("### **Foo** *bar* baz!?!& -_qux_-%",
"<h3 id=\"foo-bar-baz--qux-\" class=\"section-header\">\
t(
"# Foo bar",
"<h1 id=\"foo-bar\" class=\"section-header\">\
<a href=\"#foo-bar\">Foo bar</a></h1>",
);
t(
"## Foo-bar_baz qux",
"<h2 id=\"foo-bar_baz-qux\" class=\"section-\
header\"><a href=\"#foo-bar_baz-qux\">Foo-bar_baz qux</a></h2>",
);
t(
"### **Foo** *bar* baz!?!& -_qux_-%",
"<h3 id=\"foo-bar-baz--qux-\" class=\"section-header\">\
<a href=\"#foo-bar-baz--qux-\"><strong>Foo</strong> \
<em>bar</em> baz!?!&amp; -<em>qux</em>-%</a></h3>");
t("#### **Foo?** & \\*bar?!* _`baz`_ ❤ #qux",
"<h4 id=\"foo--bar--baz--qux\" class=\"section-header\">\
<em>bar</em> baz!?!&amp; -<em>qux</em>-%</a></h3>",
);
t(
"#### **Foo?** & \\*bar?!* _`baz`_ ❤ #qux",
"<h4 id=\"foo--bar--baz--qux\" class=\"section-header\">\
<a href=\"#foo--bar--baz--qux\"><strong>Foo?</strong> &amp; *bar?!* \
<em><code>baz</code></em> #qux</a></h4>");
<em><code>baz</code></em> #qux</a></h4>",
);
}
#[test]
fn test_header_ids_multiple_blocks() {
let mut map = IdMap::new();
fn t(map: &mut IdMap, input: &str, expect: &str) {
let output = Markdown(input, &[], map,
ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
let output = Markdown(input, &[], map, ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
assert_eq!(output, expect, "original: {}", input);
}
t(&mut map, "# Example", "<h1 id=\"example\" class=\"section-header\">\
<a href=\"#example\">Example</a></h1>");
t(&mut map, "# Panics", "<h1 id=\"panics\" class=\"section-header\">\
<a href=\"#panics\">Panics</a></h1>");
t(&mut map, "# Example", "<h1 id=\"example-1\" class=\"section-header\">\
<a href=\"#example-1\">Example</a></h1>");
t(&mut map, "# Main", "<h1 id=\"main\" class=\"section-header\">\
<a href=\"#main\">Main</a></h1>");
t(&mut map, "# Example", "<h1 id=\"example-2\" class=\"section-header\">\
<a href=\"#example-2\">Example</a></h1>");
t(&mut map, "# Panics", "<h1 id=\"panics-1\" class=\"section-header\">\
<a href=\"#panics-1\">Panics</a></h1>");
t(
&mut map,
"# Example",
"<h1 id=\"example\" class=\"section-header\">\
<a href=\"#example\">Example</a></h1>",
);
t(
&mut map,
"# Panics",
"<h1 id=\"panics\" class=\"section-header\">\
<a href=\"#panics\">Panics</a></h1>",
);
t(
&mut map,
"# Example",
"<h1 id=\"example-1\" class=\"section-header\">\
<a href=\"#example-1\">Example</a></h1>",
);
t(
&mut map,
"# Main",
"<h1 id=\"main\" class=\"section-header\">\
<a href=\"#main\">Main</a></h1>",
);
t(
&mut map,
"# Example",
"<h1 id=\"example-2\" class=\"section-header\">\
<a href=\"#example-2\">Example</a></h1>",
);
t(
&mut map,
"# Panics",
"<h1 id=\"panics-1\" class=\"section-header\">\
<a href=\"#panics-1\">Panics</a></h1>",
);
}
#[test]
@ -136,8 +227,8 @@ fn test_plain_summary_line() {
fn test_markdown_html_escape() {
fn t(input: &str, expect: &str) {
let mut idmap = IdMap::new();
let output = MarkdownHtml(input, &mut idmap,
ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
let output =
MarkdownHtml(input, &mut idmap, ErrorCodes::Yes, DEFAULT_EDITION, &None).to_string();
assert_eq!(output, expect, "original: {}", input);
}


@ -10,7 +10,7 @@ pub(crate) unsafe fn rel_ptr_mut<T>(offset: u64) -> *mut T {
(image_base() + offset) as *mut T
}
extern {
extern "C" {
static ENCLAVE_SIZE: usize;
}
@ -33,8 +33,7 @@ pub fn image_base() -> u64 {
pub fn is_enclave_range(p: *const u8, len: usize) -> bool {
let start = p as u64;
let end = start + (len as u64);
start >= image_base() &&
end <= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant
start >= image_base() && end <= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant
}
/// Returns `true` if the specified memory range is in userspace.
@ -44,6 +43,5 @@ pub fn is_enclave_range(p: *const u8, len: usize) -> bool {
pub fn is_user_range(p: *const u8, len: usize) -> bool {
let start = p as u64;
let end = start + (len as u64);
end <= image_base() ||
start >= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant
end <= image_base() || start >= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant
}


@ -5,7 +5,7 @@
use crate::os::unix::prelude::*;
use crate::error::Error as StdError;
use crate::ffi::{CString, CStr, OsString, OsStr};
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::iter;
@ -16,12 +16,12 @@ use crate::path::{self, PathBuf};
use crate::ptr;
use crate::slice;
use crate::str;
use crate::sys_common::mutex::{Mutex, MutexGuard};
use crate::sys::cvt;
use crate::sys::fd;
use crate::sys_common::mutex::{Mutex, MutexGuard};
use crate::vec;
use libc::{c_int, c_char, c_void};
use libc::{c_char, c_int, c_void};
const TMPBUF_SZ: usize = 128;
@ -33,24 +33,32 @@ cfg_if::cfg_if! {
}
}
extern {
extern "C" {
#[cfg(not(target_os = "dragonfly"))]
#[cfg_attr(any(target_os = "linux",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "l4re"),
link_name = "__errno_location")]
#[cfg_attr(any(target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "redox",
target_env = "newlib"),
link_name = "__errno")]
#[cfg_attr(
any(
target_os = "linux",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "l4re"
),
link_name = "__errno_location"
)]
#[cfg_attr(
any(
target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "redox",
target_env = "newlib"
),
link_name = "__errno"
)]
#[cfg_attr(target_os = "solaris", link_name = "___errno")]
#[cfg_attr(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd"),
link_name = "__error")]
#[cfg_attr(
any(target_os = "macos", target_os = "ios", target_os = "freebsd"),
link_name = "__error"
)]
#[cfg_attr(target_os = "haiku", link_name = "_errnop")]
fn errno_location() -> *mut c_int;
}
@ -58,23 +66,18 @@ extern {
/// Returns the platform-specific value of errno
#[cfg(not(target_os = "dragonfly"))]
pub fn errno() -> i32 {
unsafe {
(*errno_location()) as i32
}
unsafe { (*errno_location()) as i32 }
}
/// Sets the platform-specific value of errno
#[cfg(all(not(target_os = "linux"),
not(target_os = "dragonfly")))] // needed for readdir and syscall!
#[cfg(all(not(target_os = "linux"), not(target_os = "dragonfly")))] // needed for readdir and syscall!
pub fn set_errno(e: i32) {
unsafe {
*errno_location() = e as c_int
}
unsafe { *errno_location() = e as c_int }
}
#[cfg(target_os = "dragonfly")]
pub fn errno() -> i32 {
extern {
extern "C" {
#[thread_local]
static errno: c_int;
}
@ -84,7 +87,7 @@ pub fn errno() -> i32 {
#[cfg(target_os = "dragonfly")]
pub fn set_errno(e: i32) {
extern {
extern "C" {
#[thread_local]
static mut errno: c_int;
}
@ -96,11 +99,9 @@ pub fn set_errno(e: i32) {
/// Gets a detailed string description for the given error number.
pub fn error_string(errno: i32) -> String {
extern {
#[cfg_attr(any(target_os = "linux", target_env = "newlib"),
link_name = "__xpg_strerror_r")]
fn strerror_r(errnum: c_int, buf: *mut c_char,
buflen: libc::size_t) -> c_int;
extern "C" {
#[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")]
fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int;
}
let mut buf = [0 as c_char; TMPBUF_SZ];
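A standalone sketch of the same idea using only public std API: the raw errno value can be wrapped in `io::Error`, whose `Display` output carries the strerror-style message that `error_string` above produces via `strerror_r`. The concrete value `2` (ENOENT on Linux) is just an assumption for the demonstration.

use std::io;

fn describe(errnum: i32) -> String {
    // from_raw_os_error interprets the value as an OS errno code.
    io::Error::from_raw_os_error(errnum).to_string()
}

fn main() {
    // Prints something like "No such file or directory (os error 2)".
    println!("{}", describe(2));
}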
@ -154,41 +155,51 @@ pub fn chdir(p: &path::Path) -> io::Result<()> {
}
pub struct SplitPaths<'a> {
iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>,
fn(&'a [u8]) -> PathBuf>,
iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>,
}
pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> {
fn bytes_to_path(b: &[u8]) -> PathBuf {
PathBuf::from(<OsStr as OsStrExt>::from_bytes(b))
}
fn is_separator(b: &u8) -> bool { *b == PATH_SEPARATOR }
fn is_separator(b: &u8) -> bool {
*b == PATH_SEPARATOR
}
let unparsed = unparsed.as_bytes();
SplitPaths {
iter: unparsed.split(is_separator as fn(&u8) -> bool)
.map(bytes_to_path as fn(&[u8]) -> PathBuf)
iter: unparsed
.split(is_separator as fn(&u8) -> bool)
.map(bytes_to_path as fn(&[u8]) -> PathBuf),
}
}
impl<'a> Iterator for SplitPaths<'a> {
type Item = PathBuf;
fn next(&mut self) -> Option<PathBuf> { self.iter.next() }
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
fn next(&mut self) -> Option<PathBuf> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
#[derive(Debug)]
pub struct JoinPathsError;
pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError>
where I: Iterator<Item=T>, T: AsRef<OsStr>
where
I: Iterator<Item = T>,
T: AsRef<OsStr>,
{
let mut joined = Vec::new();
for (i, path) in paths.enumerate() {
let path = path.as_ref().as_bytes();
if i > 0 { joined.push(PATH_SEPARATOR) }
if i > 0 {
joined.push(PATH_SEPARATOR)
}
if path.contains(&PATH_SEPARATOR) {
return Err(JoinPathsError)
return Err(JoinPathsError);
}
joined.extend_from_slice(path);
}
@ -202,26 +213,41 @@ impl fmt::Display for JoinPathsError {
}
impl StdError for JoinPathsError {
fn description(&self) -> &str { "failed to join paths" }
fn description(&self) -> &str {
"failed to join paths"
}
}
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mut mib = [libc::CTL_KERN as c_int,
libc::KERN_PROC as c_int,
libc::KERN_PROC_PATHNAME as c_int,
-1 as c_int];
let mut mib = [
libc::CTL_KERN as c_int,
libc::KERN_PROC as c_int,
libc::KERN_PROC_PATHNAME as c_int,
-1 as c_int,
];
let mut sz = 0;
cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as libc::c_uint,
ptr::null_mut(), &mut sz, ptr::null_mut(), 0))?;
cvt(libc::sysctl(
mib.as_mut_ptr(),
mib.len() as libc::c_uint,
ptr::null_mut(),
&mut sz,
ptr::null_mut(),
0,
))?;
if sz == 0 {
return Err(io::Error::last_os_error())
return Err(io::Error::last_os_error());
}
let mut v: Vec<u8> = Vec::with_capacity(sz);
cvt(libc::sysctl(mib.as_mut_ptr(), mib.len() as libc::c_uint,
v.as_mut_ptr() as *mut libc::c_void, &mut sz,
ptr::null_mut(), 0))?;
cvt(libc::sysctl(
mib.as_mut_ptr(),
mib.len() as libc::c_uint,
v.as_mut_ptr() as *mut libc::c_void,
&mut sz,
ptr::null_mut(),
0,
))?;
if sz == 0 {
return Err(io::Error::last_os_error());
}
@ -236,17 +262,29 @@ pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, -1, libc::KERN_PROC_PATHNAME];
let mut path_len: usize = 0;
cvt(libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
ptr::null_mut(), &mut path_len,
ptr::null(), 0))?;
cvt(libc::sysctl(
mib.as_ptr(),
mib.len() as libc::c_uint,
ptr::null_mut(),
&mut path_len,
ptr::null(),
0,
))?;
if path_len <= 1 {
return Err(io::Error::new(io::ErrorKind::Other,
"KERN_PROC_PATHNAME sysctl returned zero-length string"))
return Err(io::Error::new(
io::ErrorKind::Other,
"KERN_PROC_PATHNAME sysctl returned zero-length string",
));
}
let mut path: Vec<u8> = Vec::with_capacity(path_len);
cvt(libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
path.as_ptr() as *mut libc::c_void, &mut path_len,
ptr::null(), 0))?;
cvt(libc::sysctl(
mib.as_ptr(),
mib.len() as libc::c_uint,
path.as_ptr() as *mut libc::c_void,
&mut path_len,
ptr::null(),
0,
))?;
path.set_len(path_len - 1); // chop off NUL
Ok(PathBuf::from(OsString::from_vec(path)))
}
@ -256,8 +294,10 @@ pub fn current_exe() -> io::Result<PathBuf> {
if curproc_exe.is_file() {
return crate::fs::read_link(curproc_exe);
}
Err(io::Error::new(io::ErrorKind::Other,
"/proc/curproc/exe doesn't point to regular file."))
Err(io::Error::new(
io::ErrorKind::Other,
"/proc/curproc/exe doesn't point to regular file.",
))
}
sysctl().or_else(|_| procfs())
}
@ -265,21 +305,15 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(target_os = "openbsd")]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mut mib = [libc::CTL_KERN,
libc::KERN_PROC_ARGS,
libc::getpid(),
libc::KERN_PROC_ARGV];
let mut mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, libc::getpid(), libc::KERN_PROC_ARGV];
let mib = mib.as_mut_ptr();
let mut argv_len = 0;
cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len,
ptr::null_mut(), 0))?;
cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len, ptr::null_mut(), 0))?;
let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize);
cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _,
&mut argv_len, ptr::null_mut(), 0))?;
cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _, &mut argv_len, ptr::null_mut(), 0))?;
argv.set_len(argv_len as usize);
if argv[0].is_null() {
return Err(io::Error::new(io::ErrorKind::Other,
"no current exe available"))
return Err(io::Error::new(io::ErrorKind::Other, "no current exe available"));
}
let argv0 = CStr::from_ptr(argv[0]).to_bytes();
if argv0[0] == b'.' || argv0.iter().any(|b| *b == b'/') {
@ -293,29 +327,30 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))]
pub fn current_exe() -> io::Result<PathBuf> {
match crate::fs::read_link("/proc/self/exe") {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
Err(io::Error::new(
io::ErrorKind::Other,
"no /proc/self/exe available. Is /proc mounted?"
))
},
Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::Error::new(
io::ErrorKind::Other,
"no /proc/self/exe available. Is /proc mounted?",
)),
other => other,
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub fn current_exe() -> io::Result<PathBuf> {
extern {
fn _NSGetExecutablePath(buf: *mut libc::c_char,
bufsize: *mut u32) -> libc::c_int;
extern "C" {
fn _NSGetExecutablePath(buf: *mut libc::c_char, bufsize: *mut u32) -> libc::c_int;
}
unsafe {
let mut sz: u32 = 0;
_NSGetExecutablePath(ptr::null_mut(), &mut sz);
if sz == 0 { return Err(io::Error::last_os_error()); }
if sz == 0 {
return Err(io::Error::last_os_error());
}
let mut v: Vec<u8> = Vec::with_capacity(sz as usize);
let err = _NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz);
if err != 0 { return Err(io::Error::last_os_error()); }
if err != 0 {
return Err(io::Error::last_os_error());
}
v.set_len(sz as usize - 1); // chop off trailing NUL
Ok(PathBuf::from(OsString::from_vec(v)))
}
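The hunks above are the platform backends for the public `std::env::current_exe` API (Linux reads `/proc/self/exe`, macOS asks `_NSGetExecutablePath`, the BSDs go through `sysctl`); a minimal caller looks like this:

fn main() -> std::io::Result<()> {
    let exe = std::env::current_exe()?;
    println!("running {}", exe.display());
    Ok(())
}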
@ -323,7 +358,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(any(target_os = "solaris"))]
pub fn current_exe() -> io::Result<PathBuf> {
extern {
extern "C" {
fn getexecname() -> *const c_char;
}
unsafe {
@ -336,11 +371,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
// Prepend a current working directory to the path if
// it doesn't contain an absolute pathname.
if filename[0] == b'/' {
Ok(path)
} else {
getcwd().map(|cwd| cwd.join(path))
}
if filename[0] == b'/' { Ok(path) } else { getcwd().map(|cwd| cwd.join(path)) }
}
}
}
@ -354,11 +385,11 @@ pub fn current_exe() -> io::Result<PathBuf> {
type_: i32,
sequence: i32,
init_order: i32,
init_routine: *mut libc::c_void, // function pointer
term_routine: *mut libc::c_void, // function pointer
init_routine: *mut libc::c_void, // function pointer
term_routine: *mut libc::c_void, // function pointer
device: libc::dev_t,
node: libc::ino_t,
name: [libc::c_char; 1024], // MAXPATHLEN
name: [libc::c_char; 1024], // MAXPATHLEN
text: *mut libc::c_void,
data: *mut libc::c_void,
text_size: i32,
@ -368,16 +399,20 @@ pub fn current_exe() -> io::Result<PathBuf> {
}
unsafe {
extern {
fn _get_next_image_info(team_id: i32, cookie: *mut i32,
info: *mut image_info, size: i32) -> i32;
extern "C" {
fn _get_next_image_info(
team_id: i32,
cookie: *mut i32,
info: *mut image_info,
size: i32,
) -> i32;
}
let mut info: image_info = mem::zeroed();
let mut cookie: i32 = 0;
// the executable can be found at team id 0
let result = _get_next_image_info(0, &mut cookie, &mut info,
mem::size_of::<image_info>() as i32);
let result =
_get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32);
if result != 0 {
use crate::io::ErrorKind;
Err(io::Error::new(ErrorKind::Other, "Error getting executable path"))
@ -406,19 +441,27 @@ pub struct Env {
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> { self.iter.next() }
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
fn next(&mut self) -> Option<(OsString, OsString)> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
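
The `Env` iterator above is the Unix backing for `std::env::vars_os()`; a minimal consumer, shown only for context and not part of this change:

use std::env;

fn main() {
    // Each item is an (OsString, OsString) pair yielded by the `Env` iterator above.
    for (key, value) in env::vars_os() {
        println!("{:?}={:?}", key, value);
    }
}
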
#[cfg(target_os = "macos")]
pub unsafe fn environ() -> *mut *const *const c_char {
extern { fn _NSGetEnviron() -> *mut *const *const c_char; }
extern "C" {
fn _NSGetEnviron() -> *mut *const *const c_char;
}
_NSGetEnviron()
}
#[cfg(not(target_os = "macos"))]
pub unsafe fn environ() -> *mut *const *const c_char {
extern { static mut environ: *const *const c_char; }
extern "C" {
static mut environ: *const *const c_char;
}
&mut environ
}
@ -442,10 +485,7 @@ pub fn env() -> Env {
}
environ = environ.offset(1);
}
return Env {
iter: result.into_iter(),
_dont_send_or_sync_me: PhantomData,
}
return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData };
}
fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
@ -457,10 +497,12 @@ pub fn env() -> Env {
return None;
}
let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
pos.map(|p| (
OsStringExt::from_vec(input[..p].to_vec()),
OsStringExt::from_vec(input[p+1..].to_vec()),
))
pos.map(|p| {
(
OsStringExt::from_vec(input[..p].to_vec()),
OsStringExt::from_vec(input[p + 1..].to_vec()),
)
})
}
}
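
For illustration, the `KEY=VALUE` split performed by `parse` above can be reproduced with plain slices. This standalone sketch assumes only std (the real code uses the crate-internal `memchr` helper) and uses hypothetical input values:

fn split_env(input: &[u8]) -> Option<(&[u8], &[u8])> {
    if input.is_empty() {
        return None;
    }
    // Search for '=' starting at index 1, so a leading '=' stays part of the key.
    let pos = input[1..].iter().position(|&b| b == b'=').map(|p| p + 1)?;
    Some((&input[..pos], &input[pos + 1..]))
}

fn main() {
    assert_eq!(split_env(b"PATH=/usr/bin"), Some((&b"PATH"[..], &b"/usr/bin"[..])));
    assert_eq!(split_env(b""), None);
}
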
@ -500,9 +542,7 @@ pub fn unsetenv(n: &OsStr) -> io::Result<()> {
}
pub fn page_size() -> usize {
unsafe {
libc::sysconf(libc::_SC_PAGESIZE) as usize
}
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
pub fn temp_dir() -> PathBuf {
@ -516,19 +556,23 @@ pub fn temp_dir() -> PathBuf {
}
pub fn home_dir() -> Option<PathBuf> {
return crate::env::var_os("HOME").or_else(|| unsafe {
fallback()
}).map(PathBuf::from);
return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from);
#[cfg(any(target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox"))]
unsafe fn fallback() -> Option<OsString> { None }
#[cfg(not(any(target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox")))]
#[cfg(any(
target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox"
))]
unsafe fn fallback() -> Option<OsString> {
None
}
#[cfg(not(any(
target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox"
)))]
unsafe fn fallback() -> Option<OsString> {
let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) {
n if n < 0 => 512 as usize,
@ -537,13 +581,18 @@ pub fn home_dir() -> Option<PathBuf> {
let mut buf = Vec::with_capacity(amt);
let mut passwd: libc::passwd = mem::zeroed();
let mut result = ptr::null_mut();
match libc::getpwuid_r(libc::getuid(), &mut passwd, buf.as_mut_ptr(),
buf.capacity(), &mut result) {
match libc::getpwuid_r(
libc::getuid(),
&mut passwd,
buf.as_mut_ptr(),
buf.capacity(),
&mut result,
) {
0 if !result.is_null() => {
let ptr = passwd.pw_dir as *const _;
let bytes = CStr::from_ptr(ptr).to_bytes().to_vec();
Some(OsStringExt::from_vec(bytes))
},
}
_ => None,
}
}
@ -589,7 +638,7 @@ fn parse_glibc_version(version: &str) -> Option<(usize, usize)> {
let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse();
match (parsed_ints.next(), parsed_ints.next()) {
(Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)),
_ => None
_ => None,
}
}
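
A standalone copy of the parser above, only to show its behavior on typical inputs (the function itself is private to this module; the sample strings are illustrative):

fn parse_glibc_version(version: &str) -> Option<(usize, usize)> {
    let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse();
    match (parsed_ints.next(), parsed_ints.next()) {
        (Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_glibc_version("2.17"), Some((2, 17)));
    assert_eq!(parse_glibc_version("2.17.1"), Some((2, 17)));
    assert_eq!(parse_glibc_version("garbage"), None);
}
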


@ -1,14 +1,14 @@
use rustc_data_structures::sync::Lrc;
use rustc_parse::lexer::StringReader;
use syntax::token::{self, Token, TokenKind};
use syntax::sess::ParseSess;
use syntax::source_map::{SourceMap, FilePathMapping};
use syntax::source_map::{FilePathMapping, SourceMap};
use syntax::token::{self, Token, TokenKind};
use syntax::util::comments::is_doc_comment;
use syntax::with_default_globals;
use syntax_pos::symbol::Symbol;
use syntax_pos::{BytePos, Span};
use errors::{Handler, emitter::EmitterWriter};
use errors::{emitter::EmitterWriter, Handler};
use std::io;
use std::path::PathBuf;
@ -22,17 +22,11 @@ fn mk_sess(sm: Lrc<SourceMap>) -> ParseSess {
None,
false,
);
ParseSess::with_span_handler(
Handler::with_emitter(true, None, Box::new(emitter)),
sm,
)
ParseSess::with_span_handler(Handler::with_emitter(true, None, Box::new(emitter)), sm)
}
// Creates a string reader for the given string.
fn setup<'a>(sm: &SourceMap,
sess: &'a ParseSess,
teststr: String)
-> StringReader<'a> {
fn setup<'a>(sm: &SourceMap, sess: &'a ParseSess, teststr: String) -> StringReader<'a> {
let sf = sm.new_source_file(PathBuf::from(teststr.clone()).into(), teststr);
StringReader::new(sess, sf, None)
}
@ -50,20 +44,14 @@ fn t1() {
assert_eq!(string_reader.next_token(), token::Comment);
assert_eq!(string_reader.next_token(), token::Whitespace);
let tok1 = string_reader.next_token();
let tok2 = Token::new(
mk_ident("fn"),
Span::with_root_ctxt(BytePos(21), BytePos(23)),
);
let tok2 = Token::new(mk_ident("fn"), Span::with_root_ctxt(BytePos(21), BytePos(23)));
assert_eq!(tok1.kind, tok2.kind);
assert_eq!(tok1.span, tok2.span);
assert_eq!(string_reader.next_token(), token::Whitespace);
// Read another token.
let tok3 = string_reader.next_token();
assert_eq!(string_reader.pos.clone(), BytePos(28));
let tok4 = Token::new(
mk_ident("main"),
Span::with_root_ctxt(BytePos(24), BytePos(28)),
);
let tok4 = Token::new(mk_ident("main"), Span::with_root_ctxt(BytePos(24), BytePos(28)));
assert_eq!(tok3.kind, tok4.kind);
assert_eq!(tok3.span, tok4.span);
@ -142,10 +130,7 @@ fn character_a() {
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
assert_eq!(
setup(&sm, &sh, "'a'".to_string()).next_token(),
mk_lit(token::Char, "a", None),
);
assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token(), mk_lit(token::Char, "a", None),);
})
}
@ -154,10 +139,7 @@ fn character_space() {
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
assert_eq!(
setup(&sm, &sh, "' '".to_string()).next_token(),
mk_lit(token::Char, " ", None),
);
assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token(), mk_lit(token::Char, " ", None),);
})
}
@ -213,7 +195,7 @@ fn literal_suffixes() {
setup(&sm, &sh, format!("{} suffix", $input)).next_token(),
mk_lit(token::$tok_type, $tok_contents, None),
);
}}
}};
}
test!("'a'", Char, "a");


@ -1,19 +1,19 @@
use crate::tests::{matches_codepattern, string_to_stream, with_error_checking_parse};
use errors::PResult;
use rustc_parse::new_parser_from_source_str;
use syntax::ast::{self, Name, PatKind};
use syntax::attr::first_attr_value_str_by_name;
use syntax::sess::ParseSess;
use syntax::token::{self, Token};
use syntax::print::pprust::item_to_string;
use syntax::ptr::P;
use syntax::sess::ParseSess;
use syntax::source_map::FilePathMapping;
use syntax::symbol::{kw, sym};
use syntax::tokenstream::{DelimSpan, TokenTree, TokenStream};
use syntax::token::{self, Token};
use syntax::tokenstream::{DelimSpan, TokenStream, TokenTree};
use syntax::visit;
use syntax::with_default_globals;
use syntax_pos::{Span, BytePos, Pos, FileName};
use errors::PResult;
use syntax_pos::{BytePos, FileName, Pos, Span};
use std::path::PathBuf;
@ -25,8 +25,11 @@ fn sess() -> ParseSess {
///
/// Returns `Ok(Some(item))` when successful, `Ok(None)` when no item was found, and `Err`
/// when a syntax error occurred.
fn parse_item_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<'_, Option<P<ast::Item>>> {
fn parse_item_from_source_str(
name: FileName,
source: String,
sess: &ParseSess,
) -> PResult<'_, Option<P<ast::Item>>> {
new_parser_from_source_str(sess, name, source).parse_item()
}
@ -36,17 +39,18 @@ fn sp(a: u32, b: u32) -> Span {
}
/// Parses a string, returns an expression.
fn string_to_expr(source_str : String) -> P<ast::Expr> {
fn string_to_expr(source_str: String) -> P<ast::Expr> {
with_error_checking_parse(source_str, &sess(), |p| p.parse_expr())
}
/// Parses a string, returns an item.
fn string_to_item(source_str : String) -> Option<P<ast::Item>> {
fn string_to_item(source_str: String) -> Option<P<ast::Item>> {
with_error_checking_parse(source_str, &sess(), |p| p.parse_item())
}
#[should_panic]
#[test] fn bad_path_expr_1() {
#[test]
fn bad_path_expr_1() {
with_default_globals(|| {
string_to_expr("::abc::def::return".to_string());
})
@ -54,51 +58,38 @@ fn string_to_item(source_str : String) -> Option<P<ast::Item>> {
// Checks the token-tree-ization of macros.
#[test]
fn string_to_tts_macro () {
fn string_to_tts_macro() {
with_default_globals(|| {
let tts: Vec<_> =
string_to_stream("macro_rules! zip (($a)=>($a))".to_string()).trees().collect();
let tts: &[TokenTree] = &tts[..];
match tts {
[
TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }),
TokenTree::Token(Token { kind: token::Not, .. }),
TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }),
TokenTree::Delimited(_, macro_delim, macro_tts)
]
if name_macro_rules == &sym::macro_rules && name_zip.as_str() == "zip" => {
[TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }), TokenTree::Token(Token { kind: token::Not, .. }), TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }), TokenTree::Delimited(_, macro_delim, macro_tts)]
if name_macro_rules == &sym::macro_rules && name_zip.as_str() == "zip" =>
{
let tts = &macro_tts.trees().collect::<Vec<_>>();
match &tts[..] {
[
TokenTree::Delimited(_, first_delim, first_tts),
TokenTree::Token(Token { kind: token::FatArrow, .. }),
TokenTree::Delimited(_, second_delim, second_tts),
]
if macro_delim == &token::Paren => {
[TokenTree::Delimited(_, first_delim, first_tts), TokenTree::Token(Token { kind: token::FatArrow, .. }), TokenTree::Delimited(_, second_delim, second_tts)]
if macro_delim == &token::Paren =>
{
let tts = &first_tts.trees().collect::<Vec<_>>();
match &tts[..] {
[
TokenTree::Token(Token { kind: token::Dollar, .. }),
TokenTree::Token(Token { kind: token::Ident(name, false), .. }),
]
if first_delim == &token::Paren && name.as_str() == "a" => {},
[TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. })]
if first_delim == &token::Paren && name.as_str() == "a" => {}
_ => panic!("value 3: {:?} {:?}", first_delim, first_tts),
}
let tts = &second_tts.trees().collect::<Vec<_>>();
match &tts[..] {
[
TokenTree::Token(Token { kind: token::Dollar, .. }),
TokenTree::Token(Token { kind: token::Ident(name, false), .. }),
]
if second_delim == &token::Paren && name.as_str() == "a" => {},
[TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. })]
if second_delim == &token::Paren && name.as_str() == "a" => {}
_ => panic!("value 4: {:?} {:?}", second_delim, second_tts),
}
},
}
_ => panic!("value 2: {:?} {:?}", macro_delim, macro_tts),
}
},
_ => panic!("value: {:?}",tts),
}
_ => panic!("value: {:?}", tts),
}
})
}
@ -118,23 +109,28 @@ fn string_to_tts_1() {
TokenTree::token(token::Ident(Name::intern("b"), false), sp(6, 7)).into(),
TokenTree::token(token::Colon, sp(8, 9)).into(),
TokenTree::token(token::Ident(sym::i32, false), sp(10, 13)).into(),
]).into(),
).into(),
])
.into(),
)
.into(),
TokenTree::Delimited(
DelimSpan::from_pair(sp(15, 16), sp(20, 21)),
token::DelimToken::Brace,
TokenStream::new(vec![
TokenTree::token(token::Ident(Name::intern("b"), false), sp(17, 18)).into(),
TokenTree::token(token::Semi, sp(18, 19)).into(),
]).into(),
).into()
])
.into(),
)
.into(),
]);
assert_eq!(tts, expected);
})
}
#[test] fn parse_use() {
#[test]
fn parse_use() {
with_default_globals(|| {
let use_s = "use foo::bar::baz;";
let vitem = string_to_item(use_s.to_string()).unwrap();
@ -148,7 +144,8 @@ fn string_to_tts_1() {
})
}
#[test] fn parse_extern_crate() {
#[test]
fn parse_extern_crate() {
with_default_globals(|| {
let ex_s = "extern crate foo;";
let vitem = string_to_item(ex_s.to_string()).unwrap();
@ -166,12 +163,12 @@ fn get_spans_of_pat_idents(src: &str) -> Vec<Span> {
let item = string_to_item(src.to_string()).unwrap();
struct PatIdentVisitor {
spans: Vec<Span>
spans: Vec<Span>,
}
impl<'a> visit::Visitor<'a> for PatIdentVisitor {
fn visit_pat(&mut self, p: &'a ast::Pat) {
match p.kind {
PatKind::Ident(_ , ref ident, _) => {
PatKind::Ident(_, ref ident, _) => {
self.spans.push(ident.span.clone());
}
_ => {
@ -185,27 +182,32 @@ fn get_spans_of_pat_idents(src: &str) -> Vec<Span> {
return v.spans;
}
#[test] fn span_of_self_arg_pat_idents_are_correct() {
#[test]
fn span_of_self_arg_pat_idents_are_correct() {
with_default_globals(|| {
let srcs = ["impl z { fn a (&self, &myarg: i32) {} }",
"impl z { fn a (&mut self, &myarg: i32) {} }",
"impl z { fn a (&'a self, &myarg: i32) {} }",
"impl z { fn a (self, &myarg: i32) {} }",
"impl z { fn a (self: Foo, &myarg: i32) {} }",
];
let srcs = [
"impl z { fn a (&self, &myarg: i32) {} }",
"impl z { fn a (&mut self, &myarg: i32) {} }",
"impl z { fn a (&'a self, &myarg: i32) {} }",
"impl z { fn a (self, &myarg: i32) {} }",
"impl z { fn a (self: Foo, &myarg: i32) {} }",
];
for &src in &srcs {
let spans = get_spans_of_pat_idents(src);
let (lo, hi) = (spans[0].lo(), spans[0].hi());
assert!("self" == &src[lo.to_usize()..hi.to_usize()],
"\"{}\" != \"self\". src=\"{}\"",
&src[lo.to_usize()..hi.to_usize()], src)
assert!(
"self" == &src[lo.to_usize()..hi.to_usize()],
"\"{}\" != \"self\". src=\"{}\"",
&src[lo.to_usize()..hi.to_usize()],
src
)
}
})
}
#[test] fn parse_exprs () {
#[test]
fn parse_exprs() {
with_default_globals(|| {
// just make sure that they parse....
string_to_expr("3 + 4".to_string());
@ -213,9 +215,11 @@ fn get_spans_of_pat_idents(src: &str) -> Vec<Span> {
})
}
#[test] fn attrs_fix_bug () {
#[test]
fn attrs_fix_bug() {
with_default_globals(|| {
string_to_item("pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
string_to_item(
"pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
-> Result<Box<Writer>, String> {
#[cfg(windows)]
fn wb() -> c_int {
@ -226,27 +230,32 @@ fn wb() -> c_int {
fn wb() -> c_int { O_WRONLY as c_int }
let mut fflags: c_int = wb();
}".to_string());
}"
.to_string(),
);
})
}
#[test] fn crlf_doc_comments() {
#[test]
fn crlf_doc_comments() {
with_default_globals(|| {
let sess = sess();
let name_1 = FileName::Custom("crlf_source_1".to_string());
let source = "/// doc comment\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name_1, source, &sess)
.unwrap().unwrap();
let item = parse_item_from_source_str(name_1, source, &sess).unwrap().unwrap();
let doc = first_attr_value_str_by_name(&item.attrs, sym::doc).unwrap();
assert_eq!(doc.as_str(), "/// doc comment");
let name_2 = FileName::Custom("crlf_source_2".to_string());
let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name_2, source, &sess)
.unwrap().unwrap();
let docs = item.attrs.iter().filter(|a| a.has_name(sym::doc))
.map(|a| a.value_str().unwrap().to_string()).collect::<Vec<_>>();
let item = parse_item_from_source_str(name_2, source, &sess).unwrap().unwrap();
let docs = item
.attrs
.iter()
.filter(|a| a.has_name(sym::doc))
.map(|a| a.value_str().unwrap().to_string())
.collect::<Vec<_>>();
let b: &[_] = &["/// doc comment".to_string(), "/// line 2".to_string()];
assert_eq!(&docs[..], b);
@ -261,15 +270,21 @@ let mut fflags: c_int = wb();
#[test]
fn ttdelim_span() {
fn parse_expr_from_source_str(
name: FileName, source: String, sess: &ParseSess
name: FileName,
source: String,
sess: &ParseSess,
) -> PResult<'_, P<ast::Expr>> {
new_parser_from_source_str(sess, name, source).parse_expr()
}
with_default_globals(|| {
let sess = sess();
let expr = parse_expr_from_source_str(PathBuf::from("foo").into(),
"foo!( fn main() { body } )".to_string(), &sess).unwrap();
let expr = parse_expr_from_source_str(
PathBuf::from("foo").into(),
"foo!( fn main() { body } )".to_string(),
&sess,
)
.unwrap();
let tts: Vec<_> = match expr.kind {
ast::ExprKind::Mac(ref mac) => mac.args.inner_tokens().trees().collect(),
@ -295,7 +310,9 @@ fn out_of_line_mod() {
PathBuf::from("foo").into(),
"mod foo { struct S; mod this_does_not_exist; }".to_owned(),
&sess(),
).unwrap().unwrap();
)
.unwrap()
.unwrap();
if let ast::ItemKind::Mod(ref m) = item.kind {
assert!(m.items.len() == 2);
@ -307,31 +324,31 @@ fn out_of_line_mod() {
#[test]
fn eqmodws() {
assert_eq!(matches_codepattern("",""),true);
assert_eq!(matches_codepattern("","a"),false);
assert_eq!(matches_codepattern("a",""),false);
assert_eq!(matches_codepattern("a","a"),true);
assert_eq!(matches_codepattern("a b","a \n\t\r b"),true);
assert_eq!(matches_codepattern("a b ","a \n\t\r b"),true);
assert_eq!(matches_codepattern("a b","a \n\t\r b "),false);
assert_eq!(matches_codepattern("a b","a b"),true);
assert_eq!(matches_codepattern("ab","a b"),false);
assert_eq!(matches_codepattern("a b","ab"),true);
assert_eq!(matches_codepattern(" a b","ab"),true);
assert_eq!(matches_codepattern("", ""), true);
assert_eq!(matches_codepattern("", "a"), false);
assert_eq!(matches_codepattern("a", ""), false);
assert_eq!(matches_codepattern("a", "a"), true);
assert_eq!(matches_codepattern("a b", "a \n\t\r b"), true);
assert_eq!(matches_codepattern("a b ", "a \n\t\r b"), true);
assert_eq!(matches_codepattern("a b", "a \n\t\r b "), false);
assert_eq!(matches_codepattern("a b", "a b"), true);
assert_eq!(matches_codepattern("ab", "a b"), false);
assert_eq!(matches_codepattern("a b", "ab"), true);
assert_eq!(matches_codepattern(" a b", "ab"), true);
}
#[test]
fn pattern_whitespace() {
assert_eq!(matches_codepattern("","\x0C"), false);
assert_eq!(matches_codepattern("a b ","a \u{0085}\n\t\r b"),true);
assert_eq!(matches_codepattern("a b","a \u{0085}\n\t\r b "),false);
assert_eq!(matches_codepattern("", "\x0C"), false);
assert_eq!(matches_codepattern("a b ", "a \u{0085}\n\t\r b"), true);
assert_eq!(matches_codepattern("a b", "a \u{0085}\n\t\r b "), false);
}
#[test]
fn non_pattern_whitespace() {
// These have the property 'White_Space' but not 'Pattern_White_Space'
assert_eq!(matches_codepattern("a b","a\u{2002}b"), false);
assert_eq!(matches_codepattern("a b","a\u{2002}b"), false);
assert_eq!(matches_codepattern("\u{205F}a b","ab"), false);
assert_eq!(matches_codepattern("a \u{3000}b","ab"), false);
assert_eq!(matches_codepattern("a b", "a\u{2002}b"), false);
assert_eq!(matches_codepattern("a b", "a\u{2002}b"), false);
assert_eq!(matches_codepattern("\u{205F}a b", "ab"), false);
assert_eq!(matches_codepattern("a \u{3000}b", "ab"), false);
}
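
For context on the assertions above: those characters have the Unicode `White_Space` property but fall outside the much smaller `Pattern_White_Space` set the lexer cares about. A hedged, self-contained sketch of that distinction (the set below is copied from UAX #31, not from this crate):

fn is_pattern_whitespace(c: char) -> bool {
    // Pattern_White_Space per UAX #31: TAB..CR, SPACE, NEL, LRM, RLM, LS, PS.
    match c {
        '\u{0009}'..='\u{000D}'
        | '\u{0020}'
        | '\u{0085}'
        | '\u{200E}'
        | '\u{200F}'
        | '\u{2028}'
        | '\u{2029}' => true,
        _ => false,
    }
}

fn main() {
    // U+2002 EN SPACE: whitespace in the general sense, but not pattern whitespace.
    assert!('\u{2002}'.is_whitespace());
    assert!(!is_pattern_whitespace('\u{2002}'));
}
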


@ -1,14 +1,13 @@
/// The expansion from a test function to the appropriate test struct for libtest
/// Ideally, this code would be in libtest but for efficiency and error messages it lives here.
use crate::util::check_builtin_macro_attribute;
use syntax::ast;
use syntax::attr;
use syntax_expand::base::*;
use syntax::print::pprust;
use syntax::source_map::respan;
use syntax::symbol::{Symbol, sym};
use syntax::symbol::{sym, Symbol};
use syntax_expand::base::*;
use syntax_pos::Span;
use std::iter;
@ -24,24 +23,24 @@ pub fn expand_test_case(
ecx: &mut ExtCtxt<'_>,
attr_sp: Span,
meta_item: &ast::MetaItem,
anno_item: Annotatable
anno_item: Annotatable,
) -> Vec<Annotatable> {
check_builtin_macro_attribute(ecx, meta_item, sym::test_case);
if !ecx.ecfg.should_test { return vec![]; }
if !ecx.ecfg.should_test {
return vec![];
}
let sp = ecx.with_def_site_ctxt(attr_sp);
let mut item = anno_item.expect_item();
item = item.map(|mut item| {
item.vis = respan(item.vis.span, ast::VisibilityKind::Public);
item.ident.span = item.ident.span.with_ctxt(sp.ctxt());
item.attrs.push(
ecx.attribute(ecx.meta_word(sp, sym::rustc_test_marker))
);
item.attrs.push(ecx.attribute(ecx.meta_word(sp, sym::rustc_test_marker)));
item
});
return vec![Annotatable::Item(item)]
return vec![Annotatable::Item(item)];
}
pub fn expand_test(
@ -68,29 +67,39 @@ pub fn expand_test_or_bench(
cx: &mut ExtCtxt<'_>,
attr_sp: Span,
item: Annotatable,
is_bench: bool
is_bench: bool,
) -> Vec<Annotatable> {
// If we're not in test configuration, remove the annotated item
if !cx.ecfg.should_test { return vec![]; }
if !cx.ecfg.should_test {
return vec![];
}
let item =
if let Annotatable::Item(i) = item { i }
else {
cx.parse_sess.span_diagnostic.span_fatal(item.span(),
"`#[test]` attribute is only allowed on non associated functions").raise();
};
let item = if let Annotatable::Item(i) = item {
i
} else {
cx.parse_sess
.span_diagnostic
.span_fatal(
item.span(),
"`#[test]` attribute is only allowed on non associated functions",
)
.raise();
};
if let ast::ItemKind::Mac(_) = item.kind {
cx.parse_sess.span_diagnostic.span_warn(item.span,
"`#[test]` attribute should not be used on macros. Use `#[cfg(test)]` instead.");
cx.parse_sess.span_diagnostic.span_warn(
item.span,
"`#[test]` attribute should not be used on macros. Use `#[cfg(test)]` instead.",
);
return vec![Annotatable::Item(item)];
}
// has_*_signature will report any errors in the type so compilation
// will fail. We shouldn't try to expand in this case because the errors
// would be spurious.
if (!is_bench && !has_test_signature(cx, &item)) ||
(is_bench && !has_bench_signature(cx, &item)) {
if (!is_bench && !has_test_signature(cx, &item))
|| (is_bench && !has_bench_signature(cx, &item))
{
return vec![Annotatable::Item(item)];
}
@ -99,19 +108,15 @@ pub fn expand_test_or_bench(
let test_id = ast::Ident::new(sym::test, attr_sp);
// creates test::$name
let test_path = |name| {
cx.path(sp, vec![test_id, cx.ident_of(name, sp)])
};
let test_path = |name| cx.path(sp, vec![test_id, cx.ident_of(name, sp)]);
// creates test::ShouldPanic::$name
let should_panic_path = |name| {
cx.path(sp, vec![test_id, cx.ident_of("ShouldPanic", sp), cx.ident_of(name, sp)])
};
let should_panic_path =
|name| cx.path(sp, vec![test_id, cx.ident_of("ShouldPanic", sp), cx.ident_of(name, sp)]);
// creates test::TestType::$name
let test_type_path = |name| {
cx.path(sp, vec![test_id, cx.ident_of("TestType", sp), cx.ident_of(name, sp)])
};
let test_type_path =
|name| cx.path(sp, vec![test_id, cx.ident_of("TestType", sp), cx.ident_of(name, sp)]);
// creates $name: $expr
let field = |name, expr| cx.field_imm(sp, cx.ident_of(name, sp), expr);
@ -120,101 +125,151 @@ pub fn expand_test_or_bench(
// A simple ident for a lambda
let b = cx.ident_of("b", attr_sp);
cx.expr_call(sp, cx.expr_path(test_path("StaticBenchFn")), vec![
// |b| self::test::assert_test_result(
cx.lambda1(sp,
cx.expr_call(sp, cx.expr_path(test_path("assert_test_result")), vec![
// super::$test_fn(b)
cx.expr_call(sp,
cx.expr_path(cx.path(sp, vec![item.ident])),
vec![cx.expr_ident(sp, b)])
]),
b
)
// )
])
cx.expr_call(
sp,
cx.expr_path(test_path("StaticBenchFn")),
vec![
// |b| self::test::assert_test_result(
cx.lambda1(
sp,
cx.expr_call(
sp,
cx.expr_path(test_path("assert_test_result")),
vec![
// super::$test_fn(b)
cx.expr_call(
sp,
cx.expr_path(cx.path(sp, vec![item.ident])),
vec![cx.expr_ident(sp, b)],
),
],
),
b,
), // )
],
)
} else {
cx.expr_call(sp, cx.expr_path(test_path("StaticTestFn")), vec![
// || {
cx.lambda0(sp,
// test::assert_test_result(
cx.expr_call(sp, cx.expr_path(test_path("assert_test_result")), vec![
// $test_fn()
cx.expr_call(sp, cx.expr_path(cx.path(sp, vec![item.ident])), vec![])
// )
])
// }
)
// )
])
cx.expr_call(
sp,
cx.expr_path(test_path("StaticTestFn")),
vec![
// || {
cx.lambda0(
sp,
// test::assert_test_result(
cx.expr_call(
sp,
cx.expr_path(test_path("assert_test_result")),
vec![
// $test_fn()
cx.expr_call(sp, cx.expr_path(cx.path(sp, vec![item.ident])), vec![]), // )
],
), // }
), // )
],
)
};
let mut test_const = cx.item(sp, ast::Ident::new(item.ident.name, sp),
let mut test_const = cx.item(
sp,
ast::Ident::new(item.ident.name, sp),
vec![
// #[cfg(test)]
cx.attribute(attr::mk_list_item(ast::Ident::new(sym::cfg, attr_sp), vec![
attr::mk_nested_word_item(ast::Ident::new(sym::test, attr_sp))
])),
cx.attribute(attr::mk_list_item(
ast::Ident::new(sym::cfg, attr_sp),
vec![attr::mk_nested_word_item(ast::Ident::new(sym::test, attr_sp))],
)),
// #[rustc_test_marker]
cx.attribute(cx.meta_word(attr_sp, sym::rustc_test_marker)),
],
// const $ident: test::TestDescAndFn =
ast::ItemKind::Const(cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))),
ast::ItemKind::Const(
cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))),
// test::TestDescAndFn {
cx.expr_struct(sp, test_path("TestDescAndFn"), vec![
// desc: test::TestDesc {
field("desc", cx.expr_struct(sp, test_path("TestDesc"), vec![
// name: "path::to::test"
field("name", cx.expr_call(sp, cx.expr_path(test_path("StaticTestName")),
vec![
cx.expr_str(sp, Symbol::intern(&item_path(
// skip the name of the root module
&cx.current_expansion.module.mod_path[1..],
&item.ident
)))
])),
// ignore: true | false
field("ignore", cx.expr_bool(sp, should_ignore(&item))),
// allow_fail: true | false
field("allow_fail", cx.expr_bool(sp, should_fail(&item))),
// should_panic: ...
field("should_panic", match should_panic(cx, &item) {
// test::ShouldPanic::No
ShouldPanic::No => cx.expr_path(should_panic_path("No")),
// test::ShouldPanic::Yes
ShouldPanic::Yes(None) => cx.expr_path(should_panic_path("Yes")),
// test::ShouldPanic::YesWithMessage("...")
ShouldPanic::Yes(Some(sym)) => cx.expr_call(sp,
cx.expr_path(should_panic_path("YesWithMessage")),
vec![cx.expr_str(sp, sym)]),
}),
// test_type: ...
field("test_type", match test_type(cx) {
// test::TestType::UnitTest
TestType::UnitTest => cx.expr_path(test_type_path("UnitTest")),
// test::TestType::IntegrationTest
TestType::IntegrationTest => cx.expr_path(
test_type_path("IntegrationTest")
cx.expr_struct(
sp,
test_path("TestDescAndFn"),
vec![
// desc: test::TestDesc {
field(
"desc",
cx.expr_struct(
sp,
test_path("TestDesc"),
vec![
// name: "path::to::test"
field(
"name",
cx.expr_call(
sp,
cx.expr_path(test_path("StaticTestName")),
vec![cx.expr_str(
sp,
Symbol::intern(&item_path(
// skip the name of the root module
&cx.current_expansion.module.mod_path[1..],
&item.ident,
)),
)],
),
),
// ignore: true | false
field("ignore", cx.expr_bool(sp, should_ignore(&item))),
// allow_fail: true | false
field("allow_fail", cx.expr_bool(sp, should_fail(&item))),
// should_panic: ...
field(
"should_panic",
match should_panic(cx, &item) {
// test::ShouldPanic::No
ShouldPanic::No => cx.expr_path(should_panic_path("No")),
// test::ShouldPanic::Yes
ShouldPanic::Yes(None) => {
cx.expr_path(should_panic_path("Yes"))
}
// test::ShouldPanic::YesWithMessage("...")
ShouldPanic::Yes(Some(sym)) => cx.expr_call(
sp,
cx.expr_path(should_panic_path("YesWithMessage")),
vec![cx.expr_str(sp, sym)],
),
},
),
// test_type: ...
field(
"test_type",
match test_type(cx) {
// test::TestType::UnitTest
TestType::UnitTest => {
cx.expr_path(test_type_path("UnitTest"))
}
// test::TestType::IntegrationTest
TestType::IntegrationTest => {
cx.expr_path(test_type_path("IntegrationTest"))
}
// test::TestPath::Unknown
TestType::Unknown => {
cx.expr_path(test_type_path("Unknown"))
}
},
),
// },
],
),
// test::TestPath::Unknown
TestType::Unknown => cx.expr_path(test_type_path("Unknown")),
}),
// },
])),
// testfn: test::StaticTestFn(...) | test::StaticBenchFn(...)
field("testfn", test_fn)
// }
])
// }
));
test_const = test_const.map(|mut tc| { tc.vis.node = ast::VisibilityKind::Public; tc});
),
// testfn: test::StaticTestFn(...) | test::StaticBenchFn(...)
field("testfn", test_fn), // }
],
), // }
),
);
test_const = test_const.map(|mut tc| {
tc.vis.node = ast::VisibilityKind::Public;
tc
});
// extern crate test
let test_extern = cx.item(sp,
test_id,
vec![],
ast::ItemKind::ExternCrate(None)
);
let test_extern = cx.item(sp, test_id, vec![], ast::ItemKind::ExternCrate(None));
log::debug!("synthetic test item:\n{}\n", pprust::item_to_string(&test_const));
@ -224,13 +279,17 @@ pub fn expand_test_or_bench(
// The generated test case
Annotatable::Item(test_const),
// The original item
Annotatable::Item(item)
Annotatable::Item(item),
]
}
fn item_path(mod_path: &[ast::Ident], item_ident: &ast::Ident) -> String {
mod_path.iter().chain(iter::once(item_ident))
.map(|x| x.to_string()).collect::<Vec<String>>().join("::")
mod_path
.iter()
.chain(iter::once(item_ident))
.map(|x| x.to_string())
.collect::<Vec<String>>()
.join("::")
}
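
The joining logic in `item_path` is easy to see with plain strings; an illustrative sketch with identifiers replaced by `&str` purely for the example (the module and test names are hypothetical):

use std::iter;

fn item_path(mod_path: &[&str], item_ident: &str) -> String {
    mod_path
        .iter()
        .chain(iter::once(&item_ident))
        .map(|x| x.to_string())
        .collect::<Vec<String>>()
        .join("::")
}

fn main() {
    // A test `my_case` nested in `tests::integration` is reported as "tests::integration::my_case".
    assert_eq!(item_path(&["tests", "integration"], "my_case"), "tests::integration::my_case");
}
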
enum ShouldPanic {
@ -254,7 +313,8 @@ fn should_panic(cx: &ExtCtxt<'_>, i: &ast::Item) -> ShouldPanic {
match attr.meta_item_list() {
// Handle #[should_panic(expected = "foo")]
Some(list) => {
let msg = list.iter()
let msg = list
.iter()
.find(|mi| mi.check_name(sym::expected))
.and_then(|mi| mi.meta_item())
.and_then(|mi| mi.value_str());
@ -262,17 +322,21 @@ fn should_panic(cx: &ExtCtxt<'_>, i: &ast::Item) -> ShouldPanic {
sd.struct_span_warn(
attr.span,
"argument must be of the form: \
`expected = \"error message\"`"
).note("Errors in this attribute were erroneously \
`expected = \"error message\"`",
)
.note(
"Errors in this attribute were erroneously \
allowed and will become a hard error in a \
future release.").emit();
future release.",
)
.emit();
ShouldPanic::Yes(None)
} else {
ShouldPanic::Yes(msg)
}
},
}
// Handle #[should_panic] and #[should_panic = "expected"]
None => ShouldPanic::Yes(attr.value_str())
None => ShouldPanic::Yes(attr.value_str()),
}
}
None => ShouldPanic::No,
@ -312,27 +376,20 @@ fn has_test_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
let ref sd = cx.parse_sess.span_diagnostic;
if let ast::ItemKind::Fn(ref sig, ref generics, _) = i.kind {
if sig.header.unsafety == ast::Unsafety::Unsafe {
sd.span_err(
i.span,
"unsafe functions cannot be used for tests"
);
return false
sd.span_err(i.span, "unsafe functions cannot be used for tests");
return false;
}
if sig.header.asyncness.node.is_async() {
sd.span_err(
i.span,
"async functions cannot be used for tests"
);
return false
sd.span_err(i.span, "async functions cannot be used for tests");
return false;
}
// If the termination trait is active, the compiler will check that the output
// type implements the `Termination` trait as `libtest` enforces that.
let has_output = match sig.decl.output {
ast::FunctionRetTy::Default(..) => false,
ast::FunctionRetTy::Ty(ref t) if t.kind.is_unit() => false,
_ => true
_ => true,
};
if !sig.decl.inputs.is_empty() {
@ -344,15 +401,16 @@ fn has_test_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
(true, true) => {
sd.span_err(i.span, "functions using `#[should_panic]` must return `()`");
false
},
(true, false) => if !generics.params.is_empty() {
sd.span_err(i.span,
"functions used as tests must have signature fn() -> ()");
false
} else {
true
},
(false, _) => true
}
(true, false) => {
if !generics.params.is_empty() {
sd.span_err(i.span, "functions used as tests must have signature fn() -> ()");
false
} else {
true
}
}
(false, _) => true,
}
} else {
sd.span_err(i.span, "only functions may be used as tests");
@ -370,8 +428,11 @@ fn has_bench_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
};
if !has_sig {
cx.parse_sess.span_diagnostic.span_err(i.span, "functions used as benches must have \
signature `fn(&mut Bencher) -> impl Termination`");
cx.parse_sess.span_diagnostic.span_err(
i.span,
"functions used as benches must have \
signature `fn(&mut Bencher) -> impl Termination`",
);
}
has_sig


@ -6,16 +6,16 @@
#![deny(warnings)]
use toml;
use serde::Serialize;
use toml;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::env;
use std::fs::{self, File};
use std::io::{self, Read, Write};
use std::path::{PathBuf, Path};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::collections::HashMap;
static HOSTS: &[&str] = &[
"aarch64-unknown-linux-gnu",
@ -154,10 +154,7 @@ static DOCS_TARGETS: &[&str] = &[
"x86_64-unknown-linux-gnu",
];
static MINGW: &[&str] = &[
"i686-pc-windows-gnu",
"x86_64-pc-windows-gnu",
];
static MINGW: &[&str] = &["i686-pc-windows-gnu", "x86_64-pc-windows-gnu"];
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
@ -193,7 +190,9 @@ struct Target {
}
impl Target {
fn unavailable() -> Self { Self::default() }
fn unavailable() -> Self {
Self::default()
}
}
#[derive(Serialize)]
@ -209,10 +208,12 @@ impl Component {
}
macro_rules! t {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => panic!("{} failed with {}", stringify!($e), e),
})
($e:expr) => {
match $e {
Ok(e) => e,
Err(e) => panic!("{} failed with {}", stringify!($e), e),
}
};
}
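
For illustration, `t!` unwraps a `Result` and panics with the stringified expression on failure. A minimal standalone sketch of how it reads at a call site (the file name is hypothetical):

macro_rules! t {
    ($e:expr) => {
        match $e {
            Ok(e) => e,
            Err(e) => panic!("{} failed with {}", stringify!($e), e),
        }
    };
}

fn main() {
    // Panics with `std::fs::read_to_string("...") failed with ...` if the file is missing.
    let contents = t!(std::fs::read_to_string("channel-rust-nightly.toml"));
    println!("read {} bytes", contents.len());
}
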
struct Builder {
@ -323,10 +324,21 @@ fn main() {
miri_git_commit_hash: None,
should_sign,
}.build();
}
.build();
}
enum PkgType { RustSrc, Cargo, Rls, Clippy, Rustfmt, LlvmTools, Lldb, Miri, Other }
enum PkgType {
RustSrc,
Cargo,
Rls,
Clippy,
Rustfmt,
LlvmTools,
Lldb,
Miri,
Other,
}
impl PkgType {
fn from_component(component: &str) -> Self {
@ -362,8 +374,8 @@ impl Builder {
self.rls_git_commit_hash = self.git_commit_hash("rls", "x86_64-unknown-linux-gnu");
self.clippy_git_commit_hash = self.git_commit_hash("clippy", "x86_64-unknown-linux-gnu");
self.rustfmt_git_commit_hash = self.git_commit_hash("rustfmt", "x86_64-unknown-linux-gnu");
self.llvm_tools_git_commit_hash = self.git_commit_hash("llvm-tools",
"x86_64-unknown-linux-gnu");
self.llvm_tools_git_commit_hash =
self.git_commit_hash("llvm-tools", "x86_64-unknown-linux-gnu");
self.lldb_git_commit_hash = self.git_commit_hash("lldb", "x86_64-unknown-linux-gnu");
self.miri_git_commit_hash = self.git_commit_hash("miri", "x86_64-unknown-linux-gnu");
@ -381,11 +393,14 @@ impl Builder {
/// Right now, we do this only for Miri.
fn check_toolstate(&mut self) {
let toolstates: Option<HashMap<String, String>> =
File::open(self.input.join("toolstates-linux.json")).ok()
File::open(self.input.join("toolstates-linux.json"))
.ok()
.and_then(|f| serde_json::from_reader(&f).ok());
let toolstates = toolstates.unwrap_or_else(|| {
println!("WARNING: `toolstates-linux.json` missing/malformed; \
assuming all tools failed");
println!(
"WARNING: `toolstates-linux.json` missing/malformed; \
assuming all tools failed"
);
HashMap::default() // Use empty map if anything went wrong.
});
// Mark some tools as missing based on toolstate.
@ -442,16 +457,36 @@ impl Builder {
fn add_profiles_to(&mut self, manifest: &mut Manifest) {
let mut profile = |name, pkgs| self.profile(name, &mut manifest.profiles, pkgs);
profile("minimal", &["rustc", "cargo", "rust-std", "rust-mingw"]);
profile("default", &[
"rustc", "cargo", "rust-std", "rust-mingw",
"rust-docs", "rustfmt-preview", "clippy-preview"
]);
profile("complete", &[
"rustc", "cargo", "rust-std", "rust-mingw",
"rust-docs", "rustfmt-preview", "clippy-preview",
"rls-preview", "rust-src", "llvm-tools-preview",
"lldb-preview", "rust-analysis", "miri-preview"
]);
profile(
"default",
&[
"rustc",
"cargo",
"rust-std",
"rust-mingw",
"rust-docs",
"rustfmt-preview",
"clippy-preview",
],
);
profile(
"complete",
&[
"rustc",
"cargo",
"rust-std",
"rust-mingw",
"rust-docs",
"rustfmt-preview",
"clippy-preview",
"rls-preview",
"rust-src",
"llvm-tools-preview",
"lldb-preview",
"rust-analysis",
"miri-preview",
],
);
// The compiler libraries are not stable for end users, and they're also huge, so we only
// `rustc-dev` for nightly users, and only in the "complete" profile. It's still possible
@ -462,10 +497,9 @@ impl Builder {
}
fn add_renames_to(&self, manifest: &mut Manifest) {
let mut rename = |from: &str, to: &str| manifest.renames.insert(
from.to_owned(),
Rename { to: to.to_owned() }
);
let mut rename = |from: &str, to: &str| {
manifest.renames.insert(from.to_owned(), Rename { to: to.to_owned() })
};
rename("rls", "rls-preview");
rename("rustfmt", "rustfmt-preview");
rename("clippy", "clippy-preview");
@ -474,10 +508,11 @@ impl Builder {
fn rust_package(&mut self, manifest: &Manifest) -> Package {
let mut pkg = Package {
version: self.cached_version("rust")
.as_ref()
.expect("Couldn't find Rust version")
.clone(),
version: self
.cached_version("rust")
.as_ref()
.expect("Couldn't find Rust version")
.clone(),
git_commit_hash: self.cached_git_commit_hash("rust").clone(),
target: BTreeMap::new(),
};
@ -486,7 +521,7 @@ impl Builder {
pkg.target.insert(host.to_string(), target);
} else {
pkg.target.insert(host.to_string(), Target::unavailable());
continue
continue;
}
}
pkg
@ -527,14 +562,12 @@ impl Builder {
]);
extensions.extend(
TARGETS.iter()
TARGETS
.iter()
.filter(|&&target| target != host)
.map(|target| Component::from_str("rust-std", target))
);
extensions.extend(
HOSTS.iter()
.map(|target| Component::from_str("rustc-dev", target))
.map(|target| Component::from_str("rust-std", target)),
);
extensions.extend(HOSTS.iter().map(|target| Component::from_str("rustc-dev", target)));
extensions.push(Component::from_str("rust-src", "*"));
// If the components/extensions don't actually exist for this
@ -542,7 +575,7 @@ impl Builder {
// lists.
let has_component = |c: &Component| {
if c.target == "*" {
return true
return true;
}
let pkg = match manifest.pkg.get(&c.pkg) {
Some(p) => p,
@ -564,26 +597,29 @@ impl Builder {
})
}
fn profile(&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str]) {
fn profile(
&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str],
) {
dst.insert(profile_name.to_owned(), pkgs.iter().map(|s| (*s).to_owned()).collect());
}
fn extend_profile(&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str]) {
dst.get_mut(profile_name).expect("existing profile")
fn extend_profile(
&mut self,
profile_name: &str,
dst: &mut BTreeMap<String, Vec<String>>,
pkgs: &[&str],
) {
dst.get_mut(profile_name)
.expect("existing profile")
.extend(pkgs.iter().map(|s| (*s).to_owned()));
}
fn package(&mut self,
pkgname: &str,
dst: &mut BTreeMap<String, Package>,
targets: &[&str]) {
let (version, mut is_present) = self.cached_version(pkgname)
fn package(&mut self, pkgname: &str, dst: &mut BTreeMap<String, Package>, targets: &[&str]) {
let (version, mut is_present) = self
.cached_version(pkgname)
.as_ref()
.cloned()
.map(|version| (version, true))
@ -594,46 +630,52 @@ impl Builder {
is_present = false; // Pretend the component is entirely missing.
}
let targets = targets.iter().map(|name| {
if is_present {
// The component generally exists, but it might still be missing for this target.
let filename = self.filename(pkgname, name);
let digest = match self.digests.remove(&filename) {
Some(digest) => digest,
// This component does not exist for this target -- skip it.
None => return (name.to_string(), Target::unavailable()),
};
let xz_filename = filename.replace(".tar.gz", ".tar.xz");
let xz_digest = self.digests.remove(&xz_filename);
let targets = targets
.iter()
.map(|name| {
if is_present {
// The component generally exists, but it might still be missing for this target.
let filename = self.filename(pkgname, name);
let digest = match self.digests.remove(&filename) {
Some(digest) => digest,
// This component does not exist for this target -- skip it.
None => return (name.to_string(), Target::unavailable()),
};
let xz_filename = filename.replace(".tar.gz", ".tar.xz");
let xz_digest = self.digests.remove(&xz_filename);
(name.to_string(), Target {
available: true,
url: Some(self.url(&filename)),
hash: Some(digest),
xz_url: xz_digest.as_ref().map(|_| self.url(&xz_filename)),
xz_hash: xz_digest,
components: None,
extensions: None,
})
} else {
// If the component is not present for this build add it anyway but mark it as
// unavailable -- this way rustup won't allow upgrades without --force
(name.to_string(), Target::unavailable())
}
}).collect();
(
name.to_string(),
Target {
available: true,
url: Some(self.url(&filename)),
hash: Some(digest),
xz_url: xz_digest.as_ref().map(|_| self.url(&xz_filename)),
xz_hash: xz_digest,
components: None,
extensions: None,
},
)
} else {
// If the component is not present for this build add it anyway but mark it as
// unavailable -- this way rustup won't allow upgrades without --force
(name.to_string(), Target::unavailable())
}
})
.collect();
dst.insert(pkgname.to_string(), Package {
version,
git_commit_hash: self.cached_git_commit_hash(pkgname).clone(),
target: targets,
});
dst.insert(
pkgname.to_string(),
Package {
version,
git_commit_hash: self.cached_git_commit_hash(pkgname).clone(),
target: targets,
},
);
}
fn url(&self, filename: &str) -> String {
format!("{}/{}/{}",
self.s3_address,
self.date,
filename)
format!("{}/{}/{}", self.s3_address, self.date, filename)
}
fn filename(&self, component: &str, target: &str) -> String {
@ -689,14 +731,14 @@ impl Builder {
fn untar<F>(&self, component: &str, target: &str, dir: F) -> Option<String>
where
F: FnOnce(String) -> String
F: FnOnce(String) -> String,
{
let mut cmd = Command::new("tar");
let filename = self.filename(component, target);
cmd.arg("xf")
.arg(self.input.join(&filename))
.arg(dir(filename.replace(".tar.gz", "")))
.arg("-O");
.arg(self.input.join(&filename))
.arg(dir(filename.replace(".tar.gz", "")))
.arg("-O");
let output = t!(cmd.output());
if output.status.success() {
Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
@ -707,10 +749,11 @@ impl Builder {
fn hash(&self, path: &Path) -> String {
let sha = t!(Command::new("shasum")
.arg("-a").arg("256")
.arg(path.file_name().unwrap())
.current_dir(path.parent().unwrap())
.output());
.arg("-a")
.arg("256")
.arg(path.file_name().unwrap())
.current_dir(path.parent().unwrap())
.output());
assert!(sha.status.success());
let filename = path.file_name().unwrap().to_str().unwrap();
@ -734,11 +777,15 @@ impl Builder {
.arg("--no-tty")
.arg("--yes")
.arg("--batch")
.arg("--passphrase-fd").arg("0")
.arg("--personal-digest-preferences").arg("SHA512")
.arg("--passphrase-fd")
.arg("0")
.arg("--personal-digest-preferences")
.arg("SHA512")
.arg("--armor")
.arg("--output").arg(&asc)
.arg("--detach-sign").arg(path)
.arg("--output")
.arg(&asc)
.arg("--detach-sign")
.arg(path)
.stdin(Stdio::piped());
let mut child = t!(cmd.spawn());
t!(child.stdin.take().unwrap().write_all(self.gpg_passphrase.as_bytes()));
@ -748,8 +795,11 @@ impl Builder {
fn write_channel_files(&self, channel_name: &str, manifest: &Manifest) {
self.write(&toml::to_string(&manifest).unwrap(), channel_name, ".toml");
self.write(&manifest.date, channel_name, "-date.txt");
self.write(manifest.pkg["rust"].git_commit_hash.as_ref().unwrap(),
channel_name, "-git-commit-hash.txt");
self.write(
manifest.pkg["rust"].git_commit_hash.as_ref().unwrap(),
channel_name,
"-git-commit-hash.txt",
);
}
fn write(&self, contents: &str, channel_name: &str, suffix: &str) {