Auto merge of #33491 - arielb1:obligation-jungle, r=nikomatsakis

Replace the obligation forest with a graph

In the presence of caching, arbitrary nodes in the obligation forest can be merged, which makes it a general graph. Handle it as such, using cycle-detection algorithms in the processing.

I should do performance measurements sometime.

This was pretty much written as a proof-of-concept. Please help me write this in a less-ugly way. I should also add comments explaining what is going on.
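To illustrate the core of the new processing scheme: while walking the graph, a node is marked `OnDfsStack` for as long as it sits on the DFS stack, and reaching a marked node again means the stack slice from its first occurrence onward is a cycle, which then gets handed to `process_backedge`. Below is a minimal standalone sketch of that scheme (the `Graph` type and the node numbering are illustrative only, not the actual `ObligationForest` code):

```rust
// Standalone sketch of the cycle-detection scheme -- not the rustc code itself.
// A node is marked `OnStack` while it sits on the DFS stack; reaching an
// `OnStack` node again means the stack slice from its first occurrence onward
// is a cycle (compare `NodeState::OnDfsStack` and `process_backedge` below).
use std::cell::Cell;

#[derive(Copy, Clone)]
enum State { Unvisited, OnStack, Done }

struct Graph {
    // dependents[i] lists the nodes that node i depends on.
    dependents: Vec<Vec<usize>>,
    state: Vec<Cell<State>>,
}

impl Graph {
    fn new(dependents: Vec<Vec<usize>>) -> Graph {
        let n = dependents.len();
        Graph { dependents: dependents, state: vec![Cell::new(State::Unvisited); n] }
    }

    fn find_cycles_from(&self, stack: &mut Vec<usize>,
                        cycles: &mut Vec<Vec<usize>>, index: usize) {
        match self.state[index].get() {
            State::OnStack => {
                // `index` is already on the stack: everything from its first
                // occurrence up to the top of the stack forms a cycle.
                let start = stack.iter().rposition(|&n| n == index).unwrap();
                cycles.push(stack[start..].to_vec());
            }
            State::Unvisited => {
                self.state[index].set(State::OnStack);
                stack.push(index);
                for &dep in &self.dependents[index] {
                    self.find_cycles_from(stack, cycles, dep);
                }
                stack.pop();
                self.state[index].set(State::Done);
            }
            // Already fully processed; nothing to do.
            State::Done => {}
        }
    }
}

fn main() {
    // 0 -> 1 -> 2 -> 0 is a cycle; node 3 merely points into it.
    let g = Graph::new(vec![vec![1], vec![2], vec![0], vec![1]]);
    let (mut stack, mut cycles) = (Vec::new(), Vec::new());
    for node in 0..4 {
        g.find_cycles_from(&mut stack, &mut cycles, node);
    }
    assert_eq!(cycles, vec![vec![0, 1, 2]]);
    println!("cycles detected: {:?}", cycles);
}
```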

r? @nikomatsakis
bors 2016-05-16 18:39:59 -07:00
commit 786b26d7b4
18 changed files with 804 additions and 1048 deletions


@ -26,11 +26,11 @@ use super::{
use fmt_macros::{Parser, Piece, Position};
use hir::def_id::DefId;
use infer::{InferCtxt, TypeOrigin};
use ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, Ty, TyCtxt, TypeFoldable, TypeVariants};
use infer::{InferCtxt};
use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
use ty::fast_reject;
use ty::fold::TypeFolder;
use ty::subst::{self, ParamSpace, Subst};
use ty::subst::{self, Subst};
use util::nodemap::{FnvHashMap, FnvHashSet};
use std::cmp;
@ -61,128 +61,6 @@ impl<'a, 'gcx, 'tcx> TraitErrorKey<'tcx> {
}
}
// Enum used to differentiate the "big" and "little" weights.
enum Weight {
Coarse,
Precise,
}
trait AssociatedWeight {
fn get_weight(&self) -> (u32, u32);
}
impl<'a> AssociatedWeight for TypeVariants<'a> {
// Left number is for "global"/"big" weight and right number is for better precision.
fn get_weight(&self) -> (u32, u32) {
match *self {
TypeVariants::TyBool => (1, 1),
TypeVariants::TyChar => (1, 2),
TypeVariants::TyStr => (1, 3),
TypeVariants::TyInt(_) => (2, 1),
TypeVariants::TyUint(_) => (2, 2),
TypeVariants::TyFloat(_) => (2, 3),
TypeVariants::TyRawPtr(_) => (2, 4),
TypeVariants::TyEnum(_, _) => (3, 1),
TypeVariants::TyStruct(_, _) => (3, 2),
TypeVariants::TyBox(_) => (3, 3),
TypeVariants::TyTuple(_) => (3, 4),
TypeVariants::TyArray(_, _) => (4, 1),
TypeVariants::TySlice(_) => (4, 2),
TypeVariants::TyRef(_, _) => (5, 1),
TypeVariants::TyFnDef(_, _, _) => (5, 2),
TypeVariants::TyFnPtr(_) => (5, 3),
TypeVariants::TyTrait(_) => (6, 1),
TypeVariants::TyClosure(_, _) => (7, 1),
TypeVariants::TyProjection(_) => (8, 1),
TypeVariants::TyParam(_) => (8, 2),
TypeVariants::TyInfer(_) => (8, 3),
TypeVariants::TyError => (9, 1),
}
}
}
// The "closer" the types are, the lesser the weight.
fn get_weight_diff(a: &ty::TypeVariants, b: &TypeVariants, weight: Weight) -> u32 {
let (w1, w2) = match weight {
Weight::Coarse => (a.get_weight().0, b.get_weight().0),
Weight::Precise => (a.get_weight().1, b.get_weight().1),
};
if w1 < w2 {
w2 - w1
} else {
w1 - w2
}
}
// Once we have "globally matching" types, we need to run another filter on them.
//
// In the function `get_best_matching_type`, we collected the types which are the
// most likely to match the type we're looking for. This second filter now tries
// to pick, if possible, the single type which matches best.
//
// For example, the trait expects a `usize` and here you have `u32` and `i32`.
// Obviously, the "correct" one is `u32`.
fn filter_matching_types<'tcx>(weights: &[(usize, u32)],
imps: &[(DefId, subst::Substs<'tcx>)],
trait_types: &[ty::Ty<'tcx>])
-> usize {
let matching_weight = weights[0].1;
let iter = weights.iter().filter(|&&(_, weight)| weight == matching_weight);
let mut filtered_weights = vec!();
for &(pos, _) in iter {
let mut weight = 0;
for (type_to_compare, original_type) in imps[pos].1
.types
.get_slice(ParamSpace::TypeSpace)
.iter()
.zip(trait_types.iter()) {
weight += get_weight_diff(&type_to_compare.sty, &original_type.sty, Weight::Precise);
}
filtered_weights.push((pos, weight));
}
filtered_weights.sort_by(|a, b| a.1.cmp(&b.1));
filtered_weights[0].0
}
// Here, we run the "big" filter. Little example:
//
// We receive a `String`, a `u32` and an `i32`.
// The trait expected a `usize`.
// From a human point of view, it's easy to determine that `String` doesn't correspond to
// the expected type at all, whereas `u32` and `i32` could.
//
// This first filter intends to keep only the types which match best.
fn get_best_matching_type<'tcx>(imps: &[(DefId, subst::Substs<'tcx>)],
trait_types: &[ty::Ty<'tcx>]) -> usize {
let mut weights = vec!();
for (pos, imp) in imps.iter().enumerate() {
let mut weight = 0;
for (type_to_compare, original_type) in imp.1
.types
.get_slice(ParamSpace::TypeSpace)
.iter()
.zip(trait_types.iter()) {
weight += get_weight_diff(&type_to_compare.sty, &original_type.sty, Weight::Coarse);
}
weights.push((pos, weight));
}
weights.sort_by(|a, b| a.1.cmp(&b.1));
if weights[0].1 == weights[1].1 {
filter_matching_types(&weights, &imps, trait_types)
} else {
weights[0].0
}
}
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
pub fn report_fulfillment_errors(&self, errors: &Vec<FulfillmentError<'tcx>>) {
for error in errors {
@ -272,72 +150,53 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
substs
}
fn get_current_failing_impl(&self,
trait_ref: &TraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>)
-> Option<(DefId, subst::Substs<'tcx>)> {
let simp = fast_reject::simplify_type(self.tcx,
trait_ref.self_ty(),
true);
let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id);
fn impl_with_self_type_of(&self,
trait_ref: ty::PolyTraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>)
-> Option<DefId>
{
let tcx = self.tcx;
let mut result = None;
let mut ambiguous = false;
match simp {
Some(_) => {
let mut matching_impls = Vec::new();
trait_def.for_each_impl(self.tcx, |def_id| {
let imp = self.tcx.impl_trait_ref(def_id).unwrap();
let substs = self.impl_substs(def_id, obligation.clone());
let imp = imp.subst(self.tcx, &substs);
let trait_self_ty = tcx.erase_late_bound_regions(&trait_ref).self_ty();
if self.eq_types(true,
TypeOrigin::Misc(obligation.cause.span),
trait_ref.self_ty(),
imp.self_ty()).is_ok() {
matching_impls.push((def_id, imp.substs.clone()));
}
});
if matching_impls.len() == 0 {
None
} else if matching_impls.len() == 1 {
Some(matching_impls[0].clone())
} else {
let end = trait_ref.input_types().len() - 1;
// we need to determine which type is the good one!
Some(matching_impls[get_best_matching_type(&matching_impls,
&trait_ref.input_types()[0..end])]
.clone())
if trait_self_ty.is_ty_var() {
return None;
}
self.tcx.lookup_trait_def(trait_ref.def_id())
.for_each_relevant_impl(self.tcx, trait_self_ty, |def_id| {
let impl_self_ty = tcx
.impl_trait_ref(def_id)
.unwrap()
.self_ty()
.subst(tcx, &self.impl_substs(def_id, obligation.clone()));
if !tcx.has_attr(def_id, "rustc_on_unimplemented") {
return;
}
},
None => None,
}
}
fn find_attr(&self,
def_id: DefId,
attr_name: &str)
-> Option<ast::Attribute> {
for item in self.tcx.get_attrs(def_id).iter() {
if item.check_name(attr_name) {
return Some(item.clone());
}
if let Ok(..) = self.can_equate(&trait_self_ty, &impl_self_ty) {
ambiguous = result.is_some();
result = Some(def_id);
}
});
if ambiguous {
None
} else {
result
}
None
}
fn on_unimplemented_note(&self,
trait_ref: ty::PolyTraitRef<'tcx>,
obligation: &PredicateObligation<'tcx>) -> Option<String> {
let def_id = self.impl_with_self_type_of(trait_ref, obligation)
.unwrap_or(trait_ref.def_id());
let trait_ref = trait_ref.skip_binder();
let def_id = match self.get_current_failing_impl(trait_ref, obligation) {
Some((def_id, _)) => {
if let Some(_) = self.find_attr(def_id, "rustc_on_unimplemented") {
def_id
} else {
trait_ref.def_id
}
},
None => trait_ref.def_id,
};
let span = obligation.cause.span;
let mut report = None;
for item in self.tcx.get_attrs(def_id).iter() {
@ -511,115 +370,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
/// that we can give a more helpful error message (and, in particular,
/// we do not suggest increasing the overflow limit, which is not
/// going to help).
pub fn report_overflow_error_cycle(&self, cycle: &Vec<PredicateObligation<'tcx>>) -> ! {
assert!(cycle.len() > 1);
debug!("report_overflow_error_cycle(cycle length = {})", cycle.len());
let cycle = self.resolve_type_vars_if_possible(cycle);
pub fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> ! {
let cycle = self.resolve_type_vars_if_possible(&cycle.to_owned());
assert!(cycle.len() > 0);
debug!("report_overflow_error_cycle: cycle={:?}", cycle);
assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate);
self.try_report_overflow_error_type_of_infinite_size(&cycle);
self.report_overflow_error(&cycle[0], false);
}
/// If a cycle results from evaluating whether something is Sized, that
/// is a particular special case that always results from a struct or
/// enum definition that lacks indirection (e.g., `struct Foo { x: Foo
/// }`). We wish to report a targeted error for this case.
pub fn try_report_overflow_error_type_of_infinite_size(&self,
cycle: &[PredicateObligation<'tcx>])
{
let sized_trait = match self.tcx.lang_items.sized_trait() {
Some(v) => v,
None => return,
};
let top_is_sized = {
match cycle[0].predicate {
ty::Predicate::Trait(ref data) => data.def_id() == sized_trait,
_ => false,
}
};
if !top_is_sized {
return;
}
// The only way to have a type of infinite size is to have,
// somewhere, a struct/enum type involved. Identify all such types
// and report the cycle to the user.
let struct_enum_tys: Vec<_> =
cycle.iter()
.flat_map(|obligation| match obligation.predicate {
ty::Predicate::Trait(ref data) => {
assert_eq!(data.def_id(), sized_trait);
let self_ty = data.skip_binder().trait_ref.self_ty(); // (*)
// (*) ok to skip binder because this is just
// error reporting and regions don't really
// matter
match self_ty.sty {
ty::TyEnum(..) | ty::TyStruct(..) => Some(self_ty),
_ => None,
}
}
_ => {
span_bug!(obligation.cause.span,
"Sized cycle involving non-trait-ref: {:?}",
obligation.predicate);
}
})
.collect();
assert!(!struct_enum_tys.is_empty());
// This is a bit tricky. We want to pick a "main type" in the
// listing that is local to the current crate, so we can give a
// good span to the user. But it might not be the first one in our
// cycle list. So find the first one that is local and then
// rotate.
let (main_index, main_def_id) =
struct_enum_tys.iter()
.enumerate()
.filter_map(|(index, ty)| match ty.sty {
ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _)
if adt_def.did.is_local() =>
Some((index, adt_def.did)),
_ =>
None,
})
.next()
.unwrap(); // should always be SOME local type involved!
// Rotate so that the "main" type is at index 0.
let struct_enum_tys: Vec<_> =
struct_enum_tys.iter()
.cloned()
.skip(main_index)
.chain(struct_enum_tys.iter().cloned().take(main_index))
.collect();
let tcx = self.tcx;
let mut err = tcx.recursive_type_with_infinite_size_error(main_def_id);
let len = struct_enum_tys.len();
if len > 2 {
err.note(&format!("type `{}` is embedded within `{}`...",
struct_enum_tys[0],
struct_enum_tys[1]));
for &next_ty in &struct_enum_tys[1..len-1] {
err.note(&format!("...which in turn is embedded within `{}`...", next_ty));
}
err.note(&format!("...which in turn is embedded within `{}`, \
completing the cycle.",
struct_enum_tys[len-1]));
}
err.emit();
self.tcx.sess.abort_if_errors();
bug!();
}
pub fn report_selection_error(&self,
obligation: &PredicateObligation<'tcx>,
error: &SelectionError<'tcx>,


@ -10,13 +10,14 @@
use dep_graph::DepGraph;
use infer::{InferCtxt, InferOk};
use ty::{self, Ty, TyCtxt, TypeFoldable, ToPolyTraitRef};
use rustc_data_structures::obligation_forest::{Backtrace, ObligationForest, Error};
use std::iter;
use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, TyCtxt};
use rustc_data_structures::obligation_forest::{ObligationForest, Error};
use rustc_data_structures::obligation_forest::{ForestObligation, ObligationProcessor};
use std::marker::PhantomData;
use std::mem;
use syntax::ast;
use util::common::ErrorReported;
use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
use util::nodemap::{FnvHashSet, NodeMap};
use super::CodeAmbiguity;
use super::CodeProjectionError;
@ -29,16 +30,17 @@ use super::project;
use super::select::SelectionContext;
use super::Unimplemented;
impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> {
type Predicate = ty::Predicate<'tcx>;
fn as_predicate(&self) -> &Self::Predicate { &self.obligation.predicate }
}
pub struct GlobalFulfilledPredicates<'tcx> {
set: FnvHashSet<ty::PolyTraitPredicate<'tcx>>,
dep_graph: DepGraph,
}
#[derive(Debug)]
pub struct LocalFulfilledPredicates<'tcx> {
set: FnvHashSet<ty::Predicate<'tcx>>
}
/// The fulfillment context is used to drive trait resolution. It
/// consists of a list of obligations that must be (eventually)
/// satisfied. The job is to track which are satisfied, which yielded
@ -50,23 +52,9 @@ pub struct LocalFulfilledPredicates<'tcx> {
/// method `select_all_or_error` can be used to report any remaining
/// ambiguous cases as errors.
pub struct FulfillmentContext<'tcx> {
// a simple cache that aims to cache *exact duplicate obligations*
// and avoid adding them twice. This serves a different purpose
// than the `SelectionCache`: it avoids duplicate errors and
// permits recursive obligations, which are often generated from
// traits like `Send` et al.
//
// Note that because of type inference, a predicate can still
// occur twice in the predicates list, for example when 2
// initially-distinct type variables are unified after being
// inserted. Deduplicating the predicate set on selection had a
// significant performance cost the last time I checked.
duplicate_set: LocalFulfilledPredicates<'tcx>,
// A list of all obligations that have been registered with this
// fulfillment context.
predicates: ObligationForest<PendingPredicateObligation<'tcx>,
LocalFulfilledPredicates<'tcx>>,
predicates: ObligationForest<PendingPredicateObligation<'tcx>>,
// A list of new obligations due to RFC1592.
rfc1592_obligations: Vec<PredicateObligation<'tcx>>,
@ -115,7 +103,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
/// Creates a new fulfillment context.
pub fn new() -> FulfillmentContext<'tcx> {
FulfillmentContext {
duplicate_set: LocalFulfilledPredicates::new(),
predicates: ObligationForest::new(),
rfc1592_obligations: Vec::new(),
region_obligations: NodeMap(),
@ -184,19 +171,15 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
// debug output much nicer to read and so on.
let obligation = infcx.resolve_type_vars_if_possible(&obligation);
assert!(!obligation.has_escaping_regions());
if self.is_duplicate_or_add(infcx.tcx, &obligation.predicate) {
debug!("register_predicate_obligation({:?}) -- already seen, skip", obligation);
return;
if infcx.tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate)
{
return
}
debug!("register_predicate_obligation({:?})", obligation);
let obligation = PendingPredicateObligation {
self.predicates.register_obligation(PendingPredicateObligation {
obligation: obligation,
stalled_on: vec![]
};
self.predicates.push_tree(obligation, LocalFulfilledPredicates::new());
});
}
pub fn register_rfc1592_obligation(&mut self,
@ -261,32 +244,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
self.predicates.pending_obligations()
}
fn is_duplicate_or_add(&mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
predicate: &ty::Predicate<'tcx>)
-> bool {
// For "global" predicates -- that is, predicates that don't
// involve type parameters, inference variables, or regions
// other than 'static -- we can check the cache in the tcx,
// which allows us to leverage work from other threads. Note
// that we don't add anything to this cache yet (unlike the
// local cache). This is because the tcx cache maintains the
// invariant that it only contains things that have been
// proven, and we have not yet proven that `predicate` holds.
if tcx.fulfilled_predicates.borrow().check_duplicate(predicate) {
return true;
}
// If `predicate` is not global, or not present in the tcx
// cache, we can still check for it in our local cache and add
// it if not present. Note that if we find this predicate in
// the local cache we can stop immediately, without reporting
// any errors, even though we don't know yet if it is
// true. This is because, while we don't yet know if the
// predicate holds, we know that this same fulfillment context
// already is in the process of finding out.
self.duplicate_set.is_duplicate_or_add(predicate)
}
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
fn select(&mut self, selcx: &mut SelectionContext<'a, 'gcx, 'tcx>)
@ -299,18 +256,11 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
debug!("select: starting another iteration");
// Process pending obligations.
let outcome = {
let region_obligations = &mut self.region_obligations;
let rfc1592_obligations = &mut self.rfc1592_obligations;
self.predicates.process_obligations(
|obligation, tree, backtrace| process_predicate(selcx,
tree,
obligation,
backtrace,
region_obligations,
rfc1592_obligations))
};
let outcome = self.predicates.process_obligations(&mut FulfillProcessor {
selcx: selcx,
region_obligations: &mut self.region_obligations,
rfc1592_obligations: &mut self.rfc1592_obligations
});
debug!("select: outcome={:?}", outcome);
// these are obligations that were proven to be true.
@ -341,177 +291,40 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
}
}
/// Like `process_predicate1`, but wrap result into a pending predicate.
fn process_predicate<'a, 'gcx, 'tcx>(
selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
tree_cache: &mut LocalFulfilledPredicates<'tcx>,
pending_obligation: &mut PendingPredicateObligation<'tcx>,
backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
rfc1592_obligations: &mut Vec<PredicateObligation<'tcx>>)
-> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
FulfillmentErrorCode<'tcx>>
{
match process_predicate1(selcx, pending_obligation, region_obligations,
rfc1592_obligations) {
Ok(Some(v)) => process_child_obligations(selcx,
tree_cache,
&pending_obligation.obligation,
backtrace,
v),
Ok(None) => Ok(None),
Err(e) => Err(e)
}
struct FulfillProcessor<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> {
selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
region_obligations: &'a mut NodeMap<Vec<RegionObligation<'tcx>>>,
rfc1592_obligations: &'a mut Vec<PredicateObligation<'tcx>>
}
fn process_child_obligations<'a, 'gcx, 'tcx>(
selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
tree_cache: &mut LocalFulfilledPredicates<'tcx>,
pending_obligation: &PredicateObligation<'tcx>,
backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
child_obligations: Vec<PredicateObligation<'tcx>>)
-> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
FulfillmentErrorCode<'tcx>>
{
// FIXME(#30977) The code below is designed to detect (and
// permit) DAGs, while still ensuring that the reasoning
// is acyclic. However, it does a few things
// suboptimally. For example, it refreshes type variables
// a lot, probably more than needed, but also less than
// you might want.
//
// - more than needed: I want to be very sure we don't
// accidentally treat a cycle as a DAG, so I am
// refreshing type variables as we walk the ancestors;
// but we are going to repeat this a lot, which is
// sort of silly, and it would be nicer to refresh
// them *in place* so that later predicate processing
// can benefit from the same work;
// - less than you might want: we only add items in the cache here,
// but maybe we learn more about type variables and could add them into
// the cache later on.
impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, 'tcx> {
type Obligation = PendingPredicateObligation<'tcx>;
type Error = FulfillmentErrorCode<'tcx>;
let tcx = selcx.tcx();
let mut ancestor_set = AncestorSet::new(&backtrace);
let pending_predicate_obligations: Vec<_> =
child_obligations
.into_iter()
.filter_map(|obligation| {
// Probably silly, but remove any inference
// variables. This is actually crucial to the ancestor
// check marked (*) below, but it's not clear that it
// makes sense to ALWAYS do it.
let obligation = selcx.infcx().resolve_type_vars_if_possible(&obligation);
// Screen out obligations that we know globally
// are true.
if tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
return None;
}
// Check whether this obligation appears
// somewhere else in the tree. If not, we have to
// process it for sure.
if !tree_cache.is_duplicate_or_add(&obligation.predicate) {
return Some(PendingPredicateObligation {
obligation: obligation,
stalled_on: vec![]
});
}
debug!("process_child_obligations: duplicate={:?}",
obligation.predicate);
// OK, the obligation appears elsewhere in the tree.
// This is either a fatal error or else something we can
// ignore. If the obligation appears in our *ancestors*
// (rather than some more distant relative), that
// indicates a cycle. Cycles are either considered
// resolved (if this is a coinductive case) or a fatal
// error.
if let Some(index) = ancestor_set.has(selcx.infcx(), &obligation.predicate) {
// ~~~ (*) see above
debug!("process_child_obligations: cycle index = {}", index);
let backtrace = backtrace.clone();
let cycle: Vec<_> =
iter::once(&obligation)
.chain(Some(pending_obligation))
.chain(backtrace.take(index + 1).map(|p| &p.obligation))
.cloned()
.collect();
if coinductive_match(selcx, &cycle) {
debug!("process_child_obligations: coinductive match");
None
} else {
selcx.infcx().report_overflow_error_cycle(&cycle);
}
} else {
// Not a cycle. Just ignore this obligation then,
// we're already in the process of proving it.
debug!("process_child_obligations: not a cycle");
None
}
})
.collect();
Ok(Some(pending_predicate_obligations))
}
struct AncestorSet<'b, 'tcx: 'b> {
populated: bool,
cache: FnvHashMap<ty::Predicate<'tcx>, usize>,
backtrace: Backtrace<'b, PendingPredicateObligation<'tcx>>,
}
impl<'a, 'b, 'gcx, 'tcx> AncestorSet<'b, 'tcx> {
fn new(backtrace: &Backtrace<'b, PendingPredicateObligation<'tcx>>) -> Self {
AncestorSet {
populated: false,
cache: FnvHashMap(),
backtrace: backtrace.clone(),
}
fn process_obligation(&mut self,
obligation: &mut Self::Obligation)
-> Result<Option<Vec<Self::Obligation>>, Self::Error>
{
process_predicate(self.selcx,
obligation,
self.region_obligations,
self.rfc1592_obligations)
.map(|os| os.map(|os| os.into_iter().map(|o| PendingPredicateObligation {
obligation: o,
stalled_on: vec![]
}).collect()))
}
/// Checks whether any of the ancestors in the backtrace are equal
/// to `predicate` (`predicate` is assumed to be fully
/// type-resolved). Returns `None` if not; otherwise, returns
/// `Some` with the index within the backtrace.
fn has(&mut self,
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
predicate: &ty::Predicate<'tcx>)
-> Option<usize> {
// the first time, we have to populate the cache
if !self.populated {
let backtrace = self.backtrace.clone();
for (index, ancestor) in backtrace.enumerate() {
// Ugh. This just feels ridiculously
// inefficient. But we need to compare
// predicates without being concerned about
// the vagaries of type inference, so for now
// just ensure that they are always
// up-to-date. (I suppose we could just use a
// snapshot and check if they are unifiable?)
let resolved_predicate =
infcx.resolve_type_vars_if_possible(
&ancestor.obligation.predicate);
// Though we try to avoid it, it can happen that a
// cycle already exists in the predecessors. This
// happens if the type variables were not fully known
// at the time that the ancestors were pushed. We'll
// just ignore such cycles for now, on the premise
// that they will repeat themselves and we'll deal
// with them properly then.
self.cache.entry(resolved_predicate)
.or_insert(index);
}
self.populated = true;
fn process_backedge<'c, I>(&mut self, cycle: I,
_marker: PhantomData<&'c PendingPredicateObligation<'tcx>>)
where I: Clone + Iterator<Item=&'c PendingPredicateObligation<'tcx>>,
{
if coinductive_match(self.selcx, cycle.clone()) {
debug!("process_child_obligations: coinductive match");
} else {
let cycle : Vec<_> = cycle.map(|c| c.obligation.clone()).collect();
self.selcx.infcx().report_overflow_error_cycle(&cycle);
}
self.cache.get(predicate).cloned()
}
}
@ -533,7 +346,7 @@ fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 't
/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true
/// - `Ok(None)` if we don't have enough info to be sure
/// - `Err` if the predicate does not hold
fn process_predicate1<'a, 'gcx, 'tcx>(
fn process_predicate<'a, 'gcx, 'tcx>(
selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
pending_obligation: &mut PendingPredicateObligation<'tcx>,
region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
@ -725,25 +538,22 @@ fn process_predicate1<'a, 'gcx, 'tcx>(
/// - it also appears in the backtrace at some position `X`; and,
/// - all the predicates at positions `X..` between `X` and the top are
/// also defaulted traits.
fn coinductive_match<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
cycle: &[PredicateObligation<'tcx>])
-> bool
fn coinductive_match<'a,'c,'gcx,'tcx,I>(selcx: &mut SelectionContext<'a,'gcx,'tcx>,
cycle: I) -> bool
where I: Iterator<Item=&'c PendingPredicateObligation<'tcx>>,
'tcx: 'c
{
let len = cycle.len();
assert_eq!(cycle[0].predicate, cycle[len - 1].predicate);
cycle[0..len-1]
.iter()
let mut cycle = cycle;
cycle
.all(|bt_obligation| {
let result = coinductive_obligation(selcx, bt_obligation);
let result = coinductive_obligation(selcx, &bt_obligation.obligation);
debug!("coinductive_match: bt_obligation={:?} coinductive={}",
bt_obligation, result);
result
})
}
fn coinductive_obligation<'a, 'gcx, 'tcx>(selcx: &SelectionContext<'a, 'gcx, 'tcx>,
fn coinductive_obligation<'a,'gcx,'tcx>(selcx: &SelectionContext<'a,'gcx,'tcx>,
obligation: &PredicateObligation<'tcx>)
-> bool {
match obligation.predicate {
@ -774,25 +584,6 @@ fn register_region_obligation<'tcx>(t_a: Ty<'tcx>,
}
impl<'tcx> LocalFulfilledPredicates<'tcx> {
pub fn new() -> LocalFulfilledPredicates<'tcx> {
LocalFulfilledPredicates {
set: FnvHashSet()
}
}
fn is_duplicate_or_add(&mut self, key: &ty::Predicate<'tcx>) -> bool {
// For a `LocalFulfilledPredicates`, if we find a match, we
// don't need to add a read edge to the dep-graph. This is
// because it means that the predicate has already been
// considered by this `FulfillmentContext`, and hence the
// containing task will already have an edge. (Here we are
// assuming each `FulfillmentContext` only gets used from one
// task; but to do otherwise makes no sense)
!self.set.insert(key.clone())
}
}
impl<'a, 'gcx, 'tcx> GlobalFulfilledPredicates<'gcx> {
pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'gcx> {
GlobalFulfilledPredicates {


@ -28,6 +28,8 @@
#![feature(nonzero)]
#![feature(rustc_private)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(fn_traits)]
#![cfg_attr(test, feature(test))]


@ -15,20 +15,45 @@
//! in the first place). See README.md for a general overview of how
//! to use this class.
use fnv::{FnvHashMap, FnvHashSet};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::mem;
use std::hash;
use std::marker::PhantomData;
mod node_index;
use self::node_index::NodeIndex;
mod tree_index;
use self::tree_index::TreeIndex;
#[cfg(test)]
mod test;
pub struct ObligationForest<O, T> {
pub trait ForestObligation : Clone + Debug {
type Predicate : Clone + hash::Hash + Eq + Debug;
fn as_predicate(&self) -> &Self::Predicate;
}
pub trait ObligationProcessor {
type Obligation : ForestObligation;
type Error : Debug;
fn process_obligation(&mut self,
obligation: &mut Self::Obligation)
-> Result<Option<Vec<Self::Obligation>>, Self::Error>;
fn process_backedge<'c, I>(&mut self, cycle: I,
_marker: PhantomData<&'c Self::Obligation>)
where I: Clone + Iterator<Item=&'c Self::Obligation>;
}
struct SnapshotData {
node_len: usize,
cache_list_len: usize,
}
pub struct ObligationForest<O: ForestObligation> {
/// The list of obligations. In between calls to
/// `process_obligations`, this list only contains nodes in the
/// `Pending` or `Success` state (with a non-zero number of
@ -42,51 +67,66 @@ pub struct ObligationForest<O, T> {
/// at a higher index than its parent. This is needed by the
/// backtrace iterator (which uses `split_at`).
nodes: Vec<Node<O>>,
trees: Vec<Tree<T>>,
snapshots: Vec<usize>,
/// A cache of predicates that have been successfully completed.
done_cache: FnvHashSet<O::Predicate>,
/// A cache of the nodes in `nodes`, indexed by predicate.
waiting_cache: FnvHashMap<O::Predicate, NodeIndex>,
/// A list of the obligations added in snapshots, to allow
/// for their removal.
cache_list: Vec<O::Predicate>,
snapshots: Vec<SnapshotData>,
scratch: Option<Vec<usize>>,
}
pub struct Snapshot {
len: usize,
}
struct Tree<T> {
root: NodeIndex,
state: T,
}
#[derive(Debug)]
struct Node<O> {
state: NodeState<O>,
obligation: O,
state: Cell<NodeState>,
/// Obligations that depend on this obligation for their
/// completion. They must all be in a non-pending state.
dependents: Vec<NodeIndex>,
/// The parent of a node - the original obligation of
/// which it is a subobligation. Except for error reporting,
/// this is just another member of `dependents`.
parent: Option<NodeIndex>,
tree: TreeIndex,
}
/// The state of one node in some tree within the forest. This
/// represents the current state of processing for the obligation (of
/// type `O`) associated with this node.
#[derive(Debug)]
enum NodeState<O> {
/// Obligation not yet resolved to success or error.
Pending {
obligation: O,
},
///
/// Outside of ObligationForest methods, nodes should be either Pending
/// or Waiting.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum NodeState {
/// Obligations for which selection has not yet returned a
/// non-ambiguous result.
Pending,
/// Obligation resolved to success; `num_incomplete_children`
/// indicates the number of children still in an "incomplete"
/// state. Incomplete means that either the child is still
/// pending, or it has children which are incomplete. (Basically,
/// there is pending work somewhere in the subtree of the child.)
///
/// Once all children have completed, success nodes are removed
/// from the vector by the compression step.
Success {
obligation: O,
num_incomplete_children: usize,
},
/// This obligation was selected successfully, but may or
/// may not have subobligations.
Success,
/// This obligation was selected successfully, but it has
/// a pending subobligation.
Waiting,
/// This obligation, along with its subobligations, is complete,
/// and will be removed in the next collection.
Done,
/// This obligation was resolved to an error. Error nodes are
/// removed from the vector by the compression step.
Error,
/// This is a temporary state used in DFS loops to detect cycles;
/// it should not exist outside of these DFSes.
OnDfsStack,
}
#[derive(Debug)]
@ -113,12 +153,15 @@ pub struct Error<O, E> {
pub backtrace: Vec<O>,
}
impl<O: Debug, T: Debug> ObligationForest<O, T> {
pub fn new() -> ObligationForest<O, T> {
impl<O: ForestObligation> ObligationForest<O> {
pub fn new() -> ObligationForest<O> {
ObligationForest {
trees: vec![],
nodes: vec![],
snapshots: vec![],
done_cache: FnvHashSet(),
waiting_cache: FnvHashMap(),
cache_list: vec![],
scratch: Some(vec![]),
}
}
@ -129,57 +172,69 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
}
pub fn start_snapshot(&mut self) -> Snapshot {
self.snapshots.push(self.trees.len());
self.snapshots.push(SnapshotData {
node_len: self.nodes.len(),
cache_list_len: self.cache_list.len()
});
Snapshot { len: self.snapshots.len() }
}
pub fn commit_snapshot(&mut self, snapshot: Snapshot) {
assert_eq!(snapshot.len, self.snapshots.len());
let trees_len = self.snapshots.pop().unwrap();
assert!(self.trees.len() >= trees_len);
let info = self.snapshots.pop().unwrap();
assert!(self.nodes.len() >= info.node_len);
assert!(self.cache_list.len() >= info.cache_list_len);
}
pub fn rollback_snapshot(&mut self, snapshot: Snapshot) {
// Check that we are obeying stack discipline.
assert_eq!(snapshot.len, self.snapshots.len());
let trees_len = self.snapshots.pop().unwrap();
let info = self.snapshots.pop().unwrap();
// If nothing happened in snapshot, done.
if self.trees.len() == trees_len {
return;
for entry in &self.cache_list[info.cache_list_len..] {
self.done_cache.remove(entry);
self.waiting_cache.remove(entry);
}
// Find root of first tree; because nothing can happen in a
// snapshot but pushing trees, all nodes after that should be
// roots of other trees as well
let first_root_index = self.trees[trees_len].root.get();
debug_assert!(self.nodes[first_root_index..]
.iter()
.zip(first_root_index..)
.all(|(root, root_index)| {
self.trees[root.tree.get()].root.get() == root_index
}));
// Pop off tree/root pairs pushed during snapshot.
self.trees.truncate(trees_len);
self.nodes.truncate(first_root_index);
self.nodes.truncate(info.node_len);
self.cache_list.truncate(info.cache_list_len);
}
pub fn in_snapshot(&self) -> bool {
!self.snapshots.is_empty()
}
/// Adds a new tree to the forest.
/// Registers an obligation
///
/// This CAN be done during a snapshot.
pub fn push_tree(&mut self, obligation: O, tree_state: T) {
let index = NodeIndex::new(self.nodes.len());
let tree = TreeIndex::new(self.trees.len());
self.trees.push(Tree {
root: index,
state: tree_state,
});
self.nodes.push(Node::new(tree, None, obligation));
/// This CAN be done in a snapshot
pub fn register_obligation(&mut self, obligation: O) {
self.register_obligation_at(obligation, None)
}
fn register_obligation_at(&mut self, obligation: O, parent: Option<NodeIndex>) {
if self.done_cache.contains(obligation.as_predicate()) { return }
match self.waiting_cache.entry(obligation.as_predicate().clone()) {
Entry::Occupied(o) => {
debug!("register_obligation_at({:?}, {:?}) - duplicate of {:?}!",
obligation, parent, o.get());
if let Some(parent) = parent {
if self.nodes[o.get().get()].dependents.contains(&parent) {
debug!("register_obligation_at({:?}, {:?}) - duplicate subobligation",
obligation, parent);
} else {
self.nodes[o.get().get()].dependents.push(parent);
}
}
}
Entry::Vacant(v) => {
debug!("register_obligation_at({:?}, {:?}) - ok",
obligation, parent);
v.insert(NodeIndex::new(self.nodes.len()));
self.cache_list.push(obligation.as_predicate().clone());
self.nodes.push(Node::new(parent, obligation));
}
};
}
/// Convert all remaining obligations to the given error.
@ -189,10 +244,8 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
assert!(!self.in_snapshot());
let mut errors = vec![];
for index in 0..self.nodes.len() {
debug_assert!(!self.nodes[index].is_popped());
self.inherit_error(index);
if let NodeState::Pending { .. } = self.nodes[index].state {
let backtrace = self.backtrace(index);
if let NodeState::Pending = self.nodes[index].state.get() {
let backtrace = self.error_at(index);
errors.push(Error {
error: error.clone(),
backtrace: backtrace,
@ -210,22 +263,17 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
{
self.nodes
.iter()
.filter_map(|n| {
match n.state {
NodeState::Pending { ref obligation } => Some(obligation),
_ => None,
}
})
.cloned()
.filter(|n| n.state.get() == NodeState::Pending)
.map(|n| n.obligation.clone())
.collect()
}
/// Process the obligations.
/// Perform a pass through the obligation list. This must
/// be called in a loop until `outcome.stalled` is false.
///
/// This CANNOT be unrolled (presently, at least).
pub fn process_obligations<E, F>(&mut self, mut action: F) -> Outcome<O, E>
where E: Debug,
F: FnMut(&mut O, &mut T, Backtrace<O>) -> Result<Option<Vec<O>>, E>
pub fn process_obligations<P>(&mut self, processor: &mut P) -> Outcome<O, P::Error>
where P: ObligationProcessor<Obligation=O>
{
debug!("process_obligations(len={})", self.nodes.len());
assert!(!self.in_snapshot()); // cannot unroll this action
@ -233,33 +281,18 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
let mut errors = vec![];
let mut stalled = true;
// We maintain the invariant that the list is in pre-order, so
// parents occur before their children. Also, whenever an
// error occurs, we propagate it from the child all the way to
// the root of the tree. Together, these two facts mean that
// when we visit a node, we can check if its root is in error,
// and we will find out if any prior node within this forest
// encountered an error.
for index in 0..self.nodes.len() {
debug_assert!(!self.nodes[index].is_popped());
self.inherit_error(index);
debug!("process_obligations: node {} == {:?}",
index,
self.nodes[index].state);
self.nodes[index]);
let result = {
let Node { tree, parent, .. } = self.nodes[index];
let (prefix, suffix) = self.nodes.split_at_mut(index);
let backtrace = Backtrace::new(prefix, parent);
match suffix[0].state {
NodeState::Error |
NodeState::Success { .. } => continue,
NodeState::Pending { ref mut obligation } => {
action(obligation, &mut self.trees[tree.get()].state, backtrace)
}
let result = match self.nodes[index] {
Node { state: ref _state, ref mut obligation, .. }
if _state.get() == NodeState::Pending =>
{
processor.process_obligation(obligation)
}
_ => continue
};
debug!("process_obligations: node {} got result {:?}",
@ -273,10 +306,15 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
Ok(Some(children)) => {
// if we saw a Some(_) result, we are not (yet) stalled
stalled = false;
self.success(index, children);
for child in children {
self.register_obligation_at(child,
Some(NodeIndex::new(index)));
}
self.nodes[index].state.set(NodeState::Success);
}
Err(err) => {
let backtrace = self.backtrace(index);
let backtrace = self.error_at(index);
errors.push(Error {
error: err,
backtrace: backtrace,
@ -285,259 +323,292 @@ impl<O: Debug, T: Debug> ObligationForest<O, T> {
}
}
self.mark_as_waiting();
self.process_cycles(processor);
// Now we have to compress the result
let successful_obligations = self.compress();
let completed_obligations = self.compress();
debug!("process_obligations: complete");
Outcome {
completed: successful_obligations,
completed: completed_obligations,
errors: errors,
stalled: stalled,
}
}
/// Indicates that node `index` has been processed successfully,
/// yielding `children` as the derivative work. If children is an
/// empty vector, this will update the ref count on the parent of
/// `index` to indicate that a child has completed
/// successfully. Otherwise, adds new nodes to represent the child
/// work.
fn success(&mut self, index: usize, children: Vec<O>) {
debug!("success(index={}, children={:?})", index, children);
/// Mark all NodeState::Success nodes as NodeState::Done and
/// report all cycles between them. This should be called
/// after `mark_as_waiting` marks all nodes with pending
/// subobligations as NodeState::Waiting.
fn process_cycles<P>(&mut self, processor: &mut P)
where P: ObligationProcessor<Obligation=O>
{
let mut stack = self.scratch.take().unwrap();
let num_incomplete_children = children.len();
if num_incomplete_children == 0 {
// if there is no work left to be done, decrement parent's ref count
self.update_parent(index);
} else {
// create child work
let tree_index = self.nodes[index].tree;
let node_index = NodeIndex::new(index);
self.nodes.extend(children.into_iter()
.map(|o| Node::new(tree_index, Some(node_index), o)));
for node in 0..self.nodes.len() {
self.find_cycles_from_node(&mut stack, processor, node);
}
// change state from `Pending` to `Success`, temporarily swapping in `Error`
let state = mem::replace(&mut self.nodes[index].state, NodeState::Error);
self.nodes[index].state = match state {
NodeState::Pending { obligation } => {
NodeState::Success {
obligation: obligation,
num_incomplete_children: num_incomplete_children,
}
}
NodeState::Success { .. } |
NodeState::Error => unreachable!(),
};
self.scratch = Some(stack);
}
/// Decrements the ref count on the parent of `child`; if the
/// parent's ref count then reaches zero, proceeds recursively.
fn update_parent(&mut self, child: usize) {
debug!("update_parent(child={})", child);
if let Some(parent) = self.nodes[child].parent {
let parent = parent.get();
match self.nodes[parent].state {
NodeState::Success { ref mut num_incomplete_children, .. } => {
*num_incomplete_children -= 1;
if *num_incomplete_children > 0 {
return;
fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>,
processor: &mut P, index: usize)
where P: ObligationProcessor<Obligation=O>
{
let node = &self.nodes[index];
let state = node.state.get();
match state {
NodeState::OnDfsStack => {
let index =
stack.iter().rposition(|n| *n == index).unwrap();
// I need a Clone closure
#[derive(Clone)]
struct GetObligation<'a, O: 'a>(&'a [Node<O>]);
impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> {
type Output = &'a O;
extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O {
&self.0[*args.0].obligation
}
}
impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> {
extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O {
&self.0[*args.0].obligation
}
}
_ => unreachable!(),
}
self.update_parent(parent);
}
}
/// If the root of `child` is in an error state, places `child`
/// into an error state. This is used during processing so that we
/// skip the remaining obligations from a tree once some other
/// node in the tree is found to be in error.
fn inherit_error(&mut self, child: usize) {
let tree = self.nodes[child].tree;
let root = self.trees[tree.get()].root;
if let NodeState::Error = self.nodes[root.get()].state {
self.nodes[child].state = NodeState::Error;
}
processor.process_backedge(stack[index..].iter().map(GetObligation(&self.nodes)),
PhantomData);
}
NodeState::Success => {
node.state.set(NodeState::OnDfsStack);
stack.push(index);
if let Some(parent) = node.parent {
self.find_cycles_from_node(stack, processor, parent.get());
}
for dependent in &node.dependents {
self.find_cycles_from_node(stack, processor, dependent.get());
}
stack.pop();
node.state.set(NodeState::Done);
},
NodeState::Waiting | NodeState::Pending => {
// this node is still reachable from some pending node. We
// will get to it when they are all processed.
}
NodeState::Done | NodeState::Error => {
// already processed that node
}
};
}
/// Returns a vector of obligations for `p` and all of its
/// ancestors, putting them into the error state in the process.
/// The fact that the root is now marked as an error is used by
/// `inherit_error` above to propagate the error state to the
/// remainder of the tree.
fn backtrace(&mut self, mut p: usize) -> Vec<O> {
fn error_at(&mut self, p: usize) -> Vec<O> {
let mut error_stack = self.scratch.take().unwrap();
let mut trace = vec![];
let mut n = p;
loop {
let state = mem::replace(&mut self.nodes[p].state, NodeState::Error);
match state {
NodeState::Pending { obligation } |
NodeState::Success { obligation, .. } => {
trace.push(obligation);
}
NodeState::Error => {
// we should not encounter an error, because if
// there was an error in the ancestors, it should
// have been propagated down and we should never
// have tried to process this obligation
panic!("encountered error in node {:?} when collecting stack trace",
p);
}
}
self.nodes[n].state.set(NodeState::Error);
trace.push(self.nodes[n].obligation.clone());
error_stack.extend(self.nodes[n].dependents.iter().map(|x| x.get()));
// loop to the parent
match self.nodes[p].parent {
Some(q) => {
p = q.get();
}
None => {
return trace;
}
match self.nodes[n].parent {
Some(q) => n = q.get(),
None => break
}
}
loop {
// non-standard `while let` to bypass #6393
let i = match error_stack.pop() {
Some(i) => i,
None => break
};
let node = &self.nodes[i];
match node.state.get() {
NodeState::Error => continue,
_ => node.state.set(NodeState::Error)
}
error_stack.extend(
node.dependents.iter().cloned().chain(node.parent).map(|x| x.get())
);
}
self.scratch = Some(error_stack);
trace
}
/// Marks all nodes that depend on a pending node as NodeState::Waiting.
fn mark_as_waiting(&self) {
for node in &self.nodes {
if node.state.get() == NodeState::Waiting {
node.state.set(NodeState::Success);
}
}
for node in &self.nodes {
if node.state.get() == NodeState::Pending {
self.mark_as_waiting_from(node)
}
}
}
fn mark_as_waiting_from(&self, node: &Node<O>) {
match node.state.get() {
NodeState::Pending | NodeState::Done => {},
NodeState::Waiting | NodeState::Error | NodeState::OnDfsStack => return,
NodeState::Success => {
node.state.set(NodeState::Waiting);
}
}
if let Some(parent) = node.parent {
self.mark_as_waiting_from(&self.nodes[parent.get()]);
}
for dependent in &node.dependents {
self.mark_as_waiting_from(&self.nodes[dependent.get()]);
}
}
/// Compresses the vector, removing all popped nodes. This adjusts
/// the indices and hence invalidates any outstanding
/// indices. Cannot be used during a transaction.
///
/// Beforehand, all nodes must be marked as `Done` and no cycles
/// on these nodes may be present. This is done by e.g. `process_cycles`.
#[inline(never)]
fn compress(&mut self) -> Vec<O> {
assert!(!self.in_snapshot()); // didn't write code to unroll this action
let mut node_rewrites: Vec<_> = (0..self.nodes.len()).collect();
let mut tree_rewrites: Vec<_> = (0..self.trees.len()).collect();
// Finish propagating error state. Note that in this case we
// only have to check immediate parents, rather than all
// ancestors, because all errors have already occurred that
// are going to occur.
let nodes_len = self.nodes.len();
for i in 0..nodes_len {
if !self.nodes[i].is_popped() {
self.inherit_error(i);
}
}
// Determine which trees to remove by checking if their root
// is popped.
let mut dead_trees = 0;
let trees_len = self.trees.len();
for i in 0..trees_len {
let root_node = self.trees[i].root;
if self.nodes[root_node.get()].is_popped() {
dead_trees += 1;
} else if dead_trees > 0 {
self.trees.swap(i, i - dead_trees);
tree_rewrites[i] -= dead_trees;
}
}
// Now go through and move all nodes that are either
// successful or which have an error over into to the end of
// the list, preserving the relative order of the survivors
// (which is important for the `inherit_error` logic).
let mut node_rewrites: Vec<_> = self.scratch.take().unwrap();
node_rewrites.extend(0..nodes_len);
let mut dead_nodes = 0;
for i in 0..nodes_len {
// Now move all popped nodes to the end. Try to keep the order.
//
// LOOP INVARIANT:
// self.nodes[0..i - dead_nodes] are the first remaining nodes
// self.nodes[i - dead_nodes..i] are all dead
// self.nodes[i..] are unchanged
for i in 0..self.nodes.len() {
match self.nodes[i].state.get() {
NodeState::Done => {
self.waiting_cache.remove(self.nodes[i].obligation.as_predicate());
// FIXME(HashMap): why can't I get my key back?
self.done_cache.insert(self.nodes[i].obligation.as_predicate().clone());
}
NodeState::Error => {
// We *intentionally* remove the node from the cache at this point. Otherwise
// tests must come up with a different type on every type error they
// check against.
self.waiting_cache.remove(self.nodes[i].obligation.as_predicate());
}
_ => {}
}
if self.nodes[i].is_popped() {
node_rewrites[i] = nodes_len;
dead_nodes += 1;
} else if dead_nodes > 0 {
self.nodes.swap(i, i - dead_nodes);
node_rewrites[i] -= dead_nodes;
} else {
if dead_nodes > 0 {
self.nodes.swap(i, i - dead_nodes);
node_rewrites[i] -= dead_nodes;
}
}
}
// No compression needed.
if dead_nodes == 0 && dead_trees == 0 {
if dead_nodes == 0 {
node_rewrites.truncate(0);
self.scratch = Some(node_rewrites);
return vec![];
}
// Pop off the trees we killed.
self.trees.truncate(trees_len - dead_trees);
// Pop off all the nodes we killed and extract the success
// stories.
let successful = (0..dead_nodes)
.map(|_| self.nodes.pop().unwrap())
.flat_map(|node| {
match node.state {
match node.state.get() {
NodeState::Error => None,
NodeState::Pending { .. } => unreachable!(),
NodeState::Success { obligation, num_incomplete_children } => {
assert_eq!(num_incomplete_children, 0);
Some(obligation)
}
NodeState::Done => Some(node.obligation),
_ => unreachable!()
}
})
.collect();
.collect();
self.apply_rewrites(&node_rewrites);
// Adjust the various indices, since we compressed things.
for tree in &mut self.trees {
tree.root = NodeIndex::new(node_rewrites[tree.root.get()]);
}
for node in &mut self.nodes {
if let Some(ref mut index) = node.parent {
let new_index = node_rewrites[index.get()];
debug_assert!(new_index < (nodes_len - dead_nodes));
*index = NodeIndex::new(new_index);
}
node.tree = TreeIndex::new(tree_rewrites[node.tree.get()]);
}
node_rewrites.truncate(0);
self.scratch = Some(node_rewrites);
successful
}
fn apply_rewrites(&mut self, node_rewrites: &[usize]) {
let nodes_len = node_rewrites.len();
for node in &mut self.nodes {
if let Some(index) = node.parent {
let new_index = node_rewrites[index.get()];
if new_index >= nodes_len {
// parent dead due to error
node.parent = None;
} else {
node.parent = Some(NodeIndex::new(new_index));
}
}
let mut i = 0;
while i < node.dependents.len() {
let new_index = node_rewrites[node.dependents[i].get()];
if new_index >= nodes_len {
node.dependents.swap_remove(i);
} else {
node.dependents[i] = NodeIndex::new(new_index);
i += 1;
}
}
}
let mut kill_list = vec![];
for (predicate, index) in self.waiting_cache.iter_mut() {
let new_index = node_rewrites[index.get()];
if new_index >= nodes_len {
kill_list.push(predicate.clone());
} else {
*index = NodeIndex::new(new_index);
}
}
for predicate in kill_list { self.waiting_cache.remove(&predicate); }
}
}
impl<O> Node<O> {
fn new(tree: TreeIndex, parent: Option<NodeIndex>, obligation: O) -> Node<O> {
fn new(parent: Option<NodeIndex>, obligation: O) -> Node<O> {
Node {
obligation: obligation,
parent: parent,
state: NodeState::Pending { obligation: obligation },
tree: tree,
state: Cell::new(NodeState::Pending),
dependents: vec![],
}
}
fn is_popped(&self) -> bool {
match self.state {
NodeState::Pending { .. } => false,
NodeState::Success { num_incomplete_children, .. } => num_incomplete_children == 0,
NodeState::Error => true,
}
}
}
#[derive(Clone)]
pub struct Backtrace<'b, O: 'b> {
nodes: &'b [Node<O>],
pointer: Option<NodeIndex>,
}
impl<'b, O> Backtrace<'b, O> {
fn new(nodes: &'b [Node<O>], pointer: Option<NodeIndex>) -> Backtrace<'b, O> {
Backtrace {
nodes: nodes,
pointer: pointer,
}
}
}
impl<'b, O> Iterator for Backtrace<'b, O> {
type Item = &'b O;
fn next(&mut self) -> Option<&'b O> {
debug!("Backtrace: self.pointer = {:?}", self.pointer);
if let Some(p) = self.pointer {
self.pointer = self.nodes[p.get()].parent;
match self.nodes[p.get()].state {
NodeState::Pending { ref obligation } |
NodeState::Success { ref obligation, .. } => Some(obligation),
NodeState::Error => {
panic!("Backtrace encountered an error.");
}
}
} else {
None
match self.state.get() {
NodeState::Pending | NodeState::Waiting => false,
NodeState::Error | NodeState::Done => true,
NodeState::OnDfsStack | NodeState::Success => unreachable!()
}
}
}


@ -8,30 +8,82 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{ObligationForest, Outcome, Error};
#![cfg(test)]
use super::{ObligationForest, ObligationProcessor, Outcome, Error};
use std::fmt;
use std::marker::PhantomData;
impl<'a> super::ForestObligation for &'a str {
type Predicate = &'a str;
fn as_predicate(&self) -> &Self::Predicate {
self
}
}
struct ClosureObligationProcessor<OF, BF, O, E> {
process_obligation: OF,
_process_backedge: BF,
marker: PhantomData<(O, E)>,
}
#[allow(non_snake_case)]
fn C<OF, BF, O>(of: OF, bf: BF) -> ClosureObligationProcessor<OF, BF, O, &'static str>
where OF: FnMut(&mut O) -> Result<Option<Vec<O>>, &'static str>,
BF: FnMut(&[O])
{
ClosureObligationProcessor {
process_obligation: of,
_process_backedge: bf,
marker: PhantomData
}
}
impl<OF, BF, O, E> ObligationProcessor for ClosureObligationProcessor<OF, BF, O, E>
where O: super::ForestObligation + fmt::Debug,
E: fmt::Debug,
OF: FnMut(&mut O) -> Result<Option<Vec<O>>, E>,
BF: FnMut(&[O])
{
type Obligation = O;
type Error = E;
fn process_obligation(&mut self,
obligation: &mut Self::Obligation)
-> Result<Option<Vec<Self::Obligation>>, Self::Error>
{
(self.process_obligation)(obligation)
}
fn process_backedge<'c, I>(&mut self, _cycle: I,
_marker: PhantomData<&'c Self::Obligation>)
where I: Clone + Iterator<Item=&'c Self::Obligation> {
}
}
#[test]
fn push_pop() {
let mut forest = ObligationForest::new();
forest.push_tree("A", "A");
forest.push_tree("B", "B");
forest.push_tree("C", "C");
forest.register_obligation("A");
forest.register_obligation("B");
forest.register_obligation("C");
// first round, B errors out, A has subtasks, and C completes, creating this:
// A |-> A.1
// |-> A.2
// |-> A.3
let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
tree,
_| {
assert_eq!(obligation.chars().next(), tree.chars().next());
match *obligation {
"A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
"B" => Err("B is for broken"),
"C" => Ok(Some(vec![])),
_ => unreachable!(),
}
});
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
"B" => Err("B is for broken"),
"C" => Ok(Some(vec![])),
_ => unreachable!(),
}
}, |_| {}));
assert_eq!(ok, vec!["C"]);
assert_eq!(err,
vec![Error {
@ -45,10 +97,9 @@ fn push_pop() {
// |-> A.3 |-> A.3.i
// D |-> D.1
// |-> D.2
forest.push_tree("D", "D");
let Outcome { completed: ok, errors: err, .. }: Outcome<&'static str, ()> =
forest.process_obligations(|obligation, tree, _| {
assert_eq!(obligation.chars().next(), tree.chars().next());
forest.register_obligation("D");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.1" => Ok(None),
"A.2" => Ok(None),
@ -56,45 +107,43 @@ fn push_pop() {
"D" => Ok(Some(vec!["D.1", "D.2"])),
_ => unreachable!(),
}
});
}, |_| {}));
assert_eq!(ok, Vec::<&'static str>::new());
assert_eq!(err, Vec::new());
// third round: ok in A.1 but trigger an error in A.2. Check that it
// propagates to A.3.i, but not D.1 or D.2.
// propagates to A, but not D.1 or D.2.
// D |-> D.1 |-> D.1.i
// |-> D.2 |-> D.2.i
let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
tree,
_| {
assert_eq!(obligation.chars().next(), tree.chars().next());
match *obligation {
"A.1" => Ok(Some(vec![])),
"A.2" => Err("A is for apple"),
"D.1" => Ok(Some(vec!["D.1.i"])),
"D.2" => Ok(Some(vec!["D.2.i"])),
_ => unreachable!(),
}
});
assert_eq!(ok, vec!["A.1"]);
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.1" => Ok(Some(vec![])),
"A.2" => Err("A is for apple"),
"A.3.i" => Ok(Some(vec![])),
"D.1" => Ok(Some(vec!["D.1.i"])),
"D.2" => Ok(Some(vec!["D.2.i"])),
_ => unreachable!(),
}
}, |_| {}));
assert_eq!(ok, vec!["A.3", "A.1", "A.3.i"]);
assert_eq!(err,
vec![Error {
error: "A is for apple",
backtrace: vec!["A.2", "A"],
}]);
// fourth round: error in D.1.i that should propagate to D.2.i
let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation,
tree,
_| {
assert_eq!(obligation.chars().next(), tree.chars().next());
match *obligation {
"D.1.i" => Err("D is for dumb"),
_ => panic!("unexpected obligation {:?}", obligation),
}
});
assert_eq!(ok, Vec::<&'static str>::new());
// fourth round: error in D.1.i
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D.1.i" => Err("D is for dumb"),
"D.2.i" => Ok(Some(vec![])),
_ => panic!("unexpected obligation {:?}", obligation),
}
}, |_| {}));
assert_eq!(ok, vec!["D.2.i", "D.2"]);
assert_eq!(err,
vec![Error {
error: "D is for dumb",
@ -113,60 +162,54 @@ fn push_pop() {
#[test]
fn success_in_grandchildren() {
let mut forest = ObligationForest::new();
forest.push_tree("A", "A");
forest.register_obligation("A");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations::<(), _>(|obligation, tree, _| {
assert_eq!(obligation.chars().next(), tree.chars().next());
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
_ => unreachable!(),
}
});
}, |_| {}));
assert!(ok.is_empty());
assert!(err.is_empty());
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations::<(), _>(|obligation, tree, _| {
assert_eq!(obligation.chars().next(), tree.chars().next());
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.1" => Ok(Some(vec![])),
"A.2" => Ok(Some(vec!["A.2.i", "A.2.ii"])),
"A.3" => Ok(Some(vec![])),
_ => unreachable!(),
}
});
}, |_| {}));
assert_eq!(ok, vec!["A.3", "A.1"]);
assert!(err.is_empty());
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations::<(), _>(|obligation, tree, _| {
assert_eq!(obligation.chars().next(), tree.chars().next());
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.2.i" => Ok(Some(vec!["A.2.i.a"])),
"A.2.ii" => Ok(Some(vec![])),
_ => unreachable!(),
}
});
}, |_| {}));
assert_eq!(ok, vec!["A.2.ii"]);
assert!(err.is_empty());
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations::<(), _>(|obligation, tree, _| {
assert_eq!(obligation.chars().next(), tree.chars().next());
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.2.i.a" => Ok(Some(vec![])),
_ => unreachable!(),
}
});
}, |_| {}));
assert_eq!(ok, vec!["A.2.i.a", "A.2.i", "A.2", "A"]);
assert!(err.is_empty());
let Outcome { completed: ok, errors: err, .. } = forest.process_obligations::<(), _>(|_,
_,
_| {
unreachable!()
});
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|_| unreachable!(), |_| {}));
assert!(ok.is_empty());
assert!(err.is_empty());
}
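The `ok` vectors above list children ahead of their parents (for example `["A.2.i.a", "A.2.i", "A.2", "A"]`), because a node is only reported as completed once everything it spawned has completed. A small standalone illustration of that post-order reporting follows; this is only a sketch of the ordering rule, not the forest's actual code:

    // Post-order reporting: a node is pushed onto `out` only after every node
    // it depends on has been pushed.
    fn completion_order(node: &'static str,
                        children: &dyn Fn(&'static str) -> Vec<&'static str>,
                        out: &mut Vec<&'static str>) {
        for child in children(node) {
            completion_order(child, children, out);
        }
        out.push(node);
    }

    fn main() {
        let children = |n: &'static str| match n {
            "A" => vec!["A.2"],
            "A.2" => vec!["A.2.i"],
            "A.2.i" => vec!["A.2.i.a"],
            _ => vec![],
        };
        let mut out = Vec::new();
        completion_order("A", &children, &mut out);
        assert_eq!(out, vec!["A.2.i.a", "A.2.i", "A.2", "A"]);
    }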
@ -174,63 +217,204 @@ fn success_in_grandchildren() {
#[test]
fn to_errors_no_throw() {
    // check that converting multiple children with common parent (A)
    // yields the correct errors (and does not panic, in particular).
    let mut forest = ObligationForest::new();
    forest.register_obligation("A");
    let Outcome { completed: ok, errors: err, .. } =
        forest.process_obligations(&mut C(|obligation| {
            match *obligation {
                "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])),
                _ => unreachable!(),
            }
        }, |_|{}));
    assert_eq!(ok.len(), 0);
    assert_eq!(err.len(), 0);
    let errors = forest.to_errors(());
    assert_eq!(errors[0].backtrace, vec!["A.1", "A"]);
    assert_eq!(errors[1].backtrace, vec!["A.2", "A"]);
    assert_eq!(errors[2].backtrace, vec!["A.3", "A"]);
    assert_eq!(errors.len(), 3);
}
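`to_errors(())` turns every obligation that is still pending after processing into an `Error` whose backtrace records its chain of ancestors, which is what the three assertions above check. Below is a rough sketch of that conversion using stand-in types: the `Error` shape mirrors the test, while `Pending` and the free function are hypothetical, not the forest's real internals.

    // Stand-in types for the sketch.
    #[derive(Debug, PartialEq)]
    struct Error<O, E> {
        error: E,
        backtrace: Vec<O>,
    }

    struct Pending<O> {
        obligation: O,
        ancestors: Vec<O>, // nearest parent first
    }

    // Turn every still-pending obligation into an error that records how we
    // reached it.
    fn to_errors<O: Clone, E: Clone>(pending: &[Pending<O>], error: E) -> Vec<Error<O, E>> {
        pending.iter()
               .map(|p| {
                   let mut backtrace = vec![p.obligation.clone()];
                   backtrace.extend(p.ancestors.iter().cloned());
                   Error { error: error.clone(), backtrace }
               })
               .collect()
    }

    fn main() {
        let pending = vec![
            Pending { obligation: "A.1", ancestors: vec!["A"] },
            Pending { obligation: "A.2", ancestors: vec!["A"] },
            Pending { obligation: "A.3", ancestors: vec!["A"] },
        ];
        let errors = to_errors(&pending, ());
        assert_eq!(errors.len(), 3);
        assert_eq!(errors[0].backtrace, vec!["A.1", "A"]);
    }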
#[test]
fn diamond() {
    // check that diamond dependencies are handled correctly
    let mut forest = ObligationForest::new();
    forest.register_obligation("A");
    let Outcome { completed: ok, errors: err, .. } =
        forest.process_obligations(&mut C(|obligation| {
            match *obligation {
                "A" => Ok(Some(vec!["A.1", "A.2"])),
                _ => unreachable!(),
            }
        }, |_|{}));
    assert_eq!(ok.len(), 0);
    assert_eq!(err.len(), 0);
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A.1" => Ok(Some(vec!["D"])),
"A.2" => Ok(Some(vec!["D"])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err.len(), 0);
let mut d_count = 0;
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D" => { d_count += 1; Ok(Some(vec![])) },
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(d_count, 1);
assert_eq!(ok, vec!["D", "A.2", "A.1", "A"]);
assert_eq!(err.len(), 0);
let errors = forest.to_errors(());
assert_eq!(errors.len(), 0);
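    // Same diamond shape again, but with a back-edge (A'.1 also lists A' as a
    // subobligation) and a failing shared node D'; the error should be
    // reported exactly once.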
forest.register_obligation("A'");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A'" => Ok(Some(vec!["A'.1", "A'.2"])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err.len(), 0);
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A'.1" => Ok(Some(vec!["D'", "A'"])),
"A'.2" => Ok(Some(vec!["D'"])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err.len(), 0);
let mut d_count = 0;
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D'" => { d_count += 1; Err("operation failed") },
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(d_count, 1);
assert_eq!(ok.len(), 0);
assert_eq!(err, vec![super::Error {
error: "operation failed",
backtrace: vec!["D'", "A'.1", "A'"]
}]);
let errors = forest.to_errors(());
assert_eq!(errors.len(), 0);
}
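Both halves of this test rely on `d_count` staying at 1: when `A.1` and `A.2` (or `A'.1` and `A'.2`) each produce the same obligation, the forest reuses the existing node and only adds another parent edge, which is exactly what turns the former forest into a general graph. A toy sketch of that dedup-on-registration idea follows; the names and layout are illustrative, not the real `ObligationForest` internals:

    use std::collections::HashMap;

    // Toy version of "register an obligation, reusing an existing node if the
    // same obligation was seen before": duplicates become extra parent edges
    // instead of new nodes, so the structure is a graph rather than a forest.
    struct DedupGraph {
        nodes: Vec<&'static str>,
        index_of: HashMap<&'static str, usize>,
        parents: Vec<Vec<usize>>, // parents[i]: nodes waiting on node i
    }

    impl DedupGraph {
        fn new() -> DedupGraph {
            DedupGraph { nodes: Vec::new(), index_of: HashMap::new(), parents: Vec::new() }
        }

        fn register(&mut self, ob: &'static str, parent: Option<usize>) -> usize {
            let idx = match self.index_of.get(ob) {
                Some(&i) => i,
                None => {
                    let i = self.nodes.len();
                    self.nodes.push(ob);
                    self.parents.push(Vec::new());
                    self.index_of.insert(ob, i);
                    i
                }
            };
            if let Some(p) = parent {
                self.parents[idx].push(p);
            }
            idx
        }
    }

    fn main() {
        let mut g = DedupGraph::new();
        let a1 = g.register("A.1", None);
        let a2 = g.register("A.2", None);
        let d1 = g.register("D", Some(a1));
        let d2 = g.register("D", Some(a2));
        assert_eq!(d1, d2);                      // one node for "D"...
        assert_eq!(g.parents[d1], vec![a1, a2]); // ...shared by both parents
    }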
#[test]
fn done_dependency() {
// check that the local cache works
let mut forest = ObligationForest::new();
forest.register_obligation("A: Sized");
forest.register_obligation("B: Sized");
forest.register_obligation("C: Sized");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A: Sized" | "B: Sized" | "C: Sized" => Ok(Some(vec![])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok, vec!["C: Sized", "B: Sized", "A: Sized"]);
assert_eq!(err.len(), 0);
forest.register_obligation("(A,B,C): Sized");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"(A,B,C): Sized" => Ok(Some(vec![
"A: Sized",
"B: Sized",
"C: Sized"
])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok, vec!["(A,B,C): Sized"]);
assert_eq!(err.len(), 0);
}
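The second round completes `(A,B,C): Sized` immediately because its three children were proven in the first round and are found in a cache of finished predicates rather than being registered again. A minimal sketch of such a done-cache filter, with hypothetical names:

    use std::collections::HashSet;

    // Hypothetical filter: children already in the "done" cache are not
    // registered again, so a parent whose children are all cached can be
    // completed in the same round it was expanded.
    fn register_children<'a>(done: &HashSet<&'a str>, children: Vec<&'a str>) -> Vec<&'a str> {
        children.into_iter().filter(|c| !done.contains(c)).collect()
    }

    fn main() {
        let mut done = HashSet::new();
        done.insert("A: Sized");
        done.insert("B: Sized");
        done.insert("C: Sized");
        let still_pending = register_children(&done, vec!["A: Sized", "B: Sized", "C: Sized"]);
        assert!(still_pending.is_empty()); // "(A,B,C): Sized" can be marked done at once
    }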
#[test]
fn orphan() {
// check that orphaned nodes are handled correctly
let mut forest = ObligationForest::new();
forest.register_obligation("A");
forest.register_obligation("B");
forest.register_obligation("C1");
forest.register_obligation("C2");
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"A" => Ok(Some(vec!["D", "E"])),
"B" => Ok(None),
"C1" => Ok(Some(vec![])),
"C2" => Ok(Some(vec![])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok, vec!["C2", "C1"]);
assert_eq!(err.len(), 0);
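    // second round: D and E stay pending, while B picks up D as a child of
    // its own, so D now has two interested parents.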
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D" | "E" => Ok(None),
"B" => Ok(Some(vec!["D"])),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err.len(), 0);
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D" => Ok(None),
"E" => Err("E is for error"),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err, vec![super::Error {
error: "E is for error",
backtrace: vec!["E", "A"]
}]);
let Outcome { completed: ok, errors: err, .. } =
forest.process_obligations(&mut C(|obligation| {
match *obligation {
"D" => Err("D is dead"),
_ => unreachable!(),
}
}, |_|{}));
assert_eq!(ok.len(), 0);
assert_eq!(err, vec![super::Error {
error: "D is dead",
backtrace: vec!["D"]
}]);
let errors = forest.to_errors(());
assert_eq!(errors.len(), 0);
}
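All of these tests drive `process_obligations` through `C(..)`, a small adapter defined earlier in the test file (outside this excerpt) that wraps two closures, one called per obligation and one called for detected cycles. Its real definition is not shown here, so the following is only a guess at the general shape, with an assumed `Processor` trait and made-up method names:

    // Illustration only: an assumed processor trait plus an adapter that wraps
    // two closures, roughly the role the tests' `C(..)` plays; the compiler's
    // real trait has different names and a richer interface.
    trait Processor {
        fn process(&mut self, ob: &'static str)
                   -> Result<Option<Vec<&'static str>>, &'static str>;
        fn on_cycle(&mut self, cycle: &[&'static str]);
    }

    struct C<F, G>(F, G);

    impl<F, G> Processor for C<F, G>
        where F: FnMut(&'static str) -> Result<Option<Vec<&'static str>>, &'static str>,
              G: FnMut(&[&'static str]),
    {
        fn process(&mut self, ob: &'static str)
                   -> Result<Option<Vec<&'static str>>, &'static str> {
            (self.0)(ob)    // per-obligation step: new children, "not ready", or error
        }
        fn on_cycle(&mut self, cycle: &[&'static str]) {
            (self.1)(cycle) // called for cycles detected while processing the graph
        }
    }

    fn main() {
        let mut p = C(|ob: &'static str| match ob {
            "A" => Ok(Some(vec!["A.1", "A.2"])),
            "bad" => Err("boom"),
            _ => Ok(None),
        }, |_: &[&'static str]| {});
        assert_eq!(p.process("A"), Ok(Some(vec!["A.1", "A.2"])));
    }

In the tests above, the first closure plays the role of the selection step, returning new subobligations, `Ok(None)` for "still pending", or an error; the second closure receives any cycles found while walking the graph.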


@ -1,27 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::u32;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct TreeIndex {
index: u32,
}
impl TreeIndex {
pub fn new(value: usize) -> TreeIndex {
assert!(value < (u32::MAX as usize));
TreeIndex { index: value as u32 }
}
pub fn get(self) -> usize {
self.index as usize
}
}


@ -247,7 +247,8 @@ pub const tag_rustc_version: usize = 0x10f;
pub fn rustc_version() -> String {
format!(
"rustc {}",
option_env!("CFG_VERSION").unwrap_or("unknown version")
// option_env!("CFG_VERSION").unwrap_or("unknown version")
"nightly edition"
)
}


@ -14,5 +14,4 @@ pub fn main() {
let x: Vec<Trait + Sized> = Vec::new();
//~^ ERROR `Trait + Sized: std::marker::Sized` is not satisfied
//~| ERROR `Trait + Sized: std::marker::Sized` is not satisfied
//~| ERROR `Trait + Sized: std::marker::Sized` is not satisfied
}


@ -12,6 +12,8 @@
#![feature(rustc_attrs)]
use std::ops::Index;
#[rustc_error]
fn main() {
let x = &[1, 2, 3] as &[i32];


@ -27,12 +27,14 @@ fn f<T>(val: T) {
let t: S<T> = S(marker::PhantomData);
let a = &t as &Gettable<T>;
//~^ ERROR : std::marker::Send` is not satisfied
//~^^ ERROR : std::marker::Copy` is not satisfied
}
fn g<T>(val: T) {
let t: S<T> = S(marker::PhantomData);
let a: &Gettable<T> = &t;
//~^ ERROR : std::marker::Send` is not satisfied
//~^^ ERROR : std::marker::Copy` is not satisfied
}
fn foo<'a>() {


@ -18,6 +18,7 @@ use std::cell::RefCell;
fn assert<T: RecoverSafe + ?Sized>() {}
fn main() {
assert::<Rc<RefCell<i32>>>(); //~ ERROR E0277
assert::<Rc<RefCell<i32>>>();
//~^ ERROR `std::cell::UnsafeCell<i32>: std::panic::RefUnwindSafe` is not satisfied
//~^^ ERROR `std::cell::UnsafeCell<usize>: std::panic::RefUnwindSafe` is not satisfied
}


@ -18,5 +18,7 @@ use std::cell::RefCell;
fn assert<T: RecoverSafe + ?Sized>() {}
fn main() {
assert::<Arc<RefCell<i32>>>(); //~ ERROR E0277
assert::<Arc<RefCell<i32>>>();
//~^ ERROR `std::cell::UnsafeCell<i32>: std::panic::RefUnwindSafe` is not satisfied
//~^^ ERROR `std::cell::UnsafeCell<usize>: std::panic::RefUnwindSafe` is not satisfied
}


@ -17,5 +17,7 @@ use std::cell::RefCell;
fn assert<T: RecoverSafe + ?Sized>() {}
fn main() {
assert::<&RefCell<i32>>(); //~ ERROR E0277
assert::<&RefCell<i32>>();
//~^ ERROR `std::cell::UnsafeCell<i32>: std::panic::RefUnwindSafe` is not satisfied
//~^^ ERROR `std::cell::UnsafeCell<usize>: std::panic::RefUnwindSafe` is not satisfied
}


@ -17,6 +17,7 @@ use std::cell::RefCell;
fn assert<T: RecoverSafe + ?Sized>() {}
fn main() {
assert::<*mut RefCell<i32>>(); //~ ERROR E0277
assert::<*mut RefCell<i32>>();
//~^ ERROR `std::cell::UnsafeCell<i32>: std::panic::RefUnwindSafe` is not satisfied
//~^^ ERROR `std::cell::UnsafeCell<usize>: std::panic::RefUnwindSafe` is not satisfied
}


@ -1,64 +0,0 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test if the on_unimplemented message override works
#![feature(on_unimplemented)]
#![feature(rustc_attrs)]
#[rustc_on_unimplemented = "invalid"]
trait Index<Idx: ?Sized> {
type Output: ?Sized;
fn index(&self, index: Idx) -> &Self::Output;
}
#[rustc_on_unimplemented = "a isize is required to index into a slice"]
impl Index<isize> for [i32] {
type Output = i32;
fn index(&self, index: isize) -> &i32 {
&self[index as usize]
}
}
#[rustc_on_unimplemented = "a usize is required to index into a slice"]
impl Index<usize> for [i32] {
type Output = i32;
fn index(&self, index: usize) -> &i32 {
&self[index]
}
}
trait Foo<A, B> {
fn f(&self, a: &A, b: &B);
}
#[rustc_on_unimplemented = "two i32 Foo trait takes"]
impl Foo<i32, i32> for [i32] {
fn f(&self, a: &i32, b: &i32) {}
}
#[rustc_on_unimplemented = "two u32 Foo trait takes"]
impl Foo<u32, u32> for [i32] {
fn f(&self, a: &u32, b: &u32) {}
}
#[rustc_error]
fn main() {
Index::<u32>::index(&[1, 2, 3] as &[i32], 2u32); //~ ERROR E0277
//~| NOTE a usize is required
//~| NOTE required by
Index::<i32>::index(&[1, 2, 3] as &[i32], 2i32); //~ ERROR E0277
//~| NOTE a isize is required
//~| NOTE required by
Foo::<usize, usize>::f(&[1, 2, 3] as &[i32], &2usize, &2usize); //~ ERROR E0277
//~| NOTE two u32 Foo trait
//~| NOTE required by
}


@ -17,7 +17,9 @@ pub fn main() {
// Bool => does not implement iterator.
for i in false..true {}
//~^ ERROR E0277
//~^ ERROR `bool: std::num::One` is not satisfied
//~^^ ERROR `bool: std::iter::Step` is not satisfied
//~^^^ ERROR `for<'a> &'a bool: std::ops::Add` is not satisfied
// Unsized type.
let arr: &[_] = &[1, 2, 3];


@ -21,7 +21,5 @@ fn main() {
(box 10 as Box<bar>).dup();
//~^ ERROR E0038
//~| ERROR E0038
//~| ERROR E0038
//~| ERROR E0038
//~| ERROR E0277
}


@ -0,0 +1,30 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for #33344, initial version. This example allowed
// arbitrary trait bounds to be synthesized.
trait Tweedledum: IntoIterator {}
trait Tweedledee: IntoIterator {}
impl<T: Tweedledum> Tweedledee for T {}
impl<T: Tweedledee> Tweedledum for T {}
trait Combo: IntoIterator {}
impl<T: Tweedledee + Tweedledum> Combo for T {}
fn is_ee<T: Combo>(t: T) {
t.into_iter();
}
fn main() {
is_ee(4);
//~^ ERROR overflow evaluating the requirement `_: Tweedle
}