auto merge of #19342 : alexcrichton/rust/rollup, r=alexcrichton

This commit is contained in:
bors 2014-11-27 04:32:12 +00:00
commit f358ca45c8
239 changed files with 9713 additions and 10553 deletions

1
configure vendored
View File

@ -624,7 +624,6 @@ probe CFG_LD ld
probe CFG_VALGRIND valgrind
probe CFG_PERF perf
probe CFG_ISCC iscc
probe CFG_LLNEXTGEN LLnextgen
probe CFG_JAVAC javac
probe CFG_ANTLR4 antlr4
probe CFG_GRUN grun

View File

@ -246,26 +246,6 @@ endef
$(foreach lang,$(L10N_LANGS),$(eval $(call DEF_L10N_DOC,$(lang),guide)))
######################################################################
# LLnextgen (grammar analysis from refman)
######################################################################
ifeq ($(CFG_LLNEXTGEN),)
$(info cfg: no llnextgen found, omitting grammar-verification)
else
.PHONY: verify-grammar
doc/rust.g: $(D)/rust.md $(S)src/etc/extract_grammar.py
@$(call E, extract_grammar: $@)
$(Q)$(CFG_PYTHON) $(S)src/etc/extract_grammar.py $< >$@
verify-grammar: doc/rust.g
@$(call E, LLnextgen: $<)
$(Q)$(CFG_LLNEXTGEN) --generate-lexer-wrapper=no $< >$@
$(Q)rm -f doc/rust.c doc/rust.h
endif
######################################################################
# Rustdoc (libstd/extra)
######################################################################
@ -299,7 +279,8 @@ $(2) += doc/$(1)/index.html
doc/$(1)/index.html: CFG_COMPILER_HOST_TRIPLE = $(CFG_TARGET)
doc/$(1)/index.html: $$(LIB_DOC_DEP_$(1)) doc/$(1)/
@$$(call E, rustdoc: $$@)
$$(Q)$$(RUSTDOC) --cfg dox --cfg stage2 $$<
$$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(CFG_BUILD)) \
$$(RUSTDOC) --cfg dox --cfg stage2 $$<
endef
$(foreach crate,$(DOC_CRATES),$(eval $(call DEF_LIB_DOC,$(crate),DOC_TARGETS)))

View File

@ -49,6 +49,12 @@ else
LLVM_STDCPP_LOCATION_$(1) =
endif
# LLVM linkage:
LLVM_LINKAGE_PATH_$(1):=$$(abspath $$(RT_OUTPUT_DIR_$(1))/llvmdeps.rs)
$$(LLVM_LINKAGE_PATH_$(1)): $(S)src/etc/mklldeps.py $$(LLVM_CONFIG_$(1))
$(Q)$(CFG_PYTHON) "$$<" "$$@" "$$(LLVM_COMPONENTS)" "$$(CFG_ENABLE_LLVM_STATIC_STDCPP)" \
$$(LLVM_CONFIG_$(1))
endef
$(foreach host,$(CFG_HOST), \
@ -57,10 +63,14 @@ $(foreach host,$(CFG_HOST), \
$(foreach host,$(CFG_HOST), \
$(eval LLVM_CONFIGS := $(LLVM_CONFIGS) $(LLVM_CONFIG_$(host))))
$(S)src/librustc_llvm/llvmdeps.rs: \
$(LLVM_CONFIGS) \
$(S)src/etc/mklldeps.py \
$(MKFILE_DEPS)
$(Q)$(CFG_PYTHON) $(S)src/etc/mklldeps.py \
"$@" "$(LLVM_COMPONENTS)" "$(CFG_ENABLE_LLVM_STATIC_STDCPP)" \
$(LLVM_CONFIGS)
# This can't be done in target.mk because it's included before this file.
define LLVM_LINKAGE_DEPS
$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.rustc_llvm: $$(LLVM_LINKAGE_PATH_$(3))
endef
$(foreach source,$(CFG_HOST), \
$(foreach target,$(CFG_TARGET), \
$(eval $(call LLVM_LINKAGE_DEPS,0,$(target),$(source))) \
$(eval $(call LLVM_LINKAGE_DEPS,1,$(target),$(source))) \
$(eval $(call LLVM_LINKAGE_DEPS,2,$(target),$(source))) \
$(eval $(call LLVM_LINKAGE_DEPS,3,$(target),$(source)))))

View File

@ -79,7 +79,8 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \
$$(dir $$@)$$(call CFG_LIB_GLOB_$(2),$(4)))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES, \
$$(dir $$@)$$(call CFG_RLIB_GLOB,$(4)))
$$(STAGE$(1)_T_$(2)_H_$(3)) \
$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
$$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) \
$$(RUST_LIB_FLAGS_ST$(1)) \
-L "$$(RT_OUTPUT_DIR_$(2))" \
-L "$$(LLVM_LIBDIR_$(2))" \
@ -134,8 +135,6 @@ SNAPSHOT_RUSTC_POST_CLEANUP=$(HBIN0_H_$(CFG_BUILD))/rustc$(X_$(CFG_BUILD))
define TARGET_HOST_RULES
$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.rustc_llvm: $(S)src/librustc_llvm/llvmdeps.rs
$$(TBIN$(1)_T_$(2)_H_$(3))/:
mkdir -p $$@

View File

@ -412,7 +412,8 @@ $(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)): \
$$(CRATEFILE_$(4)) \
$$(TESTDEP_$(1)_$(2)_$(3)_$(4))
@$$(call E, rustc: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test \
$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
$$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) -o $$@ $$< --test \
-L "$$(RT_OUTPUT_DIR_$(2))" \
-L "$$(LLVM_LIBDIR_$(2))" \
$$(RUSTFLAGS_$(4))
@ -890,7 +891,8 @@ endif
ifeq ($(2),$$(CFG_BUILD))
$$(call TEST_OK_FILE,$(1),$(2),$(3),doc-crate-$(4)): $$(CRATEDOCTESTDEP_$(1)_$(2)_$(3)_$(4))
@$$(call E, run doc-crate-$(4) [$(2)])
$$(Q)$$(RUSTDOC_$(1)_T_$(2)_H_$(3)) --test --cfg dox \
$$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
$$(RUSTDOC_$(1)_T_$(2)_H_$(3)) --test --cfg dox \
$$(CRATEFILE_$(4)) --test-args "$$(TESTARGS)" && touch $$@
else
$$(call TEST_OK_FILE,$(1),$(2),$(3),doc-crate-$(4)):

View File

@ -2,7 +2,7 @@
# I think I found a bug in the compiler!
If you see this message: `error: internal compiler error: unexpected failure`,
If you see this message: `error: internal compiler error: unexpected panic`,
then you have definitely found a bug in the compiler. It's also possible that
your code is not well-typed, but if you saw this message, it's still a bug in
error reporting.

View File

@ -445,11 +445,32 @@ fn succ(x: &int) -> int { *x + 1 }
to
```{rust}
use std::rc::Rc;
fn box_succ(x: Box<int>) -> int { *x + 1 }
fn rc_succ(x: std::rc::Rc<int>) -> int { *x + 1 }
fn rc_succ(x: Rc<int>) -> int { *x + 1 }
```
Note that the caller of your function will have to modify their calls slightly:
```{rust}
use std::rc::Rc;
fn succ(x: &int) -> int { *x + 1 }
let ref_x = &5i;
let box_x = box 5i;
let rc_x = Rc::new(5i);
succ(ref_x);
succ(&*box_x);
succ(&*rc_x);
```
The initial `*` dereferences the pointer, and then `&` takes a reference to
those contents.
# Boxes
`Box<T>` is Rust's 'boxed pointer' type. Boxes provide the simplest form of
@ -572,7 +593,7 @@ fn add_one(x: &mut int) -> int {
fn main() {
let x = box 5i;
println!("{}", add_one(&*x)); // error: cannot borrow immutable dereference
println!("{}", add_one(&*x)); // error: cannot borrow immutable dereference
// of `&`-pointer as mutable
}
```
@ -700,9 +721,9 @@ This gives you flexibility without sacrificing performance.
You may think that this gives us terrible performance: return a value and then
immediately box it up ?! Isn't that the worst of both worlds? Rust is smarter
than that. There is no copy in this code. main allocates enough room for the
`box , passes a pointer to that memory into foo as x, and then foo writes the
value straight into that pointer. This writes the return value directly into
than that. There is no copy in this code. `main` allocates enough room for the
`box`, passes a pointer to that memory into `foo` as `x`, and then `foo` writes
the value straight into that pointer. This writes the return value directly into
the allocated box.
This is important enough that it bears repeating: pointers are not for

View File

@ -62,7 +62,7 @@ the easiest way to keep people updated while Rust is in its alpha state.
Oh, we should also mention the officially supported platforms:
* Windows (7, 8, Server 2008 R2), x86 only
* Windows (7, 8, Server 2008 R2)
* Linux (2.6.18 or later, various distributions), x86 and x86-64
* OSX 10.7 (Lion) or greater, x86 and x86-64
@ -378,9 +378,15 @@ of your time with Rust.
The first thing we'll learn about are 'variable bindings.' They look like this:
```{rust}
let x = 5i;
fn main() {
let x = 5i;
}
```
Putting `fn main() {` in each example is a bit tedious, so we'll leave that out
in the future. If you're following along, make sure to edit your `main()`
function, rather than leaving it off. Otherwise, you'll get an error.
In many languages, this is called a 'variable.' But Rust's variable bindings
have a few tricks up their sleeves. Rust has a very powerful feature called
'pattern matching' that we'll get into detail with later, but the left
@ -683,7 +689,7 @@ fn main() {
```
This is the simplest possible function declaration. As we mentioned before,
`fn` says 'this is a function,' followed by the name, some parenthesis because
`fn` says 'this is a function,' followed by the name, some parentheses because
this function takes no arguments, and then some curly braces to indicate the
body. Here's a function named `foo`:
@ -884,7 +890,7 @@ Tuples are an ordered list of a fixed size. Like this:
let x = (1i, "hello");
```
The parenthesis and commas form this two-length tuple. Here's the same code, but
The parentheses and commas form this two-length tuple. Here's the same code, but
with the type annotated:
```rust
@ -908,9 +914,9 @@ let (x, y, z) = (1i, 2i, 3i);
println!("x is {}", x);
```
Remember before when I said the left hand side of a `let` statement was more
Remember before when I said the left-hand side of a `let` statement was more
powerful than just assigning a binding? Here we are. We can put a pattern on
the left hand side of the `let`, and if it matches up to the right hand side,
the left-hand side of the `let`, and if it matches up to the right-hand side,
we can assign multiple bindings at once. In this case, `let` 'destructures,'
or 'breaks up,' the tuple, and assigns the bits to three bindings.
@ -1453,9 +1459,9 @@ focus. Any time you have a data structure of variable size, things can get
tricky, and strings are a re-sizable data structure. That said, Rust's strings
also work differently than in some other systems languages, such as C.
Let's dig into the details. A **string** is a sequence of unicode scalar values
Let's dig into the details. A **string** is a sequence of Unicode scalar values
encoded as a stream of UTF-8 bytes. All strings are guaranteed to be
validly-encoded UTF-8 sequences. Additionally, strings are not null-terminated
validly encoded UTF-8 sequences. Additionally, strings are not null-terminated
and can contain null bytes.
Rust has two main types of strings: `&str` and `String`.
@ -3933,7 +3939,7 @@ match x {
}
```
Here, the `val` inside the `match` has type `int`. In other words, the left hand
Here, the `val` inside the `match` has type `int`. In other words, the left-hand
side of the pattern destructures the value. If we have `&5i`, then in `&val`, `val`
would be `5i`.
@ -3991,6 +3997,35 @@ match origin {
}
```
You can do this kind of match on any member, not just the first:
```{rust}
# #![allow(non_shorthand_field_patterns)]
struct Point {
x: int,
y: int,
}
let origin = Point { x: 0i, y: 0i };
match origin {
Point { y: y, .. } => println!("y is {}", y),
}
```
If you want to match against a slice or array, you can use `[]`:
```{rust}
fn main() {
let v = vec!["match_this", "1"];
match v.as_slice() {
["match_this", second] => println!("The second element is {}", second),
_ => {},
}
}
```
Whew! That's a lot of different ways to match things, and they can all be
mixed and matched, depending on what you're doing:
@ -4681,7 +4716,7 @@ let x: Option<int> = Some(5i);
In the type declaration, we say `Option<int>`. Note how similar this looks to
`Option<T>`. So, in this particular `Option`, `T` has the value of `int`. On
the right hand side of the binding, we do make a `Some(T)`, where `T` is `5i`.
the right-hand side of the binding, we do make a `Some(T)`, where `T` is `5i`.
Since that's an `int`, the two sides match, and Rust is happy. If they didn't
match, we'd get an error:
@ -5249,7 +5284,7 @@ immediately.
## Success and failure
Tasks don't always succeed, they can also panic. A task that wishes to panic
Tasks don't always succeed; they can also panic. A task that wishes to panic
can call the `panic!` macro, passing a message:
```{rust}

View File

@ -19,6 +19,7 @@ f = open(sys.argv[1], 'wb')
components = sys.argv[2].split(' ')
components = [i for i in components if i] # ignore extra whitespaces
enable_static = sys.argv[3]
llconfig = sys.argv[4]
f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
@ -44,69 +45,47 @@ def run(args):
sys.exit(1)
return out
for llconfig in sys.argv[4:]:
f.write("\n")
f.write("\n")
out = run([llconfig, '--host-target'])
arch, os = out.split('-', 1)
arch = 'x86' if arch == 'i686' or arch == 'i386' else arch
if 'darwin' in os:
os = 'macos'
elif 'linux' in os:
os = 'linux'
elif 'freebsd' in os:
os = 'freebsd'
elif 'dragonfly' in os:
os = 'dragonfly'
elif 'android' in os:
os = 'android'
elif 'win' in os or 'mingw' in os:
os = 'windows'
cfg = [
"target_arch = \"" + arch + "\"",
"target_os = \"" + os + "\"",
]
version = run([llconfig, '--version']).strip()
f.write("#[cfg(all(" + ', '.join(cfg) + "))]\n")
# LLVM libs
if version < '3.5':
args = [llconfig, '--libs']
else:
args = [llconfig, '--libs', '--system-libs']
version = run([llconfig, '--version']).strip()
args.extend(components)
out = run(args)
for lib in out.strip().replace("\n", ' ').split(' '):
lib = lib.strip()[2:] # chop of the leading '-l'
f.write("#[link(name = \"" + lib + "\"")
# LLVM libraries are all static libraries
if 'LLVM' in lib:
f.write(", kind = \"static\"")
f.write(")]\n")
# LLVM libs
if version < '3.5':
args = [llconfig, '--libs']
else:
args = [llconfig, '--libs', '--system-libs']
args.extend(components)
out = run(args)
for lib in out.strip().replace("\n", ' ').split(' '):
lib = lib.strip()[2:] # chop of the leading '-l'
f.write("#[link(name = \"" + lib + "\"")
# LLVM libraries are all static libraries
if 'LLVM' in lib:
f.write(", kind = \"static\"")
f.write(")]\n")
# llvm-config before 3.5 didn't have a system-libs flag
if version < '3.5':
if os == 'win32':
# llvm-config before 3.5 didn't have a system-libs flag
if version < '3.5':
if os == 'win32':
f.write("#[link(name = \"imagehlp\")]")
# LLVM ldflags
out = run([llconfig, '--ldflags'])
for lib in out.strip().split(' '):
if lib[:2] == "-l":
f.write("#[link(name = \"" + lib[2:] + "\")]\n")
# LLVM ldflags
out = run([llconfig, '--ldflags'])
for lib in out.strip().split(' '):
if lib[:2] == "-l":
f.write("#[link(name = \"" + lib[2:] + "\")]\n")
# C++ runtime library
out = run([llconfig, '--cxxflags'])
if enable_static == '1':
assert('stdlib=libc++' not in out)
f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
else:
if 'stdlib=libc++' in out:
# C++ runtime library
out = run([llconfig, '--cxxflags'])
if enable_static == '1':
assert('stdlib=libc++' not in out)
f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
else:
if 'stdlib=libc++' in out:
f.write("#[link(name = \"c++\")]\n")
else:
else:
f.write("#[link(name = \"stdc++\")]\n")
# Attach everything to an extern block
f.write("extern {}\n")
# Attach everything to an extern block
f.write("extern {}\n")

View File

@ -567,6 +567,13 @@ impl<'a, T> Iterator<&'a T> for Items<'a, T> {
fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
}
// Reverse iteration simply delegates to the underlying vector iterator;
// the order is the heap's internal storage order, not sorted order.
impl<'a, T> DoubleEndedIterator<&'a T> for Items<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<(&'a T)> { self.iter.next_back() }
}
// The inner vec iterator reports an exact length, so `Items` can too.
impl<'a, T> ExactSizeIterator<&'a T> for Items<'a, T> {}
/// An iterator that moves out of a `BinaryHeap`.
pub struct MoveItems<T> {
iter: vec::MoveItems<T>,
@ -625,6 +632,16 @@ mod tests {
}
}
#[test]
fn test_iterator_reverse() {
    // Reversed iteration must walk the heap's backing storage back-to-front.
    let heap = BinaryHeap::from_vec(vec![5i, 9, 3]);
    let expected = vec![3i, 5, 9];
    let reversed: Vec<int> = heap.iter().rev().map(|&x| x).collect();
    assert_eq!(reversed, expected);
}
#[test]
fn test_move_iter() {
let data = vec!(5i, 9, 3);

View File

@ -42,27 +42,25 @@ impl<E:CLike+fmt::Show> fmt::Show for EnumSet<E> {
}
}
/**
An interface for casting C-like enum to uint and back.
A typically implementation is as below.
```{rust,ignore}
#[repr(uint)]
enum Foo {
A, B, C
}
impl CLike for Foo {
fn to_uint(&self) -> uint {
*self as uint
}
fn from_uint(v: uint) -> Foo {
unsafe { mem::transmute(v) }
}
}
```
*/
/// An interface for casting C-like enum to uint and back.
/// A typical implementation is shown below.
///
/// ```{rust,ignore}
/// #[repr(uint)]
/// enum Foo {
/// A, B, C
/// }
///
/// impl CLike for Foo {
/// fn to_uint(&self) -> uint {
/// *self as uint
/// }
///
/// fn from_uint(v: uint) -> Foo {
/// unsafe { mem::transmute(v) }
/// }
/// }
/// ```
pub trait CLike {
/// Converts a C-like enum to a `uint`.
fn to_uint(&self) -> uint;

View File

@ -8,58 +8,56 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Generic hashing support.
*
* This module provides a generic way to compute the hash of a value. The
* simplest way to make a type hashable is to use `#[deriving(Hash)]`:
*
* # Example
*
* ```rust
* use std::hash;
* use std::hash::Hash;
*
* #[deriving(Hash)]
* struct Person {
* id: uint,
* name: String,
* phone: u64,
* }
*
* let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
* let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
*
* assert!(hash::hash(&person1) != hash::hash(&person2));
* ```
*
* If you need more control over how a value is hashed, you need to implement
* the trait `Hash`:
*
* ```rust
* use std::hash;
* use std::hash::Hash;
* use std::hash::sip::SipState;
*
* struct Person {
* id: uint,
* name: String,
* phone: u64,
* }
*
* impl Hash for Person {
* fn hash(&self, state: &mut SipState) {
* self.id.hash(state);
* self.phone.hash(state);
* }
* }
*
* let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
* let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
*
* assert!(hash::hash(&person1) == hash::hash(&person2));
* ```
*/
//! Generic hashing support.
//!
//! This module provides a generic way to compute the hash of a value. The
//! simplest way to make a type hashable is to use `#[deriving(Hash)]`:
//!
//! # Example
//!
//! ```rust
//! use std::hash;
//! use std::hash::Hash;
//!
//! #[deriving(Hash)]
//! struct Person {
//! id: uint,
//! name: String,
//! phone: u64,
//! }
//!
//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
//!
//! assert!(hash::hash(&person1) != hash::hash(&person2));
//! ```
//!
//! If you need more control over how a value is hashed, you need to implement
//! the trait `Hash`:
//!
//! ```rust
//! use std::hash;
//! use std::hash::Hash;
//! use std::hash::sip::SipState;
//!
//! struct Person {
//! id: uint,
//! name: String,
//! phone: u64,
//! }
//!
//! impl Hash for Person {
//! fn hash(&self, state: &mut SipState) {
//! self.id.hash(state);
//! self.phone.hash(state);
//! }
//! }
//!
//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
//!
//! assert!(hash::hash(&person1) == hash::hash(&person2));
//! ```
#![allow(unused_must_use)]

View File

@ -34,8 +34,6 @@ static MINIMUM_CAPACITY: uint = 2u;
// FIXME(conventions): implement shrink_to_fit. Awkward with the current design, but it should
// be scrapped anyway. Defer to rewrite?
// FIXME(conventions): implement into_iter
/// `RingBuf` is a circular buffer that implements `Deque`.
pub struct RingBuf<T> {
@ -394,6 +392,14 @@ impl<T> RingBuf<T> {
}
}
/// Consumes the list into an iterator yielding elements by value.
///
/// Elements come out front-to-back: the returned iterator pops from the
/// front of the buffer as it advances (see `MoveItems::next`).
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> MoveItems<T> {
MoveItems {
inner: self,
}
}
/// Returns the number of elements in the `RingBuf`.
///
/// # Example
@ -736,11 +742,9 @@ impl<'a, T> Iterator<&'a mut T> for MutItems<'a, T> {
}
let tail = self.tail;
self.tail = wrap_index(self.tail + 1, self.cap);
if mem::size_of::<T>() != 0 {
unsafe { Some(&mut *self.ptr.offset(tail as int)) }
} else {
// use a non-zero pointer
Some(unsafe { mem::transmute(1u) })
unsafe {
Some(&mut *self.ptr.offset(tail as int))
}
}
@ -758,12 +762,43 @@ impl<'a, T> DoubleEndedIterator<&'a mut T> for MutItems<'a, T> {
return None;
}
self.head = wrap_index(self.head - 1, self.cap);
unsafe { Some(&mut *self.ptr.offset(self.head as int)) }
unsafe {
Some(&mut *self.ptr.offset(self.head as int))
}
}
}
impl<'a, T> ExactSizeIterator<&'a mut T> for MutItems<'a, T> {}
/// A by-value `RingBuf` iterator: owns the deque and drains it as it
/// advances.
pub struct MoveItems<T> {
// The remaining, not-yet-yielded elements.
inner: RingBuf<T>,
}
impl<T> Iterator<T> for MoveItems<T> {
#[inline]
fn next(&mut self) -> Option<T> {
// Forward iteration pops from the front of the deque.
self.inner.pop_front()
}
#[inline]
fn size_hint(&self) -> (uint, Option<uint>) {
// Exact bound: everything still in `inner` will be yielded.
let len = self.inner.len();
(len, Some(len))
}
}
impl<T> DoubleEndedIterator<T> for MoveItems<T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
// Reverse iteration pops from the back of the deque.
self.inner.pop_back()
}
}
// Justified by the exact `size_hint` above.
impl<T> ExactSizeIterator<T> for MoveItems<T> {}
impl<A: PartialEq> PartialEq for RingBuf<A> {
fn eq(&self, other: &RingBuf<A>) -> bool {
self.len() == other.len() &&
@ -1313,6 +1348,65 @@ mod tests {
}
}
#[test]
fn test_into_iter() {
// Exercises into_iter over empty, contiguous, wrapped, and
// partially-consumed buffers (front and back pops interleaved).
// Empty iter
{
let d: RingBuf<int> = RingBuf::new();
let mut iter = d.into_iter();
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
assert_eq!(iter.size_hint(), (0, Some(0)));
}
// simple iter
{
let mut d = RingBuf::new();
for i in range(0i, 5) {
d.push_back(i);
}
let b = vec![0,1,2,3,4];
assert_eq!(d.into_iter().collect::<Vec<int>>(), b);
}
// wrapped iter
{
let mut d = RingBuf::new();
for i in range(0i, 5) {
d.push_back(i);
}
// push_front forces the ring to wrap around the backing buffer.
for i in range(6, 9) {
d.push_front(i);
}
let b = vec![8,7,6,0,1,2,3,4];
assert_eq!(d.into_iter().collect::<Vec<int>>(), b);
}
// partially used
{
let mut d = RingBuf::new();
for i in range(0i, 5) {
d.push_back(i);
}
for i in range(6, 9) {
d.push_front(i);
}
// size_hint must shrink by exactly one after each pop from either end.
let mut it = d.into_iter();
assert_eq!(it.size_hint(), (8, Some(8)));
assert_eq!(it.next(), Some(8));
assert_eq!(it.size_hint(), (7, Some(7)));
assert_eq!(it.next_back(), Some(4));
assert_eq!(it.size_hint(), (6, Some(6)));
assert_eq!(it.next(), Some(7));
assert_eq!(it.size_hint(), (5, Some(5)));
}
}
#[test]
fn test_from_iter() {
use std::iter;

View File

@ -106,7 +106,7 @@ pub use core::slice::{OrdSlicePrelude, SlicePrelude, Items, MutItems};
pub use core::slice::{ImmutableIntSlice, MutableIntSlice};
pub use core::slice::{MutSplits, MutChunks, Splits};
pub use core::slice::{bytes, mut_ref_slice, ref_slice, CloneSlicePrelude};
pub use core::slice::{Found, NotFound, from_raw_buf, from_raw_mut_buf};
pub use core::slice::{from_raw_buf, from_raw_mut_buf, BinarySearchResult};
// Functional utilities

View File

@ -9,7 +9,6 @@
// except according to those terms.
// FIXME(conventions): implement bounded iterators
// FIXME(conventions): implement union family of fns
// FIXME(conventions): implement BitOr, BitAnd, BitXor, and Sub
// FIXME(conventions): replace each_reverse by making iter DoubleEnded
// FIXME(conventions): implement iter_mut and into_iter
@ -19,6 +18,7 @@ use core::prelude::*;
use core::default::Default;
use core::fmt;
use core::fmt::Show;
use core::iter::Peekable;
use std::hash::Hash;
use trie_map::{TrieMap, Entries};
@ -172,6 +172,106 @@ impl TrieSet {
SetItems{iter: self.map.upper_bound(val)}
}
/// Visits the values representing the difference, in ascending order.
///
/// # Example
///
/// ```
/// use std::collections::TrieSet;
///
/// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
/// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
///
/// // Can be seen as `a - b`.
/// for x in a.difference(&b) {
/// println!("{}", x); // Print 1 then 2
/// }
///
/// let diff1: TrieSet = a.difference(&b).collect();
/// assert_eq!(diff1, [1, 2].iter().map(|&x| x).collect());
///
/// // Note that difference is not symmetric,
/// // and `b - a` means something else:
/// let diff2: TrieSet = b.difference(&a).collect();
/// assert_eq!(diff2, [4, 5].iter().map(|&x| x).collect());
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn difference<'a>(&'a self, other: &'a TrieSet) -> DifferenceItems<'a> {
// Lazy: the comparison work happens in the iterator's `next`.
DifferenceItems{a: self.iter().peekable(), b: other.iter().peekable()}
}
/// Visits the values representing the symmetric difference, in ascending order.
///
/// # Example
///
/// ```
/// use std::collections::TrieSet;
///
/// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
/// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
///
/// // Print 1, 2, 4, 5 in ascending order.
/// for x in a.symmetric_difference(&b) {
/// println!("{}", x);
/// }
///
/// let diff1: TrieSet = a.symmetric_difference(&b).collect();
/// let diff2: TrieSet = b.symmetric_difference(&a).collect();
///
/// assert_eq!(diff1, diff2);
/// assert_eq!(diff1, [1, 2, 4, 5].iter().map(|&x| x).collect());
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle."]
pub fn symmetric_difference<'a>(&'a self, other: &'a TrieSet) -> SymDifferenceItems<'a> {
// Lazy: the comparison work happens in the iterator's `next`.
SymDifferenceItems{a: self.iter().peekable(), b: other.iter().peekable()}
}
/// Visits the values representing the intersection, in ascending order.
///
/// # Example
///
/// ```
/// use std::collections::TrieSet;
///
/// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
/// let b: TrieSet = [2, 3, 4].iter().map(|&x| x).collect();
///
/// // Print 2, 3 in ascending order.
/// for x in a.intersection(&b) {
/// println!("{}", x);
/// }
///
/// let diff: TrieSet = a.intersection(&b).collect();
/// assert_eq!(diff, [2, 3].iter().map(|&x| x).collect());
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn intersection<'a>(&'a self, other: &'a TrieSet) -> IntersectionItems<'a> {
// Lazy: the comparison work happens in the iterator's `next`.
IntersectionItems{a: self.iter().peekable(), b: other.iter().peekable()}
}
/// Visits the values representing the union, in ascending order.
///
/// # Example
///
/// ```
/// use std::collections::TrieSet;
///
/// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
/// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
///
/// // Print 1, 2, 3, 4, 5 in ascending order.
/// for x in a.union(&b) {
/// println!("{}", x);
/// }
///
/// let diff: TrieSet = a.union(&b).collect();
/// assert_eq!(diff, [1, 2, 3, 4, 5].iter().map(|&x| x).collect());
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn union<'a>(&'a self, other: &'a TrieSet) -> UnionItems<'a> {
// Lazy: the comparison work happens in the iterator's `next`.
UnionItems{a: self.iter().peekable(), b: other.iter().peekable()}
}
/// Return the number of elements in the set
///
/// # Example
@ -368,6 +468,39 @@ pub struct SetItems<'a> {
iter: Entries<'a, ()>
}
// Each of the set-operation adaptors below holds `Peekable` views of both
// input sets so `next` can compare the two stream heads without consuming
// either of them.
/// An iterator producing elements in the set difference (in-order).
pub struct DifferenceItems<'a> {
a: Peekable<uint, SetItems<'a>>,
b: Peekable<uint, SetItems<'a>>,
}
/// An iterator producing elements in the set symmetric difference (in-order).
pub struct SymDifferenceItems<'a> {
a: Peekable<uint, SetItems<'a>>,
b: Peekable<uint, SetItems<'a>>,
}
/// An iterator producing elements in the set intersection (in-order).
pub struct IntersectionItems<'a> {
a: Peekable<uint, SetItems<'a>>,
b: Peekable<uint, SetItems<'a>>,
}
/// An iterator producing elements in the set union (in-order).
pub struct UnionItems<'a> {
a: Peekable<uint, SetItems<'a>>,
b: Peekable<uint, SetItems<'a>>,
}
/// Compare `x` and `y`, treating a missing value as an extreme:
/// `short` is returned when `x` is `None`, `long` when `y` is `None`
/// (an exhausted stream sorts to whichever end the caller chooses).
fn cmp_opt(x: Option<&uint>, y: Option<&uint>, short: Ordering, long: Ordering) -> Ordering {
    match x {
        // `x` stream exhausted (regardless of `y`).
        None => short,
        Some(x1) => match y {
            // `y` stream exhausted.
            None => long,
            // Both present: ordinary value comparison.
            Some(y1) => x1.cmp(y1),
        },
    }
}
impl<'a> Iterator<uint> for SetItems<'a> {
fn next(&mut self) -> Option<uint> {
self.iter.next().map(|(key, _)| key)
@ -378,6 +511,60 @@ impl<'a> Iterator<uint> for SetItems<'a> {
}
}
// Yields elements of `a` that are not in `b`. Both inputs are ascending,
// so a single merge-style pass over the two streams suffices.
impl<'a> Iterator<uint> for DifferenceItems<'a> {
fn next(&mut self) -> Option<uint> {
loop {
// Both tie-break arguments are Less: if `a` is exhausted we yield its
// `None`; if `b` is exhausted the remaining `a` elements all pass.
match cmp_opt(self.a.peek(), self.b.peek(), Less, Less) {
// Head of `a` is smaller, so it cannot appear in `b`: yield it.
Less => return self.a.next(),
// Present in both sets: skip it on both sides.
Equal => { self.a.next(); self.b.next(); }
// Head of `b` is smaller: discard it and keep scanning.
Greater => { self.b.next(); }
}
}
}
}
// Yields elements in exactly one of the two sets, in ascending order.
impl<'a> Iterator<uint> for SymDifferenceItems<'a> {
fn next(&mut self) -> Option<uint> {
loop {
// Greater/Less tie-breaks: once one stream is exhausted, the other
// stream's remaining elements are all yielded.
match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) {
// Only in `a`.
Less => return self.a.next(),
// In both: drop from both sides and continue.
Equal => { self.a.next(); self.b.next(); }
// Only in `b`.
Greater => return self.b.next(),
}
}
}
}
// Yields elements present in both sets, in ascending order.
impl<'a> Iterator<uint> for IntersectionItems<'a> {
fn next(&mut self) -> Option<uint> {
loop {
// Unlike the other adaptors, either stream running out ends the
// intersection, so None is propagated instead of a tie-break Ordering.
let o_cmp = match (self.a.peek(), self.b.peek()) {
(None    , _       ) => None,
(_       , None    ) => None,
(Some(a1), Some(b1)) => Some(a1.cmp(b1)),
};
match o_cmp {
None => return None,
// Head of `a` not in `b`: advance `a` past it.
Some(Less) => { self.a.next(); }
// Match: consume from `b`, yield from `a`.
Some(Equal) => { self.b.next(); return self.a.next() }
// Head of `b` not in `a`: advance `b` past it.
Some(Greater) => { self.b.next(); }
}
}
}
}
// Yields elements present in either set, in ascending order, each once.
impl<'a> Iterator<uint> for UnionItems<'a> {
fn next(&mut self) -> Option<uint> {
loop {
// Greater/Less tie-breaks: an exhausted stream defers to the other.
match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) {
Less => return self.a.next(),
// Duplicate: consume from `b`, yield the single copy from `a`.
Equal => { self.b.next(); return self.a.next() }
Greater => return self.b.next(),
}
}
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
@ -471,4 +658,84 @@ mod test {
assert!(b > a && b >= a);
assert!(a < b && a <= b);
}
// Shared driver: builds `a` and `b` as TrieSets, runs the set operation
// supplied via `f`, and asserts it visits exactly `expected`, in order.
// (`all` with an always-true closure is used purely to drive the iterator.)
fn check(a: &[uint],
b: &[uint],
expected: &[uint],
f: |&TrieSet, &TrieSet, f: |uint| -> bool| -> bool) {
let mut set_a = TrieSet::new();
let mut set_b = TrieSet::new();
// insert returns true only for values not already present, so the
// fixtures must contain no duplicates.
for x in a.iter() { assert!(set_a.insert(*x)) }
for y in b.iter() { assert!(set_b.insert(*y)) }
let mut i = 0;
f(&set_a, &set_b, |x| {
assert_eq!(x, expected[i]);
i += 1;
true
});
// Every expected element must have been visited, none skipped.
assert_eq!(i, expected.len());
}
#[test]
fn test_intersection() {
fn check_intersection(a: &[uint], b: &[uint], expected: &[uint]) {
check(a, b, expected, |x, y, f| x.intersection(y).all(f))
}
check_intersection(&[], &[], &[]);
check_intersection(&[1, 2, 3], &[], &[]);
check_intersection(&[], &[1, 2, 3], &[]);
check_intersection(&[2], &[1, 2, 3], &[2]);
check_intersection(&[1, 2, 3], &[2], &[2]);
// Unsorted fixtures: output must still come out ascending.
check_intersection(&[11, 1, 3, 77, 103, 5],
&[2, 11, 77, 5, 3],
&[3, 5, 11, 77]);
}
#[test]
fn test_difference() {
fn check_difference(a: &[uint], b: &[uint], expected: &[uint]) {
check(a, b, expected, |x, y, f| x.difference(y).all(f))
}
check_difference(&[], &[], &[]);
check_difference(&[1, 12], &[], &[1, 12]);
check_difference(&[], &[1, 2, 3, 9], &[]);
check_difference(&[1, 3, 5, 9, 11],
&[3, 9],
&[1, 5, 11]);
// Disjoint sets: difference is all of `a`.
check_difference(&[11, 22, 33, 40, 42],
&[14, 23, 34, 38, 39, 50],
&[11, 22, 33, 40, 42]);
}
#[test]
fn test_symmetric_difference() {
fn check_symmetric_difference(a: &[uint], b: &[uint], expected: &[uint]) {
check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
}
check_symmetric_difference(&[], &[], &[]);
check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
check_symmetric_difference(&[1, 3, 5, 9, 11],
&[3, 9, 14, 22],
&[1, 5, 11, 14, 22]);
}
#[test]
fn test_union() {
fn check_union(a: &[uint], b: &[uint], expected: &[uint]) {
check(a, b, expected, |x, y, f| x.union(y).all(f))
}
check_union(&[], &[], &[]);
check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
check_union(&[1, 3, 5, 9, 11, 16, 19, 24],
&[1, 5, 9, 13, 19],
&[1, 3, 5, 9, 11, 13, 16, 19, 24]);
}
}

View File

@ -187,6 +187,7 @@ impl<T> Vec<T> {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
}
}

View File

@ -8,18 +8,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! The `Clone` trait for types that cannot be 'implicitly copied'
In Rust, some simple types are "implicitly copyable" and when you
assign them or pass them as arguments, the receiver will get a copy,
leaving the original value in place. These types do not require
allocation to copy and do not have finalizers (i.e. they do not
contain owned boxes or implement `Drop`), so the compiler considers
them cheap and safe to copy. For other types copies must be made
explicitly, by convention implementing the `Clone` trait and calling
the `clone` method.
*/
//! The `Clone` trait for types that cannot be 'implicitly copied'
//!
//! In Rust, some simple types are "implicitly copyable" and when you
//! assign them or pass them as arguments, the receiver will get a copy,
//! leaving the original value in place. These types do not require
//! allocation to copy and do not have finalizers (i.e. they do not
//! contain owned boxes or implement `Drop`), so the compiler considers
//! them cheap and safe to copy. For other types copies must be made
//! explicitly, by convention implementing the `Clone` trait and calling
//! the `clone` method.
#![unstable]

View File

@ -8,27 +8,25 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The Finally trait provides a method, `finally` on
stack closures that emulates Java-style try/finally blocks.
Using the `finally` method is sometimes convenient, but the type rules
prohibit any shared, mutable state between the "try" case and the
"finally" case. For advanced cases, the `try_finally` function can
also be used. See that function for more details.
# Example
```
use std::finally::Finally;
(|| {
// ...
}).finally(|| {
// this code is always run
})
```
*/
//! The Finally trait provides a method, `finally` on
//! stack closures that emulates Java-style try/finally blocks.
//!
//! Using the `finally` method is sometimes convenient, but the type rules
//! prohibit any shared, mutable state between the "try" case and the
//! "finally" case. For advanced cases, the `try_finally` function can
//! also be used. See that function for more details.
//!
//! # Example
//!
//! ```
//! use std::finally::Finally;
//!
//! (|| {
//! // ...
//! }).finally(|| {
//! // this code is always run
//! })
//! ```
#![experimental]
@ -58,38 +56,36 @@ impl<T> Finally<T> for fn() -> T {
}
}
/**
* The most general form of the `finally` functions. The function
* `try_fn` will be invoked first; whether or not it panics, the
* function `finally_fn` will be invoked next. The two parameters
* `mutate` and `drop` are used to thread state through the two
* closures. `mutate` is used for any shared, mutable state that both
* closures require access to; `drop` is used for any state that the
* `try_fn` requires ownership of.
*
* **WARNING:** While shared, mutable state between the try and finally
* function is often necessary, one must be very careful; the `try`
* function could have panicked at any point, so the values of the shared
* state may be inconsistent.
*
* # Example
*
* ```
* use std::finally::try_finally;
*
* struct State<'a> { buffer: &'a mut [u8], len: uint }
* # let mut buf = [];
* let mut state = State { buffer: &mut buf, len: 0 };
* try_finally(
* &mut state, (),
* |state, ()| {
* // use state.buffer, state.len
* },
* |state| {
* // use state.buffer, state.len to cleanup
* })
* ```
*/
/// The most general form of the `finally` functions. The function
/// `try_fn` will be invoked first; whether or not it panics, the
/// function `finally_fn` will be invoked next. The two parameters
/// `mutate` and `drop` are used to thread state through the two
/// closures. `mutate` is used for any shared, mutable state that both
/// closures require access to; `drop` is used for any state that the
/// `try_fn` requires ownership of.
///
/// **WARNING:** While shared, mutable state between the try and finally
/// function is often necessary, one must be very careful; the `try`
/// function could have panicked at any point, so the values of the shared
/// state may be inconsistent.
///
/// # Example
///
/// ```
/// use std::finally::try_finally;
///
/// struct State<'a> { buffer: &'a mut [u8], len: uint }
/// # let mut buf = [];
/// let mut state = State { buffer: &mut buf, len: 0 };
/// try_finally(
/// &mut state, (),
/// |state, ()| {
/// // use state.buffer, state.len
/// },
/// |state| {
/// // use state.buffer, state.len to cleanup
/// })
/// ```
pub fn try_finally<T,U,R>(mutate: &mut T,
drop: U,
try_fn: |&mut T, U| -> R,

View File

@ -54,36 +54,36 @@ pub enum SignFormat {
static DIGIT_E_RADIX: uint = ('e' as uint) - ('a' as uint) + 11u;
/**
* Converts a number to its string representation as a byte vector.
* This is meant to be a common base implementation for all numeric string
* conversion functions like `to_string()` or `to_str_radix()`.
*
* # Arguments
* - `num` - The number to convert. Accepts any number that
* implements the numeric traits.
* - `radix` - Base to use. Accepts only the values 2-36. If the exponential notation
* is used, then this base is only used for the significand. The exponent
* itself always printed using a base of 10.
* - `negative_zero` - Whether to treat the special value `-0` as
* `-0` or as `+0`.
* - `sign` - How to emit the sign. See `SignFormat`.
* - `digits` - The amount of digits to use for emitting the fractional
* part, if any. See `SignificantDigits`.
* - `exp_format` - Whether or not to use the exponential (scientific) notation.
* See `ExponentFormat`.
* - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if
* exponential notation is desired.
* - `f` - A closure to invoke with the bytes representing the
* float.
*
* # Panics
* - Panics if `radix` < 2 or `radix` > 36.
* - Panics if `radix` > 14 and `exp_format` is `ExpDec` due to conflict
* between digit and exponent sign `'e'`.
* - Panics if `radix` > 25 and `exp_format` is `ExpBin` due to conflict
* between digit and exponent sign `'p'`.
*/
/// Converts a number to its string representation as a byte vector.
/// This is meant to be a common base implementation for all numeric string
/// conversion functions like `to_string()` or `to_str_radix()`.
///
/// # Arguments
///
/// - `num` - The number to convert. Accepts any number that
/// implements the numeric traits.
/// - `radix` - Base to use. Accepts only the values 2-36. If the exponential notation
/// is used, then this base is only used for the significand. The exponent
/// itself always printed using a base of 10.
/// - `negative_zero` - Whether to treat the special value `-0` as
/// `-0` or as `+0`.
/// - `sign` - How to emit the sign. See `SignFormat`.
/// - `digits` - The amount of digits to use for emitting the fractional
/// part, if any. See `SignificantDigits`.
/// - `exp_format` - Whether or not to use the exponential (scientific) notation.
/// See `ExponentFormat`.
/// - `exp_capital` - Whether or not to use a capital letter for the exponent sign, if
/// exponential notation is desired.
/// - `f` - A closure to invoke with the bytes representing the
/// float.
///
/// # Panics
///
/// - Panics if `radix` < 2 or `radix` > 36.
/// - Panics if `radix` > 14 and `exp_format` is `ExpDec` due to conflict
/// between digit and exponent sign `'e'`.
/// - Panics if `radix` > 25 and `exp_format` is `ExpBin` due to conflict
/// between digit and exponent sign `'p'`.
pub fn float_to_str_bytes_common<T: Float, U>(
num: T,
radix: uint,

View File

@ -85,7 +85,7 @@ pub struct Formatter<'a> {
width: Option<uint>,
precision: Option<uint>,
buf: &'a mut FormatWriter+'a,
buf: &'a mut (FormatWriter+'a),
curarg: slice::Items<'a, Argument<'a>>,
args: &'a [Argument<'a>],
}
@ -565,7 +565,7 @@ impl<'a, Sized? T: Show> Show for &'a T {
impl<'a, Sized? T: Show> Show for &'a mut T {
fn fmt(&self, f: &mut Formatter) -> Result { (**self).fmt(f) }
}
impl<'a> Show for &'a Show+'a {
impl<'a> Show for &'a (Show+'a) {
fn fmt(&self, f: &mut Formatter) -> Result { (*self).fmt(f) }
}
@ -724,7 +724,7 @@ macro_rules! tuple (
tuple! { T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, }
impl<'a> Show for &'a any::Any+'a {
impl<'a> Show for &'a (any::Any+'a) {
fn fmt(&self, f: &mut Formatter) -> Result { f.pad("&Any") }
}

View File

@ -8,38 +8,36 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! rustc compiler intrinsics.
The corresponding definitions are in librustc/middle/trans/foreign.rs.
# Volatiles
The volatile intrinsics provide operations intended to act on I/O
memory, which are guaranteed to not be reordered by the compiler
across other volatile intrinsics. See the LLVM documentation on
[[volatile]].
[volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
# Atomics
The atomic intrinsics provide common atomic operations on machine
words, with multiple possible memory orderings. They obey the same
semantics as C++11. See the LLVM documentation on [[atomics]].
[atomics]: http://llvm.org/docs/Atomics.html
A quick refresher on memory ordering:
* Acquire - a barrier for acquiring a lock. Subsequent reads and writes
take place after the barrier.
* Release - a barrier for releasing a lock. Preceding reads and writes
take place before the barrier.
* Sequentially consistent - sequentially consistent operations are
guaranteed to happen in order. This is the standard mode for working
with atomic types and is equivalent to Java's `volatile`.
*/
//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc/middle/trans/foreign.rs.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: http://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//! take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//! take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//! guaranteed to happen in order. This is the standard mode for working
//! with atomic types and is equivalent to Java's `volatile`.
#![experimental]
#![allow(missing_docs)]

View File

@ -8,55 +8,51 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Composable external iterators
# The `Iterator` trait
This module defines Rust's core iteration trait. The `Iterator` trait has one
unimplemented method, `next`. All other methods are derived through default
methods to perform operations such as `zip`, `chain`, `enumerate`, and `fold`.
The goal of this module is to unify iteration across all containers in Rust.
An iterator can be considered as a state machine which is used to track which
element will be yielded next.
There are various extensions also defined in this module to assist with various
types of iteration, such as the `DoubleEndedIterator` for iterating in reverse,
the `FromIterator` trait for creating a container from an iterator, and much
more.
## Rust's `for` loop
The special syntax used by rust's `for` loop is based around the `Iterator`
trait defined in this module. For loops can be viewed as a syntactical expansion
into a `loop`, for example, the `for` loop in this example is essentially
translated to the `loop` below.
```rust
let values = vec![1i, 2, 3];
// "Syntactical sugar" taking advantage of an iterator
for &x in values.iter() {
println!("{}", x);
}
// Rough translation of the iteration without a `for` iterator.
let mut it = values.iter();
loop {
match it.next() {
Some(&x) => {
println!("{}", x);
}
None => { break }
}
}
```
This `for` loop syntax can be applied to any iterator over any type.
*/
//! Composable external iterators
//!
//! # The `Iterator` trait
//!
//! This module defines Rust's core iteration trait. The `Iterator` trait has one
//! unimplemented method, `next`. All other methods are derived through default
//! methods to perform operations such as `zip`, `chain`, `enumerate`, and `fold`.
//!
//! The goal of this module is to unify iteration across all containers in Rust.
//! An iterator can be considered as a state machine which is used to track which
//! element will be yielded next.
//!
//! There are various extensions also defined in this module to assist with various
//! types of iteration, such as the `DoubleEndedIterator` for iterating in reverse,
//! the `FromIterator` trait for creating a container from an iterator, and much
//! more.
//!
//! ## Rust's `for` loop
//!
//! The special syntax used by rust's `for` loop is based around the `Iterator`
//! trait defined in this module. For loops can be viewed as a syntactical expansion
//! into a `loop`, for example, the `for` loop in this example is essentially
//! translated to the `loop` below.
//!
//! ```rust
//! let values = vec![1i, 2, 3];
//!
//! // "Syntactical sugar" taking advantage of an iterator
//! for &x in values.iter() {
//! println!("{}", x);
//! }
//!
//! // Rough translation of the iteration without a `for` iterator.
//! let mut it = values.iter();
//! loop {
//! match it.next() {
//! Some(&x) => {
//! println!("{}", x);
//! }
//! None => { break }
//! }
//! }
//! ```
//!
//! This `for` loop syntax can be applied to any iterator over any type.
pub use self::MinMaxResult::*;

View File

@ -8,17 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Primitive traits representing basic 'kinds' of types
Rust types can be classified in various useful ways according to
intrinsic properties of the type. These classifications, often called
'kinds', are represented as traits.
They cannot be implemented by user code, but are instead implemented
by the compiler automatically for the types to which they apply.
*/
//! Primitive traits representing basic 'kinds' of types
//!
//! Rust types can be classified in various useful ways according to
//! intrinsic properties of the type. These classifications, often called
//! 'kinds', are represented as traits.
//!
//! They cannot be implemented by user code, but are instead implemented
//! by the compiler automatically for the types to which they apply.
/// Types able to be transferred across task boundaries.
#[lang="send"]

File diff suppressed because it is too large Load Diff

View File

@ -34,8 +34,6 @@
// * The `raw` and `bytes` submodules.
// * Boilerplate trait implementations.
pub use self::BinarySearchResult::*;
use mem::transmute;
use clone::Clone;
use cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering, Less, Equal, Greater, Equiv};
@ -219,7 +217,7 @@ pub trait SlicePrelude<T> for Sized? {
/// found; the fourth could match any position in `[1,4]`.
///
/// ```rust
/// use std::slice::{Found, NotFound};
/// use std::slice::BinarySearchResult::{Found, NotFound};
/// let s = [0i, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let s = s.as_slice();
///
@ -548,7 +546,7 @@ impl<T> SlicePrelude<T> for [T] {
while lim != 0 {
let ix = base + (lim >> 1);
match f(&self[ix]) {
Equal => return Found(ix),
Equal => return BinarySearchResult::Found(ix),
Less => {
base = ix + 1;
lim -= 1;
@ -557,7 +555,7 @@ impl<T> SlicePrelude<T> for [T] {
}
lim >>= 1;
}
return NotFound(base);
return BinarySearchResult::NotFound(base);
}
#[inline]
@ -838,7 +836,7 @@ pub trait OrdSlicePrelude<T: Ord> for Sized? {
/// found; the fourth could match any position in `[1,4]`.
///
/// ```rust
/// use std::slice::{Found, NotFound};
/// use std::slice::BinarySearchResult::{Found, NotFound};
/// let s = [0i, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let s = s.as_slice();
///
@ -1613,8 +1611,8 @@ impl BinarySearchResult {
/// Similar to `Result::ok`.
pub fn found(&self) -> Option<uint> {
match *self {
Found(i) => Some(i),
NotFound(_) => None
BinarySearchResult::Found(i) => Some(i),
BinarySearchResult::NotFound(_) => None
}
}
@ -1622,8 +1620,8 @@ impl BinarySearchResult {
/// Similar to `Result::err`.
pub fn not_found(&self) -> Option<uint> {
match *self {
Found(_) => None,
NotFound(i) => Some(i)
BinarySearchResult::Found(_) => None,
BinarySearchResult::NotFound(i) => Some(i)
}
}
}
@ -1634,9 +1632,7 @@ impl BinarySearchResult {
// Free functions
//
/**
* Converts a pointer to A into a slice of length 1 (without copying).
*/
/// Converts a pointer to A into a slice of length 1 (without copying).
#[unstable = "waiting for DST"]
pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] {
unsafe {
@ -1644,9 +1640,7 @@ pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] {
}
}
/**
* Converts a pointer to A into a slice of length 1 (without copying).
*/
/// Converts a pointer to A into a slice of length 1 (without copying).
#[unstable = "waiting for DST"]
pub fn mut_ref_slice<'a, A>(s: &'a mut A) -> &'a mut [A] {
unsafe {
@ -1710,10 +1704,8 @@ pub mod raw {
use raw::Slice;
use option::{None, Option, Some};
/**
* Form a slice from a pointer and length (as a number of units,
* not bytes).
*/
/// Form a slice from a pointer and length (as a number of units,
/// not bytes).
#[inline]
#[deprecated = "renamed to slice::from_raw_buf"]
pub unsafe fn buf_as_slice<T,U>(p: *const T, len: uint, f: |v: &[T]| -> U)
@ -1724,10 +1716,8 @@ pub mod raw {
}))
}
/**
* Form a slice from a pointer and length (as a number of units,
* not bytes).
*/
/// Form a slice from a pointer and length (as a number of units,
/// not bytes).
#[inline]
#[deprecated = "renamed to slice::from_raw_mut_buf"]
pub unsafe fn mut_buf_as_slice<T,
@ -1742,12 +1732,10 @@ pub mod raw {
}))
}
/**
* Returns a pointer to first element in slice and adjusts
* slice so it no longer contains that element. Returns None
* if the slice is empty. O(1).
*/
#[inline]
/// Returns a pointer to first element in slice and adjusts
/// slice so it no longer contains that element. Returns None
/// if the slice is empty. O(1).
#[inline]
#[deprecated = "inspect `Slice::{data, len}` manually (increment data by 1)"]
pub unsafe fn shift_ptr<T>(slice: &mut Slice<T>) -> Option<*const T> {
if slice.len == 0 { return None; }
@ -1757,11 +1745,9 @@ pub mod raw {
Some(head)
}
/**
* Returns a pointer to last element in slice and adjusts
* slice so it no longer contains that element. Returns None
* if the slice is empty. O(1).
*/
/// Returns a pointer to last element in slice and adjusts
/// slice so it no longer contains that element. Returns None
/// if the slice is empty. O(1).
#[inline]
#[deprecated = "inspect `Slice::{data, len}` manually (decrement len by 1)"]
pub unsafe fn pop_ptr<T>(slice: &mut Slice<T>) -> Option<*const T> {

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::slice::{Found, NotFound};
use std::slice::BinarySearchResult::{Found, NotFound};
#[test]
fn binary_search_not_found() {

View File

@ -8,15 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Simple [DEFLATE][def]-based compression. This is a wrapper around the
[`miniz`][mz] library, which is a one-file pure-C implementation of zlib.
[def]: https://en.wikipedia.org/wiki/DEFLATE
[mz]: https://code.google.com/p/miniz/
*/
//! Simple [DEFLATE][def]-based compression. This is a wrapper around the
//! [`miniz`][mz] library, which is a one-file pure-C implementation of zlib.
//!
//! [def]: https://en.wikipedia.org/wiki/DEFLATE
//! [mz]: https://code.google.com/p/miniz/
#![crate_name = "flate"]
#![experimental]

View File

@ -8,260 +8,258 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Generate files suitable for use with [Graphviz](http://www.graphviz.org/)
The `render` function generates output (e.g. an `output.dot` file) for
use with [Graphviz](http://www.graphviz.org/) by walking a labelled
graph. (Graphviz can then automatically lay out the nodes and edges
of the graph, and also optionally render the graph as an image or
other [output formats](
http://www.graphviz.org/content/output-formats), such as SVG.)
Rather than impose some particular graph data structure on clients,
this library exposes two traits that clients can implement on their
own structs before handing them over to the rendering function.
Note: This library does not yet provide access to the full
expressiveness of the [DOT language](
http://www.graphviz.org/doc/info/lang.html). For example, there are
many [attributes](http://www.graphviz.org/content/attrs) related to
providing layout hints (e.g. left-to-right versus top-down, which
algorithm to use, etc). The current intention of this library is to
emit a human-readable .dot file with very regular structure suitable
for easy post-processing.
# Examples
The first example uses a very simple graph representation: a list of
pairs of ints, representing the edges (the node set is implicit).
Each node label is derived directly from the int representing the node,
while the edge labels are all empty strings.
This example also illustrates how to use `CowVec` to return
an owned vector or a borrowed slice as appropriate: we construct the
node vector from scratch, but borrow the edge list (rather than
constructing a copy of all the edges from scratch).
The output from this example renders five nodes, with the first four
forming a diamond-shaped acyclic graph and then pointing to the fifth
which is cyclic.
```rust
use graphviz as dot;
type Nd = int;
type Ed = (int,int);
struct Edges(Vec<Ed>);
pub fn render_to<W:Writer>(output: &mut W) {
let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4)));
dot::render(&edges, output).unwrap()
}
impl<'a> dot::Labeller<'a, Nd, Ed> for Edges {
fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
dot::Id::new(format!("N{}", *n)).unwrap()
}
}
impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges {
fn nodes(&self) -> dot::Nodes<'a,Nd> {
// (assumes that |N| \approxeq |E|)
let &Edges(ref v) = self;
let mut nodes = Vec::with_capacity(v.len());
for &(s,t) in v.iter() {
nodes.push(s); nodes.push(t);
}
nodes.sort();
nodes.dedup();
nodes.into_cow()
}
fn edges(&'a self) -> dot::Edges<'a,Ed> {
let &Edges(ref edges) = self;
edges.as_slice().into_cow()
}
fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s }
fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t }
}
# pub fn main() { render_to(&mut Vec::new()) }
```
```no_run
# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
pub fn main() {
use std::io::File;
let mut f = File::create(&Path::new("example1.dot"));
render_to(&mut f)
}
```
Output from first example (in `example1.dot`):
```ignore
digraph example1 {
N0[label="N0"];
N1[label="N1"];
N2[label="N2"];
N3[label="N3"];
N4[label="N4"];
N0 -> N1[label=""];
N0 -> N2[label=""];
N1 -> N3[label=""];
N2 -> N3[label=""];
N3 -> N4[label=""];
N4 -> N4[label=""];
}
```
The second example illustrates using `node_label` and `edge_label` to
add labels to the nodes and edges in the rendered graph. The graph
here carries both `nodes` (the label text to use for rendering a
particular node), and `edges` (again a list of `(source,target)`
indices).
This example also illustrates how to use a type (in this case the edge
type) that shares substructure with the graph: the edge type here is a
direct reference to the `(source,target)` pair stored in the graph's
internal vector (rather than passing around a copy of the pair
itself). Note that this implies that `fn edges(&'a self)` must
construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>`
edges stored in `self`.
Since both the set of nodes and the set of edges are always
constructed from scratch via iterators, we use the `collect()` method
from the `Iterator` trait to collect the nodes and edges into freshly
constructed growable `Vec` values (rather use the `into_cow`
from the `IntoCow` trait as was used in the first example
above).
The output from this example renders four nodes that make up the
Hasse-diagram for the subsets of the set `{x, y}`. Each edge is
labelled with the &sube; character (specified using the HTML character
entity `&sube`).
```rust
use graphviz as dot;
type Nd = uint;
type Ed<'a> = &'a (uint, uint);
struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
pub fn render_to<W:Writer>(output: &mut W) {
let nodes = vec!("{x,y}","{x}","{y}","{}");
let edges = vec!((0,1), (0,2), (1,3), (2,3));
let graph = Graph { nodes: nodes, edges: edges };
dot::render(&graph, output).unwrap()
}
impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph {
fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
dot::Id::new(format!("N{}", n)).unwrap()
}
fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> {
dot::LabelStr(self.nodes[*n].as_slice().into_cow())
}
fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> {
dot::LabelStr("&sube;".into_cow())
}
}
impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph {
fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() }
fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t }
}
# pub fn main() { render_to(&mut Vec::new()) }
```
```no_run
# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
pub fn main() {
use std::io::File;
let mut f = File::create(&Path::new("example2.dot"));
render_to(&mut f)
}
```
The third example is similar to the second, except now each node and
edge now carries a reference to the string label for each node as well
as that node's index. (This is another illustration of how to share
structure with the graph itself, and why one might want to do so.)
The output from this example is the same as the second example: the
Hasse-diagram for the subsets of the set `{x, y}`.
```rust
use graphviz as dot;
type Nd<'a> = (uint, &'a str);
type Ed<'a> = (Nd<'a>, Nd<'a>);
struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
pub fn render_to<W:Writer>(output: &mut W) {
let nodes = vec!("{x,y}","{x}","{y}","{}");
let edges = vec!((0,1), (0,2), (1,3), (2,3));
let graph = Graph { nodes: nodes, edges: edges };
dot::render(&graph, output).unwrap()
}
impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph {
fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
dot::Id::new(format!("N{}", n.val0())).unwrap()
}
fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> {
let &(i, _) = n;
dot::LabelStr(self.nodes[i].as_slice().into_cow())
}
fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> {
dot::LabelStr("&sube;".into_cow())
}
}
impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph {
fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
self.nodes.iter().map(|s|s.as_slice()).enumerate().collect()
}
fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> {
self.edges.iter()
.map(|&(i,j)|((i, self.nodes[i].as_slice()),
(j, self.nodes[j].as_slice())))
.collect()
}
fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s }
fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t }
}
# pub fn main() { render_to(&mut Vec::new()) }
```
```no_run
# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
pub fn main() {
use std::io::File;
let mut f = File::create(&Path::new("example3.dot"));
render_to(&mut f)
}
```
# References
* [Graphviz](http://www.graphviz.org/)
* [DOT language](http://www.graphviz.org/doc/info/lang.html)
*/
//! Generate files suitable for use with [Graphviz](http://www.graphviz.org/)
//!
//! The `render` function generates output (e.g. an `output.dot` file) for
//! use with [Graphviz](http://www.graphviz.org/) by walking a labelled
//! graph. (Graphviz can then automatically lay out the nodes and edges
//! of the graph, and also optionally render the graph as an image or
//! other [output formats](
//! http://www.graphviz.org/content/output-formats), such as SVG.)
//!
//! Rather than impose some particular graph data structure on clients,
//! this library exposes two traits that clients can implement on their
//! own structs before handing them over to the rendering function.
//!
//! Note: This library does not yet provide access to the full
//! expressiveness of the [DOT language](
//! http://www.graphviz.org/doc/info/lang.html). For example, there are
//! many [attributes](http://www.graphviz.org/content/attrs) related to
//! providing layout hints (e.g. left-to-right versus top-down, which
//! algorithm to use, etc). The current intention of this library is to
//! emit a human-readable .dot file with very regular structure suitable
//! for easy post-processing.
//!
//! # Examples
//!
//! The first example uses a very simple graph representation: a list of
//! pairs of ints, representing the edges (the node set is implicit).
//! Each node label is derived directly from the int representing the node,
//! while the edge labels are all empty strings.
//!
//! This example also illustrates how to use `CowVec` to return
//! an owned vector or a borrowed slice as appropriate: we construct the
//! node vector from scratch, but borrow the edge list (rather than
//! constructing a copy of all the edges from scratch).
//!
//! The output from this example renders five nodes, with the first four
//! forming a diamond-shaped acyclic graph and then pointing to the fifth
//! which is cyclic.
//!
//! ```rust
//! use graphviz as dot;
//!
//! type Nd = int;
//! type Ed = (int,int);
//! struct Edges(Vec<Ed>);
//!
//! pub fn render_to<W:Writer>(output: &mut W) {
//! let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4)));
//! dot::render(&edges, output).unwrap()
//! }
//!
//! impl<'a> dot::Labeller<'a, Nd, Ed> for Edges {
//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
//!
//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
//! dot::Id::new(format!("N{}", *n)).unwrap()
//! }
//! }
//!
//! impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges {
//! fn nodes(&self) -> dot::Nodes<'a,Nd> {
//! // (assumes that |N| \approxeq |E|)
//! let &Edges(ref v) = self;
//! let mut nodes = Vec::with_capacity(v.len());
//! for &(s,t) in v.iter() {
//! nodes.push(s); nodes.push(t);
//! }
//! nodes.sort();
//! nodes.dedup();
//! nodes.into_cow()
//! }
//!
//! fn edges(&'a self) -> dot::Edges<'a,Ed> {
//! let &Edges(ref edges) = self;
//! edges.as_slice().into_cow()
//! }
//!
//! fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s }
//!
//! fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t }
//! }
//!
//! # pub fn main() { render_to(&mut Vec::new()) }
//! ```
//!
//! ```no_run
//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
//! pub fn main() {
//! use std::io::File;
//! let mut f = File::create(&Path::new("example1.dot"));
//! render_to(&mut f)
//! }
//! ```
//!
//! Output from first example (in `example1.dot`):
//!
//! ```ignore
//! digraph example1 {
//! N0[label="N0"];
//! N1[label="N1"];
//! N2[label="N2"];
//! N3[label="N3"];
//! N4[label="N4"];
//! N0 -> N1[label=""];
//! N0 -> N2[label=""];
//! N1 -> N3[label=""];
//! N2 -> N3[label=""];
//! N3 -> N4[label=""];
//! N4 -> N4[label=""];
//! }
//! ```
//!
//! The second example illustrates using `node_label` and `edge_label` to
//! add labels to the nodes and edges in the rendered graph. The graph
//! here carries both `nodes` (the label text to use for rendering a
//! particular node), and `edges` (again a list of `(source,target)`
//! indices).
//!
//! This example also illustrates how to use a type (in this case the edge
//! type) that shares substructure with the graph: the edge type here is a
//! direct reference to the `(source,target)` pair stored in the graph's
//! internal vector (rather than passing around a copy of the pair
//! itself). Note that this implies that `fn edges(&'a self)` must
//! construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>`
//! edges stored in `self`.
//!
//! Since both the set of nodes and the set of edges are always
//! constructed from scratch via iterators, we use the `collect()` method
//! from the `Iterator` trait to collect the nodes and edges into freshly
//! constructed growable `Vec` values (rather use the `into_cow`
//! from the `IntoCow` trait as was used in the first example
//! above).
//!
//! The output from this example renders four nodes that make up the
//! Hasse-diagram for the subsets of the set `{x, y}`. Each edge is
//! labelled with the &sube; character (specified using the HTML character
//! entity `&sube`).
//!
//! ```rust
//! use graphviz as dot;
//!
//! type Nd = uint;
//! type Ed<'a> = &'a (uint, uint);
//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
//!
//! pub fn render_to<W:Writer>(output: &mut W) {
//! let nodes = vec!("{x,y}","{x}","{y}","{}");
//! let edges = vec!((0,1), (0,2), (1,3), (2,3));
//! let graph = Graph { nodes: nodes, edges: edges };
//!
//! dot::render(&graph, output).unwrap()
//! }
//!
//! impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph {
//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
//! dot::Id::new(format!("N{}", n)).unwrap()
//! }
//! fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> {
//! dot::LabelStr(self.nodes[*n].as_slice().into_cow())
//! }
//! fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> {
//! dot::LabelStr("&sube;".into_cow())
//! }
//! }
//!
//! impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph {
//! fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() }
//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
//! fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
//! fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t }
//! }
//!
//! # pub fn main() { render_to(&mut Vec::new()) }
//! ```
//!
//! ```no_run
//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
//! pub fn main() {
//! use std::io::File;
//! let mut f = File::create(&Path::new("example2.dot"));
//! render_to(&mut f)
//! }
//! ```
//!
//! The third example is similar to the second, except now each node and
//! edge now carries a reference to the string label for each node as well
//! as that node's index. (This is another illustration of how to share
//! structure with the graph itself, and why one might want to do so.)
//!
//! The output from this example is the same as the second example: the
//! Hasse-diagram for the subsets of the set `{x, y}`.
//!
//! ```rust
//! use graphviz as dot;
//!
//! type Nd<'a> = (uint, &'a str);
//! type Ed<'a> = (Nd<'a>, Nd<'a>);
//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
//!
//! pub fn render_to<W:Writer>(output: &mut W) {
//! let nodes = vec!("{x,y}","{x}","{y}","{}");
//! let edges = vec!((0,1), (0,2), (1,3), (2,3));
//! let graph = Graph { nodes: nodes, edges: edges };
//!
//! dot::render(&graph, output).unwrap()
//! }
//!
//! impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph {
//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
//! fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
//! dot::Id::new(format!("N{}", n.val0())).unwrap()
//! }
//! fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> {
//! let &(i, _) = n;
//! dot::LabelStr(self.nodes[i].as_slice().into_cow())
//! }
//! fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> {
//! dot::LabelStr("&sube;".into_cow())
//! }
//! }
//!
//! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph {
//! fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
//! self.nodes.iter().map(|s|s.as_slice()).enumerate().collect()
//! }
//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> {
//! self.edges.iter()
//! .map(|&(i,j)|((i, self.nodes[i].as_slice()),
//! (j, self.nodes[j].as_slice())))
//! .collect()
//! }
//! fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s }
//! fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t }
//! }
//!
//! # pub fn main() { render_to(&mut Vec::new()) }
//! ```
//!
//! ```no_run
//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
//! pub fn main() {
//! use std::io::File;
//! let mut f = File::create(&Path::new("example3.dot"));
//! render_to(&mut f)
//! }
//! ```
//!
//! # References
//!
//! * [Graphviz](http://www.graphviz.org/)
//!
//! * [DOT language](http://www.graphviz.org/doc/info/lang.html)
#![crate_name = "graphviz"]
#![experimental]

View File

@ -19,59 +19,57 @@
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/")]
/*!
* Bindings for the C standard library and other platform libraries
*
* **NOTE:** These are *architecture and libc* specific. On Linux, these
* bindings are only correct for glibc.
*
* This module contains bindings to the C standard library, organized into
* modules by their defining standard. Additionally, it contains some assorted
* platform-specific definitions. For convenience, most functions and types
* are reexported, so `use libc::*` will import the available C bindings as
* appropriate for the target platform. The exact set of functions available
* are platform specific.
*
* *Note:* Because these definitions are platform-specific, some may not appear
* in the generated documentation.
*
* We consider the following specs reasonably normative with respect to
* interoperating with the C standard library (libc/msvcrt):
*
* * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995.
* * ISO 9899:1999 ('C99' or 'C9x').
* * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1').
* * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3').
* * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4').
*
* Note that any reference to the 1996 revision of POSIX, or any revs between
* 1990 (when '88 was approved at ISO) and 2001 (when the next actual
* revision-revision happened), are merely additions of other chapters (1b and
* 1c) outside the core interfaces.
*
* Despite having several names each, these are *reasonably* coherent
* point-in-time, list-of-definition sorts of specs. You can get each under a
* variety of names but will wind up with the same definition in each case.
*
* See standards(7) in linux-manpages for more details.
*
* Our interface to these libraries is complicated by the non-universality of
* conformance to any of them. About the only thing universally supported is
* the first (C95), beyond that definitions quickly become absent on various
* platforms.
*
* We therefore wind up dividing our module-space up (mostly for the sake of
* sanity while editing, filling-in-details and eliminating duplication) into
* definitions common-to-all (held in modules named c95, c99, posix88, posix01
* and posix08) and definitions that appear only on *some* platforms (named
* 'extra'). This would be things like significant OSX foundation kit, or Windows
* library kernel32.dll, or various fancy glibc, Linux or BSD extensions.
*
* In addition to the per-platform 'extra' modules, we define a module of
* 'common BSD' libc routines that never quite made it into POSIX but show up
* in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final
* one from Berkeley after the lawsuits died down and the CSRG dissolved.
*/
//! Bindings for the C standard library and other platform libraries
//!
//! **NOTE:** These are *architecture and libc* specific. On Linux, these
//! bindings are only correct for glibc.
//!
//! This module contains bindings to the C standard library, organized into
//! modules by their defining standard. Additionally, it contains some assorted
//! platform-specific definitions. For convenience, most functions and types
//! are reexported, so `use libc::*` will import the available C bindings as
//! appropriate for the target platform. The exact set of functions available
//! are platform specific.
//!
//! *Note:* Because these definitions are platform-specific, some may not appear
//! in the generated documentation.
//!
//! We consider the following specs reasonably normative with respect to
//! interoperating with the C standard library (libc/msvcrt):
//!
//! * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995.
//! * ISO 9899:1999 ('C99' or 'C9x').
//! * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1').
//! * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3').
//! * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4').
//!
//! Note that any reference to the 1996 revision of POSIX, or any revs between
//! 1990 (when '88 was approved at ISO) and 2001 (when the next actual
//! revision-revision happened), are merely additions of other chapters (1b and
//! 1c) outside the core interfaces.
//!
//! Despite having several names each, these are *reasonably* coherent
//! point-in-time, list-of-definition sorts of specs. You can get each under a
//! variety of names but will wind up with the same definition in each case.
//!
//! See standards(7) in linux-manpages for more details.
//!
//! Our interface to these libraries is complicated by the non-universality of
//! conformance to any of them. About the only thing universally supported is
//! the first (C95), beyond that definitions quickly become absent on various
//! platforms.
//!
//! We therefore wind up dividing our module-space up (mostly for the sake of
//! sanity while editing, filling-in-details and eliminating duplication) into
//! definitions common-to-all (held in modules named c95, c99, posix88, posix01
//! and posix08) and definitions that appear only on *some* platforms (named
//! 'extra'). This would be things like significant OSX foundation kit, or Windows
//! library kernel32.dll, or various fancy glibc, Linux or BSD extensions.
//!
//! In addition to the per-platform 'extra' modules, we define a module of
//! 'common BSD' libc routines that never quite made it into POSIX but show up
//! in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final
//! one from Berkeley after the lawsuits died down and the CSRG dissolved.
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
@ -329,20 +327,18 @@ pub mod types {
// Standard types that are opaque or common, so are not per-target.
pub mod common {
pub mod c95 {
/**
Type used to construct void pointers for use with C.
This type is only useful as a pointer target. Do not use it as a
return type for FFI functions which have the `void` return type in
C. Use the unit type `()` or omit the return type instead.
For LLVM to recognize the void pointer type and by extension
functions like malloc(), we need to have it represented as i8* in
LLVM bitcode. The enum used here ensures this and prevents misuse
of the "raw" type by only having private variants. We need two
variants, because the compiler complains about the repr attribute
otherwise.
*/
/// Type used to construct void pointers for use with C.
///
/// This type is only useful as a pointer target. Do not use it as a
/// return type for FFI functions which have the `void` return type in
/// C. Use the unit type `()` or omit the return type instead.
///
/// For LLVM to recognize the void pointer type and by extension
/// functions like malloc(), we need to have it represented as i8* in
/// LLVM bitcode. The enum used here ensures this and prevents misuse
/// of the "raw" type by only having private variants. We need two
/// variants, because the compiler complains about the repr attribute
/// otherwise.
#[repr(u8)]
pub enum c_void {
__variant1,

View File

@ -8,17 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Sampling from random distributions.
This is a generalization of `Rand` to allow parameters to control the
exact properties of the generated values, e.g. the mean and standard
deviation of a normal distribution. The `Sample` trait is the most
general, and allows for generating values that change some state
internally. The `IndependentSample` trait is for generating values
that do not need to record state.
*/
//! Sampling from random distributions.
//!
//! This is a generalization of `Rand` to allow parameters to control the
//! exact properties of the generated values, e.g. the mean and standard
//! deviation of a normal distribution. The `Sample` trait is the most
//! general, and allows for generating values that change some state
//! internally. The `IndependentSample` trait is for generating values
//! that do not need to record state.
#![experimental]

View File

@ -18,7 +18,7 @@ use std::cmp;
use std::fmt;
use std::iter;
use std::num;
use std::slice;
use std::slice::BinarySearchResult;
/// Static data containing Unicode ranges for general categories and scripts.
use unicode::regex::{UNICODE_CLASSES, PERLD, PERLS, PERLW};
@ -1027,8 +1027,8 @@ fn is_valid_cap(c: char) -> bool {
fn find_class(classes: NamedClasses, name: &str) -> Option<Vec<(char, char)>> {
match classes.binary_search(|&(s, _)| s.cmp(name)) {
slice::Found(i) => Some(classes[i].val1().to_vec()),
slice::NotFound(_) => None,
BinarySearchResult::Found(i) => Some(classes[i].val1().to_vec()),
BinarySearchResult::NotFound(_) => None,
}
}

View File

@ -146,5 +146,7 @@ register_diagnostics!(
E0167,
E0168,
E0169,
E0170
E0170,
E0171,
E0172
)

View File

@ -8,15 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The Rust compiler.
# Note
This API is completely unstable and subject to change.
*/
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![crate_name = "rustc"]
#![experimental]

View File

@ -421,7 +421,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
impl<'a, 'tcx, 'v> Visitor<'v> for ImproperCTypesVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty) {
match ty.node {
ast::TyPath(_, _, id) => self.check_def(ty.span, ty.id, id),
ast::TyPath(_, id) => self.check_def(ty.span, ty.id, id),
_ => (),
}
visit::walk_ty(self, ty);

View File

@ -464,11 +464,9 @@ impl<'a, 'tcx> Context<'a, 'tcx> {
self.lookup_and_emit(lint, Some(span), msg);
}
/**
* Merge the lints specified by any lint attributes into the
* current lint context, call the provided function, then reset the
* lints in effect to their previous state.
*/
/// Merge the lints specified by any lint attributes into the
/// current lint context, call the provided function, then reset the
/// lints in effect to their previous state.
fn with_lint_attrs(&mut self,
attrs: &[ast::Attribute],
f: |&mut Context|) {

View File

@ -500,20 +500,10 @@ fn encode_reexported_static_methods(ecx: &EncodeContext,
/// Iterates through "auxiliary node IDs", which are node IDs that describe
/// top-level items that are sub-items of the given item. Specifically:
///
/// * For enums, iterates through the node IDs of the variants.
///
/// * For newtype structs, iterates through the node ID of the constructor.
fn each_auxiliary_node_id(item: &ast::Item, callback: |NodeId| -> bool) -> bool {
let mut continue_ = true;
match item.node {
ast::ItemEnum(ref enum_def, _) => {
for variant in enum_def.variants.iter() {
continue_ = callback(variant.node.id);
if !continue_ {
break
}
}
}
ast::ItemStruct(ref struct_def, _) => {
// If this is a newtype struct, return the constructor.
match struct_def.ctor_id {
@ -1230,10 +1220,9 @@ fn encode_info_for_item(ecx: &EncodeContext,
encode_name(rbml_w, item.ident.name);
encode_attributes(rbml_w, item.attrs.as_slice());
match ty.node {
ast::TyPath(ref path, ref bounds, _) if path.segments
ast::TyPath(ref path, _) if path.segments
.len() == 1 => {
let ident = path.segments.last().unwrap().identifier;
assert!(bounds.is_none());
encode_impl_type_basename(rbml_w, ident);
}
_ => {}

View File

@ -196,53 +196,38 @@ fn reserve_id_range(sess: &Session,
}
impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> {
/// Translates an internal id, meaning a node id that is known to refer to some part of the
/// item currently being inlined, such as a local variable or argument. All naked node-ids
/// that appear in types have this property, since if something might refer to an external item
/// we would use a def-id to allow for the possibility that the item resides in another crate.
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
/*!
* Translates an internal id, meaning a node id that is known
* to refer to some part of the item currently being inlined,
* such as a local variable or argument. All naked node-ids
* that appear in types have this property, since if something
* might refer to an external item we would use a def-id to
* allow for the possibility that the item resides in another
* crate.
*/
// from_id_range should be non-empty
assert!(!self.from_id_range.empty());
(id - self.from_id_range.min + self.to_id_range.min)
}
/// Translates an EXTERNAL def-id, converting the crate number from the one used in the encoded
/// data to the current crate numbers. By external, I mean that it will be translated to a
/// reference to the item in its original crate, as opposed to being translated to a reference
/// to the inlined version of the item. This is typically, but not always, what you want,
/// because most def-ids refer to external things like types or other fns that may or may not
/// be inlined. Note that even when the inlined function is referencing itself recursively, we
/// would want `tr_def_id` for that reference--- conceptually the function calls the original,
/// non-inlined version, and trans deals with linking that recursive call to the inlined copy.
///
/// However, there are a *few* cases where def-ids are used but we know that the thing being
/// referenced is in fact *internal* to the item being inlined. In those cases, you should use
/// `tr_intern_def_id()` below.
pub fn tr_def_id(&self, did: ast::DefId) -> ast::DefId {
/*!
* Translates an EXTERNAL def-id, converting the crate number
* from the one used in the encoded data to the current crate
* numbers.. By external, I mean that it be translated to a
* reference to the item in its original crate, as opposed to
* being translated to a reference to the inlined version of
* the item. This is typically, but not always, what you
* want, because most def-ids refer to external things like
* types or other fns that may or may not be inlined. Note
* that even when the inlined function is referencing itself
* recursively, we would want `tr_def_id` for that
* reference--- conceptually the function calls the original,
* non-inlined version, and trans deals with linking that
* recursive call to the inlined copy.
*
* However, there are a *few* cases where def-ids are used but
* we know that the thing being referenced is in fact *internal*
* to the item being inlined. In those cases, you should use
* `tr_intern_def_id()` below.
*/
decoder::translate_def_id(self.cdata, did)
}
pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId {
/*!
* Translates an INTERNAL def-id, meaning a def-id that is
* known to refer to some part of the item currently being
* inlined. In that case, we want to convert the def-id to
* refer to the current crate and to the new, inlined node-id.
*/
/// Translates an INTERNAL def-id, meaning a def-id that is
/// known to refer to some part of the item currently being
/// inlined. In that case, we want to convert the def-id to
/// refer to the current crate and to the new, inlined node-id.
pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId {
assert_eq!(did.krate, ast::LOCAL_CRATE);
ast::DefId { krate: ast::LOCAL_CRATE, node: self.tr_id(did.node) }
}
@ -1780,43 +1765,40 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
}
}
/// Converts a def-id that appears in a type. The correct
/// translation will depend on what kind of def-id this is.
/// This is a subtle point: type definitions are not
/// inlined into the current crate, so if the def-id names
/// a nominal type or type alias, then it should be
/// translated to refer to the source crate.
///
/// However, *type parameters* are cloned along with the function
/// they are attached to. So we should translate those def-ids
/// to refer to the new, cloned copy of the type parameter.
/// We only see references to free type parameters in the body of
/// an inlined function. In such cases, we need the def-id to
/// be a local id so that the TypeContents code is able to lookup
/// the relevant info in the ty_param_defs table.
///
/// *Region parameters*, unfortunately, are another kettle of fish.
/// In such cases, def_id's can appear in types to distinguish
/// shadowed bound regions and so forth. It doesn't actually
/// matter so much what we do to these, since regions are erased
/// at trans time, but it's good to keep them consistent just in
/// case. We translate them with `tr_def_id()` which will map
/// the crate numbers back to the original source crate.
///
/// Unboxed closures are cloned along with the function being
/// inlined, and all side tables use interned node IDs, so we
/// translate their def IDs accordingly.
///
/// It'd be really nice to refactor the type repr to not include
/// def-ids so that all these distinctions were unnecessary.
fn convert_def_id(&mut self,
dcx: &DecodeContext,
source: tydecode::DefIdSource,
did: ast::DefId)
-> ast::DefId {
/*!
* Converts a def-id that appears in a type. The correct
* translation will depend on what kind of def-id this is.
* This is a subtle point: type definitions are not
* inlined into the current crate, so if the def-id names
* a nominal type or type alias, then it should be
* translated to refer to the source crate.
*
* However, *type parameters* are cloned along with the function
* they are attached to. So we should translate those def-ids
* to refer to the new, cloned copy of the type parameter.
* We only see references to free type parameters in the body of
* an inlined function. In such cases, we need the def-id to
* be a local id so that the TypeContents code is able to lookup
* the relevant info in the ty_param_defs table.
*
* *Region parameters*, unfortunately, are another kettle of fish.
* In such cases, def_id's can appear in types to distinguish
* shadowed bound regions and so forth. It doesn't actually
* matter so much what we do to these, since regions are erased
* at trans time, but it's good to keep them consistent just in
* case. We translate them with `tr_def_id()` which will map
* the crate numbers back to the original source crate.
*
* Unboxed closures are cloned along with the function being
* inlined, and all side tables use interned node IDs, so we
* translate their def IDs accordingly.
*
* It'd be really nice to refactor the type repr to not include
* def-ids so that all these distinctions were unnecessary.
*/
let r = match source {
NominalType | TypeWithId | RegionParameter => dcx.tr_def_id(did),
TypeParameter | UnboxedClosureSource => dcx.tr_intern_def_id(did)

View File

@ -684,16 +684,13 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
return ret;
}
/// Reports an error if `expr` (which should be a path)
/// is using a moved/uninitialized value
fn check_if_path_is_moved(&self,
id: ast::NodeId,
span: Span,
use_kind: MovedValueUseKind,
lp: &Rc<LoanPath<'tcx>>) {
/*!
* Reports an error if `expr` (which should be a path)
* is using a moved/uninitialized value
*/
debug!("check_if_path_is_moved(id={}, use_kind={}, lp={})",
id, use_kind, lp.repr(self.bccx.tcx));
let base_lp = owned_ptr_base_path_rc(lp);
@ -708,30 +705,29 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
});
}
/// Reports an error if assigning to `lp` will use a
/// moved/uninitialized value. Mainly this is concerned with
/// detecting derefs of uninitialized pointers.
///
/// For example:
///
/// ```
/// let a: int;
/// a = 10; // ok, even though a is uninitialized
///
/// struct Point { x: uint, y: uint }
/// let p: Point;
/// p.x = 22; // ok, even though `p` is uninitialized
///
/// let p: ~Point;
/// (*p).x = 22; // not ok, p is uninitialized, can't deref
/// ```
fn check_if_assigned_path_is_moved(&self,
id: ast::NodeId,
span: Span,
use_kind: MovedValueUseKind,
lp: &Rc<LoanPath<'tcx>>)
{
/*!
* Reports an error if assigning to `lp` will use a
* moved/uninitialized value. Mainly this is concerned with
* detecting derefs of uninitialized pointers.
*
* For example:
*
* let a: int;
* a = 10; // ok, even though a is uninitialized
*
* struct Point { x: uint, y: uint }
* let p: Point;
* p.x = 22; // ok, even though `p` is uninitialized
*
* let p: ~Point;
* (*p).x = 22; // not ok, p is uninitialized, can't deref
*/
match lp.kind {
LpVar(_) | LpUpvar(_) => {
// assigning to `x` does not require that `x` is initialized

File diff suppressed because it is too large Load Diff

View File

@ -8,13 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
//! Helper routines used for fragmenting structural paths due to moves for
//! tracking drop obligations. Please see the extensive comments in the
//! section "Structural fragments" in `doc.rs`.
Helper routines used for fragmenting structural paths due to moves for
tracking drop obligations. Please see the extensive comments in the
section "Structural fragments" in `doc.rs`.
*/
use self::Fragment::*;
use session::config;
@ -176,16 +173,12 @@ pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
instrument_all_paths("assigned_leaf_path", &fragments.assigned_leaf_paths);
}
/// Normalizes the fragment sets in `this`; i.e., removes duplicate entries, constructs the set of
/// parents, and constructs the left-over fragments.
///
/// Note: "left-over fragments" means paths that were not directly referenced in moves nor
/// assignments, but must nonetheless be tracked as potential drop obligations.
pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
/*!
* Normalizes the fragment sets in `this`; i.e., removes
* duplicate entries, constructs the set of parents, and
* constructs the left-over fragments.
*
* Note: "left-over fragments" means paths that were not
* directly referenced in moves nor assignments, but must
* nonetheless be tracked as potential drop obligations.
*/
let mut fragments = this.fragments.borrow_mut();
@ -277,24 +270,20 @@ pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
fn non_member(elem: MovePathIndex, set: &[MovePathIndex]) -> bool {
match set.binary_search_elem(&elem) {
slice::Found(_) => false,
slice::NotFound(_) => true,
slice::BinarySearchResult::Found(_) => false,
slice::BinarySearchResult::NotFound(_) => true,
}
}
}
/// Adds all of the precisely-tracked siblings of `lp` as potential move paths of interest. For
/// example, if `lp` represents `s.x.j`, then adds moves paths for `s.x.i` and `s.x.k`, the
/// siblings of `s.x.j`.
fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
lp: Rc<LoanPath<'tcx>>,
origin_id: Option<ast::NodeId>) {
/*!
* Adds all of the precisely-tracked siblings of `lp` as
* potential move paths of interest. For example, if `lp`
* represents `s.x.j`, then adds moves paths for `s.x.i` and
* `s.x.k`, the siblings of `s.x.j`.
*/
match lp.kind {
LpVar(_) | LpUpvar(..) => {} // Local variables have no siblings.
@ -343,6 +332,8 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
}
}
/// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name).
/// Based on this, add move paths for all of the siblings of `origin_lp`.
fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
@ -353,12 +344,6 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
origin_id: Option<ast::NodeId>,
enum_variant_info: Option<(ast::DefId,
Rc<LoanPath<'tcx>>)>) {
/*!
* We have determined that `origin_lp` destructures to
* LpExtend(parent, original_field_name). Based on this,
* add move paths for all of the siblings of `origin_lp`.
*/
let parent_ty = parent_lp.to_type();
let add_fragment_sibling_local = |field_name| {
@ -454,6 +439,8 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
}
}
/// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original
/// loan-path).
fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
@ -461,10 +448,6 @@ fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
mc: mc::MutabilityCategory,
new_field_name: mc::FieldName,
origin_lp: &Rc<LoanPath<'tcx>>) -> MovePathIndex {
/*!
* Adds the single sibling `LpExtend(parent, new_field_name)`
* of `origin_lp` (the original loan-path).
*/
let opt_variant_did = match parent.kind {
LpDowncast(_, variant_did) => Some(variant_did),
LpVar(..) | LpUpvar(..) | LpExtend(..) => None,

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes moves.
*/
//! Computes moves.
use middle::borrowck::*;
use middle::borrowck::LoanPathKind::*;

View File

@ -8,10 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* This module implements the check that the lifetime of a borrow
* does not exceed the lifetime of the value being borrowed.
*/
//! This module implements the check that the lifetime of a borrow
//! does not exceed the lifetime of the value being borrowed.
use middle::borrowck::*;
use middle::expr_use_visitor as euv;

View File

@ -225,6 +225,9 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
/// Guarantees that `addr_of(cmt)` will be valid for the duration of `static_scope_r`, or
/// reports an error. This may entail taking out loans, which will be added to the
/// `req_loan_map`.
fn guarantee_valid(&mut self,
borrow_id: ast::NodeId,
borrow_span: Span,
@ -232,12 +235,6 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
req_kind: ty::BorrowKind,
loan_region: ty::Region,
cause: euv::LoanCause) {
/*!
* Guarantees that `addr_of(cmt)` will be valid for the duration of
* `static_scope_r`, or reports an error. This may entail taking
* out loans, which will be added to the `req_loan_map`.
*/
debug!("guarantee_valid(borrow_id={}, cmt={}, \
req_mutbl={}, loan_region={})",
borrow_id,

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes the restrictions that result from a borrow.
*/
//! Computes the restrictions that result from a borrow.
pub use self::RestrictionResult::*;

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See doc.rs for a thorough explanation of the borrow checker */
//! See doc.rs for a thorough explanation of the borrow checker
#![allow(non_camel_case_types)]

View File

@ -8,12 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Data structures used for tracking moves. Please see the extensive
comments in the section "Moves and initialization" in `doc.rs`.
*/
//! Data structures used for tracking moves. Please see the extensive
//! comments in the section "Moves and initialization" in `doc.rs`.
pub use self::MoveKind::*;
@ -297,15 +293,11 @@ impl<'tcx> MoveData<'tcx> {
self.path_parent(index) == InvalidMovePathIndex
}
/// Returns the existing move path index for `lp`, if any, and otherwise adds a new index for
/// `lp` and any of its base paths that do not yet have an index.
pub fn move_path(&self,
tcx: &ty::ctxt<'tcx>,
lp: Rc<LoanPath<'tcx>>) -> MovePathIndex {
/*!
* Returns the existing move path index for `lp`, if any,
* and otherwise adds a new index for `lp` and any of its
* base paths that do not yet have an index.
*/
match self.path_map.borrow().get(&lp) {
Some(&index) => {
return index;
@ -370,13 +362,10 @@ impl<'tcx> MoveData<'tcx> {
result
}
/// Adds any existing move path indices for `lp` and any base paths of `lp` to `result`, but
/// does not add new move paths
fn add_existing_base_paths(&self, lp: &Rc<LoanPath<'tcx>>,
result: &mut Vec<MovePathIndex>) {
/*!
* Adds any existing move path indices for `lp` and any base
* paths of `lp` to `result`, but does not add new move paths
*/
match self.path_map.borrow().get(lp).cloned() {
Some(index) => {
self.each_base_path(index, |p| {
@ -397,16 +386,12 @@ impl<'tcx> MoveData<'tcx> {
}
/// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`.
pub fn add_move(&self,
tcx: &ty::ctxt<'tcx>,
lp: Rc<LoanPath<'tcx>>,
id: ast::NodeId,
kind: MoveKind) {
/*!
* Adds a new move entry for a move of `lp` that occurs at
* location `id` with kind `kind`.
*/
debug!("add_move(lp={}, id={}, kind={})",
lp.repr(tcx),
id,
@ -428,6 +413,8 @@ impl<'tcx> MoveData<'tcx> {
});
}
/// Adds a new record for an assignment to `lp` that occurs at location `id` with the given
/// `span`.
pub fn add_assignment(&self,
tcx: &ty::ctxt<'tcx>,
lp: Rc<LoanPath<'tcx>>,
@ -435,11 +422,6 @@ impl<'tcx> MoveData<'tcx> {
span: Span,
assignee_id: ast::NodeId,
mode: euv::MutateMode) {
/*!
* Adds a new record for an assignment to `lp` that occurs at
* location `id` with the given `span`.
*/
debug!("add_assignment(lp={}, assign_id={}, assignee_id={}",
lp.repr(tcx), assign_id, assignee_id);
@ -473,18 +455,16 @@ impl<'tcx> MoveData<'tcx> {
}
}
/// Adds a new record for a match of `base_lp`, downcast to
/// variant `lp`, that occurs at location `pattern_id`. (One
/// should be able to recover the span info from the
/// `pattern_id` and the ast_map, I think.)
pub fn add_variant_match(&self,
tcx: &ty::ctxt<'tcx>,
lp: Rc<LoanPath<'tcx>>,
pattern_id: ast::NodeId,
base_lp: Rc<LoanPath<'tcx>>,
mode: euv::MatchMode) {
/*!
* Adds a new record for a match of `base_lp`, downcast to
* variant `lp`, that occurs at location `pattern_id`. (One
* should be able to recover the span info from the
* `pattern_id` and the ast_map, I think.)
*/
debug!("add_variant_match(lp={}, pattern_id={})",
lp.repr(tcx), pattern_id);
@ -507,18 +487,15 @@ impl<'tcx> MoveData<'tcx> {
fragments::fixup_fragment_sets(self, tcx)
}
/// Adds the gen/kills for the various moves and
/// assignments into the provided data flow contexts.
/// Moves are generated by moves and killed by assignments and
/// scoping. Assignments are generated by assignment to variables and
/// killed by scoping. See `doc.rs` for more details.
fn add_gen_kills(&self,
tcx: &ty::ctxt<'tcx>,
dfcx_moves: &mut MoveDataFlow,
dfcx_assign: &mut AssignDataFlow) {
/*!
* Adds the gen/kills for the various moves and
* assignments into the provided data flow contexts.
* Moves are generated by moves and killed by assignments and
* scoping. Assignments are generated by assignment to variables and
* killed by scoping. See `doc.rs` for more details.
*/
for (i, the_move) in self.moves.borrow().iter().enumerate() {
dfcx_moves.add_gen(the_move.id, i);
}
@ -695,18 +672,14 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
ret
}
/// Iterates through each move of `loan_path` (or some base path of `loan_path`) that *may*
/// have occurred on entry to `id` without an intervening assignment. In other words, any moves
/// that would invalidate a reference to `loan_path` at location `id`.
pub fn each_move_of(&self,
id: ast::NodeId,
loan_path: &Rc<LoanPath<'tcx>>,
f: |&Move, &LoanPath<'tcx>| -> bool)
-> bool {
/*!
* Iterates through each move of `loan_path` (or some base path
* of `loan_path`) that *may* have occurred on entry to `id` without
* an intervening assignment. In other words, any moves that
* would invalidate a reference to `loan_path` at location `id`.
*/
// Bad scenarios:
//
// 1. Move of `a.b.c`, use of `a.b.c`
@ -755,17 +728,13 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
})
}
/// Iterates through every assignment to `loan_path` that may have occurred on entry to `id`.
/// `loan_path` must be a single variable.
pub fn each_assignment_of(&self,
id: ast::NodeId,
loan_path: &Rc<LoanPath<'tcx>>,
f: |&Assignment| -> bool)
-> bool {
/*!
* Iterates through every assignment to `loan_path` that
* may have occurred on entry to `id`. `loan_path` must be
* a single variable.
*/
let loan_path_index = {
match self.move_data.existing_move_path(loan_path) {
Some(i) => i,

View File

@ -8,12 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Module that constructs a control-flow graph representing an item.
Uses `Graph` as the underlying representation.
*/
//! Module that constructs a control-flow graph representing an item.
//! Uses `Graph` as the underlying representation.
use middle::graph;
use middle::ty;

View File

@ -567,6 +567,34 @@ pub fn eval_const_expr_partial(tcx: &ty::ctxt, e: &Expr) -> Result<const_val, St
None => Ok(const_int(0i64))
}
}
ast::ExprTupField(ref base, index) => {
// Get the base tuple if it is constant
if let Some(&ast::ExprTup(ref fields)) = lookup_const(tcx, &**base).map(|s| &s.node) {
// Check that the given index is within bounds and evaluate its value
if fields.len() > index.node {
return eval_const_expr_partial(tcx, &*fields[index.node])
} else {
return Err("tuple index out of bounds".to_string())
}
}
Err("non-constant struct in constant expr".to_string())
}
ast::ExprField(ref base, field_name) => {
// Get the base expression if it is a struct and it is constant
if let Some(&ast::ExprStruct(_, ref fields, _)) = lookup_const(tcx, &**base)
.map(|s| &s.node) {
// Check that the given field exists and evaluate it
if let Some(f) = fields.iter().find(|f|
f.ident.node.as_str() == field_name.node.as_str()) {
return eval_const_expr_partial(tcx, &*f.expr)
} else {
return Err("nonexistent struct field".to_string())
}
}
Err("non-constant struct in constant expr".to_string())
}
_ => Err("unsupported constant expr".to_string())
}
}

View File

@ -9,12 +9,10 @@
// except according to those terms.
/*!
* A module for propagating forward dataflow information. The analysis
* assumes that the items to be propagated can be represented as bits
* and thus uses bitvectors. Your job is simply to specify the so-called
* GEN and KILL bits for each expression.
*/
//! A module for propagating forward dataflow information. The analysis
//! assumes that the items to be propagated can be represented as bits
//! and thus uses bitvectors. Your job is simply to specify the so-called
//! GEN and KILL bits for each expression.
pub use self::EntryOrExit::*;

View File

@ -8,11 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A different sort of visitor for walking fn bodies. Unlike the
* normal visitor, which just walks the entire body in one shot, the
* `ExprUseVisitor` determines how expressions are being used.
*/
//! A different sort of visitor for walking fn bodies. Unlike the
//! normal visitor, which just walks the entire body in one shot, the
//! `ExprUseVisitor` determines how expressions are being used.
pub use self::MutateMode::*;
pub use self::LoanCause::*;
@ -295,7 +293,7 @@ impl OverloadedCallType {
pub struct ExprUseVisitor<'d,'t,'tcx,TYPER:'t> {
typer: &'t TYPER,
mc: mc::MemCategorizationContext<'t,TYPER>,
delegate: &'d mut Delegate<'tcx>+'d,
delegate: &'d mut (Delegate<'tcx>+'d),
}
// If the TYPER results in an error, it's because the type check
@ -716,12 +714,9 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
}
}
/// Indicates that the value of `blk` will be consumed, meaning either copied or moved
/// depending on its type.
fn walk_block(&mut self, blk: &ast::Block) {
/*!
* Indicates that the value of `blk` will be consumed,
* meaning either copied or moved depending on its type.
*/
debug!("walk_block(blk.id={})", blk.id);
for stmt in blk.stmts.iter() {
@ -821,16 +816,12 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
}
}
/// Autoderefs for overloaded Deref calls in fact reference their receiver. That is, if we have
/// `(*x)` where `x` is of type `Rc<T>`, then this in fact is equivalent to `x.deref()`. Since
/// `deref()` is declared with `&self`, this is an autoref of `x`.
fn walk_autoderefs(&mut self,
expr: &ast::Expr,
autoderefs: uint) {
/*!
* Autoderefs for overloaded Deref calls in fact reference
* their receiver. That is, if we have `(*x)` where `x` is of
* type `Rc<T>`, then this in fact is equivalent to
* `x.deref()`. Since `deref()` is declared with `&self`, this
* is an autoref of `x`.
*/
debug!("walk_autoderefs expr={} autoderefs={}", expr.repr(self.tcx()), autoderefs);
for i in range(0, autoderefs) {

View File

@ -13,7 +13,7 @@ use syntax::ast;
use self::SimplifiedType::*;
/** See `simplify_type */
/// See `simplify_type
#[deriving(Clone, PartialEq, Eq, Hash)]
pub enum SimplifiedType {
BoolSimplifiedType,
@ -33,26 +33,20 @@ pub enum SimplifiedType {
ParameterSimplifiedType,
}
/// Tries to simplify a type by dropping type parameters, deref'ing away any reference types, etc.
/// The idea is to get something simple that we can use to quickly decide if two types could unify
/// during method lookup.
///
/// If `can_simplify_params` is false, then we will fail to simplify type parameters entirely. This
/// is useful when those type parameters would be instantiated with fresh type variables, since
/// then we can't say much about whether two types would unify. Put another way,
/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
/// are to be considered bound.
pub fn simplify_type(tcx: &ty::ctxt,
ty: Ty,
can_simplify_params: bool)
-> Option<SimplifiedType>
{
/*!
* Tries to simplify a type by dropping type parameters, deref'ing
* away any reference types, etc. The idea is to get something
* simple that we can use to quickly decide if two types could
* unify during method lookup.
*
* If `can_simplify_params` is false, then we will fail to
* simplify type parameters entirely. This is useful when those
* type parameters would be instantiated with fresh type
* variables, since then we can't say much about whether two types
* would unify. Put another way, `can_simplify_params` should be
* true if type parameters appear free in `ty` and `false` if they
* are to be considered bound.
*/
match ty.sty {
ty::ty_bool => Some(BoolSimplifiedType),
ty::ty_char => Some(CharSimplifiedType),

View File

@ -8,31 +8,27 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A graph module for use in dataflow, region resolution, and elsewhere.
# Interface details
You customize the graph by specifying a "node data" type `N` and an
"edge data" type `E`. You can then later gain access (mutable or
immutable) to these "user-data" bits. Currently, you can only add
nodes or edges to the graph. You cannot remove or modify them once
added. This could be changed if we have a need.
# Implementation details
The main tricky thing about this code is the way that edges are
stored. The edges are stored in a central array, but they are also
threaded onto two linked lists for each node, one for incoming edges
and one for outgoing edges. Note that every edge is a member of some
incoming list and some outgoing list. Basically you can load the
first index of the linked list from the node data structures (the
field `first_edge`) and then, for each edge, load the next index from
the field `next_edge`). Each of those fields is an array that should
be indexed by the direction (see the type `Direction`).
*/
//! A graph module for use in dataflow, region resolution, and elsewhere.
//!
//! # Interface details
//!
//! You customize the graph by specifying a "node data" type `N` and an
//! "edge data" type `E`. You can then later gain access (mutable or
//! immutable) to these "user-data" bits. Currently, you can only add
//! nodes or edges to the graph. You cannot remove or modify them once
//! added. This could be changed if we have a need.
//!
//! # Implementation details
//!
//! The main tricky thing about this code is the way that edges are
//! stored. The edges are stored in a central array, but they are also
//! threaded onto two linked lists for each node, one for incoming edges
//! and one for outgoing edges. Note that every edge is a member of some
//! incoming list and some outgoing list. Basically you can load the
//! first index of the linked list from the node data structures (the
//! field `first_edge`) and then, for each edge, load the next index from
//! the field `next_edge`). Each of those fields is an array that should
//! be indexed by the direction (see the type `Direction`).
#![allow(dead_code)] // still WIP

View File

@ -8,105 +8,103 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A classic liveness analysis based on dataflow over the AST. Computes,
* for each local variable in a function, whether that variable is live
* at a given point. Program execution points are identified by their
* id.
*
* # Basic idea
*
* The basic model is that each local variable is assigned an index. We
* represent sets of local variables using a vector indexed by this
* index. The value in the vector is either 0, indicating the variable
* is dead, or the id of an expression that uses the variable.
*
* We conceptually walk over the AST in reverse execution order. If we
* find a use of a variable, we add it to the set of live variables. If
* we find an assignment to a variable, we remove it from the set of live
* variables. When we have to merge two flows, we take the union of
* those two flows---if the variable is live on both paths, we simply
* pick one id. In the event of loops, we continue doing this until a
* fixed point is reached.
*
* ## Checking initialization
*
* At the function entry point, all variables must be dead. If this is
* not the case, we can report an error using the id found in the set of
* live variables, which identifies a use of the variable which is not
* dominated by an assignment.
*
* ## Checking moves
*
* After each explicit move, the variable must be dead.
*
* ## Computing last uses
*
* Any use of the variable where the variable is dead afterwards is a
* last use.
*
* # Implementation details
*
* The actual implementation contains two (nested) walks over the AST.
* The outer walk has the job of building up the ir_maps instance for the
* enclosing function. On the way down the tree, it identifies those AST
* nodes and variable IDs that will be needed for the liveness analysis
* and assigns them contiguous IDs. The liveness id for an AST node is
* called a `live_node` (it's a newtype'd uint) and the id for a variable
* is called a `variable` (another newtype'd uint).
*
* On the way back up the tree, as we are about to exit from a function
* declaration we allocate a `liveness` instance. Now that we know
* precisely how many nodes and variables we need, we can allocate all
* the various arrays that we will need to precisely the right size. We then
* perform the actual propagation on the `liveness` instance.
*
* This propagation is encoded in the various `propagate_through_*()`
* methods. It effectively does a reverse walk of the AST; whenever we
* reach a loop node, we iterate until a fixed point is reached.
*
* ## The `Users` struct
*
* At each live node `N`, we track three pieces of information for each
* variable `V` (these are encapsulated in the `Users` struct):
*
* - `reader`: the `LiveNode` ID of some node which will read the value
* that `V` holds on entry to `N`. Formally: a node `M` such
* that there exists a path `P` from `N` to `M` where `P` does not
* write `V`. If the `reader` is `invalid_node()`, then the current
* value will never be read (the variable is dead, essentially).
*
* - `writer`: the `LiveNode` ID of some node which will write the
* variable `V` and which is reachable from `N`. Formally: a node `M`
* such that there exists a path `P` from `N` to `M` and `M` writes
* `V`. If the `writer` is `invalid_node()`, then there is no writer
* of `V` that follows `N`.
*
* - `used`: a boolean value indicating whether `V` is *used*. We
* distinguish a *read* from a *use* in that a *use* is some read that
* is not just used to generate a new value. For example, `x += 1` is
* a read but not a use. This is used to generate better warnings.
*
* ## Special Variables
*
* We generate various special variables for various, well, special purposes.
* These are described in the `specials` struct:
*
* - `exit_ln`: a live node that is generated to represent every 'exit' from
* the function, whether it be by explicit return, panic, or other means.
*
* - `fallthrough_ln`: a live node that represents a fallthrough
*
* - `no_ret_var`: a synthetic variable that is only 'read' from, the
* fallthrough node. This allows us to detect functions where we fail
* to return explicitly.
* - `clean_exit_var`: a synthetic variable that is only 'read' from the
* fallthrough node. It is only live if the function could converge
* via means other than an explicit `return` expression. That is, it is
* only dead if the end of the function's block can never be reached.
* It is the responsibility of typeck to ensure that there are no
* `return` expressions in a function declared as diverging.
*/
//! A classic liveness analysis based on dataflow over the AST. Computes,
//! for each local variable in a function, whether that variable is live
//! at a given point. Program execution points are identified by their
//! id.
//!
//! # Basic idea
//!
//! The basic model is that each local variable is assigned an index. We
//! represent sets of local variables using a vector indexed by this
//! index. The value in the vector is either 0, indicating the variable
//! is dead, or the id of an expression that uses the variable.
//!
//! We conceptually walk over the AST in reverse execution order. If we
//! find a use of a variable, we add it to the set of live variables. If
//! we find an assignment to a variable, we remove it from the set of live
//! variables. When we have to merge two flows, we take the union of
//! those two flows---if the variable is live on both paths, we simply
//! pick one id. In the event of loops, we continue doing this until a
//! fixed point is reached.
//!
//! ## Checking initialization
//!
//! At the function entry point, all variables must be dead. If this is
//! not the case, we can report an error using the id found in the set of
//! live variables, which identifies a use of the variable which is not
//! dominated by an assignment.
//!
//! ## Checking moves
//!
//! After each explicit move, the variable must be dead.
//!
//! ## Computing last uses
//!
//! Any use of the variable where the variable is dead afterwards is a
//! last use.
//!
//! # Implementation details
//!
//! The actual implementation contains two (nested) walks over the AST.
//! The outer walk has the job of building up the ir_maps instance for the
//! enclosing function. On the way down the tree, it identifies those AST
//! nodes and variable IDs that will be needed for the liveness analysis
//! and assigns them contiguous IDs. The liveness id for an AST node is
//! called a `live_node` (it's a newtype'd uint) and the id for a variable
//! is called a `variable` (another newtype'd uint).
//!
//! On the way back up the tree, as we are about to exit from a function
//! declaration we allocate a `liveness` instance. Now that we know
//! precisely how many nodes and variables we need, we can allocate all
//! the various arrays that we will need to precisely the right size. We then
//! perform the actual propagation on the `liveness` instance.
//!
//! This propagation is encoded in the various `propagate_through_*()`
//! methods. It effectively does a reverse walk of the AST; whenever we
//! reach a loop node, we iterate until a fixed point is reached.
//!
//! ## The `Users` struct
//!
//! At each live node `N`, we track three pieces of information for each
//! variable `V` (these are encapsulated in the `Users` struct):
//!
//! - `reader`: the `LiveNode` ID of some node which will read the value
//! that `V` holds on entry to `N`. Formally: a node `M` such
//! that there exists a path `P` from `N` to `M` where `P` does not
//! write `V`. If the `reader` is `invalid_node()`, then the current
//! value will never be read (the variable is dead, essentially).
//!
//! - `writer`: the `LiveNode` ID of some node which will write the
//! variable `V` and which is reachable from `N`. Formally: a node `M`
//! such that there exists a path `P` from `N` to `M` and `M` writes
//! `V`. If the `writer` is `invalid_node()`, then there is no writer
//! of `V` that follows `N`.
//!
//! - `used`: a boolean value indicating whether `V` is *used*. We
//! distinguish a *read* from a *use* in that a *use* is some read that
//! is not just used to generate a new value. For example, `x += 1` is
//! a read but not a use. This is used to generate better warnings.
//!
//! ## Special Variables
//!
//! We generate various special variables for various, well, special purposes.
//! These are described in the `specials` struct:
//!
//! - `exit_ln`: a live node that is generated to represent every 'exit' from
//! the function, whether it be by explicit return, panic, or other means.
//!
//! - `fallthrough_ln`: a live node that represents a fallthrough
//!
//! - `no_ret_var`: a synthetic variable that is only 'read' from, the
//! fallthrough node. This allows us to detect functions where we fail
//! to return explicitly.
//! - `clean_exit_var`: a synthetic variable that is only 'read' from the
//! fallthrough node. It is only live if the function could converge
//! via means other than an explicit `return` expression. That is, it is
//! only dead if the end of the function's block can never be reached.
//! It is the responsibility of typeck to ensure that there are no
//! `return` expressions in a function declared as diverging.
use self::LoopKind::*;
use self::LiveNodeKind::*;
use self::VarKind::*;

View File

@ -8,57 +8,55 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* # Categorization
*
* The job of the categorization module is to analyze an expression to
* determine what kind of memory is used in evaluating it (for example,
* where dereferences occur and what kind of pointer is dereferenced;
* whether the memory is mutable; etc)
*
* Categorization effectively transforms all of our expressions into
* expressions of the following forms (the actual enum has many more
* possibilities, naturally, but they are all variants of these base
* forms):
*
* E = rvalue // some computed rvalue
* | x // address of a local variable or argument
* | *E // deref of a ptr
* | E.comp // access to an interior component
*
* Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
* address where the result is to be found. If Expr is an lvalue, then this
* is the address of the lvalue. If Expr is an rvalue, this is the address of
* some temporary spot in memory where the result is stored.
*
* Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr)
* as follows:
*
* - cat: what kind of expression was this? This is a subset of the
* full expression forms which only includes those that we care about
* for the purpose of the analysis.
* - mutbl: mutability of the address A
* - ty: the type of data found at the address A
*
* The resulting categorization tree differs somewhat from the expressions
* themselves. For example, auto-derefs are explicit. Also, an index a[b] is
* decomposed into two operations: a dereference to reach the array data and
* then an index to jump forward to the relevant item.
*
* ## By-reference upvars
*
* One part of the translation which may be non-obvious is that we translate
* closure upvars into the dereference of a borrowed pointer; this more closely
* resembles the runtime translation. So, for example, if we had:
*
* let mut x = 3;
* let y = 5;
* let inc = || x += y;
*
* Then when we categorize `x` (*within* the closure) we would yield a
* result of `*x'`, effectively, where `x'` is a `cat_upvar` reference
* tied to `x`. The type of `x'` will be a borrowed pointer.
*/
//! # Categorization
//!
//! The job of the categorization module is to analyze an expression to
//! determine what kind of memory is used in evaluating it (for example,
//! where dereferences occur and what kind of pointer is dereferenced;
//! whether the memory is mutable; etc)
//!
//! Categorization effectively transforms all of our expressions into
//! expressions of the following forms (the actual enum has many more
//! possibilities, naturally, but they are all variants of these base
//! forms):
//!
//! E = rvalue // some computed rvalue
//! | x // address of a local variable or argument
//! | *E // deref of a ptr
//! | E.comp // access to an interior component
//!
//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
//! address where the result is to be found. If Expr is an lvalue, then this
//! is the address of the lvalue. If Expr is an rvalue, this is the address of
//! some temporary spot in memory where the result is stored.
//!
//! Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr)
//! as follows:
//!
//! - cat: what kind of expression was this? This is a subset of the
//! full expression forms which only includes those that we care about
//! for the purpose of the analysis.
//! - mutbl: mutability of the address A
//! - ty: the type of data found at the address A
//!
//! The resulting categorization tree differs somewhat from the expressions
//! themselves. For example, auto-derefs are explicit. Also, an index a[b] is
//! decomposed into two operations: a dereference to reach the array data and
//! then an index to jump forward to the relevant item.
//!
//! ## By-reference upvars
//!
//! One part of the translation which may be non-obvious is that we translate
//! closure upvars into the dereference of a borrowed pointer; this more closely
//! resembles the runtime translation. So, for example, if we had:
//!
//! let mut x = 3;
//! let y = 5;
//! let inc = || x += y;
//!
//! Then when we categorize `x` (*within* the closure) we would yield a
//! result of `*x'`, effectively, where `x'` is a `cat_upvar` reference
//! tied to `x`. The type of `x'` will be a borrowed pointer.
#![allow(non_camel_case_types)]
@ -266,24 +264,22 @@ pub struct MemCategorizationContext<'t,TYPER:'t> {
pub type McResult<T> = Result<T, ()>;
/**
* The `Typer` trait provides the interface for the mem-categorization
* module to the results of the type check. It can be used to query
* the type assigned to an expression node, to inquire after adjustments,
* and so on.
*
* This interface is needed because mem-categorization is used from
* two places: `regionck` and `borrowck`. `regionck` executes before
* type inference is complete, and hence derives types and so on from
* intermediate tables. This also implies that type errors can occur,
* and hence `node_ty()` and friends return a `Result` type -- any
* error will propagate back up through the mem-categorization
* routines.
*
* In the borrow checker, in contrast, type checking is complete and we
* know that no errors have occurred, so we simply consult the tcx and we
* can be sure that only `Ok` results will occur.
*/
/// The `Typer` trait provides the interface for the mem-categorization
/// module to the results of the type check. It can be used to query
/// the type assigned to an expression node, to inquire after adjustments,
/// and so on.
///
/// This interface is needed because mem-categorization is used from
/// two places: `regionck` and `borrowck`. `regionck` executes before
/// type inference is complete, and hence derives types and so on from
/// intermediate tables. This also implies that type errors can occur,
/// and hence `node_ty()` and friends return a `Result` type -- any
/// error will propagate back up through the mem-categorization
/// routines.
///
/// In the borrow checker, in contrast, type checking is complete and we
/// know that no errors have occurred, so we simply consult the tcx and we
/// can be sure that only `Ok` results will occur.
pub trait Typer<'tcx> {
fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>>;
@ -1058,20 +1054,17 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
}
}
/// Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is the cmt for `P`, `slice_pat` is
/// the pattern `Q`, returns:
///
/// * a cmt for `Q`
/// * the mutability and region of the slice `Q`
///
/// These last two bits of info happen to be things that borrowck needs.
pub fn cat_slice_pattern(&self,
vec_cmt: cmt<'tcx>,
slice_pat: &ast::Pat)
-> McResult<(cmt<'tcx>, ast::Mutability, ty::Region)> {
/*!
* Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is
* the cmt for `P`, `slice_pat` is the pattern `Q`, returns:
* - a cmt for `Q`
* - the mutability and region of the slice `Q`
*
* These last two bits of info happen to be things that
* borrowck needs.
*/
let slice_ty = if_ok!(self.node_ty(slice_pat.id));
let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(),
slice_pat,
@ -1079,17 +1072,13 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
let cmt_slice = self.cat_index(slice_pat, self.deref_vec(slice_pat, vec_cmt));
return Ok((cmt_slice, slice_mutbl, slice_r));
/// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b,
/// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we
/// have to recurse through rptrs.
fn vec_slice_info(tcx: &ty::ctxt,
pat: &ast::Pat,
slice_ty: Ty)
-> (ast::Mutability, ty::Region) {
/*!
* In a pattern like [a, b, ..c], normally `c` has slice type,
* but if you have [a, b, ..ref c], then the type of `ref c`
* will be `&&[]`, so to extract the slice details we have
* to recurse through rptrs.
*/
match slice_ty.sty {
ty::ty_rptr(r, ref mt) => match mt.ty.sty {
ty::ty_vec(_, None) => (mt.mutbl, r),
@ -1428,13 +1417,9 @@ impl<'tcx> cmt_<'tcx> {
}
}
/// Returns `Some(_)` if this lvalue represents a freely aliasable pointer type.
pub fn freely_aliasable(&self, ctxt: &ty::ctxt<'tcx>)
-> Option<AliasableReason> {
/*!
* Returns `Some(_)` if this lvalue represents a freely aliasable
* pointer type.
*/
// Maybe non-obvious: copied upvars can only be considered
// non-aliasable in once closures, since any other kind can be
// aliased and eventually recused.

View File

@ -243,7 +243,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
// * Private trait impls for private types can be completely ignored
ast::ItemImpl(_, _, ref ty, ref impl_items) => {
let public_ty = match ty.node {
ast::TyPath(_, _, id) => {
ast::TyPath(_, id) => {
match self.tcx.def_map.borrow()[id].clone() {
def::DefPrimTy(..) => true,
def => {
@ -311,7 +311,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
ast::ItemTy(ref ty, _) if public_first => {
match ty.node {
ast::TyPath(_, _, id) => {
ast::TyPath(_, id) => {
match self.tcx.def_map.borrow()[id].clone() {
def::DefPrimTy(..) | def::DefTyParam(..) => {},
def => {
@ -616,7 +616,7 @@ impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
// was private.
ast::ItemImpl(_, _, ref ty, _) => {
let id = match ty.node {
ast::TyPath(_, _, id) => id,
ast::TyPath(_, id) => id,
_ => return Some((err_span, err_msg, None)),
};
let def = self.tcx.def_map.borrow()[id].clone();
@ -1292,7 +1292,7 @@ impl<'a, 'tcx> VisiblePrivateTypesVisitor<'a, 'tcx> {
impl<'a, 'b, 'tcx, 'v> Visitor<'v> for CheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty) {
match ty.node {
ast::TyPath(_, _, path_id) => {
ast::TyPath(_, path_id) => {
if self.inner.path_is_private_type(path_id) {
self.contains_private = true;
// found what we're looking for so let's stop
@ -1493,7 +1493,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for VisiblePrivateTypesVisitor<'a, 'tcx> {
fn visit_ty(&mut self, t: &ast::Ty) {
match t.node {
ast::TyPath(ref p, _, path_id) => {
ast::TyPath(ref p, path_id) => {
if !self.tcx.sess.features.borrow().visible_private_types &&
self.path_is_private_type(path_id) {
self.tcx.sess.span_err(p.span,

View File

@ -8,18 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
This file actually contains two passes related to regions. The first
pass builds up the `scope_map`, which describes the parent links in
the region hierarchy. The second pass infers which types must be
region parameterized.
Most of the documentation on regions can be found in
`middle/typeck/infer/region_inference.rs`
*/
//! This file actually contains two passes related to regions. The first
//! pass builds up the `scope_map`, which describes the parent links in
//! the region hierarchy. The second pass infers which types must be
//! region parameterized.
//!
//! Most of the documentation on regions can be found in
//! `middle/typeck/infer/region_inference.rs`
use session::Session;
use middle::ty::{mod, Ty, FreeRegion};
@ -72,46 +67,44 @@ impl CodeExtent {
}
}
/**
The region maps encode information about region relationships.
- `scope_map` maps from a scope id to the enclosing scope id; this is
usually corresponding to the lexical nesting, though in the case of
closures the parent scope is the innermost conditional expression or repeating
block
- `var_map` maps from a variable or binding id to the block in which
that variable is declared.
- `free_region_map` maps from a free region `a` to a list of free
regions `bs` such that `a <= b for all b in bs`
- the free region map is populated during type check as we check
each function. See the function `relate_free_regions` for
more information.
- `rvalue_scopes` includes entries for those expressions whose cleanup
scope is larger than the default. The map goes from the expression
id to the cleanup scope id. For rvalues not present in this table,
the appropriate cleanup scope is the innermost enclosing statement,
conditional expression, or repeating block (see `terminating_scopes`).
- `terminating_scopes` is a set containing the ids of each statement,
or conditional/repeating expression. These scopes are calling "terminating
scopes" because, when attempting to find the scope of a temporary, by
default we search up the enclosing scopes until we encounter the
terminating scope. A conditional/repeating
expression is one which is not guaranteed to execute exactly once
upon entering the parent scope. This could be because the expression
only executes conditionally, such as the expression `b` in `a && b`,
or because the expression may execute many times, such as a loop
body. The reason that we distinguish such expressions is that, upon
exiting the parent scope, we cannot statically know how many times
the expression executed, and thus if the expression creates
temporaries we cannot know statically how many such temporaries we
would have to cleanup. Therefore we ensure that the temporaries never
outlast the conditional/repeating expression, preventing the need
for dynamic checks and/or arbitrary amounts of stack space.
*/
/// The region maps encode information about region relationships.
///
/// - `scope_map` maps from a scope id to the enclosing scope id; this
/// usually corresponds to the lexical nesting, though in the case of
/// closures the parent scope is the innermost conditional expression or repeating
/// block
///
/// - `var_map` maps from a variable or binding id to the block in which
/// that variable is declared.
///
/// - `free_region_map` maps from a free region `a` to a list of free
/// regions `bs` such that `a <= b for all b in bs`
/// - the free region map is populated during type check as we check
/// each function. See the function `relate_free_regions` for
/// more information.
///
/// - `rvalue_scopes` includes entries for those expressions whose cleanup
/// scope is larger than the default. The map goes from the expression
/// id to the cleanup scope id. For rvalues not present in this table,
/// the appropriate cleanup scope is the innermost enclosing statement,
/// conditional expression, or repeating block (see `terminating_scopes`).
///
/// - `terminating_scopes` is a set containing the ids of each statement,
/// or conditional/repeating expression. These scopes are called "terminating
/// scopes" because, when attempting to find the scope of a temporary, by
/// default we search up the enclosing scopes until we encounter the
/// terminating scope. A conditional/repeating
/// expression is one which is not guaranteed to execute exactly once
/// upon entering the parent scope. This could be because the expression
/// only executes conditionally, such as the expression `b` in `a && b`,
/// or because the expression may execute many times, such as a loop
/// body. The reason that we distinguish such expressions is that, upon
/// exiting the parent scope, we cannot statically know how many times
/// the expression executed, and thus if the expression creates
/// temporaries we cannot know statically how many such temporaries we
/// would have to cleanup. Therefore we ensure that the temporaries never
/// outlast the conditional/repeating expression, preventing the need
/// for dynamic checks and/or arbitrary amounts of stack space.
pub struct RegionMaps {
scope_map: RefCell<FnvHashMap<CodeExtent, CodeExtent>>,
var_map: RefCell<NodeMap<CodeExtent>>,
@ -171,14 +164,10 @@ impl RegionMaps {
self.rvalue_scopes.borrow_mut().insert(var, lifetime);
}
/// Records that a scope is a TERMINATING SCOPE. Whenever we create automatic temporaries --
/// e.g. by an expression like `a().f` -- they will be freed within the innermost terminating
/// scope.
pub fn mark_as_terminating_scope(&self, scope_id: CodeExtent) {
/*!
* Records that a scope is a TERMINATING SCOPE. Whenever we
* create automatic temporaries -- e.g. by an
* expression like `a().f` -- they will be freed within
* the innermost terminating scope.
*/
debug!("record_terminating_scope(scope_id={})", scope_id);
self.terminating_scopes.borrow_mut().insert(scope_id);
}
@ -197,10 +186,8 @@ impl RegionMaps {
}
}
/// Returns the lifetime of the local variable `var_id`
pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent {
/*!
* Returns the lifetime of the local variable `var_id`
*/
match self.var_map.borrow().get(&var_id) {
Some(&r) => r,
None => { panic!("no enclosing scope for id {}", var_id); }
@ -257,15 +244,12 @@ impl RegionMaps {
self.is_subscope_of(scope2, scope1)
}
/// Returns true if `subscope` is equal to or is lexically nested inside `superscope` and false
/// otherwise.
pub fn is_subscope_of(&self,
subscope: CodeExtent,
superscope: CodeExtent)
-> bool {
/*!
* Returns true if `subscope` is equal to or is lexically
* nested inside `superscope` and false otherwise.
*/
let mut s = subscope;
while superscope != s {
match self.scope_map.borrow().get(&s) {
@ -285,27 +269,20 @@ impl RegionMaps {
return true;
}
/// Determines whether two free regions have a subregion relationship
/// by walking the graph encoded in `free_region_map`. Note that
/// it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
/// (that is, the user can give two different names to the same lifetime).
pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
/*!
* Determines whether two free regions have a subregion relationship
* by walking the graph encoded in `free_region_map`. Note that
* it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
* (that is, the user can give two different names to the same lifetime).
*/
can_reach(&*self.free_region_map.borrow(), sub, sup)
}
/// Determines whether one region is a subregion of another. This is intended to run *after
/// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
pub fn is_subregion_of(&self,
sub_region: ty::Region,
super_region: ty::Region)
-> bool {
/*!
* Determines whether one region is a subregion of another. This is
* intended to run *after inference* and sadly the logic is somewhat
* duplicated with the code in infer.rs.
*/
debug!("is_subregion_of(sub_region={}, super_region={})",
sub_region, super_region);
@ -345,16 +322,12 @@ impl RegionMaps {
}
}
/// Finds the nearest common ancestor (if any) of two scopes. That is, finds the smallest
/// scope which is greater than or equal to both `scope_a` and `scope_b`.
pub fn nearest_common_ancestor(&self,
scope_a: CodeExtent,
scope_b: CodeExtent)
-> Option<CodeExtent> {
/*!
* Finds the nearest common ancestor (if any) of two scopes. That
* is, finds the smallest scope which is greater than or equal to
* both `scope_a` and `scope_b`.
*/
if scope_a == scope_b { return Some(scope_a); }
let a_ancestors = ancestors_of(self, scope_a);
@ -681,18 +654,15 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
visit::walk_local(visitor, local);
/// True if `pat` matches the `P&` nonterminal:
///
/// P& = ref X
/// | StructName { ..., P&, ... }
/// | VariantName(..., P&, ...)
/// | [ ..., P&, ... ]
/// | ( ..., P&, ... )
/// | box P&
fn is_binding_pat(pat: &ast::Pat) -> bool {
/*!
* True if `pat` match the `P&` nonterminal:
*
* P& = ref X
* | StructName { ..., P&, ... }
* | VariantName(..., P&, ...)
* | [ ..., P&, ... ]
* | ( ..., P&, ... )
* | box P&
*/
match pat.node {
ast::PatIdent(ast::BindByRef(_), _, _) => true,
@ -719,35 +689,27 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
}
}
/// True if `ty` is a borrowed pointer type like `&int` or `&[...]`.
fn is_borrowed_ty(ty: &ast::Ty) -> bool {
/*!
* True if `ty` is a borrowed pointer type
* like `&int` or `&[...]`.
*/
match ty.node {
ast::TyRptr(..) => true,
_ => false
}
}
/// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
///
/// E& = & ET
/// | StructName { ..., f: E&, ... }
/// | [ ..., E&, ... ]
/// | ( ..., E&, ... )
/// | {...; E&}
/// | box E&
/// | E& as ...
/// | ( E& )
fn record_rvalue_scope_if_borrow_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
blk_id: CodeExtent) {
/*!
* If `expr` matches the `E&` grammar, then records an extended
* rvalue scope as appropriate:
*
* E& = & ET
* | StructName { ..., f: E&, ... }
* | [ ..., E&, ... ]
* | ( ..., E&, ... )
* | {...; E&}
* | box E&
* | E& as ...
* | ( E& )
*/
match expr.node {
ast::ExprAddrOf(_, ref subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id);
@ -787,29 +749,24 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
}
}
/// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
/// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
/// case, the "temporary lifetime" of `expr` is extended to be the block enclosing the `let`
/// statement.
///
/// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
/// `<rvalue>` as `blk_id`:
///
/// ET = *ET
/// | ET[...]
/// | ET.f
/// | (ET)
/// | <rvalue>
///
/// Note: ET is intended to match "rvalues or lvalues based on rvalues".
fn record_rvalue_scope<'a>(visitor: &mut RegionResolutionVisitor,
expr: &'a ast::Expr,
blk_scope: CodeExtent) {
/*!
* Applied to an expression `expr` if `expr` -- or something
* owned or partially owned by `expr` -- is going to be
* indirectly referenced by a variable in a let statement. In
* that case, the "temporary lifetime" or `expr` is extended
* to be the block enclosing the `let` statement.
*
* More formally, if `expr` matches the grammar `ET`, record
* the rvalue scope of the matching `<rvalue>` as `blk_id`:
*
* ET = *ET
* | ET[...]
* | ET.f
* | (ET)
* | <rvalue>
*
* Note: ET is intended to match "rvalues or
* lvalues based on rvalues".
*/
let mut expr = expr;
loop {
// Note: give all the expressions matching `ET` with the

View File

@ -63,7 +63,7 @@ use syntax::ast::{PolyTraitRef, PrimTy, Public, SelfExplicit, SelfStatic};
use syntax::ast::{RegionTyParamBound, StmtDecl, StructField};
use syntax::ast::{StructVariantKind, TraitRef, TraitTyParamBound};
use syntax::ast::{TupleVariantKind, Ty, TyBool, TyChar, TyClosure, TyF32};
use syntax::ast::{TyF64, TyFloat, TyI, TyI8, TyI16, TyI32, TyI64, TyInt};
use syntax::ast::{TyF64, TyFloat, TyI, TyI8, TyI16, TyI32, TyI64, TyInt, TyObjectSum};
use syntax::ast::{TyParam, TyParamBound, TyPath, TyPtr, TyPolyTraitRef, TyProc, TyQPath};
use syntax::ast::{TyRptr, TyStr, TyU, TyU8, TyU16, TyU32, TyU64, TyUint};
use syntax::ast::{TypeImplItem, UnnamedField};
@ -761,10 +761,8 @@ impl NameBindings {
}
}
/**
* Returns the module node. Panics if this node does not have a module
* definition.
*/
/// Returns the module node. Panics if this node does not have a module
/// definition.
fn get_module(&self) -> Rc<Module> {
match self.get_module_if_available() {
None => {
@ -1098,18 +1096,16 @@ impl<'a> Resolver<'a> {
visit::walk_crate(&mut visitor, krate);
}
/**
* Adds a new child item to the module definition of the parent node and
* returns its corresponding name bindings as well as the current parent.
* Or, if we're inside a block, creates (or reuses) an anonymous module
* corresponding to the innermost block ID and returns the name bindings
* as well as the newly-created parent.
*
* # Panics
*
* Panics if this node does not have a module definition and we are not inside
* a block.
*/
/// Adds a new child item to the module definition of the parent node and
/// returns its corresponding name bindings as well as the current parent.
/// Or, if we're inside a block, creates (or reuses) an anonymous module
/// corresponding to the innermost block ID and returns the name bindings
/// as well as the newly-created parent.
///
/// # Panics
///
/// Panics if this node does not have a module definition and we are not inside
/// a block.
fn add_child(&self,
name: Name,
reduced_graph_parent: ReducedGraphParent,
@ -1396,29 +1392,53 @@ impl<'a> Resolver<'a> {
// methods within to a new module, if the type was defined
// within this module.
// Create the module and add all methods.
match ty.node {
TyPath(ref path, _, _) if path.segments.len() == 1 => {
let mod_name = match ty.node {
TyPath(ref path, _) if path.segments.len() == 1 => {
// FIXME(18446) we should distinguish between the name of
// a trait and the name of an impl of that trait.
let mod_name = path.segments.last().unwrap().identifier.name;
Some(path.segments.last().unwrap().identifier.name)
}
TyObjectSum(ref lhs_ty, _) => {
match lhs_ty.node {
TyPath(ref path, _) if path.segments.len() == 1 => {
Some(path.segments.last().unwrap().identifier.name)
}
_ => {
None
}
}
}
_ => {
None
}
};
match mod_name {
None => {
self.resolve_error(ty.span,
"inherent implementations may \
only be implemented in the same \
module as the type they are \
implemented for")
}
Some(mod_name) => {
// Create the module and add all methods.
let parent_opt = parent.module().children.borrow()
.get(&mod_name).cloned();
.get(&mod_name).cloned();
let new_parent = match parent_opt {
// It already exists
Some(ref child) if child.get_module_if_available()
.is_some() &&
(child.get_module().kind.get() == ImplModuleKind ||
child.get_module().kind.get() == TraitModuleKind) => {
ModuleReducedGraphParent(child.get_module())
}
.is_some() &&
(child.get_module().kind.get() == ImplModuleKind ||
child.get_module().kind.get() == TraitModuleKind) => {
ModuleReducedGraphParent(child.get_module())
}
Some(ref child) if child.get_module_if_available()
.is_some() &&
child.get_module().kind.get() ==
EnumModuleKind => {
ModuleReducedGraphParent(child.get_module())
}
.is_some() &&
child.get_module().kind.get() ==
EnumModuleKind => {
ModuleReducedGraphParent(child.get_module())
}
// Create the module
_ => {
let name_bindings =
@ -1433,7 +1453,7 @@ impl<'a> Resolver<'a> {
let ns = TypeNS;
let is_public =
!name_bindings.defined_in_namespace(ns) ||
name_bindings.defined_in_public_namespace(ns);
name_bindings.defined_in_public_namespace(ns);
name_bindings.define_module(parent_link,
Some(def_id),
@ -1459,21 +1479,21 @@ impl<'a> Resolver<'a> {
ForbidDuplicateValues,
method.span);
let def = match method.pe_explicit_self()
.node {
SelfStatic => {
// Static methods become
// `DefStaticMethod`s.
DefStaticMethod(local_def(method.id),
FromImpl(local_def(item.id)))
}
_ => {
// Non-static methods become
// `DefMethod`s.
DefMethod(local_def(method.id),
None,
FromImpl(local_def(item.id)))
}
};
.node {
SelfStatic => {
// Static methods become
// `DefStaticMethod`s.
DefStaticMethod(local_def(method.id),
FromImpl(local_def(item.id)))
}
_ => {
// Non-static methods become
// `DefMethod`s.
DefMethod(local_def(method.id),
None,
FromImpl(local_def(item.id)))
}
};
// NB: not IMPORTABLE
let modifiers = if method.pe_vis() == ast::Public {
@ -1496,7 +1516,7 @@ impl<'a> Resolver<'a> {
ForbidDuplicateTypesAndModules,
typedef.span);
let def = DefAssociatedTy(local_def(
typedef.id));
typedef.id));
// NB: not IMPORTABLE
let modifiers = if typedef.vis == ast::Public {
PUBLIC
@ -1511,13 +1531,6 @@ impl<'a> Resolver<'a> {
}
}
}
_ => {
self.resolve_error(ty.span,
"inherent implementations may \
only be implemented in the same \
module as the type they are \
implemented for")
}
}
parent
@ -4725,7 +4738,7 @@ impl<'a> Resolver<'a> {
// type, the result will be that the type name resolves to a module but not
// a type (shadowing any imported modules or types with this name), leading
// to weird user-visible bugs. So we ward this off here. See #15060.
TyPath(ref path, _, path_id) => {
TyPath(ref path, path_id) => {
match self.def_map.borrow().get(&path_id) {
// FIXME: should we catch other options and give more precise errors?
Some(&DefMod(_)) => {
@ -4891,7 +4904,7 @@ impl<'a> Resolver<'a> {
// Like path expressions, the interpretation of path types depends
// on whether the path has multiple elements in it or not.
TyPath(ref path, ref bounds, path_id) => {
TyPath(ref path, path_id) => {
// This is a path in the type namespace. Walk through scopes
// looking for it.
let mut result_def = None;
@ -4961,11 +4974,12 @@ impl<'a> Resolver<'a> {
self.resolve_error(ty.span, msg.as_slice());
}
}
}
bounds.as_ref().map(|bound_vec| {
self.resolve_type_parameter_bounds(ty.id, bound_vec,
TyObjectSum(ref ty, ref bound_vec) => {
self.resolve_type(&**ty);
self.resolve_type_parameter_bounds(ty.id, bound_vec,
TraitBoundingTypeParameter);
});
}
TyQPath(ref qpath) => {
@ -5602,7 +5616,7 @@ impl<'a> Resolver<'a> {
fn extract_path_and_node_id(t: &Ty, allow: FallbackChecks)
-> Option<(Path, NodeId, FallbackChecks)> {
match t.node {
TyPath(ref path, _, node_id) => Some((path.clone(), node_id, allow)),
TyPath(ref path, node_id) => Some((path.clone(), node_id, allow)),
TyPtr(ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, OnlyTraitAndStatics),
TyRptr(_, ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, allow),
// This doesn't handle the remaining `Ty` variants as they are not

View File

@ -8,14 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Name resolution for lifetimes.
*
* Name resolution for lifetimes follows MUCH simpler rules than the
* full resolve. For example, lifetime names are never exported or
* used between functions, and they operate in a purely top-down
* way. Therefore we break lifetime name resolution into a separate pass.
*/
//! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows MUCH simpler rules than the
//! full resolve. For example, lifetime names are never exported or
//! used between functions, and they operate in a purely top-down
//! way. Therefore we break lifetime name resolution into a separate pass.
pub use self::DefRegion::*;
use self::ScopeChain::*;
@ -162,7 +160,7 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
visit::walk_ty(this, ty);
});
}
ast::TyPath(ref path, ref opt_bounds, id) => {
ast::TyPath(ref path, id) => {
// if this path references a trait, then this will resolve to
// a trait ref, which introduces a binding scope.
match self.def_map.borrow().get(&id) {
@ -170,13 +168,6 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
self.with(LateScope(&Vec::new(), self.scope), |this| {
this.visit_path(path, id);
});
match *opt_bounds {
Some(ref bounds) => {
visit::walk_ty_param_bounds_helper(self, bounds);
}
None => { }
}
}
_ => {
visit::walk_ty(self, ty);
@ -254,34 +245,27 @@ impl<'a> LifetimeContext<'a> {
}
/// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
///
/// Handles visiting fns and methods. These are a bit complicated because we must distinguish
/// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
/// within type bounds; those are early bound lifetimes, and the rest are late bound.
///
/// For example:
///
/// fn foo<'a,'b,'c,T:Trait<'b>>(...)
///
/// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
/// lifetimes may be interspersed together.
///
/// If early bound lifetimes are present, we separate them into their own list (and likewise
/// for late bound). They will be numbered sequentially, starting from the lowest index that is
/// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
/// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
/// ordering is not important there.
fn visit_early_late(&mut self,
early_space: subst::ParamSpace,
generics: &ast::Generics,
walk: |&mut LifetimeContext|) {
/*!
* Handles visiting fns and methods. These are a bit
* complicated because we must distinguish early- vs late-bound
* lifetime parameters. We do this by checking which lifetimes
* appear within type bounds; those are early bound lifetimes,
* and the rest are late bound.
*
* For example:
*
* fn foo<'a,'b,'c,T:Trait<'b>>(...)
*
* Here `'a` and `'c` are late bound but `'b` is early
* bound. Note that early- and late-bound lifetimes may be
* interspersed together.
*
* If early bound lifetimes are present, we separate them into
* their own list (and likewise for late bound). They will be
* numbered sequentially, starting from the lowest index that
* is already in scope (for a fn item, that will be 0, but for
* a method it might not be). Late bound lifetimes are
* resolved by name and associated with a binder id (`binder_id`), so
* the ordering is not important there.
*/
let referenced_idents = early_bound_lifetime_names(generics);
debug!("visit_early_late: referenced_idents={}",
@ -479,13 +463,9 @@ pub fn early_bound_lifetimes<'a>(generics: &'a ast::Generics) -> Vec<ast::Lifeti
.collect()
}
/// Given a set of generic declarations, returns a list of names containing all early bound
/// lifetime names for those generics. (In fact, this list may also contain other names.)
fn early_bound_lifetime_names(generics: &ast::Generics) -> Vec<ast::Name> {
/*!
* Given a set of generic declarations, returns a list of names
* containing all early bound lifetime names for those
* generics. (In fact, this list may also contain other names.)
*/
// Create two lists, dividing the lifetimes into early/late bound.
// Initially, all of them are considered late, but we will move
// things from late into early as we go if we find references to

View File

@ -24,22 +24,19 @@ use syntax::codemap::{Span, DUMMY_SP};
///////////////////////////////////////////////////////////////////////////
/**
* A substitution mapping type/region parameters to new values. We
* identify each in-scope parameter by an *index* and a *parameter
* space* (which indices where the parameter is defined; see
* `ParamSpace`).
*/
/// A substitution mapping type/region parameters to new values. We
/// identify each in-scope parameter by an *index* and a *parameter
/// space* (which indicates where the parameter is defined; see
/// `ParamSpace`).
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct Substs<'tcx> {
pub types: VecPerParamSpace<Ty<'tcx>>,
pub regions: RegionSubsts,
}
/**
* Represents the values to use when substituting lifetime parameters.
* If the value is `ErasedRegions`, then this subst is occurring during
* trans, and all region parameters will be replaced with `ty::ReStatic`. */
/// Represents the values to use when substituting lifetime parameters.
/// If the value is `ErasedRegions`, then this subst is occurring during
/// trans, and all region parameters will be replaced with `ty::ReStatic`.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub enum RegionSubsts {
ErasedRegions,
@ -131,26 +128,18 @@ pub fn self_ty(&self) -> Option<Ty<'tcx>> {
Substs { types: types, regions: ErasedRegions }
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
/*!
* Since ErasedRegions are only to be used in trans, most of
* the compiler can use this method to easily access the set
* of region substitutions.
*/
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref r) => r
}
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
/*!
* Since ErasedRegions are only to be used in trans, most of
* the compiler can use this method to easily access the set
* of region substitutions.
*/
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref mut r) => r
@ -226,11 +215,9 @@ impl ParamSpace {
}
}
/**
* Vector of things sorted by param space. Used to keep
* the set of things declared on the type, self, or method
* distinct.
*/
/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[deriving(PartialEq, Eq, Clone, Hash, Encodable, Decodable)]
pub struct VecPerParamSpace<T> {
// This was originally represented as a tuple with one Vec<T> for
@ -250,10 +237,8 @@ pub struct VecPerParamSpace<T> {
content: Vec<T>,
}
/**
* The `split` function converts one `VecPerParamSpace` into this
* `SeparateVecsPerParamSpace` structure.
*/
/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
pub types: Vec<T>,
pub selfs: Vec<T>,
@ -688,59 +673,49 @@ impl<'a,'tcx> SubstFolder<'a,'tcx> {
self.shift_regions_through_binders(ty)
}
/// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
/// when we are substituting a type with escaping regions into a context where we have passed
/// through region binders. That's quite a mouthful. Let's see an example:
///
/// ```
/// type Func<A> = fn(A);
/// type MetaFunc = for<'a> fn(Func<&'a int>)
/// ```
///
/// The type `MetaFunc`, when fully expanded, will be
///
/// for<'a> fn(fn(&'a int))
/// ^~ ^~ ^~~
/// | | |
/// | | DebruijnIndex of 2
/// Binders
///
/// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
/// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
/// over the inner binder (remember that we count Debruijn indices from 1). However, in the
/// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a
/// debruijn index of 1. It's only during the substitution that we can see we must increase the
/// depth by 1 to account for the binder that we passed through.
///
/// As a second example, consider this twist:
///
/// ```
/// type FuncTuple<A> = (A,fn(A));
/// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
/// ```
///
/// Here the final type will be:
///
/// for<'a> fn((&'a int, fn(&'a int)))
/// ^~~ ^~~
/// | |
/// DebruijnIndex of 1 |
/// DebruijnIndex of 2
///
/// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
/// first case we do not increase the Debruijn index and in the second case we do. The reason
/// is that only in the second case have we passed through a fn binder.
fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
/*!
* It is sometimes necessary to adjust the debruijn indices
* during substitution. This occurs when we are substituting a
* type with escaping regions into a context where we have
* passed through region binders. That's quite a
* mouthful. Let's see an example:
*
* ```
* type Func<A> = fn(A);
* type MetaFunc = for<'a> fn(Func<&'a int>)
* ```
*
* The type `MetaFunc`, when fully expanded, will be
*
* for<'a> fn(fn(&'a int))
* ^~ ^~ ^~~
* | | |
* | | DebruijnIndex of 2
* Binders
*
* Here the `'a` lifetime is bound in the outer function, but
* appears as an argument of the inner one. Therefore, that
* appearance will have a DebruijnIndex of 2, because we must
* skip over the inner binder (remember that we count Debruijn
* indices from 1). However, in the definition of `MetaFunc`,
* the binder is not visible, so the type `&'a int` will have
* a debruijn index of 1. It's only during the substitution
* that we can see we must increase the depth by 1 to account
* for the binder that we passed through.
*
* As a second example, consider this twist:
*
* ```
* type FuncTuple<A> = (A,fn(A));
* type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
* ```
*
* Here the final type will be:
*
* for<'a> fn((&'a int, fn(&'a int)))
* ^~~ ^~~
* | |
* DebruijnIndex of 1 |
* DebruijnIndex of 2
*
* As indicated in the diagram, here the same type `&'a int`
* is substituted once, but in the first case we do not
* increase the Debruijn index and in the second case we
* do. The reason is that only in the second case have we
* passed through a fn binder.
*/
debug!("shift_regions(ty={}, region_binders_passed={}, type_has_escaping_regions={})",
ty.repr(self.tcx()), self.region_binders_passed, ty::type_has_escaping_regions(ty));

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See `doc.rs` for high-level documentation */
//! See `doc.rs` for high-level documentation
use super::SelectionContext;
use super::Obligation;

View File

@ -8,403 +8,399 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# TRAIT RESOLUTION
This document describes the general process and points out some non-obvious
things.
## Major concepts
Trait resolution is the process of pairing up an impl with each
reference to a trait. So, for example, if there is a generic function like:
fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
and then a call to that function:
let v: Vec<int> = clone_slice([1, 2, 3].as_slice())
it is the job of trait resolution to figure out (in this case)
whether there exists an impl of `int : Clone`
Note that in some cases, like generic functions, we may not be able to
find a specific impl, but we can figure out that the caller must
provide an impl. To see what I mean, consider the body of `clone_slice`:
fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
let mut v = Vec::new();
for e in x.iter() {
v.push((*e).clone()); // (*)
}
}
The line marked `(*)` is only legal if `T` (the type of `*e`)
implements the `Clone` trait. Naturally, since we don't know what `T`
is, we can't find the specific impl; but based on the bound `T:Clone`,
we can say that there exists an impl which the caller must provide.
We use the term *obligation* to refer to a trait reference in need of
an impl.
## Overview
Trait resolution consists of three major parts:
- SELECTION: Deciding how to resolve a specific obligation. For
example, selection might decide that a specific obligation can be
resolved by employing an impl which matches the self type, or by
using a parameter bound. In the case of an impl, Selecting one
obligation can create *nested obligations* because of where clauses
on the impl itself. It may also require evaluating those nested
obligations to resolve ambiguities.
- FULFILLMENT: The fulfillment code is what tracks that obligations
are completely fulfilled. Basically it is a worklist of obligations
to be selected: once selection is successful, the obligation is
removed from the worklist and any nested obligations are enqueued.
- COHERENCE: The coherence checks are intended to ensure that there
are never overlapping impls, where two impls could be used with
equal precedence.
## Selection
Selection is the process of deciding whether an obligation can be
resolved and, if so, how it is to be resolved (via impl, where clause, etc).
The main interface is the `select()` function, which takes an obligation
and returns a `SelectionResult`. There are three possible outcomes:
- `Ok(Some(selection))` -- yes, the obligation can be resolved, and
`selection` indicates how. If the impl was resolved via an impl,
then `selection` may also indicate nested obligations that are required
by the impl.
- `Ok(None)` -- we are not yet sure whether the obligation can be
resolved or not. This happens most commonly when the obligation
contains unbound type variables.
- `Err(err)` -- the obligation definitely cannot be resolved due to a
type error, or because there are no impls that could possibly apply,
etc.
The basic algorithm for selection is broken into two big phases:
candidate assembly and confirmation.
### Candidate assembly
Searches for impls/where-clauses/etc that might
possibly be used to satisfy the obligation. Each of those is called
a candidate. To avoid ambiguity, we want to find exactly one
candidate that is definitively applicable. In some cases, we may not
know whether an impl/where-clause applies or not -- this occurs when
the obligation contains unbound inference variables.
The basic idea for candidate assembly is to do a first pass in which
we identify all possible candidates. During this pass, all that we do
is try and unify the type parameters. (In particular, we ignore any
nested where clauses.) Presuming that this unification succeeds, the
impl is added as a candidate.
Once this first pass is done, we can examine the set of candidates. If
it is a singleton set, then we are done: this is the only impl in
scope that could possibly apply. Otherwise, we can winnow down the set
of candidates by using where clauses and other conditions. If this
reduced set yields a single, unambiguous entry, we're good to go,
otherwise the result is considered ambiguous.
#### The basic process: Inferring based on the impls we see
This process is easier if we work through some examples. Consider
the following trait:
```
trait Convert<Target> {
fn convert(&self) -> Target;
}
```
This trait just has one method. It's about as simple as it gets. It
converts from the (implicit) `Self` type to the `Target` type. If we
wanted to permit conversion between `int` and `uint`, we might
implement `Convert` like so:
```rust
impl Convert<uint> for int { ... } // int -> uint
impl Convert<int> for uint { ... } // uint -> int
```
Now imagine there is some code like the following:
```rust
let x: int = ...;
let y = x.convert();
```
The call to convert will generate a trait reference `Convert<$Y> for
int`, where `$Y` is the type variable representing the type of
`y`. When we match this against the two impls we can see, we will find
that only one remains: `Convert<uint> for int`. Therefore, we can
select this impl, which will cause the type of `$Y` to be unified to
`uint`. (Note that while assembling candidates, we do the initial
unifications in a transaction, so that they don't affect one another.)
There are tests to this effect in src/test/run-pass:
traits-multidispatch-infer-convert-source-and-target.rs
traits-multidispatch-infer-convert-target.rs
#### Winnowing: Resolving ambiguities
But what happens if there are multiple impls where all the types
unify? Consider this example:
```rust
trait Get {
fn get(&self) -> Self;
}
impl<T:Copy> Get for T {
fn get(&self) -> T { *self }
}
impl<T:Get> Get for Box<T> {
fn get(&self) -> Box<T> { box get_it(&**self) }
}
```
What happens when we invoke `get_it(&box 1_u16)`, for example? In this
case, the `Self` type is `Box<u16>` -- that unifies with both impls,
because the first applies to all types, and the second to all
boxes. In the olden days we'd have called this ambiguous. But what we
do now is do a second *winnowing* pass that considers where clauses
and attempts to remove candidates -- in this case, the first impl only
applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
then, we are left with just one candidate, so we can proceed. There is
a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
#### Matching
The subroutines that decide whether a particular impl/where-clause/etc
applies to a particular obligation. At the moment, this amounts to
unifying the self types, but in the future we may also recursively
consider some of the nested obligations, in the case of an impl.
#### Lifetimes and selection
Because of how lifetime inference works, it is not possible to
give back immediate feedback as to whether a unification or subtype
relationship between lifetimes holds or not. Therefore, lifetime
matching is *not* considered during selection. This is reflected in
the fact that subregion assignment is infallible. This may yield
lifetime constraints that will later be found to be in error (in
contrast, the non-lifetime-constraints have already been checked
during selection and can never cause an error, though naturally they
may lead to other errors downstream).
#### Where clauses
Besides an impl, the other major way to resolve an obligation is via a
where clause. The selection process is always given a *parameter
environment* which contains a list of where clauses, which are
basically obligations that we can assume are satisfiable. We will iterate
over that list and check whether our current obligation can be found
in that list, and if so it is considered satisfied. More precisely, we
want to check whether there is a where-clause obligation that is for
the same trait (or some subtrait) and for which the self types match,
using the definition of *matching* given above.
Consider this simple example:
trait A1 { ... }
trait A2 : A1 { ... }
trait B { ... }
fn foo<X:A2+B> { ... }
Clearly we can use methods offered by `A1`, `A2`, or `B` within the
body of `foo`. In each case, that will incur an obligation like `X :
A1` or `X : A2`. The parameter environment will contain two
where-clauses, `X : A2` and `X : B`. For each obligation, then, we
search this list of where-clauses. To resolve an obligation `X:A1`,
we would note that `X:A2` implies that `X:A1`.
### Confirmation
Confirmation unifies the output type parameters of the trait with the
values found in the obligation, possibly yielding a type error. If we
return to our example of the `Convert` trait from the previous
section, confirmation is where an error would be reported, because the
impl specified that `T` would be `uint`, but the obligation reported
`char`. Hence the result of selection would be an error.
### Selection during translation
During type checking, we do not store the results of trait selection.
We simply wish to verify that trait selection will succeed. Then
later, at trans time, when we have all concrete types available, we
can repeat the trait selection. In this case, we do not consider any
where-clauses to be in scope. We know that therefore each resolution
will resolve to a particular impl.
One interesting twist has to do with nested obligations. In general, in trans,
we only need to do a "shallow" selection for an obligation. That is, we wish to
identify which impl applies, but we do not (yet) need to decide how to select
any nested obligations. Nonetheless, we *do* currently do a complete resolution,
and that is because it can sometimes inform the results of type inference. That is,
we do not have the full substitutions in terms of the type variables of the impl available
to us, so we must run trait selection to figure everything out.
Here is an example:
trait Foo { ... }
impl<U,T:Bar<U>> Foo for Vec<T> { ... }
impl Bar<uint> for int { ... }
After one shallow round of selection for an obligation like `Vec<int>
: Foo`, we would know which impl we want, and we would know that
`T=int`, but we do not know the type of `U`. We must select the
nested obligation `int : Bar<U>` to find out that `U=uint`.
It would be good to only do *just as much* nested resolution as
necessary. Currently, though, we just do a full resolution.
## Method matching
Method dispatch follows a slightly different path than normal trait
selection. This is because it must account for the transformed self
type of the receiver and various other complications. The procedure is
described in `select.rs` in the "METHOD MATCHING" section.
# Caching and subtle considerations therewith
In general we attempt to cache the results of trait selection. This
is a somewhat complex process. Part of the reason for this is that we
want to be able to cache results even when all the types in the trait
reference are not fully known. In that case, it may happen that the
trait selection process is also influencing type variables, so we have
to be able to not only cache the *result* of the selection process,
but *replay* its effects on the type variables.
## An example
The high-level idea of how the cache works is that we first replace
all unbound inference variables with skolemized versions. Therefore,
if we had a trait reference `uint : Foo<$1>`, where `$n` is an unbound
inference variable, we might replace it with `uint : Foo<%0>`, where
`%n` is a skolemized type. We would then look this up in the cache.
If we found a hit, the hit would tell us the immediate next step to
take in the selection process: i.e., apply impl #22, or apply where
clause `X : Foo<Y>`. Let's say in this case there is no hit.
Therefore, we search through impls and where clauses and so forth, and
we come to the conclusion that the only possible impl is this one,
with def-id 22:
impl Foo<int> for uint { ... } // Impl #22
We would then record in the cache `uint : Foo<%0> ==>
ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
would (as a side-effect) unify `$1` with `int`.
Now, at some later time, we might come along and see a `uint :
Foo<$3>`. When skolemized, this would yield `uint : Foo<%0>`, just as
before, and hence the cache lookup would succeed, yielding
`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
(as a side-effect) unify `$3` with `int`.
## Where clauses and the local vs global cache
One subtle interaction is that the results of trait lookup will vary
depending on what where clauses are in scope. Therefore, we actually
have *two* caches, a local and a global cache. The local cache is
attached to the `ParameterEnvironment` and the global cache attached
to the `tcx`. We use the local cache whenever the result might depend
on the where clauses that are in scope. The determination of which
cache to use is done by the method `pick_candidate_cache` in
`select.rs`.
There are two cases where we currently use the local cache. The
current rules are probably more conservative than necessary.
### Trait references that involve parameter types
The most obvious case where you need the local environment is
when the trait reference includes parameter types. For example,
consider the following function:
impl<T> Vec<T> {
fn foo(x: T)
where T : Foo
{ ... }
fn bar(x: T)
{ ... }
}
If there is an obligation `T : Foo`, or `int : Bar<T>`, or whatever,
clearly the results from `foo` and `bar` are potentially different,
since the set of where clauses in scope are different.
### Trait references with unbound variables when where clauses are in scope
There is another less obvious interaction which involves unbound variables
where *only* where clauses are in scope (no impls). This manifested as
issue #18209 (`run-pass/trait-cache-issue-18209.rs`). Consider
this snippet:
```
pub trait Foo {
fn load_from() -> Box<Self>;
fn load() -> Box<Self> {
Foo::load_from()
}
}
```
The default method will incur an obligation `$0 : Foo` from the call
to `load_from`. If there are no impls, this can be eagerly resolved to
`VtableParam(Self : Foo)` and cached. Because the trait reference
doesn't involve any parameter types (only the resolution does), this
result was stored in the global cache, causing later calls to
`Foo::load_from()` to get nonsense.
To fix this, we always use the local cache if there are unbound
variables and where clauses in scope. This is more conservative than
necessary as far as I can tell. However, it still seems to be a simple
rule and I observe ~99% hit rate on rustc, so it doesn't seem to hurt
us in particular.
Here is an example of the kind of subtle case that I would be worried
about with a more complex rule (although this particular case works
out ok). Imagine the trait reference doesn't directly reference a
where clause, but the where clause plays a role in the winnowing
phase. Something like this:
```
pub trait Foo<T> { ... }
pub trait Bar { ... }
impl<U,T:Bar> Foo<U> for T { ... } // Impl A
impl Foo<char> for uint { ... } // Impl B
```
Now, in some function, we have no where clauses in scope, and we have
an obligation `$1 : Foo<$0>`. We might then conclude that `$0=char`
and `$1=uint`: this is because for impl A to apply, `uint:Bar` would
have to hold, and we know it does not or else the coherence check
would have failed. So we might enter into our global cache: `$1 :
Foo<$0> => Impl B`. Then we come along in a different scope, where a
generic type `A` is around with the bound `A:Bar`. Now suddenly the
impl is viable.
The flaw in this imaginary DOOMSDAY SCENARIO is that we would not
currently conclude that `$1 : Foo<$0>` implies that `$0 == uint` and
`$1 == char`, even though it is true that (absent type parameters)
there is no other type the user could enter. However, it is not
*completely* implausible that we *could* draw this conclusion in the
future; we wouldn't have to guess types, in particular, we could be
led by the impls.
*/
//! # TRAIT RESOLUTION
//!
//! This document describes the general process and points out some non-obvious
//! things.
//!
//! ## Major concepts
//!
//! Trait resolution is the process of pairing up an impl with each
//! reference to a trait. So, for example, if there is a generic function like:
//!
//! fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
//!
//! and then a call to that function:
//!
//! let v: Vec<int> = clone_slice([1, 2, 3].as_slice())
//!
//! it is the job of trait resolution to figure out (in this case)
//! whether there exists an impl of `int : Clone`
//!
//! Note that in some cases, like generic functions, we may not be able to
//! find a specific impl, but we can figure out that the caller must
//! provide an impl. To see what I mean, consider the body of `clone_slice`:
//!
//! fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
//! let mut v = Vec::new();
//! for e in x.iter() {
//! v.push((*e).clone()); // (*)
//! }
//! }
//!
//! The line marked `(*)` is only legal if `T` (the type of `*e`)
//! implements the `Clone` trait. Naturally, since we don't know what `T`
//! is, we can't find the specific impl; but based on the bound `T:Clone`,
//! we can say that there exists an impl which the caller must provide.
//!
//! We use the term *obligation* to refer to a trait reference in need of
//! an impl.
//!
//! ## Overview
//!
//! Trait resolution consists of three major parts:
//!
//! - SELECTION: Deciding how to resolve a specific obligation. For
//! example, selection might decide that a specific obligation can be
//! resolved by employing an impl which matches the self type, or by
//! using a parameter bound. In the case of an impl, Selecting one
//! obligation can create *nested obligations* because of where clauses
//! on the impl itself. It may also require evaluating those nested
//! obligations to resolve ambiguities.
//!
//! - FULFILLMENT: The fulfillment code is what tracks that obligations
//! are completely fulfilled. Basically it is a worklist of obligations
//! to be selected: once selection is successful, the obligation is
//! removed from the worklist and any nested obligations are enqueued.
//!
//! - COHERENCE: The coherence checks are intended to ensure that there
//! are never overlapping impls, where two impls could be used with
//! equal precedence.
//!
//! ## Selection
//!
//! Selection is the process of deciding whether an obligation can be
//! resolved and, if so, how it is to be resolved (via impl, where clause, etc).
//! The main interface is the `select()` function, which takes an obligation
//! and returns a `SelectionResult`. There are three possible outcomes:
//!
//! - `Ok(Some(selection))` -- yes, the obligation can be resolved, and
//! `selection` indicates how. If the impl was resolved via an impl,
//! then `selection` may also indicate nested obligations that are required
//! by the impl.
//!
//! - `Ok(None)` -- we are not yet sure whether the obligation can be
//! resolved or not. This happens most commonly when the obligation
//! contains unbound type variables.
//!
//! - `Err(err)` -- the obligation definitely cannot be resolved due to a
//! type error, or because there are no impls that could possibly apply,
//! etc.
//!
//! The basic algorithm for selection is broken into two big phases:
//! candidate assembly and confirmation.
//!
//! ### Candidate assembly
//!
//! Searches for impls/where-clauses/etc that might
//! possibly be used to satisfy the obligation. Each of those is called
//! a candidate. To avoid ambiguity, we want to find exactly one
//! candidate that is definitively applicable. In some cases, we may not
//! know whether an impl/where-clause applies or not -- this occurs when
//! the obligation contains unbound inference variables.
//!
//! The basic idea for candidate assembly is to do a first pass in which
//! we identify all possible candidates. During this pass, all that we do
//! is try and unify the type parameters. (In particular, we ignore any
//! nested where clauses.) Presuming that this unification succeeds, the
//! impl is added as a candidate.
//!
//! Once this first pass is done, we can examine the set of candidates. If
//! it is a singleton set, then we are done: this is the only impl in
//! scope that could possibly apply. Otherwise, we can winnow down the set
//! of candidates by using where clauses and other conditions. If this
//! reduced set yields a single, unambiguous entry, we're good to go,
//! otherwise the result is considered ambiguous.
//!
//! #### The basic process: Inferring based on the impls we see
//!
//! This process is easier if we work through some examples. Consider
//! the following trait:
//!
//! ```
//! trait Convert<Target> {
//! fn convert(&self) -> Target;
//! }
//! ```
//!
//! This trait just has one method. It's about as simple as it gets. It
//! converts from the (implicit) `Self` type to the `Target` type. If we
//! wanted to permit conversion between `int` and `uint`, we might
//! implement `Convert` like so:
//!
//! ```rust
//! impl Convert<uint> for int { ... } // int -> uint
//! impl Convert<int> for uint { ... } // uint -> int
//! ```
//!
//! Now imagine there is some code like the following:
//!
//! ```rust
//! let x: int = ...;
//! let y = x.convert();
//! ```
//!
//! The call to convert will generate a trait reference `Convert<$Y> for
//! int`, where `$Y` is the type variable representing the type of
//! `y`. When we match this against the two impls we can see, we will find
//! that only one remains: `Convert<uint> for int`. Therefore, we can
//! select this impl, which will cause the type of `$Y` to be unified to
//! `uint`. (Note that while assembling candidates, we do the initial
//! unifications in a transaction, so that they don't affect one another.)
//!
//! There are tests to this effect in src/test/run-pass:
//!
//! traits-multidispatch-infer-convert-source-and-target.rs
//! traits-multidispatch-infer-convert-target.rs
//!
//! #### Winnowing: Resolving ambiguities
//!
//! But what happens if there are multiple impls where all the types
//! unify? Consider this example:
//!
//! ```rust
//! trait Get {
//! fn get(&self) -> Self;
//! }
//!
//! impl<T:Copy> Get for T {
//! fn get(&self) -> T { *self }
//! }
//!
//! impl<T:Get> Get for Box<T> {
//! fn get(&self) -> Box<T> { box get_it(&**self) }
//! }
//! ```
//!
//! What happens when we invoke `get_it(&box 1_u16)`, for example? In this
//! case, the `Self` type is `Box<u16>` -- that unifies with both impls,
//! because the first applies to all types, and the second to all
//! boxes. In the olden days we'd have called this ambiguous. But what we
//! do now is do a second *winnowing* pass that considers where clauses
//! and attempts to remove candidates -- in this case, the first impl only
//! applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
//! then, we are left with just one candidate, so we can proceed. There is
//! a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
//!
//! #### Matching
//!
//! The subroutines that decide whether a particular impl/where-clause/etc
//! applies to a particular obligation. At the moment, this amounts to
//! unifying the self types, but in the future we may also recursively
//! consider some of the nested obligations, in the case of an impl.
//!
//! #### Lifetimes and selection
//!
//! Because of how lifetime inference works, it is not possible to
//! give back immediate feedback as to whether a unification or subtype
//! relationship between lifetimes holds or not. Therefore, lifetime
//! matching is *not* considered during selection. This is reflected in
//! the fact that subregion assignment is infallible. This may yield
//! lifetime constraints that will later be found to be in error (in
//! contrast, the non-lifetime-constraints have already been checked
//! during selection and can never cause an error, though naturally they
//! may lead to other errors downstream).
//!
//! #### Where clauses
//!
//! Besides an impl, the other major way to resolve an obligation is via a
//! where clause. The selection process is always given a *parameter
//! environment* which contains a list of where clauses, which are
//! basically obligations that we can assume are satisfiable. We will iterate
//! over that list and check whether our current obligation can be found
//! in that list, and if so it is considered satisfied. More precisely, we
//! want to check whether there is a where-clause obligation that is for
//! the same trait (or some subtrait) and for which the self types match,
//! using the definition of *matching* given above.
//!
//! Consider this simple example:
//!
//! trait A1 { ... }
//! trait A2 : A1 { ... }
//!
//! trait B { ... }
//!
//! fn foo<X:A2+B> { ... }
//!
//! Clearly we can use methods offered by `A1`, `A2`, or `B` within the
//! body of `foo`. In each case, that will incur an obligation like `X :
//! A1` or `X : A2`. The parameter environment will contain two
//! where-clauses, `X : A2` and `X : B`. For each obligation, then, we
//! search this list of where-clauses. To resolve an obligation `X:A1`,
//! we would note that `X:A2` implies that `X:A1`.
//!
//! ### Confirmation
//!
//! Confirmation unifies the output type parameters of the trait with the
//! values found in the obligation, possibly yielding a type error. If we
//! return to our example of the `Convert` trait from the previous
//! section, confirmation is where an error would be reported, because the
//! impl specified that `T` would be `uint`, but the obligation reported
//! `char`. Hence the result of selection would be an error.
//!
//! ### Selection during translation
//!
//! During type checking, we do not store the results of trait selection.
//! We simply wish to verify that trait selection will succeed. Then
//! later, at trans time, when we have all concrete types available, we
//! can repeat the trait selection. In this case, we do not consider any
//! where-clauses to be in scope. We know that therefore each resolution
//! will resolve to a particular impl.
//!
//! One interesting twist has to do with nested obligations. In general, in trans,
//! we only need to do a "shallow" selection for an obligation. That is, we wish to
//! identify which impl applies, but we do not (yet) need to decide how to select
//! any nested obligations. Nonetheless, we *do* currently do a complete resolution,
//! and that is because it can sometimes inform the results of type inference. That is,
//! we do not have the full substitutions in terms of the type variables of the impl available
//! to us, so we must run trait selection to figure everything out.
//!
//! Here is an example:
//!
//! trait Foo { ... }
//! impl<U,T:Bar<U>> Foo for Vec<T> { ... }
//!
//! impl Bar<uint> for int { ... }
//!
//! After one shallow round of selection for an obligation like `Vec<int>
//! : Foo`, we would know which impl we want, and we would know that
//! `T=int`, but we do not know the type of `U`. We must select the
//! nested obligation `int : Bar<U>` to find out that `U=uint`.
//!
//! It would be good to only do *just as much* nested resolution as
//! necessary. Currently, though, we just do a full resolution.
//!
//! ## Method matching
//!
//! Method dispatch follows a slightly different path than normal trait
//! selection. This is because it must account for the transformed self
//! type of the receiver and various other complications. The procedure is
//! described in `select.rs` in the "METHOD MATCHING" section.
//!
//! # Caching and subtle considerations therewith
//!
//! In general we attempt to cache the results of trait selection. This
//! is a somewhat complex process. Part of the reason for this is that we
//! want to be able to cache results even when all the types in the trait
//! reference are not fully known. In that case, it may happen that the
//! trait selection process is also influencing type variables, so we have
//! to be able to not only cache the *result* of the selection process,
//! but *replay* its effects on the type variables.
//!
//! ## An example
//!
//! The high-level idea of how the cache works is that we first replace
//! all unbound inference variables with skolemized versions. Therefore,
//! if we had a trait reference `uint : Foo<$1>`, where `$n` is an unbound
//! inference variable, we might replace it with `uint : Foo<%0>`, where
//! `%n` is a skolemized type. We would then look this up in the cache.
//! If we found a hit, the hit would tell us the immediate next step to
//! take in the selection process: i.e., apply impl #22, or apply where
//! clause `X : Foo<Y>`. Let's say in this case there is no hit.
//! Therefore, we search through impls and where clauses and so forth, and
//! we come to the conclusion that the only possible impl is this one,
//! with def-id 22:
//!
//! impl Foo<int> for uint { ... } // Impl #22
//!
//! We would then record in the cache `uint : Foo<%0> ==>
//! ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
//! would (as a side-effect) unify `$1` with `int`.
//!
//! Now, at some later time, we might come along and see a `uint :
//! Foo<$3>`. When skolemized, this would yield `uint : Foo<%0>`, just as
//! before, and hence the cache lookup would succeed, yielding
//! `ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
//! (as a side-effect) unify `$3` with `int`.
//!
//! ## Where clauses and the local vs global cache
//!
//! One subtle interaction is that the results of trait lookup will vary
//! depending on what where clauses are in scope. Therefore, we actually
//! have *two* caches, a local and a global cache. The local cache is
//! attached to the `ParameterEnvironment` and the global cache attached
//! to the `tcx`. We use the local cache whenever the result might depend
//! on the where clauses that are in scope. The determination of which
//! cache to use is done by the method `pick_candidate_cache` in
//! `select.rs`.
//!
//! There are two cases where we currently use the local cache. The
//! current rules are probably more conservative than necessary.
//!
//! ### Trait references that involve parameter types
//!
//! The most obvious case where you need the local environment is
//! when the trait reference includes parameter types. For example,
//! consider the following function:
//!
//! impl<T> Vec<T> {
//! fn foo(x: T)
//! where T : Foo
//! { ... }
//!
//! fn bar(x: T)
//! { ... }
//! }
//!
//! If there is an obligation `T : Foo`, or `int : Bar<T>`, or whatever,
//! clearly the results from `foo` and `bar` are potentially different,
//! since the set of where clauses in scope are different.
//!
//! ### Trait references with unbound variables when where clauses are in scope
//!
//! There is another less obvious interaction which involves unbound variables
//! where *only* where clauses are in scope (no impls). This manifested as
//! issue #18209 (`run-pass/trait-cache-issue-18209.rs`). Consider
//! this snippet:
//!
//! ```
//! pub trait Foo {
//! fn load_from() -> Box<Self>;
//! fn load() -> Box<Self> {
//! Foo::load_from()
//! }
//! }
//! ```
//!
//! The default method will incur an obligation `$0 : Foo` from the call
//! to `load_from`. If there are no impls, this can be eagerly resolved to
//! `VtableParam(Self : Foo)` and cached. Because the trait reference
//! doesn't involve any parameter types (only the resolution does), this
//! result was stored in the global cache, causing later calls to
//! `Foo::load_from()` to get nonsense.
//!
//! To fix this, we always use the local cache if there are unbound
//! variables and where clauses in scope. This is more conservative than
//! necessary as far as I can tell. However, it still seems to be a simple
//! rule and I observe ~99% hit rate on rustc, so it doesn't seem to hurt
//! us in particular.
//!
//! Here is an example of the kind of subtle case that I would be worried
//! about with a more complex rule (although this particular case works
//! out ok). Imagine the trait reference doesn't directly reference a
//! where clause, but the where clause plays a role in the winnowing
//! phase. Something like this:
//!
//! ```
//! pub trait Foo<T> { ... }
//! pub trait Bar { ... }
//! impl<U,T:Bar> Foo<U> for T { ... } // Impl A
//! impl Foo<char> for uint { ... } // Impl B
//! ```
//!
//! Now, in some function, we have no where clauses in scope, and we have
//! an obligation `$1 : Foo<$0>`. We might then conclude that `$0=char`
//! and `$1=uint`: this is because for impl A to apply, `uint:Bar` would
//! have to hold, and we know it does not or else the coherence check
//! would have failed. So we might enter into our global cache: `$1 :
//! Foo<$0> => Impl B`. Then we come along in a different scope, where a
//! generic type `A` is around with the bound `A:Bar`. Now suddenly the
//! impl is viable.
//!
//! The flaw in this imaginary DOOMSDAY SCENARIO is that we would not
//! currently conclude that `$1 : Foo<$0>` implies that `$0 == uint` and
//! `$1 == char`, even though it is true that (absent type parameters)
//! there is no other type the user could enter. However, it is not
//! *completely* implausible that we *could* draw this conclusion in the
//! future; we wouldn't have to guess types, in particular, we could be
//! led by the impls.

View File

@ -19,18 +19,16 @@ use super::FulfillmentError;
use super::CodeSelectionError;
use super::select::SelectionContext;
/**
* The fulfillment context is used to drive trait resolution. It
* consists of a list of obligations that must be (eventually)
* satisfied. The job is to track which are satisfied, which yielded
* errors, and which are still pending. At any point, users can call
* `select_where_possible`, and the fulfilment context will try to do
* selection, retaining only those obligations that remain
* ambiguous. This may be helpful in pushing type inference
* along. Once all type inference constraints have been generated, the
* method `select_all_or_error` can be used to report any remaining
* ambiguous cases as errors.
*/
/// The fulfillment context is used to drive trait resolution. It
/// consists of a list of obligations that must be (eventually)
/// satisfied. The job is to track which are satisfied, which yielded
/// errors, and which are still pending. At any point, users can call
/// `select_where_possible`, and the fulfilment context will try to do
/// selection, retaining only those obligations that remain
/// ambiguous. This may be helpful in pushing type inference
/// along. Once all type inference constraints have been generated, the
/// method `select_all_or_error` can be used to report any remaining
/// ambiguous cases as errors.
pub struct FulfillmentContext<'tcx> {
// A list of all obligations that have been registered with this
// fulfillment context.
@ -81,20 +79,16 @@ impl<'tcx> FulfillmentContext<'tcx> {
}
}
/// Attempts to select obligations that were registered since the call to a selection routine.
/// This is used by the type checker to eagerly attempt to resolve obligations in hopes of
/// gaining type information. It'd be equally valid to use `select_where_possible` but it
/// results in `O(n^2)` performance (#18208).
pub fn select_new_obligations<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
/*!
* Attempts to select obligations that were registered since
* the call to a selection routine. This is used by the type checker
* to eagerly attempt to resolve obligations in hopes of gaining
* type information. It'd be equally valid to use `select_where_possible`
* but it results in `O(n^2)` performance (#18208).
*/
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, true)
}
@ -113,16 +107,13 @@ impl<'tcx> FulfillmentContext<'tcx> {
self.trait_obligations[]
}
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
fn select<'a>(&mut self,
selcx: &mut SelectionContext<'a, 'tcx>,
only_new_obligations: bool)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
/*!
* Attempts to select obligations using `selcx`. If
* `only_new_obligations` is true, then it only attempts to
* select obligations that haven't been seen before.
*/
debug!("select({} obligations, only_new_obligations={}) start",
self.trait_obligations.len(),
only_new_obligations);

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Trait Resolution. See doc.rs.
*/
//! Trait Resolution. See doc.rs.
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
@ -42,14 +40,12 @@ mod fulfill;
mod select;
mod util;
/**
* An `Obligation` represents some trait reference (e.g. `int:Eq`) for
* which the vtable must be found. The process of finding a vtable is
* called "resolving" the `Obligation`. This process consists of
* either identifying an `impl` (e.g., `impl Eq for int`) that
* provides the required vtable, or else finding a bound that is in
* scope. The eventual result is usually a `Selection` (defined below).
*/
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
/// which the vtable must be found. The process of finding a vtable is
/// called "resolving" the `Obligation`. This process consists of
/// either identifying an `impl` (e.g., `impl Eq for int`) that
/// provides the required vtable, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
#[deriving(Clone)]
pub struct Obligation<'tcx> {
pub cause: ObligationCause<'tcx>,
@ -57,9 +53,7 @@ pub struct Obligation<'tcx> {
pub trait_ref: Rc<ty::TraitRef<'tcx>>,
}
/**
* Why did we incur this obligation? Used for error reporting.
*/
/// Why did we incur this obligation? Used for error reporting.
#[deriving(Clone)]
pub struct ObligationCause<'tcx> {
pub span: Span,
@ -121,57 +115,53 @@ pub enum FulfillmentErrorCode<'tcx> {
CodeAmbiguity,
}
/**
* When performing resolution, it is typically the case that there
* can be one of three outcomes:
*
* - `Ok(Some(r))`: success occurred with result `r`
* - `Ok(None)`: could not definitely determine anything, usually due
* to inconclusive type inference.
* - `Err(e)`: error `e` occurred
*/
/// When performing resolution, it is typically the case that there
/// can be one of three outcomes:
///
/// - `Ok(Some(r))`: success occurred with result `r`
/// - `Ok(None)`: could not definitely determine anything, usually due
/// to inconclusive type inference.
/// - `Err(e)`: error `e` occurred
pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/**
* Given the successful resolution of an obligation, the `Vtable`
* indicates where the vtable comes from. Note that while we call this
* a "vtable", it does not necessarily indicate dynamic dispatch at
* runtime. `Vtable` instances just tell the compiler where to find
* methods, but in generic code those methods are typically statically
* dispatched -- only when an object is constructed is a `Vtable`
* instance reified into an actual vtable.
*
* For example, the vtable may be tied to a specific impl (case A),
* or it may be relative to some bound that is in scope (case B).
*
*
* ```
* impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
* impl<T:Clone> Clone<T> for Box<T> { ... } // Impl_2
* impl Clone for int { ... } // Impl_3
*
* fn foo<T:Clone>(concrete: Option<Box<int>>,
* param: T,
* mixed: Option<T>) {
*
* // Case A: Vtable points at a specific impl. Only possible when
* // type is concretely known. If the impl itself has bounded
* // type parameters, Vtable will carry resolutions for those as well:
* concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
*
* // Case B: Vtable must be provided by caller. This applies when
* // type is a type parameter.
* param.clone(); // VtableParam(Oblig_1)
*
* // Case C: A mix of cases A and B.
* mixed.clone(); // Vtable(Impl_1, [VtableParam(Oblig_1)])
* }
* ```
*
* ### The type parameter `N`
*
* See explanation on `VtableImplData`.
*/
/// Given the successful resolution of an obligation, the `Vtable`
/// indicates where the vtable comes from. Note that while we call this
/// a "vtable", it does not necessarily indicate dynamic dispatch at
/// runtime. `Vtable` instances just tell the compiler where to find
/// methods, but in generic code those methods are typically statically
/// dispatched -- only when an object is constructed is a `Vtable`
/// instance reified into an actual vtable.
///
/// For example, the vtable may be tied to a specific impl (case A),
/// or it may be relative to some bound that is in scope (case B).
///
///
/// ```
/// impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
/// impl<T:Clone> Clone<T> for Box<T> { ... } // Impl_2
/// impl Clone for int { ... } // Impl_3
///
/// fn foo<T:Clone>(concrete: Option<Box<int>>,
/// param: T,
/// mixed: Option<T>) {
///
/// // Case A: Vtable points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// // type parameters, Vtable will carry resolutions for those as well:
/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
///
/// // Case B: Vtable must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // VtableParam(Oblig_1)
///
/// // Case C: A mix of cases A and B.
/// mixed.clone(); // Vtable(Impl_1, [VtableParam(Oblig_1)])
/// }
/// ```
///
/// ### The type parameter `N`
///
/// See explanation on `VtableImplData`.
#[deriving(Show,Clone)]
pub enum Vtable<'tcx, N> {
/// Vtable identifying a particular impl.
@ -191,18 +181,16 @@ pub enum Vtable<'tcx, N> {
VtableBuiltin(VtableBuiltinData<N>),
}
/**
* Identifies a particular impl in the source, along with a set of
* substitutions from the impl's type/lifetime parameters. The
* `nested` vector corresponds to the nested obligations attached to
* the impl's type parameters.
*
* The type parameter `N` indicates the type used for "nested
* obligations" that are required by the impl. During type check, this
* is `Obligation`, as one might expect. During trans, however, this
* is `()`, because trans only requires a shallow resolution of an
* impl, and nested obligations are satisfied later.
*/
/// Identifies a particular impl in the source, along with a set of
/// substitutions from the impl's type/lifetime parameters. The
/// `nested` vector corresponds to the nested obligations attached to
/// the impl's type parameters.
///
/// The type parameter `N` indicates the type used for "nested
/// obligations" that are required by the impl. During type check, this
/// is `Obligation`, as one might expect. During trans, however, this
/// is `()`, because trans only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
#[deriving(Clone)]
pub struct VtableImplData<'tcx, N> {
pub impl_def_id: ast::DefId,
@ -215,17 +203,19 @@ pub struct VtableBuiltinData<N> {
pub nested: subst::VecPerParamSpace<N>
}
/**
* A vtable provided as a parameter by the caller. For example, in a
* function like `fn foo<T:Eq>(...)`, if the `eq()` method is invoked
* on an instance of `T`, the vtable would be of type `VtableParam`.
*/
/// A vtable provided as a parameter by the caller. For example, in a
/// function like `fn foo<T:Eq>(...)`, if the `eq()` method is invoked
/// on an instance of `T`, the vtable would be of type `VtableParam`.
#[deriving(PartialEq,Eq,Clone)]
pub struct VtableParamData<'tcx> {
// In the above example, this would be `Eq`
pub bound: Rc<ty::TraitRef<'tcx>>,
}
/// Matches the self type of the inherent impl `impl_def_id`
/// against `self_ty` and returns the resulting resolution. This
/// routine may modify the surrounding type context (for example,
/// it may unify variables).
pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>,
@ -235,13 +225,6 @@ pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
-> SelectionResult<'tcx,
VtableImplData<'tcx, Obligation<'tcx>>>
{
/*!
* Matches the self type of the inherent impl `impl_def_id`
* against `self_ty` and returns the resulting resolution. This
* routine may modify the surrounding type context (for example,
* it may unify variables).
*/
// This routine is only suitable for inherent impls. This is
// because it does not attempt to unify the output type parameters
// from the trait ref against the values from the obligation.
@ -256,53 +239,41 @@ pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
selcx.select_inherent_impl(impl_def_id, cause, self_ty)
}
/// True if neither the trait nor self type is local. Note that `impl_def_id` must refer to an impl
/// of a trait, not an inherent impl.
pub fn is_orphan_impl(tcx: &ty::ctxt,
impl_def_id: ast::DefId)
-> bool
{
/*!
* True if neither the trait nor self type is local. Note that
* `impl_def_id` must refer to an impl of a trait, not an inherent
* impl.
*/
!coherence::impl_is_local(tcx, impl_def_id)
}
/// True if there exist types that satisfy both of the two given impls.
pub fn overlapping_impls(infcx: &InferCtxt,
impl1_def_id: ast::DefId,
impl2_def_id: ast::DefId)
-> bool
{
/*!
* True if there exist types that satisfy both of the two given impls.
*/
coherence::impl_can_satisfy(infcx, impl1_def_id, impl2_def_id) &&
coherence::impl_can_satisfy(infcx, impl2_def_id, impl1_def_id)
}
/// Given generic bounds from an impl like:
///
/// impl<A:Foo, B:Bar+Qux> ...
///
/// along with the bindings for the types `A` and `B` (e.g., `<A=A0, B=B0>`), yields a result like
///
/// [[Foo for A0, Bar for B0, Qux for B0], [], []]
///
/// Expects that `generic_bounds` have already been fully substituted, late-bound regions liberated
/// and so forth, so that they are in the same namespace as `type_substs`.
pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
cause: ObligationCause<'tcx>,
generic_bounds: &ty::GenericBounds<'tcx>,
type_substs: &subst::VecPerParamSpace<Ty<'tcx>>)
-> subst::VecPerParamSpace<Obligation<'tcx>>
{
/*!
* Given generic bounds from an impl like:
*
* impl<A:Foo, B:Bar+Qux> ...
*
* along with the bindings for the types `A` and `B` (e.g.,
* `<A=A0, B=B0>`), yields a result like
*
* [[Foo for A0, Bar for B0, Qux for B0], [], []]
*
* Expects that `generic_bounds` have already been fully
* substituted, late-bound regions liberated and so forth,
* so that they are in the same namespace as `type_substs`.
*/
util::obligations_for_generics(tcx, cause, 0, generic_bounds, type_substs)
}

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See `doc.rs` for high-level documentation */
//! See `doc.rs` for high-level documentation
#![allow(dead_code)] // FIXME -- just temporarily
pub use self::MethodMatchResult::*;
@ -43,7 +43,7 @@ use util::ppaux::Repr;
pub struct SelectionContext<'cx, 'tcx:'cx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
param_env: &'cx ty::ParameterEnvironment<'tcx>,
typer: &'cx Typer<'tcx>+'cx,
typer: &'cx (Typer<'tcx>+'cx),
/// Skolemizer used specifically for skolemizing entries on the
/// obligation stack. This ensures that all entries on the stack
@ -102,32 +102,30 @@ pub enum MethodMatchedData {
CoerciveMethodMatch(/* impl we matched */ ast::DefId)
}
/**
* The selection process begins by considering all impls, where
* clauses, and so forth that might resolve an obligation. Sometimes
* we'll be able to say definitively that (e.g.) an impl does not
* apply to the obligation: perhaps it is defined for `uint` but the
* obligation is for `int`. In that case, we drop the impl out of the
* list. But the other cases are considered *candidates*.
*
* Candidates can either be definitive or ambiguous. An ambiguous
* candidate is one that might match or might not, depending on how
* type variables wind up being resolved. This only occurs during inference.
*
 * For selection to suceed, there must be exactly one non-ambiguous
* candidate. Usually, it is not possible to have more than one
* definitive candidate, due to the coherence rules. However, there is
* one case where it could occur: if there is a blanket impl for a
* trait (that is, an impl applied to all T), and a type parameter
* with a where clause. In that case, we can have a candidate from the
* where clause and a second candidate from the impl. This is not a
* problem because coherence guarantees us that the impl which would
* be used to satisfy the where clause is the same one that we see
* now. To resolve this issue, therefore, we ignore impls if we find a
* matching where clause. Part of the reason for this is that where
* clauses can give additional information (like, the types of output
* parameters) that would have to be inferred from the impl.
*/
/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `uint` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// Candidates can either be definitive or ambiguous. An ambiguous
/// candidate is one that might match or might not, depending on how
/// type variables wind up being resolved. This only occurs during inference.
///
/// For selection to succeed, there must be exactly one non-ambiguous
/// candidate. Usually, it is not possible to have more than one
/// definitive candidate, due to the coherence rules. However, there is
/// one case where it could occur: if there is a blanket impl for a
/// trait (that is, an impl applied to all T), and a type parameter
/// with a where clause. In that case, we can have a candidate from the
/// where clause and a second candidate from the impl. This is not a
/// problem because coherence guarantees us that the impl which would
/// be used to satisfy the where clause is the same one that we see
/// now. To resolve this issue, therefore, we ignore impls if we find a
/// matching where clause. Part of the reason for this is that where
/// clauses can give additional information (like, the types of output
/// parameters) that would have to be inferred from the impl.
#[deriving(PartialEq,Eq,Show,Clone)]
enum Candidate<'tcx> {
BuiltinCandidate(ty::BuiltinBound),
@ -201,15 +199,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
// `impl<T> Iterable<T> for Vec<T>`, than an error would result.
/// Evaluates whether the obligation can be satisfied. Returns an indication of whether the
/// obligation can be satisfied and, if so, by what means. Never affects surrounding typing
/// environment.
pub fn select(&mut self, obligation: &Obligation<'tcx>)
-> SelectionResult<'tcx, Selection<'tcx>> {
/*!
* Evaluates whether the obligation can be satisfied. Returns
* an indication of whether the obligation can be satisfied
* and, if so, by what means. Never affects surrounding typing
* environment.
*/
debug!("select({})", obligation.repr(self.tcx()));
assert!(!obligation.trait_ref.has_escaping_regions());
@ -253,15 +247,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// The result is "true" if the obligation *may* hold and "false" if
// we can be sure it does not.
/// Evaluates whether the obligation `obligation` can be satisfied (by any means).
pub fn evaluate_obligation(&mut self,
obligation: &Obligation<'tcx>)
-> bool
{
/*!
* Evaluates whether the obligation `obligation` can be
* satisfied (by any means).
*/
debug!("evaluate_obligation({})",
obligation.repr(self.tcx()));
assert!(!obligation.trait_ref.has_escaping_regions());
@ -387,17 +377,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Evaluates whether the impl with id `impl_def_id` could be applied to the self type
/// `obligation_self_ty`. This can be used either for trait or inherent impls.
pub fn evaluate_impl(&mut self,
impl_def_id: ast::DefId,
obligation: &Obligation<'tcx>)
-> bool
{
/*!
* Evaluates whether the impl with id `impl_def_id` could be
* applied to the self type `obligation_self_ty`. This can be
* used either for trait or inherent impls.
*/
debug!("evaluate_impl(impl_def_id={}, obligation={})",
impl_def_id.repr(self.tcx()),
obligation.repr(self.tcx()));
@ -435,23 +421,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// the body of `evaluate_method_obligation()` for more details on
// the algorithm.
/// Determine whether a trait-method is applicable to a receiver of
/// type `rcvr_ty`. *Does not affect the inference state.*
///
/// - `rcvr_ty` -- type of the receiver
/// - `xform_self_ty` -- transformed self type declared on the method, with `Self`
/// to a fresh type variable
/// - `obligation` -- a reference to the trait where the method is declared, with
/// the input types on the trait replaced with fresh type variables
pub fn evaluate_method_obligation(&mut self,
rcvr_ty: Ty<'tcx>,
xform_self_ty: Ty<'tcx>,
obligation: &Obligation<'tcx>)
-> MethodMatchResult
{
/*!
* Determine whether a trait-method is applicable to a receiver of
* type `rcvr_ty`. *Does not affect the inference state.*
*
* - `rcvr_ty` -- type of the receiver
* - `xform_self_ty` -- transformed self type declared on the method, with `Self`
* to a fresh type variable
* - `obligation` -- a reference to the trait where the method is declared, with
* the input types on the trait replaced with fresh type variables
*/
// Here is the situation. We have a trait method declared (say) like so:
//
// trait TheTrait {
@ -563,19 +546,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Given the successful result of a method match, this function "confirms" the result, which
/// basically repeats the various matching operations, but outside of any snapshot so that
/// their effects are committed into the inference state.
pub fn confirm_method_match(&mut self,
rcvr_ty: Ty<'tcx>,
xform_self_ty: Ty<'tcx>,
obligation: &Obligation<'tcx>,
data: MethodMatchedData)
{
/*!
* Given the successful result of a method match, this
* function "confirms" the result, which basically repeats the
* various matching operations, but outside of any snapshot so
* that their effects are committed into the inference state.
*/
let is_ok = match data {
PreciseMethodMatch => {
self.match_method_precise(rcvr_ty, xform_self_ty, obligation).is_ok()
@ -597,17 +576,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Implements the *precise method match* procedure described in
/// `evaluate_method_obligation()`.
fn match_method_precise(&mut self,
rcvr_ty: Ty<'tcx>,
xform_self_ty: Ty<'tcx>,
obligation: &Obligation<'tcx>)
-> Result<(),()>
{
/*!
* Implements the *precise method match* procedure described in
* `evaluate_method_obligation()`.
*/
self.infcx.commit_if_ok(|| {
match self.infcx.sub_types(false, infer::RelateSelfType(obligation.cause.span),
rcvr_ty, xform_self_ty) {
@ -623,18 +599,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
})
}
/// Assembles a list of potentially applicable impls using the *coercive match* procedure
/// described in `evaluate_method_obligation()`.
fn assemble_method_candidates_from_impls(&mut self,
rcvr_ty: Ty<'tcx>,
xform_self_ty: Ty<'tcx>,
obligation: &Obligation<'tcx>)
-> Vec<ast::DefId>
{
/*!
* Assembles a list of potentially applicable impls using the
* *coercive match* procedure described in
* `evaluate_method_obligation()`.
*/
let mut candidates = Vec::new();
let all_impls = self.all_impls(obligation.trait_ref.def_id);
@ -650,6 +622,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
candidates
}
/// Applies the *coercive match* procedure described in `evaluate_method_obligation()` to a
/// particular impl.
fn match_method_coerce(&mut self,
impl_def_id: ast::DefId,
rcvr_ty: Ty<'tcx>,
@ -657,11 +631,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation: &Obligation<'tcx>)
-> Result<Substs<'tcx>, ()>
{
/*!
* Applies the *coercive match* procedure described in
* `evaluate_method_obligation()` to a particular impl.
*/
// This is almost always expected to succeed. It
// causes the impl's self-type etc to be unified with
// the type variable that is shared between
@ -683,6 +652,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(substs)
}
/// A version of `winnow_impl` applicable to coercive method matching. This is basically the
/// same as `winnow_impl` but it uses the method matching procedure and is specific to impls.
fn winnow_method_impl(&mut self,
impl_def_id: ast::DefId,
rcvr_ty: Ty<'tcx>,
@ -690,13 +661,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation: &Obligation<'tcx>)
-> bool
{
/*!
 * A version of `winnow_impl` applicable to coercive method
* matching. This is basically the same as `winnow_impl` but
* it uses the method matching procedure and is specific to
* impls.
*/
debug!("winnow_method_impl: impl_def_id={} rcvr_ty={} xform_self_ty={} obligation={}",
impl_def_id.repr(self.tcx()),
rcvr_ty.repr(self.tcx()),
@ -962,19 +926,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(candidates)
}
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
/// supplied to find out whether it is listed among them.
///
/// Never affects inference environment.
fn assemble_candidates_from_caller_bounds(&mut self,
obligation: &Obligation<'tcx>,
candidates: &mut CandidateSet<'tcx>)
-> Result<(),SelectionError<'tcx>>
{
/*!
* Given an obligation like `<SomeTrait for T>`, search the obligations
* that the caller supplied to find out whether it is listed among
* them.
*
* Never affects inference environment.
*/
debug!("assemble_candidates_from_caller_bounds({})",
obligation.repr(self.tcx()));
@ -1002,22 +962,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(())
}
/// Check for the artificial impl that the compiler will create for an obligation like `X :
/// FnMut<..>` where `X` is an unboxed closure type.
///
/// Note: the type parameters on an unboxed closure candidate are modeled as *output* type
/// parameters and hence do not affect whether this trait is a match or not. They will be
/// unified during the confirmation step.
fn assemble_unboxed_candidates(&mut self,
obligation: &Obligation<'tcx>,
candidates: &mut CandidateSet<'tcx>)
-> Result<(),SelectionError<'tcx>>
{
/*!
* Check for the artificial impl that the compiler will create
* for an obligation like `X : FnMut<..>` where `X` is an
* unboxed closure type.
*
* Note: the type parameters on an unboxed closure candidate
* are modeled as *output* type parameters and hence do not
* affect whether this trait is a match or not. They will be
* unified during the confirmation step.
*/
let tcx = self.tcx();
let kind = if Some(obligation.trait_ref.def_id) == tcx.lang_items.fn_trait() {
ty::FnUnboxedClosureKind
@ -1060,15 +1015,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(())
}
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(&mut self,
obligation: &Obligation<'tcx>,
candidates: &mut CandidateSet<'tcx>)
-> Result<(), SelectionError<'tcx>>
{
/*!
* Search for impls that might apply to `obligation`.
*/
let all_impls = self.all_impls(obligation.trait_ref.def_id);
for &impl_def_id in all_impls.iter() {
self.infcx.probe(|| {
@ -1092,17 +1044,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// attempt to evaluate recursive bounds to see if they are
// satisfied.
/// Further evaluate `candidate` to decide whether all type parameters match and whether nested
/// obligations are met. Returns true if `candidate` remains viable after this further
/// scrutiny.
fn winnow_candidate<'o>(&mut self,
stack: &ObligationStack<'o, 'tcx>,
candidate: &Candidate<'tcx>)
-> EvaluationResult
{
/*!
* Further evaluate `candidate` to decide whether all type parameters match
* and whether nested obligations are met. Returns true if `candidate` remains
* viable after this further scrutiny.
*/
debug!("winnow_candidate: candidate={}", candidate.repr(self.tcx()));
self.infcx.probe(|| {
let candidate = (*candidate).clone();
@ -1129,37 +1078,35 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
result
}
/// Returns true if `candidate_i` should be dropped in favor of `candidate_j`.
///
/// This is generally true if either:
/// - candidate i and candidate j are equivalent; or,
/// - candidate i is a concrete impl and candidate j is a where clause bound,
/// and the concrete impl is applicable to the types in the where clause bound.
///
/// The last case refers to cases where there are blanket impls (often conditional
/// blanket impls) as well as a where clause. This can come down to one of two cases:
///
/// - The impl is truly unconditional (it has no where clauses
/// of its own), in which case the where clause is
/// unnecessary, because coherence requires that we would
/// pick that particular impl anyhow (at least so long as we
/// don't have specialization).
///
/// - The impl is conditional, in which case we may not have winnowed it out
/// because we don't know if the conditions apply, but the where clause is basically
/// telling us that there is some impl, though not necessarily the one we see.
///
/// In both cases we prefer to take the where clause, which is
/// essentially harmless. See issue #18453 for more details of
/// a case where doing the opposite caused us harm.
fn candidate_should_be_dropped_in_favor_of<'o>(&mut self,
stack: &ObligationStack<'o, 'tcx>,
candidate_i: &Candidate<'tcx>,
candidate_j: &Candidate<'tcx>)
-> bool
{
/*!
* Returns true if `candidate_i` should be dropped in favor of `candidate_j`.
* This is generally true if either:
* - candidate i and candidate j are equivalent; or,
 * - candidate i is a concrete impl and candidate j is a where clause bound,
* and the concrete impl is applicable to the types in the where clause bound.
*
* The last case refers to cases where there are blanket impls (often conditional
* blanket impls) as well as a where clause. This can come down to one of two cases:
*
* - The impl is truly unconditional (it has no where clauses
* of its own), in which case the where clause is
* unnecessary, because coherence requires that we would
* pick that particular impl anyhow (at least so long as we
* don't have specialization).
*
* - The impl is conditional, in which case we may not have winnowed it out
* because we don't know if the conditions apply, but the where clause is basically
 * telling us that there is some impl, though not necessarily the one we see.
*
* In both cases we prefer to take the where clause, which is
* essentially harmless. See issue #18453 for more details of
* a case where doing the opposite caused us harm.
*/
match (candidate_i, candidate_j) {
(&ImplCandidate(impl_def_id), &ParamCandidate(ref vt)) => {
debug!("Considering whether to drop param {} in favor of impl {}",
@ -1848,26 +1795,23 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Determines whether the self type declared against
/// `impl_def_id` matches `obligation_self_ty`. If successful,
/// returns the substitutions used to make them match. See
/// `match_impl()`. For example, if `impl_def_id` is declared
/// as:
///
/// impl<T:Copy> Foo for ~T { ... }
///
/// and `obligation_self_ty` is `int`, we'd get back an `Err(_)`
/// result. But if `obligation_self_ty` were `~int`, we'd get
/// back `Ok(T=int)`.
fn match_inherent_impl(&mut self,
impl_def_id: ast::DefId,
obligation_cause: ObligationCause,
obligation_self_ty: Ty<'tcx>)
-> Result<Substs<'tcx>,()>
{
/*!
* Determines whether the self type declared against
* `impl_def_id` matches `obligation_self_ty`. If successful,
* returns the substitutions used to make them match. See
* `match_impl()`. For example, if `impl_def_id` is declared
* as:
*
* impl<T:Copy> Foo for ~T { ... }
*
 * and `obligation_self_ty` is `int`, we'd get back an `Err(_)`
* result. But if `obligation_self_ty` were `~int`, we'd get
* back `Ok(T=int)`.
*/
// Create fresh type variables for each type parameter declared
// on the impl etc.
let impl_substs = util::fresh_substs_for_impl(self.infcx,
@ -1928,6 +1872,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// the output type parameters from the obligation with those found
// on the impl/bound, which may yield type errors.
/// Relates the output type parameters from an impl to the
/// trait. This may lead to type errors. The confirmation step
/// is separated from the main match procedure because these
/// type errors do not cause us to select another impl.
///
/// As an example, consider matching the obligation
/// `Iterator<char> for Elems<int>` using the following impl:
///
/// impl<T> Iterator<T> for Elems<T> { ... }
///
/// The match phase will succeed with substitution `T=int`.
/// The confirm step will then try to unify `int` and `char`
/// and yield an error.
fn confirm_impl_vtable(&mut self,
impl_def_id: ast::DefId,
obligation_cause: ObligationCause<'tcx>,
@ -1935,22 +1892,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
substs: &Substs<'tcx>)
-> Result<(), SelectionError<'tcx>>
{
/*!
* Relates the output type parameters from an impl to the
* trait. This may lead to type errors. The confirmation step
* is separated from the main match procedure because these
* type errors do not cause us to select another impl.
*
* As an example, consider matching the obligation
* `Iterator<char> for Elems<int>` using the following impl:
*
* impl<T> Iterator<T> for Elems<T> { ... }
*
* The match phase will succeed with substitution `T=int`.
* The confirm step will then try to unify `int` and `char`
* and yield an error.
*/
let impl_trait_ref = ty::impl_trait_ref(self.tcx(),
impl_def_id).unwrap();
let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
@ -1958,38 +1899,30 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
self.confirm(obligation_cause, obligation_trait_ref, impl_trait_ref)
}
/// After we have determined which impl applies, and with what substitutions, there is one last
/// step. We have to go back and relate the "output" type parameters from the obligation to the
/// types that are specified in the impl.
///
/// For example, imagine we have:
///
/// impl<T> Iterator<T> for Vec<T> { ... }
///
/// and our obligation is `Iterator<Foo> for Vec<int>` (note the mismatch in the obligation
/// types). Up until this step, no error would be reported: the self type is `Vec<int>`, and
/// that matches `Vec<T>` with the substitution `T=int`. At this stage, we could then go and
/// check that the type parameters to the `Iterator` trait match. (In terms of the parameters,
/// the `expected_trait_ref` here would be `Iterator<int> for Vec<int>`, and the
/// `obligation_trait_ref` would be `Iterator<Foo> for Vec<int>`.)
///
/// Note that this checking occurs *after* the impl has selected, because these output type
/// parameters should not affect the selection of the impl. Therefore, if there is a mismatch,
/// we report an error to the user.
fn confirm(&mut self,
obligation_cause: ObligationCause,
obligation_trait_ref: Rc<ty::TraitRef<'tcx>>,
expected_trait_ref: Rc<ty::TraitRef<'tcx>>)
-> Result<(), SelectionError<'tcx>>
{
/*!
* After we have determined which impl applies, and with what
* substitutions, there is one last step. We have to go back
* and relate the "output" type parameters from the obligation
* to the types that are specified in the impl.
*
* For example, imagine we have:
*
* impl<T> Iterator<T> for Vec<T> { ... }
*
* and our obligation is `Iterator<Foo> for Vec<int>` (note
* the mismatch in the obligation types). Up until this step,
* no error would be reported: the self type is `Vec<int>`,
* and that matches `Vec<T>` with the substitution `T=int`.
* At this stage, we could then go and check that the type
* parameters to the `Iterator` trait match.
* (In terms of the parameters, the `expected_trait_ref`
* here would be `Iterator<int> for Vec<int>`, and the
* `obligation_trait_ref` would be `Iterator<Foo> for Vec<int>`.
*
* Note that this checking occurs *after* the impl has
* selected, because these output type parameters should not
* affect the selection of the impl. Therefore, if there is a
* mismatch, we report an error to the user.
*/
let origin = infer::RelateOutputImplTypes(obligation_cause.span);
let obligation_trait_ref = obligation_trait_ref.clone();
@ -2019,11 +1952,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Returns set of all impls for a given trait.
fn all_impls(&self, trait_def_id: ast::DefId) -> Vec<ast::DefId> {
/*!
* Returns set of all impls for a given trait.
*/
ty::populate_implementations_for_trait_if_necessary(self.tcx(),
trait_def_id);
match self.tcx().trait_impls.borrow().get(&trait_def_id) {

View File

@ -42,21 +42,18 @@ pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
trait_ref: Rc<ty::TraitRef<'tcx>>)
-> Supertraits<'cx, 'tcx>
{
/*!
* Returns an iterator over the trait reference `T` and all of its
* supertrait references. May contain duplicates. In general
* the ordering is not defined.
*
* Example:
*
* ```
* trait Foo { ... }
* trait Bar : Foo { ... }
* trait Baz : Bar+Foo { ... }
* ```
*
* `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
*/
//! Returns an iterator over the trait reference `T` and all of its supertrait references. May
//! contain duplicates. In general the ordering is not defined.
//!
//! Example:
//!
//! ```
//! trait Foo { ... }
//! trait Bar : Foo { ... }
//! trait Baz : Bar+Foo { ... }
//! ```
//!
//! `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
transitive_bounds(tcx, &[trait_ref])
}
@ -97,12 +94,8 @@ impl<'cx, 'tcx> Supertraits<'cx, 'tcx> {
self.stack.push(entry);
}
/// Returns the path taken through the trait supertraits to reach the current point.
pub fn indices(&self) -> Vec<uint> {
/*!
* Returns the path taken through the trait supertraits to
* reach the current point.
*/
self.stack.iter().map(|e| e.position).collect()
}
}
@ -171,6 +164,7 @@ impl<'tcx> fmt::Show for VtableParamData<'tcx> {
}
}
/// See `super::obligations_for_generics`
pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
cause: ObligationCause<'tcx>,
recursion_depth: uint,
@ -178,7 +172,6 @@ pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
type_substs: &VecPerParamSpace<Ty<'tcx>>)
-> VecPerParamSpace<Obligation<'tcx>>
{
/*! See `super::obligations_for_generics` */
debug!("obligations_for_generics(generic_bounds={}, type_substs={})",
generic_bounds.repr(tcx), type_substs.repr(tcx));
@ -272,20 +265,15 @@ pub fn obligation_for_builtin_bound<'tcx>(
}
}
/// Starting from a caller obligation `caller_bound` (which has coordinates `space`/`i` in the list
/// of caller obligations), search through the trait and supertraits to find one where `test(d)` is
/// true, where `d` is the def-id of the trait/supertrait. If any is found, return `Some(p)` where
/// `p` is the path to that trait/supertrait. Else `None`.
pub fn search_trait_and_supertraits_from_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
caller_bound: Rc<ty::TraitRef<'tcx>>,
test: |ast::DefId| -> bool)
-> Option<VtableParamData<'tcx>>
{
/*!
* Starting from a caller obligation `caller_bound` (which has
* coordinates `space`/`i` in the list of caller obligations),
* search through the trait and supertraits to find one where
* `test(d)` is true, where `d` is the def-id of the
* trait/supertrait. If any is found, return `Some(p)` where `p`
* is the path to that trait/supertrait. Else `None`.
*/
for bound in transitive_bounds(tcx, &[caller_bound]) {
if test(bound.def_id) {
let vtable_param = VtableParamData { bound: bound };

View File

@ -671,39 +671,29 @@ pub fn type_has_late_bound_regions(ty: Ty) -> bool {
ty.flags.intersects(HAS_RE_LATE_BOUND)
}
/// An "escaping region" is a bound region whose binder is not part of `t`.
///
/// So, for example, consider a type like the following, which has two binders:
///
/// for<'a> fn(x: for<'b> fn(&'a int, &'b int))
/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
///
/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
/// fn type*, that type has an escaping region: `'a`.
///
/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
/// we already use the term "free region". It refers to the regions that we use to represent bound
/// regions on a fn definition while we are typechecking its body.
///
/// To clarify, conceptually there is no particular difference between an "escaping" region and a
/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
/// binding level, one is generally required to do some sort of processing to a bound region, such
/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
/// represent the scope to which it is attached, etc. An escaping region represents a bound region
/// for which this processing has not yet been done.
pub fn type_has_escaping_regions(ty: Ty) -> bool {
/*!
* An "escaping region" is a bound region whose binder is not part of `t`.
*
* So, for example, consider a type like the following, which has two
* binders:
*
* for<'a> fn(x: for<'b> fn(&'a int, &'b int))
* ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
* ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
*
* This type has *bound regions* (`'a`, `'b`), but it does not
* have escaping regions, because the binders of both `'a` and
* `'b` are part of the type itself. However, if we consider the
* *inner fn type*, that type has an escaping region: `'a`.
*
* Note that what I'm calling an "escaping region" is often just
* called a "free region". However, we already use the term "free
* region". It refers to the regions that we use to represent
* bound regions on a fn definition while we are typechecking its
* body.
*
* To clarify, conceptually there is no particular difference
* between an "escaping" region and a "free" region. However,
* there is a big difference in practice. Basically, when
* "entering" a binding level, one is generally required to do
* some sort of processing to a bound region, such as replacing it
* with a fresh/skolemized region, or making an entry in the
* environment to represent the scope to which it is attached,
* etc. An escaping region represents a bound region for which
* this processing has not yet been done.
*/
type_escapes_depth(ty, 0)
}
@ -743,18 +733,16 @@ impl<'tcx> FnOutput<'tcx> {
}
}
/**
* Signature of a function type, which I have arbitrarily
* decided to use to refer to the input/output types.
*
* - `inputs` is the list of arguments and their modes.
* - `output` is the return type.
 * - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
*
* Note that a `FnSig` introduces a level of region binding, to
* account for late-bound parameters that appear in the types of the
* fn's arguments or the fn's return type.
*/
/// Signature of a function type, which I have arbitrarily
/// decided to use to refer to the input/output types.
///
/// - `inputs` is the list of arguments and their modes.
/// - `output` is the return type.
/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
///
/// Note that a `FnSig` introduces a level of region binding, to
/// account for late-bound parameters that appear in the types of the
/// fn's arguments or the fn's return type.
#[deriving(Clone, PartialEq, Eq, Hash)]
pub struct FnSig<'tcx> {
pub inputs: Vec<Ty<'tcx>>,
@ -769,47 +757,45 @@ pub struct ParamTy {
pub def_id: DefId
}
/**
* A [De Bruijn index][dbi] is a standard means of representing
* regions (and perhaps later types) in a higher-ranked setting. In
* particular, imagine a type like this:
*
* for<'a> fn(for<'b> fn(&'b int, &'a int), &'a char)
* ^ ^ | | |
* | | | | |
* | +------------+ 1 | |
* | | |
* +--------------------------------+ 2 |
* | |
* +------------------------------------------+ 1
*
* In this type, there are two binders (the outer fn and the inner
* fn). We need to be able to determine, for any given region, which
* fn type it is bound by, the inner or the outer one. There are
* various ways you can do this, but a De Bruijn index is one of the
* more convenient and has some nice properties. The basic idea is to
* count the number of binders, inside out. Some examples should help
* clarify what I mean.
*
* Let's start with the reference type `&'b int` that is the first
* argument to the inner function. This region `'b` is assigned a De
* Bruijn index of 1, meaning "the innermost binder" (in this case, a
* fn). The region `'a` that appears in the second argument type (`&'a
* int`) would then be assigned a De Bruijn index of 2, meaning "the
 * second-innermost binder". (These indices are written on the arrows
* in the diagram).
*
* What is interesting is that De Bruijn index attached to a particular
* variable will vary depending on where it appears. For example,
* the final type `&'a char` also refers to the region `'a` declared on
* the outermost fn. But this time, this reference is not nested within
* any other binders (i.e., it is not an argument to the inner fn, but
* rather the outer one). Therefore, in this case, it is assigned a
* De Bruijn index of 1, because the innermost binder in that location
* is the outer fn.
*
* [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
*/
/// A [De Bruijn index][dbi] is a standard means of representing
/// regions (and perhaps later types) in a higher-ranked setting. In
/// particular, imagine a type like this:
///
/// for<'a> fn(for<'b> fn(&'b int, &'a int), &'a char)
/// ^ ^ | | |
/// | | | | |
/// | +------------+ 1 | |
/// | | |
/// +--------------------------------+ 2 |
/// | |
/// +------------------------------------------+ 1
///
/// In this type, there are two binders (the outer fn and the inner
/// fn). We need to be able to determine, for any given region, which
/// fn type it is bound by, the inner or the outer one. There are
/// various ways you can do this, but a De Bruijn index is one of the
/// more convenient and has some nice properties. The basic idea is to
/// count the number of binders, inside out. Some examples should help
/// clarify what I mean.
///
/// Let's start with the reference type `&'b int` that is the first
/// argument to the inner function. This region `'b` is assigned a De
/// Bruijn index of 1, meaning "the innermost binder" (in this case, a
/// fn). The region `'a` that appears in the second argument type (`&'a
/// int`) would then be assigned a De Bruijn index of 2, meaning "the
/// second-innermost binder". (These indices are written on the arrows
/// in the diagram).
///
/// What is interesting is that De Bruijn index attached to a particular
/// variable will vary depending on where it appears. For example,
/// the final type `&'a char` also refers to the region `'a` declared on
/// the outermost fn. But this time, this reference is not nested within
/// any other binders (i.e., it is not an argument to the inner fn, but
/// rather the outer one). Therefore, in this case, it is assigned a
/// De Bruijn index of 1, because the innermost binder in that location
/// is the outer fn.
///
/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
#[deriving(Clone, PartialEq, Eq, Hash, Encodable, Decodable, Show)]
pub struct DebruijnIndex {
// We maintain the invariant that this is never 0. So 1 indicates
@ -856,11 +842,9 @@ pub enum Region {
ReEmpty,
}
/**
* Upvars do not get their own node-id. Instead, we use the pair of
* the original var id (that is, the root variable that is referenced
* by the upvar) and the id of the closure expression.
*/
/// Upvars do not get their own node-id. Instead, we use the pair of
/// the original var id (that is, the root variable that is referenced
/// by the upvar) and the id of the closure expression.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct UpvarId {
pub var_id: ast::NodeId,
@ -913,55 +897,53 @@ pub enum BorrowKind {
MutBorrow
}
/**
* Information describing the borrowing of an upvar. This is computed
* during `typeck`, specifically by `regionck`. The general idea is
* that the compiler analyses treat closures like:
*
* let closure: &'e fn() = || {
* x = 1; // upvar x is assigned to
* use(y); // upvar y is read
* foo(&z); // upvar z is borrowed immutably
* };
*
* as if they were "desugared" to something loosely like:
*
* struct Vars<'x,'y,'z> { x: &'x mut int,
* y: &'y const int,
* z: &'z int }
* let closure: &'e fn() = {
* fn f(env: &Vars) {
* *env.x = 1;
* use(*env.y);
* foo(env.z);
* }
* let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x,
* y: &'y const y,
* z: &'z z };
* (env, f)
* };
*
* This is basically what happens at runtime. The closure is basically
* an existentially quantified version of the `(env, f)` pair.
*
* This data structure indicates the region and mutability of a single
* one of the `x...z` borrows.
*
* It may not be obvious why each borrowed variable gets its own
* lifetime (in the desugared version of the example, these are indicated
* by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition).
* Each such lifetime must encompass the lifetime `'e` of the closure itself,
* but need not be identical to it. The reason that this makes sense:
*
* - Callers are only permitted to invoke the closure, and hence to
* use the pointers, within the lifetime `'e`, so clearly `'e` must
* be a sublifetime of `'x...'z`.
* - The closure creator knows which upvars were borrowed by the closure
* and thus `x...z` will be reserved for `'x...'z` respectively.
* - Through mutation, the borrowed upvars can actually escape
* the closure, so sometimes it is necessary for them to be larger
* than the closure lifetime itself.
*/
/// Information describing the borrowing of an upvar. This is computed
/// during `typeck`, specifically by `regionck`. The general idea is
/// that the compiler analyses treat closures like:
///
/// let closure: &'e fn() = || {
/// x = 1; // upvar x is assigned to
/// use(y); // upvar y is read
/// foo(&z); // upvar z is borrowed immutably
/// };
///
/// as if they were "desugared" to something loosely like:
///
/// struct Vars<'x,'y,'z> { x: &'x mut int,
/// y: &'y const int,
/// z: &'z int }
/// let closure: &'e fn() = {
/// fn f(env: &Vars) {
/// *env.x = 1;
/// use(*env.y);
/// foo(env.z);
/// }
/// let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x,
/// y: &'y const y,
/// z: &'z z };
/// (env, f)
/// };
///
/// This is basically what happens at runtime. The closure is basically
/// an existentially quantified version of the `(env, f)` pair.
///
/// This data structure indicates the region and mutability of a single
/// one of the `x...z` borrows.
///
/// It may not be obvious why each borrowed variable gets its own
/// lifetime (in the desugared version of the example, these are indicated
/// by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition).
/// Each such lifetime must encompass the lifetime `'e` of the closure itself,
/// but need not be identical to it. The reason that this makes sense:
///
/// - Callers are only permitted to invoke the closure, and hence to
/// use the pointers, within the lifetime `'e`, so clearly `'e` must
/// be a sublifetime of `'x...'z`.
/// - The closure creator knows which upvars were borrowed by the closure
/// and thus `x...z` will be reserved for `'x...'z` respectively.
/// - Through mutation, the borrowed upvars can actually escape
/// the closure, so sometimes it is necessary for them to be larger
/// than the closure lifetime itself.
#[deriving(PartialEq, Clone, Encodable, Decodable, Show)]
pub struct UpvarBorrow {
pub kind: BorrowKind,
@ -1111,37 +1093,33 @@ pub struct TyTrait<'tcx> {
pub bounds: ExistentialBounds
}
/**
* A complete reference to a trait. These take numerous guises in syntax,
* but perhaps the most recognizable form is in a where clause:
*
* T : Foo<U>
*
* This would be represented by a trait-reference where the def-id is the
* def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
* `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
*
* Trait references also appear in object types like `Foo<U>`, but in
* that case the `Self` parameter is absent from the substitutions.
*
* Note that a `TraitRef` introduces a level of region binding, to
* account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
* U>` or higher-ranked object types.
*/
/// A complete reference to a trait. These take numerous guises in syntax,
/// but perhaps the most recognizable form is in a where clause:
///
/// T : Foo<U>
///
/// This would be represented by a trait-reference where the def-id is the
/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
///
/// Trait references also appear in object types like `Foo<U>`, but in
/// that case the `Self` parameter is absent from the substitutions.
///
/// Note that a `TraitRef` introduces a level of region binding, to
/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
/// U>` or higher-ranked object types.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct TraitRef<'tcx> {
pub def_id: DefId,
pub substs: Substs<'tcx>,
}
/**
* Binder serves as a synthetic binder for lifetimes. It is used when
* we wish to replace the escaping higher-ranked lifetimes in a type
* or something else that is not itself a binder (this is because the
* `replace_late_bound_regions` function replaces all lifetimes bound
* by the binder supplied to it; but a type is not a binder, so you
* must introduce an artificial one).
*/
/// Binder serves as a synthetic binder for lifetimes. It is used when
/// we wish to replace the escaping higher-ranked lifetimes in a type
/// or something else that is not itself a binder (this is because the
/// `replace_late_bound_regions` function replaces all lifetimes bound
/// by the binder supplied to it; but a type is not a binder, so you
/// must introduce an artificial one).
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct Binder<T> {
pub value: T
@ -1248,11 +1226,8 @@ pub fn all_builtin_bounds() -> BuiltinBounds {
set
}
/// An existential bound that does not implement any traits.
pub fn region_existential_bound(r: ty::Region) -> ExistentialBounds {
/*!
* An existential bound that does not implement any traits.
*/
ty::ExistentialBounds { region_bound: r,
builtin_bounds: empty_builtin_bounds() }
}
@ -1425,27 +1400,25 @@ impl<'tcx> Generics<'tcx> {
}
}
/**
* Represents the bounds declared on a particular set of type
* parameters. Should eventually be generalized into a flag list of
* where clauses. You can obtain a `GenericBounds` list from a
* `Generics` by using the `to_bounds` method. Note that this method
* reflects an important semantic invariant of `GenericBounds`: while
* the bounds in a `Generics` are expressed in terms of the bound type
* parameters of the impl/trait/whatever, a `GenericBounds` instance
 * represents a set of bounds for some particular instantiation,
* meaning that the generic parameters have been substituted with
* their values.
*
* Example:
*
* struct Foo<T,U:Bar<T>> { ... }
*
* Here, the `Generics` for `Foo` would contain a list of bounds like
* `[[], [U:Bar<T>]]`. Now if there were some particular reference
* like `Foo<int,uint>`, then the `GenericBounds` would be `[[],
* [uint:Bar<int>]]`.
*/
/// Represents the bounds declared on a particular set of type
/// parameters. Should eventually be generalized into a flag list of
/// where clauses. You can obtain a `GenericBounds` list from a
/// `Generics` by using the `to_bounds` method. Note that this method
/// reflects an important semantic invariant of `GenericBounds`: while
/// the bounds in a `Generics` are expressed in terms of the bound type
/// parameters of the impl/trait/whatever, a `GenericBounds` instance
/// represents a set of bounds for some particular instantiation,
/// meaning that the generic parameters have been substituted with
/// their values.
///
/// Example:
///
/// struct Foo<T,U:Bar<T>> { ... }
///
/// Here, the `Generics` for `Foo` would contain a list of bounds like
/// `[[], [U:Bar<T>]]`. Now if there were some particular reference
/// like `Foo<int,uint>`, then the `GenericBounds` would be `[[],
/// [uint:Bar<int>]]`.
#[deriving(Clone, Show)]
pub struct GenericBounds<'tcx> {
pub types: VecPerParamSpace<ParamBounds<'tcx>>,
@ -1834,12 +1807,9 @@ impl FlagComputation {
}
}
/// Adds the flags/depth from a set of types that appear within the current type, but within a
/// region binder.
fn add_bound_computation(&mut self, computation: &FlagComputation) {
/*!
* Adds the flags/depth from a set of types that appear within
* the current type, but within a region binder.
*/
self.add_flags(computation.flags);
    // The types that contributed to `computation` occurred within
@ -2455,18 +2425,16 @@ pub fn type_needs_unwind_cleanup<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
}
}
/**
* Type contents is how the type checker reasons about kinds.
* They track what kinds of things are found within a type. You can
* think of them as kind of an "anti-kind". They track the kinds of values
 * and things that are contained in types. Having a larger contents for
* a type tends to rule that type *out* from various kinds. For example,
* a type that contains a reference is not sendable.
*
* The reason we compute type contents and not kinds is that it is
* easier for me (nmatsakis) to think about what is contained within
* a type than to think about what is *not* contained within a type.
*/
/// Type contents is how the type checker reasons about kinds.
/// They track what kinds of things are found within a type. You can
/// think of them as kind of an "anti-kind". They track the kinds of values
/// and things that are contained in types. Having a larger contents for
/// a type tends to rule that type *out* from various kinds. For example,
/// a type that contains a reference is not sendable.
///
/// The reason we compute type contents and not kinds is that it is
/// easier for me (nmatsakis) to think about what is contained within
/// a type than to think about what is *not* contained within a type.
#[deriving(Clone)]
pub struct TypeContents {
pub bits: u64
@ -2575,38 +2543,26 @@ impl TypeContents {
self.intersects(TC::NeedsDrop)
}
/// Includes only those bits that still apply when indirected through a `Box` pointer
pub fn owned_pointer(&self) -> TypeContents {
/*!
* Includes only those bits that still apply
* when indirected through a `Box` pointer
*/
TC::OwnsOwned | (
*self & (TC::OwnsAll | TC::ReachesAll))
}
/// Includes only those bits that still apply when indirected through a reference (`&`)
pub fn reference(&self, bits: TypeContents) -> TypeContents {
/*!
* Includes only those bits that still apply
* when indirected through a reference (`&`)
*/
bits | (
*self & TC::ReachesAll)
}
/// Includes only those bits that still apply when indirected through a managed pointer (`@`)
pub fn managed_pointer(&self) -> TypeContents {
/*!
* Includes only those bits that still apply
* when indirected through a managed pointer (`@`)
*/
TC::Managed | (
*self & TC::ReachesAll)
}
/// Includes only those bits that still apply when indirected through an unsafe pointer (`*`)
pub fn unsafe_pointer(&self) -> TypeContents {
/*!
* Includes only those bits that still apply
* when indirected through an unsafe pointer (`*`)
*/
*self & TC::ReachesAll
}
@ -2883,14 +2839,10 @@ pub fn type_contents<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> TypeContents {
}
}
/// Type contents due to containing a reference with the region `region` and borrow kind `bk`
fn borrowed_contents(region: ty::Region,
mutbl: ast::Mutability)
-> TypeContents {
/*!
* Type contents due to containing a reference
* with the region `region` and borrow kind `bk`
*/
let b = match mutbl {
ast::MutMutable => TC::ReachesMutable | TC::OwnsAffine,
ast::MutImmutable => TC::None,
@ -3648,20 +3600,16 @@ pub fn expr_ty_opt<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Option<Ty<'tcx>>
return node_id_to_type_opt(cx, expr.id);
}
/// Returns the type of `expr`, considering any `AutoAdjustment`
/// entry recorded for that expression.
///
/// It would almost certainly be better to store the adjusted ty in with
/// the `AutoAdjustment`, but I opted not to do this because it would
/// require serializing and deserializing the type and, although that's not
/// hard to do, I just hate that code so much I didn't want to touch it
/// unless it was to fix it properly, which seemed a distraction from the
/// task at hand! -nmatsakis
pub fn expr_ty_adjusted<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Ty<'tcx> {
/*!
*
* Returns the type of `expr`, considering any `AutoAdjustment`
* entry recorded for that expression.
*
* It would almost certainly be better to store the adjusted ty in with
* the `AutoAdjustment`, but I opted not to do this because it would
* require serializing and deserializing the type and, although that's not
* hard to do, I just hate that code so much I didn't want to touch it
* unless it was to fix it properly, which seemed a distraction from the
* task at hand! -nmatsakis
*/
adjust_ty(cx, expr.span, expr.id, expr_ty(cx, expr),
cx.adjustments.borrow().get(&expr.id),
|method_call| cx.method_map.borrow().get(&method_call).map(|method| method.ty))
@ -3707,6 +3655,7 @@ pub fn local_var_name_str(cx: &ctxt, id: NodeId) -> InternedString {
}
}
/// See `expr_ty_adjusted`
pub fn adjust_ty<'tcx>(cx: &ctxt<'tcx>,
span: Span,
expr_id: ast::NodeId,
@ -3714,7 +3663,6 @@ pub fn adjust_ty<'tcx>(cx: &ctxt<'tcx>,
adjustment: Option<&AutoAdjustment<'tcx>>,
method_type: |typeck::MethodCall| -> Option<Ty<'tcx>>)
-> Ty<'tcx> {
/*! See `expr_ty_adjusted` */
match unadjusted_ty.sty {
ty_err => return unadjusted_ty,
@ -4128,16 +4076,11 @@ pub fn ty_sort_string<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> String {
}
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
pub fn type_err_to_str<'tcx>(cx: &ctxt<'tcx>, err: &type_err<'tcx>) -> String {
/*!
*
* Explains the source of a type err in a short,
* human readable way. This is meant to be placed in
* parentheses after some larger message. You should
* also invoke `note_and_explain_type_err()` afterwards
* to present additional details, particularly when
* it comes to lifetime-related errors. */
fn tstore_to_closure(s: &TraitStore) -> String {
match s {
&UniqTraitStore => "proc".to_string(),
@ -4352,21 +4295,16 @@ pub fn provided_trait_methods<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
}
}
/// Helper for looking things up in the various maps that are populated during typeck::collect
/// (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc). All of these share the pattern that if the
/// id is local, it should have been loaded into the map by the `typeck::collect` phase. If the
/// def-id is external, then we have to go consult the crate loading code (and cache the result for
/// the future).
fn lookup_locally_or_in_crate_store<V:Clone>(
descr: &str,
def_id: ast::DefId,
map: &mut DefIdMap<V>,
load_external: || -> V) -> V {
/*!
* Helper for looking things up in the various maps
* that are populated during typeck::collect (e.g.,
* `cx.impl_or_trait_items`, `cx.tcache`, etc). All of these share
* the pattern that if the id is local, it should have
* been loaded into the map by the `typeck::collect` phase.
* If the def-id is external, then we have to go consult
* the crate loading code (and cache the result for the future).
*/
match map.get(&def_id).cloned() {
Some(v) => { return v; }
None => { }
@ -5238,19 +5176,16 @@ pub fn each_bound_trait_and_supertraits<'tcx>(tcx: &ctxt<'tcx>,
return true;
}
/// Given a type which must meet the builtin bounds and trait bounds, returns a set of lifetimes
/// which the type must outlive.
///
/// Requires that trait definitions have been processed.
pub fn required_region_bounds<'tcx>(tcx: &ctxt<'tcx>,
region_bounds: &[ty::Region],
builtin_bounds: BuiltinBounds,
trait_bounds: &[Rc<TraitRef<'tcx>>])
-> Vec<ty::Region>
{
/*!
* Given a type which must meet the builtin bounds and trait
* bounds, returns a set of lifetimes which the type must outlive.
*
* Requires that trait definitions have been processed.
*/
let mut all_bounds = Vec::new();
debug!("required_region_bounds(builtin_bounds={}, trait_bounds={})",
@ -5636,13 +5571,9 @@ impl Variance {
}
}
/// Construct a parameter environment suitable for static contexts or other contexts where there
/// are no free type/lifetime parameters in scope.
pub fn empty_parameter_environment<'tcx>() -> ParameterEnvironment<'tcx> {
/*!
* Construct a parameter environment suitable for static contexts
* or other contexts where there are no free type/lifetime
* parameters in scope.
*/
ty::ParameterEnvironment { free_substs: Substs::empty(),
bounds: VecPerParamSpace::empty(),
caller_obligations: VecPerParamSpace::empty(),
@ -5650,6 +5581,7 @@ pub fn empty_parameter_environment<'tcx>() -> ParameterEnvironment<'tcx> {
selection_cache: traits::SelectionCache::new(), }
}
/// See `ParameterEnvironment` struct def'n for details
pub fn construct_parameter_environment<'tcx>(
tcx: &ctxt<'tcx>,
span: Span,
@ -5657,7 +5589,6 @@ pub fn construct_parameter_environment<'tcx>(
free_id: ast::NodeId)
-> ParameterEnvironment<'tcx>
{
/*! See `ParameterEnvironment` struct def'n for details */
//
// Construct the free substs.
@ -5786,15 +5717,11 @@ impl BorrowKind {
}
}
/// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
/// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
/// mutability that is stronger than necessary so that it at least *would permit* the borrow in
/// question.
pub fn to_mutbl_lossy(self) -> ast::Mutability {
/*!
* Returns a mutability `m` such that an `&m T` pointer could
* be used to obtain this borrow kind. Because borrow kinds
* are richer than mutabilities, we sometimes have to pick a
* mutability that is stronger than necessary so that it at
* least *would permit* the borrow in question.
*/
match self {
MutBorrow => ast::MutMutable,
ImmBorrow => ast::MutImmutable,
@ -5959,6 +5886,8 @@ impl<'tcx> AutoDerefRef<'tcx> {
}
}
/// Replace any late-bound regions bound in `value` with free variants attached to scope-id
/// `scope_id`.
pub fn liberate_late_bound_regions<'tcx, HR>(
tcx: &ty::ctxt<'tcx>,
scope: region::CodeExtent,
@ -5966,31 +5895,23 @@ pub fn liberate_late_bound_regions<'tcx, HR>(
-> HR
where HR : HigherRankedFoldable<'tcx>
{
/*!
* Replace any late-bound regions bound in `value` with free variants
* attached to scope-id `scope_id`.
*/
replace_late_bound_regions(
tcx, value,
|br, _| ty::ReFree(ty::FreeRegion{scope: scope, bound_region: br})).0
}
/// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
/// method lookup and a few other places where precise region relationships are not required.
pub fn erase_late_bound_regions<'tcx, HR>(
tcx: &ty::ctxt<'tcx>,
value: &HR)
-> HR
where HR : HigherRankedFoldable<'tcx>
{
/*!
* Replace any late-bound regions bound in `value` with `'static`.
* Useful in trans but also method lookup and a few other places
* where precise region relationships are not required.
*/
replace_late_bound_regions(tcx, value, |_, _| ty::ReStatic).0
}
/// Replaces the late-bound-regions in `value` that are bound by `value`.
pub fn replace_late_bound_regions<'tcx, HR>(
tcx: &ty::ctxt<'tcx>,
value: &HR,
@ -5998,10 +5919,6 @@ pub fn replace_late_bound_regions<'tcx, HR>(
-> (HR, FnvHashMap<ty::BoundRegion,ty::Region>)
where HR : HigherRankedFoldable<'tcx>
{
/*!
* Replaces the late-bound-regions in `value` that are bound by `value`.
*/
debug!("replace_late_bound_regions({})", value.repr(tcx));
let mut map = FnvHashMap::new();

View File

@ -8,33 +8,31 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Generalized type folding mechanism. The setup is a bit convoluted
* but allows for convenient usage. Let T be an instance of some
* "foldable type" (one which implements `TypeFoldable`) and F be an
* instance of a "folder" (a type which implements `TypeFolder`). Then
* the setup is intended to be:
*
* T.fold_with(F) --calls--> F.fold_T(T) --calls--> super_fold_T(F, T)
*
* This way, when you define a new folder F, you can override
* `fold_T()` to customize the behavior, and invoke `super_fold_T()`
* to get the original behavior. Meanwhile, to actually fold
* something, you can just write `T.fold_with(F)`, which is
* convenient. (Note that `fold_with` will also transparently handle
* things like a `Vec<T>` where T is foldable and so on.)
*
* In this ideal setup, the only function that actually *does*
* anything is `super_fold_T`, which traverses the type `T`. Moreover,
* `super_fold_T` should only ever call `T.fold_with()`.
*
* In some cases, we follow a degenerate pattern where we do not have
* a `fold_T` nor `super_fold_T` method. Instead, `T.fold_with`
* traverses the structure directly. This is suboptimal because the
* behavior cannot be overriden, but it's much less work to implement.
* If you ever *do* need an override that doesn't exist, it's not hard
* to convert the degenerate pattern into the proper thing.
*/
//! Generalized type folding mechanism. The setup is a bit convoluted
//! but allows for convenient usage. Let T be an instance of some
//! "foldable type" (one which implements `TypeFoldable`) and F be an
//! instance of a "folder" (a type which implements `TypeFolder`). Then
//! the setup is intended to be:
//!
//! T.fold_with(F) --calls--> F.fold_T(T) --calls--> super_fold_T(F, T)
//!
//! This way, when you define a new folder F, you can override
//! `fold_T()` to customize the behavior, and invoke `super_fold_T()`
//! to get the original behavior. Meanwhile, to actually fold
//! something, you can just write `T.fold_with(F)`, which is
//! convenient. (Note that `fold_with` will also transparently handle
//! things like a `Vec<T>` where T is foldable and so on.)
//!
//! In this ideal setup, the only function that actually *does*
//! anything is `super_fold_T`, which traverses the type `T`. Moreover,
//! `super_fold_T` should only ever call `T.fold_with()`.
//!
//! In some cases, we follow a degenerate pattern where we do not have
//! a `fold_T` nor `super_fold_T` method. Instead, `T.fold_with`
//! traverses the structure directly. This is suboptimal because the
//! behavior cannot be overriden, but it's much less work to implement.
//! If you ever *do* need an override that doesn't exist, it's not hard
//! to convert the degenerate pattern into the proper thing.
use middle::subst;
use middle::subst::VecPerParamSpace;
@ -701,9 +699,7 @@ pub fn super_fold_obligation<'tcx, T:TypeFolder<'tcx>>(this: &mut T,
///////////////////////////////////////////////////////////////////////////
// Higher-ranked things
/**
* Designates a "binder" for late-bound regions.
*/
/// Designates a "binder" for late-bound regions.
pub trait HigherRankedFoldable<'tcx>: Repr<'tcx> {
/// Folds the contents of `self`, ignoring the region binder created
/// by `self`.

View File

@ -8,46 +8,44 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Conversion from AST representation of types to the ty.rs
* representation. The main routine here is `ast_ty_to_ty()`: each use
* is parameterized by an instance of `AstConv` and a `RegionScope`.
*
* The parameterization of `ast_ty_to_ty()` is because it behaves
* somewhat differently during the collect and check phases,
* particularly with respect to looking up the types of top-level
* items. In the collect phase, the crate context is used as the
* `AstConv` instance; in this phase, the `get_item_ty()` function
* triggers a recursive call to `ty_of_item()` (note that
* `ast_ty_to_ty()` will detect recursive types and report an error).
* In the check phase, when the FnCtxt is used as the `AstConv`,
* `get_item_ty()` just looks up the item type in `tcx.tcache`.
*
* The `RegionScope` trait controls what happens when the user does
* not specify a region in some location where a region is required
* (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`).
* See the `rscope` module for more details.
*
* Unlike the `AstConv` trait, the region scope can change as we descend
* the type. This is to accommodate the fact that (a) fn types are binding
* scopes and (b) the default region may change. To understand case (a),
* consider something like:
*
* type foo = { x: &a.int, y: |&a.int| }
*
* The type of `x` is an error because there is no region `a` in scope.
* In the type of `y`, however, region `a` is considered a bound region
* as it does not already appear in scope.
*
* Case (b) says that if you have a type:
* type foo<'a> = ...;
* type bar = fn(&foo, &a.foo)
* The fully expanded version of type bar is:
* type bar = fn(&'foo &, &a.foo<'a>)
* Note that the self region for the `foo` defaulted to `&` in the first
* case but `&a` in the second. Basically, defaults that appear inside
* an rptr (`&r.T`) use the region `r` that appears in the rptr.
*/
//! Conversion from AST representation of types to the ty.rs
//! representation. The main routine here is `ast_ty_to_ty()`: each use
//! is parameterized by an instance of `AstConv` and a `RegionScope`.
//!
//! The parameterization of `ast_ty_to_ty()` is because it behaves
//! somewhat differently during the collect and check phases,
//! particularly with respect to looking up the types of top-level
//! items. In the collect phase, the crate context is used as the
//! `AstConv` instance; in this phase, the `get_item_ty()` function
//! triggers a recursive call to `ty_of_item()` (note that
//! `ast_ty_to_ty()` will detect recursive types and report an error).
//! In the check phase, when the FnCtxt is used as the `AstConv`,
//! `get_item_ty()` just looks up the item type in `tcx.tcache`.
//!
//! The `RegionScope` trait controls what happens when the user does
//! not specify a region in some location where a region is required
//! (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`).
//! See the `rscope` module for more details.
//!
//! Unlike the `AstConv` trait, the region scope can change as we descend
//! the type. This is to accommodate the fact that (a) fn types are binding
//! scopes and (b) the default region may change. To understand case (a),
//! consider something like:
//!
//! type foo = { x: &a.int, y: |&a.int| }
//!
//! The type of `x` is an error because there is no region `a` in scope.
//! In the type of `y`, however, region `a` is considered a bound region
//! as it does not already appear in scope.
//!
//! Case (b) says that if you have a type:
//! type foo<'a> = ...;
//! type bar = fn(&foo, &a.foo)
//! The fully expanded version of type bar is:
//! type bar = fn(&'foo &, &a.foo<'a>)
//! Note that the self region for the `foo` defaulted to `&` in the first
//! case but `&a` in the second. Basically, defaults that appear inside
//! an rptr (`&r.T`) use the region `r` that appears in the rptr.
use middle::const_eval;
use middle::def;
use middle::resolve_lifetime as rl;
@ -59,8 +57,9 @@ use middle::typeck::rscope::{UnelidableRscope, RegionScope, SpecificRscope,
ShiftedRscope, BindingRscope};
use middle::typeck::rscope;
use middle::typeck::TypeAndSubsts;
use util::common::ErrorReported;
use util::nodemap::DefIdMap;
use util::ppaux::{Repr, UserString};
use util::ppaux::{mod, Repr, UserString};
use std::rc::Rc;
use std::iter::AdditiveIterator;
@ -201,6 +200,8 @@ pub fn opt_ast_region_to_region<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
r
}
/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
/// returns an appropriate set of substitutions for this particular reference to `I`.
fn ast_path_substs_for_ty<'tcx,AC,RS>(
this: &AC,
rscope: &RS,
@ -211,12 +212,6 @@ fn ast_path_substs_for_ty<'tcx,AC,RS>(
-> Substs<'tcx>
where AC: AstConv<'tcx>, RS: RegionScope
{
/*!
* Given a path `path` that refers to an item `I` with the
* declared generics `decl_generics`, returns an appropriate
* set of substitutions for this particular reference to `I`.
*/
let tcx = this.tcx();
// ast_path_substs() is only called to convert paths that are
@ -422,6 +417,9 @@ pub fn instantiate_poly_trait_ref<'tcx,AC,RS>(
instantiate_trait_ref(this, rscope, &ast_trait_ref.trait_ref, self_ty)
}
/// Instantiates the path for the given trait reference, assuming that it's bound to a valid trait
/// type. Returns the def_id for the defining trait. Fails if the type is a type other than a trait
/// type.
pub fn instantiate_trait_ref<'tcx,AC,RS>(this: &AC,
rscope: &RS,
ast_trait_ref: &ast::TraitRef,
@ -430,12 +428,6 @@ pub fn instantiate_trait_ref<'tcx,AC,RS>(this: &AC,
where AC: AstConv<'tcx>,
RS: RegionScope
{
/*!
* Instantiates the path for the given trait reference, assuming that
* it's bound to a valid trait type. Returns the def_id for the defining
* trait. Fails if the type is a type other than a trait type.
*/
match lookup_def_tcx(this.tcx(),
ast_trait_ref.path.span,
ast_trait_ref.ref_id) {
@ -585,7 +577,7 @@ fn check_path_args(tcx: &ty::ctxt,
pub fn ast_ty_to_prim_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ast_ty: &ast::Ty)
-> Option<Ty<'tcx>> {
match ast_ty.node {
ast::TyPath(ref path, _, id) => {
ast::TyPath(ref path, id) => {
let a_def = match tcx.def_map.borrow().get(&id) {
None => {
tcx.sess.span_bug(ast_ty.span,
@ -642,7 +634,7 @@ pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
}
match ast_ty.node {
ast::TyPath(ref path, _, id) => {
ast::TyPath(ref path, id) => {
let a_def = match this.tcx().def_map.borrow().get(&id) {
None => {
this.tcx()
@ -682,64 +674,92 @@ pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
}
}
// Handle `~`, `Box`, and `&` being able to mean strs and vecs.
// If a_seq_ty is a str or a vec, make it a str/vec.
// Also handle first-class trait types.
fn mk_pointer<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
a_seq_mutbl: ast::Mutability,
a_seq_ty: &ast::Ty,
region: ty::Region,
constr: |Ty<'tcx>| -> Ty<'tcx>)
-> Ty<'tcx>
fn ast_ty_to_trait_ref<'tcx,AC,RS>(this: &AC,
rscope: &RS,
ty: &ast::Ty,
bounds: &[ast::TyParamBound])
-> Result<ty::TraitRef<'tcx>, ErrorReported>
where AC : AstConv<'tcx>, RS : RegionScope
{
let tcx = this.tcx();
/*!
* In a type like `Foo + Send`, we want to wait to collect the
* full set of bounds before we make the object type, because we
* need them to infer a region bound. (For example, if we tried
* made a type from just `Foo`, then it wouldn't be enough to
* infer a 'static bound, and hence the user would get an error.)
* So this function is used when we're dealing with a sum type to
* convert the LHS. It only accepts a type that refers to a trait
* name, and reports an error otherwise.
*/
debug!("mk_pointer(region={}, a_seq_ty={})",
region,
a_seq_ty.repr(tcx));
match a_seq_ty.node {
ast::TyVec(ref ty) => {
let ty = ast_ty_to_ty(this, rscope, &**ty);
return constr(ty::mk_vec(tcx, ty, None));
}
ast::TyPath(ref path, ref opt_bounds, id) => {
// Note that the "bounds must be empty if path is not a trait"
// restriction is enforced in the below case for ty_path, which
// will run after this as long as the path isn't a trait.
match tcx.def_map.borrow().get(&id) {
Some(&def::DefPrimTy(ast::TyStr)) => {
check_path_args(tcx, path, NO_TPS | NO_REGIONS);
return ty::mk_str_slice(tcx, region, a_seq_mutbl);
}
match ty.node {
ast::TyPath(ref path, id) => {
match this.tcx().def_map.borrow().get(&id) {
Some(&def::DefTrait(trait_def_id)) => {
let result = ast_path_to_trait_ref(this,
rscope,
trait_def_id,
None,
path);
let empty_vec = [];
let bounds = match *opt_bounds { None => empty_vec.as_slice(),
Some(ref bounds) => bounds.as_slice() };
let existential_bounds = conv_existential_bounds(this,
rscope,
path.span,
&[Rc::new(result.clone())],
bounds);
let tr = ty::mk_trait(tcx,
result,
existential_bounds);
return ty::mk_rptr(tcx, region, ty::mt{mutbl: a_seq_mutbl, ty: tr});
return Ok(ast_path_to_trait_ref(this,
rscope,
trait_def_id,
None,
path));
}
_ => {
span_err!(this.tcx().sess, ty.span, E0172, "expected a reference to a trait");
Err(ErrorReported)
}
_ => {}
}
}
_ => {}
_ => {
span_err!(this.tcx().sess, ty.span, E0171,
"expected a path on the left-hand side of `+`, not `{}`",
pprust::ty_to_string(ty));
match ty.node {
ast::TyRptr(None, ref mut_ty) => {
span_note!(this.tcx().sess, ty.span,
"perhaps you meant `&{}({} +{})`? (per RFC 248)",
ppaux::mutability_to_string(mut_ty.mutbl),
pprust::ty_to_string(&*mut_ty.ty),
pprust::bounds_to_string(bounds));
}
ast::TyRptr(Some(ref lt), ref mut_ty) => {
span_note!(this.tcx().sess, ty.span,
"perhaps you meant `&{} {}({} +{})`? (per RFC 248)",
pprust::lifetime_to_string(lt),
ppaux::mutability_to_string(mut_ty.mutbl),
pprust::ty_to_string(&*mut_ty.ty),
pprust::bounds_to_string(bounds));
}
_ => {
span_note!(this.tcx().sess, ty.span,
"perhaps you forgot parentheses? (per RFC 248)");
}
}
Err(ErrorReported)
}
}
constr(ast_ty_to_ty(this, rscope, a_seq_ty))
}
fn trait_ref_to_object_type<'tcx,AC,RS>(this: &AC,
rscope: &RS,
span: Span,
trait_ref: ty::TraitRef<'tcx>,
bounds: &[ast::TyParamBound])
-> Ty<'tcx>
where AC : AstConv<'tcx>, RS : RegionScope
{
let existential_bounds = conv_existential_bounds(this,
rscope,
span,
&[Rc::new(trait_ref.clone())],
bounds);
let result = ty::mk_trait(this.tcx(), trait_ref, existential_bounds);
debug!("trait_ref_to_object_type: result={}",
result.repr(this.tcx()));
result
}
fn qpath_to_ty<'tcx,AC,RS>(this: &AC,
@ -806,6 +826,17 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
ast::TyVec(ref ty) => {
ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty), None)
}
ast::TyObjectSum(ref ty, ref bounds) => {
match ast_ty_to_trait_ref(this, rscope, &**ty, bounds.as_slice()) {
Ok(trait_ref) => {
trait_ref_to_object_type(this, rscope, ast_ty.span,
trait_ref, bounds.as_slice())
}
Err(ErrorReported) => {
ty::mk_err()
}
}
}
ast::TyPtr(ref mt) => {
ty::mk_ptr(tcx, ty::mt {
ty: ast_ty_to_ty(this, rscope, &*mt.ty),
@ -815,8 +846,8 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
ast::TyRptr(ref region, ref mt) => {
let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region);
debug!("ty_rptr r={}", r.repr(this.tcx()));
mk_pointer(this, rscope, mt.mutbl, &*mt.ty, r,
|ty| ty::mk_rptr(tcx, r, ty::mt {ty: ty, mutbl: mt.mutbl}))
let t = ast_ty_to_ty(this, rscope, &*mt.ty);
ty::mk_rptr(tcx, r, ty::mt {ty: t, mutbl: mt.mutbl})
}
ast::TyTup(ref fields) => {
let flds = fields.iter()
@ -874,7 +905,7 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
ast::TyPolyTraitRef(ref bounds) => {
conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds.as_slice())
}
ast::TyPath(ref path, ref bounds, id) => {
ast::TyPath(ref path, id) => {
let a_def = match tcx.def_map.borrow().get(&id) {
None => {
tcx.sess
@ -884,35 +915,16 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
}
Some(&d) => d
};
// Kind bounds on path types are only supported for traits.
match a_def {
// But don't emit the error if the user meant to do a trait anyway.
def::DefTrait(..) => { },
_ if bounds.is_some() =>
tcx.sess.span_err(ast_ty.span,
"kind bounds can only be used on trait types"),
_ => { },
}
match a_def {
def::DefTrait(trait_def_id) => {
// N.B. this case overlaps somewhat with
// TyObjectSum, see that fn for details
let result = ast_path_to_trait_ref(this,
rscope,
trait_def_id,
None,
path);
let empty_bounds: &[ast::TyParamBound] = &[];
let ast_bounds = match *bounds {
Some(ref b) => b.as_slice(),
None => empty_bounds
};
let bounds = conv_existential_bounds(this,
rscope,
ast_ty.span,
&[Rc::new(result.clone())],
ast_bounds);
let result_ty = ty::mk_trait(tcx, result, bounds);
debug!("ast_ty_to_ty: result_ty={}", result_ty.repr(this.tcx()));
result_ty
trait_ref_to_object_type(this, rscope, path.span, result, &[])
}
def::DefTy(did, _) | def::DefStruct(did) => {
ast_path_to_ty(this, rscope, did, path).ty
@ -1318,6 +1330,10 @@ pub fn ty_of_closure<'tcx, AC: AstConv<'tcx>>(
}
}
/// Given an existential type like `Foo+'a+Bar`, this routine converts the `'a` and `Bar` intos an
/// `ExistentialBounds` struct. The `main_trait_refs` argument specifies the `Foo` -- it is absent
/// for closures. Eventually this should all be normalized, I think, so that there is no "main
/// trait ref" and instead we just have a flat list of bounds as the existential type.
pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
@ -1326,16 +1342,6 @@ pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
ast_bounds: &[ast::TyParamBound])
-> ty::ExistentialBounds
{
/*!
* Given an existential type like `Foo+'a+Bar`, this routine
* converts the `'a` and `Bar` intos an `ExistentialBounds`
* struct. The `main_trait_refs` argument specifies the `Foo` --
* it is absent for closures. Eventually this should all be
* normalized, I think, so that there is no "main trait ref" and
* instead we just have a flat list of bounds as the existential
* type.
*/
let ast_bound_refs: Vec<&ast::TyParamBound> =
ast_bounds.iter().collect();
@ -1432,6 +1438,10 @@ pub fn conv_existential_bounds_from_partitioned_bounds<'tcx, AC, RS>(
}
}
/// Given the bounds on a type parameter / existential type, determines what single region bound
/// (if any) we can use to summarize this type. The basic idea is that we will use the bound the
/// user provided, if they provided one, and otherwise search the supertypes of trait bounds for
/// region bounds. It may be that we can derive no bound at all, in which case we return `None`.
pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
span: Span,
builtin_bounds: ty::BuiltinBounds,
@ -1439,16 +1449,6 @@ pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_bounds: &[Rc<ty::TraitRef<'tcx>>])
-> Option<ty::Region>
{
/*!
* Given the bounds on a type parameter / existential type,
* determines what single region bound (if any) we can use to
* summarize this type. The basic idea is that we will use the
* bound the user provided, if they provided one, and otherwise
* search the supertypes of trait bounds for region bounds. It may
* be that we can derive no bound at all, in which case we return
* `None`.
*/
if region_bounds.len() > 1 {
tcx.sess.span_err(
region_bounds[1].span,
@ -1495,6 +1495,9 @@ pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
return Some(r);
}
/// A version of `compute_opt_region_bound` for use where some region bound is required
/// (existential types, basically). Reports an error if no region bound can be derived and we are
/// in an `rscope` that does not provide a default.
fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
@ -1504,13 +1507,6 @@ fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
trait_bounds: &[Rc<ty::TraitRef<'tcx>>])
-> ty::Region
{
/*!
* A version of `compute_opt_region_bound` for use where some
* region bound is required (existential types,
* basically). Reports an error if no region bound can be derived
* and we are in an `rscope` that does not provide a default.
*/
match compute_opt_region_bound(this.tcx(), span, builtin_bounds,
region_bounds, trait_bounds) {
Some(r) => r,
@ -1534,17 +1530,13 @@ pub struct PartitionedBounds<'a> {
pub region_bounds: Vec<&'a ast::Lifetime>,
}
/// Divides a list of bounds from the AST into three groups: builtin bounds (Copy, Sized etc),
/// general trait bounds, and region bounds.
pub fn partition_bounds<'a>(tcx: &ty::ctxt,
_span: Span,
ast_bounds: &'a [&ast::TyParamBound])
-> PartitionedBounds<'a>
{
/*!
* Divides a list of bounds from the AST into three groups:
* builtin bounds (Copy, Sized etc), general trait bounds,
* and region bounds.
*/
let mut builtin_bounds = ty::empty_builtin_bounds();
let mut region_bounds = Vec::new();
let mut trait_bounds = Vec::new();

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Code for type-checking closure expressions.
*/
//! Code for type-checking closure expressions.
use super::check_fn;
use super::{Expectation, ExpectCastableToType, ExpectHasType, NoExpectation};

View File

@ -189,22 +189,17 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
///////////////////////////////////////////////////////////////////////////
//
/// Returns a set of substitutions for the method *receiver* where all type and region
/// parameters are instantiated with fresh variables. This substitution does not include any
/// parameters declared on the method itself.
///
/// Note that this substitution may include late-bound regions from the impl level. If so,
/// these are instantiated later in the `instantiate_method_sig` routine.
fn fresh_receiver_substs(&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>)
-> (subst::Substs<'tcx>, MethodOrigin<'tcx>)
{
/*!
* Returns a set of substitutions for the method *receiver*
* where all type and region parameters are instantiated with
* fresh variables. This substitution does not include any
* parameters declared on the method itself.
*
* Note that this substitution may include late-bound regions
* from the impl level. If so, these are instantiated later in
* the `instantiate_method_sig` routine.
*/
match pick.kind {
probe::InherentImplPick(impl_def_id) => {
assert!(ty::impl_trait_ref(self.tcx(), impl_def_id).is_none(),
@ -478,14 +473,11 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
///////////////////////////////////////////////////////////////////////////
// RECONCILIATION
/// When we select a method with an `&mut self` receiver, we have to go convert any
/// auto-derefs, indices, etc from `Deref` and `Index` into `DerefMut` and `IndexMut`
/// respectively.
fn fixup_derefs_on_method_receiver_if_necessary(&self,
method_callee: &MethodCallee) {
/*!
* When we select a method with an `&mut self` receiver, we have to go
* convert any auto-derefs, indices, etc from `Deref` and `Index` into
* `DerefMut` and `IndexMut` respectively.
*/
let sig = match method_callee.ty.sty {
ty::ty_bare_fn(ref f) => f.sig.clone(),
ty::ty_closure(ref f) => f.sig.clone(),

View File

@ -8,119 +8,114 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Method lookup
Method lookup can be rather complex due to the interaction of a number
of factors, such as self types, autoderef, trait lookup, etc. This
file provides an overview of the process. More detailed notes are in
the code itself, naturally.
One way to think of method lookup is that we convert an expression of
the form:
receiver.method(...)
into a more explicit UFCS form:
Trait::method(ADJ(receiver), ...) // for a trait call
ReceiverType::method(ADJ(receiver), ...) // for an inherent method call
Here `ADJ` is some kind of adjustment, which is typically a series of
autoderefs and then possibly an autoref (e.g., `&**receiver`). However
we sometimes do other adjustments and coercions along the way, in
particular unsizing (e.g., converting from `[T, ..n]` to `[T]`).
## The Two Phases
Method lookup is divided into two major phases: probing (`probe.rs`)
and confirmation (`confirm.rs`). The probe phase is when we decide
what method to call and how to adjust the receiver. The confirmation
phase "applies" this selection, updating the side-tables, unifying
type variables, and otherwise doing side-effectful things.
One reason for this division is to be more amenable to caching. The
probe phase produces a "pick" (`probe::Pick`), which is designed to be
cacheable across method-call sites. Therefore, it does not include
inference variables or other information.
## Probe phase
The probe phase (`probe.rs`) decides what method is being called and
how to adjust the receiver.
### Steps
The first thing that the probe phase does is to create a series of
*steps*. This is done by progressively dereferencing the receiver type
until it cannot be deref'd anymore, as well as applying an optional
"unsize" step. So if the receiver has type `Rc<Box<[T, ..3]>>`, this
might yield:
Rc<Box<[T, ..3]>>
Box<[T, ..3]>
[T, ..3]
[T]
### Candidate assembly
We then search along those steps to create a list of *candidates*. A
`Candidate` is a method item that might plausibly be the method being
invoked. For each candidate, we'll derive a "transformed self type"
that takes into account explicit self.
Candidates are grouped into two kinds, inherent and extension.
**Inherent candidates** are those that are derived from the
type of the receiver itself. So, if you have a receiver of some
nominal type `Foo` (e.g., a struct), any methods defined within an
impl like `impl Foo` are inherent methods. Nothing needs to be
imported to use an inherent method, they are associated with the type
itself (note that inherent impls can only be defined in the same
module as the type itself).
FIXME: Inherent candidates are not always derived from impls. If you
have a trait object, such as a value of type `Box<ToString>`, then the
trait methods (`to_string()`, in this case) are inherently associated
with it. Another case is type parameters, in which case the methods of
their bounds are inherent. However, this part of the rules is subject
to change: when DST's "impl Trait for Trait" is complete, trait object
dispatch could be subsumed into trait matching, and the type parameter
behavior should be reconsidered in light of where clauses.
**Extension candidates** are derived from imported traits. If I have
the trait `ToString` imported, and I call `to_string()` on a value of
type `T`, then we will go off to find out whether there is an impl of
`ToString` for `T`. These kinds of method calls are called "extension
methods". They can be defined in any module, not only the one that
defined `T`. Furthermore, you must import the trait to call such a
method.
So, let's continue our example. Imagine that we were calling a method
`foo` with the receiver `Rc<Box<[T, ..3]>>` and there is a trait `Foo`
that defines it with `&self` for the type `Rc<U>` as well as a method
on the type `Box` that defines `Foo` but with `&mut self`. Then we
might have two candidates:
&Rc<Box<[T, ..3]>> from the impl of `Foo` for `Rc<U>` where `U=Box<T, ..3]>
&mut Box<[T, ..3]>> from the inherent impl on `Box<U>` where `U=[T, ..3]`
### Candidate search
Finally, to actually pick the method, we will search down the steps,
trying to match the receiver type against the candidate types. At
each step, we also consider an auto-ref and auto-mut-ref to see whether
that makes any of the candidates match. We pick the first step where
we find a match.
In the case of our example, the first step is `Rc<Box<[T, ..3]>>`,
which does not itself match any candidate. But when we autoref it, we
get the type `&Rc<Box<[T, ..3]>>` which does match. We would then
recursively consider all where-clauses that appear on the impl: if
those match (or we cannot rule out that they do), then this is the
method we would pick. Otherwise, we would continue down the series of
steps.
*/
//! # Method lookup
//!
//! Method lookup can be rather complex due to the interaction of a number
//! of factors, such as self types, autoderef, trait lookup, etc. This
//! file provides an overview of the process. More detailed notes are in
//! the code itself, naturally.
//!
//! One way to think of method lookup is that we convert an expression of
//! the form:
//!
//! receiver.method(...)
//!
//! into a more explicit UFCS form:
//!
//! Trait::method(ADJ(receiver), ...) // for a trait call
//! ReceiverType::method(ADJ(receiver), ...) // for an inherent method call
//!
//! Here `ADJ` is some kind of adjustment, which is typically a series of
//! autoderefs and then possibly an autoref (e.g., `&**receiver`). However
//! we sometimes do other adjustments and coercions along the way, in
//! particular unsizing (e.g., converting from `[T, ..n]` to `[T]`).
//!
//! ## The Two Phases
//!
//! Method lookup is divided into two major phases: probing (`probe.rs`)
//! and confirmation (`confirm.rs`). The probe phase is when we decide
//! what method to call and how to adjust the receiver. The confirmation
//! phase "applies" this selection, updating the side-tables, unifying
//! type variables, and otherwise doing side-effectful things.
//!
//! One reason for this division is to be more amenable to caching. The
//! probe phase produces a "pick" (`probe::Pick`), which is designed to be
//! cacheable across method-call sites. Therefore, it does not include
//! inference variables or other information.
//!
//! ## Probe phase
//!
//! The probe phase (`probe.rs`) decides what method is being called and
//! how to adjust the receiver.
//!
//! ### Steps
//!
//! The first thing that the probe phase does is to create a series of
//! *steps*. This is done by progressively dereferencing the receiver type
//! until it cannot be deref'd anymore, as well as applying an optional
//! "unsize" step. So if the receiver has type `Rc<Box<[T, ..3]>>`, this
//! might yield:
//!
//! Rc<Box<[T, ..3]>>
//! Box<[T, ..3]>
//! [T, ..3]
//! [T]
//!
//! ### Candidate assembly
//!
//! We then search along those steps to create a list of *candidates*. A
//! `Candidate` is a method item that might plausibly be the method being
//! invoked. For each candidate, we'll derive a "transformed self type"
//! that takes into account explicit self.
//!
//! Candidates are grouped into two kinds, inherent and extension.
//!
//! **Inherent candidates** are those that are derived from the
//! type of the receiver itself. So, if you have a receiver of some
//! nominal type `Foo` (e.g., a struct), any methods defined within an
//! impl like `impl Foo` are inherent methods. Nothing needs to be
//! imported to use an inherent method, they are associated with the type
//! itself (note that inherent impls can only be defined in the same
//! module as the type itself).
//!
//! FIXME: Inherent candidates are not always derived from impls. If you
//! have a trait object, such as a value of type `Box<ToString>`, then the
//! trait methods (`to_string()`, in this case) are inherently associated
//! with it. Another case is type parameters, in which case the methods of
//! their bounds are inherent. However, this part of the rules is subject
//! to change: when DST's "impl Trait for Trait" is complete, trait object
//! dispatch could be subsumed into trait matching, and the type parameter
//! behavior should be reconsidered in light of where clauses.
//!
//! **Extension candidates** are derived from imported traits. If I have
//! the trait `ToString` imported, and I call `to_string()` on a value of
//! type `T`, then we will go off to find out whether there is an impl of
//! `ToString` for `T`. These kinds of method calls are called "extension
//! methods". They can be defined in any module, not only the one that
//! defined `T`. Furthermore, you must import the trait to call such a
//! method.
//!
//! So, let's continue our example. Imagine that we were calling a method
//! `foo` with the receiver `Rc<Box<[T, ..3]>>` and there is a trait `Foo`
//! that defines it with `&self` for the type `Rc<U>` as well as a method
//! on the type `Box` that defines `Foo` but with `&mut self`. Then we
//! might have two candidates:
//!
//! &Rc<Box<[T, ..3]>> from the impl of `Foo` for `Rc<U>` where `U=Box<[T, ..3]>`
//! &mut Box<[T, ..3]> from the inherent impl on `Box<U>` where `U=[T, ..3]`
//!
//! ### Candidate search
//!
//! Finally, to actually pick the method, we will search down the steps,
//! trying to match the receiver type against the candidate types. At
//! each step, we also consider an auto-ref and auto-mut-ref to see whether
//! that makes any of the candidates match. We pick the first step where
//! we find a match.
//!
//! In the case of our example, the first step is `Rc<Box<[T, ..3]>>`,
//! which does not itself match any candidate. But when we autoref it, we
//! get the type `&Rc<Box<[T, ..3]>>` which does match. We would then
//! recursively consider all where-clauses that appear on the impl: if
//! those match (or we cannot rule out that they do), then this is the
//! method we would pick. Otherwise, we would continue down the series of
//! steps.

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Method lookup: the secret sauce of Rust. See `doc.rs`. */
//! Method lookup: the secret sauce of Rust. See `doc.rs`.
use middle::subst;
use middle::subst::{Subst};
@ -56,6 +56,7 @@ pub enum CandidateSource {
type MethodIndex = uint; // just for doc purposes
/// Determines whether the type `self_ty` supports a method name `method_name` or not.
pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
method_name: ast::Name,
@ -63,10 +64,6 @@ pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
call_expr_id: ast::NodeId)
-> bool
{
/*!
* Determines whether the type `self_ty` supports a method name `method_name` or not.
*/
match probe::probe(fcx, span, method_name, self_ty, call_expr_id) {
Ok(_) => true,
Err(NoMatch(_)) => false,
@ -74,6 +71,20 @@ pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Performs method lookup. If lookup is successful, it will return the callee and store an
/// appropriate adjustment for the self-expr. In some cases it may report an error (e.g., invoking
/// the `drop` method).
///
/// # Arguments
///
/// Given a method call like `foo.bar::<T1,...Tn>(...)`:
///
/// * `fcx`: the surrounding `FnCtxt` (!)
/// * `span`: the span for the method call
/// * `method_name`: the name of the method being called (`bar`)
/// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
/// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
/// * `self_expr`: the self expression (`foo`)
pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
method_name: ast::Name,
@ -83,23 +94,6 @@ pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
self_expr: &ast::Expr)
-> Result<MethodCallee<'tcx>, MethodError>
{
/*!
* Performs method lookup. If lookup is successful, it will return the callee
* and store an appropriate adjustment for the self-expr. In some cases it may
* report an error (e.g., invoking the `drop` method).
*
* # Arguments
*
* Given a method call like `foo.bar::<T1,...Tn>(...)`:
*
* - `fcx`: the surrounding `FnCtxt` (!)
* - `span`: the span for the method call
* - `method_name`: the name of the method being called (`bar`)
* - `self_ty`: the (unadjusted) type of the self expression (`foo`)
* - `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
* - `self_expr`: the self expression (`foo`)
*/
debug!("lookup(method_name={}, self_ty={}, call_expr={}, self_expr={})",
method_name.repr(fcx.tcx()),
self_ty.repr(fcx.tcx()),
@ -124,6 +118,15 @@ pub fn lookup_in_trait<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
self_ty, opt_input_types)
}
/// `lookup_in_trait_adjusted` is used for overloaded operators. It does a very narrow slice of
/// what the normal probe/confirm path does. In particular, it doesn't really do any probing: it
/// simply constructs an obligation for a particular trait with the given self-type and checks
/// whether that trait is implemented.
///
/// FIXME(#18741) -- It seems likely that we can consolidate some of this code with the other
/// method-lookup code. In particular, autoderef on index is basically identical to autoderef with
/// normal probes, except that the test also looks for built-in indexing. Also, the second half of
/// this method is basically the same as confirmation.
pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
span: Span,
self_expr: Option<&'a ast::Expr>,
@ -134,21 +137,6 @@ pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
opt_input_types: Option<Vec<Ty<'tcx>>>)
-> Option<MethodCallee<'tcx>>
{
/*!
* `lookup_in_trait_adjusted` is used for overloaded operators. It
* does a very narrow slice of what the normal probe/confirm path
* does. In particular, it doesn't really do any probing: it
* simply constructs an obligation for a particular trait with the
* given self-type and checks whether that trait is implemented.
*
* FIXME(#18741) -- It seems likely that we can consolidate some of this
* code with the other method-lookup code. In particular,
* autoderef on index is basically identical to autoderef with
* normal probes, except that the test also looks for built-in
* indexing. Also, the second half of this method is basically
* the same as confirmation.
*/
debug!("lookup_in_trait_adjusted(self_ty={}, self_expr={}, m_name={}, trait_def_id={})",
self_ty.repr(fcx.tcx()),
self_expr.repr(fcx.tcx()),
@ -408,16 +396,13 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Find method with name `method_name` defined in `trait_def_id` and return it, along with its
/// index (or `None`, if no such method).
fn trait_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method_name: ast::Name)
-> Option<(uint, Rc<ty::Method<'tcx>>)>
{
/*!
* Find method with name `method_name` defined in `trait_def_id` and return it,
* along with its index (or `None`, if no such method).
*/
let trait_items = ty::trait_items(tcx, trait_def_id);
trait_items
.iter()

View File

@ -807,33 +807,26 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
})
}
/// Sometimes we get in a situation where we have multiple probes that are all impls of the
/// same trait, but we don't know which impl to use. In this case, since in all cases the
/// external interface of the method can be determined from the trait, it's ok not to decide.
/// We can basically just collapse all of the probes for various impls into one where-clause
/// probe. This will result in a pending obligation so when more type-info is available we can
/// make the final decision.
///
/// Example (`src/test/run-pass/method-two-trait-defer-resolution-1.rs`):
///
/// ```
/// trait Foo { ... }
/// impl Foo for Vec<int> { ... }
/// impl Foo for Vec<uint> { ... }
/// ```
///
/// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we
/// use, so it's ok to just commit to "using the method from the trait Foo".
fn collapse_candidates_to_trait_pick(&self,
probes: &[&Candidate<'tcx>])
-> Option<Pick<'tcx>> {
/*!
* Sometimes we get in a situation where we have multiple
* probes that are all impls of the same trait, but we don't
* know which impl to use. In this case, since in all cases
* the external interface of the method can be determined from
* the trait, it's ok not to decide. We can basically just
* collapse all of the probes for various impls into one
* where-clause probe. This will result in a pending
* obligation so when more type-info is available we can make
* the final decision.
*
* Example (`src/test/run-pass/method-two-trait-defer-resolution-1.rs`):
*
* ```
* trait Foo { ... }
* impl Foo for Vec<int> { ... }
* impl Foo for Vec<uint> { ... }
* ```
*
* Now imagine the receiver is `Vec<_>`. It doesn't really
* matter at this time which impl we use, so it's ok to just
* commit to "using the method from the trait Foo".
*/
// Do all probes correspond to the same trait?
let trait_data = match probes[0].to_trait_data() {
Some(data) => data,
@ -952,36 +945,27 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
subst::Substs::new(type_vars, region_placeholders)
}
/// Replace late-bound-regions bound by `value` with `'static` using
/// `ty::erase_late_bound_regions`.
///
/// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of
/// method matching. It is reasonable during the probe phase because we don't consider region
/// relationships at all. Therefore, we can just replace all the region variables with 'static
/// rather than creating fresh region variables. This is nice for two reasons:
///
/// 1. Because the numbers of the region variables would otherwise be fairly unique to this
/// particular method call, it winds up creating fewer types overall, which helps for memory
/// usage. (Admittedly, this is a rather small effect, though measurable.)
///
/// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any
/// late-bound regions with 'static. Otherwise, if we were going to replace late-bound
/// regions with actual region variables as is proper, we'd have to ensure that the same
/// region got replaced with the same variable, which requires a bit more coordination
/// and/or tracking the substitution and
/// so forth.
fn erase_late_bound_regions<T>(&self, value: &T) -> T
where T : HigherRankedFoldable<'tcx>
{
/*!
* Replace late-bound-regions bound by `value` with `'static`
* using `ty::erase_late_bound_regions`.
*
* This is only a reasonable thing to do during the *probe*
* phase, not the *confirm* phase, of method matching. It is
* reasonable during the probe phase because we don't consider
* region relationships at all. Therefore, we can just replace
* all the region variables with 'static rather than creating
* fresh region variables. This is nice for two reasons:
*
* 1. Because the numbers of the region variables would
* otherwise be fairly unique to this particular method
* call, it winds up creating fewer types overall, which
* helps for memory usage. (Admittedly, this is a rather
* small effect, though measureable.)
*
* 2. It makes it easier to deal with higher-ranked trait
* bounds, because we can replace any late-bound regions
* with 'static. Otherwise, if we were going to replace
* late-bound regions with actual region variables as is
* proper, we'd have to ensure that the same region got
* replaced with the same variable, which requires a bit
* more coordination and/or tracking the substitution and
* so forth.
*/
ty::erase_late_bound_regions(self.tcx(), value)
}
}
@ -1000,16 +984,13 @@ fn impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
.and_then(|item| item.as_opt_method())
}
/// Find method with name `method_name` defined in `trait_def_id` and return it, along with its
/// index (or `None`, if no such method).
fn trait_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method_name: ast::Name)
-> Option<(uint, Rc<ty::Method<'tcx>>)>
{
/*!
* Find method with name `method_name` defined in `trait_def_id` and return it,
* along with its index (or `None`, if no such method).
*/
let trait_items = ty::trait_items(tcx, trait_def_id);
trait_items
.iter()

View File

@ -486,6 +486,12 @@ impl<'a, 'tcx, 'v> Visitor<'v> for GatherLocalsVisitor<'a, 'tcx> {
}
/// Helper used by check_bare_fn and check_expr_fn. Does the grungy work of checking a function
/// body and returns the function context used for that purpose, since in the case of a fn item
/// there is still a bit more to do.
///
/// * ...
/// * inherited: other fields inherited from the enclosing fn (if any)
fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
fn_style: ast::FnStyle,
fn_style_id: ast::NodeId,
@ -495,16 +501,6 @@ fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
body: &ast::Block,
inherited: &'a Inherited<'a, 'tcx>)
-> FnCtxt<'a, 'tcx> {
/*!
* Helper used by check_bare_fn and check_expr_fn. Does the
* grungy work of checking a function body and returns the
* function context used for that purpose, since in the case of a
* fn item there is still a bit more to do.
*
* - ...
* - inherited: other fields inherited from the enclosing fn (if any)
*/
let tcx = ccx.tcx;
let err_count_on_creation = tcx.sess.err_count();
@ -701,19 +697,17 @@ pub fn check_item(ccx: &CrateCtxt, it: &ast::Item) {
}
}
/// Type checks a method body.
///
/// # Parameters
///
/// * `item_generics`: generics defined on the impl/trait that contains
/// the method
/// * `self_bound`: bound for the `Self` type parameter, if any
/// * `method`: the method definition
fn check_method_body<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
item_generics: &ty::Generics<'tcx>,
method: &ast::Method) {
/*!
* Type checks a method body.
*
* # Parameters
* - `item_generics`: generics defined on the impl/trait that contains
* the method
* - `self_bound`: bound for the `Self` type parameter, if any
* - `method`: the method definition
*/
debug!("check_method_body(item_generics={}, method.id={})",
item_generics.repr(ccx.tcx),
method.id);
@ -897,19 +891,17 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
}
}
/**
* Checks that a method from an impl conforms to the signature of
* the same method as declared in the trait.
*
* # Parameters
*
* - impl_generics: the generics declared on the impl itself (not the method!)
* - impl_m: type of the method we are checking
* - impl_m_span: span to use for reporting errors
* - impl_m_body_id: id of the method body
* - trait_m: the method in the trait
* - trait_to_impl_substs: the substitutions used on the type of the trait
*/
/// Checks that a method from an impl conforms to the signature of
/// the same method as declared in the trait.
///
/// # Parameters
///
/// - impl_generics: the generics declared on the impl itself (not the method!)
/// - impl_m: type of the method we are checking
/// - impl_m_span: span to use for reporting errors
/// - impl_m_body_id: id of the method body
/// - trait_m: the method in the trait
/// - trait_to_impl_substs: the substitutions used on the type of the trait
fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
impl_m: &ty::Method<'tcx>,
impl_m_span: Span,
@ -1222,6 +1214,33 @@ fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
// parameters.
infcx.resolve_regions_and_report_errors();
/// Check that region bounds on impl method are the same as those on the trait. In principle,
/// it could be ok for there to be fewer region bounds on the impl method, but this leads to an
/// annoying corner case that is painful to handle (described below), so for now we can just
/// forbid it.
///
/// Example (see `src/test/compile-fail/regions-bound-missing-bound-in-impl.rs`):
///
/// ```
/// trait Foo<'a> {
/// fn method1<'b>();
/// fn method2<'b:'a>();
/// }
///
/// impl<'a> Foo<'a> for ... {
/// fn method1<'b:'a>() { .. case 1, definitely bad .. }
/// fn method2<'b>() { .. case 2, could be ok .. }
/// }
/// ```
///
/// The "definitely bad" case is case #1. Here, the impl adds an extra constraint not present
/// in the trait.
///
/// The "maybe bad" case is case #2. Here, the impl adds an extra constraint not present in the
/// trait. We could in principle allow this, but it interacts in a complex way with early/late
/// bound resolution of lifetimes. Basically the presence or absence of a lifetime bound
/// affects whether the lifetime is early/late bound, and right now the code breaks if the
/// trait has an early bound lifetime parameter and the method does not.
fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
span: Span,
impl_m: &ty::Method<'tcx>,
@ -1232,39 +1251,6 @@ fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
impl_to_skol_substs: &Substs<'tcx>)
-> bool
{
/*!
Check that region bounds on impl method are the same as those
on the trait. In principle, it could be ok for there to be
fewer region bounds on the impl method, but this leads to an
annoying corner case that is painful to handle (described
below), so for now we can just forbid it.
Example (see
`src/test/compile-fail/regions-bound-missing-bound-in-impl.rs`):
trait Foo<'a> {
fn method1<'b>();
fn method2<'b:'a>();
}
impl<'a> Foo<'a> for ... {
fn method1<'b:'a>() { .. case 1, definitely bad .. }
fn method2<'b>() { .. case 2, could be ok .. }
}
The "definitely bad" case is case #1. Here, the impl adds an
extra constraint not present in the trait.
The "maybe bad" case is case #2. Here, the impl adds an extra
constraint not present in the trait. We could in principle
allow this, but it interacts in a complex way with early/late
bound resolution of lifetimes. Basically the presence or
absence of a lifetime bound affects whether the lifetime is
early/late bound, and right now the code breaks if the trait
has an early bound lifetime parameter and the method does not.
*/
let trait_params = trait_generics.regions.get_slice(subst::FnSpace);
let impl_params = impl_generics.regions.get_slice(subst::FnSpace);
@ -1770,23 +1756,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
/// Returns the type of `def_id` with all generics replaced by fresh type/region variables.
/// Also returns the substitution from the type parameters on `def_id` to the fresh variables.
/// Registers any trait obligations specified on `def_id` at the same time.
///
/// Note that function is only intended to be used with types (notably, not impls). This is
/// because it doesn't do any instantiation of late-bound regions.
pub fn instantiate_type(&self,
span: Span,
def_id: ast::DefId)
-> TypeAndSubsts<'tcx>
{
/*!
* Returns the type of `def_id` with all generics replaced by
* by fresh type/region variables. Also returns the
* substitution from the type parameters on `def_id` to the
* fresh variables. Registers any trait obligations specified
* on `def_id` at the same time.
*
* Note that function is only intended to be used with types
* (notably, not impls). This is because it doesn't do any
* instantiation of late-bound regions.
*/
let polytype =
ty::lookup_item_type(self.tcx(), def_id);
let substs =
@ -1886,26 +1866,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
/// Fetch type of `expr` after applying adjustments that have been recorded in the fcx.
pub fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
/*!
* Fetch type of `expr` after applying adjustments that
* have been recorded in the fcx.
*/
let adjustments = self.inh.adjustments.borrow();
let adjustment = adjustments.get(&expr.id);
self.adjust_expr_ty(expr, adjustment)
}
/// Apply `adjustment` to the type of `expr`
pub fn adjust_expr_ty(&self,
expr: &ast::Expr,
adjustment: Option<&ty::AutoAdjustment<'tcx>>)
-> Ty<'tcx>
{
/*!
* Apply `adjustment` to the type of `expr`
*/
let raw_ty = self.expr_ty(expr);
let raw_ty = self.infcx().shallow_resolve(raw_ty);
ty::adjust_ty(self.tcx(),
@ -2013,16 +1986,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.infcx().report_mismatched_types(sp, e, a, err)
}
/// Registers an obligation for checking later, during regionck, that the type `ty` must
/// outlive the region `r`.
pub fn register_region_obligation(&self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
r: ty::Region)
{
/*!
* Registers an obligation for checking later, during
* regionck, that the type `ty` must outlive the region `r`.
*/
let mut region_obligations = self.inh.region_obligations.borrow_mut();
let region_obligation = RegionObligation { sub_region: r,
sup_type: ty,
@ -2045,31 +2015,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
/// Given a fully substituted set of bounds (`generic_bounds`), and the values with which each
/// type/region parameter was instantiated (`substs`), creates and registers suitable
/// trait/region obligations.
///
/// For example, if there is a function:
///
/// ```
/// fn foo<'a,T:'a>(...)
/// ```
///
/// and a reference:
///
/// ```
/// let f = foo;
/// ```
///
/// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
/// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
pub fn add_obligations_for_parameters(&self,
cause: traits::ObligationCause<'tcx>,
substs: &Substs<'tcx>,
generic_bounds: &ty::GenericBounds<'tcx>)
{
/*!
* Given a fully substituted set of bounds (`generic_bounds`),
* and the values with which each type/region parameter was
* instantiated (`substs`), creates and registers suitable
* trait/region obligations.
*
* For example, if there is a function:
*
* fn foo<'a,T:'a>(...)
*
* and a reference:
*
* let f = foo;
*
* Then we will create a fresh region variable `'$0` and a
* fresh type variable `$1` for `'a` and `T`. This routine
* will add a region obligation `$1:'$0` and register it
* locally.
*/
assert!(!generic_bounds.has_escaping_regions());
debug!("add_obligations_for_parameters(substs={}, generic_bounds={})",
@ -2160,22 +2128,17 @@ pub enum LvaluePreference {
NoPreference
}
/// Executes an autoderef loop for the type `t`. At each step, invokes `should_stop` to decide
/// whether to terminate the loop. Returns the final type and number of derefs that it performed.
///
/// Note: this method does not modify the adjustments table. The caller is responsible for
/// inserting an AutoAdjustment record into the `fcx` using one of the suitable methods.
pub fn autoderef<'a, 'tcx, T>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
base_ty: Ty<'tcx>,
expr_id: Option<ast::NodeId>,
mut lvalue_pref: LvaluePreference,
should_stop: |Ty<'tcx>, uint| -> Option<T>)
-> (Ty<'tcx>, uint, Option<T>) {
/*!
* Executes an autoderef loop for the type `t`. At each step, invokes
* `should_stop` to decide whether to terminate the loop. Returns
* the final type and number of derefs that it performed.
*
* Note: this method does not modify the adjustments table. The caller is
* responsible for inserting an AutoAdjustment record into the `fcx`
* using one of the suitable methods.
*/
let mut t = base_ty;
for autoderefs in range(0, fcx.tcx().sess.recursion_limit.get()) {
let resolved_t = structurally_resolved_type(fcx, sp, t);
@ -2306,19 +2269,14 @@ fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
make_overloaded_lvalue_return_type(fcx, method_call, method)
}
/// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait returns a type of `&T`, but the
/// actual type we assign to the *expression* is `T`. So this function just peels off the return
/// type by one layer to yield `T`. It also inserts the `method-callee` into the method map.
fn make_overloaded_lvalue_return_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
method_call: Option<MethodCall>,
method: Option<MethodCallee<'tcx>>)
-> Option<ty::mt<'tcx>>
{
/*!
* For the overloaded lvalue expressions (`*x`, `x[3]`), the trait
* returns a type of `&T`, but the actual type we assign to the
* *expression* is `T`. So this function just peels off the return
* type by one layer to yield `T`. It also inserts the
* `method-callee` into the method map.
*/
match method {
Some(method) => {
let ref_ty = ty::ty_fn_ret(method.ty);
@ -2380,6 +2338,8 @@ fn autoderef_for_index<'a, 'tcx, T>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Autoderefs `base_expr`, looking for a `Slice` impl. If it finds one, installs the relevant
/// method info and returns the result type (else None).
fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
method_call: MethodCall,
expr: &ast::Expr,
@ -2390,12 +2350,6 @@ fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
mutbl: ast::Mutability)
-> Option<Ty<'tcx>> // return type is result of slice
{
/*!
* Autoderefs `base_expr`, looking for a `Slice` impl. If it
* finds one, installs the relevant method info and returns the
* result type (else None).
*/
let lvalue_pref = match mutbl {
ast::MutMutable => PreferMutLvalue,
ast::MutImmutable => NoPreference
@ -2436,6 +2390,8 @@ fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
})
}
/// Checks for a `Slice` (or `SliceMut`) impl at the relevant level of autoderef. If it finds one,
/// installs method info and returns type of method (else None).
fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
method_call: MethodCall,
expr: &ast::Expr,
@ -2448,12 +2404,6 @@ fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
// result type is type of method being called
-> Option<Ty<'tcx>>
{
/*!
* Checks for a `Slice` (or `SliceMut`) impl at the relevant level
* of autoderef. If it finds one, installs method info and returns
* type of method (else None).
*/
let method = if mutbl == ast::MutMutable {
// Try `SliceMut` first, if preferred.
match fcx.tcx().lang_items.slice_mut_trait() {
@ -2510,6 +2460,10 @@ fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
})
}
/// To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust)
/// `base_expr`, looking for a type which either supports builtin indexing or overloaded indexing.
/// This loop implements one step in that search; the autoderef loop is implemented by
/// `autoderef_for_index`.
fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
method_call: MethodCall,
expr: &ast::Expr,
@ -2519,13 +2473,6 @@ fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
lvalue_pref: LvaluePreference)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
/*!
* To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust)
* `base_expr`, looking for a type which either supports builtin indexing or overloaded
* indexing. This loop implements one step in that search; the autoderef loop is implemented
* by `autoderef_for_index`.
*/
debug!("try_index_step(expr={}, base_expr.id={}, adjusted_ty={}, adjustment={})",
expr.repr(fcx.tcx()),
base_expr.repr(fcx.tcx()),
@ -2712,6 +2659,8 @@ fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Generic function that factors out common logic from function calls, method calls and overloaded
/// operators.
fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
sp: Span,
fn_inputs: &[Ty<'tcx>],
@ -2720,12 +2669,6 @@ fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
deref_args: DerefArgs,
variadic: bool,
tuple_arguments: TupleArgumentsFlag) {
/*!
*
* Generic function that factors out common logic from
* function calls, method calls and overloaded operators.
*/
let tcx = fcx.ccx.tcx;
// Grab the argument types, supplying fresh type variables
@ -5289,6 +5232,15 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Finds the parameters that the user provided and adds them to `substs`. If too many
/// parameters are provided, then reports an error and clears the output vector.
///
/// We clear the output vector because that will cause the `adjust_XXX_parameters()` later to
/// use inference variables. This seems less likely to lead to derived errors.
///
/// Note that we *do not* check for *too few* parameters here. Due to the presence of defaults
/// etc that is more complicated. I wanted however to do the reporting of *too many* parameters
/// here because we can easily use the precise span of the N+1'th parameter.
fn push_explicit_parameters_from_segment_to_substs<'a, 'tcx>(
fcx: &FnCtxt<'a, 'tcx>,
space: subst::ParamSpace,
@ -5298,23 +5250,6 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
segment: &ast::PathSegment,
substs: &mut Substs<'tcx>)
{
/*!
* Finds the parameters that the user provided and adds them
* to `substs`. If too many parameters are provided, then
* reports an error and clears the output vector.
*
* We clear the output vector because that will cause the
* `adjust_XXX_parameters()` later to use inference
* variables. This seems less likely to lead to derived
* errors.
*
* Note that we *do not* check for *too few* parameters here.
* Due to the presence of defaults etc that is more
* complicated. I wanted however to do the reporting of *too
* many* parameters here because we can easily use the precise
* span of the N+1'th parameter.
*/
match segment.parameters {
ast::AngleBracketedParameters(ref data) => {
push_explicit_angle_bracketed_parameters_from_segment_to_substs(
@ -5373,6 +5308,12 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// As with
/// `push_explicit_angle_bracketed_parameters_from_segment_to_substs`,
/// but intended for `Foo(A,B) -> C` form. This expands to
/// roughly the same thing as `Foo<(A,B),C>`. One important
/// difference has to do with the treatment of anonymous
/// regions, which are translated into bound regions (NYI).
fn push_explicit_parenthesized_parameters_from_segment_to_substs<'a, 'tcx>(
fcx: &FnCtxt<'a, 'tcx>,
space: subst::ParamSpace,
@ -5381,15 +5322,6 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
data: &ast::ParenthesizedParameterData,
substs: &mut Substs<'tcx>)
{
/*!
* As with
* `push_explicit_angle_bracketed_parameters_from_segment_to_substs`,
* but intended for `Foo(A,B) -> C` form. This expands to
* roughly the same thing as `Foo<(A,B),C>`. One important
* difference has to do with the treatment of anonymous
* regions, which are translated into bound regions (NYI).
*/
let type_count = type_defs.len(space);
if type_count < 2 {
span_err!(fcx.tcx().sess, span, E0167,
@ -5608,7 +5540,7 @@ pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
if !*b {
span_err!(ccx.tcx.sess, span, E0091,
"type parameter `{}` is unused",
token::get_ident(tps.get(i).ident));
token::get_ident(tps[i].ident));
}
}
}

View File

@ -8,115 +8,111 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The region check is a final pass that runs over the AST after we have
inferred the type constraints but before we have actually finalized
the types. Its purpose is to embed a variety of region constraints.
Inserting these constraints as a separate pass is good because (1) it
localizes the code that has to do with region inference and (2) often
we cannot know what constraints are needed until the basic types have
been inferred.
### Interaction with the borrow checker
In general, the job of the borrowck module (which runs later) is to
check that all soundness criteria are met, given a particular set of
regions. The job of *this* module is to anticipate the needs of the
borrow checker and infer regions that will satisfy its requirements.
It is generally true that the inference doesn't need to be sound,
meaning that if there is a bug and we inferred bad regions, the borrow
checker should catch it. This is not entirely true though; for
example, the borrow checker doesn't check subtyping, and it doesn't
check that region pointers are always live when they are used. It
might be worthwhile to fix this so that borrowck serves as a kind of
verification step -- that would add confidence in the overall
correctness of the compiler, at the cost of duplicating some type
checks and effort.
### Inferring the duration of borrows, automatic and otherwise
Whenever we introduce a borrowed pointer, for example as the result of
a borrow expression `let x = &data`, the lifetime of the pointer `x`
is always specified as a region inference variable. `regionck` has the
job of adding constraints such that this inference variable is as
narrow as possible while still accommodating all uses (that is, every
dereference of the resulting pointer must be within the lifetime).
#### Reborrows
Generally speaking, `regionck` does NOT try to ensure that the data
`data` will outlive the pointer `x`. That is the job of borrowck. The
one exception is when "re-borrowing" the contents of another borrowed
pointer. For example, imagine you have a borrowed pointer `b` with
lifetime L1 and you have an expression `&*b`. The result of this
expression will be another borrowed pointer with lifetime L2 (which is
an inference variable). The borrow checker is going to enforce the
constraint that L2 < L1, because otherwise you are re-borrowing data
for a lifetime larger than the original loan. However, without the
routines in this module, the region inferencer would not know of this
dependency and thus it might infer the lifetime of L2 to be greater
than L1 (issue #3148).
There are a number of troublesome scenarios in the tests
`region-dependent-*.rs`, but here is one example:
struct Foo { i: int }
struct Bar { foo: Foo }
fn get_i(x: &'a Bar) -> &'a int {
let foo = &x.foo; // Lifetime L1
&foo.i // Lifetime L2
}
Note that this comes up with `&` expressions, `ref`
bindings, and `autorefs`, which are the three ways to introduce
a borrow.
The key point here is that when you are borrowing a value that
is "guaranteed" by a borrowed pointer, you must link the
lifetime of that borrowed pointer (L1, here) to the lifetime of
the borrow itself (L2). What do I mean by "guaranteed" by a
borrowed pointer? I mean any data that is reached by first
dereferencing a borrowed pointer and then either traversing
interior offsets or owned pointers. We say that the guarantor
of such data is the region of the borrowed pointer that was
traversed. This is essentially the same as the ownership
relation, except that a borrowed pointer never owns its
contents.
### Inferring borrow kinds for upvars
Whenever there is a closure expression, we need to determine how each
upvar is used. We do this by initially assigning each upvar an
immutable "borrow kind" (see `ty::BorrowKind` for details) and then
"escalating" the kind as needed. The borrow kind proceeds according to
the following lattice:
ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
So, for example, if we see an assignment `x = 5` to an upvar `x`, we
will promote its borrow kind to mutable borrow. If we see an `&mut x`
we'll do the same. Naturally, this applies not just to the upvar, but
to everything owned by `x`, so the result is the same for something
like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
struct). These adjustments are performed in
`adjust_upvar_borrow_kind()` (you can trace backwards through the code
from there).
The fact that we are inferring borrow kinds as we go results in a
semi-hacky interaction with mem-categorization. In particular,
mem-categorization will query the current borrow kind as it
categorizes, and we'll return the *current* value, but this may get
adjusted later. Therefore, in this module, we generally ignore the
borrow kind (and derived mutabilities) that are returned from
mem-categorization, since they may be inaccurate. (Another option
would be to use a unification scheme, where instead of returning a
concrete borrow kind like `ty::ImmBorrow`, we return a
`ty::InferBorrow(upvar_id)` or something like that, but this would
then mean that all later passes would have to check for these figments
and report an error, and it just seems like more mess in the end.)
*/
//! The region check is a final pass that runs over the AST after we have
//! inferred the type constraints but before we have actually finalized
//! the types. Its purpose is to embed a variety of region constraints.
//! Inserting these constraints as a separate pass is good because (1) it
//! localizes the code that has to do with region inference and (2) often
//! we cannot know what constraints are needed until the basic types have
//! been inferred.
//!
//! ### Interaction with the borrow checker
//!
//! In general, the job of the borrowck module (which runs later) is to
//! check that all soundness criteria are met, given a particular set of
//! regions. The job of *this* module is to anticipate the needs of the
//! borrow checker and infer regions that will satisfy its requirements.
//! It is generally true that the inference doesn't need to be sound,
//! meaning that if there is a bug and we inferred bad regions, the borrow
//! checker should catch it. This is not entirely true though; for
//! example, the borrow checker doesn't check subtyping, and it doesn't
//! check that region pointers are always live when they are used. It
//! might be worthwhile to fix this so that borrowck serves as a kind of
//! verification step -- that would add confidence in the overall
//! correctness of the compiler, at the cost of duplicating some type
//! checks and effort.
//!
//! ### Inferring the duration of borrows, automatic and otherwise
//!
//! Whenever we introduce a borrowed pointer, for example as the result of
//! a borrow expression `let x = &data`, the lifetime of the pointer `x`
//! is always specified as a region inference variable. `regionck` has the
//! job of adding constraints such that this inference variable is as
//! narrow as possible while still accommodating all uses (that is, every
//! dereference of the resulting pointer must be within the lifetime).
//!
//! #### Reborrows
//!
//! Generally speaking, `regionck` does NOT try to ensure that the data
//! `data` will outlive the pointer `x`. That is the job of borrowck. The
//! one exception is when "re-borrowing" the contents of another borrowed
//! pointer. For example, imagine you have a borrowed pointer `b` with
//! lifetime L1 and you have an expression `&*b`. The result of this
//! expression will be another borrowed pointer with lifetime L2 (which is
//! an inference variable). The borrow checker is going to enforce the
//! constraint that L2 < L1, because otherwise you are re-borrowing data
//! for a lifetime larger than the original loan. However, without the
//! routines in this module, the region inferencer would not know of this
//! dependency and thus it might infer the lifetime of L2 to be greater
//! than L1 (issue #3148).
//!
//! There are a number of troublesome scenarios in the tests
//! `region-dependent-*.rs`, but here is one example:
//!
//! struct Foo { i: int }
//! struct Bar { foo: Foo }
//! fn get_i(x: &'a Bar) -> &'a int {
//! let foo = &x.foo; // Lifetime L1
//! &foo.i // Lifetime L2
//! }
//!
//! Note that this comes up with `&` expressions, `ref`
//! bindings, and `autorefs`, which are the three ways to introduce
//! a borrow.
//!
//! The key point here is that when you are borrowing a value that
//! is "guaranteed" by a borrowed pointer, you must link the
//! lifetime of that borrowed pointer (L1, here) to the lifetime of
//! the borrow itself (L2). What do I mean by "guaranteed" by a
//! borrowed pointer? I mean any data that is reached by first
//! dereferencing a borrowed pointer and then either traversing
//! interior offsets or owned pointers. We say that the guarantor
//! of such data is the region of the borrowed pointer that was
//! traversed. This is essentially the same as the ownership
//! relation, except that a borrowed pointer never owns its
//! contents.
//!
//! ### Inferring borrow kinds for upvars
//!
//! Whenever there is a closure expression, we need to determine how each
//! upvar is used. We do this by initially assigning each upvar an
//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then
//! "escalating" the kind as needed. The borrow kind proceeds according to
//! the following lattice:
//!
//! ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
//!
//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we
//! will promote its borrow kind to mutable borrow. If we see an `&mut x`
//! we'll do the same. Naturally, this applies not just to the upvar, but
//! to everything owned by `x`, so the result is the same for something
//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
//! struct). These adjustments are performed in
//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code
//! from there).
//!
//! The fact that we are inferring borrow kinds as we go results in a
//! semi-hacky interaction with mem-categorization. In particular,
//! mem-categorization will query the current borrow kind as it
//! categorizes, and we'll return the *current* value, but this may get
//! adjusted later. Therefore, in this module, we generally ignore the
//! borrow kind (and derived mutabilities) that are returned from
//! mem-categorization, since they may be inaccurate. (Another option
//! would be to use a unification scheme, where instead of returning a
//! concrete borrow kind like `ty::ImmBorrow`, we return a
//! `ty::InferBorrow(upvar_id)` or something like that, but this would
//! then mean that all later passes would have to check for these figments
//! and report an error, and it just seems like more mess in the end.)
use middle::def;
use middle::mem_categorization as mc;
@ -177,15 +173,11 @@ pub fn regionck_fn(fcx: &FnCtxt, id: ast::NodeId, blk: &ast::Block) {
fcx.infcx().resolve_regions_and_report_errors();
}
/// Checks that the types in `component_tys` are well-formed. This will add constraints into the
/// region graph. Does *not* run `resolve_regions_and_report_errors` and so forth.
pub fn regionck_ensure_component_tys_wf<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
component_tys: &[Ty<'tcx>]) {
/*!
* Checks that the types in `component_tys` are well-formed.
* This will add constraints into the region graph.
* Does *not* run `resolve_regions_and_report_errors` and so forth.
*/
let mut rcx = Rcx::new(fcx, 0);
for &component_ty in component_tys.iter() {
// Check that each type outlives the empty region. Since the
@ -239,12 +231,8 @@ pub struct Rcx<'a, 'tcx: 'a> {
maybe_links: MaybeLinkMap<'tcx>
}
/// Returns the validity region of `def` -- that is, how long is `def` valid?
fn region_of_def(fcx: &FnCtxt, def: def::Def) -> ty::Region {
/*!
* Returns the validity region of `def` -- that is, how long
* is `def` valid?
*/
let tcx = fcx.tcx();
match def {
def::DefLocal(node_id) => {
@ -283,35 +271,30 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
old_scope
}
/// Try to resolve the type for the given node, returning t_err if an error results. Note that
/// we never care about the details of the error, the same error will be detected and reported
/// in the writeback phase.
///
/// Note one important point: we do not attempt to resolve *region variables* here. This is
/// because regionck is essentially adding constraints to those region variables and so may yet
/// influence how they are resolved.
///
/// Consider this silly example:
///
/// ```
/// fn borrow(x: &int) -> &int {x}
/// fn foo(x: @int) -> int { // block: B
/// let b = borrow(x); // region: <R0>
/// *b
/// }
/// ```
///
/// Here, the region of `b` will be `<R0>`. `<R0>` is constrained to be some subregion of the
/// block B and some superregion of the call. If we forced it now, we'd choose the smaller
/// region (the call). But that would make the *b illegal. Since we don't resolve, the type
/// of b will be `&<R0>.int` and then `*b` will require that `<R0>` be bigger than the let and
/// the `*b` expression, so we will effectively resolve `<R0>` to be the block B.
pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> {
/*!
* Try to resolve the type for the given node, returning
* t_err if an error results. Note that we never care
* about the details of the error, the same error will be
* detected and reported in the writeback phase.
*
* Note one important point: we do not attempt to resolve
* *region variables* here. This is because regionck is
* essentially adding constraints to those region variables
* and so may yet influence how they are resolved.
*
* Consider this silly example:
*
* fn borrow(x: &int) -> &int {x}
* fn foo(x: @int) -> int { // block: B
* let b = borrow(x); // region: <R0>
* *b
* }
*
* Here, the region of `b` will be `<R0>`. `<R0>` is
* constrained to be some subregion of the block B and some
* superregion of the call. If we forced it now, we'd choose
* the smaller region (the call). But that would make the *b
* illegal. Since we don't resolve, the type of b will be
* `&<R0>.int` and then `*b` will require that `<R0>` be
* bigger than the let and the `*b` expression, so we will
* effectively resolve `<R0>` to be the block B.
*/
match resolve_type(self.fcx.infcx(), None, unresolved_ty,
resolve_and_force_all_but_regions) {
Ok(t) => t,
@ -384,25 +367,19 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
}
}
/// This method populates the region map's `free_region_map`. It walks over the transformed
/// argument and return types for each function just before we check the body of that function,
/// looking for types where you have a borrowed pointer to other borrowed data (e.g., `&'a &'b
/// [uint]`. We do not allow references to outlive the things they point at, so we can assume
/// that `'a <= 'b`. This holds for both the argument and return types, basically because, on
/// the caller side, the caller is responsible for checking that the type of every expression
/// (including the actual values for the arguments, as well as the return type of the fn call)
/// is well-formed.
///
/// Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs`
fn relate_free_regions(&mut self,
fn_sig_tys: &[Ty<'tcx>],
body_id: ast::NodeId) {
/*!
* This method populates the region map's `free_region_map`.
* It walks over the transformed argument and return types for
* each function just before we check the body of that
* function, looking for types where you have a borrowed
* pointer to other borrowed data (e.g., `&'a &'b [uint]`. We
* do not allow references to outlive the things they point
* at, so we can assume that `'a <= 'b`. This holds for both
* the argument and return types, basically because, on the caller
* side, the caller is responsible for checking that the type of
* every expression (including the actual values for the arguments,
* as well as the return type of the fn call) is well-formed.
*
* Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs`
*/
debug!("relate_free_regions >>");
let tcx = self.tcx();
@ -921,19 +898,15 @@ fn check_expr_fn_block(rcx: &mut Rcx,
_ => {}
}
/// Make sure that the type of all free variables referenced inside a closure/proc outlive the
/// closure/proc's lifetime bound. This is just a special case of the usual rules about closed
/// over values outliving the object's lifetime bound.
fn ensure_free_variable_types_outlive_closure_bound(
rcx: &mut Rcx,
bounds: ty::ExistentialBounds,
expr: &ast::Expr,
freevars: &[ty::Freevar])
{
/*!
* Make sure that the type of all free variables referenced
* inside a closure/proc outlive the closure/proc's lifetime
* bound. This is just a special case of the usual rules about
* closed over values outliving the object's lifetime bound.
*/
let tcx = rcx.fcx.ccx.tcx;
debug!("ensure_free_variable_types_outlive_closure_bound({}, {})",
@ -984,18 +957,14 @@ fn check_expr_fn_block(rcx: &mut Rcx,
}
}
/// Make sure that all free variables referenced inside the closure outlive the closure's
/// lifetime bound. Also, create an entry in the upvar_borrows map with a region.
fn constrain_free_variables_in_by_ref_closure(
rcx: &mut Rcx,
region_bound: ty::Region,
expr: &ast::Expr,
freevars: &[ty::Freevar])
{
/*!
* Make sure that all free variables referenced inside the
* closure outlive the closure's lifetime bound. Also, create
* an entry in the upvar_borrows map with a region.
*/
let tcx = rcx.fcx.ccx.tcx;
let infcx = rcx.fcx.infcx();
debug!("constrain_free_variables({}, {})",
@ -1183,15 +1152,12 @@ fn constrain_call<'a, I: Iterator<&'a ast::Expr>>(rcx: &mut Rcx,
}
}
/// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being
/// dereferenced, the lifetime of the pointer includes the deref expr.
fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
deref_expr: &ast::Expr,
derefs: uint,
mut derefd_ty: Ty<'tcx>) {
/*!
* Invoked on any auto-dereference that occurs. Checks that if
* this is a region pointer being dereferenced, the lifetime of
* the pointer includes the deref expr.
*/
let r_deref_expr = ty::ReScope(CodeExtent::from_node_id(deref_expr.id));
for i in range(0u, derefs) {
debug!("constrain_autoderefs(deref_expr=?, derefd_ty={}, derefs={}/{}",
@ -1259,16 +1225,12 @@ pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx,
}
/// Invoked on any index expression that occurs. Checks that if this is a slice being indexed, the
/// lifetime of the pointer includes the deref expr.
fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
index_expr: &ast::Expr,
indexed_ty: Ty<'tcx>)
{
/*!
* Invoked on any index expression that occurs. Checks that if
* this is a slice being indexed, the lifetime of the pointer
* includes the deref expr.
*/
debug!("constrain_index(index_expr=?, indexed_ty={}",
rcx.fcx.infcx().ty_to_string(indexed_ty));
@ -1286,18 +1248,14 @@ fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
}
}
/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
/// adjustments) are valid for at least `minimum_lifetime`
fn type_of_node_must_outlive<'a, 'tcx>(
rcx: &mut Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
id: ast::NodeId,
minimum_lifetime: ty::Region)
{
/*!
* Guarantees that any lifetimes which appear in the type of
* the node `id` (after applying adjustments) are valid for at
* least `minimum_lifetime`
*/
let tcx = rcx.fcx.tcx();
// Try to resolve the type. If we encounter an error, then typeck
@ -1314,14 +1272,10 @@ fn type_of_node_must_outlive<'a, 'tcx>(
type_must_outlive(rcx, origin, ty, minimum_lifetime);
}
/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
/// resulting pointer is linked to the lifetime of its guarantor (if any).
fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr,
mutability: ast::Mutability, base: &ast::Expr) {
/*!
* Computes the guarantor for an expression `&base` and then
* ensures that the lifetime of the resulting pointer is linked
* to the lifetime of its guarantor (if any).
*/
debug!("link_addr_of(base=?)");
let cmt = {
@ -1331,13 +1285,10 @@ fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr,
link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt);
}
/// Computes the guarantors for any ref bindings in a `let` and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of the initialization expression.
fn link_local(rcx: &Rcx, local: &ast::Local) {
/*!
* Computes the guarantors for any ref bindings in a `let` and
* then ensures that the lifetime of the resulting pointer is
* linked to the lifetime of the initialization expression.
*/
debug!("regionck::for_local()");
let init_expr = match local.init {
None => { return; }
@ -1348,12 +1299,10 @@ fn link_local(rcx: &Rcx, local: &ast::Local) {
link_pattern(rcx, mc, discr_cmt, &*local.pat);
}
/// Computes the guarantors for any ref bindings in a match and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of its guarantor (if any).
fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
/*!
* Computes the guarantors for any ref bindings in a match and
* then ensures that the lifetime of the resulting pointer is
* linked to the lifetime of its guarantor (if any).
*/
debug!("regionck::for_match()");
let mc = mc::MemCategorizationContext::new(rcx);
@ -1366,15 +1315,12 @@ fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
}
}
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
/// needed.
fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
mc: mc::MemCategorizationContext<Rcx<'a, 'tcx>>,
discr_cmt: mc::cmt<'tcx>,
root_pat: &ast::Pat) {
/*!
* Link lifetimes of any ref bindings in `root_pat` to
* the pointers found in the discriminant, if needed.
*/
let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
match sub_pat.node {
// `ref x` pattern
@ -1400,14 +1346,12 @@ fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
});
}
/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
/// autoref'd.
fn link_autoref(rcx: &Rcx,
expr: &ast::Expr,
autoderefs: uint,
autoref: &ty::AutoRef) {
/*!
* Link lifetime of borrowed pointer resulting from autoref
* to lifetimes in the value being autoref'd.
*/
debug!("link_autoref(autoref={})", autoref);
let mc = mc::MemCategorizationContext::new(rcx);
@ -1424,15 +1368,11 @@ fn link_autoref(rcx: &Rcx,
}
}
/// Computes the guarantor for cases where the `expr` is being passed by implicit reference and
/// must outlive `callee_scope`.
fn link_by_ref(rcx: &Rcx,
expr: &ast::Expr,
callee_scope: CodeExtent) {
/*!
* Computes the guarantor for cases where the `expr` is
* being passed by implicit reference and must outlive
* `callee_scope`.
*/
let tcx = rcx.tcx();
debug!("link_by_ref(expr={}, callee_scope={})",
expr.repr(tcx), callee_scope);
@ -1442,17 +1382,13 @@ fn link_by_ref(rcx: &Rcx,
link_region(rcx, expr.span, borrow_region, ty::ImmBorrow, expr_cmt);
}
/// Like `link_region()`, except that the region is extracted from the type of `id`, which must be
/// some reference (`&T`, `&str`, etc).
fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
id: ast::NodeId,
mutbl: ast::Mutability,
cmt_borrowed: mc::cmt<'tcx>) {
/*!
* Like `link_region()`, except that the region is
* extracted from the type of `id`, which must be some
* reference (`&T`, `&str`, etc).
*/
let rptr_ty = rcx.resolve_node_type(id);
if !ty::type_is_error(rptr_ty) {
let tcx = rcx.fcx.ccx.tcx;
@ -1463,19 +1399,14 @@ fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
}
}
/// Informs the inference engine that `borrow_cmt` is being borrowed with kind `borrow_kind` and
/// lifetime `borrow_region`. In order to ensure borrowck is satisfied, this may create constraints
/// between regions, as explained in `link_reborrowed_region()`.
fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
borrow_region: ty::Region,
borrow_kind: ty::BorrowKind,
borrow_cmt: mc::cmt<'tcx>) {
/*!
* Informs the inference engine that `borrow_cmt` is being
* borrowed with kind `borrow_kind` and lifetime `borrow_region`.
* In order to ensure borrowck is satisfied, this may create
* constraints between regions, as explained in
* `link_reborrowed_region()`.
*/
let mut borrow_cmt = borrow_cmt;
let mut borrow_kind = borrow_kind;
@ -1525,6 +1456,46 @@ fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
}
}
/// This is the most complicated case: the path being borrowed is
/// itself the referent of a borrowed pointer. Let me give an
/// example fragment of code to make clear(er) the situation:
///
/// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
/// ...
/// &'z *r // the reborrow has lifetime 'z
///
/// Now, in this case, our primary job is to add the inference
/// constraint that `'z <= 'a`. Given this setup, let's clarify the
/// parameters in (roughly) terms of the example:
///
/// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
/// borrow_region ^~ ref_region ^~
/// borrow_kind ^~ ref_kind ^~
/// ref_cmt ^
///
/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
///
/// Unfortunately, there are some complications beyond the simple
/// scenario I just painted:
///
/// 1. The reference `r` might in fact be a "by-ref" upvar. In that
/// case, we have two jobs. First, we are inferring whether this reference
/// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
/// adjust that based on this borrow (e.g., if this is an `&mut` borrow,
/// then `r` must be an `&mut` reference). Second, whenever we link
/// two regions (here, `'z <= 'a`), we supply a *cause*, and in this
/// case we adjust the cause to indicate that the reference being
/// "reborrowed" is itself an upvar. This provides a nicer error message
/// should something go wrong.
///
/// 2. There may in fact be more levels of reborrowing. In the
/// example, I said the borrow was like `&'z *r`, but it might
/// in fact be a borrow like `&'z **q` where `q` has type `&'a
/// &'b mut T`. In that case, we want to ensure that `'z <= 'a`
/// and `'z <= 'b`. This is explained more below.
///
/// The return value of this function indicates whether we need to
/// recurse and process `ref_cmt` (see case 2 above).
fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
span: Span,
borrow_region: ty::Region,
@ -1535,49 +1506,6 @@ fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
note: mc::Note)
-> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
{
/*!
* This is the most complicated case: the path being borrowed is
* itself the referent of a borrowed pointer. Let me give an
* example fragment of code to make clear(er) the situation:
*
* let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
* ...
* &'z *r // the reborrow has lifetime 'z
*
* Now, in this case, our primary job is to add the inference
* constraint that `'z <= 'a`. Given this setup, let's clarify the
* parameters in (roughly) terms of the example:
*
* A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
* borrow_region ^~ ref_region ^~
* borrow_kind ^~ ref_kind ^~
* ref_cmt ^
*
* Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
*
* Unfortunately, there are some complications beyond the simple
* scenario I just painted:
*
* 1. The reference `r` might in fact be a "by-ref" upvar. In that
* case, we have two jobs. First, we are inferring whether this reference
* should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
* adjust that based on this borrow (e.g., if this is an `&mut` borrow,
* then `r` must be an `&mut` reference). Second, whenever we link
* two regions (here, `'z <= 'a`), we supply a *cause*, and in this
* case we adjust the cause to indicate that the reference being
* "reborrowed" is itself an upvar. This provides a nicer error message
* should something go wrong.
*
* 2. There may in fact be more levels of reborrowing. In the
* example, I said the borrow was like `&'z *r`, but it might
* in fact be a borrow like `&'z **q` where `q` has type `&'a
* &'b mut T`. In that case, we want to ensure that `'z <= 'a`
* and `'z <= 'b`. This is explained more below.
*
* The return value of this function indicates whether we need to
* recurse and process `ref_cmt` (see case 2 above).
*/
// Possible upvar ID we may need later to create an entry in the
// maybe link map.
@ -1715,27 +1643,19 @@ fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
}
}
/// Adjusts the inferred borrow_kind as needed to account for upvars that are assigned to in an
/// assignment expression.
fn adjust_borrow_kind_for_assignment_lhs(rcx: &Rcx,
lhs: &ast::Expr) {
/*!
* Adjusts the inferred borrow_kind as needed to account
* for upvars that are assigned to in an assignment
* expression.
*/
let mc = mc::MemCategorizationContext::new(rcx);
let cmt = ignore_err!(mc.cat_expr(lhs));
adjust_upvar_borrow_kind_for_mut(rcx, cmt);
}
/// Indicates that `cmt` is being directly mutated (e.g., assigned to). If cmt contains any by-ref
/// upvars, this implies that those upvars must be borrowed using an `&mut` borrow.
fn adjust_upvar_borrow_kind_for_mut<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
cmt: mc::cmt<'tcx>) {
/*!
* Indicates that `cmt` is being directly mutated (e.g., assigned
* to). If cmt contains any by-ref upvars, this implies that
* those upvars must be borrowed using an `&mut` borrow.
*/
let mut cmt = cmt;
loop {
debug!("adjust_upvar_borrow_kind_for_mut(cmt={})",
@ -1834,16 +1754,12 @@ fn adjust_upvar_borrow_kind_for_unique<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, cmt: mc::c
}
}
/// Indicates that the borrow_kind of `outer_upvar_id` must permit a reborrowing with the
/// borrow_kind of `inner_upvar_id`. This occurs in nested closures, see comment above at the call
/// to this function.
fn link_upvar_borrow_kind_for_nested_closures(rcx: &mut Rcx,
inner_upvar_id: ty::UpvarId,
outer_upvar_id: ty::UpvarId) {
/*!
* Indicates that the borrow_kind of `outer_upvar_id` must
* permit a reborrowing with the borrow_kind of `inner_upvar_id`.
* This occurs in nested closures, see comment above at the call to
* this function.
*/
debug!("link_upvar_borrow_kind: inner_upvar_id={} outer_upvar_id={}",
inner_upvar_id, outer_upvar_id);
@ -1867,18 +1783,14 @@ fn adjust_upvar_borrow_kind_for_loan(rcx: &Rcx,
adjust_upvar_borrow_kind(rcx, upvar_id, upvar_borrow, kind)
}
/// We infer the borrow_kind with which to borrow upvars in a stack closure. The borrow_kind
/// basically follows a lattice of `imm < unique-imm < mut`, moving from left to right as needed
/// (but never right to left). Here the argument `mutbl` is the borrow_kind that is required by
/// some particular use.
fn adjust_upvar_borrow_kind(rcx: &Rcx,
upvar_id: ty::UpvarId,
upvar_borrow: &mut ty::UpvarBorrow,
kind: ty::BorrowKind) {
/*!
* We infer the borrow_kind with which to borrow upvars in a stack
* closure. The borrow_kind basically follows a lattice of
* `imm < unique-imm < mut`, moving from left to right as needed (but never
* right to left). Here the argument `mutbl` is the borrow_kind that
* is required by some particular use.
*/
debug!("adjust_upvar_borrow_kind: id={} kind=({} -> {})",
upvar_id, upvar_borrow.kind, kind);
@ -1911,15 +1823,12 @@ fn adjust_upvar_borrow_kind(rcx: &Rcx,
}
}
/// Ensures that all borrowed data reachable via `ty` outlives `region`.
fn type_must_outlive<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region)
{
/*!
* Ensures that all borrowed data reachable via `ty` outlives `region`.
*/
debug!("type_must_outlive(ty={}, region={})",
ty.repr(rcx.tcx()),
region.repr(rcx.tcx()));

View File

@ -33,18 +33,14 @@ struct Wf<'a, 'tcx: 'a> {
out: Vec<WfConstraint<'tcx>>,
}
/// This routine computes the well-formedness constraints that must hold for the type `ty` to
/// appear in a context with lifetime `outer_region`
pub fn region_wf_constraints<'tcx>(
tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
outer_region: ty::Region)
-> Vec<WfConstraint<'tcx>>
{
/*!
* This routine computes the well-formedness constraints that must
* hold for the type `ty` to appear in a context with lifetime
* `outer_region`
*/
let mut stack = Vec::new();
stack.push((outer_region, None));
let mut wf = Wf { tcx: tcx,
@ -168,12 +164,9 @@ impl<'a, 'tcx> Wf<'a, 'tcx> {
self.stack.pop().unwrap();
}
/// Pushes a constraint that `r_b` must outlive the top region on the stack.
fn push_region_constraint_from_top(&mut self,
r_b: ty::Region) {
/*!
* Pushes a constraint that `r_b` must outlive the
* top region on the stack.
*/
// Indicates that we have found borrowed content with a lifetime
// of at least `r_b`. This adds a constraint that `r_b` must
@ -192,30 +185,26 @@ impl<'a, 'tcx> Wf<'a, 'tcx> {
self.push_sub_region_constraint(opt_ty, r_a, r_b);
}
/// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
fn push_sub_region_constraint(&mut self,
opt_ty: Option<Ty<'tcx>>,
r_a: ty::Region,
r_b: ty::Region) {
/*! Pushes a constraint that `r_a <= r_b`, due to `opt_ty` */
self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
}
/// Pushes a constraint that `param_ty` must outlive the top region on the stack.
fn push_param_constraint_from_top(&mut self,
param_ty: ty::ParamTy) {
/*!
* Pushes a constraint that `param_ty` must outlive the
* top region on the stack.
*/
let &(region, opt_ty) = self.stack.last().unwrap();
self.push_param_constraint(region, opt_ty, param_ty);
}
/// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
fn push_param_constraint(&mut self,
region: ty::Region,
opt_ty: Option<Ty<'tcx>>,
param_ty: ty::ParamTy) {
/*! Pushes a constraint that `region <= param_ty`, due to `opt_ty` */
self.out.push(RegionSubParamConstraint(opt_ty, region, param_ty));
}

View File

@ -168,17 +168,14 @@ pub fn check_object_safety<'tcx>(tcx: &ty::ctxt<'tcx>,
}
}
// Returns a vec of error messages. If the vec is empty - no errors!
/// Returns a vec of error messages. If the vec is empty - no errors!
///
/// There are some limitations to calling functions through an object, because (a) the self
/// type is not known (that's the whole point of a trait instance, after all, to obscure the
/// self type) and (b) the call must go through a vtable and hence cannot be monomorphized.
fn check_object_safety_of_method<'tcx>(tcx: &ty::ctxt<'tcx>,
method: &ty::Method<'tcx>)
-> Vec<String> {
/*!
* There are some limitations to calling functions through an
* object, because (a) the self type is not known
* (that's the whole point of a trait instance, after all, to
* obscure the self type) and (b) the call must go through a
* vtable and hence cannot be monomorphized.
*/
let mut msgs = Vec::new();
let method_name = method.name.repr(tcx);
@ -455,8 +452,8 @@ pub fn maybe_report_ambiguity<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
}
}
/// Select as many obligations as we can at present.
pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt) {
/*! Select as many obligations as we can at present. */
match
fcx.inh.fulfillment_cx
@ -468,14 +465,10 @@ pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt) {
}
}
/// Try to select any fcx obligation that we haven't tried yet, in an effort to improve inference.
/// You could just call `select_fcx_obligations_where_possible` except that it leads to repeated
/// work.
pub fn select_new_fcx_obligations(fcx: &FnCtxt) {
/*!
* Try to select any fcx obligation that we haven't tried yet,
* in an effort to improve inference. You could just call
* `select_fcx_obligations_where_possible` except that it leads
* to repeated work.
*/
match
fcx.inh.fulfillment_cx
.borrow_mut()

View File

@ -38,24 +38,18 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
CheckTypeWellFormedVisitor { ccx: ccx, cache: HashSet::new() }
}
/// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are
/// well-formed, meaning that they do not require any constraints not declared in the struct
/// definition itself. For example, this definition would be illegal:
///
/// struct Ref<'a, T> { x: &'a T }
///
/// because the type did not declare that `T:'a`.
///
/// We do this check as a pre-pass before checking fn bodies because if these constraints are
/// not included it frequently leads to confusing errors in fn bodies. So it's better to check
/// the types first.
fn check_item_well_formed(&mut self, item: &ast::Item) {
/*!
* Checks that the field types (in a struct def'n) or
* argument types (in an enum def'n) are well-formed,
* meaning that they do not require any constraints not
* declared in the struct definition itself.
* For example, this definition would be illegal:
*
* struct Ref<'a, T> { x: &'a T }
*
* because the type did not declare that `T:'a`.
*
* We do this check as a pre-pass before checking fn bodies
* because if these constraints are not included it frequently
* leads to confusing errors in fn bodies. So it's better to check
* the types first.
*/
let ccx = self.ccx;
debug!("check_item_well_formed(it.id={}, it.ident={})",
item.id,
@ -107,16 +101,12 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
regionck::regionck_item(&fcx, item);
}
/// In a type definition, we check to ensure that the types of the fields are well-formed.
fn check_type_defn(&mut self,
item: &ast::Item,
lookup_fields: for<'fcx> |&FnCtxt<'fcx, 'tcx>|
-> Vec<AdtVariant<'tcx>>)
{
/*!
* In a type definition, we check to ensure that the types of the fields are
* well-formed.
*/
self.with_fcx(item, |this, fcx| {
let variants = lookup_fields(fcx);
let mut bounds_checker = BoundsChecker::new(fcx,
@ -282,22 +272,16 @@ impl<'cx,'tcx> BoundsChecker<'cx,'tcx> {
cache: cache, binding_count: 0 }
}
/// Given a trait ref like `A : Trait<B>`, where `Trait` is defined as (say):
///
/// trait Trait<B:OtherTrait> : Copy { ... }
///
/// This routine will check that `B : OtherTrait` and `A : Trait<B>`. It will also recursively
/// check that the types `A` and `B` are well-formed.
///
/// Note that it does not (currently, at least) check that `A : Copy` (that check is delegated
/// to the point where impl `A : Trait<B>` is implemented).
pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
/*!
* Given a trait ref like `A : Trait<B>`, where `Trait` is
* defined as (say):
*
* trait Trait<B:OtherTrait> : Copy { ... }
*
* This routine will check that `B : OtherTrait` and `A :
* Trait<B>`. It will also recursively check that the types
* `A` and `B` are well-formed.
*
* Note that it does not (currently, at least)
* check that `A : Copy` (that check is delegated to the point
* where impl `A : Trait<B>` is implemented).
*/
let trait_def = ty::lookup_trait_def(self.fcx.tcx(), trait_ref.def_id);
let bounds = trait_def.generics.to_bounds(self.tcx(), &trait_ref.substs);

View File

@ -477,17 +477,13 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
}
}
/// Substitutes the values for the receiver's type parameters that are found in method, leaving the
/// method's type parameters intact.
pub fn make_substs_for_receiver_types<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_ref: &ty::TraitRef<'tcx>,
method: &ty::Method<'tcx>)
-> subst::Substs<'tcx>
{
/*!
* Substitutes the values for the receiver's type parameters
* that are found in method, leaving the method's type parameters
* intact.
*/
let meth_tps: Vec<Ty> =
method.generics.types.get_slice(subst::FnSpace)
.iter()

View File

@ -8,10 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Orphan checker: every impl either implements a trait defined in this
* crate or pertains to a type defined in this crate.
*/
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use middle::traits;
use middle::ty;

View File

@ -8,10 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Overlap: No two impls for the same trait are implemented for the
* same type.
*/
//! Overlap: No two impls for the same trait are implemented for the
//! same type.
use middle::traits;
use middle::ty;

View File

@ -1944,6 +1944,9 @@ fn get_or_create_type_parameter_def<'tcx,AC>(this: &AC,
def
}
/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or
/// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the
/// built-in trait (formerly known as kind): Send.
fn compute_bounds<'tcx,AC>(this: &AC,
name_of_bounded_thing: ast::Name,
param_ty: ty::ParamTy,
@ -1953,13 +1956,6 @@ fn compute_bounds<'tcx,AC>(this: &AC,
where_clause: &ast::WhereClause)
-> ty::ParamBounds<'tcx>
where AC: AstConv<'tcx> {
/*!
* Translate the AST's notion of ty param bounds (which are an
* enum consisting of a newtyped Ty or a region) to ty's
* notion of ty param bounds, which can either be user-defined
* traits, or the built-in trait (formerly known as kind): Send.
*/
let mut param_bounds = conv_param_bounds(this,
span,
param_ty,
@ -2040,16 +2036,13 @@ fn conv_param_bounds<'tcx,AC>(this: &AC,
}
}
/// Merges the bounds declared on a type parameter with those found from where clauses into a
/// single list.
fn merge_param_bounds<'a>(tcx: &ty::ctxt,
param_ty: ty::ParamTy,
ast_bounds: &'a [ast::TyParamBound],
where_clause: &'a ast::WhereClause)
-> Vec<&'a ast::TyParamBound> {
/*!
* Merges the bounds declared on a type parameter with those
* found from where clauses into a single list.
*/
let mut result = Vec::new();
for ast_bound in ast_bounds.iter() {

View File

@ -8,61 +8,57 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Type Coercion
Under certain circumstances we will coerce from one type to another,
for example by auto-borrowing. This occurs in situations where the
compiler has a firm 'expected type' that was supplied from the user,
and where the actual type is similar to that expected type in purpose
but not in representation (so actual subtyping is inappropriate).
## Reborrowing
Note that if we are expecting a reference, we will *reborrow*
even if the argument provided was already a reference. This is
useful for freezing mut/const things (that is, when the expected is &T
but you have &const T or &mut T) and also for avoiding the linearity
of mut things (when the expected is &mut T and you have &mut T). See
the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
examples of where this is useful.
## Subtle note
When deciding what type coercions to consider, we do not attempt to
resolve any type variables we may encounter. This is because `b`
represents the expected type "as the user wrote it", meaning that if
the user defined a generic function like
fn foo<A>(a: A, b: A) { ... }
and then we wrote `foo(&1, @2)`, we will not auto-borrow
either argument. In older code we went to some lengths to
resolve the `b` variable, which could mean that we'd
auto-borrow later arguments but not earlier ones, which
seems very confusing.
## Subtler note
However, right now, if the user manually specifies the
values for the type variables, as so:
foo::<&int>(@1, @2)
then we *will* auto-borrow, because we can't distinguish this from a
function that declared `&int`. This is inconsistent but it's easiest
at the moment. The right thing to do, I think, is to consider the
*unsubstituted* type when deciding whether to auto-borrow, but the
*substituted* type when considering the bounds and so forth. But most
of our methods don't give access to the unsubstituted type, and
rightly so because they'd be error-prone. So maybe the thing to do is
to actually determine the kind of coercions that should occur
separately and pass them in. Or maybe it's ok as is. Anyway, it's
sort of a minor point so I've opted to leave it for later---after all
we may want to adjust precisely when coercions occur.
*/
//! # Type Coercion
//!
//! Under certain circumstances we will coerce from one type to another,
//! for example by auto-borrowing. This occurs in situations where the
//! compiler has a firm 'expected type' that was supplied from the user,
//! and where the actual type is similar to that expected type in purpose
//! but not in representation (so actual subtyping is inappropriate).
//!
//! ## Reborrowing
//!
//! Note that if we are expecting a reference, we will *reborrow*
//! even if the argument provided was already a reference. This is
//! useful for freezing mut/const things (that is, when the expected is &T
//! but you have &const T or &mut T) and also for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) { ... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use middle::subst;
use middle::ty::{AutoPtr, AutoDerefRef, AdjustDerefRef, AutoUnsize, AutoUnsafe};
@ -512,14 +508,10 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
}
}
/// Attempts to coerce from a bare Rust function (`extern "Rust" fn`) into a closure or a
/// `proc`.
fn coerce_from_bare_fn(&self, a: Ty<'tcx>, fn_ty_a: &ty::BareFnTy<'tcx>, b: Ty<'tcx>)
-> CoerceResult<'tcx> {
/*!
*
* Attempts to coerce from a bare Rust function (`extern
* "Rust" fn`) into a closure or a `proc`.
*/
self.unpack_actual_value(b, |sty_b| {
debug!("coerce_from_bare_fn(a={}, b={})",

View File

@ -642,21 +642,16 @@ impl<'f, 'tcx> CombineFields<'f, 'tcx> {
Ok(())
}
/// Attempts to generalize `ty` for the type variable `for_vid`. This checks for cycle -- that
/// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
/// replace all regions with fresh variables. Returns `ty_err` in the case of a cycle, `Ok`
/// otherwise.
fn generalize(&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
make_region_vars: bool)
-> cres<'tcx, Ty<'tcx>>
{
/*!
* Attempts to generalize `ty` for the type variable
* `for_vid`. This checks for cycle -- that is, whether the
* type `ty` references `for_vid`. If `make_region_vars` is
* true, it will also replace all regions with fresh
* variables. Returns `ty_err` in the case of a cycle, `Ok`
* otherwise.
*/
let mut generalize = Generalizer { infcx: self.infcx,
span: self.trace.origin.span(),
for_vid: for_vid,

View File

@ -8,244 +8,240 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Type inference engine
This is loosely based on standard HM-type inference, but with an
extension to try and accommodate subtyping. There is nothing
principled about this extension; it's sound---I hope!---but it's a
heuristic, ultimately, and does not guarantee that it finds a valid
typing even if one exists (in fact, there are known scenarios where it
fails, some of which may eventually become problematic).
## Key idea
The main change is that each type variable T is associated with a
lower-bound L and an upper-bound U. L and U begin as bottom and top,
respectively, but gradually narrow in response to new constraints
being introduced. When a variable is finally resolved to a concrete
type, it can (theoretically) select any type that is a supertype of L
and a subtype of U.
There are several critical invariants which we maintain:
- the upper-bound of a variable only becomes lower and the lower-bound
only becomes higher over time;
- the lower-bound L is always a subtype of the upper bound U;
- the lower-bound L and upper-bound U never refer to other type variables,
but only to types (though those types may contain type variables).
> An aside: if the terms upper- and lower-bound confuse you, think of
> "supertype" and "subtype". The upper-bound is a "supertype"
> (super=upper in Latin, or something like that anyway) and the lower-bound
> is a "subtype" (sub=lower in Latin). I find it helps to visualize
> a simple class hierarchy, like Java minus interfaces and
> primitive types. The class Object is at the root (top) and other
> types lie in between. The bottom type is then the Null type.
> So the tree looks like:
>
> ```text
> Object
> / \
> String Other
> \ /
> (null)
> ```
>
> So the upper bound type is the "supertype" and the lower bound is the
> "subtype" (also, super and sub mean upper and lower in Latin, or something
> like that anyway).
## Satisfying constraints
At a primitive level, there is only one form of constraint that the
inference understands: a subtype relation. So the outside world can
say "make type A a subtype of type B". If there are variables
involved, the inferencer will adjust their upper- and lower-bounds as
needed to ensure that this relation is satisfied. (We also allow "make
type A equal to type B", but this is translated into "A <: B" and "B
<: A")
As stated above, we always maintain the invariant that type bounds
never refer to other variables. This keeps the inference relatively
simple, avoiding the scenario of having a kind of graph where we have
to pump constraints along and reach a fixed point, but it does impose
some heuristics in the case where the user is relating two type
variables A <: B.
Combining two variables such that variable A will forever be a subtype
of variable B is the trickiest part of the algorithm because there is
often no right choice---that is, the right choice will depend on
future constraints which we do not yet know. The problem comes about
because both A and B have bounds that can be adjusted in the future.
Let's look at some of the cases that can come up.
Imagine, to start, the best case, where both A and B have an upper and
lower bound (that is, the bounds are not top nor bot respectively). In
that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
A and B should become, they will forever have the desired subtyping
relation. We can just leave things as they are.
### Option 1: Unify
However, suppose that A.ub is *not* a subtype of B.lb. In
that case, we must make a decision. One option is to unify A
and B so that they are one variable whose bounds are:
UB = GLB(A.ub, B.ub)
LB = LUB(A.lb, B.lb)
(Note that we will have to verify that LB <: UB; if it does not, the
types are not intersecting and there is an error) In that case, A <: B
holds trivially because A==B. However, we have now lost some
flexibility, because perhaps the user intended for A and B to end up
as different types and not the same type.
Pictorially, what this does is to take two distinct variables with
(hopefully not completely) distinct type ranges and produce one with
the intersection.
```text
B.ub B.ub
/\ /
A.ub / \ A.ub /
/ \ / \ \ /
/ X \ UB
/ / \ \ / \
/ / / \ / /
\ \ / / \ /
\ X / LB
\ / \ / / \
\ / \ / / \
A.lb B.lb A.lb B.lb
```
### Option 2: Relate UB/LB
Another option is to keep A and B as distinct variables but set their
bounds in such a way that, whatever happens, we know that A <: B will hold.
This can be achieved by ensuring that A.ub <: B.lb. In practice there
are two ways to do that, depicted pictorially here:
```text
Before Option #1 Option #2
B.ub B.ub B.ub
/\ / \ / \
A.ub / \ A.ub /(B')\ A.ub /(B')\
/ \ / \ \ / / \ / /
/ X \ __UB____/ UB /
/ / \ \ / | | /
/ / / \ / | | /
\ \ / / /(A')| | /
\ X / / LB ______LB/
\ / \ / / / \ / (A')/ \
\ / \ / \ / \ \ / \
A.lb B.lb A.lb B.lb A.lb B.lb
```
In these diagrams, UB and LB are defined as before. As you can see,
the new ranges `A'` and `B'` are quite different from the range that
would be produced by unifying the variables.
### What we do now
Our current technique is to *try* (transactionally) to relate the
existing bounds of A and B, if there are any (i.e., if `UB(A) != top
&& LB(B) != bot`). If that succeeds, we're done. If it fails, then
we merge A and B into the same variable.
This is not clearly the correct course. For example, if `UB(A) !=
top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
and leave the variables unmerged. This is sometimes the better
course, it depends on the program.
The main case which fails today that I would like to support is:
```text
fn foo<T>(x: T, y: T) { ... }
fn bar() {
let x: @mut int = @mut 3;
let y: @int = @3;
foo(x, y);
}
```
In principle, the inferencer ought to find that the parameter `T` to
`foo(x, y)` is `@const int`. Today, however, it does not; this is
because the type variable `T` is merged with the type variable for
`X`, and thus inherits its UB/LB of `@mut int`. This leaves no
flexibility for `T` to later adjust to accommodate `@int`.
### What to do when not all bounds are present
In the prior discussion we assumed that A.ub was not top and B.lb was
not bot. Unfortunately this is rarely the case. Often type variables
have "lopsided" bounds. For example, if a variable in the program has
been initialized but has not been used, then its corresponding type
variable will have a lower bound but no upper bound. When that
variable is then used, we would like to know its upper bound---but we
don't have one! In this case we'll do different things depending on
how the variable is being used.
## Transactional support
Whenever we merge variables or adjust their bounds, we always
keep a record of the old value. This allows the changes to be undone.
## Regions
I've only talked about type variables here, but region variables
follow the same principle. They have upper- and lower-bounds. A
region A is a subregion of a region B if A being valid implies that B
is valid. This basically corresponds to the block nesting structure:
the regions for outer block scopes are superregions of those for inner
block scopes.
## Integral and floating-point type variables
There is a third variety of type variable that we use only for
inferring the types of unsuffixed integer literals. Integral type
variables differ from general-purpose type variables in that there's
no subtyping relationship among the various integral types, so instead
of associating each variable with an upper and lower bound, we just
use simple unification. Each integer variable is associated with at
most one integer type. Floating point types are handled similarly to
integral types.
## GLB/LUB
Computing the greatest-lower-bound and least-upper-bound of two
types/regions is generally straightforward except when type variables
are involved. In that case, we follow a similar "try to use the bounds
when possible but otherwise merge the variables" strategy. In other
words, `GLB(A, B)` where `A` and `B` are variables will often result
in `A` and `B` being merged and the result being `A`.
## Type coercion
We have a notion of assignability which differs somewhat from
subtyping; in particular it may cause region borrowing to occur. See
the big comment later in this file on Type Coercion for specifics.
### In conclusion
I showed you three ways to relate `A` and `B`. There are also more,
of course, though I'm not sure if there are any more sensible options.
The main point is that there are various options, each of which
produce a distinct range of types for `A` and `B`. Depending on what
the correct values for A and B are, one of these options will be the
right choice: but of course we don't know the right values for A and B
yet, that's what we're trying to find! In our code, we opt to unify
(Option #1).
# Implementation details
We make use of a trait-like implementation strategy to consolidate
duplicated code between subtypes, GLB, and LUB computations. See the
section on "Type Combining" below for details.
*/
//! # Type inference engine
//!
//! This is loosely based on standard HM-type inference, but with an
//! extension to try and accommodate subtyping. There is nothing
//! principled about this extension; it's sound---I hope!---but it's a
//! heuristic, ultimately, and does not guarantee that it finds a valid
//! typing even if one exists (in fact, there are known scenarios where it
//! fails, some of which may eventually become problematic).
//!
//! ## Key idea
//!
//! The main change is that each type variable T is associated with a
//! lower-bound L and an upper-bound U. L and U begin as bottom and top,
//! respectively, but gradually narrow in response to new constraints
//! being introduced. When a variable is finally resolved to a concrete
//! type, it can (theoretically) select any type that is a supertype of L
//! and a subtype of U.
//!
//! There are several critical invariants which we maintain:
//!
//! - the upper-bound of a variable only becomes lower and the lower-bound
//! only becomes higher over time;
//! - the lower-bound L is always a subtype of the upper bound U;
//! - the lower-bound L and upper-bound U never refer to other type variables,
//! but only to types (though those types may contain type variables).
//!
//! > An aside: if the terms upper- and lower-bound confuse you, think of
//! > "supertype" and "subtype". The upper-bound is a "supertype"
//! > (super=upper in Latin, or something like that anyway) and the lower-bound
//! > is a "subtype" (sub=lower in Latin). I find it helps to visualize
//! > a simple class hierarchy, like Java minus interfaces and
//! > primitive types. The class Object is at the root (top) and other
//! > types lie in between. The bottom type is then the Null type.
//! > So the tree looks like:
//! >
//! > ```text
//! > Object
//! > / \
//! > String Other
//! > \ /
//! > (null)
//! > ```
//! >
//! > So the upper bound type is the "supertype" and the lower bound is the
//! > "subtype" (also, super and sub mean upper and lower in Latin, or something
//! > like that anyway).
//!
//! ## Satisfying constraints
//!
//! At a primitive level, there is only one form of constraint that the
//! inference understands: a subtype relation. So the outside world can
//! say "make type A a subtype of type B". If there are variables
//! involved, the inferencer will adjust their upper- and lower-bounds as
//! needed to ensure that this relation is satisfied. (We also allow "make
//! type A equal to type B", but this is translated into "A <: B" and "B
//! <: A")
//!
//! As stated above, we always maintain the invariant that type bounds
//! never refer to other variables. This keeps the inference relatively
//! simple, avoiding the scenario of having a kind of graph where we have
//! to pump constraints along and reach a fixed point, but it does impose
//! some heuristics in the case where the user is relating two type
//! variables A <: B.
//!
//! Combining two variables such that variable A will forever be a subtype
//! of variable B is the trickiest part of the algorithm because there is
//! often no right choice---that is, the right choice will depend on
//! future constraints which we do not yet know. The problem comes about
//! because both A and B have bounds that can be adjusted in the future.
//! Let's look at some of the cases that can come up.
//!
//! Imagine, to start, the best case, where both A and B have an upper and
//! lower bound (that is, the bounds are not top nor bot respectively). In
//! that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
//! A and B should become, they will forever have the desired subtyping
//! relation. We can just leave things as they are.
//!
//! ### Option 1: Unify
//!
//! However, suppose that A.ub is *not* a subtype of B.lb. In
//! that case, we must make a decision. One option is to unify A
//! and B so that they are one variable whose bounds are:
//!
//! UB = GLB(A.ub, B.ub)
//! LB = LUB(A.lb, B.lb)
//!
//! (Note that we will have to verify that LB <: UB; if it does not, the
//! types are not intersecting and there is an error) In that case, A <: B
//! holds trivially because A==B. However, we have now lost some
//! flexibility, because perhaps the user intended for A and B to end up
//! as different types and not the same type.
//!
//! Pictorially, what this does is to take two distinct variables with
//! (hopefully not completely) distinct type ranges and produce one with
//! the intersection.
//!
//! ```text
//! B.ub B.ub
//! /\ /
//! A.ub / \ A.ub /
//! / \ / \ \ /
//! / X \ UB
//! / / \ \ / \
//! / / / \ / /
//! \ \ / / \ /
//! \ X / LB
//! \ / \ / / \
//! \ / \ / / \
//! A.lb B.lb A.lb B.lb
//! ```
//!
//!
//! ### Option 2: Relate UB/LB
//!
//! Another option is to keep A and B as distinct variables but set their
//! bounds in such a way that, whatever happens, we know that A <: B will hold.
//! This can be achieved by ensuring that A.ub <: B.lb. In practice there
//! are two ways to do that, depicted pictorially here:
//!
//! ```text
//! Before Option #1 Option #2
//!
//! B.ub B.ub B.ub
//! /\ / \ / \
//! A.ub / \ A.ub /(B')\ A.ub /(B')\
//! / \ / \ \ / / \ / /
//! / X \ __UB____/ UB /
//! / / \ \ / | | /
//! / / / \ / | | /
//! \ \ / / /(A')| | /
//! \ X / / LB ______LB/
//! \ / \ / / / \ / (A')/ \
//! \ / \ / \ / \ \ / \
//! A.lb B.lb A.lb B.lb A.lb B.lb
//! ```
//!
//! In these diagrams, UB and LB are defined as before. As you can see,
//! the new ranges `A'` and `B'` are quite different from the range that
//! would be produced by unifying the variables.
//!
//! ### What we do now
//!
//! Our current technique is to *try* (transactionally) to relate the
//! existing bounds of A and B, if there are any (i.e., if `UB(A) != top
//! && LB(B) != bot`). If that succeeds, we're done. If it fails, then
//! we merge A and B into the same variable.
//!
//! This is not clearly the correct course. For example, if `UB(A) !=
//! top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
//! and leave the variables unmerged. This is sometimes the better
//! course, it depends on the program.
//!
//! The main case which fails today that I would like to support is:
//!
//! ```text
//! fn foo<T>(x: T, y: T) { ... }
//!
//! fn bar() {
//! let x: @mut int = @mut 3;
//! let y: @int = @3;
//! foo(x, y);
//! }
//! ```
//!
//! In principle, the inferencer ought to find that the parameter `T` to
//! `foo(x, y)` is `@const int`. Today, however, it does not; this is
//! because the type variable `T` is merged with the type variable for
//! `X`, and thus inherits its UB/LB of `@mut int`. This leaves no
//! flexibility for `T` to later adjust to accommodate `@int`.
//!
//! ### What to do when not all bounds are present
//!
//! In the prior discussion we assumed that A.ub was not top and B.lb was
//! not bot. Unfortunately this is rarely the case. Often type variables
//! have "lopsided" bounds. For example, if a variable in the program has
//! been initialized but has not been used, then its corresponding type
//! variable will have a lower bound but no upper bound. When that
//! variable is then used, we would like to know its upper bound---but we
//! don't have one! In this case we'll do different things depending on
//! how the variable is being used.
//!
//! ## Transactional support
//!
//! Whenever we merge variables or adjust their bounds, we always
//! keep a record of the old value. This allows the changes to be undone.
//!
//! ## Regions
//!
//! I've only talked about type variables here, but region variables
//! follow the same principle. They have upper- and lower-bounds. A
//! region A is a subregion of a region B if A being valid implies that B
//! is valid. This basically corresponds to the block nesting structure:
//! the regions for outer block scopes are superregions of those for inner
//! block scopes.
//!
//! ## Integral and floating-point type variables
//!
//! There is a third variety of type variable that we use only for
//! inferring the types of unsuffixed integer literals. Integral type
//! variables differ from general-purpose type variables in that there's
//! no subtyping relationship among the various integral types, so instead
//! of associating each variable with an upper and lower bound, we just
//! use simple unification. Each integer variable is associated with at
//! most one integer type. Floating point types are handled similarly to
//! integral types.
//!
//! ## GLB/LUB
//!
//! Computing the greatest-lower-bound and least-upper-bound of two
//! types/regions is generally straightforward except when type variables
//! are involved. In that case, we follow a similar "try to use the bounds
//! when possible but otherwise merge the variables" strategy. In other
//! words, `GLB(A, B)` where `A` and `B` are variables will often result
//! in `A` and `B` being merged and the result being `A`.
//!
//! ## Type coercion
//!
//! We have a notion of assignability which differs somewhat from
//! subtyping; in particular it may cause region borrowing to occur. See
//! the big comment later in this file on Type Coercion for specifics.
//!
//! ### In conclusion
//!
//! I showed you three ways to relate `A` and `B`. There are also more,
//! of course, though I'm not sure if there are any more sensible options.
//! The main point is that there are various options, each of which
//! produce a distinct range of types for `A` and `B`. Depending on what
//! the correct values for A and B are, one of these options will be the
//! right choice: but of course we don't know the right values for A and B
//! yet, that's what we're trying to find! In our code, we opt to unify
//! (Option #1).
//!
//! # Implementation details
//!
//! We make use of a trait-like implementation strategy to consolidate
//! duplicated code between subtypes, GLB, and LUB computations. See the
//! section on "Type Combining" below for details.

View File

@ -8,56 +8,53 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
//! Error Reporting Code for the inference engine
//!
//! Because of the way inference, and in particular region inference,
//! works, it often happens that errors are not detected until far after
//! the relevant line of code has been type-checked. Therefore, there is
//! an elaborate system to track why a particular constraint in the
//! inference graph arose so that we can explain to the user what gave
//! rise to a particular error.
//!
//! The basis of the system are the "origin" types. An "origin" is the
//! reason that a constraint or inference variable arose. There are
//! different "origin" enums for different kinds of constraints/variables
//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
//! a span, but also more information so that we can generate a meaningful
//! error message.
//!
//! Having a catalogue of all the different reasons an error can arise is
//! also useful for other reasons, like cross-referencing FAQs etc, though
//! we are not really taking advantage of this yet.
//!
//! # Region Inference
//!
//! Region inference is particularly tricky because it always succeeds "in
//! the moment" and simply registers a constraint. Then, at the end, we
//! can compute the full graph and report errors, so we need to be able to
//! store and later report what gave rise to the conflicting constraints.
//!
//! # Subtype Trace
//!
Determining whether `T1 <: T2` often involves a number of subtypes and
//! subconstraints along the way. A "TypeTrace" is an extended version
//! of an origin that traces the types and other values that were being
//! compared. It is not necessarily comprehensive (in fact, at the time of
//! this writing it only tracks the root values being compared) but I'd
//! like to extend it to include significant "waypoints". For example, if
//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
//! <: T4` fails, I'd like the trace to include enough information to say
//! "in the 2nd element of the tuple". Similarly, failures when comparing
//! arguments or return types in fn types should be able to cite the
//! specific position, etc.
//!
//! # Reality vs plan
//!
//! Of course, there is still a LOT of code in typeck that has yet to be
//! ported to this system, and which relies on string concatenation at the
//! time of error detection.
Error Reporting Code for the inference engine
Because of the way inference, and in particular region inference,
works, it often happens that errors are not detected until far after
the relevant line of code has been type-checked. Therefore, there is
an elaborate system to track why a particular constraint in the
inference graph arose so that we can explain to the user what gave
rise to a particular error.
The basis of the system are the "origin" types. An "origin" is the
reason that a constraint or inference variable arose. There are
different "origin" enums for different kinds of constraints/variables
(e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
a span, but also more information so that we can generate a meaningful
error message.
Having a catalogue of all the different reasons an error can arise is
also useful for other reasons, like cross-referencing FAQs etc, though
we are not really taking advantage of this yet.
# Region Inference
Region inference is particularly tricky because it always succeeds "in
the moment" and simply registers a constraint. Then, at the end, we
can compute the full graph and report errors, so we need to be able to
store and later report what gave rise to the conflicting constraints.
# Subtype Trace
Determining whether `T1 <: T2` often involves a number of subtypes and
subconstraints along the way. A "TypeTrace" is an extended version
of an origin that traces the types and other values that were being
compared. It is not necessarily comprehensive (in fact, at the time of
this writing it only tracks the root values being compared) but I'd
like to extend it to include significant "waypoints". For example, if
you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
<: T4` fails, I'd like the trace to include enough information to say
"in the 2nd element of the tuple". Similarly, failures when comparing
arguments or return types in fn types should be able to cite the
specific position, etc.
# Reality vs plan
Of course, there is still a LOT of code in typeck that has yet to be
ported to this system, and which relies on string concatenation at the
time of error detection.
*/
use self::FreshOrKept::*;
use std::collections::HashSet;
@ -391,11 +388,9 @@ impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> {
ty::note_and_explain_type_err(self.tcx, terr);
}
/// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived
/// error.
fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String> {
/*!
* Returns a string of the form "expected `{}`, found `{}`",
* or None if this is a derived error.
*/
match *values {
infer::Types(ref exp_found) => self.expected_found_str(exp_found),
infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found)
@ -1249,7 +1244,7 @@ impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
}
ty_queue.push(&*mut_ty.ty);
}
ast::TyPath(ref path, ref bounds, id) => {
ast::TyPath(ref path, id) => {
let a_def = match self.tcx.def_map.borrow().get(&id) {
None => {
self.tcx
@ -1296,7 +1291,7 @@ impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
let new_path = self.rebuild_path(rebuild_info, lifetime);
let to = ast::Ty {
id: cur_ty.id,
node: ast::TyPath(new_path, bounds.clone(), id),
node: ast::TyPath(new_path, id),
span: cur_ty.span
};
new_ty = self.rebuild_ty(new_ty, P(to));

View File

@ -8,408 +8,404 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Skolemization and functions
One of the trickiest and most subtle aspects of regions is dealing
with higher-ranked things which include bound region variables, such
as function types. I strongly suggest that if you want to understand
the situation, you read this paper (which is, admittedly, very long,
but you don't have to read the whole thing):
http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
Although my explanation will never compete with SPJ's (for one thing,
his is approximately 100 pages), I will attempt to explain the basic
problem and also how we solve it. Note that the paper only discusses
subtyping, not the computation of LUB/GLB.
The problem we are addressing is that there is a kind of subtyping
between functions with bound region parameters. Consider, for
example, whether the following relation holds:
for<'a> fn(&'a int) <: for<'b> fn(&'b int)? (Yes, a => b)
The answer is that of course it does. These two types are basically
the same, except that in one we used the name `a` and one we used
the name `b`.
In the examples that follow, it becomes very important to know whether
a lifetime is bound in a function type (that is, is a lifetime
parameter) or appears free (is defined in some outer scope).
Therefore, from now on I will always write the bindings explicitly,
using the Rust syntax `for<'a> fn(&'a int)` to indicate that `a` is a
lifetime parameter.
Now let's consider two more function types. Here, we assume that the
`'b` lifetime is defined somewhere outside and hence is not a lifetime
parameter bound by the function type (it "appears free"):
for<'a> fn(&'a int) <: fn(&'b int)? (Yes, a => b)
This subtyping relation does in fact hold. To see why, you have to
consider what subtyping means. One way to look at `T1 <: T2` is to
say that it means that it is always ok to treat an instance of `T1` as
if it had the type `T2`. So, with our functions, it is always ok to
treat a function that can take pointers with any lifetime as if it
were a function that can only take a pointer with the specific
lifetime `'b`. After all, `'b` is a lifetime, and
the function can take values of any lifetime.
You can also look at subtyping as the *is a* relationship. This amounts
to the same thing: a function that accepts pointers with any lifetime
*is a* function that accepts pointers with some specific lifetime.
So, what if we reverse the order of the two function types, like this:
fn(&'b int) <: for<'a> fn(&'a int)? (No)
Does the subtyping relationship still hold? The answer of course is
no. In this case, the function accepts *only the lifetime `'b`*,
so it is not reasonable to treat it as if it were a function that
accepted any lifetime.
What about these two examples:
for<'a,'b> fn(&'a int, &'b int) <: for<'a> fn(&'a int, &'a int)? (Yes)
for<'a> fn(&'a int, &'a int) <: for<'a,'b> fn(&'a int, &'b int)? (No)
Here, it is true that functions which take two pointers with any two
lifetimes can be treated as if they only accepted two pointers with
the same lifetime, but not the reverse.
## The algorithm
Here is the algorithm we use to perform the subtyping check:
1. Replace all bound regions in the subtype with new variables
2. Replace all bound regions in the supertype with skolemized
equivalents. A "skolemized" region is just a new fresh region
name.
3. Check that the parameter and return types match as normal
4. Ensure that no skolemized regions 'leak' into region variables
visible from "the outside"
Let's walk through some examples and see how this algorithm plays out.
#### First example
We'll start with the first example, which was:
1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)? Yes: a -> b
After steps 1 and 2 of the algorithm we will have replaced the types
like so:
1. fn(&'A T) <: fn(&'x T)?
Here the upper case `&A` indicates a *region variable*, that is, a
region whose value is being inferred by the system. I also replaced
`&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
to indicate skolemized region names. We can assume they don't appear
elsewhere. Note that neither the sub- nor the supertype bind any
region names anymore (as indicated by the absence of `<` and `>`).
The next step is to check that the parameter types match. Because
parameters are contravariant, this means that we check whether:
&'x T <: &'A T
Region pointers are contravariant so this implies that
&A <= &x
must hold, where `<=` is the subregion relationship. Processing
*this* constraint simply adds a constraint into our graph that `&A <=
&x` and is considered successful (it can, for example, be satisfied by
choosing the value `&x` for `&A`).
So far we have encountered no error, so the subtype check succeeds.
#### The third example
Now let's look first at the third example, which was:
3. fn(&'a T) <: for<'b> fn(&'b T)? No!
After steps 1 and 2 of the algorithm we will have replaced the types
like so:
3. fn(&'a T) <: fn(&'x T)?
This looks pretty much the same as before, except that on the LHS
`'a` was not bound, and hence was left as-is and not replaced with
a variable. The next step is again to check that the parameter types
match. This will ultimately require (as before) that `'a` <= `&x`
must hold: but this does not hold. `self` and `x` are both distinct
free regions. So the subtype check fails.
#### Checking for skolemization leaks
You may be wondering about that mysterious last step in the algorithm.
So far it has not been relevant. The purpose of that last step is to
catch something like *this*:
for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)? No.
Here the function types are the same but for where the binding occurs.
The subtype returns a function that expects a value in precisely one
region. The supertype returns a function that expects a value in any
region. If we allow an instance of the subtype to be used where the
supertype is expected, then, someone could call the fn and think that
the return value has type `fn<b>(&'b T)` when it really has type
`fn(&'a T)` (this is case #3, above). Bad.
So let's step through what happens when we perform this subtype check.
We first replace the bound regions in the subtype (the supertype has
no bound regions). This gives us:
fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
Now we compare the return types, which are covariant, and hence we have:
fn(&'A T) <: for<'b> fn(&'b T)?
Here we skolemize the bound region in the supertype to yield:
fn(&'A T) <: fn(&'x T)?
And then proceed to compare the argument types:
&'x T <: &'A T
'A <= 'x
Finally, this is where it gets interesting! This is where an error
*should* be reported. But in fact this will not happen. The reason why
is that `A` is a variable: we will infer that its value is the fresh
region `x` and think that everything is happy. In fact, this behavior
is *necessary*, it was key to the first example we walked through.
The difference between this example and the first one is that the variable
`A` already existed at the point where the skolemization occurred. In
the first example, you had two functions:
for<'a> fn(&'a T) <: for<'b> fn(&'b T)
and hence `&A` and `&x` were created "together". In general, the
intention of the skolemized names is that they are supposed to be
fresh names that could never be equal to anything from the outside.
But when inference comes into play, we might not be respecting this
rule.
So the way we solve this is to add a fourth step that examines the
constraints that refer to skolemized names. Basically, consider a
non-directed version of the constraint graph. Let `Tainted(x)` be the
set of all things reachable from a skolemized variable `x`.
`Tainted(x)` should not contain any regions that existed before the
step at which the skolemization was performed. So this case here
would fail because `&x` was created alone, but is relatable to `&A`.
## Computing the LUB and GLB
The paper I pointed you at is written for Haskell. It does not
therefore consider subtyping and in particular does not consider
LUB or GLB computation. We have to consider this. Here is the
algorithm I implemented.
First though, let's discuss what we are trying to compute in more
detail. The LUB is basically the "common supertype" and the GLB is
"common subtype"; one catch is that the LUB should be the
*most-specific* common supertype and the GLB should be *most general*
common subtype (as opposed to any common supertype or any common
subtype).
Anyway, to help clarify, here is a table containing some function
pairs and their LUB/GLB (for conciseness, in this table, I'm just
including the lifetimes here, not the rest of the types, and I'm
writing `fn<>` instead of `for<> fn`):
```
Type 1 Type 2 LUB GLB
fn<'a>('a) fn('X) fn('X) fn<'a>('a)
fn('a) fn('X) -- fn<'a>('a)
fn<'a,'b>('a, 'b) fn<'x>('x, 'x) fn<'a>('a, 'a) fn<'a,'b>('a, 'b)
fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a) fn<'a,'b,'c>('a,'b,'c)
```
### Conventions
I use lower-case letters (e.g., `&a`) for bound regions and upper-case
letters for free regions (`&A`). Region variables written with a
dollar-sign (e.g., `$a`). I will try to remember to enumerate the
bound-regions on the fn type as well (e.g., `for<'a> fn(&a)`).
### High-level summary
Both the LUB and the GLB algorithms work in a similar fashion. They
begin by replacing all bound regions (on both sides) with fresh region
inference variables. Therefore, both functions are converted to types
that contain only free regions. We can then compute the LUB/GLB in a
straightforward way, as described in `combine.rs`. This results in an
interim type T. The algorithms then examine the regions that appear
in T and try to, in some cases, replace them with bound regions to
yield the final result.
To decide whether to replace a region `R` that appears in `T` with a
bound region, the algorithms make use of two bits of information.
First is a set `V` that contains all region variables created as part
of the LUB/GLB computation. `V` will contain the region variables
created to replace the bound regions in the input types, but it also
contains 'intermediate' variables created to represent the LUB/GLB of
individual regions. Basically, when asked to compute the LUB/GLB of a
region variable with another region, the inferencer cannot oblige
immediately since the values of those variables are not known.
Therefore, it creates a new variable that is related to the two
regions. For example, the LUB of two variables `$x` and `$y` is a
fresh variable `$z` that is constrained such that `$x <= $z` and `$y
<= $z`. So `V` will contain these intermediate variables as well.
The other important factor in deciding how to replace a region in T is
the function `Tainted($r)` which, for a region variable, identifies
all regions that the region variable is related to in some way
(`Tainted()` made an appearance in the subtype computation as well).
### LUB
The LUB algorithm proceeds in three steps:
1. Replace all bound regions (on both sides) with fresh region
inference variables.
2. Compute the LUB "as normal", meaning compute the GLB of each
pair of argument types and the LUB of the return types and
so forth. Combine those to a new function type `F`.
3. Replace each region `R` that appears in `F` as follows:
- Let `V` be the set of variables created during the LUB
computational steps 1 and 2, as described in the previous section.
- If `R` is not in `V`, replace `R` with itself.
- If `Tainted(R)` contains a region that is not in `V`,
replace `R` with itself.
- Otherwise, select the earliest variable in `Tainted(R)` that originates
from the left-hand side and replace `R` with the bound region that
this variable was a replacement for.
So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
In this case, `&a` will be replaced with `$a` and the interim LUB type
`fn($b)` will be computed, where `$b=GLB(&A,$a)`. Therefore, `V =
{$a, $b}` and `Tainted($b) = { $b, $a, &A }`. When we go to replace
`$b`, we find that since `&A \in Tainted($b)` is not a member of `V`,
we leave `$b` as is. When region inference happens, `$b` will be
resolved to `&A`, as we wanted.
Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`. In
this case, we'll end up with a (pre-replacement) LUB type of `fn(&g,
&h)` and a graph that looks like:
```
$a $b *--$x
\ \ / /
\ $h-* /
$g-----------*
```
Here `$g` and `$h` are fresh variables that are created to represent
the LUB/GLB of things requiring inference. This means that `V` and
`Tainted` will look like:
```
V = {$a, $b, $g, $h, $x}
Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
```
Therefore we replace both `$g` and `$h` with `$a`, and end up
with the type `fn(&a, &a)`.
### GLB
The procedure for computing the GLB is similar. The difference lies
in computing the replacements for the various variables. For each
region `R` that appears in the type `F`, we again compute `Tainted(R)`
and examine the results:
1. If `R` is not in `V`, it is not replaced.
2. Else, if `Tainted(R)` contains only variables in `V`, and it
contains exactly one variable from the LHS and one variable from
the RHS, then `R` can be mapped to the bound version of the
variable from the LHS.
3. Else, if `Tainted(R)` contains no variable from the LHS and no
variable from the RHS, then `R` can be mapped to itself.
4. Else, `R` is mapped to a fresh bound variable.
These rules are pretty complex. Let's look at some examples to see
how they play out.
Our first example was `fn(&a)` and `fn(&X)`. In this case, `&a` will
be replaced with `$a` and we will ultimately compute a
(pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}`. To find the
replacement for `$g` we consult the rules above:
- Rule (1) does not apply because `$g \in V`
- Rule (2) does not apply because `&X \in Tainted($g)`
- Rule (3) does not apply because `$a \in Tainted($g)`
- Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
So our final result is `fn(&z)`, which is correct.
The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
have a (pre-replacement) GLB of `fn(&g)`, where `$g = LUB(&A,&Z)`.
Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`. In this case,
by rule (3), `$g` is mapped to itself, and hence the result is
`fn($g)`. This result is correct (in this case, at least), but it is
indicative of a case that *can* lead us into concluding that there is
no GLB when in fact a GLB does exist. See the section "Questionable
Results" below for more details.
The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
Tainted($h) = {$g, $h, $a, $b, $c}`. Only rule (4) applies and hence
we'll select fresh bound variables `y` and `z` and wind up with
`fn(&y, &z)`.
For the last example, let's consider what may seem trivial, but is
not: `fn(&a, &a)` and `fn(&b, &b)`. In this case, we'll get `F=fn($g,
$h)` where `Tainted($g) = {$g, $a, $x}` and `Tainted($h) = {$h, $a,
$x}`. Both of these sets contain exactly one bound variable from each
side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
is the desired result.
### Shortcomings and correctness
You may be wondering whether this algorithm is correct. The answer is
"sort of". There are definitely cases where they fail to compute a
result even though a correct result exists. I believe, though, that
if they succeed, then the result is valid, and I will attempt to
convince you. The basic argument is that the "pre-replacement" step
computes a set of constraints. The replacements, then, attempt to
satisfy those constraints, using bound identifiers where needed.
For now I will briefly go over the cases for LUB/GLB and identify
their intent:
- LUB:
- The region variables that are substituted in place of bound regions
are intended to collect constraints on those bound regions.
- If Tainted(R) contains only values in V, then this region is unconstrained
and can therefore be generalized, otherwise it cannot.
- GLB:
- The region variables that are substituted in place of bound regions
are intended to collect constraints on those bound regions.
- If Tainted(R) contains exactly one variable from each side, and
only variables in V, that indicates that those two bound regions
must be equated.
- Otherwise, if Tainted(R) references any variables from left or right
side, then it is trying to combine a bound region with a free one or
multiple bound regions, so we need to select fresh bound regions.
Sorry this is more of a shorthand to myself. I will try to write up something
more convincing in the future.
#### Where are the algorithms wrong?
- The pre-replacement computation can fail even though using a
bound-region would have succeeded.
- We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
GLB of $a and $b. But if inference finds that $a and $b must be mapped
to regions without a GLB, then this is effectively a failure to compute
the GLB. However, the result `fn<$c>(fn($c))` is a valid GLB.
*/
//! # Skolemization and functions
//!
//! One of the trickiest and most subtle aspects of regions is dealing
//! with higher-ranked things which include bound region variables, such
//! as function types. I strongly suggest that if you want to understand
//! the situation, you read this paper (which is, admittedly, very long,
//! but you don't have to read the whole thing):
//!
//! http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
//!
//! Although my explanation will never compete with SPJ's (for one thing,
//! his is approximately 100 pages), I will attempt to explain the basic
//! problem and also how we solve it. Note that the paper only discusses
//! subtyping, not the computation of LUB/GLB.
//!
//! The problem we are addressing is that there is a kind of subtyping
//! between functions with bound region parameters. Consider, for
//! example, whether the following relation holds:
//!
//! for<'a> fn(&'a int) <: for<'b> fn(&'b int)? (Yes, a => b)
//!
//! The answer is that of course it does. These two types are basically
//! the same, except that in one we used the name `a` and one we used
//! the name `b`.
//!
//! In the examples that follow, it becomes very important to know whether
//! a lifetime is bound in a function type (that is, is a lifetime
//! parameter) or appears free (is defined in some outer scope).
//! Therefore, from now on I will always write the bindings explicitly,
//! using the Rust syntax `for<'a> fn(&'a int)` to indicate that `a` is a
//! lifetime parameter.
//!
//! Now let's consider two more function types. Here, we assume that the
//! `'b` lifetime is defined somewhere outside and hence is not a lifetime
//! parameter bound by the function type (it "appears free"):
//!
//! for<'a> fn(&'a int) <: fn(&'b int)? (Yes, a => b)
//!
//! This subtyping relation does in fact hold. To see why, you have to
//! consider what subtyping means. One way to look at `T1 <: T2` is to
//! say that it means that it is always ok to treat an instance of `T1` as
//! if it had the type `T2`. So, with our functions, it is always ok to
//! treat a function that can take pointers with any lifetime as if it
//! were a function that can only take a pointer with the specific
//! lifetime `'b`. After all, `'b` is a lifetime, and
//! the function can take values of any lifetime.
//!
//! You can also look at subtyping as the *is a* relationship. This amounts
//! to the same thing: a function that accepts pointers with any lifetime
//! *is a* function that accepts pointers with some specific lifetime.
//!
//! So, what if we reverse the order of the two function types, like this:
//!
//! fn(&'b int) <: for<'a> fn(&'a int)? (No)
//!
//! Does the subtyping relationship still hold? The answer of course is
//! no. In this case, the function accepts *only the lifetime `'b`*,
//! so it is not reasonable to treat it as if it were a function that
//! accepted any lifetime.
//!
//! What about these two examples:
//!
//! for<'a,'b> fn(&'a int, &'b int) <: for<'a> fn(&'a int, &'a int)? (Yes)
//! for<'a> fn(&'a int, &'a int) <: for<'a,'b> fn(&'a int, &'b int)? (No)
//!
//! Here, it is true that functions which take two pointers with any two
//! lifetimes can be treated as if they only accepted two pointers with
//! the same lifetime, but not the reverse.
//!
//! ## The algorithm
//!
//! Here is the algorithm we use to perform the subtyping check:
//!
//! 1. Replace all bound regions in the subtype with new variables
//! 2. Replace all bound regions in the supertype with skolemized
//! equivalents. A "skolemized" region is just a new fresh region
//! name.
//! 3. Check that the parameter and return types match as normal
//! 4. Ensure that no skolemized regions 'leak' into region variables
//! visible from "the outside"
//!
//! Let's walk through some examples and see how this algorithm plays out.
//!
//! #### First example
//!
//! We'll start with the first example, which was:
//!
//! 1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)? Yes: a -> b
//!
//! After steps 1 and 2 of the algorithm we will have replaced the types
//! like so:
//!
//! 1. fn(&'A T) <: fn(&'x T)?
//!
//! Here the upper case `&A` indicates a *region variable*, that is, a
//! region whose value is being inferred by the system. I also replaced
//! `&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
//! to indicate skolemized region names. We can assume they don't appear
//! elsewhere. Note that neither the sub- nor the supertype bind any
//! region names anymore (as indicated by the absence of `<` and `>`).
//!
//! The next step is to check that the parameter types match. Because
//! parameters are contravariant, this means that we check whether:
//!
//! &'x T <: &'A T
//!
//! Region pointers are contravariant so this implies that
//!
//! &A <= &x
//!
//! must hold, where `<=` is the subregion relationship. Processing
//! *this* constraint simply adds a constraint into our graph that `&A <=
//! &x` and is considered successful (it can, for example, be satisfied by
//! choosing the value `&x` for `&A`).
//!
//! So far we have encountered no error, so the subtype check succeeds.
//!
//! #### The third example
//!
//! Now let's look first at the third example, which was:
//!
//! 3. fn(&'a T) <: for<'b> fn(&'b T)? No!
//!
//! After steps 1 and 2 of the algorithm we will have replaced the types
//! like so:
//!
//! 3. fn(&'a T) <: fn(&'x T)?
//!
//! This looks pretty much the same as before, except that on the LHS
//! `'a` was not bound, and hence was left as-is and not replaced with
//! a variable. The next step is again to check that the parameter types
//! match. This will ultimately require (as before) that `'a` <= `&x`
//! must hold: but this does not hold. `self` and `x` are both distinct
//! free regions. So the subtype check fails.
//!
//! #### Checking for skolemization leaks
//!
//! You may be wondering about that mysterious last step in the algorithm.
//! So far it has not been relevant. The purpose of that last step is to
//! catch something like *this*:
//!
//! for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)? No.
//!
//! Here the function types are the same but for where the binding occurs.
//! The subtype returns a function that expects a value in precisely one
//! region. The supertype returns a function that expects a value in any
//! region. If we allow an instance of the subtype to be used where the
//! supertype is expected, then, someone could call the fn and think that
//! the return value has type `fn<b>(&'b T)` when it really has type
//! `fn(&'a T)` (this is case #3, above). Bad.
//!
//! So let's step through what happens when we perform this subtype check.
//! We first replace the bound regions in the subtype (the supertype has
//! no bound regions). This gives us:
//!
//! fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
//!
//! Now we compare the return types, which are covariant, and hence we have:
//!
//! fn(&'A T) <: for<'b> fn(&'b T)?
//!
//! Here we skolemize the bound region in the supertype to yield:
//!
//! fn(&'A T) <: fn(&'x T)?
//!
//! And then proceed to compare the argument types:
//!
//! &'x T <: &'A T
//! 'A <= 'x
//!
//! Finally, this is where it gets interesting! This is where an error
//! *should* be reported. But in fact this will not happen. The reason why
//! is that `A` is a variable: we will infer that its value is the fresh
//! region `x` and think that everything is happy. In fact, this behavior
//! is *necessary*, it was key to the first example we walked through.
//!
//! The difference between this example and the first one is that the variable
//! `A` already existed at the point where the skolemization occurred. In
//! the first example, you had two functions:
//!
//! for<'a> fn(&'a T) <: for<'b> fn(&'b T)
//!
//! and hence `&A` and `&x` were created "together". In general, the
//! intention of the skolemized names is that they are supposed to be
//! fresh names that could never be equal to anything from the outside.
//! But when inference comes into play, we might not be respecting this
//! rule.
//!
//! So the way we solve this is to add a fourth step that examines the
//! constraints that refer to skolemized names. Basically, consider a
//! non-directed version of the constraint graph. Let `Tainted(x)` be the
//! set of all things reachable from a skolemized variable `x`.
//! `Tainted(x)` should not contain any regions that existed before the
//! step at which the skolemization was performed. So this case here
//! would fail because `&x` was created alone, but is relatable to `&A`.
//!
//! ## Computing the LUB and GLB
//!
//! The paper I pointed you at is written for Haskell. It does not
//! therefore consider subtyping and in particular does not consider
//! LUB or GLB computation. We have to consider this. Here is the
//! algorithm I implemented.
//!
//! First though, let's discuss what we are trying to compute in more
//! detail. The LUB is basically the "common supertype" and the GLB is
//! "common subtype"; one catch is that the LUB should be the
//! *most-specific* common supertype and the GLB should be *most general*
//! common subtype (as opposed to any common supertype or any common
//! subtype).
//!
//! Anyway, to help clarify, here is a table containing some function
//! pairs and their LUB/GLB (for conciseness, in this table, I'm just
//! including the lifetimes here, not the rest of the types, and I'm
//! writing `fn<>` instead of `for<> fn`):
//!
//! ```
//! Type 1 Type 2 LUB GLB
//! fn<'a>('a) fn('X) fn('X) fn<'a>('a)
//! fn('a) fn('X) -- fn<'a>('a)
//! fn<'a,'b>('a, 'b) fn<'x>('x, 'x) fn<'a>('a, 'a) fn<'a,'b>('a, 'b)
//! fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a) fn<'a,'b,'c>('a,'b,'c)
//! ```
//!
//! ### Conventions
//!
//! I use lower-case letters (e.g., `&a`) for bound regions and upper-case
//! letters for free regions (`&A`). Region variables are written with a
//! dollar-sign (e.g., `$a`). I will try to remember to enumerate the
//! bound-regions on the fn type as well (e.g., `for<'a> fn(&a)`).
//!
//! ### High-level summary
//!
//! Both the LUB and the GLB algorithms work in a similar fashion. They
//! begin by replacing all bound regions (on both sides) with fresh region
//! inference variables. Therefore, both functions are converted to types
//! that contain only free regions. We can then compute the LUB/GLB in a
//! straightforward way, as described in `combine.rs`. This results in an
//! interim type T. The algorithms then examine the regions that appear
//! in T and try to, in some cases, replace them with bound regions to
//! yield the final result.
//!
//! To decide whether to replace a region `R` that appears in `T` with a
//! bound region, the algorithms make use of two bits of information.
//! First is a set `V` that contains all region variables created as part
//! of the LUB/GLB computation. `V` will contain the region variables
//! created to replace the bound regions in the input types, but it also
//! contains 'intermediate' variables created to represent the LUB/GLB of
//! individual regions. Basically, when asked to compute the LUB/GLB of a
//! region variable with another region, the inferencer cannot oblige
//! immediately since the value of that variable is not known.
//! Therefore, it creates a new variable that is related to the two
//! regions. For example, the LUB of two variables `$x` and `$y` is a
//! fresh variable `$z` that is constrained such that `$x <= $z` and `$y
//! <= $z`. So `V` will contain these intermediate variables as well.
//!
//! The other important factor in deciding how to replace a region in T is
//! the function `Tainted($r)` which, for a region variable, identifies
//! all regions that the region variable is related to in some way
//! (`Tainted()` made an appearance in the subtype computation as well).
//!
//! ### LUB
//!
//! The LUB algorithm proceeds in three steps:
//!
//! 1. Replace all bound regions (on both sides) with fresh region
//! inference variables.
//! 2. Compute the LUB "as normal", meaning compute the GLB of each
//! pair of argument types and the LUB of the return types and
//! so forth. Combine those to a new function type `F`.
//! 3. Replace each region `R` that appears in `F` as follows:
//! - Let `V` be the set of variables created during the LUB
//! computational steps 1 and 2, as described in the previous section.
//! - If `R` is not in `V`, replace `R` with itself.
//! - If `Tainted(R)` contains a region that is not in `V`,
//! replace `R` with itself.
//! - Otherwise, select the earliest variable in `Tainted(R)` that originates
//! from the left-hand side and replace `R` with the bound region that
//! this variable was a replacement for.
//!
//! So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
//! In this case, `&a` will be replaced with `$a` and the interim LUB type
//! `fn($b)` will be computed, where `$b=GLB(&A,$a)`. Therefore, `V =
//! {$a, $b}` and `Tainted($b) = { $b, $a, &A }`. When we go to replace
//! `$b`, we find that since `&A \in Tainted($b)` is not a member of `V`,
//! we leave `$b` as is. When region inference happens, `$b` will be
//! resolved to `&A`, as we wanted.
//!
//! Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`. In
//! this case, we'll end up with a (pre-replacement) LUB type of `fn(&g,
//! &h)` and a graph that looks like:
//!
//! ```
//! $a $b *--$x
//! \ \ / /
//! \ $h-* /
//! $g-----------*
//! ```
//!
//! Here `$g` and `$h` are fresh variables that are created to represent
//! the LUB/GLB of things requiring inference. This means that `V` and
//! `Tainted` will look like:
//!
//! ```
//! V = {$a, $b, $g, $h, $x}
//! Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
//! ```
//!
//! Therefore we replace both `$g` and `$h` with `$a`, and end up
//! with the type `fn(&a, &a)`.
//!
//! ### GLB
//!
//! The procedure for computing the GLB is similar. The difference lies
//! in computing the replacements for the various variables. For each
//! region `R` that appears in the type `F`, we again compute `Tainted(R)`
//! and examine the results:
//!
//! 1. If `R` is not in `V`, it is not replaced.
//! 2. Else, if `Tainted(R)` contains only variables in `V`, and it
//! contains exactly one variable from the LHS and one variable from
//! the RHS, then `R` can be mapped to the bound version of the
//! variable from the LHS.
//! 3. Else, if `Tainted(R)` contains no variable from the LHS and no
//! variable from the RHS, then `R` can be mapped to itself.
//! 4. Else, `R` is mapped to a fresh bound variable.
//!
//! These rules are pretty complex. Let's look at some examples to see
//! how they play out.
//!
//! Our first example was `fn(&a)` and `fn(&X)`. In this case, `&a` will
//! be replaced with `$a` and we will ultimately compute a
//! (pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
//! Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}`. To find the
//! replacement for `$g` we consult the rules above:
//! - Rule (1) does not apply because `$g \in V`
//! - Rule (2) does not apply because `&X \in Tainted($g)`
//! - Rule (3) does not apply because `$a \in Tainted($g)`
//! - Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
//! So our final result is `fn(&z)`, which is correct.
//!
//! The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
//! have a (pre-replacement) GLB of `fn($g)`, where `$g = LUB(&A,&Z)`.
//! Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`. In this case,
//! by rule (3), `$g` is mapped to itself, and hence the result is
//! `fn($g)`. This result is correct (in this case, at least), but it is
//! indicative of a case that *can* lead us into concluding that there is
//! no GLB when in fact a GLB does exist. See the section "Questionable
//! Results" below for more details.
//!
//! The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
//! before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
//! Tainted($h) = {$g, $h, $a, $b, $c}`. Only rule (4) applies and hence
//! we'll select fresh bound variables `y` and `z` and wind up with
//! `fn(&y, &z)`.
//!
//! For the last example, let's consider what may seem trivial, but is
//! not: `fn(&a, &a)` and `fn(&b, &b)`. In this case, we'll get `F=fn($g,
//! $h)` where `Tainted($g) = {$g, $a, $x}` and `Tainted($h) = {$h, $a,
//! $x}`. Both of these sets contain exactly one bound variable from each
//! side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
//! is the desired result.
//!
//! ### Shortcomings and correctness
//!
//! You may be wondering whether this algorithm is correct. The answer is
//! "sort of". There are definitely cases where they fail to compute a
//! result even though a correct result exists. I believe, though, that
//! if they succeed, then the result is valid, and I will attempt to
//! convince you. The basic argument is that the "pre-replacement" step
//! computes a set of constraints. The replacements, then, attempt to
//! satisfy those constraints, using bound identifiers where needed.
//!
//! For now I will briefly go over the cases for LUB/GLB and identify
//! their intent:
//!
//! - LUB:
//! - The region variables that are substituted in place of bound regions
//! are intended to collect constraints on those bound regions.
//! - If Tainted(R) contains only values in V, then this region is unconstrained
//! and can therefore be generalized, otherwise it cannot.
//! - GLB:
//! - The region variables that are substituted in place of bound regions
//! are intended to collect constraints on those bound regions.
//! - If Tainted(R) contains exactly one variable from each side, and
//! only variables in V, that indicates that those two bound regions
//! must be equated.
//! - Otherwise, if Tainted(R) references any variables from left or right
//! side, then it is trying to combine a bound region with a free one or
//! multiple bound regions, so we need to select fresh bound regions.
//!
//! Sorry this is more of a shorthand to myself. I will try to write up something
//! more convincing in the future.
//!
//! #### Where are the algorithms wrong?
//!
//! - The pre-replacement computation can fail even though using a
//! bound-region would have succeeded.
//! - We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
//! GLB of $a and $b. But if inference finds that $a and $b must be mapped
//! to regions without a GLB, then this is effectively a failure to compute
//! the GLB. However, the result `fn<$c>(fn($c))` is a valid GLB.

View File

@ -8,10 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Helper routines for higher-ranked things. See the `doc` module at
* the end of the file for details.
*/
//! Helper routines for higher-ranked things. See the `doc` module at
//! the end of the file for details.
use middle::ty::{mod, Ty, replace_late_bound_regions};
use middle::typeck::infer::{mod, combine, cres, InferCtxt};

View File

@ -8,28 +8,26 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* # Lattice Variables
*
* This file contains generic code for operating on inference variables
* that are characterized by an upper- and lower-bound. The logic and
* reasoning is explained in detail in the large comment in `infer.rs`.
*
* The code in here is defined quite generically so that it can be
* applied both to type variables, which represent types being inferred,
* and fn variables, which represent function types being inferred.
* It may eventually be applied to their types as well, who knows.
* In some cases, the functions are also generic with respect to the
* operation on the lattice (GLB vs LUB).
*
* Although all the functions are generic, we generally write the
* comments in a way that is specific to type variables and the LUB
* operation. It's just easier that way.
*
* In general all of the functions are defined parametrically
* over a `LatticeValue`, which is a value defined with respect to
* a lattice.
*/
//! # Lattice Variables
//!
//! This file contains generic code for operating on inference variables
//! that are characterized by an upper- and lower-bound. The logic and
//! reasoning is explained in detail in the large comment in `infer.rs`.
//!
//! The code in here is defined quite generically so that it can be
//! applied both to type variables, which represent types being inferred,
//! and fn variables, which represent function types being inferred.
//! It may eventually be applied to their types as well, who knows.
//! In some cases, the functions are also generic with respect to the
//! operation on the lattice (GLB vs LUB).
//!
//! Although all the functions are generic, we generally write the
//! comments in a way that is specific to type variables and the LUB
//! operation. It's just easier that way.
//!
//! In general all of the functions are defined parametrically
//! over a `LatticeValue`, which is a value defined with respect to
//! a lattice.
use middle::ty::{TyVar};
use middle::ty::{mod, Ty};

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See doc.rs for documentation */
//! See doc.rs for documentation
#![allow(non_camel_case_types)]
@ -305,6 +305,8 @@ pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>)
}
}
/// Computes the least upper-bound of `a` and `b`. If this is not possible, reports an error and
/// returns ty::err.
pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
origin: TypeOrigin,
a_is_expected: bool,
@ -312,11 +314,6 @@ pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
b: Ty<'tcx>)
-> Ty<'tcx>
{
/*!
* Computes the least upper-bound of `a` and `b`. If this is
* not possible, reports an error and returns ty::err.
*/
debug!("common_supertype({}, {})",
a.repr(cx.tcx), b.repr(cx.tcx));
@ -754,17 +751,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
.collect()
}
/// Given a set of generics defined on a type or impl, returns a substitution mapping each
/// type/region parameter to a fresh inference variable.
pub fn fresh_substs_for_generics(&self,
span: Span,
generics: &ty::Generics<'tcx>)
-> subst::Substs<'tcx>
{
/*!
* Given a set of generics defined on a type or impl, returns
* a substitution mapping each type/region parameter to a
* fresh inference variable.
*/
let type_params =
generics.types.map(
|_| self.next_ty_var());
@ -774,18 +767,15 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
subst::Substs::new(type_params, region_params)
}
/// Given a set of generics defined on a trait, returns a substitution mapping each output
/// type/region parameter to a fresh inference variable, and mapping the self type to
/// `self_ty`.
pub fn fresh_substs_for_trait(&self,
span: Span,
generics: &ty::Generics<'tcx>,
self_ty: Ty<'tcx>)
-> subst::Substs<'tcx>
{
/*!
* Given a set of generics defined on a trait, returns a
* substitution mapping each output type/region parameter to a
* fresh inference variable, and mapping the self type to
* `self_ty`.
*/
assert!(generics.types.len(subst::SelfSpace) == 1);
assert!(generics.types.len(subst::FnSpace) == 0);

View File

@ -8,371 +8,367 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Region inference module.
# Terminology
Note that we use the terms region and lifetime interchangeably,
though the term `lifetime` is preferred.
# Introduction
Region inference uses a somewhat more involved algorithm than type
inference. It is not the most efficient thing ever written though it
seems to work well enough in practice (famous last words). The reason
that we use a different algorithm is because, unlike with types, it is
impractical to hand-annotate with regions (in some cases, there aren't
even the requisite syntactic forms). So we have to get it right, and
it's worth spending more time on a more involved analysis. Moreover,
regions are a simpler case than types: they don't have aggregate
structure, for example.
Unlike normal type inference, which is similar in spirit to H-M and thus
works progressively, the region type inference works by accumulating
constraints over the course of a function. Finally, at the end of
processing a function, we process and solve the constraints all at
once.
The constraints are always of one of three possible forms:
- ConstrainVarSubVar(R_i, R_j) states that region variable R_i
must be a subregion of R_j
- ConstrainRegSubVar(R, R_i) states that the concrete region R
(which must not be a variable) must be a subregion of the variable R_i
- ConstrainVarSubReg(R_i, R) is the inverse
# Building up the constraints
Variables and constraints are created using the following methods:
- `new_region_var()` creates a new, unconstrained region variable;
- `make_subregion(R_i, R_j)` states that R_i is a subregion of R_j
- `lub_regions(R_i, R_j) -> R_k` returns a region R_k which is
the smallest region that is greater than both R_i and R_j
- `glb_regions(R_i, R_j) -> R_k` returns a region R_k which is
the greatest region that is smaller than both R_i and R_j
The actual region resolution algorithm is not entirely
obvious, though it is also not overly complex.
## Snapshotting
It is also permitted to try (and rollback) changes to the graph. This
is done by invoking `start_snapshot()`, which returns a value. Then
later you can call `rollback_to()` which undoes the work.
Alternatively, you can call `commit()` which ends all snapshots.
Snapshots can be recursive---so you can start a snapshot when another
is in progress, but only the root snapshot can "commit".
# Resolving constraints
The constraint resolution algorithm is not super complex but also not
entirely obvious. Here I describe the problem somewhat abstractly,
then describe how the current code works. There may be other, smarter
ways of doing this with which I am unfamiliar and can't be bothered to
research at the moment. - NDM
## The problem
Basically our input is a directed graph where nodes can be divided
into two categories: region variables and concrete regions. Each edge
`R -> S` in the graph represents a constraint that the region `R` is a
subregion of the region `S`.
Region variable nodes can have arbitrary degree. There is one region
variable node per region variable.
Each concrete region node is associated with some, well, concrete
region: e.g., a free lifetime, or the region for a particular scope.
Note that there may be more than one concrete region node for a
particular region value. Moreover, because of how the graph is built,
we know that all concrete region nodes have either in-degree 1 or
out-degree 1.
Before resolution begins, we build up the constraints in a hashmap
that maps `Constraint` keys to spans. During resolution, we construct
the actual `Graph` structure that we describe here.
## Our current algorithm
We divide region variables into two groups: Expanding and Contracting.
Expanding region variables are those that have a concrete region
predecessor (direct or indirect). Contracting region variables are
all others.
We first resolve the values of Expanding region variables and then
process Contracting ones. We currently use an iterative, fixed-point
procedure (but read on, I believe this could be replaced with a linear
walk). Basically we iterate over the edges in the graph, ensuring
that, if the source of the edge has a value, then this value is a
subregion of the target value. If the target does not yet have a
value, it takes the value from the source. If the target already had
a value, then the resulting value is Least Upper Bound of the old and
new values. When we are done, each Expanding node will have the
smallest region that it could possibly have and still satisfy the
constraints.
We next process the Contracting nodes. Here we again iterate over the
edges, only this time we move values from target to source (if the
source is a Contracting node). For each contracting node, we compute
its value as the GLB of all its successors. Basically contracting
nodes ensure that there is overlap between their successors; we will
ultimately infer the largest overlap possible.
# The Region Hierarchy
## Without closures
Let's first consider the region hierarchy without thinking about
closures, because they add a lot of complications. The region
hierarchy *basically* mirrors the lexical structure of the code.
There is a region for every piece of 'evaluation' that occurs, meaning
every expression, block, and pattern (patterns are considered to
"execute" by testing the value they are applied to and creating any
relevant bindings). So, for example:
fn foo(x: int, y: int) { // -+
// +------------+ // |
// | +-----+ // |
// | +-+ +-+ +-+ // |
// | | | | | | | // |
// v v v v v v v // |
let z = x + y; // |
... // |
} // -+
fn bar() { ... }
In this example, there is a region for the fn body block as a whole,
and then a subregion for the declaration of the local variable.
Within that, there are sublifetimes for the assignment pattern and
also the expression `x + y`. The expression itself has sublifetimes
for evaluating `x` and `y`.
## Function calls
Function calls are a bit tricky. I will describe how we handle them
*now* and then a bit about how we can improve them (Issue #6268).
Consider a function call like `func(expr1, expr2)`, where `func`,
`arg1`, and `arg2` are all arbitrary expressions. Currently,
we construct a region hierarchy like:
+----------------+
| |
+--+ +---+ +---+|
v v v v v vv
func(expr1, expr2)
Here you can see that the call as a whole has a region and the
function plus arguments are subregions of that. As a side-effect of
this, we get a lot of spurious errors around nested calls, in
particular when combined with `&mut` functions. For example, a call
like this one
self.foo(self.bar())
where both `foo` and `bar` are `&mut self` functions will always yield
an error.
Here is a more involved example (which is safe) so we can see what's
going on:
struct Foo { f: uint, g: uint }
...
fn add(p: &mut uint, v: uint) {
*p += v;
}
...
fn inc(p: &mut uint) -> uint {
*p += 1; *p
}
fn weird() {
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f,
'b: inc(&mut (*x).f)) // (..)
}
The important part is the line marked `(..)` which contains a call to
`add()`. The first argument is a mutable borrow of the field `f`. The
second argument also borrows the field `f`. Now, in the current borrow
checker, the first borrow is given the lifetime of the call to
`add()`, `'a`. The second borrow is given the lifetime of `'b` of the
call to `inc()`. Because `'b` is considered to be a sublifetime of
`'a`, an error is reported since there are two co-existing mutable
borrows of the same data.
However, if we were to examine the lifetimes a bit more carefully, we
can see that this error is unnecessary. Let's examine the lifetimes
involved with `'a` in detail. We'll break apart all the steps involved
in a call expression:
'a: {
'a_arg1: let a_temp1: ... = add;
'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
'a_arg3: let a_temp3: uint = {
let b_temp1: ... = inc;
let b_temp2: &'b = &'b mut (*x).f;
'b_call: b_temp1(b_temp2)
};
'a_call: a_temp1(a_temp2, a_temp3) // (**)
}
Here we see that the lifetime `'a` includes a number of substatements.
In particular, there is this lifetime I've called `'a_call` that
corresponds to the *actual execution of the function `add()`*, after
all arguments have been evaluated. There is a corresponding lifetime
`'b_call` for the execution of `inc()`. If we wanted to be precise
about it, the lifetime of the two borrows should be `'a_call` and
`'b_call` respectively, since the references that were created
will not be dereferenced except during the execution itself.
However, this model by itself is not sound. The reason is that
while the two references that are created will never be used
simultaneously, it is still true that the first reference is
*created* before the second argument is evaluated, and so even though
it will not be *dereferenced* during the evaluation of the second
argument, it can still be *invalidated* by that evaluation. Consider
this similar but unsound example:
struct Foo { f: uint, g: uint }
...
fn add(p: &mut uint, v: uint) {
*p += v;
}
...
fn consume(x: Box<Foo>) -> uint {
x.f + x.g
}
fn weird() {
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f, consume(x)) // (..)
}
In this case, the second argument to `add` actually consumes `x`, thus
invalidating the first argument.
So, for now, we exclude the `call` lifetimes from our model.
Eventually I would like to include them, but we will have to make the
borrow checker handle this situation correctly. In particular, if
there is a reference created whose lifetime does not enclose
the borrow expression, we must issue sufficient restrictions to ensure
that the pointee remains valid.
## Adding closures
The other significant complication to the region hierarchy is
closures. I will describe here how closures should work, though some
of the work to implement this model is ongoing at the time of this
writing.
The body of closures are type-checked along with the function that
creates them. However, unlike other expressions that appear within the
function body, it is not entirely obvious when a closure body executes
with respect to the other expressions. This is because the closure
body will execute whenever the closure is called; however, we can
never know precisely when the closure will be called, especially
without some sort of alias analysis.
However, we can place some sort of limits on when the closure
executes. In particular, the type of every closure `fn:'r K` includes
a region bound `'r`. This bound indicates the maximum lifetime of that
closure; once we exit that region, the closure cannot be called
anymore. Therefore, we say that the lifetime of the closure body is a
sublifetime of the closure bound, but the closure body itself is unordered
with respect to other parts of the code.
For example, consider the following fragment of code:
'a: {
let closure: fn:'a() = || 'b: {
'c: ...
};
'd: ...
}
Here we have four lifetimes, `'a`, `'b`, `'c`, and `'d`. The closure
`closure` is bounded by the lifetime `'a`. The lifetime `'b` is the
lifetime of the closure body, and `'c` is some statement within the
closure body. Finally, `'d` is a statement within the outer block that
created the closure.
We can say that the closure body `'b` is a sublifetime of `'a` due to
the closure bound. By the usual lexical scoping conventions, the
statement `'c` is clearly a sublifetime of `'b`, and `'d` is a
sublifetime of `'a`. However, there is no ordering between `'c` and
`'d` per se (this kind of ordering between statements is actually only
an issue for dataflow; passes like the borrow checker must assume that
closures could execute at any time from the moment they are created
until they go out of scope).
### Complications due to closure bound inference
There is only one problem with the above model: in general, we do not
actually *know* the closure bounds during region inference! In fact,
closure bounds are almost always region variables! This is very tricky
because the inference system implicitly assumes that we can do things
like compute the LUB of two scoped lifetimes without needing to know
the values of any variables.
Here is an example to illustrate the problem:
fn identity<T>(x: T) -> T { x }
fn foo() { // 'foo is the function body
'a: {
let closure = identity(|| 'b: {
'c: ...
});
'd: closure();
}
'e: ...;
}
In this example, the closure bound is not explicit. At compile time,
we will create a region variable (let's call it `V0`) to represent the
closure bound.
The primary difficulty arises during the constraint propagation phase.
Imagine there is some variable with incoming edges from `'c` and `'d`.
This means that the value of the variable must be `LUB('c,
'd)`. However, without knowing what the closure bound `V0` is, we
can't compute the LUB of `'c` and `'d`! And we don't know the closure
bound until inference is done.
The solution is to rely on the fixed point nature of inference.
Basically, when we must compute `LUB('c, 'd)`, we just use the current
value for `V0` as the closure's bound. If `V0`'s binding should
change, then we will do another round of inference, and the result of
`LUB('c, 'd)` will change.
One minor implication of this is that the graph does not in fact track
the full set of dependencies between edges. We cannot easily know
whether the result of a LUB computation will change, since there may
be indirect dependencies on other variables that are not reflected on
the graph. Therefore, we must *always* iterate over all edges when
doing the fixed point calculation, not just those adjacent to nodes
whose values have changed.
Were it not for this requirement, we could in fact avoid fixed-point
iteration altogether. In that universe, we could instead first
identify and remove strongly connected components (SCC) in the graph.
Note that such components must consist solely of region variables; all
of these variables can effectively be unified into a single variable.
Once SCCs are removed, we are left with a DAG. At this point, we
could walk the DAG in topological order once to compute the expanding
nodes, and again in reverse topological order to compute the
contracting nodes. However, as I said, this does not work given the
current treatment of closure bounds, but perhaps in the future we can
address this problem somehow and make region inference somewhat more
efficient. Note that this is solely a matter of performance, not
expressiveness.
### Skolemization
For a discussion on skolemization and higher-ranked subtyping, please
see the module `middle::typeck::infer::higher_ranked::doc`.
*/
//! Region inference module.
//!
//! # Terminology
//!
//! Note that we use the terms region and lifetime interchangeably,
//! though the term `lifetime` is preferred.
//!
//! # Introduction
//!
//! Region inference uses a somewhat more involved algorithm than type
//! inference. It is not the most efficient thing ever written though it
//! seems to work well enough in practice (famous last words). The reason
//! that we use a different algorithm is because, unlike with types, it is
//! impractical to hand-annotate with regions (in some cases, there aren't
//! even the requisite syntactic forms). So we have to get it right, and
//! it's worth spending more time on a more involved analysis. Moreover,
//! regions are a simpler case than types: they don't have aggregate
//! structure, for example.
//!
//! Unlike normal type inference, which is similar in spirit to H-M and thus
//! works progressively, the region type inference works by accumulating
//! constraints over the course of a function. Finally, at the end of
//! processing a function, we process and solve the constraints all at
//! once.
//!
//! The constraints are always of one of three possible forms:
//!
//! - ConstrainVarSubVar(R_i, R_j) states that region variable R_i
//! must be a subregion of R_j
//! - ConstrainRegSubVar(R, R_i) states that the concrete region R
//! (which must not be a variable) must be a subregion of the variable R_i
//! - ConstrainVarSubReg(R_i, R) is the inverse
//!
//! # Building up the constraints
//!
//! Variables and constraints are created using the following methods:
//!
//! - `new_region_var()` creates a new, unconstrained region variable;
//! - `make_subregion(R_i, R_j)` states that R_i is a subregion of R_j
//! - `lub_regions(R_i, R_j) -> R_k` returns a region R_k which is
//! the smallest region that is greater than both R_i and R_j
//! - `glb_regions(R_i, R_j) -> R_k` returns a region R_k which is
//! the greatest region that is smaller than both R_i and R_j
//!
//! The actual region resolution algorithm is not entirely
//! obvious, though it is also not overly complex.
//!
//! ## Snapshotting
//!
//! It is also permitted to try (and rollback) changes to the graph. This
//! is done by invoking `start_snapshot()`, which returns a value. Then
//! later you can call `rollback_to()` which undoes the work.
//! Alternatively, you can call `commit()` which ends all snapshots.
//! Snapshots can be recursive---so you can start a snapshot when another
//! is in progress, but only the root snapshot can "commit".
//!
//! # Resolving constraints
//!
//! The constraint resolution algorithm is not super complex but also not
//! entirely obvious. Here I describe the problem somewhat abstractly,
//! then describe how the current code works. There may be other, smarter
//! ways of doing this with which I am unfamiliar and can't be bothered to
//! research at the moment. - NDM
//!
//! ## The problem
//!
//! Basically our input is a directed graph where nodes can be divided
//! into two categories: region variables and concrete regions. Each edge
//! `R -> S` in the graph represents a constraint that the region `R` is a
//! subregion of the region `S`.
//!
//! Region variable nodes can have arbitrary degree. There is one region
//! variable node per region variable.
//!
//! Each concrete region node is associated with some, well, concrete
//! region: e.g., a free lifetime, or the region for a particular scope.
//! Note that there may be more than one concrete region node for a
//! particular region value. Moreover, because of how the graph is built,
//! we know that all concrete region nodes have either in-degree 1 or
//! out-degree 1.
//!
//! Before resolution begins, we build up the constraints in a hashmap
//! that maps `Constraint` keys to spans. During resolution, we construct
//! the actual `Graph` structure that we describe here.
//!
//! ## Our current algorithm
//!
//! We divide region variables into two groups: Expanding and Contracting.
//! Expanding region variables are those that have a concrete region
//! predecessor (direct or indirect). Contracting region variables are
//! all others.
//!
//! We first resolve the values of Expanding region variables and then
//! process Contracting ones. We currently use an iterative, fixed-point
//! procedure (but read on, I believe this could be replaced with a linear
//! walk). Basically we iterate over the edges in the graph, ensuring
//! that, if the source of the edge has a value, then this value is a
//! subregion of the target value. If the target does not yet have a
//! value, it takes the value from the source. If the target already had
//! a value, then the resulting value is Least Upper Bound of the old and
//! new values. When we are done, each Expanding node will have the
//! smallest region that it could possibly have and still satisfy the
//! constraints.
//!
//! We next process the Contracting nodes. Here we again iterate over the
//! edges, only this time we move values from target to source (if the
//! source is a Contracting node). For each contracting node, we compute
//! its value as the GLB of all its successors. Basically contracting
//! nodes ensure that there is overlap between their successors; we will
//! ultimately infer the largest overlap possible.
//!
//! # The Region Hierarchy
//!
//! ## Without closures
//!
//! Let's first consider the region hierarchy without thinking about
//! closures, because they add a lot of complications. The region
//! hierarchy *basically* mirrors the lexical structure of the code.
//! There is a region for every piece of 'evaluation' that occurs, meaning
//! every expression, block, and pattern (patterns are considered to
//! "execute" by testing the value they are applied to and creating any
//! relevant bindings). So, for example:
//!
//! fn foo(x: int, y: int) { // -+
//! // +------------+ // |
//! // | +-----+ // |
//! // | +-+ +-+ +-+ // |
//! // | | | | | | | // |
//! // v v v v v v v // |
//! let z = x + y; // |
//! ... // |
//! } // -+
//!
//! fn bar() { ... }
//!
//! In this example, there is a region for the fn body block as a whole,
//! and then a subregion for the declaration of the local variable.
//! Within that, there are sublifetimes for the assignment pattern and
//! also the expression `x + y`. The expression itself has sublifetimes
//! for evaluating `x` and `y`.
//!
//! ## Function calls
//!
//! Function calls are a bit tricky. I will describe how we handle them
//! *now* and then a bit about how we can improve them (Issue #6268).
//!
//! Consider a function call like `func(expr1, expr2)`, where `func`,
//! `expr1`, and `expr2` are all arbitrary expressions. Currently,
//! we construct a region hierarchy like:
//!
//! +----------------+
//! | |
//! +--+ +---+ +---+|
//! v v v v v vv
//! func(expr1, expr2)
//!
//! Here you can see that the call as a whole has a region and the
//! function plus arguments are subregions of that. As a side-effect of
//! this, we get a lot of spurious errors around nested calls, in
//! particular when combined with `&mut` functions. For example, a call
//! like this one
//!
//! self.foo(self.bar())
//!
//! where both `foo` and `bar` are `&mut self` functions will always yield
//! an error.
//!
//! Here is a more involved example (which is safe) so we can see what's
//! going on:
//!
//! struct Foo { f: uint, g: uint }
//! ...
//! fn add(p: &mut uint, v: uint) {
//! *p += v;
//! }
//! ...
//! fn inc(p: &mut uint) -> uint {
//! *p += 1; *p
//! }
//! fn weird() {
//! let mut x: Box<Foo> = box Foo { ... };
//! 'a: add(&mut (*x).f,
//! 'b: inc(&mut (*x).f)) // (..)
//! }
//!
//! The important part is the line marked `(..)` which contains a call to
//! `add()`. The first argument is a mutable borrow of the field `f`. The
//! second argument also borrows the field `f`. Now, in the current borrow
//! checker, the first borrow is given the lifetime of the call to
//! `add()`, `'a`. The second borrow is given the lifetime of `'b` of the
//! call to `inc()`. Because `'b` is considered to be a sublifetime of
//! `'a`, an error is reported since there are two co-existing mutable
//! borrows of the same data.
//!
//! However, if we were to examine the lifetimes a bit more carefully, we
//! can see that this error is unnecessary. Let's examine the lifetimes
//! involved with `'a` in detail. We'll break apart all the steps involved
//! in a call expression:
//!
//! 'a: {
//! 'a_arg1: let a_temp1: ... = add;
//! 'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
//! 'a_arg3: let a_temp3: uint = {
//! let b_temp1: ... = inc;
//! let b_temp2: &'b = &'b mut (*x).f;
//! 'b_call: b_temp1(b_temp2)
//! };
//! 'a_call: a_temp1(a_temp2, a_temp3) // (**)
//! }
//!
//! Here we see that the lifetime `'a` includes a number of substatements.
//! In particular, there is this lifetime I've called `'a_call` that
//! corresponds to the *actual execution of the function `add()`*, after
//! all arguments have been evaluated. There is a corresponding lifetime
//! `'b_call` for the execution of `inc()`. If we wanted to be precise
//! about it, the lifetime of the two borrows should be `'a_call` and
//! `'b_call` respectively, since the references that were created
//! will not be dereferenced except during the execution itself.
//!
//! However, this model by itself is not sound. The reason is that
//! while the two references that are created will never be used
//! simultaneously, it is still true that the first reference is
//! *created* before the second argument is evaluated, and so even though
//! it will not be *dereferenced* during the evaluation of the second
//! argument, it can still be *invalidated* by that evaluation. Consider
//! this similar but unsound example:
//!
//! struct Foo { f: uint, g: uint }
//! ...
//! fn add(p: &mut uint, v: uint) {
//! *p += v;
//! }
//! ...
//! fn consume(x: Box<Foo>) -> uint {
//! x.f + x.g
//! }
//! fn weird() {
//! let mut x: Box<Foo> = box Foo { ... };
//! 'a: add(&mut (*x).f, consume(x)) // (..)
//! }
//!
//! In this case, the second argument to `add` actually consumes `x`, thus
//! invalidating the first argument.
//!
//! So, for now, we exclude the `call` lifetimes from our model.
//! Eventually I would like to include them, but we will have to make the
//! borrow checker handle this situation correctly. In particular, if
//! there is a reference created whose lifetime does not enclose
//! the borrow expression, we must issue sufficient restrictions to ensure
//! that the pointee remains valid.
//!
//! ## Adding closures
//!
//! The other significant complication to the region hierarchy is
//! closures. I will describe here how closures should work, though some
//! of the work to implement this model is ongoing at the time of this
//! writing.
//!
//! The body of closures are type-checked along with the function that
//! creates them. However, unlike other expressions that appear within the
//! function body, it is not entirely obvious when a closure body executes
//! with respect to the other expressions. This is because the closure
//! body will execute whenever the closure is called; however, we can
//! never know precisely when the closure will be called, especially
//! without some sort of alias analysis.
//!
//! However, we can place some sort of limits on when the closure
//! executes. In particular, the type of every closure `fn:'r K` includes
//! a region bound `'r`. This bound indicates the maximum lifetime of that
//! closure; once we exit that region, the closure cannot be called
//! anymore. Therefore, we say that the lifetime of the closure body is a
//! sublifetime of the closure bound, but the closure body itself is unordered
//! with respect to other parts of the code.
//!
//! For example, consider the following fragment of code:
//!
//! 'a: {
//! let closure: fn:'a() = || 'b: {
//! 'c: ...
//! };
//! 'd: ...
//! }
//!
//! Here we have four lifetimes, `'a`, `'b`, `'c`, and `'d`. The closure
//! `closure` is bounded by the lifetime `'a`. The lifetime `'b` is the
//! lifetime of the closure body, and `'c` is some statement within the
//! closure body. Finally, `'d` is a statement within the outer block that
//! created the closure.
//!
//! We can say that the closure body `'b` is a sublifetime of `'a` due to
//! the closure bound. By the usual lexical scoping conventions, the
//! statement `'c` is clearly a sublifetime of `'b`, and `'d` is a
//! sublifetime of `'a`. However, there is no ordering between `'c` and
//! `'d` per se (this kind of ordering between statements is actually only
//! an issue for dataflow; passes like the borrow checker must assume that
//! closures could execute at any time from the moment they are created
//! until they go out of scope).
//!
//! ### Complications due to closure bound inference
//!
//! There is only one problem with the above model: in general, we do not
//! actually *know* the closure bounds during region inference! In fact,
//! closure bounds are almost always region variables! This is very tricky
//! because the inference system implicitly assumes that we can do things
//! like compute the LUB of two scoped lifetimes without needing to know
//! the values of any variables.
//!
//! Here is an example to illustrate the problem:
//!
//! fn identity<T>(x: T) -> T { x }
//!
//! fn foo() { // 'foo is the function body
//! 'a: {
//! let closure = identity(|| 'b: {
//! 'c: ...
//! });
//! 'd: closure();
//! }
//! 'e: ...;
//! }
//!
//! In this example, the closure bound is not explicit. At compile time,
//! we will create a region variable (let's call it `V0`) to represent the
//! closure bound.
//!
//! The primary difficulty arises during the constraint propagation phase.
//! Imagine there is some variable with incoming edges from `'c` and `'d`.
//! This means that the value of the variable must be `LUB('c,
//! 'd)`. However, without knowing what the closure bound `V0` is, we
//! can't compute the LUB of `'c` and `'d`! And we don't know the closure
//! bound until inference is done.
//!
//! The solution is to rely on the fixed point nature of inference.
//! Basically, when we must compute `LUB('c, 'd)`, we just use the current
//! value for `V0` as the closure's bound. If `V0`'s binding should
//! change, then we will do another round of inference, and the result of
//! `LUB('c, 'd)` will change.
//!
//! One minor implication of this is that the graph does not in fact track
//! the full set of dependencies between edges. We cannot easily know
//! whether the result of a LUB computation will change, since there may
//! be indirect dependencies on other variables that are not reflected on
//! the graph. Therefore, we must *always* iterate over all edges when
//! doing the fixed point calculation, not just those adjacent to nodes
//! whose values have changed.
//!
//! Were it not for this requirement, we could in fact avoid fixed-point
//! iteration altogether. In that universe, we could instead first
//! identify and remove strongly connected components (SCC) in the graph.
//! Note that such components must consist solely of region variables; all
//! of these variables can effectively be unified into a single variable.
//! Once SCCs are removed, we are left with a DAG. At this point, we
//! could walk the DAG in topological order once to compute the expanding
//! nodes, and again in reverse topological order to compute the
//! contracting nodes. However, as I said, this does not work given the
//! current treatment of closure bounds, but perhaps in the future we can
//! address this problem somehow and make region inference somewhat more
//! efficient. Note that this is solely a matter of performance, not
//! expressiveness.
//!
//! ### Skolemization
//!
//! For a discussion on skolemization and higher-ranked subtyping, please
//! see the module `middle::typeck::infer::higher_ranked::doc`.

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See doc.rs */
//! See doc.rs
pub use self::Constraint::*;
pub use self::Verify::*;
@ -597,15 +597,10 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
.collect()
}
/// Computes all regions that have been related to `r0` in any way since the mark `mark` was
/// made---`r0` itself will be the first entry. This is used when checking whether skolemized
/// regions are being improperly related to other regions.
pub fn tainted(&self, mark: RegionMark, r0: Region) -> Vec<Region> {
/*!
* Computes all regions that have been related to `r0` in any
* way since the mark `mark` was made---`r0` itself will be
* the first entry. This is used when checking whether
* skolemized regions are being improperly related to other
* regions.
*/
debug!("tainted(mark={}, r0={})", mark, r0.repr(self.tcx));
let _indenter = indenter();
@ -694,13 +689,11 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
}
/**
This function performs the actual region resolution. It must be
called after all constraints have been added. It performs a
fixed-point iteration to find region values which satisfy all
constraints, assuming such values can be found; if they cannot,
errors are reported.
*/
/// This function performs the actual region resolution. It must be
/// called after all constraints have been added. It performs a
/// fixed-point iteration to find region values which satisfy all
/// constraints, assuming such values can be found; if they cannot,
/// errors are reported.
pub fn resolve_regions(&self) -> Vec<RegionResolutionError<'tcx>> {
debug!("RegionVarBindings: resolve_regions()");
let mut errors = vec!();
@ -783,16 +776,12 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
}
/// Computes a region that encloses both free region arguments. Guarantee that if the same two
/// regions are given as argument, in any order, a consistent result is returned.
fn lub_free_regions(&self,
a: &FreeRegion,
b: &FreeRegion) -> ty::Region
{
/*!
* Computes a region that encloses both free region arguments.
* Guarantee that if the same two regions are given as argument,
* in any order, a consistent result is returned.
*/
return match a.cmp(b) {
Less => helper(self, a, b),
Greater => helper(self, b, a),
@ -884,16 +873,13 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
}
/// Computes a region that is enclosed by both free region arguments, if any. Guarantees that
/// if the same two regions are given as argument, in any order, a consistent result is
/// returned.
fn glb_free_regions(&self,
a: &FreeRegion,
b: &FreeRegion) -> cres<'tcx, ty::Region>
{
/*!
* Computes a region that is enclosed by both free region arguments,
* if any. Guarantees that if the same two regions are given as argument,
* in any order, a consistent result is returned.
*/
return match a.cmp(b) {
Less => helper(self, a, b),
Greater => helper(self, b, a),

View File

@ -8,37 +8,27 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Skolemization is the process of replacing unknown variables with
* fresh types. The idea is that the type, after skolemization,
* contains no inference variables but instead contains either a value
* for each variable or fresh "arbitrary" types wherever a variable
* would have been.
*
* Skolemization is used primarily to get a good type for inserting
* into a cache. The result summarizes what the type inferencer knows
* "so far". The primary place it is used right now is in the trait
* matching algorithm, which needs to be able to cache whether an
* `impl` self type matches some other type X -- *without* affecting
* `X`. That means that if the type `X` is in fact an unbound type
* variable, we want the match to be regarded as ambiguous, because
* depending on what type that type variable is ultimately assigned,
* the match may or may not succeed.
*
* Note that you should be careful not to allow the output of
* skolemization to leak to the user in error messages or in any other
* form. Skolemization is only really useful as an internal detail.
*
* __An important detail concerning regions.__ The skolemizer also
* replaces *all* regions with 'static. The reason behind this is
* that, in general, we do not take region relationships into account
* when making type-overloaded decisions. This is important because of
* the design of the region inferencer, which is not based on
* unification but rather on accumulating and then solving a set of
* constraints. In contrast, the type inferencer assigns a value to
* each type variable only once, and it does so as soon as it can, so
* it is reasonable to ask what the type inferencer knows "so far".
*/
//! Skolemization is the process of replacing unknown variables with fresh types. The idea is that
//! the type, after skolemization, contains no inference variables but instead contains either a
//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
//!
//! Skolemization is used primarily to get a good type for inserting into a cache. The result
//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
//! on what type that type variable is ultimately assigned, the match may or may not succeed.
//!
//! Note that you should be careful not to allow the output of skolemization to leak to the user in
//! error messages or in any other form. Skolemization is only really useful as an internal detail.
//!
//! __An important detail concerning regions.__ The skolemizer also replaces *all* regions with
//! 'static. The reason behind this is that, in general, we do not take region relationships into
//! account when making type-overloaded decisions. This is important because of the design of the
//! region inferencer, which is not based on unification but rather on accumulating and then
//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
//! inferencer knows "so far".
use middle::ty::{mod, Ty};
use middle::ty_fold;

View File

@ -72,12 +72,10 @@ impl<'tcx> TypeVariableTable<'tcx> {
self.values.get(vid.index).diverging
}
/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
///
/// Precondition: neither `a` nor `b` are known.
pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
/*!
* Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
*
* Precondition: neither `a` nor `b` are known.
*/
if a != b {
self.relations(a).push((dir, b));
@ -86,19 +84,15 @@ impl<'tcx> TypeVariableTable<'tcx> {
}
}
/// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
/// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
/// where `vid1` is some other variable id.
pub fn instantiate_and_push(
&mut self,
vid: ty::TyVid,
ty: Ty<'tcx>,
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
/*!
* Instantiates `vid` with the type `ty` and then pushes an
* entry onto `stack` for each of the relations of `vid` to
* other variables. The relations will have the form `(ty,
* dir, vid1)` where `vid1` is some other variable id.
*/
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index).value;
mem::replace(value_ptr, Known(ty))

View File

@ -22,85 +22,68 @@ use syntax::ast;
use util::ppaux::Repr;
use util::snapshot_vec as sv;
/**
* This trait is implemented by any type that can serve as a type
* variable. We call such variables *unification keys*. For example,
* this trait is implemented by `IntVid`, which represents integral
* variables.
*
* Each key type has an associated value type `V`. For example, for
* `IntVid`, this is `Option<IntVarValue>`, representing some
* (possibly not yet known) sort of integer.
*
* Implementations of this trait are at the end of this file.
*/
/// This trait is implemented by any type that can serve as a type
/// variable. We call such variables *unification keys*. For example,
/// this trait is implemented by `IntVid`, which represents integral
/// variables.
///
/// Each key type has an associated value type `V`. For example, for
/// `IntVid`, this is `Option<IntVarValue>`, representing some
/// (possibly not yet known) sort of integer.
///
/// Implementations of this trait are at the end of this file.
pub trait UnifyKey<'tcx, V> : Clone + Show + PartialEq + Repr<'tcx> {
fn index(&self) -> uint;
fn from_index(u: uint) -> Self;
/**
* Given an inference context, returns the unification table
* appropriate to this key type.
*/
// Given an inference context, returns the unification table
// appropriate to this key type.
fn unification_table<'v>(infcx: &'v InferCtxt)
-> &'v RefCell<UnificationTable<Self,V>>;
fn tag(k: Option<Self>) -> &'static str;
}
/**
* Trait for valid types that a type variable can be set to. Note that
* this is typically not the end type that the value will take on, but
* rather an `Option` wrapper (where `None` represents a variable
* whose value is not yet set).
*
* Implementations of this trait are at the end of this file.
*/
/// Trait for valid types that a type variable can be set to. Note that
/// this is typically not the end type that the value will take on, but
/// rather an `Option` wrapper (where `None` represents a variable
/// whose value is not yet set).
///
/// Implementations of this trait are at the end of this file.
pub trait UnifyValue<'tcx> : Clone + Repr<'tcx> + PartialEq {
}
/**
* Value of a unification key. We implement Tarjan's union-find
* algorithm: when two keys are unified, one of them is converted
* into a "redirect" pointing at the other. These redirects form a
* DAG: the roots of the DAG (nodes that are not redirected) are each
* associated with a value of type `V` and a rank. The rank is used
* to keep the DAG relatively balanced, which helps keep the running
* time of the algorithm under control. For more information, see
* <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
*/
/// Value of a unification key. We implement Tarjan's union-find
/// algorithm: when two keys are unified, one of them is converted
/// into a "redirect" pointing at the other. These redirects form a
/// DAG: the roots of the DAG (nodes that are not redirected) are each
/// associated with a value of type `V` and a rank. The rank is used
/// to keep the DAG relatively balanced, which helps keep the running
/// time of the algorithm under control. For more information, see
/// <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
#[deriving(PartialEq,Clone)]
pub enum VarValue<K,V> {
Redirect(K),
Root(V, uint),
}
/**
* Table of unification keys and their values.
*/
/// Table of unification keys and their values.
pub struct UnificationTable<K,V> {
/**
* Indicates the current value of each key.
*/
/// Indicates the current value of each key.
values: sv::SnapshotVec<VarValue<K,V>,(),Delegate>,
}
/**
* At any time, users may snapshot a unification table. The changes
* made during the snapshot may either be *committed* or *rolled back*.
*/
/// At any time, users may snapshot a unification table. The changes
/// made during the snapshot may either be *committed* or *rolled back*.
pub struct Snapshot<K> {
// Link snapshot to the key type `K` of the table.
marker: marker::CovariantType<K>,
snapshot: sv::Snapshot,
}
/**
* Internal type used to represent the result of a `get()` operation.
* Conveys the current root and value of the key.
*/
/// Internal type used to represent the result of a `get()` operation.
/// Conveys the current root and value of the key.
pub struct Node<K,V> {
pub key: K,
pub value: V,
@ -121,28 +104,22 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
}
}
/**
* Starts a new snapshot. Each snapshot must be either
* rolled back or committed in a "LIFO" (stack) order.
*/
/// Starts a new snapshot. Each snapshot must be either
/// rolled back or committed in a "LIFO" (stack) order.
pub fn snapshot(&mut self) -> Snapshot<K> {
Snapshot { marker: marker::CovariantType::<K>,
snapshot: self.values.start_snapshot() }
}
/**
* Reverses all changes since the last snapshot. Also
* removes any keys that have been created since then.
*/
/// Reverses all changes since the last snapshot. Also
/// removes any keys that have been created since then.
pub fn rollback_to(&mut self, snapshot: Snapshot<K>) {
debug!("{}: rollback_to()", UnifyKey::tag(None::<K>));
self.values.rollback_to(snapshot.snapshot);
}
/**
* Commits all changes since the last snapshot. Of course, they
* can still be undone if there is a snapshot further out.
*/
/// Commits all changes since the last snapshot. Of course, they
/// can still be undone if there is a snapshot further out.
pub fn commit(&mut self, snapshot: Snapshot<K>) {
debug!("{}: commit()", UnifyKey::tag(None::<K>));
self.values.commit(snapshot.snapshot);
@ -157,13 +134,9 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
k
}
/// Find the root node for `vid`. This uses the standard union-find algorithm with path
/// compression: http://en.wikipedia.org/wiki/Disjoint-set_data_structure
pub fn get(&mut self, tcx: &ty::ctxt, vid: K) -> Node<K,V> {
/*!
* Find the root node for `vid`. This uses the standard
* union-find algorithm with path compression:
* http://en.wikipedia.org/wiki/Disjoint-set_data_structure
*/
let index = vid.index();
let value = (*self.values.get(index)).clone();
match value {
@ -188,16 +161,13 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
}
}
/// Sets the value for `vid` to `new_value`. `vid` MUST be a root node! Also, we must be in the
/// middle of a snapshot.
pub fn set(&mut self,
tcx: &ty::ctxt<'tcx>,
key: K,
new_value: VarValue<K,V>)
{
/*!
* Sets the value for `vid` to `new_value`. `vid` MUST be a
* root node! Also, we must be in the middle of a snapshot.
*/
assert!(self.is_root(&key));
debug!("Updating variable {} to {}",
@ -207,19 +177,15 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
self.values.set(key.index(), new_value);
}
/// Either redirects node_a to node_b or vice versa, depending on the relative rank. Returns
/// the new root and rank. You should then update the value of the new root to something
/// suitable.
pub fn unify(&mut self,
tcx: &ty::ctxt<'tcx>,
node_a: &Node<K,V>,
node_b: &Node<K,V>)
-> (K, uint)
{
/*!
* Either redirects node_a to node_b or vice versa, depending
* on the relative rank. Returns the new root and rank. You
* should then update the value of the new root to something
* suitable.
*/
debug!("unify(node_a(id={}, rank={}), node_b(id={}, rank={}))",
node_a.key.repr(tcx),
node_a.rank,
@ -255,10 +221,8 @@ impl<K,V> sv::SnapshotVecDelegate<VarValue<K,V>,()> for Delegate {
// Code to handle simple keys like ints, floats---anything that
// doesn't have a subtyping relationship we need to worry about.
/**
* Indicates a type that does not have any kind of subtyping
* relationship.
*/
/// Indicates a type that does not have any kind of subtyping
/// relationship.
pub trait SimplyUnifiable<'tcx> : Clone + PartialEq + Repr<'tcx> {
fn to_type(&self) -> Ty<'tcx>;
fn to_type_err(expected_found<Self>) -> ty::type_err<'tcx>;
@ -295,19 +259,15 @@ pub trait InferCtxtMethodsForSimplyUnifiableTypes<'tcx, V:SimplyUnifiable<'tcx>,
impl<'a,'tcx,V:SimplyUnifiable<'tcx>,K:UnifyKey<'tcx, Option<V>>>
InferCtxtMethodsForSimplyUnifiableTypes<'tcx, V, K> for InferCtxt<'a, 'tcx>
{
/// Unifies two simple keys. Because simple keys do not have any subtyping relationships, if
/// both keys have already been associated with a value, then those two values must be the
/// same.
fn simple_vars(&self,
a_is_expected: bool,
a_id: K,
b_id: K)
-> ures<'tcx>
{
/*!
* Unifies two simple keys. Because simple keys do
* not have any subtyping relationships, if both keys
* have already been associated with a value, then those two
* values must be the same.
*/
let tcx = self.tcx;
let table = UnifyKey::unification_table(self);
let node_a = table.borrow_mut().get(tcx, a_id);
@ -341,19 +301,14 @@ impl<'a,'tcx,V:SimplyUnifiable<'tcx>,K:UnifyKey<'tcx, Option<V>>>
return Ok(())
}
/// Sets the value of the key `a_id` to `b`. Because simple keys do not have any subtyping
/// relationships, if `a_id` already has a value, it must be the same as `b`.
fn simple_var_t(&self,
a_is_expected: bool,
a_id: K,
b: V)
-> ures<'tcx>
{
/*!
* Sets the value of the key `a_id` to `b`. Because
* simple keys do not have any subtyping relationships,
* if `a_id` already has a value, it must be the same as
* `b`.
*/
let tcx = self.tcx;
let table = UnifyKey::unification_table(self);
let node_a = table.borrow_mut().get(tcx, a_id);

View File

@ -150,20 +150,18 @@ pub struct MethodCallee<'tcx> {
pub substs: subst::Substs<'tcx>
}
/**
* With method calls, we store some extra information in
* side tables (i.e method_map). We use
* MethodCall as a key to index into these tables instead of
* just directly using the expression's NodeId. The reason
* for this being that we may apply adjustments (coercions)
* with the resulting expression also needing to use the
* side tables. The problem with this is that we don't
* assign a separate NodeId to this new expression
* and so it would clash with the base expression if both
* needed to add to the side tables. Thus to disambiguate
* we also keep track of whether there's an adjustment in
* our key.
*/
/// With method calls, we store some extra information in
/// side tables (i.e method_map). We use
/// MethodCall as a key to index into these tables instead of
/// just directly using the expression's NodeId. The reason
/// for this being that we may apply adjustments (coercions)
/// with the resulting expression also needing to use the
/// side tables. The problem with this is that we don't
/// assign a separate NodeId to this new expression
/// and so it would clash with the base expression if both
/// needed to add to the side tables. Thus to disambiguate
/// we also keep track of whether there's an adjustment in
/// our key.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct MethodCall {
pub expr_id: ast::NodeId,

View File

@ -139,11 +139,11 @@ impl RegionScope for BindingRscope {
/// A scope which simply shifts the Debruijn index of other scopes
/// to account for binding levels.
pub struct ShiftedRscope<'r> {
base_scope: &'r RegionScope+'r
base_scope: &'r (RegionScope+'r)
}
impl<'r> ShiftedRscope<'r> {
pub fn new(base_scope: &'r RegionScope+'r) -> ShiftedRscope<'r> {
pub fn new(base_scope: &'r (RegionScope+'r)) -> ShiftedRscope<'r> {
ShiftedRscope { base_scope: base_scope }
}
}

View File

@ -8,189 +8,186 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
//! This file infers the variance of type and lifetime parameters. The
//! algorithm is taken from Section 4 of the paper "Taming the Wildcards:
//! Combining Definition- and Use-Site Variance" published in PLDI'11 and
//! written by Altidor et al., and hereafter referred to as The Paper.
//!
//! This inference is explicitly designed *not* to consider the uses of
//! types within code. To determine the variance of type parameters
//! defined on type `X`, we only consider the definition of the type `X`
//! and the definitions of any types it references.
//!
//! We only infer variance for type parameters found on *types*: structs,
//! enums, and traits. We do not infer variance for type parameters found
//! on fns or impls. This is because those things are not type definitions
//! and variance doesn't really make sense in that context.
//!
//! It is worth covering what variance means in each case. For structs and
//! enums, I think it is fairly straightforward. The variance of the type
//! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
//! the variances we compute for type parameters.)
//!
//! ### Variance on traits
//!
//! The meaning of variance for trait parameters is more subtle and worth
//! expanding upon. There are in fact two uses of the variance values we
//! compute.
//!
//! #### Trait variance and object types
//!
//! The first is for object types. Just as with structs and enums, we can
//! decide the subtyping relationship between two object types `&Trait<A>`
//! and `&Trait<B>` based on the relationship of `A` and `B`. Note that
//! for object types we ignore the `Self` type parameter -- it is unknown,
//! and the nature of dynamic dispatch ensures that we will always call a
//! function that is expecting the appropriate `Self` type. However, we
//! must be careful with the other type parameters, or else we could end
//! up calling a function that is expecting one type but provided another.
//!
//! To see what I mean, consider a trait like so:
//!
//! trait ConvertTo<A> {
//! fn convertTo(&self) -> A;
//! }
//!
//! Intuitively, if we had one object `O=&ConvertTo<Object>` and another
//! `S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
//! (presuming Java-like "string" and "object" types, my go-to examples
//! for subtyping). The actual algorithm would be to compare the
//! (explicit) type parameters pairwise respecting their variance: here,
//! the type parameter A is covariant (it appears only in a return
//! position), and hence we require that `String <: Object`.
//!
//! You'll note though that we did not consider the binding for the
//! (implicit) `Self` type parameter: in fact, it is unknown, so that's
//! good. The reason we can ignore that parameter is precisely because we
//! don't need to know its value until a call occurs, and at that time (as
//! you said) the dynamic nature of virtual dispatch means the code we run
//! will be correct for whatever value `Self` happens to be bound to for
//! the particular object whose method we called. `Self` is thus different
//! from `A`, because the caller requires that `A` be known in order to
//! know the return type of the method `convertTo()`. (As an aside, we
//! have rules preventing methods where `Self` appears outside of the
//! receiver position from being called via an object.)
//!
//! #### Trait variance and vtable resolution
//!
//! But traits aren't only used with objects. They're also used when
//! deciding whether a given impl satisfies a given trait bound. To set the
//! scene here, imagine I had a function:
//!
//! fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
//! ...
//! }
//!
//! Now imagine that I have an implementation of `ConvertTo` for `Object`:
//!
//! impl ConvertTo<int> for Object { ... }
//!
//! And I want to call `convertAll` on an array of strings. Suppose
//! further that for whatever reason I specifically supply the value of
//! `String` for the type parameter `T`:
//!
//! let mut vector = ~["string", ...];
//! convertAll::<int, String>(v);
//!
//! Is this legal? To put it another way, can we apply the `impl` for
//! `Object` to the type `String`? The answer is yes, but to see why
//! we have to expand out what will happen:
//!
//! - `convertAll` will create a pointer to one of the entries in the
//! vector, which will have type `&String`
//! - It will then call the impl of `convertTo()` that is intended
//! for use with objects. This has the type:
//!
//! fn(self: &Object) -> int
//!
//! It is ok to provide a value for `self` of type `&String` because
//! `&String <: &Object`.
//!
//! OK, so intuitively we want this to be legal, so let's bring this back
//! to variance and see whether we are computing the correct result. We
//! must first figure out how to phrase the question "is an impl for
//! `Object,int` usable where an impl for `String,int` is expected?"
//!
//! Maybe it's helpful to think of a dictionary-passing implementation of
//! type classes. In that case, `convertAll()` takes an implicit parameter
//! representing the impl. In short, we *have* an impl of type:
//!
//! V_O = ConvertTo<int> for Object
//!
//! and the function prototype expects an impl of type:
//!
//! V_S = ConvertTo<int> for String
//!
//! As with any argument, this is legal if the type of the value given
//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
//! The answer will depend on the variance of the various parameters. In
//! this case, because the `Self` parameter is contravariant and `A` is
//! covariant, it means that:
//!
//! V_O <: V_S iff
//! int <: int
//! String <: Object
//!
//! These conditions are satisfied and so we are happy.
//!
//! ### The algorithm
//!
//! The basic idea is quite straightforward. We iterate over the types
//! defined and, for each use of a type parameter X, accumulate a
//! constraint indicating that the variance of X must be valid for the
//! variance of that use site. We then iteratively refine the variance of
//! X until all constraints are met. There is *always* a sol'n, because at
//! the limit we can declare all type parameters to be invariant and all
//! constraints will be satisfied.
//!
//! As a simple example, consider:
//!
//! enum Option<A> { Some(A), None }
//! enum OptionalFn<B> { Some(|B|), None }
//! enum OptionalMap<C> { Some(|C| -> C), None }
//!
//! Here, we will generate the constraints:
//!
//! 1. V(A) <= +
//! 2. V(B) <= -
//! 3. V(C) <= +
//! 4. V(C) <= -
//!
//! These indicate that (1) the variance of A must be at most covariant;
//! (2) the variance of B must be at most contravariant; and (3, 4) the
//! variance of C must be at most covariant *and* contravariant. All of these
//! results are based on a variance lattice defined as follows:
//!
//! * Top (bivariant)
//! - +
//! o Bottom (invariant)
//!
//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
//! optimal solution. Note that there is always a naive solution which
//! just declares all variables to be invariant.
//!
//! You may be wondering why fixed-point iteration is required. The reason
//! is that the variance of a use site may itself be a function of the
//! variance of other type parameters. In full generality, our constraints
//! take the form:
//!
//! V(X) <= Term
//! Term := + | - | * | o | V(X) | Term x Term
//!
//! Here the notation V(X) indicates the variance of a type/region
//! parameter `X` with respect to its defining class. `Term x Term`
//! represents the "variance transform" as defined in the paper:
//!
//! If the variance of a type variable `X` in type expression `E` is `V2`
//! and the definition-site variance of the [corresponding] type parameter
//! of a class `C` is `V1`, then the variance of `X` in the type expression
//! `C<E>` is `V3 = V1.xform(V2)`.
This file infers the variance of type and lifetime parameters. The
algorithm is taken from Section 4 of the paper "Taming the Wildcards:
Combining Definition- and Use-Site Variance" published in PLDI'11 and
written by Altidor et al., and hereafter referred to as The Paper.
This inference is explicitly designed *not* to consider the uses of
types within code. To determine the variance of type parameters
defined on type `X`, we only consider the definition of the type `X`
and the definitions of any types it references.
We only infer variance for type parameters found on *types*: structs,
enums, and traits. We do not infer variance for type parameters found
on fns or impls. This is because those things are not type definitions
and variance doesn't really make sense in that context.
It is worth covering what variance means in each case. For structs and
enums, I think it is fairly straightforward. The variance of the type
or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
(resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
(resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
the variances we compute for type parameters.)
### Variance on traits
The meaning of variance for trait parameters is more subtle and worth
expanding upon. There are in fact two uses of the variance values we
compute.
#### Trait variance and object types
The first is for object types. Just as with structs and enums, we can
decide the subtyping relationship between two object types `&Trait<A>`
and `&Trait<B>` based on the relationship of `A` and `B`. Note that
for object types we ignore the `Self` type parameter -- it is unknown,
and the nature of dynamic dispatch ensures that we will always call a
function that is expecting the appropriate `Self` type. However, we
must be careful with the other type parameters, or else we could end
up calling a function that is expecting one type but provided another.
To see what I mean, consider a trait like so:
trait ConvertTo<A> {
fn convertTo(&self) -> A;
}
Intuitively, if we had one object `O=&ConvertTo<Object>` and another
`S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
(presuming Java-like "string" and "object" types, my go-to examples
for subtyping). The actual algorithm would be to compare the
(explicit) type parameters pairwise respecting their variance: here,
the type parameter A is covariant (it appears only in a return
position), and hence we require that `String <: Object`.
You'll note though that we did not consider the binding for the
(implicit) `Self` type parameter: in fact, it is unknown, so that's
good. The reason we can ignore that parameter is precisely because we
don't need to know its value until a call occurs, and at that time (as
you said) the dynamic nature of virtual dispatch means the code we run
will be correct for whatever value `Self` happens to be bound to for
the particular object whose method we called. `Self` is thus different
from `A`, because the caller requires that `A` be known in order to
know the return type of the method `convertTo()`. (As an aside, we
have rules preventing methods where `Self` appears outside of the
receiver position from being called via an object.)
#### Trait variance and vtable resolution
But traits aren't only used with objects. They're also used when
deciding whether a given impl satisfies a given trait bound. To set the
scene here, imagine I had a function:
fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
...
}
Now imagine that I have an implementation of `ConvertTo` for `Object`:
impl ConvertTo<int> for Object { ... }
And I want to call `convertAll` on an array of strings. Suppose
further that for whatever reason I specifically supply the value of
`String` for the type parameter `T`:
let mut vector = ~["string", ...];
convertAll::<int, String>(v);
Is this legal? To put it another way, can we apply the `impl` for
`Object` to the type `String`? The answer is yes, but to see why
we have to expand out what will happen:
- `convertAll` will create a pointer to one of the entries in the
vector, which will have type `&String`
- It will then call the impl of `convertTo()` that is intended
for use with objects. This has the type:
fn(self: &Object) -> int
It is ok to provide a value for `self` of type `&String` because
`&String <: &Object`.
OK, so intuitively we want this to be legal, so let's bring this back
to variance and see whether we are computing the correct result. We
must first figure out how to phrase the question "is an impl for
`Object,int` usable where an impl for `String,int` is expected?"
Maybe it's helpful to think of a dictionary-passing implementation of
type classes. In that case, `convertAll()` takes an implicit parameter
representing the impl. In short, we *have* an impl of type:
V_O = ConvertTo<int> for Object
and the function prototype expects an impl of type:
V_S = ConvertTo<int> for String
As with any argument, this is legal if the type of the value given
(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
The answer will depend on the variance of the various parameters. In
this case, because the `Self` parameter is contravariant and `A` is
covariant, it means that:
V_O <: V_S iff
int <: int
String <: Object
These conditions are satisfied and so we are happy.
### The algorithm
The basic idea is quite straightforward. We iterate over the types
defined and, for each use of a type parameter X, accumulate a
constraint indicating that the variance of X must be valid for the
variance of that use site. We then iteratively refine the variance of
X until all constraints are met. There is *always* a sol'n, because at
the limit we can declare all type parameters to be invariant and all
constraints will be satisfied.
As a simple example, consider:
enum Option<A> { Some(A), None }
enum OptionalFn<B> { Some(|B|), None }
enum OptionalMap<C> { Some(|C| -> C), None }
Here, we will generate the constraints:
1. V(A) <= +
2. V(B) <= -
3. V(C) <= +
4. V(C) <= -
These indicate that (1) the variance of A must be at most covariant;
(2) the variance of B must be at most contravariant; and (3, 4) the
variance of C must be at most covariant *and* contravariant. All of these
results are based on a variance lattice defined as follows:
* Top (bivariant)
- +
o Bottom (invariant)
Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
optimal solution. Note that there is always a naive solution which
just declares all variables to be invariant.
You may be wondering why fixed-point iteration is required. The reason
is that the variance of a use site may itself be a function of the
variance of other type parameters. In full generality, our constraints
take the form:
V(X) <= Term
Term := + | - | * | o | V(X) | Term x Term
Here the notation V(X) indicates the variance of a type/region
parameter `X` with respect to its defining class. `Term x Term`
represents the "variance transform" as defined in the paper:
If the variance of a type variable `X` in type expression `E` is `V2`
and the definition-site variance of the [corresponding] type parameter
of a class `C` is `V1`, then the variance of `X` in the type expression
`C<E>` is `V3 = V1.xform(V2)`.
*/
use self::VarianceTerm::*;
use self::ParamKind::*;
@ -219,18 +216,16 @@ pub fn infer_variance(tcx: &ty::ctxt) {
tcx.variance_computed.set(true);
}
/**************************************************************************
* Representing terms
*
* Terms are structured as a straightforward tree. Rather than rely on
* GC, we allocate terms out of a bounded arena (the lifetime of this
* arena is the lifetime 'a that is threaded around).
*
* We assign a unique index to each type/region parameter whose variance
* is to be inferred. We refer to such variables as "inferreds". An
* `InferredIndex` is a newtype'd int representing the index of such
* a variable.
*/
// Representing terms
//
// Terms are structured as a straightforward tree. Rather than rely on
// GC, we allocate terms out of a bounded arena (the lifetime of this
// arena is the lifetime 'a that is threaded around).
//
// We assign a unique index to each type/region parameter whose variance
// is to be inferred. We refer to such variables as "inferreds". An
// `InferredIndex` is a newtype'd int representing the index of such
// a variable.
type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
@ -253,9 +248,7 @@ impl<'a> fmt::Show for VarianceTerm<'a> {
}
}
/**************************************************************************
* The first pass over the crate simply builds up the set of inferreds.
*/
// The first pass over the crate simply builds up the set of inferreds.
struct TermsContext<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
@ -399,12 +392,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
}
}
/**************************************************************************
* Constraint construction and representation
*
* The second pass over the AST determines the set of constraints.
* We walk the set of items and, for each member, generate new constraints.
*/
// Constraint construction and representation
//
// The second pass over the AST determines the set of constraints.
// We walk the set of items and, for each member, generate new constraints.
struct ConstraintContext<'a, 'tcx: 'a> {
terms_cx: TermsContext<'a, 'tcx>,
@ -632,6 +623,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
return result;
}
/// Returns a variance term representing the declared variance of the type/region parameter
/// with the given id.
fn declared_variance(&self,
param_def_id: ast::DefId,
item_def_id: ast::DefId,
@ -639,11 +632,6 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
space: ParamSpace,
index: uint)
-> VarianceTermPtr<'a> {
/*!
* Returns a variance term representing the declared variance of
* the type/region parameter with the given id.
*/
assert_eq!(param_def_id.krate, item_def_id.krate);
if self.invariant_lang_items[kind as uint] == Some(item_def_id) {
@ -944,14 +932,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
}
}
/**************************************************************************
* Constraint solving
*
* The final phase iterates over the constraints, refining the variance
* for each inferred until a fixed point is reached. This will be the
* optimal solution to the constraints. The final variance for each
* inferred is then written into the `variance_map` in the tcx.
*/
// Constraint solving
//
// The final phase iterates over the constraints, refining the variance
// for each inferred until a fixed point is reached. This will be the
// optimal solution to the constraints. The final variance for each
// inferred is then written into the `variance_map` in the tcx.
struct SolveContext<'a, 'tcx: 'a> {
terms_cx: TermsContext<'a, 'tcx>,
@ -1086,9 +1072,7 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> {
}
}
/**************************************************************************
* Miscellany transformations on variance
*/
// Miscellany transformations on variance
trait Xform {
fn xform(self, v: Self) -> Self;

View File

@ -8,54 +8,52 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Infrastructure for compiler plugins.
*
* Plugins are Rust libraries which extend the behavior of `rustc`
* in various ways.
*
* Plugin authors will use the `Registry` type re-exported by
* this module, along with its methods. The rest of the module
* is for use by `rustc` itself.
*
* To define a plugin, build a dylib crate with a
* `#[plugin_registrar]` function:
*
* ```rust,ignore
* #![crate_name = "myplugin"]
* #![crate_type = "dylib"]
* #![feature(plugin_registrar)]
*
* extern crate rustc;
*
* use rustc::plugin::Registry;
*
* #[plugin_registrar]
* pub fn plugin_registrar(reg: &mut Registry) {
* reg.register_macro("mymacro", expand_mymacro);
* }
*
* fn expand_mymacro(...) { // details elided
* ```
*
* WARNING: We currently don't check that the registrar function
* has the appropriate type!
*
* To use a plugin while compiling another crate:
*
* ```rust
* #![feature(phase)]
*
* #[phase(plugin)]
* extern crate myplugin;
* ```
*
* If you also need the plugin crate available at runtime, use
* `phase(plugin, link)`.
*
* See [the compiler plugin guide](../../guide-plugin.html)
* for more examples.
*/
//! Infrastructure for compiler plugins.
//!
//! Plugins are Rust libraries which extend the behavior of `rustc`
//! in various ways.
//!
//! Plugin authors will use the `Registry` type re-exported by
//! this module, along with its methods. The rest of the module
//! is for use by `rustc` itself.
//!
//! To define a plugin, build a dylib crate with a
//! `#[plugin_registrar]` function:
//!
//! ```rust,ignore
//! #![crate_name = "myplugin"]
//! #![crate_type = "dylib"]
//! #![feature(plugin_registrar)]
//!
//! extern crate rustc;
//!
//! use rustc::plugin::Registry;
//!
//! #[plugin_registrar]
//! pub fn plugin_registrar(reg: &mut Registry) {
//! reg.register_macro("mymacro", expand_mymacro);
//! }
//!
//! fn expand_mymacro(...) { // details elided
//! ```
//!
//! WARNING: We currently don't check that the registrar function
//! has the appropriate type!
//!
//! To use a plugin while compiling another crate:
//!
//! ```rust
//! #![feature(phase)]
//!
//! #[phase(plugin)]
//! extern crate myplugin;
//! ```
//!
//! If you also need the plugin crate available at runtime, use
//! `phase(plugin, link)`.
//!
//! See [the compiler plugin guide](../../guide-plugin.html)
//! for more examples.
pub use self::registry::Registry;

View File

@ -122,24 +122,20 @@ pub fn block_query(b: &ast::Block, p: |&ast::Expr| -> bool) -> bool {
return v.flag;
}
// K: Eq + Hash<S>, V, S, H: Hasher<S>
/// K: Eq + Hash<S>, V, S, H: Hasher<S>
///
/// Determines whether there exists a path from `source` to `destination`. The graph is defined by
/// the `edges_map`, which maps from a node `S` to a list of its adjacent nodes `T`.
///
/// Efficiency note: This is implemented in an inefficient way because it is typically invoked on
/// very small graphs. If the graphs become larger, a more efficient graph representation and
/// algorithm would probably be advised.
pub fn can_reach<S,H:Hasher<S>,T:Eq+Clone+Hash<S>>(
edges_map: &HashMap<T,Vec<T>,H>,
source: T,
destination: T)
-> bool
{
/*!
* Determines whether there exists a path from `source` to
* `destination`. The graph is defined by the `edges_map`, which
* maps from a node `S` to a list of its adjacent nodes `T`.
*
* Efficiency note: This is implemented in an inefficient way
* because it is typically invoked on very small graphs. If the graphs
* become larger, a more efficient graph representation and algorithm
* would probably be advised.
*/
if source == destination {
return true;
}

Some files were not shown because too many files have changed in this diff Show More