Merge remote-tracking branch 'origin/master' into gen

Alex Crichton 2017-08-14 19:36:13 -07:00
commit 1413253a41
232 changed files with 5063 additions and 2000 deletions

View File

@ -99,7 +99,7 @@ Before you can start building the compiler you need to configure the build for
your system. In most cases, that will just mean using the defaults provided
for Rust.
To change configuration, you must copy the file `src/bootstrap/config.toml.example`
To change configuration, you must copy the file `config.toml.example`
to `config.toml` in the directory from which you will be running the build, and
change the settings provided.
@ -237,10 +237,13 @@ Some common invocations of `x.py` are:
## Pull Requests
Pull requests are the primary mechanism we use to change Rust. GitHub itself
has some [great documentation][pull-requests] on using the Pull Request
feature. We use the 'fork and pull' model described there.
has some [great documentation][pull-requests] on using the Pull Request feature.
We use the "fork and pull" model [described here][development-models], where
contributors push changes to their personal fork and create pull requests to
bring those changes into the source repository.
[pull-requests]: https://help.github.com/articles/using-pull-requests/
[pull-requests]: https://help.github.com/articles/about-pull-requests/
[development-models]: https://help.github.com/articles/about-collaborative-development-models/
Please make pull requests against the `master` branch.

View File

@ -39,7 +39,7 @@ Read ["Installation"] from [The Book].
```
> ***Note:*** Install locations can be adjusted by copying the config file
> from `./src/bootstrap/config.toml.example` to `./config.toml`, and
> from `./config.toml.example` to `./config.toml`, and
> adjusting the `prefix` option under `[install]`. Various other options, such
> as enabling debug information, are also supported, and are documented in
> the config file.

View File

@ -258,6 +258,9 @@
# saying that the FileCheck executable is missing, you may want to disable this.
#codegen-tests = true
# Flag indicating whether git info will be retrieved from .git automatically.
#ignore-git = false
# =============================================================================
# Options for specific targets
#

View File

@ -80,7 +80,7 @@ handled naturally.
Next, rustbuild offers a TOML-based configuration system with a `config.toml`
file in the same location as `config.mk`. An example of this configuration can
be found at `src/bootstrap/config.toml.example`, and the configuration file
be found at `config.toml.example`, and the configuration file
can also be passed as `--config path/to/config.toml` if the build system is
being invoked manually (via the python script).

View File

@ -21,11 +21,10 @@ extern crate bootstrap;
use std::env;
use bootstrap::{Flags, Config, Build};
use bootstrap::{Config, Build};
fn main() {
let args = env::args().skip(1).collect::<Vec<_>>();
let flags = Flags::parse(&args);
let config = Config::parse(&flags.build, flags.config.clone());
Build::new(flags, config).build();
let config = Config::parse(&args);
Build::new(config).build();
}

View File

@ -120,28 +120,19 @@ impl StepDescription {
fn maybe_run(&self, builder: &Builder, path: Option<&Path>) {
let build = builder.build;
let hosts = if self.only_build_targets || self.only_build {
&build.config.host[..1]
build.build_triple()
} else {
&build.hosts
};
// Determine the actual targets participating in this rule.
// NOTE: We should keep the full projection from build triple to
// the hosts for the dist steps, now that the hosts array above is
// truncated to avoid duplication of work in that case. Therefore
// the original non-shadowed hosts array is used below.
// Determine the targets participating in this rule.
let targets = if self.only_hosts {
// If --target was specified but --host wasn't specified,
// don't run any host-only tests. Also, respect any `--host`
// overrides as done for `hosts`.
if build.flags.host.len() > 0 {
&build.flags.host[..]
} else if build.flags.target.len() > 0 {
if build.config.run_host_only {
&[]
} else if self.only_build {
&build.config.host[..1]
build.build_triple()
} else {
&build.config.host[..]
&build.hosts
}
} else {
&build.targets
@ -288,7 +279,7 @@ impl<'a> Builder<'a> {
let builder = Builder {
build: build,
top_stage: build.flags.stage.unwrap_or(2),
top_stage: build.config.stage.unwrap_or(2),
kind: kind,
cache: Cache::new(),
stack: RefCell::new(Vec::new()),
@ -307,7 +298,7 @@ impl<'a> Builder<'a> {
}
pub fn run(build: &Build) {
let (kind, paths) = match build.flags.cmd {
let (kind, paths) = match build.config.cmd {
Subcommand::Build { ref paths } => (Kind::Build, &paths[..]),
Subcommand::Doc { ref paths } => (Kind::Doc, &paths[..]),
Subcommand::Test { ref paths, .. } => (Kind::Test, &paths[..]),
@ -319,7 +310,7 @@ impl<'a> Builder<'a> {
let builder = Builder {
build: build,
top_stage: build.flags.stage.unwrap_or(2),
top_stage: build.config.stage.unwrap_or(2),
kind: kind,
cache: Cache::new(),
stack: RefCell::new(Vec::new()),
@ -333,7 +324,7 @@ impl<'a> Builder<'a> {
StepDescription::run(&Builder::get_step_descriptions(Kind::Doc), self, paths);
}
/// Obtain a compiler at a given stage and for a given host. Explictly does
/// Obtain a compiler at a given stage and for a given host. Explicitly does
/// not take `Compiler` since all `Compiler` instances are meant to be
/// obtained through this function, since it ensures that they are valid
/// (i.e., built and assembled).
@ -414,22 +405,19 @@ impl<'a> Builder<'a> {
}
}
pub fn rustdoc(&self, compiler: Compiler) -> PathBuf {
self.ensure(tool::Rustdoc { target_compiler: compiler })
pub fn rustdoc(&self, host: Interned<String>) -> PathBuf {
self.ensure(tool::Rustdoc { host })
}
pub fn rustdoc_cmd(&self, compiler: Compiler) -> Command {
pub fn rustdoc_cmd(&self, host: Interned<String>) -> Command {
let mut cmd = Command::new(&self.out.join("bootstrap/debug/rustdoc"));
let compiler = self.compiler(self.top_stage, host);
cmd
.env("RUSTC_STAGE", compiler.stage.to_string())
.env("RUSTC_SYSROOT", if compiler.is_snapshot(&self.build) {
INTERNER.intern_path(self.build.rustc_snapshot_libdir())
} else {
self.sysroot(compiler)
})
.env("RUSTC_LIBDIR", self.rustc_libdir(compiler))
.env("RUSTC_SYSROOT", self.sysroot(compiler))
.env("RUSTC_LIBDIR", self.sysroot_libdir(compiler, self.build.build))
.env("CFG_RELEASE_CHANNEL", &self.build.config.channel)
.env("RUSTDOC_REAL", self.rustdoc(compiler));
.env("RUSTDOC_REAL", self.rustdoc(host));
cmd
}
@ -483,7 +471,7 @@ impl<'a> Builder<'a> {
.env("RUSTC_RPATH", self.config.rust_rpath.to_string())
.env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc"))
.env("RUSTDOC_REAL", if cmd == "doc" || cmd == "test" {
self.rustdoc(compiler)
self.rustdoc(compiler.host)
} else {
PathBuf::from("/path/to/nowhere/rustdoc/not/required")
})
@ -501,7 +489,7 @@ impl<'a> Builder<'a> {
// crates). Let's say, for example that rustc itself depends on the
// bitflags crate. If an external crate then depends on the
// bitflags crate as well, we need to make sure they don't
// conflict, even if they pick the same verison of bitflags. We'll
// conflict, even if they pick the same version of bitflags. We'll
// want to make sure that e.g. a plugin and rustc each get their
// own copy of bitflags.
@ -543,12 +531,12 @@ impl<'a> Builder<'a> {
// Ignore incremental modes except for stage0, since we're
// not guaranteeing correctness across builds if the compiler
// is changing under your feet.`
if self.flags.incremental && compiler.stage == 0 {
if self.config.incremental && compiler.stage == 0 {
let incr_dir = self.incremental_dir(compiler);
cargo.env("RUSTC_INCREMENTAL", incr_dir);
}
if let Some(ref on_fail) = self.flags.on_fail {
if let Some(ref on_fail) = self.config.on_fail {
cargo.env("RUSTC_ON_FAIL", on_fail);
}

View File

@ -32,6 +32,7 @@
//! everything.
use std::process::Command;
use std::iter;
use build_helper::{cc2ar, output};
use gcc;
@ -43,47 +44,41 @@ use cache::Interned;
pub fn find(build: &mut Build) {
// For all targets we're going to need a C compiler for building some shims
// and such as well as for being a linker for Rust code.
//
// This includes targets that aren't necessarily passed on the commandline
// (FIXME: Perhaps it shouldn't?)
for target in &build.config.target {
for target in build.targets.iter().chain(&build.hosts).cloned().chain(iter::once(build.build)) {
let mut cfg = gcc::Config::new();
cfg.cargo_metadata(false).opt_level(0).debug(false)
.target(target).host(&build.build);
.target(&target).host(&build.build);
let config = build.config.target_config.get(&target);
if let Some(cc) = config.and_then(|c| c.cc.as_ref()) {
cfg.compiler(cc);
} else {
set_compiler(&mut cfg, "gcc", *target, config, build);
set_compiler(&mut cfg, "gcc", target, config, build);
}
let compiler = cfg.get_compiler();
let ar = cc2ar(compiler.path(), target);
build.verbose(&format!("CC_{} = {:?}", target, compiler.path()));
let ar = cc2ar(compiler.path(), &target);
build.verbose(&format!("CC_{} = {:?}", &target, compiler.path()));
if let Some(ref ar) = ar {
build.verbose(&format!("AR_{} = {:?}", target, ar));
build.verbose(&format!("AR_{} = {:?}", &target, ar));
}
build.cc.insert(*target, (compiler, ar));
build.cc.insert(target, (compiler, ar));
}
// For all host triples we need to find a C++ compiler as well
//
// This includes hosts that aren't necessarily passed on the commandline
// (FIXME: Perhaps it shouldn't?)
for host in &build.config.host {
for host in build.hosts.iter().cloned().chain(iter::once(build.build)) {
let mut cfg = gcc::Config::new();
cfg.cargo_metadata(false).opt_level(0).debug(false).cpp(true)
.target(host).host(&build.build);
let config = build.config.target_config.get(host);
.target(&host).host(&build.build);
let config = build.config.target_config.get(&host);
if let Some(cxx) = config.and_then(|c| c.cxx.as_ref()) {
cfg.compiler(cxx);
} else {
set_compiler(&mut cfg, "g++", *host, config, build);
set_compiler(&mut cfg, "g++", host, config, build);
}
let compiler = cfg.get_compiler();
build.verbose(&format!("CXX_{} = {:?}", host, compiler.path()));
build.cxx.insert(*host, compiler);
build.cxx.insert(host, compiler);
}
}

View File

@ -21,6 +21,7 @@ use std::process::Command;
use build_helper::output;
use Build;
use config::Config;
// The version number
pub const CFG_RELEASE_NUM: &str = "1.21.0";
@ -41,9 +42,9 @@ struct Info {
}
impl GitInfo {
pub fn new(dir: &Path) -> GitInfo {
pub fn new(config: &Config, dir: &Path) -> GitInfo {
// See if this even begins to look like a git dir
if !dir.join(".git").exists() {
if config.ignore_git || !dir.join(".git").exists() {
return GitInfo { inner: None }
}

View File

@ -164,7 +164,7 @@ impl Step for Cargotest {
try_run(build, cmd.arg(&build.initial_cargo)
.arg(&out_dir)
.env("RUSTC", builder.rustc(compiler))
.env("RUSTDOC", builder.rustdoc(compiler)));
.env("RUSTDOC", builder.rustdoc(compiler.host)));
}
}
@ -565,7 +565,7 @@ impl Step for Compiletest {
// Avoid depending on rustdoc when we don't need it.
if mode == "rustdoc" || mode == "run-make" {
cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler));
cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler.host));
}
cmd.arg("--src-base").arg(build.src.join("src/test").join(suite));
@ -618,14 +618,8 @@ impl Step for Compiletest {
if let Some(ref dir) = build.lldb_python_dir {
cmd.arg("--lldb-python-dir").arg(dir);
}
let llvm_config = build.llvm_config(target);
let llvm_version = output(Command::new(&llvm_config).arg("--version"));
cmd.arg("--llvm-version").arg(llvm_version);
if !build.is_rust_llvm(target) {
cmd.arg("--system-llvm");
}
cmd.args(&build.flags.cmd.test_args());
cmd.args(&build.config.cmd.test_args());
if build.is_verbose() {
cmd.arg("--verbose");
@ -635,17 +629,32 @@ impl Step for Compiletest {
cmd.arg("--quiet");
}
// Only pass correct values for these flags for the `run-make` suite as it
// requires that a C++ compiler was configured which isn't always the case.
if suite == "run-make" {
let llvm_components = output(Command::new(&llvm_config).arg("--components"));
let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
cmd.arg("--cc").arg(build.cc(target))
.arg("--cxx").arg(build.cxx(target).unwrap())
.arg("--cflags").arg(build.cflags(target).join(" "))
.arg("--llvm-components").arg(llvm_components.trim())
.arg("--llvm-cxxflags").arg(llvm_cxxflags.trim());
} else {
if build.config.llvm_enabled {
let llvm_config = build.llvm_config(target);
let llvm_version = output(Command::new(&llvm_config).arg("--version"));
cmd.arg("--llvm-version").arg(llvm_version);
if !build.is_rust_llvm(target) {
cmd.arg("--system-llvm");
}
// Only pass correct values for these flags for the `run-make` suite as it
// requires that a C++ compiler was configured which isn't always the case.
if suite == "run-make" {
let llvm_components = output(Command::new(&llvm_config).arg("--components"));
let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
cmd.arg("--cc").arg(build.cc(target))
.arg("--cxx").arg(build.cxx(target).unwrap())
.arg("--cflags").arg(build.cflags(target).join(" "))
.arg("--llvm-components").arg(llvm_components.trim())
.arg("--llvm-cxxflags").arg(llvm_cxxflags.trim());
}
}
if suite == "run-make" && !build.config.llvm_enabled {
println!("Ignoring run-make test suite as they generally dont work without LLVM");
return;
}
if suite != "run-make" {
cmd.arg("--cc").arg("")
.arg("--cxx").arg("")
.arg("--cflags").arg("")
@ -814,13 +823,13 @@ fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) {
}
println!("doc tests for: {}", markdown.display());
let mut cmd = builder.rustdoc_cmd(compiler);
let mut cmd = builder.rustdoc_cmd(compiler.host);
build.add_rust_test_threads(&mut cmd);
cmd.arg("--test");
cmd.arg(markdown);
cmd.env("RUSTC_BOOTSTRAP", "1");
let test_args = build.flags.cmd.test_args().join(" ");
let test_args = build.config.cmd.test_args().join(" ");
cmd.arg("--test-args").arg(test_args);
if build.config.quiet_tests {
@ -1051,7 +1060,7 @@ impl Step for Crate {
cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());
cargo.arg("--");
cargo.args(&build.flags.cmd.test_args());
cargo.args(&build.config.cmd.test_args());
if build.config.quiet_tests {
cargo.arg("--quiet");
@ -1147,6 +1156,7 @@ pub struct Distcheck;
impl Step for Distcheck {
type Output = ();
const ONLY_BUILD: bool = true;
fn should_run(run: ShouldRun) -> ShouldRun {
run.path("distcheck")
@ -1160,16 +1170,6 @@ impl Step for Distcheck {
fn run(self, builder: &Builder) {
let build = builder.build;
if *build.build != *"x86_64-unknown-linux-gnu" {
return
}
if !build.config.host.iter().any(|s| s == "x86_64-unknown-linux-gnu") {
return
}
if !build.config.target.iter().any(|s| s == "x86_64-unknown-linux-gnu") {
return
}
println!("Distcheck");
let dir = build.out.join("tmp").join("distcheck");
let _ = fs::remove_dir_all(&dir);
@ -1236,7 +1236,7 @@ impl Step for Bootstrap {
if !build.fail_fast {
cmd.arg("--no-fail-fast");
}
cmd.arg("--").args(&build.flags.cmd.test_args());
cmd.arg("--").args(&build.config.cmd.test_args());
try_run(build, &mut cmd);
}

View File

@ -26,7 +26,7 @@ pub fn clean(build: &Build) {
rm_rf(&build.out.join("tmp"));
rm_rf(&build.out.join("dist"));
for host in build.config.host.iter() {
for host in &build.hosts {
let entries = match build.out.join(host).read_dir() {
Ok(iter) => iter,
Err(_) => continue,

View File

@ -32,6 +32,7 @@ use serde_json;
use util::{exe, libdir, is_dylib, copy};
use {Build, Compiler, Mode};
use native;
use tool;
use cache::{INTERNER, Interned};
use builder::{Step, RunConfig, ShouldRun, Builder};
@ -198,6 +199,12 @@ impl Step for StdLink {
// for reason why the sanitizers are not built in stage0.
copy_apple_sanitizer_dylibs(&build.native_dir(target), "osx", &libdir);
}
builder.ensure(tool::CleanTools {
compiler: target_compiler,
target: target,
mode: Mode::Libstd,
});
}
}
@ -389,6 +396,11 @@ impl Step for TestLink {
target);
add_to_sysroot(&builder.sysroot_libdir(target_compiler, target),
&libtest_stamp(build, compiler, target));
builder.ensure(tool::CleanTools {
compiler: target_compiler,
target: target,
mode: Mode::Libtest,
});
}
}
@ -567,6 +579,11 @@ impl Step for RustcLink {
target);
add_to_sysroot(&builder.sysroot_libdir(target_compiler, target),
&librustc_stamp(build, compiler, target));
builder.ensure(tool::CleanTools {
compiler: target_compiler,
target: target,
mode: Mode::Librustc,
});
}
}
@ -679,10 +696,10 @@ impl Step for Assemble {
// link to these. (FIXME: Is that correct? It seems to be correct most
// of the time but I think we do link to these for stage2/bin compilers
// when not performing a full bootstrap).
if builder.build.flags.keep_stage.map_or(false, |s| target_compiler.stage <= s) {
if builder.build.config.keep_stage.map_or(false, |s| target_compiler.stage <= s) {
builder.verbose("skipping compilation of compiler due to --keep-stage");
let compiler = build_compiler;
for stage in 0..min(target_compiler.stage, builder.flags.keep_stage.unwrap()) {
for stage in 0..min(target_compiler.stage, builder.config.keep_stage.unwrap()) {
let target_compiler = builder.compiler(stage, target_compiler.host);
let target = target_compiler.host;
builder.ensure(StdLink { compiler, target_compiler, target });

View File

@ -19,11 +19,14 @@ use std::fs::{self, File};
use std::io::prelude::*;
use std::path::PathBuf;
use std::process;
use std::cmp;
use num_cpus;
use toml;
use util::{exe, push_exe_path};
use cache::{INTERNER, Interned};
use flags::Flags;
pub use flags::Subcommand;
/// Global configuration for the entire build and/or bootstrap.
///
@ -35,7 +38,7 @@ use cache::{INTERNER, Interned};
/// Note that this structure is not decoded directly into, but rather it is
/// filled out from the decoded forms of the structs below. For documentation
/// each field, see the corresponding fields in
/// `src/bootstrap/config.toml.example`.
/// `config.toml.example`.
#[derive(Default)]
pub struct Config {
pub ccache: Option<String>,
@ -51,6 +54,17 @@ pub struct Config {
pub extended: bool,
pub sanitizers: bool,
pub profiler: bool,
pub ignore_git: bool,
pub run_host_only: bool,
pub on_fail: Option<String>,
pub stage: Option<u32>,
pub keep_stage: Option<u32>,
pub src: PathBuf,
pub jobs: Option<u32>,
pub cmd: Subcommand,
pub incremental: bool,
// llvm codegen options
pub llvm_enabled: bool,
@ -79,8 +93,8 @@ pub struct Config {
pub rust_dist_src: bool,
pub build: Interned<String>,
pub host: Vec<Interned<String>>,
pub target: Vec<Interned<String>>,
pub hosts: Vec<Interned<String>>,
pub targets: Vec<Interned<String>>,
pub local_rebuild: bool,
// dist misc
@ -249,6 +263,7 @@ struct Rust {
optimize_tests: Option<bool>,
debuginfo_tests: Option<bool>,
codegen_tests: Option<bool>,
ignore_git: Option<bool>,
}
/// TOML representation of how each build target is configured.
@ -265,7 +280,9 @@ struct TomlTarget {
}
impl Config {
pub fn parse(build: &str, file: Option<PathBuf>) -> Config {
pub fn parse(args: &[String]) -> Config {
let flags = Flags::parse(&args);
let file = flags.config.clone();
let mut config = Config::default();
config.llvm_enabled = true;
config.llvm_optimize = true;
@ -277,11 +294,22 @@ impl Config {
config.docs = true;
config.rust_rpath = true;
config.rust_codegen_units = 1;
config.build = INTERNER.intern_str(build);
config.channel = "dev".to_string();
config.codegen_tests = true;
config.ignore_git = false;
config.rust_dist_src = true;
config.on_fail = flags.on_fail;
config.stage = flags.stage;
config.src = flags.src;
config.jobs = flags.jobs;
config.cmd = flags.cmd;
config.incremental = flags.incremental;
config.keep_stage = flags.keep_stage;
// If --target was specified but --host wasn't specified, don't run any host-only tests.
config.run_host_only = flags.host.is_empty() && !flags.target.is_empty();
let toml = file.map(|file| {
let mut f = t!(File::open(&file));
let mut contents = String::new();
@ -298,20 +326,37 @@ impl Config {
let build = toml.build.clone().unwrap_or(Build::default());
set(&mut config.build, build.build.clone().map(|x| INTERNER.intern_string(x)));
config.host.push(config.build.clone());
set(&mut config.build, flags.build);
if config.build.is_empty() {
// set by bootstrap.py
config.build = INTERNER.intern_str(&env::var("BUILD").unwrap());
}
config.hosts.push(config.build.clone());
for host in build.host.iter() {
let host = INTERNER.intern_str(host);
if !config.host.contains(&host) {
config.host.push(host);
if !config.hosts.contains(&host) {
config.hosts.push(host);
}
}
for target in config.host.iter().cloned()
for target in config.hosts.iter().cloned()
.chain(build.target.iter().map(|s| INTERNER.intern_str(s)))
{
if !config.target.contains(&target) {
config.target.push(target);
if !config.targets.contains(&target) {
config.targets.push(target);
}
}
config.hosts = if !flags.host.is_empty() {
flags.host
} else {
config.hosts
};
config.targets = if !flags.target.is_empty() {
flags.target
} else {
config.targets
};
config.nodejs = build.nodejs.map(PathBuf::from);
config.gdb = build.gdb.map(PathBuf::from);
config.python = build.python.map(PathBuf::from);
@ -327,6 +372,7 @@ impl Config {
set(&mut config.sanitizers, build.sanitizers);
set(&mut config.profiler, build.profiler);
set(&mut config.openssl_static, build.openssl_static);
config.verbose = cmp::max(config.verbose, flags.verbose);
if let Some(ref install) = toml.install {
config.prefix = install.prefix.clone().map(PathBuf::from);
@ -373,6 +419,7 @@ impl Config {
set(&mut config.use_jemalloc, rust.use_jemalloc);
set(&mut config.backtrace, rust.backtrace);
set(&mut config.channel, rust.channel.clone());
set(&mut config.ignore_git, rust.ignore_git);
config.rustc_default_linker = rust.default_linker.clone();
config.rustc_default_ar = rust.default_ar.clone();
config.musl_root = rust.musl_root.clone().map(PathBuf::from);
@ -505,11 +552,11 @@ impl Config {
match key {
"CFG_BUILD" if value.len() > 0 => self.build = INTERNER.intern_str(value),
"CFG_HOST" if value.len() > 0 => {
self.host.extend(value.split(" ").map(|s| INTERNER.intern_str(s)));
self.hosts.extend(value.split(" ").map(|s| INTERNER.intern_str(s)));
}
"CFG_TARGET" if value.len() > 0 => {
self.target.extend(value.split(" ").map(|s| INTERNER.intern_str(s)));
self.targets.extend(value.split(" ").map(|s| INTERNER.intern_str(s)));
}
"CFG_EXPERIMENTAL_TARGETS" if value.len() > 0 => {
self.llvm_experimental_targets = Some(value.to_string());

View File

@ -413,8 +413,7 @@ impl Step for Rustc {
t!(fs::create_dir_all(image.join("bin")));
cp_r(&src.join("bin"), &image.join("bin"));
install(&builder.ensure(tool::Rustdoc { target_compiler: compiler }),
&image.join("bin"), 0o755);
install(&builder.rustdoc(compiler.host), &image.join("bin"), 0o755);
// Copy runtime DLLs needed by the compiler
if libdir != "bin" {
@ -546,7 +545,7 @@ impl Step for Std {
// We want to package up as many target libraries as possible
// for the `rust-std` package, so if this is a host target we
// depend on librustc and otherwise we just depend on libtest.
if build.config.host.iter().any(|t| t == target) {
if build.hosts.iter().any(|t| t == target) {
builder.ensure(compile::Rustc { compiler, target });
} else {
builder.ensure(compile::Test { compiler, target });

View File

@ -260,7 +260,7 @@ fn invoke_rustdoc(builder: &Builder, compiler: Compiler, target: Interned<String
t!(t!(File::create(&version_info)).write_all(info.as_bytes()));
}
let mut cmd = builder.rustdoc_cmd(compiler);
let mut cmd = builder.rustdoc_cmd(compiler.host);
let out = out.join("book");
@ -306,7 +306,7 @@ impl Step for Standalone {
///
/// This will list all of `src/doc` looking for markdown files and appropriately
/// perform transformations like substituting `VERSION`, `SHORT_HASH`, and
/// `STAMP` alongw ith providing the various header/footer HTML we've cutomized.
/// `STAMP` along with providing the various header/footer HTML we've customized.
///
/// In the end, this is just a glorified wrapper around rustdoc!
fn run(self, builder: &Builder) {
@ -343,7 +343,7 @@ impl Step for Standalone {
}
let html = out.join(filename).with_extension("html");
let rustdoc = builder.rustdoc(compiler);
let rustdoc = builder.rustdoc(compiler.host);
if up_to_date(&path, &html) &&
up_to_date(&footer, &html) &&
up_to_date(&favicon, &html) &&
@ -353,7 +353,7 @@ impl Step for Standalone {
continue
}
let mut cmd = builder.rustdoc_cmd(compiler);
let mut cmd = builder.rustdoc_cmd(compiler.host);
cmd.arg("--html-after-content").arg(&footer)
.arg("--html-before-content").arg(&version_info)
.arg("--html-in-header").arg(&favicon)
@ -408,7 +408,7 @@ impl Step for Std {
let out = build.doc_out(target);
t!(fs::create_dir_all(&out));
let compiler = builder.compiler(stage, build.build);
let rustdoc = builder.rustdoc(compiler);
let rustdoc = builder.rustdoc(compiler.host);
let compiler = if build.force_use_stage1(compiler, target) {
builder.compiler(1, compiler.host)
} else {
@ -493,7 +493,7 @@ impl Step for Test {
let out = build.doc_out(target);
t!(fs::create_dir_all(&out));
let compiler = builder.compiler(stage, build.build);
let rustdoc = builder.rustdoc(compiler);
let rustdoc = builder.rustdoc(compiler.host);
let compiler = if build.force_use_stage1(compiler, target) {
builder.compiler(1, compiler.host)
} else {
@ -554,7 +554,7 @@ impl Step for Rustc {
let out = build.doc_out(target);
t!(fs::create_dir_all(&out));
let compiler = builder.compiler(stage, build.build);
let rustdoc = builder.rustdoc(compiler);
let rustdoc = builder.rustdoc(compiler.host);
let compiler = if build.force_use_stage1(compiler, target) {
builder.compiler(1, compiler.host)
} else {

View File

@ -33,7 +33,8 @@ pub struct Flags {
pub on_fail: Option<String>,
pub stage: Option<u32>,
pub keep_stage: Option<u32>,
pub build: Interned<String>,
pub build: Option<Interned<String>>,
pub host: Vec<Interned<String>>,
pub target: Vec<Interned<String>>,
pub config: Option<PathBuf>,
@ -68,6 +69,14 @@ pub enum Subcommand {
},
}
impl Default for Subcommand {
fn default() -> Subcommand {
Subcommand::Build {
paths: vec![PathBuf::from("nowhere")],
}
}
}
impl Flags {
pub fn parse(args: &[String]) -> Flags {
let mut extra_help = String::new();
@ -243,10 +252,8 @@ Arguments:
// All subcommands can have an optional "Available paths" section
if matches.opt_present("verbose") {
let flags = Flags::parse(&["build".to_string()]);
let mut config = Config::parse(&flags.build, cfg_file.clone());
config.build = flags.build.clone();
let mut build = Build::new(flags, config);
let config = Config::parse(&["build".to_string()]);
let mut build = Build::new(config);
metadata::build(&mut build);
let maybe_rules_help = Builder::get_help(&build, subcommand.as_str());
@ -320,9 +327,7 @@ Arguments:
stage: stage,
on_fail: matches.opt_str("on-fail"),
keep_stage: matches.opt_str("keep-stage").map(|j| j.parse().unwrap()),
build: INTERNER.intern_string(matches.opt_str("build").unwrap_or_else(|| {
env::var("BUILD").unwrap()
})),
build: matches.opt_str("build").map(|s| INTERNER.intern_string(s)),
host: split(matches.opt_strs("host"))
.into_iter().map(|x| INTERNER.intern_string(x)).collect::<Vec<_>>(),
target: split(matches.opt_strs("target"))

View File

@ -28,7 +28,7 @@ pub fn install_docs(builder: &Builder, stage: u32, host: Interned<String>) {
}
pub fn install_std(builder: &Builder, stage: u32) {
for target in builder.build.config.target.iter() {
for target in &builder.build.targets {
install_sh(builder, "std", "rust-std", stage, Some(*target));
}
}

View File

@ -136,13 +136,13 @@ extern crate toml;
extern crate libc;
use std::cell::Cell;
use std::cmp;
use std::collections::{HashSet, HashMap};
use std::env;
use std::fs::{self, File};
use std::io::Read;
use std::path::{PathBuf, Path};
use std::process::Command;
use std::slice;
use build_helper::{run_silent, run_suppressed, try_run_silent, try_run_suppressed, output, mtime};
@ -187,7 +187,7 @@ mod job {
}
pub use config::Config;
pub use flags::{Flags, Subcommand};
use flags::Subcommand;
use cache::{Interned, INTERNER};
/// A structure representing a Rust compiler.
@ -215,9 +215,6 @@ pub struct Build {
// User-specified configuration via config.toml
config: Config,
// User-specified configuration via CLI flags
flags: Flags,
// Derived properties from the above two configurations
src: PathBuf,
out: PathBuf,
@ -288,9 +285,9 @@ impl Build {
/// line and the filesystem `config`.
///
/// By default all build output will be placed in the current directory.
pub fn new(flags: Flags, config: Config) -> Build {
pub fn new(config: Config) -> Build {
let cwd = t!(env::current_dir());
let src = flags.src.clone();
let src = config.src.clone();
let out = cwd.join("build");
let is_sudo = match env::var_os("SUDO_USER") {
@ -302,43 +299,21 @@ impl Build {
}
None => false,
};
let rust_info = channel::GitInfo::new(&src);
let cargo_info = channel::GitInfo::new(&src.join("src/tools/cargo"));
let rls_info = channel::GitInfo::new(&src.join("src/tools/rls"));
let hosts = if !flags.host.is_empty() {
for host in flags.host.iter() {
if !config.host.contains(host) {
panic!("specified host `{}` is not in configuration", host);
}
}
flags.host.clone()
} else {
config.host.clone()
};
let targets = if !flags.target.is_empty() {
for target in flags.target.iter() {
if !config.target.contains(target) {
panic!("specified target `{}` is not in configuration", target);
}
}
flags.target.clone()
} else {
config.target.clone()
};
let rust_info = channel::GitInfo::new(&config, &src);
let cargo_info = channel::GitInfo::new(&config, &src.join("src/tools/cargo"));
let rls_info = channel::GitInfo::new(&config, &src.join("src/tools/rls"));
Build {
initial_rustc: config.initial_rustc.clone(),
initial_cargo: config.initial_cargo.clone(),
local_rebuild: config.local_rebuild,
fail_fast: flags.cmd.fail_fast(),
verbosity: cmp::max(flags.verbose, config.verbose),
fail_fast: config.cmd.fail_fast(),
verbosity: config.verbose,
build: config.host[0].clone(),
hosts: hosts,
targets: targets,
build: config.build,
hosts: config.hosts.clone(),
targets: config.targets.clone(),
flags: flags,
config: config,
src: src,
out: out,
@ -357,13 +332,19 @@ impl Build {
}
}
pub fn build_triple(&self) -> &[Interned<String>] {
unsafe {
slice::from_raw_parts(&self.build, 1)
}
}
/// Executes the entire build, as configured by the flags and configuration.
pub fn build(&mut self) {
unsafe {
job::setup(self);
}
if let Subcommand::Clean = self.flags.cmd {
if let Subcommand::Clean = self.config.cmd {
return clean::clean(self);
}
@ -608,7 +589,7 @@ impl Build {
/// Returns the number of parallel jobs that have been configured for this
/// build.
fn jobs(&self) -> u32 {
self.flags.jobs.unwrap_or_else(|| num_cpus::get() as u32)
self.config.jobs.unwrap_or_else(|| num_cpus::get() as u32)
}
/// Returns the path to the C compiler for the target specified.
@ -727,7 +708,7 @@ impl Build {
fn force_use_stage1(&self, compiler: Compiler, target: Interned<String>) -> bool {
!self.config.full_bootstrap &&
compiler.stage >= 2 &&
self.config.host.iter().any(|h| *h == target)
self.hosts.iter().any(|h| *h == target)
}
/// Returns the directory that OpenSSL artifacts are compiled into if

View File

@ -85,7 +85,7 @@ pub fn check(build: &mut Build) {
}
// We need cmake, but only if we're actually building LLVM or sanitizers.
let building_llvm = build.config.host.iter()
let building_llvm = build.hosts.iter()
.filter_map(|host| build.config.target_config.get(host))
.any(|config| config.llvm_config.is_none());
if building_llvm || build.config.sanitizers {
@ -114,7 +114,7 @@ pub fn check(build: &mut Build) {
// We're gonna build some custom C code here and there, host triples
// also build some C++ shims for LLVM so we need a C++ compiler.
for target in &build.config.target {
for target in &build.targets {
// On emscripten we don't actually need the C compiler to just
// build the target artifacts, only for testing. For the sake
// of easier bot configuration, just skip detection.
@ -128,7 +128,7 @@ pub fn check(build: &mut Build) {
}
}
for host in build.config.host.iter() {
for host in &build.hosts {
cmd_finder.must_have(build.cxx(*host).unwrap());
// The msvc hosts don't use jemalloc, turn it off globally to
@ -144,7 +144,7 @@ pub fn check(build: &mut Build) {
panic!("FileCheck executable {:?} does not exist", filecheck);
}
for target in &build.config.target {
for target in &build.targets {
// Can't compile for iOS unless we're on macOS
if target.contains("apple-ios") &&
!build.build.contains("apple-darwin") {

View File

@ -23,10 +23,10 @@ use channel::GitInfo;
use cache::Interned;
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
struct CleanTools {
compiler: Compiler,
target: Interned<String>,
mode: Mode,
pub struct CleanTools {
pub compiler: Compiler,
pub target: Interned<String>,
pub mode: Mode,
}
impl Step for CleanTools {
@ -82,7 +82,6 @@ impl Step for ToolBuild {
let target = self.target;
let tool = self.tool;
builder.ensure(CleanTools { compiler, target, mode: self.mode });
match self.mode {
Mode::Libstd => builder.ensure(compile::Std { compiler, target }),
Mode::Libtest => builder.ensure(compile::Test { compiler, target }),
@ -93,38 +92,48 @@ impl Step for ToolBuild {
let _folder = build.fold_output(|| format!("stage{}-{}", compiler.stage, tool));
println!("Building stage{} tool {} ({})", compiler.stage, tool, target);
let mut cargo = builder.cargo(compiler, Mode::Tool, target, "build");
let dir = build.src.join("src/tools").join(tool);
cargo.arg("--manifest-path").arg(dir.join("Cargo.toml"));
// We don't want to build tools dynamically as they'll be running across
// stages and such and it's just easier if they're not dynamically linked.
cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
if let Some(dir) = build.openssl_install_dir(target) {
cargo.env("OPENSSL_STATIC", "1");
cargo.env("OPENSSL_DIR", dir);
cargo.env("LIBZ_SYS_STATIC", "1");
}
cargo.env("CFG_RELEASE_CHANNEL", &build.config.channel);
let info = GitInfo::new(&dir);
if let Some(sha) = info.sha() {
cargo.env("CFG_COMMIT_HASH", sha);
}
if let Some(sha_short) = info.sha_short() {
cargo.env("CFG_SHORT_COMMIT_HASH", sha_short);
}
if let Some(date) = info.commit_date() {
cargo.env("CFG_COMMIT_DATE", date);
}
let mut cargo = prepare_tool_cargo(builder, compiler, target, tool);
build.run(&mut cargo);
build.cargo_out(compiler, Mode::Tool, target).join(exe(tool, &compiler.host))
}
}
fn prepare_tool_cargo(
builder: &Builder,
compiler: Compiler,
target: Interned<String>,
tool: &'static str,
) -> Command {
let build = builder.build;
let mut cargo = builder.cargo(compiler, Mode::Tool, target, "build");
let dir = build.src.join("src/tools").join(tool);
cargo.arg("--manifest-path").arg(dir.join("Cargo.toml"));
// We don't want to build tools dynamically as they'll be running across
// stages and such and it's just easier if they're not dynamically linked.
cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
if let Some(dir) = build.openssl_install_dir(target) {
cargo.env("OPENSSL_STATIC", "1");
cargo.env("OPENSSL_DIR", dir);
cargo.env("LIBZ_SYS_STATIC", "1");
}
cargo.env("CFG_RELEASE_CHANNEL", &build.config.channel);
let info = GitInfo::new(&build.config, &dir);
if let Some(sha) = info.sha() {
cargo.env("CFG_COMMIT_HASH", sha);
}
if let Some(sha_short) = info.sha_short() {
cargo.env("CFG_SHORT_COMMIT_HASH", sha_short);
}
if let Some(date) = info.commit_date() {
cargo.env("CFG_COMMIT_DATE", date);
}
cargo
}
macro_rules! tool {
($($name:ident, $path:expr, $tool_name:expr, $mode:expr;)+) => {
#[derive(Copy, Clone)]
@ -226,7 +235,7 @@ impl Step for RemoteTestServer {
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Rustdoc {
pub target_compiler: Compiler,
pub host: Interned<String>,
}
impl Step for Rustdoc {
@ -240,14 +249,20 @@ impl Step for Rustdoc {
fn make_run(run: RunConfig) {
run.builder.ensure(Rustdoc {
target_compiler: run.builder.compiler(run.builder.top_stage, run.host),
host: run.host,
});
}
fn run(self, builder: &Builder) -> PathBuf {
let target_compiler = self.target_compiler;
let build = builder.build;
let target_compiler = builder.compiler(builder.top_stage, self.host);
let target = target_compiler.host;
let build_compiler = if target_compiler.stage == 0 {
builder.compiler(0, builder.build.build)
} else if target_compiler.stage >= 2 {
// Past stage 2, we consider the compiler to be ABI-compatible and hence capable of
// building rustdoc itself.
builder.compiler(target_compiler.stage, builder.build.build)
} else {
// Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise
// we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage
@ -255,12 +270,18 @@ impl Step for Rustdoc {
builder.compiler(target_compiler.stage - 1, builder.build.build)
};
let tool_rustdoc = builder.ensure(ToolBuild {
compiler: build_compiler,
target: target_compiler.host,
tool: "rustdoc",
mode: Mode::Librustc,
});
builder.ensure(compile::Rustc { compiler: build_compiler, target });
let _folder = build.fold_output(|| format!("stage{}-rustdoc", target_compiler.stage));
println!("Building rustdoc for stage{} ({})", target_compiler.stage, target_compiler.host);
let mut cargo = prepare_tool_cargo(builder, build_compiler, target, "rustdoc");
build.run(&mut cargo);
// Cargo adds a number of paths to the dylib search path on windows, which results in
// the wrong rustdoc being executed. To avoid the conflicting rustdocs, we name the "tool"
// rustdoc a different name.
let tool_rustdoc = build.cargo_out(build_compiler, Mode::Tool, target)
.join(exe("rustdoc-tool-binary", &target_compiler.host));
// don't create a stage0-sysroot/bin directory.
if target_compiler.stage > 0 {

View File

@ -2,7 +2,7 @@
- [What is rustdoc?](what-is-rustdoc.md)
- [Command-line arguments](command-line-arguments.md)
- [In-source directives](in-source-directives.md)
- [The `#[doc]` attribute](the-doc-attribute.md)
- [Documentation tests](documentation-tests.md)
- [Plugins](plugins.md)
- [Passes](passes.md)

View File

@ -1,3 +1,239 @@
# Documentation tests
Coming soon!
`rustdoc` supports executing your documentation examples as tests. This makes sure
that your examples are up to date and working.
The basic idea is this:
```rust,ignore
/// # Examples
///
/// ```
/// let x = 5;
/// ```
```
The triple backticks start and end code blocks. If this were in a file named `foo.rs`,
running `rustdoc --test foo.rs` would extract this example and run it as a test.
There's some subtlety though! Read on for more details.
## Pre-processing examples
In the example above, you'll note something strange: there's no `main`
function! Forcing you to write `main` for every example, no matter how small,
adds friction. So `rustdoc` processes your examples slightly before
running them. Here's the full algorithm rustdoc uses to preprocess examples:
1. Any leading `#![foo]` attributes are left intact as crate attributes.
2. Some common `allow` attributes are inserted, including
`unused_variables`, `unused_assignments`, `unused_mut`,
`unused_attributes`, and `dead_code`. Small examples often trigger
these lints.
3. If the example does not contain `extern crate`, then `extern crate
<mycrate>;` is inserted (note the lack of `#[macro_use]`).
4. Finally, if the example does not contain `fn main`, the remainder of the
text is wrapped in `fn main() { your_code }`.
For more about that caveat in rule 3, see "Documenting Macros" below.
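As a rough illustration (a conceptual sketch only; the exact source `rustdoc` generates is an internal detail), the `let x = 5;` example from `foo.rs` above would effectively be compiled as:

```rust,ignore
// Conceptual result of the preprocessing steps above (illustrative sketch only).
#![allow(unused_variables, unused_assignments, unused_mut, unused_attributes, dead_code)]

extern crate foo; // rule 3: inserted because the example contains no `extern crate`

fn main() {       // rule 4: the example body is wrapped in `fn main`
    let x = 5;
}
```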
## Hiding portions of the example
Sometimes, you need some setup code, or other things that would distract
from your example, but are important to make the tests work. Consider
an example block that looks like this:
```text
/// Some documentation.
# fn foo() {}
```
It will render like this:
```rust
/// Some documentation.
# fn foo() {}
```
Yes, that's right: you can add lines that start with `# `, and they will
be hidden from the output, but will be used when compiling your code. You
can use this to your advantage. In this case, documentation comments need
to apply to some kind of function, so if I want to show you just a
documentation comment, I need to add a little function definition below
it. At the same time, it's only there to satisfy the compiler, so hiding
it makes the example more clear. You can use this technique to explain
longer examples in detail, while still preserving the testability of your
documentation.
For example, imagine that we wanted to document this code:
```rust
let x = 5;
let y = 6;
println!("{}", x + y);
```
We might want the documentation to end up looking like this:
> First, we set `x` to five:
>
> ```rust
> let x = 5;
> # let y = 6;
> # println!("{}", x + y);
> ```
>
> Next, we set `y` to six:
>
> ```rust
> # let x = 5;
> let y = 6;
> # println!("{}", x + y);
> ```
>
> Finally, we print the sum of `x` and `y`:
>
> ```rust
> # let x = 5;
> # let y = 6;
> println!("{}", x + y);
> ```
To keep each code block testable, we want the whole program in each block, but
we don't want the reader to see every line every time. Here's what we put in
our source code:
```text
First, we set `x` to five:
```rust
let x = 5;
# let y = 6;
# println!("{}", x + y);
```
Next, we set `y` to six:
```rust
# let x = 5;
let y = 6;
# println!("{}", x + y);
```
Finally, we print the sum of `x` and `y`:
```rust
# let x = 5;
# let y = 6;
println!("{}", x + y);
```
```
By repeating all parts of the example, you can ensure that your example still
compiles, while only showing the parts that are relevant to that part of your
explanation.
Another case where the use of `#` is handy is when you want to ignore
error handling. Let's say you want the following:
```rust,ignore
/// use std::io;
/// let mut input = String::new();
/// io::stdin().read_line(&mut input)?;
```
The problem is that `?` returns a `Result<T, E>`, but test functions
don't return anything, so this will give a mismatched-types error.
```rust,ignore
/// A doc test using ?
///
/// ```
/// use std::io;
/// # fn foo() -> io::Result<()> {
/// let mut input = String::new();
/// io::stdin().read_line(&mut input)?;
/// # Ok(())
/// # }
/// ```
# fn foo() {}
```
You can get around this by wrapping the code in a function. This catches
and swallows the `Result<T, E>` when running tests on the docs. This
pattern appears regularly in the standard library.
### Documenting macros
Here's an example of documenting a macro:
```rust
/// Panic with a given message unless an expression evaluates to true.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// panic_unless!(1 + 1 == 2, “Math is broken.”);
/// # }
/// ```
///
/// ```rust,should_panic
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// panic_unless!(true == false, “I’m broken.”);
/// # }
/// ```
#[macro_export]
macro_rules! panic_unless {
($condition:expr, $($rest:expr),+) => ({ if ! $condition { panic!($($rest),+); } });
}
# fn main() {}
```
You'll note three things: first, we need to add our own `extern crate` line, so that
we can add the `#[macro_use]` attribute. Second, we'll need to add our own
`main()` as well (for reasons discussed above). Finally, a judicious use of
`#` comments out those two things, so they don't show up in the output.
## Attributes
There are a few annotations that are useful to help `rustdoc` do the right
thing when testing your code:
```rust
/// ```ignore
/// fn foo() {
/// ```
# fn foo() {}
```
The `ignore` directive tells Rust to ignore your code. This is almost never
what you want, as it's the most generic. Instead, consider annotating it
with `text` if it's not code, or using `#`s to get a working example that
only shows the part you care about.
```rust
/// ```should_panic
/// assert!(false);
/// ```
# fn foo() {}
```
`should_panic` tells `rustdoc` that the code should compile correctly, but
not actually pass as a test.
```rust
/// ```no_run
/// loop {
/// println!("Hello, world");
/// }
/// ```
# fn foo() {}
```
The `no_run` attribute will compile your code, but not run it. This is
important for examples such as "Here's how to retrieve a web page,"
which you would want to ensure compiles, but might be run in a test
environment that has no network access.

View File

@ -1,3 +0,0 @@
# In-source directives
Coming soon!

View File

@ -0,0 +1,178 @@
# The `#[doc]` attribute
The `#[doc]` attribute lets you control various aspects of how `rustdoc` does
its job.
The most basic function of `#[doc]` is to handle the actual documentation
text. That is, `///` is syntax sugar for `#[doc]`. This means that these two
are the same:
```rust,ignore
/// This is a doc comment.
#[doc = " This is a doc comment."]
```
(Note the leading space in the attribute version.)
In most cases, `///` is easier to use than `#[doc]`. One case where the latter is easier is
when generating documentation in macros; the `collapse-docs` pass will combine multiple
`#[doc]` attributes into a single doc comment, letting you generate code like this:
```rust,ignore
#[doc = "This is"]
#[doc = " a "]
#[doc = "doc comment"]
```
This can feel more flexible. Note that it would generate:
```rust,ignore
#[doc = "This is\n a \ndoc comment"]
```
but since the docs are rendered via Markdown, those newlines will be collapsed.
The `doc` attribute has more options though! These don't involve the text of
the output, but instead, various aspects of the presentation of the output.
We've split them into two kinds below: attributes that are useful at the
crate level, and ones that are useful at the item level.
## At the crate level
These options control how the docs look at the crate level.
### `html_favicon_url`
This form of the `doc` attribute lets you control the favicon of your docs.
```rust,ignore
#![doc(html_favicon_url = "https://example.com/favicon.ico")]
```
This will put `<link rel="shortcut icon" href="{}">` into your docs, where
the string for the attribute goes into the `{}`.
If you don't use this attribute, there will be no favicon.
### `html_logo_url`
This form of the `doc` attribute lets you control the logo in the upper
left hand side of the docs.
```rust,ignore
#![doc(html_logo_url = "https://example.com/logo.jpg")]
```
This will put `<a href='index.html'><img src='{}' alt='logo' width='100'></a>` into
your docs, where the string for the attribute goes into the `{}`.
If you don't use this attribute, there will be no logo.
### `html_playground_url`
This form of the `doc` attribute lets you control where the "run" buttons
on your documentation examples make requests to.
```rust,ignore
#![doc(html_playground_url = "https://playground.example.com/")]
```
Now, when you press "run", the button will make a request to this domain.
If you don't use this attribute, there will be no run buttons.
### `issue_tracker_base_url`
This form of the `doc` attribute is mostly useful for the standard library. When
a feature is unstable, an issue number for tracking the feature must be
given. `rustdoc` uses this number, plus the base URL given here, to link to
the tracking issue.
```rust,ignore
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
```
### `html_no_source`
By default, `rustdoc` will include the source code of your program, with links
to it in the docs. But if you include this:
```rust,ignore
#![doc(html_no_source)]
```
it will not.
## At the item level
These forms of the `#[doc]` attribute are used on individual items, to control how
they are documented.
### `#[doc(no_inline)]`/`#[doc(inline)]`
These attributes are used on `use` statements, and control where the documentation shows
up. For example, consider this Rust code:
```rust,ignore
pub use bar::Bar;
/// bar docs
pub mod bar {
/// the docs for Bar
pub struct Bar;
}
```
The documentation will generate a "Reexports" section, and say `pub use bar::Bar;`, where
`Bar` is a link to its page.
If we change the `use` line like this:
```rust,ignore
#[doc(inline)]
pub use bar::Bar;
```
Instead, `Bar` will appear in a `Structs` section, as if `Bar` were defined at the
top level rather than `pub use`'d.
Let's change our original example, by making `bar` private:
```rust,ignore
pub use bar::Bar;
/// bar docs
mod bar {
/// the docs for Bar
pub struct Bar;
}
```
Here, because `bar` is not public, `Bar` wouldn't have its own page, so there's nowhere
to link to. `rustdoc` will inline these definitions, and so we end up in the same case
as the `#[doc(inline)]` above; `Bar` is in a `Structs` section, as if it were defined at
the top level. If we add the `no_inline` form of the attribute:
```rust,ignore
#[doc(no_inline)]
pub use bar::Bar;
/// bar docs
mod bar {
/// the docs for Bar
pub struct Bar;
}
```
Now we'll have a `Reexports` line, and `Bar` will not link anywhere.
### `#[doc(hidden)]`
Any item annotated with `#[doc(hidden)]` will not appear in the documentation, unless
the `strip-hidden` pass is removed.
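For instance (a minimal sketch added here for illustration, not taken from the standard library), an implementation-detail module can be kept out of the rendered docs like this:

```rust,ignore
/// Public, documented API.
pub fn visible() {}

// Excluded from the rendered documentation unless the `strip-hidden` pass is removed.
#[doc(hidden)]
pub mod internal {
    pub fn helper() {}
}
```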
### `#[doc(primitive)]`
Since primitive types are defined in the compiler, there's no place to attach documentation
attributes. This attribute is used by the standard library to provide a way to generate
documentation for primitive types.
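As an illustration of the shape this takes (a paraphrased sketch, not an exact excerpt from the standard library), the docs for a primitive are attached to an empty module marked with the attribute:

```rust,ignore
#[doc(primitive = "bool")]
/// The boolean type.
///
/// (Documentation written here is rendered as the docs for the primitive `bool` type.)
mod prim_bool {}
```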

View File

@ -0,0 +1,42 @@
# `doc_cfg`
The tracking issue for this feature is: [#43781]
------
The `doc_cfg` feature allows an API to be documented as only available on some specific platforms.
This attribute has two effects:
1. In the annotated item's documentation, there will be a message saying "This is supported on
(platform) only".
2. The item's doc-tests will only run on the specific platform.
This feature was introduced as part of PR [#43348] to allow the platform-specific parts of the
standard library to be documented.
```rust
#![feature(doc_cfg)]
#[cfg(any(windows, feature = "documentation"))]
#[doc(cfg(windows))]
/// The application's icon in the notification area (a.k.a. system tray).
///
/// # Examples
///
/// ```no_run
/// extern crate my_awesome_ui_library;
/// use my_awesome_ui_library::current_app;
/// use my_awesome_ui_library::windows::notification;
///
/// let icon = current_app().get::<notification::Icon>();
/// icon.show();
/// icon.show_message("Hello");
/// ```
pub struct Icon {
// ...
}
```
[#43781]: https://github.com/rust-lang/rust/issues/43781
[#43348]: https://github.com/rust-lang/rust/issues/43348

View File

@ -40,7 +40,7 @@ fn size_align<T>() -> (usize, usize) {
///
/// (Note however that layouts are *not* required to have positive
/// size, even though many allocators require that all memory
/// requeusts have positive size. A caller to the `Alloc::alloc`
/// requests have positive size. A caller to the `Alloc::alloc`
/// method must either ensure that conditions like this are met, or
/// use specific allocators with looser requirements.)
#[derive(Clone, Debug, PartialEq, Eq)]
@ -240,7 +240,7 @@ impl Layout {
///
/// Returns `Some((k, offset))`, where `k` is layout of the concatenated
/// record and `offset` is the relative location, in bytes, of the
/// start of the `next` embedded witnin the concatenated record
/// start of the `next` embedded within the concatenated record
/// (assuming that the record itself starts at offset 0).
///
/// On arithmetic overflow, returns `None`.
@ -297,7 +297,7 @@ impl Layout {
///
/// Returns `(k, offset)`, where `k` is layout of the concatenated
/// record and `offset` is the relative location, in bytes, of the
/// start of the `next` embedded witnin the concatenated record
/// start of the `next` embedded within the concatenated record
/// (assuming that the record itself starts at offset 0).
///
/// (The `offset` is always the same as `self.size()`; we use this
@ -354,15 +354,19 @@ pub enum AllocErr {
}
impl AllocErr {
#[inline]
pub fn invalid_input(details: &'static str) -> Self {
AllocErr::Unsupported { details: details }
}
#[inline]
pub fn is_memory_exhausted(&self) -> bool {
if let AllocErr::Exhausted { .. } = *self { true } else { false }
}
#[inline]
pub fn is_request_unsupported(&self) -> bool {
if let AllocErr::Unsupported { .. } = *self { true } else { false }
}
#[inline]
pub fn description(&self) -> &str {
match *self {
AllocErr::Exhausted { .. } => "allocator memory exhausted",
@ -544,7 +548,7 @@ pub unsafe trait Alloc {
/// practice this means implementors should eschew allocating,
/// especially from `self` (directly or indirectly).
///
/// Implementions of the allocation and reallocation methods
/// Implementations of the allocation and reallocation methods
/// (e.g. `alloc`, `alloc_one`, `realloc`) are discouraged from
/// panicking (or aborting) in the event of memory exhaustion;
/// instead they should return an appropriate error from the

View File

@ -132,7 +132,7 @@ impl<K, V> InternalNode<K, V> {
/// An owned pointer to a node. This basically is either `Box<LeafNode<K, V>>` or
/// `Box<InternalNode<K, V>>`. However, it contains no information as to which of the two types
/// of nodes is acutally behind the box, and, partially due to this lack of information, has no
/// of nodes is actually behind the box, and, partially due to this lack of information, has no
/// destructor.
struct BoxedNode<K, V> {
ptr: Unique<LeafNode<K, V>>
@ -264,7 +264,7 @@ impl<K, V> Root<K, V> {
// correct variance.
/// A reference to a node.
///
/// This type has a number of paramaters that controls how it acts:
/// This type has a number of parameters that controls how it acts:
/// - `BorrowType`: This can be `Immut<'a>` or `Mut<'a>` for some `'a` or `Owned`.
/// When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`,
/// when this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`,

View File

@ -10,16 +10,16 @@
//! Utilities for formatting and printing `String`s
//!
//! This module contains the runtime support for the `format!` syntax extension.
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The `format!` macro is intended to be familiar to those coming from C's
//! printf/fprintf functions or Python's `str.format` function.
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the `format!` extension are:
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
@ -67,7 +67,7 @@
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the `format!` macro is a syntax extension which allows it to
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
@ -75,7 +75,7 @@
//! identifier '=' expression
//! ```
//!
//! For example, the following `format!` expressions all use named argument:
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
@ -102,30 +102,30 @@
//!
//! If this syntax is used, then the number of characters to print precedes the
//! actual object being formatted, and the number of characters must have the
//! type `usize`.
//! type [`usize`].
//!
//! ## Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
//! well as `isize`). The current mapping of types to traits is:
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`](trait.Display.html)
//! * `?` ⇒ [`Debug`](trait.Debug.html)
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`](trait.Binary.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! `fmt::Binary` trait can then be formatted with `{:b}`. Implementations
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the `Display` trait.
//! then the format trait used is the [`Display`] trait.
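As a small illustration of that mapping (a sketch added here, not part of the original module docs; the `Flags` type is made up), implementing `fmt::Binary` is all it takes for a type to be formattable with `{:b}`:

```rust
use std::fmt;

struct Flags(u8);

// Implementing `fmt::Binary` lets `Flags` values be formatted with `{:b}`.
impl fmt::Binary for Flags {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the `u8` implementation of the same trait.
        fmt::Binary::fmt(&self.0, f)
    }
}

fn main() {
    assert_eq!(format!("{:b}", Flags(0b1010)), "1010");
}
```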
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
@ -144,15 +144,15 @@
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! `Formatter` struct. In order to help with this, the `Formatter` struct also
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is `fmt::Result` which is a
//! type alias of `Result<(), std::fmt::Error>`. Formatting implementations
//! should ensure that they propagate errors from the `Formatter` (e.g., when
//! calling `write!`) however, they should never return errors spuriously. That
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`][`Formatter`] (e.g., when
//! calling [`write!`]); however, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in `Formatter` returns an error. This is because, contrary to what
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
@ -209,12 +209,12 @@
//!
//! These two formatting traits have distinct purposes:
//!
//! - `fmt::Display` implementations assert that the type can be faithfully
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the `Display` trait.
//! - `fmt::Debug` implementations should be implemented for **all** public types.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the `Debug` trait is to facilitate debugging Rust code. In
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
@ -227,7 +227,7 @@
//!
//! ## Related macros
//!
//! There are a number of related macros in the `format!` family. The ones that
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
@ -241,11 +241,11 @@
//!
//! ### `write!`
//!
//! This and `writeln` are two macros which are used to emit the format string
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the `write_fmt` function defined on the
//! `std::io::Write` trait. Example usage is:
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
@ -256,7 +256,7 @@
//!
//! ### `print!`
//!
//! This and `println` emit their output to stdout. Similarly to the `write!`
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
@ -288,8 +288,8 @@
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the `format_args!` macro is a value of type `fmt::Arguments`.
//! This structure can then be passed to the `write` and `format` functions
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
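A minimal sketch of the `format_args!` pattern described above (the helper name `log` is illustrative, not from the patch):

```rust
use std::fmt;
use std::io::{self, Write};

// A user-defined sink that accepts an already-built `fmt::Arguments`,
// so no intermediate `String` is ever allocated.
fn log(args: fmt::Arguments) -> io::Result<()> {
    io::stdout().write_fmt(args)
}

fn main() {
    log(format_args!("pi is roughly {:.2}\n", 3.14159)).unwrap();
}
```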
@ -357,7 +357,7 @@
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the `Debug` formatting
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
@ -384,9 +384,9 @@
//! the `0` flag is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! The value for the width can also be provided as a `usize` in the list of
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by using the dollar syntax indicating that the second argument is
//! a `usize` specifying the width, for example:
//! a [`usize`] specifying the width, for example:
//!
//! ```
//! // All of these print "Hello x !"
@ -474,6 +474,29 @@
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! [`format!`]: ../../macro.format.html
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`println!`]: ../../std/macro.println.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
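To make the width-parameter and brace-escaping rules from this module doc concrete, here is a small sketch (the assertions are illustrative, not taken from the patch):

```rust
fn main() {
    // Width given inline, positionally ("1$"), and by name ("width$").
    assert_eq!(format!("{:5}!", "x"), "x    !");
    assert_eq!(format!("{:1$}!", "x", 5), "x    !");
    assert_eq!(format!("{:width$}!", "x", width = 5), "x    !");

    // Literal braces are escaped by doubling them.
    assert_eq!(format!("{{}} is escaped"), "{} is escaped");
}
```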
#![stable(feature = "rust1", since = "1.0.0")]
@ -498,10 +521,10 @@ pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
use string;
/// The `format` function takes an `Arguments` struct and returns the resulting
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The `Arguments` instance can be created with the `format_args!` macro.
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
@ -514,7 +537,7 @@ use string;
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`][format!] might be preferrable.
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
@ -522,7 +545,9 @@ use string;
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [format!]: ../macro.format.html
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments) -> string::String {
let capacity = args.estimated_capacity();

View File

@ -28,6 +28,7 @@ pub mod __core {
extern "Rust" {
#[allocator]
fn __rust_alloc(size: usize, align: usize, err: *mut u8) -> *mut u8;
#[cold]
fn __rust_oom(err: *const u8) -> !;
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
fn __rust_usable_size(layout: *const u8,
@ -81,6 +82,7 @@ unsafe impl Alloc for Heap {
}
#[inline]
#[cold]
fn oom(&mut self, err: AllocErr) -> ! {
unsafe {
__rust_oom(&err as *const AllocErr as *const u8)

View File

@ -8,14 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use allocator::{Alloc, Layout};
use core::ptr::{self, Unique};
use core::mem;
use core::slice;
use heap::Heap;
use super::boxed::Box;
use core::ops::Drop;
use core::cmp;
use core::mem;
use core::ops::Drop;
use core::ptr::{self, Unique};
use core::slice;
use heap::{Alloc, Layout, Heap};
use super::boxed::Box;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
@ -222,6 +221,20 @@ impl<T, A: Alloc> RawVec<T, A> {
&mut self.a
}
fn current_layout(&self) -> Option<Layout> {
if self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>() * self.cap;
Some(Layout::from_size_align_unchecked(size, align))
}
}
}
/// Doubles the size of the type's backing allocation. This is common enough
/// to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
@ -280,27 +293,40 @@ impl<T, A: Alloc> RawVec<T, A> {
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
let (new_cap, ptr_res) = if self.cap == 0 {
// skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
let ptr_res = self.a.alloc_array::<T>(new_cap);
(new_cap, ptr_res)
} else {
// Since we guarantee that we never allocate more than isize::MAX bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
let new_cap = 2 * self.cap;
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
let ptr_res = self.a.realloc_array(self.ptr, self.cap, new_cap);
(new_cap, ptr_res)
let (new_cap, uniq) = match self.current_layout() {
Some(cur) => {
// Since we guarantee that we never allocate more than
// isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
// a precondition, so this can't overflow. Additionally the
// alignment will never be too large as to "not be
// satisfiable", so `Layout::from_size_align` will always
// return `Some`.
//
// tl;dr; we bypass runtime checks due to dynamic assertions
// in this module, allowing us to use
// `from_size_align_unchecked`.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
let new_layout = Layout::from_size_align_unchecked(new_size, cur.align());
alloc_guard(new_size);
let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8,
cur,
new_layout);
match ptr_res {
Ok(ptr) => (new_cap, Unique::new_unchecked(ptr as *mut T)),
Err(e) => self.a.oom(e),
}
}
None => {
// skip to 4 because tiny Vec's are dumb; but not if that
// would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
match self.a.alloc_array::<T>(new_cap) {
Ok(ptr) => (new_cap, ptr),
Err(e) => self.a.oom(e),
}
}
};
// If allocate or reallocate fail, we'll get `null` back
let uniq = match ptr_res {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
};
self.ptr = uniq;
self.cap = new_cap;
}
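A standalone sketch of the doubling / amortized-growth arithmetic used by `double` above and by `amortized_new_size` further down in this file (the function name below is illustrative, not from the patch):

```rust
use std::cmp;

// Doubling gives amortized O(1) growth; taking the max with the requested
// capacity keeps a single large reservation from being rounded down.
// The real code also checks the resulting byte size against the isize::MAX
// limit via `alloc_guard`, so `cap * 2` cannot overflow there.
fn amortized_new_cap(cap: usize, used_cap: usize, needed_extra_cap: usize) -> usize {
    let required_cap = used_cap
        .checked_add(needed_extra_cap)
        .expect("capacity overflow");
    cmp::max(cap * 2, required_cap)
}

fn main() {
    assert_eq!(amortized_new_cap(4, 4, 1), 8);     // grow by doubling
    assert_eq!(amortized_new_cap(4, 4, 100), 104); // or straight to the request
}
```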
@ -323,21 +349,27 @@ impl<T, A: Alloc> RawVec<T, A> {
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false, // nothing to double
};
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
// Since we guarantee that we never allocate more than isize::MAX bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
// Since we guarantee that we never allocate more than isize::MAX
// bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
// this can't overflow.
//
// Similarly like with `double` above we can go straight to
// `Layout::from_size_align_unchecked` as we know this won't
// overflow and the alignment is sufficiently small.
let new_cap = 2 * self.cap;
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
let new_size = new_cap * elem_size;
alloc_guard(new_size);
let ptr = self.ptr() as *mut _;
let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
// We can't directly divide `size`.
@ -373,8 +405,6 @@ impl<T, A: Alloc> RawVec<T, A> {
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
@ -388,21 +418,22 @@ impl<T, A: Alloc> RawVec<T, A> {
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(new_alloc_size);
let result = if self.cap == 0 {
self.a.alloc_array::<T>(new_cap)
} else {
self.a.realloc_array(self.ptr, self.cap, new_cap)
let new_layout = match Layout::array::<T>(new_cap) {
Some(layout) => layout,
None => panic!("capacity overflow"),
};
// If allocate or reallocate fail, we'll get `null` back
let uniq = match result {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
alloc_guard(new_layout.size());
let res = match self.current_layout() {
Some(layout) => {
let old_ptr = self.ptr.as_ptr() as *mut u8;
self.a.realloc(old_ptr, layout, new_layout)
}
None => self.a.alloc(new_layout),
};
let uniq = match res {
Ok(ptr) => Unique::new_unchecked(ptr as *mut T),
Err(e) => self.a.oom(e),
};
self.ptr = uniq;
self.cap = new_cap;
}
@ -411,17 +442,14 @@ impl<T, A: Alloc> RawVec<T, A> {
/// Calculates the buffer's new size given that it'll hold `used_cap +
/// needed_extra_cap` elements. This logic is used in amortized reserve methods.
/// Returns `(new_capacity, new_alloc_size)`.
fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize, usize) {
let elem_size = mem::size_of::<T>();
fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> usize {
// Nothing we can really do about these checks :(
let required_cap = used_cap.checked_add(needed_extra_cap)
.expect("capacity overflow");
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
let double_cap = self.cap * 2;
// `double_cap` guarantees exponential growth.
let new_cap = cmp::max(double_cap, required_cap);
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
(new_cap, new_alloc_size)
cmp::max(double_cap, required_cap)
}
/// Ensures that the buffer contains at least enough space to hold
@ -489,21 +517,25 @@ impl<T, A: Alloc> RawVec<T, A> {
return;
}
let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap);
let new_layout = match Layout::array::<T>(new_cap) {
Some(layout) => layout,
None => panic!("capacity overflow"),
};
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
let result = if self.cap == 0 {
self.a.alloc_array::<T>(new_cap)
} else {
self.a.realloc_array(self.ptr, self.cap, new_cap)
alloc_guard(new_layout.size());
let res = match self.current_layout() {
Some(layout) => {
let old_ptr = self.ptr.as_ptr() as *mut u8;
self.a.realloc(old_ptr, layout, new_layout)
}
None => self.a.alloc(new_layout),
};
let uniq = match result {
Err(err) => self.a.oom(err),
Ok(uniq) => uniq,
let uniq = match res {
Ok(ptr) => Unique::new_unchecked(ptr as *mut T),
Err(e) => self.a.oom(e),
};
self.ptr = uniq;
self.cap = new_cap;
}
@ -536,21 +568,24 @@ impl<T, A: Alloc> RawVec<T, A> {
// Don't actually need any more capacity. If the current `cap` is 0, we can't
// reallocate in place.
// Wrapping in case they give a bad `used_cap`
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap || self.cap == 0 {
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false,
};
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return false;
}
let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap);
// Here, `cap < used_cap + needed_extra_cap <= new_cap`
// (regardless of whether `self.cap - used_cap` wrapped).
// Therefore we can safely call grow_in_place.
let ptr = self.ptr() as *mut _;
let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size());
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
self.cap = new_cap;
@ -599,9 +634,24 @@ impl<T, A: Alloc> RawVec<T, A> {
}
} else if self.cap != amount {
unsafe {
match self.a.realloc_array(self.ptr, self.cap, amount) {
// We know here that our `amount` is greater than zero. This
// implies, via the assert above, that capacity is also greater
// than zero, which means that we've got a current layout that
// "fits"
//
// We also know that `self.cap` is greater than `amount`, and
// consequently we don't need runtime checks for creating either
// layout
let old_size = elem_size * self.cap;
let new_size = elem_size * amount;
let align = mem::align_of::<T>();
let old_layout = Layout::from_size_align_unchecked(old_size, align);
let new_layout = Layout::from_size_align_unchecked(new_size, align);
match self.a.realloc(self.ptr.as_ptr() as *mut u8,
old_layout,
new_layout) {
Ok(p) => self.ptr = Unique::new_unchecked(p as *mut T),
Err(err) => self.a.oom(err),
Ok(uniq) => self.ptr = uniq,
}
}
self.cap = amount;
@ -631,10 +681,11 @@ impl<T, A: Alloc> RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 && self.cap != 0 {
let ptr = self.ptr() as *mut u8;
let layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
self.a.dealloc(ptr, layout);
if elem_size != 0 {
if let Some(layout) = self.current_layout() {
let ptr = self.ptr() as *mut u8;
self.a.dealloc(ptr, layout);
}
}
}
}

View File

@ -653,7 +653,7 @@ impl String {
/// * `capacity` needs to be the correct value.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal datastructures.
/// internal data structures.
///
/// The ownership of `ptr` is effectively transferred to the
/// `String` which may then deallocate, reallocate or change the

View File

@ -374,7 +374,7 @@ impl<T> Vec<T> {
/// * `capacity` needs to be the capacity that the pointer was allocated with.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal datastructures. For example it is **not** safe
/// internal data structures. For example it is **not** safe
/// to build a `Vec<u8>` from a pointer to a C `char` array and a `size_t`.
///
/// The ownership of `ptr` is effectively transferred to the

View File

@ -571,6 +571,59 @@ impl<T> RefCell<T> {
debug_assert!(self.borrow.get() == UNUSED);
unsafe { self.value.into_inner() }
}
/// Replaces the wrapped value with a new one, returning the old value,
/// without deinitializing either one.
///
/// This function corresponds to [`std::mem::replace`](../mem/fn.replace.html).
///
/// # Examples
///
/// ```
/// #![feature(refcell_replace_swap)]
/// use std::cell::RefCell;
/// let c = RefCell::new(5);
/// let u = c.replace(6);
/// assert_eq!(u, 5);
/// assert_eq!(c, RefCell::new(6));
/// ```
///
/// # Panics
///
/// This function will panic if the `RefCell` has any outstanding borrows,
/// whether or not they are full mutable borrows.
#[inline]
#[unstable(feature = "refcell_replace_swap", issue="43570")]
pub fn replace(&self, t: T) -> T {
mem::replace(&mut *self.borrow_mut(), t)
}
/// Swaps the wrapped value of `self` with the wrapped value of `other`,
/// without deinitializing either one.
///
/// This function corresponds to [`std::mem::swap`](../mem/fn.swap.html).
///
/// # Examples
///
/// ```
/// #![feature(refcell_replace_swap)]
/// use std::cell::RefCell;
/// let c = RefCell::new(5);
/// let d = RefCell::new(6);
/// c.swap(&d);
/// assert_eq!(c, RefCell::new(6));
/// assert_eq!(d, RefCell::new(5));
/// ```
///
/// # Panics
///
/// This function will panic if either `RefCell` has any outstanding borrows,
/// whether or not they are full mutable borrows.
#[inline]
#[unstable(feature = "refcell_replace_swap", issue="43570")]
pub fn swap(&self, other: &Self) {
mem::swap(&mut *self.borrow_mut(), &mut *other.borrow_mut())
}
}
impl<T: ?Sized> RefCell<T> {
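A small sketch of the panic condition these new `replace`/`swap` methods document. At the time of this patch they were nightly-only behind `refcell_replace_swap`; the sketch assumes a toolchain where `RefCell::replace` is available:

```rust
use std::cell::RefCell;

fn main() {
    let c = RefCell::new(5);

    let borrowed = c.borrow();   // an outstanding shared borrow
    // c.replace(6);             // would panic here: the cell is already borrowed
    drop(borrowed);

    assert_eq!(c.replace(6), 5); // fine once every borrow has been released
    assert_eq!(*c.borrow(), 6);
}
```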

View File

@ -1347,7 +1347,6 @@ impl<'a> Formatter<'a> {
/// println!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() });
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
#[inline]
pub fn debug_struct<'b>(&'b mut self, name: &str) -> DebugStruct<'b, 'a> {
builders::debug_struct_new(self, name)
}
@ -1375,7 +1374,6 @@ impl<'a> Formatter<'a> {
/// println!("{:?}", Foo(10, "Hello World".to_string()));
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
#[inline]
pub fn debug_tuple<'b>(&'b mut self, name: &str) -> DebugTuple<'b, 'a> {
builders::debug_tuple_new(self, name)
}
@ -1400,7 +1398,6 @@ impl<'a> Formatter<'a> {
/// println!("{:?}", Foo(vec![10, 11]));
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
#[inline]
pub fn debug_list<'b>(&'b mut self) -> DebugList<'b, 'a> {
builders::debug_list_new(self)
}
@ -1425,7 +1422,6 @@ impl<'a> Formatter<'a> {
/// println!("{:?}", Foo(vec![10, 11]));
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
#[inline]
pub fn debug_set<'b>(&'b mut self) -> DebugSet<'b, 'a> {
builders::debug_set_new(self)
}
@ -1450,7 +1446,6 @@ impl<'a> Formatter<'a> {
/// println!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)]));
/// ```
#[stable(feature = "debug_builders", since = "1.2.0")]
#[inline]
pub fn debug_map<'b>(&'b mut self) -> DebugMap<'b, 'a> {
builders::debug_map_new(self)
}
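These `debug_*` builders are typically called from a manual `Debug` impl; a minimal sketch (the `Point` type is illustrative, not from the patch):

```rust
use std::fmt;

struct Point { x: i32, y: i32 } // illustrative type, not from the patch

impl fmt::Debug for Point {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The builder drives all field formatting, including `{:#?}` pretty-printing.
        f.debug_struct("Point")
            .field("x", &self.x)
            .field("y", &self.y)
            .finish()
    }
}

fn main() {
    assert_eq!(format!("{:?}", Point { x: 10, y: 20 }), "Point { x: 10, y: 20 }");
}
```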

View File

@ -1044,20 +1044,23 @@ extern "rust-intrinsic" {
/// a size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter is set to `true`, so it will not be optimized out.
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
count: usize);
/// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
/// a size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter is set to `true`, so it will not be optimized out.
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
/// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
/// size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`.
///
/// The volatile parameter is set to `true`, so it will not be optimized out.
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
/// Perform a volatile load from the `src` pointer.
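These intrinsics are compiler-internal; as an illustration of what "volatile, so it will not be optimized out" means at the user level, here is a sketch using the stable `std::ptr` volatile wrappers rather than the intrinsics themselves:

```rust
use std::ptr;

fn main() {
    let mut slot: u8 = 0;
    unsafe {
        // The compiler must keep this store and this load, even though the
        // value is statically known.
        ptr::write_volatile(&mut slot, 0xAB);
        assert_eq!(ptr::read_volatile(&slot), 0xAB);
    }
}
```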

View File

@ -1035,7 +1035,7 @@ unsafe impl<A, B> TrustedLen for Zip<A, B>
/// Now consider this twist where we add a call to `rev`. This version will
/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed,
/// but the values of the counter still go in order. This is because `map()` is
/// still being called lazilly on each item, but we are popping items off the
/// still being called lazily on each item, but we are popping items off the
/// back of the vector now, instead of shifting them from the front.
///
/// ```rust

View File

@ -345,7 +345,7 @@ pub trait Extend<A> {
/// In a similar fashion to the [`Iterator`] protocol, once a
/// `DoubleEndedIterator` returns `None` from a `next_back()`, calling it again
/// may or may not ever return `Some` again. `next()` and `next_back()` are
/// interchangable for this purpose.
/// interchangeable for this purpose.
///
/// [`Iterator`]: trait.Iterator.html
///
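A minimal sketch of the front/back interchangeability described above, using a slice iterator (which happens to stay exhausted once either end returns `None`):

```rust
fn main() {
    let v = [1, 2, 3];
    let mut it = v.iter();

    assert_eq!(it.next(), Some(&1));      // take from the front
    assert_eq!(it.next_back(), Some(&3)); // take from the back
    assert_eq!(it.next(), Some(&2));

    // Both ends are now exhausted; for this iterator neither will yield again.
    assert_eq!(it.next_back(), None);
    assert_eq!(it.next(), None);
}
```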

View File

@ -336,7 +336,7 @@ pub fn algorithm_m<T: RawFloat>(f: &Big, e: i16) -> T {
round_by_remainder(v, rem, q, z)
}
/// Skip over most AlgorithmM iterations by checking the bit length.
/// Skip over most Algorithm M iterations by checking the bit length.
fn quick_start<T: RawFloat>(u: &mut Big, v: &mut Big, k: &mut i16) {
// The bit length is an estimate of the base two logarithm, and log(u / v) = log(u) - log(v).
// The estimate is off by at most 1, but always an under-estimate, so the error on log(u)

View File

@ -10,15 +10,20 @@
/// The addition operator `+`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory. For
/// example, [`std::time::SystemTime`] implements `Add<Duration>`, which permits
/// operations of the form `SystemTime = SystemTime + Duration`.
///
/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
///
/// # Examples
///
/// This example creates a `Point` struct that implements the `Add` trait, and
/// then demonstrates adding two `Point`s.
/// ## `Add`able points
///
/// ```
/// use std::ops::Add;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point {
/// x: i32,
/// y: i32,
@ -35,31 +40,25 @@
/// }
/// }
///
/// impl PartialEq for Point {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// fn main() {
/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
/// Point { x: 3, y: 3 });
/// }
/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
/// Point { x: 3, y: 3 });
/// ```
///
/// ## Implementing `Add` with generics
///
/// Here is an example of the same `Point` struct implementing the `Add` trait
/// using generics.
///
/// ```
/// use std::ops::Add;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point<T> {
/// x: T,
/// y: T,
/// }
///
/// // Notice that the implementation uses the `Output` associated type
/// // Notice that the implementation uses the associated type `Output`.
/// impl<T: Add<Output=T>> Add for Point<T> {
/// type Output = Point<T>;
///
@ -71,32 +70,18 @@
/// }
/// }
///
/// impl<T: PartialEq> PartialEq for Point<T> {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// fn main() {
/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
/// Point { x: 3, y: 3 });
/// }
/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
/// Point { x: 3, y: 3 });
/// ```
///
/// Note that `RHS = Self` by default, but this is not mandatory. For example,
/// [std::time::SystemTime] implements `Add<Duration>`, which permits
/// operations of the form `SystemTime = SystemTime + Duration`.
///
/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html
#[lang = "add"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} + {RHS}`"]
pub trait Add<RHS=Self> {
/// The resulting type after applying the `+` operator
/// The resulting type after applying the `+` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `+` operator
/// Performs the `+` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn add(self, rhs: RHS) -> Self::Output;
}
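The `SystemTime + Duration` point above can also be shown with a self-contained sketch of an `Add` impl whose `RHS` differs from `Self` (the types here are illustrative, not from the patch):

```rust
use std::ops::Add;

#[derive(Debug, PartialEq)]
struct Seconds(u64);

#[derive(Debug, PartialEq)]
struct Timestamp(u64);

// RHS is `Seconds`, not `Timestamp`: adding an offset to a point in time.
impl Add<Seconds> for Timestamp {
    type Output = Timestamp;

    fn add(self, rhs: Seconds) -> Timestamp {
        Timestamp(self.0 + rhs.0)
    }
}

fn main() {
    assert_eq!(Timestamp(100) + Seconds(20), Timestamp(120));
}
```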
@ -120,15 +105,20 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// The subtraction operator `-`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory. For
/// example, [`std::time::SystemTime`] implements `Sub<Duration>`, which permits
/// operations of the form `SystemTime = SystemTime - Duration`.
///
/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
///
/// # Examples
///
/// This example creates a `Point` struct that implements the `Sub` trait, and
/// then demonstrates subtracting two `Point`s.
/// ## `Sub`tractable points
///
/// ```
/// use std::ops::Sub;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point {
/// x: i32,
/// y: i32,
@ -145,31 +135,25 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// }
/// }
///
/// impl PartialEq for Point {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// fn main() {
/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 },
/// Point { x: 1, y: 0 });
/// }
/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 },
/// Point { x: 1, y: 0 });
/// ```
///
/// ## Implementing `Sub` with generics
///
/// Here is an example of the same `Point` struct implementing the `Sub` trait
/// using generics.
///
/// ```
/// use std::ops::Sub;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point<T> {
/// x: T,
/// y: T,
/// }
///
/// // Notice that the implementation uses the `Output` associated type
/// // Notice that the implementation uses the associated type `Output`.
/// impl<T: Sub<Output=T>> Sub for Point<T> {
/// type Output = Point<T>;
///
@ -181,32 +165,18 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// }
/// }
///
/// impl<T: PartialEq> PartialEq for Point<T> {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// fn main() {
/// assert_eq!(Point { x: 2, y: 3 } - Point { x: 1, y: 0 },
/// Point { x: 1, y: 3 });
/// }
/// assert_eq!(Point { x: 2, y: 3 } - Point { x: 1, y: 0 },
/// Point { x: 1, y: 3 });
/// ```
///
/// Note that `RHS = Self` by default, but this is not mandatory. For example,
/// [std::time::SystemTime] implements `Sub<Duration>`, which permits
/// operations of the form `SystemTime = SystemTime - Duration`.
///
/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html
#[lang = "sub"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} - {RHS}`"]
pub trait Sub<RHS=Self> {
/// The resulting type after applying the `-` operator
/// The resulting type after applying the `-` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `-` operator
/// Performs the `-` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn sub(self, rhs: RHS) -> Self::Output;
}
@ -230,17 +200,19 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// The multiplication operator `*`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// Implementing a `Mul`tipliable rational number struct:
/// ## `Mul`tipliable rational numbers
///
/// ```
/// use std::ops::Mul;
///
/// // The uniqueness of rational numbers in lowest terms is a consequence of
/// // the fundamental theorem of arithmetic.
/// #[derive(Eq)]
/// #[derive(PartialEq, Debug)]
/// // By the fundamental theorem of arithmetic, rational numbers in lowest
/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
/// // derive `Eq` and `PartialEq`.
/// #[derive(Debug, Eq, PartialEq)]
/// struct Rational {
/// nominator: usize,
/// denominator: usize,
@ -291,45 +263,37 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// Rational::new(1, 2));
/// ```
///
/// Note that `RHS = Self` by default, but this is not mandatory. Here is an
/// implementation which enables multiplication of vectors by scalars, as is
/// done in linear algebra.
/// ## Multiplying vectors by scalars as in linear algebra
///
/// ```
/// use std::ops::Mul;
///
/// struct Scalar {value: usize};
/// struct Scalar { value: usize }
///
/// #[derive(Debug)]
/// struct Vector {value: Vec<usize>};
/// #[derive(Debug, PartialEq)]
/// struct Vector { value: Vec<usize> }
///
/// impl Mul<Vector> for Scalar {
/// impl Mul<Scalar> for Vector {
/// type Output = Vector;
///
/// fn mul(self, rhs: Vector) -> Vector {
/// Vector {value: rhs.value.iter().map(|v| self.value * v).collect()}
/// fn mul(self, rhs: Scalar) -> Vector {
/// Vector { value: self.value.iter().map(|v| v * rhs.value).collect() }
/// }
/// }
///
/// impl PartialEq<Vector> for Vector {
/// fn eq(&self, other: &Self) -> bool {
/// self.value == other.value
/// }
/// }
///
/// let scalar = Scalar{value: 3};
/// let vector = Vector{value: vec![2, 4, 6]};
/// assert_eq!(scalar * vector, Vector{value: vec![6, 12, 18]});
/// let vector = Vector { value: vec![2, 4, 6] };
/// let scalar = Scalar { value: 3 };
/// assert_eq!(vector * scalar, Vector { value: vec![6, 12, 18] });
/// ```
#[lang = "mul"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} * {RHS}`"]
pub trait Mul<RHS=Self> {
/// The resulting type after applying the `*` operator
/// The resulting type after applying the `*` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `*` operator
/// Performs the `*` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn mul(self, rhs: RHS) -> Self::Output;
}
@ -353,17 +317,19 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// The division operator `/`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// Implementing a `Div`idable rational number struct:
/// ## `Div`idable rational numbers
///
/// ```
/// use std::ops::Div;
///
/// // The uniqueness of rational numbers in lowest terms is a consequence of
/// // the fundamental theorem of arithmetic.
/// #[derive(Eq)]
/// #[derive(PartialEq, Debug)]
/// // By the fundamental theorem of arithmetic, rational numbers in lowest
/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
/// // derive `Eq` and `PartialEq`.
/// #[derive(Debug, Eq, PartialEq)]
/// struct Rational {
/// nominator: usize,
/// denominator: usize,
@ -413,52 +379,42 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// x
/// }
///
/// fn main() {
/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4),
/// Rational::new(2, 3));
/// }
/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4),
/// Rational::new(2, 3));
/// ```
///
/// Note that `RHS = Self` by default, but this is not mandatory. Here is an
/// implementation which enables division of vectors by scalars, as is done in
/// linear algebra.
/// ## Dividing vectors by scalars as in linear algebra
///
/// ```
/// use std::ops::Div;
///
/// struct Scalar {value: f32};
/// struct Scalar { value: f32 }
///
/// #[derive(Debug)]
/// struct Vector {value: Vec<f32>};
/// #[derive(Debug, PartialEq)]
/// struct Vector { value: Vec<f32> }
///
/// impl Div<Scalar> for Vector {
/// type Output = Vector;
///
/// fn div(self, rhs: Scalar) -> Vector {
/// Vector {value: self.value.iter().map(|v| v / rhs.value).collect()}
/// Vector { value: self.value.iter().map(|v| v / rhs.value).collect() }
/// }
/// }
///
/// impl PartialEq<Vector> for Vector {
/// fn eq(&self, other: &Self) -> bool {
/// self.value == other.value
/// }
/// }
///
/// let scalar = Scalar{value: 2f32};
/// let vector = Vector{value: vec![2f32, 4f32, 6f32]};
/// assert_eq!(vector / scalar, Vector{value: vec![1f32, 2f32, 3f32]});
/// let scalar = Scalar { value: 2f32 };
/// let vector = Vector { value: vec![2f32, 4f32, 6f32] };
/// assert_eq!(vector / scalar, Vector { value: vec![1f32, 2f32, 3f32] });
/// ```
#[lang = "div"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} / {RHS}`"]
pub trait Div<RHS=Self> {
/// The resulting type after applying the `/` operator
/// The resulting type after applying the `/` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `/` operator
/// Performs the `/` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn div(self, rhs: RHS) -> Self::Output;
}
@ -499,6 +455,8 @@ div_impl_float! { f32 f64 }
/// The remainder operator `%`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is
@ -526,7 +484,7 @@ div_impl_float! { f32 f64 }
/// }
///
/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3,
/// // the remainder would be &[6, 7]
/// // the remainder would be &[6, 7].
/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3,
/// SplitSlice { slice: &[6, 7] });
/// ```
@ -534,11 +492,11 @@ div_impl_float! { f32 f64 }
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} % {RHS}`"]
pub trait Rem<RHS=Self> {
/// The resulting type after applying the `%` operator
/// The resulting type after applying the `%` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output = Self;
/// The method for the `%` operator
/// Performs the `%` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn rem(self, rhs: RHS) -> Self::Output;
}
@ -607,21 +565,21 @@ rem_impl_float! { f32 f64 }
/// }
/// }
///
/// // a negative positive is a negative
/// // A negative positive is a negative.
/// assert_eq!(-Sign::Positive, Sign::Negative);
/// // a double negative is a positive
/// // A double negative is a positive.
/// assert_eq!(-Sign::Negative, Sign::Positive);
/// // zero is its own negation
/// // Zero is its own negation.
/// assert_eq!(-Sign::Zero, Sign::Zero);
/// ```
#[lang = "neg"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Neg {
/// The resulting type after applying the `-` operator
/// The resulting type after applying the `-` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the unary `-` operator
/// Performs the unary `-` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn neg(self) -> Self::Output;
}
@ -668,7 +626,7 @@ neg_impl_numeric! { isize i8 i16 i32 i64 i128 f32 f64 }
/// ```
/// use std::ops::AddAssign;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point {
/// x: i32,
/// y: i32,
@ -683,12 +641,6 @@ neg_impl_numeric! { isize i8 i16 i32 i64 i128 f32 f64 }
/// }
/// }
///
/// impl PartialEq for Point {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// let mut point = Point { x: 1, y: 0 };
/// point += Point { x: 2, y: 3 };
/// assert_eq!(point, Point { x: 3, y: 3 });
@ -697,7 +649,7 @@ neg_impl_numeric! { isize i8 i16 i32 i64 i128 f32 f64 }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} += {Rhs}`"]
pub trait AddAssign<Rhs=Self> {
/// The method for the `+=` operator
/// Performs the `+=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn add_assign(&mut self, rhs: Rhs);
}
@ -725,7 +677,7 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// ```
/// use std::ops::SubAssign;
///
/// #[derive(Debug)]
/// #[derive(Debug, PartialEq)]
/// struct Point {
/// x: i32,
/// y: i32,
@ -740,12 +692,6 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
/// }
/// }
///
/// impl PartialEq for Point {
/// fn eq(&self, other: &Self) -> bool {
/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// let mut point = Point { x: 3, y: 3 };
/// point -= Point { x: 2, y: 3 };
/// assert_eq!(point, Point {x: 1, y: 0});
@ -754,7 +700,7 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} -= {Rhs}`"]
pub trait SubAssign<Rhs=Self> {
/// The method for the `-=` operator
/// Performs the `-=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn sub_assign(&mut self, rhs: Rhs);
}
@ -776,31 +722,27 @@ sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
///
/// # Examples
///
/// A trivial implementation of `MulAssign`. When `Foo *= Foo` happens, it ends up
/// calling `mul_assign`, and therefore, `main` prints `Multiplying!`.
///
/// ```
/// use std::ops::MulAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct Frequency { hertz: f64 }
///
/// impl MulAssign for Foo {
/// fn mul_assign(&mut self, _rhs: Foo) {
/// println!("Multiplying!");
/// impl MulAssign<f64> for Frequency {
/// fn mul_assign(&mut self, rhs: f64) {
/// self.hertz *= rhs;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo *= Foo;
/// }
/// let mut frequency = Frequency { hertz: 50.0 };
/// frequency *= 4.0;
/// assert_eq!(Frequency { hertz: 200.0 }, frequency);
/// ```
#[lang = "mul_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} *= {Rhs}`"]
pub trait MulAssign<Rhs=Self> {
/// The method for the `*=` operator
/// Performs the `*=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn mul_assign(&mut self, rhs: Rhs);
}
@ -822,31 +764,27 @@ mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
///
/// # Examples
///
/// A trivial implementation of `DivAssign`. When `Foo /= Foo` happens, it ends up
/// calling `div_assign`, and therefore, `main` prints `Dividing!`.
///
/// ```
/// use std::ops::DivAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct Frequency { hertz: f64 }
///
/// impl DivAssign for Foo {
/// fn div_assign(&mut self, _rhs: Foo) {
/// println!("Dividing!");
/// impl DivAssign<f64> for Frequency {
/// fn div_assign(&mut self, rhs: f64) {
/// self.hertz /= rhs;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo /= Foo;
/// }
/// let mut frequency = Frequency { hertz: 200.0 };
/// frequency /= 4.0;
/// assert_eq!(Frequency { hertz: 50.0 }, frequency);
/// ```
#[lang = "div_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} /= {Rhs}`"]
pub trait DivAssign<Rhs=Self> {
/// The method for the `/=` operator
/// Performs the `/=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn div_assign(&mut self, rhs: Rhs);
}
@ -867,31 +805,31 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
///
/// # Examples
///
/// A trivial implementation of `RemAssign`. When `Foo %= Foo` happens, it ends up
/// calling `rem_assign`, and therefore, `main` prints `Remainder-ing!`.
///
/// ```
/// use std::ops::RemAssign;
///
/// struct Foo;
/// struct CookieJar { cookies: u32 }
///
/// impl RemAssign for Foo {
/// fn rem_assign(&mut self, _rhs: Foo) {
/// println!("Remainder-ing!");
/// impl RemAssign<u32> for CookieJar {
/// fn rem_assign(&mut self, piles: u32) {
/// self.cookies %= piles;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo %= Foo;
/// }
/// let mut jar = CookieJar { cookies: 31 };
/// let piles = 4;
///
/// println!("Splitting up {} cookies into {} even piles!", jar.cookies, piles);
///
/// jar %= piles;
///
/// println!("{} cookies remain in the cookie jar!", jar.cookies);
/// ```
#[lang = "rem_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} %= {Rhs}`"]
pub trait RemAssign<Rhs=Self> {
/// The method for the `%=` operator
/// Performs the `%=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn rem_assign(&mut self, rhs: Rhs);
}

View File

@ -41,11 +41,11 @@
#[lang = "not"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Not {
/// The resulting type after applying the `!` operator
/// The resulting type after applying the `!` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the unary `!` operator
/// Performs the unary `!` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn not(self) -> Self::Output;
}
@ -68,9 +68,11 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// The bitwise AND operator `&`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// In this example, the `&` operator is lifted to a trivial `Scalar` type.
/// An implementation of `BitAnd` for a wrapper around `bool`.
///
/// ```
/// use std::ops::BitAnd;
@ -87,16 +89,13 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true));
/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false));
/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false));
/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false));
/// }
/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true));
/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false));
/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false));
/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false));
/// ```
///
/// In this example, the `BitAnd` trait is implemented for a `BooleanVector`
/// struct.
/// An implementation of `BitAnd` for a wrapper around `Vec<bool>`.
///
/// ```
/// use std::ops::BitAnd;
@ -114,22 +113,20 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, false, false, false]);
/// assert_eq!(bv1 & bv2, expected);
/// }
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, false, false, false]);
/// assert_eq!(bv1 & bv2, expected);
/// ```
#[lang = "bitand"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} & {RHS}`"]
pub trait BitAnd<RHS=Self> {
/// The resulting type after applying the `&` operator
/// The resulting type after applying the `&` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `&` operator
/// Performs the `&` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn bitand(self, rhs: RHS) -> Self::Output;
}
@ -152,9 +149,11 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// The bitwise OR operator `|`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// In this example, the `|` operator is lifted to a trivial `Scalar` type.
/// An implementation of `BitOr` for a wrapper around `bool`.
///
/// ```
/// use std::ops::BitOr;
@ -171,16 +170,13 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true));
/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true));
/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true));
/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false));
/// }
/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true));
/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true));
/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true));
/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false));
/// ```
///
/// In this example, the `BitOr` trait is implemented for a `BooleanVector`
/// struct.
/// An implementation of `BitOr` for a wrapper around `Vec<bool>`.
///
/// ```
/// use std::ops::BitOr;
@ -198,22 +194,20 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, true, true, false]);
/// assert_eq!(bv1 | bv2, expected);
/// }
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, true, true, false]);
/// assert_eq!(bv1 | bv2, expected);
/// ```
#[lang = "bitor"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} | {RHS}`"]
pub trait BitOr<RHS=Self> {
/// The resulting type after applying the `|` operator
/// The resulting type after applying the `|` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `|` operator
/// Performs the `|` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn bitor(self, rhs: RHS) -> Self::Output;
}
@ -236,9 +230,11 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// The bitwise XOR operator `^`.
///
/// Note that `RHS` is `Self` by default, but this is not mandatory.
///
/// # Examples
///
/// In this example, the `^` operator is lifted to a trivial `Scalar` type.
/// An implementation of `BitXor` that lifts `^` to a wrapper around `bool`.
///
/// ```
/// use std::ops::BitXor;
@ -255,16 +251,13 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false));
/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true));
/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true));
/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false));
/// }
/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false));
/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true));
/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true));
/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false));
/// ```
///
/// In this example, the `BitXor` trait is implemented for a `BooleanVector`
/// struct.
/// An implementation of the `BitXor` trait for a wrapper around `Vec<bool>`.
///
/// ```
/// use std::ops::BitXor;
@ -285,22 +278,20 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![false, true, true, false]);
/// assert_eq!(bv1 ^ bv2, expected);
/// }
/// let bv1 = BooleanVector(vec![true, true, false, false]);
/// let bv2 = BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![false, true, true, false]);
/// assert_eq!(bv1 ^ bv2, expected);
/// ```
#[lang = "bitxor"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} ^ {RHS}`"]
pub trait BitXor<RHS=Self> {
/// The resulting type after applying the `^` operator
/// The resulting type after applying the `^` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `^` operator
/// Performs the `^` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn bitxor(self, rhs: RHS) -> Self::Output;
}
@ -326,7 +317,7 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// # Examples
///
/// An implementation of `Shl` that lifts the `<<` operation on integers to a
/// `Scalar` struct.
/// wrapper around `usize`.
///
/// ```
/// use std::ops::Shl;
@ -342,9 +333,8 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// Scalar(lhs << rhs)
/// }
/// }
/// fn main() {
/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16));
/// }
///
/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16));
/// ```
///
/// An implementation of `Shl` that spins a vector leftward by a given amount.
@ -361,7 +351,7 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// type Output = Self;
///
/// fn shl(self, rhs: usize) -> SpinVector<T> {
/// // rotate the vector by `rhs` places
/// // Rotate the vector by `rhs` places.
/// let (a, b) = self.vec.split_at(rhs);
/// let mut spun_vector: Vec<T> = vec![];
/// spun_vector.extend_from_slice(b);
@ -370,20 +360,18 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// }
/// }
///
/// fn main() {
/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2,
/// SpinVector { vec: vec![2, 3, 4, 0, 1] });
/// }
/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2,
/// SpinVector { vec: vec![2, 3, 4, 0, 1] });
/// ```
#[lang = "shl"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} << {RHS}`"]
pub trait Shl<RHS> {
/// The resulting type after applying the `<<` operator
/// The resulting type after applying the `<<` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `<<` operator
/// Performs the `<<` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn shl(self, rhs: RHS) -> Self::Output;
}
@ -430,7 +418,7 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 }
/// # Examples
///
/// An implementation of `Shr` that lifts the `>>` operation on integers to a
/// `Scalar` struct.
/// wrapper around `usize`.
///
/// ```
/// use std::ops::Shr;
@ -446,9 +434,8 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 }
/// Scalar(lhs >> rhs)
/// }
/// }
/// fn main() {
/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4));
/// }
///
/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4));
/// ```
///
/// An implementation of `Shr` that spins a vector rightward by a given amount.
@ -465,7 +452,7 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 }
/// type Output = Self;
///
/// fn shr(self, rhs: usize) -> SpinVector<T> {
/// // rotate the vector by `rhs` places
/// // Rotate the vector by `rhs` places.
/// let (a, b) = self.vec.split_at(self.vec.len() - rhs);
/// let mut spun_vector: Vec<T> = vec![];
/// spun_vector.extend_from_slice(b);
@ -474,20 +461,18 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 }
/// }
/// }
///
/// fn main() {
/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2,
/// SpinVector { vec: vec![3, 4, 0, 1, 2] });
/// }
/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2,
/// SpinVector { vec: vec![3, 4, 0, 1, 2] });
/// ```
#[lang = "shr"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} >> {RHS}`"]
pub trait Shr<RHS> {
/// The resulting type after applying the `>>` operator
/// The resulting type after applying the `>>` operator.
#[stable(feature = "rust1", since = "1.0.0")]
type Output;
/// The method for the `>>` operator
/// Performs the `>>` operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn shr(self, rhs: RHS) -> Self::Output;
}
@ -533,7 +518,8 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
///
/// # Examples
///
/// In this example, the `&=` operator is lifted to a trivial `Scalar` type.
/// An implementation of `BitAndAssign` that lifts the `&=` operator to a
/// wrapper around `bool`.
///
/// ```
/// use std::ops::BitAndAssign;
@ -548,27 +534,25 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
/// }
/// }
///
/// fn main() {
/// let mut scalar = Scalar(true);
/// scalar &= Scalar(true);
/// assert_eq!(scalar, Scalar(true));
/// let mut scalar = Scalar(true);
/// scalar &= Scalar(true);
/// assert_eq!(scalar, Scalar(true));
///
/// let mut scalar = Scalar(true);
/// scalar &= Scalar(false);
/// assert_eq!(scalar, Scalar(false));
/// let mut scalar = Scalar(true);
/// scalar &= Scalar(false);
/// assert_eq!(scalar, Scalar(false));
///
/// let mut scalar = Scalar(false);
/// scalar &= Scalar(true);
/// assert_eq!(scalar, Scalar(false));
/// let mut scalar = Scalar(false);
/// scalar &= Scalar(true);
/// assert_eq!(scalar, Scalar(false));
///
/// let mut scalar = Scalar(false);
/// scalar &= Scalar(false);
/// assert_eq!(scalar, Scalar(false));
/// }
/// let mut scalar = Scalar(false);
/// scalar &= Scalar(false);
/// assert_eq!(scalar, Scalar(false));
/// ```
///
/// In this example, the `BitAndAssign` trait is implemented for a
/// `BooleanVector` struct.
/// Here, the `BitAndAssign` trait is implemented for a wrapper around
/// `Vec<bool>`.
///
/// ```
/// use std::ops::BitAndAssign;
@ -577,7 +561,7 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
/// struct BooleanVector(Vec<bool>);
///
/// impl BitAndAssign for BooleanVector {
/// // rhs is the "right-hand side" of the expression `a &= b`
/// // `rhs` is the "right-hand side" of the expression `a &= b`.
/// fn bitand_assign(&mut self, rhs: Self) {
/// assert_eq!(self.0.len(), rhs.0.len());
/// *self = BooleanVector(self.0
@ -588,18 +572,16 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
/// }
/// }
///
/// fn main() {
/// let mut bv = BooleanVector(vec![true, true, false, false]);
/// bv &= BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, false, false, false]);
/// assert_eq!(bv, expected);
/// }
/// let mut bv = BooleanVector(vec![true, true, false, false]);
/// bv &= BooleanVector(vec![true, false, true, false]);
/// let expected = BooleanVector(vec![true, false, false, false]);
/// assert_eq!(bv, expected);
/// ```
#[lang = "bitand_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} &= {Rhs}`"]
pub trait BitAndAssign<Rhs=Self> {
/// The method for the `&=` operator
/// Performs the `&=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn bitand_assign(&mut self, rhs: Rhs);
}
@ -620,31 +602,31 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
///
/// # Examples
///
/// A trivial implementation of `BitOrAssign`. When `Foo |= Foo` happens, it ends up
/// calling `bitor_assign`, and therefore, `main` prints `Bitwise Or-ing!`.
///
/// ```
/// use std::ops::BitOrAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct PersonalPreferences {
/// likes_cats: bool,
/// likes_dogs: bool,
/// }
///
/// impl BitOrAssign for Foo {
/// fn bitor_assign(&mut self, _rhs: Foo) {
/// println!("Bitwise Or-ing!");
/// impl BitOrAssign for PersonalPreferences {
/// fn bitor_assign(&mut self, rhs: Self) {
/// self.likes_cats |= rhs.likes_cats;
/// self.likes_dogs |= rhs.likes_dogs;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo |= Foo;
/// }
/// let mut prefs = PersonalPreferences { likes_cats: true, likes_dogs: false };
/// prefs |= PersonalPreferences { likes_cats: false, likes_dogs: true };
/// assert_eq!(prefs, PersonalPreferences { likes_cats: true, likes_dogs: true });
/// ```
#[lang = "bitor_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} |= {Rhs}`"]
pub trait BitOrAssign<Rhs=Self> {
/// The method for the `|=` operator
/// Performs the `|=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn bitor_assign(&mut self, rhs: Rhs);
}
@ -665,31 +647,31 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
///
/// # Examples
///
/// A trivial implementation of `BitXorAssign`. When `Foo ^= Foo` happens, it ends up
/// calling `bitxor_assign`, and therefore, `main` prints `Bitwise Xor-ing!`.
///
/// ```
/// use std::ops::BitXorAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct Personality {
/// has_soul: bool,
/// likes_knitting: bool,
/// }
///
/// impl BitXorAssign for Foo {
/// fn bitxor_assign(&mut self, _rhs: Foo) {
/// println!("Bitwise Xor-ing!");
/// impl BitXorAssign for Personality {
/// fn bitxor_assign(&mut self, rhs: Self) {
/// self.has_soul ^= rhs.has_soul;
/// self.likes_knitting ^= rhs.likes_knitting;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo ^= Foo;
/// }
/// let mut personality = Personality { has_soul: false, likes_knitting: true };
/// personality ^= Personality { has_soul: true, likes_knitting: true };
/// assert_eq!(personality, Personality { has_soul: true, likes_knitting: false});
/// ```
#[lang = "bitxor_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} ^= {Rhs}`"]
pub trait BitXorAssign<Rhs=Self> {
/// The method for the `^=` operator
/// Performs the `^=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn bitxor_assign(&mut self, rhs: Rhs);
}
@ -710,31 +692,29 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
///
/// # Examples
///
/// A trivial implementation of `ShlAssign`. When `Foo <<= Foo` happens, it ends up
/// calling `shl_assign`, and therefore, `main` prints `Shifting left!`.
/// An implementation of `ShlAssign` for a wrapper around `usize`.
///
/// ```
/// use std::ops::ShlAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct Scalar(usize);
///
/// impl ShlAssign<Foo> for Foo {
/// fn shl_assign(&mut self, _rhs: Foo) {
/// println!("Shifting left!");
/// impl ShlAssign<usize> for Scalar {
/// fn shl_assign(&mut self, rhs: usize) {
/// self.0 <<= rhs;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo <<= Foo;
/// }
/// let mut scalar = Scalar(4);
/// scalar <<= 2;
/// assert_eq!(scalar, Scalar(16));
/// ```
#[lang = "shl_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} <<= {Rhs}`"]
pub trait ShlAssign<Rhs> {
/// The method for the `<<=` operator
/// Performs the `<<=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn shl_assign(&mut self, rhs: Rhs);
}
@ -776,31 +756,29 @@ shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
///
/// # Examples
///
/// A trivial implementation of `ShrAssign`. When `Foo >>= Foo` happens, it ends up
/// calling `shr_assign`, and therefore, `main` prints `Shifting right!`.
/// An implementation of `ShrAssign` for a wrapper around `usize`.
///
/// ```
/// use std::ops::ShrAssign;
///
/// struct Foo;
/// #[derive(Debug, PartialEq)]
/// struct Scalar(usize);
///
/// impl ShrAssign<Foo> for Foo {
/// fn shr_assign(&mut self, _rhs: Foo) {
/// println!("Shifting right!");
/// impl ShrAssign<usize> for Scalar {
/// fn shr_assign(&mut self, rhs: usize) {
/// self.0 >>= rhs;
/// }
/// }
///
/// # #[allow(unused_assignments)]
/// fn main() {
/// let mut foo = Foo;
/// foo >>= Foo;
/// }
/// let mut scalar = Scalar(16);
/// scalar >>= 2;
/// assert_eq!(scalar, Scalar(4));
/// ```
#[lang = "shr_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
#[rustc_on_unimplemented = "no implementation for `{Self} >>= {Rhs}`"]
pub trait ShrAssign<Rhs=Self> {
/// The method for the `>>=` operator
/// Performs the `>>=` operation.
#[stable(feature = "op_assign_traits", since = "1.8.0")]
fn shr_assign(&mut self, rhs: Rhs);
}

View File

@ -8,16 +8,44 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The `Deref` trait is used to specify the functionality of dereferencing
/// operations, like `*v`.
/// Used for immutable dereferencing operations, like `*v`.
///
/// `Deref` also enables ['`Deref` coercions'][coercions].
/// In addition to being used for explicit dereferencing operations with the
/// (unary) `*` operator in immutable contexts, `Deref` is also used implicitly
/// by the compiler in many circumstances. This mechanism is called
/// ['`Deref` coercion'][more]. In mutable contexts, [`DerefMut`] is used.
///
/// [coercions]: ../../book/first-edition/deref-coercions.html
/// Implementing `Deref` for smart pointers makes accessing the data behind them
/// convenient, which is why they implement `Deref`. On the other hand, the
/// rules regarding `Deref` and [`DerefMut`] were designed specifically to
/// accommodate smart pointers. Because of this, **`Deref` should only be
/// implemented for smart pointers** to avoid confusion.
///
/// For similar reasons, **this trait should never fail**. Failure during
/// dereferencing can be extremely confusing when `Deref` is invoked implicitly.
///
/// # More on `Deref` coercion
///
/// If `T` implements `Deref<Target = U>`, and `x` is a value of type `T`, then:
/// * In immutable contexts, `*x` on non-pointer types is equivalent to
/// `*Deref::deref(&x)`.
/// * Values of type `&T` are coerced to values of type `&U`
/// * `T` implicitly implements all the (immutable) methods of the type `U`.
///
/// For more details, visit [the chapter in *The Rust Programming Language*]
/// [book] as well as the reference sections on [the dereference operator]
/// [ref-deref-op], [the `Deref` trait][ref-deref-trait], and [type coercions].
///
/// [book]: ../../book/second-edition/ch15-02-deref.html
/// [`DerefMut`]: trait.DerefMut.html
/// [more]: #more-on-deref-coercion
/// [ref-deref-op]: ../../reference/expressions.html#the-dereference-operator
/// [ref-deref-trait]: ../../reference/the-deref-trait.html
/// [type coercions]: ../../reference/type-coercions.html
///
/// # Examples
///
/// A struct with a single field which is accessible via dereferencing the
/// A struct with a single field which is accessible by dereferencing the
/// struct.
///
/// ```
@ -35,19 +63,17 @@
/// }
/// }
///
/// fn main() {
/// let x = DerefExample { value: 'a' };
/// assert_eq!('a', *x);
/// }
/// let x = DerefExample { value: 'a' };
/// assert_eq!('a', *x);
/// ```
#[lang = "deref"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Deref {
/// The resulting type after dereferencing
/// The resulting type after dereferencing.
#[stable(feature = "rust1", since = "1.0.0")]
type Target: ?Sized;
/// The method called to dereference a value
/// Dereferences the value.
#[stable(feature = "rust1", since = "1.0.0")]
fn deref(&self) -> &Self::Target;
}
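
To make the coercion rules above concrete, here is a minimal, self-contained sketch (the helper `take_str` is invented for illustration); it relies only on the standard `Box<T>: Deref<Target = T>` and `String: Deref<Target = str>` impls:

```rust
fn take_str(s: &str) -> usize {
    s.len()
}

fn main() {
    let boxed: Box<String> = Box::new(String::from("coerce me"));

    // `&Box<String>` coerces to `&String`, which coerces to `&str`,
    // because deref coercion applies repeatedly.
    assert_eq!(take_str(&boxed), 9);

    // Methods of `str` are also reachable through the smart pointer:
    // this call is effectively `str::to_uppercase(&*boxed)`.
    assert_eq!(boxed.to_uppercase(), "COERCE ME");
}
```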
@ -66,16 +92,46 @@ impl<'a, T: ?Sized> Deref for &'a mut T {
fn deref(&self) -> &T { *self }
}
/// The `DerefMut` trait is used to specify the functionality of dereferencing
/// mutably like `*v = 1;`
/// Used for mutable dereferencing operations, like in `*v = 1;`.
///
/// `DerefMut` also enables ['`Deref` coercions'][coercions].
/// In addition to being used for explicit dereferencing operations with the
/// (unary) `*` operator in mutable contexts, `DerefMut` is also used implicitly
/// by the compiler in many circumstances. This mechanism is called
/// ['`Deref` coercion'][more]. In immutable contexts, [`Deref`] is used.
///
/// [coercions]: ../../book/first-edition/deref-coercions.html
/// Implementing `DerefMut` for smart pointers makes mutating the data behind
/// them convenient, which is why they implement `DerefMut`. On the other hand,
/// the rules regarding [`Deref`] and `DerefMut` were designed specifically to
/// accommodate smart pointers. Because of this, **`DerefMut` should only be
/// implemented for smart pointers** to avoid confusion.
///
/// For similar reasons, **this trait should never fail**. Failure during
/// dereferencing can be extremely confusing when `DerefMut` is invoked
/// implicitly.
///
/// # More on `Deref` coercion
///
/// If `T` implements `DerefMut<Target = U>`, and `x` is a value of type `T`,
/// then:
/// * In mutable contexts, `*x` on non-pointer types is equivalent to
/// `*DerefMut::deref_mut(&mut x)`.
/// * Values of type `&mut T` are coerced to values of type `&mut U`
/// * `T` implicitly implements all the (mutable) methods of the type `U`.
///
/// For more details, visit [the chapter in *The Rust Programming Language*]
/// [book] as well as the reference sections on [the dereference operator]
/// [ref-deref-op], [the `Deref` trait][ref-deref-trait], and [type coercions].
///
/// [book]: ../../book/second-edition/ch15-02-deref.html
/// [`Deref`]: trait.Deref.html
/// [more]: #more-on-deref-coercion
/// [ref-deref-op]: ../../reference/expressions.html#the-dereference-operator
/// [ref-deref-trait]: ../../reference/the-deref-trait.html
/// [type coercions]: ../../reference/type-coercions.html
///
/// # Examples
///
/// A struct with a single field which is modifiable via dereferencing the
/// A struct with a single field which is modifiable by dereferencing the
/// struct.
///
/// ```
@ -99,16 +155,14 @@ impl<'a, T: ?Sized> Deref for &'a mut T {
/// }
/// }
///
/// fn main() {
/// let mut x = DerefMutExample { value: 'a' };
/// *x = 'b';
/// assert_eq!('b', *x);
/// }
/// let mut x = DerefMutExample { value: 'a' };
/// *x = 'b';
/// assert_eq!('b', *x);
/// ```
#[lang = "deref_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait DerefMut: Deref {
/// The method called to mutably dereference a value
/// Mutably dereferences the value.
#[stable(feature = "rust1", since = "1.0.0")]
fn deref_mut(&mut self) -> &mut Self::Target;
}
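
A matching sketch for the mutable case (the helper `clear_it` is invented for illustration), again relying only on `Box<String>: DerefMut<Target = String>`:

```rust
fn clear_it(s: &mut String) {
    s.clear();
}

fn main() {
    let mut boxed: Box<String> = Box::new(String::from("mutate me"));

    // `&mut Box<String>` coerces to `&mut String`.
    clear_it(&mut boxed);
    assert!(boxed.is_empty());

    // Explicit mutable dereferencing also goes through `DerefMut`:
    // `*boxed = ...` is `*DerefMut::deref_mut(&mut boxed) = ...`.
    *boxed = String::from("replaced");
    assert_eq!(*boxed, "replaced");
}
```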

View File

@ -8,20 +8,27 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The `Drop` trait is used to run some code when a value goes out of scope.
/// Used to run some code when a value goes out of scope.
/// This is sometimes called a 'destructor'.
///
/// When a value goes out of scope, if it implements this trait, it will have
/// its `drop` method called. Then any fields the value contains will also
/// When a value goes out of scope, it will have its `drop` method called if
/// its type implements `Drop`. Then, any fields the value contains will also
/// be dropped recursively.
///
/// Because of the recursive dropping, you do not need to implement this trait
/// Because of this recursive dropping, you do not need to implement this trait
/// unless your type needs its own destructor logic.
///
/// Refer to [the chapter on `Drop` in *The Rust Programming Language*][book]
/// for some more elaboration.
///
/// [book]: ../../book/second-edition/ch15-03-drop.html
///
/// # Examples
///
/// A trivial implementation of `Drop`. The `drop` method is called when `_x`
/// goes out of scope, and therefore `main` prints `Dropping!`.
/// ## Implementing `Drop`
///
/// The `drop` method is called when `_x` goes out of scope, and therefore
/// `main` prints `Dropping!`.
///
/// ```
/// struct HasDrop;
@ -37,9 +44,11 @@
/// }
/// ```
///
/// Showing the recursive nature of `Drop`. When `outer` goes out of scope, the
/// `drop` method will be called first for `Outer`, then for `Inner`. Therefore
/// `main` prints `Dropping Outer!` and then `Dropping Inner!`.
/// ## Dropping is done recursively
///
/// When `outer` goes out of scope, the `drop` method will be called first for
/// `Outer`, then for `Inner`. Therefore, `main` prints `Dropping Outer!` and
/// then `Dropping Inner!`.
///
/// ```
/// struct Inner;
@ -62,12 +71,20 @@
/// }
/// ```
///
/// Because variables are dropped in the reverse order they are declared,
/// `main` will print `Declared second!` and then `Declared first!`.
/// ## Variables are dropped in reverse order of declaration
///
/// `_first` is declared first and `_second` is declared second, so `main` will
/// print `Declared second!` and then `Declared first!`.
///
/// ```
/// struct PrintOnDrop(&'static str);
///
/// impl Drop for PrintOnDrop {
/// fn drop(&mut self) {
/// println!("{}", self.0);
/// }
/// }
///
/// fn main() {
/// let _first = PrintOnDrop("Declared first!");
/// let _second = PrintOnDrop("Declared second!");
@ -76,24 +93,25 @@
#[lang = "drop"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Drop {
/// A method called when the value goes out of scope.
/// Executes the destructor for this type.
///
/// When this method has been called, `self` has not yet been deallocated.
/// If it were, `self` would be a dangling reference.
///
/// After this function is over, the memory of `self` will be deallocated.
///
/// This function cannot be called explicitly. This is compiler error
/// [E0040]. However, the [`std::mem::drop`] function in the prelude can be
/// This method is called implicitly when the value goes out of scope,
/// and cannot be called explicitly (this is compiler error [E0040]).
/// However, the [`std::mem::drop`] function in the prelude can be
/// used to call the argument's `Drop` implementation.
///
/// [E0040]: ../../error-index.html#E0040
/// [`std::mem::drop`]: ../../std/mem/fn.drop.html
/// When this method has been called, `self` has not yet been deallocated.
/// That only happens after the method is over.
/// If this wasn't the case, `self` would be a dangling reference.
///
/// # Panics
///
/// Given that a `panic!` will call `drop()` as it unwinds, any `panic!` in
/// a `drop()` implementation will likely abort.
/// Given that a [`panic!`] will call `drop` as it unwinds, any [`panic!`]
/// in a `drop` implementation will likely abort.
///
/// [E0040]: ../../error-index.html#E0040
/// [`panic!`]: ../macro.panic.html
/// [`std::mem::drop`]: ../../std/mem/fn.drop.html
#[stable(feature = "rust1", since = "1.0.0")]
fn drop(&mut self);
}
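
As a quick illustration of the point about [`std::mem::drop`], here is a minimal sketch reusing the `PrintOnDrop` type from the example above:

```rust
struct PrintOnDrop(&'static str);

impl Drop for PrintOnDrop {
    fn drop(&mut self) {
        println!("{}", self.0);
    }
}

fn main() {
    let early = PrintOnDrop("Dropped before the end of main");

    // `early.drop()` would be rejected with error E0040; the prelude's
    // `drop` function (`std::mem::drop`) runs the destructor now instead.
    drop(early);

    println!("Still running after the explicit drop");
}
```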

View File

@ -8,26 +8,51 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// A version of the call operator that takes an immutable receiver.
/// The version of the call operator that takes an immutable receiver.
///
/// Instances of `Fn` can be called repeatedly without mutating state.
///
/// *This trait (`Fn`) is not to be confused with [function pointers][]
/// (`fn`).*
///
/// `Fn` is implemented automatically by closures which only take immutable
/// references to captured variables or don't capture anything at all, as well
/// as (safe) [function pointers][] (with some caveats, see their documentation
/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
/// implements `Fn`, too.
///
/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
/// is expected.
///
/// Use `Fn` as a bound when you want to accept a parameter of function-like
/// type and need to call it repeatedly and without mutating state (e.g. when
/// calling it concurrently). If you do not need such strict requirements, use
/// [`FnMut`] or [`FnOnce`] as bounds.
///
/// See the [chapter on closures in *The Rust Programming Language*][book] for
/// some more information on this topic.
///
/// Also of note is the special syntax for `Fn` traits (e.g.
/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
///
/// [book]: ../../book/second-edition/ch13-01-closures.html
/// [`FnMut`]: trait.FnMut.html
/// [`FnOnce`]: trait.FnOnce.html
/// [function pointers]: ../../std/primitive.fn.html
/// [nomicon]: ../../nomicon/hrtb.html
///
/// # Examples
///
/// Closures automatically implement this trait, which allows them to be
/// invoked. Note, however, that `Fn` takes an immutable reference to any
/// captured variables. To take a mutable capture, implement [`FnMut`], and to
/// consume the capture, implement [`FnOnce`].
///
/// [`FnMut`]: trait.FnMut.html
/// [`FnOnce`]: trait.FnOnce.html
/// ## Calling a closure
///
/// ```
/// let square = |x| x * x;
/// assert_eq!(square(5), 25);
/// ```
///
/// Closures can also be passed to higher-level functions through a `Fn`
/// parameter (or a `FnMut` or `FnOnce` parameter, which are supertraits of
/// `Fn`).
/// ## Using a `Fn` parameter
///
/// ```
/// fn call_with_one<F>(func: F) -> usize
@ -43,17 +68,46 @@
#[rustc_paren_sugar]
#[fundamental] // so that regex can rely that `&str: !FnMut`
pub trait Fn<Args> : FnMut<Args> {
/// This is called when the call operator is used.
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call(&self, args: Args) -> Self::Output;
}
/// A version of the call operator that takes a mutable receiver.
/// The version of the call operator that takes a mutable receiver.
///
/// Instances of `FnMut` can be called repeatedly and may mutate state.
///
/// `FnMut` is implemented automatically by closures which take mutable
/// references to captured variables, as well as all types that implement
/// [`Fn`], e.g. (safe) [function pointers][] (since `FnMut` is a supertrait of
/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
/// implements `FnMut`, too.
///
/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
///
/// Use `FnMut` as a bound when you want to accept a parameter of function-like
/// type and need to call it repeatedly, while allowing it to mutate state.
/// If you don't want the parameter to mutate state, use [`Fn`] as a
/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
///
/// See the [chapter on closures in *The Rust Programming Language*][book] for
/// some more information on this topic.
///
/// Also of note is the special syntax for `Fn` traits (e.g.
/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
///
/// [book]: ../../book/second-edition/ch13-01-closures.html
/// [`Fn`]: trait.Fn.html
/// [`FnOnce`]: trait.FnOnce.html
/// [function pointers]: ../../std/primitive.fn.html
/// [nomicon]: ../../nomicon/hrtb.html
///
/// # Examples
///
/// Closures that mutably capture variables automatically implement this trait,
/// which allows them to be invoked.
/// ## Calling a mutably capturing closure
///
/// ```
/// let mut x = 5;
@ -64,8 +118,7 @@ pub trait Fn<Args> : FnMut<Args> {
/// assert_eq!(x, 25);
/// ```
///
/// Closures can also be passed to higher-level functions through a `FnMut`
/// parameter (or a `FnOnce` parameter, which is a supertrait of `FnMut`).
/// ## Using a `FnMut` parameter
///
/// ```
/// fn do_twice<F>(mut func: F)
@ -88,17 +141,45 @@ pub trait Fn<Args> : FnMut<Args> {
#[rustc_paren_sugar]
#[fundamental] // so that regex can rely that `&str: !FnMut`
pub trait FnMut<Args> : FnOnce<Args> {
/// This is called when the call operator is used.
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
}
/// A version of the call operator that takes a by-value receiver.
/// The version of the call operator that takes a by-value receiver.
///
/// Instances of `FnOnce` can be called, but might not be callable multiple
/// times. Because of this, if the only thing known about a type is that it
/// implements `FnOnce`, it can only be called once.
///
/// `FnOnce` is implemented automatically by closures that might consume captured
/// variables, as well as all types that implement [`FnMut`], e.g. (safe)
/// [function pointers][] (since `FnOnce` is a supertrait of [`FnMut`]).
///
/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
///
/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
/// type and only need to call it once. If you need to call the parameter
/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
/// state, use [`Fn`].
///
/// See the [chapter on closures in *The Rust Programming Language*][book] for
/// some more information on this topic.
///
/// Also of note is the special syntax for `Fn` traits (e.g.
/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
///
/// [book]: ../../book/second-edition/ch13-01-closures.html
/// [`Fn`]: trait.Fn.html
/// [`FnMut`]: trait.FnMut.html
/// [function pointers]: ../../std/primitive.fn.html
/// [nomicon]: ../../nomicon/hrtb.html
///
/// # Examples
///
/// By-value closures automatically implement this trait, which allows them to
/// be invoked.
/// ## Calling a by-value closure
///
/// ```
/// let x = 5;
@ -106,21 +187,20 @@ pub trait FnMut<Args> : FnOnce<Args> {
/// assert_eq!(square_x(), 25);
/// ```
///
/// By-value Closures can also be passed to higher-level functions through a
/// `FnOnce` parameter.
/// ## Using a `FnOnce` parameter
///
/// ```
/// fn consume_with_relish<F>(func: F)
/// where F: FnOnce() -> String
/// {
/// // `func` consumes its captured variables, so it cannot be run more
/// // than once
/// // than once.
/// println!("Consumed: {}", func());
///
/// println!("Delicious!");
///
/// // Attempting to invoke `func()` again will throw a `use of moved
/// // value` error for `func`
/// // value` error for `func`.
/// }
///
/// let x = String::from("x");
@ -138,7 +218,7 @@ pub trait FnOnce<Args> {
#[stable(feature = "fn_once_output", since = "1.12.0")]
type Output;
/// This is called when the call operator is used.
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
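
The sub-/supertrait relationships described for the three call traits mean that a single non-mutating closure satisfies all three bounds; a minimal sketch (the `call_*` helpers are invented for illustration):

```rust
fn call_once_bound<F: FnOnce() -> i32>(f: F) -> i32 {
    f()
}

fn call_mut_bound<F: FnMut() -> i32>(mut f: F) -> i32 {
    f() + f()
}

fn call_bound<F: Fn() -> i32>(f: F) -> i32 {
    f() + f()
}

fn main() {
    let x = 10;
    // Only captures `x` by immutable reference, so it implements `Fn`,
    // and therefore also `FnMut` and `FnOnce`. Passing `&closure` works
    // because `&F` implements `Fn` whenever `F` does.
    let closure = || x + 1;
    assert_eq!(call_bound(&closure), 22);
    assert_eq!(call_mut_bound(&closure), 22);
    assert_eq!(call_once_bound(&closure), 11);
}
```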

View File

@ -8,13 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The `Index` trait is used to specify the functionality of indexing operations
/// like `container[index]` when used in an immutable context.
/// Used for indexing operations (`container[index]`) in immutable contexts.
///
/// `container[index]` is actually syntactic sugar for `*container.index(index)`,
/// but only when used as an immutable value. If a mutable value is requested,
/// [`IndexMut`] is used instead. This allows nice things such as
/// `let value = v[index]` if `value` implements [`Copy`].
/// `let value = v[index]` if the type of `value` implements [`Copy`].
///
/// [`IndexMut`]: ../../std/ops/trait.IndexMut.html
/// [`Copy`]: ../../std/marker/trait.Copy.html
@ -64,25 +63,23 @@
#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Index<Idx: ?Sized> {
/// The returned type after indexing
/// The returned type after indexing.
#[stable(feature = "rust1", since = "1.0.0")]
type Output: ?Sized;
/// The method for the indexing (`container[index]`) operation
/// Performs the indexing (`container[index]`) operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn index(&self, index: Idx) -> &Self::Output;
}
/// The `IndexMut` trait is used to specify the functionality of indexing
/// operations like `container[index]` when used in a mutable context.
/// Used for indexing operations (`container[index]`) in mutable contexts.
///
/// `container[index]` is actually syntactic sugar for
/// `*container.index_mut(index)`, but only when used as a mutable value. If
/// an immutable value is requested, the [`Index`] trait is used instead. This
/// allows nice things such as `v[index] = value` if `value` implements [`Copy`].
/// allows nice things such as `v[index] = value`.
///
/// [`Index`]: ../../std/ops/trait.Index.html
/// [`Copy`]: ../../std/marker/trait.Copy.html
///
/// # Examples
///
@ -106,7 +103,7 @@ pub trait Index<Idx: ?Sized> {
///
/// struct Balance {
/// pub left: Weight,
/// pub right:Weight,
/// pub right: Weight,
/// }
///
/// impl Index<Side> for Balance {
@ -131,28 +128,26 @@ pub trait Index<Idx: ?Sized> {
/// }
/// }
///
/// fn main() {
/// let mut balance = Balance {
/// right: Weight::Kilogram(2.5),
/// left: Weight::Pound(1.5),
/// };
/// let mut balance = Balance {
/// right: Weight::Kilogram(2.5),
/// left: Weight::Pound(1.5),
/// };
///
/// // In this case balance[Side::Right] is sugar for
/// // *balance.index(Side::Right), since we are only reading
/// // balance[Side::Right], not writing it.
/// assert_eq!(balance[Side::Right],Weight::Kilogram(2.5));
/// // In this case, `balance[Side::Right]` is sugar for
/// // `*balance.index(Side::Right)`, since we are only *reading*
/// // `balance[Side::Right]`, not writing it.
/// assert_eq!(balance[Side::Right], Weight::Kilogram(2.5));
///
/// // However in this case balance[Side::Left] is sugar for
/// // *balance.index_mut(Side::Left), since we are writing
/// // balance[Side::Left].
/// balance[Side::Left] = Weight::Kilogram(3.0);
/// }
/// // However, in this case `balance[Side::Left]` is sugar for
/// // `*balance.index_mut(Side::Left)`, since we are writing
/// // `balance[Side::Left]`.
/// balance[Side::Left] = Weight::Kilogram(3.0);
/// ```
#[lang = "index_mut"]
#[rustc_on_unimplemented = "the type `{Self}` cannot be mutably indexed by `{Idx}`"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
/// The method for the mutable indexing (`container[index]`) operation
/// Performs the mutable indexing (`container[index]`) operation.
#[stable(feature = "rust1", since = "1.0.0")]
fn index_mut(&mut self, index: Idx) -> &mut Self::Output;
}
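
The desugaring described above can be checked directly against `Vec`; a small sketch:

```rust
use std::ops::Index;

fn main() {
    let v = vec![10, 20, 30];

    // `v[1]` is sugar for `*Index::index(&v, 1)`; copying the element out
    // into `value` works because `i32: Copy`.
    let value = v[1];
    assert_eq!(value, 20);
    assert_eq!(*v.index(1), 20);
}
```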

View File

@ -21,6 +21,12 @@
//! custom operators are required, you should look toward macros or compiler
//! plugins to extend Rust's syntax.
//!
//! Implementations of operator traits should be unsurprising in their
//! respective contexts, keeping in mind their usual meanings and
//! [operator precedence]. For example, when implementing [`Mul`], the operation
//! should have some resemblance to multiplication (and share expected
//! properties like associativity).
//!
//! Note that the `&&` and `||` operators short-circuit, i.e. they only
//! evaluate their second operand if it contributes to the result. Since this
//! behavior is not enforceable by traits, `&&` and `||` are not supported as
@ -46,7 +52,7 @@
//! ```rust
//! use std::ops::{Add, Sub};
//!
//! #[derive(Debug)]
//! #[derive(Debug, PartialEq)]
//! struct Point {
//! x: i32,
//! y: i32,
@ -67,10 +73,9 @@
//! Point {x: self.x - other.x, y: self.y - other.y}
//! }
//! }
//! fn main() {
//! println!("{:?}", Point {x: 1, y: 0} + Point {x: 2, y: 3});
//! println!("{:?}", Point {x: 1, y: 0} - Point {x: 2, y: 3});
//! }
//!
//! assert_eq!(Point {x: 3, y: 3}, Point {x: 1, y: 0} + Point {x: 2, y: 3});
//! assert_eq!(Point {x: -1, y: -3}, Point {x: 1, y: 0} - Point {x: 2, y: 3});
//! ```
//!
//! See the documentation for each trait for an example implementation.
@ -143,7 +148,9 @@
//! [`FnOnce`]: trait.FnOnce.html
//! [`Add`]: trait.Add.html
//! [`Sub`]: trait.Sub.html
//! [`Mul`]: trait.Mul.html
//! [`clone`]: ../clone/trait.Clone.html#tymethod.clone
//! [operator precedence]: ../../reference/expressions.html#operator-precedence
#![stable(feature = "rust1", since = "1.0.0")]
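
As the module docs note, `&&` and `||` short-circuit and so cannot be overloaded; a minimal sketch of the observable behavior (the helper `expensive_check` is invented for illustration):

```rust
fn expensive_check() -> bool {
    panic!("never evaluated");
}

fn main() {
    // The right operand is skipped once the left operand fixes the result,
    // so neither line below panics.
    assert!(!(false && expensive_check()));
    assert!(true || expensive_check());
}
```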

View File

@ -66,7 +66,7 @@ pub trait Place<Data: ?Sized> {
/// or `Copy`, since the `make_place` method takes `self` by value.
#[unstable(feature = "placement_new_protocol", issue = "27779")]
pub trait Placer<Data: ?Sized> {
/// `Place` is the intermedate agent guarding the
/// `Place` is the intermediate agent guarding the
/// uninitialized state for `Data`.
type Place: InPlace<Data>;

View File

@ -10,10 +10,10 @@
use fmt;
/// An unbounded range. Use `..` (two dots) for its shorthand.
/// An unbounded range (`..`).
///
/// Its primary use case is slicing index. It cannot serve as an iterator
/// because it doesn't have a starting point.
/// `RangeFull` is primarily used as a [slicing index]; its shorthand is `..`.
/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
///
/// # Examples
///
@ -23,8 +23,8 @@ use fmt;
/// assert_eq!((..), std::ops::RangeFull);
/// ```
///
/// It does not have an `IntoIterator` implementation, so you can't use it in a
/// `for` loop directly. This won't compile:
/// It does not have an [`IntoIterator`] implementation, so you can't use it in
/// a `for` loop directly. This won't compile:
///
/// ```compile_fail,E0277
/// for i in .. {
@ -32,7 +32,7 @@ use fmt;
/// }
/// ```
///
/// Used as a slicing index, `RangeFull` produces the full array as a slice.
/// Used as a [slicing index], `RangeFull` produces the full array as a slice.
///
/// ```
/// let arr = [0, 1, 2, 3];
@ -41,6 +41,10 @@ use fmt;
/// assert_eq!(arr[1.. ], [ 1,2,3]);
/// assert_eq!(arr[1..3], [ 1,2 ]);
/// ```
///
/// [`IntoIterator`]: ../iter/trait.IntoIterator.html
/// [`Iterator`]: ../iter/trait.Iterator.html
/// [slicing index]: ../slice/trait.SliceIndex.html
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeFull;
@ -52,24 +56,23 @@ impl fmt::Debug for RangeFull {
}
}
/// A (half-open) range which is bounded at both ends: { x | start <= x < end }.
/// Use `start..end` (two dots) for its shorthand.
/// A (half-open) range bounded inclusively below and exclusively above
/// (`start..end`).
///
/// See the [`contains`](#method.contains) method for its characterization.
/// The `Range` `start..end` contains all values with `x >= start` and
/// `x < end`.
///
/// # Examples
///
/// ```
/// fn main() {
/// assert_eq!((3..5), std::ops::Range{ start: 3, end: 5 });
/// assert_eq!(3+4+5, (3..6).sum());
/// assert_eq!((3..5), std::ops::Range { start: 3, end: 5 });
/// assert_eq!(3 + 4 + 5, (3..6).sum());
///
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ .. ], [0,1,2,3]);
/// assert_eq!(arr[ ..3], [0,1,2 ]);
/// assert_eq!(arr[1.. ], [ 1,2,3]);
/// assert_eq!(arr[1..3], [ 1,2 ]); // Range
/// }
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ .. ], [0,1,2,3]);
/// assert_eq!(arr[ ..3], [0,1,2 ]);
/// assert_eq!(arr[1.. ], [ 1,2,3]);
/// assert_eq!(arr[1..3], [ 1,2 ]); // Range
/// ```
#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
#[stable(feature = "rust1", since = "1.0.0")]
@ -91,49 +94,49 @@ impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> {
#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
impl<Idx: PartialOrd<Idx>> Range<Idx> {
/// Returns `true` if `item` is contained in the range.
///
/// # Examples
///
/// ```
/// #![feature(range_contains)]
/// fn main() {
/// assert!( ! (3..5).contains(2));
/// assert!( (3..5).contains(3));
/// assert!( (3..5).contains(4));
/// assert!( ! (3..5).contains(5));
///
/// assert!( ! (3..3).contains(3));
/// assert!( ! (3..2).contains(3));
/// }
/// assert!(!(3..5).contains(2));
/// assert!( (3..5).contains(3));
/// assert!( (3..5).contains(4));
/// assert!(!(3..5).contains(5));
///
/// assert!(!(3..3).contains(3));
/// assert!(!(3..2).contains(3));
/// ```
pub fn contains(&self, item: Idx) -> bool {
(self.start <= item) && (item < self.end)
}
}
/// A range which is only bounded below: { x | start <= x }.
/// Use `start..` for its shorthand.
/// A range only bounded inclusively below (`start..`).
///
/// See the [`contains`](#method.contains) method for its characterization.
/// The `RangeFrom` `start..` contains all values with `x >= start`.
///
/// Note: Currently, no overflow checking is done for the iterator
/// *Note*: Currently, no overflow checking is done for the [`Iterator`]
/// implementation; if you use an integer range and the integer overflows, it
/// might panic in debug mode or create an endless loop in release mode. This
/// overflow behavior might change in the future.
/// might panic in debug mode or create an endless loop in release mode. **This
/// overflow behavior might change in the future.**
///
/// # Examples
///
/// ```
/// fn main() {
/// assert_eq!((2..), std::ops::RangeFrom{ start: 2 });
/// assert_eq!(2+3+4, (2..).take(3).sum());
/// assert_eq!((2..), std::ops::RangeFrom { start: 2 });
/// assert_eq!(2 + 3 + 4, (2..).take(3).sum());
///
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ .. ], [0,1,2,3]);
/// assert_eq!(arr[ ..3], [0,1,2 ]);
/// assert_eq!(arr[1.. ], [ 1,2,3]); // RangeFrom
/// assert_eq!(arr[1..3], [ 1,2 ]);
/// }
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ .. ], [0,1,2,3]);
/// assert_eq!(arr[ ..3], [0,1,2 ]);
/// assert_eq!(arr[1.. ], [ 1,2,3]); // RangeFrom
/// assert_eq!(arr[1..3], [ 1,2 ]);
/// ```
///
/// [`Iterator`]: ../iter/trait.Iterator.html
#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeFrom<Idx> {
@ -151,46 +154,47 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> {
#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
/// Returns `true` if `item` is contained in the range.
///
/// # Examples
///
/// ```
/// #![feature(range_contains)]
/// fn main() {
/// assert!( ! (3..).contains(2));
/// assert!( (3..).contains(3));
/// assert!( (3..).contains(1_000_000_000));
/// }
///
/// assert!(!(3..).contains(2));
/// assert!( (3..).contains(3));
/// assert!( (3..).contains(1_000_000_000));
/// ```
pub fn contains(&self, item: Idx) -> bool {
(self.start <= item)
}
}
/// A range which is only bounded above: { x | x < end }.
/// Use `..end` (two dots) for its shorthand.
/// A range only bounded exclusively above (`..end`).
///
/// See the [`contains`](#method.contains) method for its characterization.
///
/// It cannot serve as an iterator because it doesn't have a starting point.
/// The `RangeTo` `..end` contains all values with `x < end`.
/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
///
/// # Examples
///
/// The `..{integer}` syntax is a `RangeTo`:
/// The `..end` syntax is a `RangeTo`:
///
/// ```
/// assert_eq!((..5), std::ops::RangeTo{ end: 5 });
/// assert_eq!((..5), std::ops::RangeTo { end: 5 });
/// ```
///
/// It does not have an `IntoIterator` implementation, so you can't use it in a
/// `for` loop directly. This won't compile:
/// It does not have an [`IntoIterator`] implementation, so you can't use it in
/// a `for` loop directly. This won't compile:
///
/// ```compile_fail,E0277
/// // error[E0277]: the trait bound `std::ops::RangeTo<{integer}>:
/// // std::iter::Iterator` is not satisfied
/// for i in ..5 {
/// // ...
/// }
/// ```
///
/// When used as a slicing index, `RangeTo` produces a slice of all array
/// When used as a [slicing index], `RangeTo` produces a slice of all array
/// elements before the index indicated by `end`.
///
/// ```
@ -200,6 +204,10 @@ impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
/// assert_eq!(arr[1.. ], [ 1,2,3]);
/// assert_eq!(arr[1..3], [ 1,2 ]);
/// ```
///
/// [`IntoIterator`]: ../iter/trait.IntoIterator.html
/// [`Iterator`]: ../iter/trait.Iterator.html
/// [slicing index]: ../slice/trait.SliceIndex.html
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeTo<Idx> {
@ -217,38 +225,38 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> {
#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
/// Returns `true` if `item` is contained in the range.
///
/// # Examples
///
/// ```
/// #![feature(range_contains)]
/// fn main() {
/// assert!( (..5).contains(-1_000_000_000));
/// assert!( (..5).contains(4));
/// assert!( ! (..5).contains(5));
/// }
///
/// assert!( (..5).contains(-1_000_000_000));
/// assert!( (..5).contains(4));
/// assert!(!(..5).contains(5));
/// ```
pub fn contains(&self, item: Idx) -> bool {
(item < self.end)
}
}
/// An inclusive range which is bounded at both ends: { x | start <= x <= end }.
/// Use `start...end` (three dots) for its shorthand.
/// A range bounded inclusively below and above (`start...end`).
///
/// See the [`contains`](#method.contains) method for its characterization.
/// The `RangeInclusive` `start...end` contains all values with `x >= start`
/// and `x <= end`.
///
/// # Examples
///
/// ```
/// #![feature(inclusive_range,inclusive_range_syntax)]
/// fn main() {
/// assert_eq!((3...5), std::ops::RangeInclusive{ start: 3, end: 5 });
/// assert_eq!(3+4+5, (3...5).sum());
///
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ ...2], [0,1,2 ]);
/// assert_eq!(arr[1...2], [ 1,2 ]); // RangeInclusive
/// }
/// assert_eq!((3...5), std::ops::RangeInclusive { start: 3, end: 5 });
/// assert_eq!(3 + 4 + 5, (3...5).sum());
///
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ ...2], [0,1,2 ]);
/// assert_eq!(arr[1...2], [ 1,2 ]); // RangeInclusive
/// ```
#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
@ -274,61 +282,68 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> {
#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> {
/// Returns `true` if `item` is contained in the range.
///
/// # Examples
///
/// ```
/// #![feature(range_contains,inclusive_range_syntax)]
/// fn main() {
/// assert!( ! (3...5).contains(2));
/// assert!( (3...5).contains(3));
/// assert!( (3...5).contains(4));
/// assert!( (3...5).contains(5));
/// assert!( ! (3...5).contains(6));
///
/// assert!( (3...3).contains(3));
/// assert!( ! (3...2).contains(3));
/// }
/// assert!(!(3...5).contains(2));
/// assert!( (3...5).contains(3));
/// assert!( (3...5).contains(4));
/// assert!( (3...5).contains(5));
/// assert!(!(3...5).contains(6));
///
/// assert!( (3...3).contains(3));
/// assert!(!(3...2).contains(3));
/// ```
pub fn contains(&self, item: Idx) -> bool {
self.start <= item && item <= self.end
}
}
/// An inclusive range which is only bounded above: { x | x <= end }.
/// Use `...end` (three dots) for its shorthand.
/// A range only bounded inclusively above (`...end`).
///
/// See the [`contains`](#method.contains) method for its characterization.
///
/// It cannot serve as an iterator because it doesn't have a starting point.
/// The `RangeToInclusive` `...end` contains all values with `x <= end`.
/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
///
/// # Examples
///
/// The `...{integer}` syntax is a `RangeToInclusive`:
/// The `...end` syntax is a `RangeToInclusive`:
///
/// ```
/// #![feature(inclusive_range,inclusive_range_syntax)]
/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 });
/// ```
///
/// It does not have an `IntoIterator` implementation, so you can't use it in a
/// It does not have an [`IntoIterator`] implementation, so you can't use it in a
/// `for` loop directly. This won't compile:
///
/// ```compile_fail,E0277
/// #![feature(inclusive_range_syntax)]
///
/// // error[E0277]: the trait bound `std::ops::RangeToInclusive<{integer}>:
/// // std::iter::Iterator` is not satisfied
/// for i in ...5 {
/// // ...
/// }
/// ```
///
/// When used as a slicing index, `RangeToInclusive` produces a slice of all
/// When used as a [slicing index], `RangeToInclusive` produces a slice of all
/// array elements up to and including the index indicated by `end`.
///
/// ```
/// #![feature(inclusive_range_syntax)]
///
/// let arr = [0, 1, 2, 3];
/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive
/// assert_eq!(arr[1...2], [ 1,2 ]);
/// ```
///
/// [`IntoIterator`]: ../iter/trait.IntoIterator.html
/// [`Iterator`]: ../iter/trait.Iterator.html
/// [slicing index]: ../slice/trait.SliceIndex.html
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
pub struct RangeToInclusive<Idx> {
@ -348,15 +363,16 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> {
#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")]
impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> {
/// Returns `true` if `item` is contained in the range.
///
/// # Examples
///
/// ```
/// #![feature(range_contains,inclusive_range_syntax)]
/// fn main() {
/// assert!( (...5).contains(-1_000_000_000));
/// assert!( (...5).contains(5));
/// assert!( ! (...5).contains(6));
/// }
///
/// assert!( (...5).contains(-1_000_000_000));
/// assert!( (...5).contains(5));
/// assert!(!(...5).contains(6));
/// ```
pub fn contains(&self, item: Idx) -> bool {
(item <= self.end)

View File

@ -24,7 +24,7 @@ use marker::Unsize;
/// Such an impl can only be written if `Foo<T>` has only a single non-phantomdata
/// field involving `T`. If the type of that field is `Bar<T>`, an implementation
/// of `CoerceUnsized<Bar<U>> for Bar<T>` must exist. The coercion will work by
/// by coercing the `Bar<T>` field into `Bar<U>` and filling in the rest of the fields
/// coercing the `Bar<T>` field into `Bar<U>` and filling in the rest of the fields
/// from `Foo<T>` to create a `Foo<U>`. This will effectively drill down to a pointer
/// field and coerce that.
///

View File

@ -384,6 +384,11 @@ pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
@ -427,6 +432,11 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety

View File

@ -287,3 +287,20 @@ fn refcell_ref_coercion() {
assert_eq!(&*coerced, comp);
}
}
#[test]
#[should_panic]
fn refcell_swap_borrows() {
let x = RefCell::new(0);
let _b = x.borrow();
let y = RefCell::new(1);
x.swap(&y);
}
#[test]
#[should_panic]
fn refcell_replace_borrows() {
let x = RefCell::new(0);
let _b = x.borrow();
x.replace(1);
}

View File

@ -31,6 +31,7 @@
#![feature(ord_max_min)]
#![feature(rand)]
#![feature(raw)]
#![feature(refcell_replace_swap)]
#![feature(sip_hash_13)]
#![feature(slice_patterns)]
#![feature(slice_rotate)]

View File

@ -566,7 +566,7 @@ assume_usize_width! {
);
}
/// Conversinos where neither the min nor the max of $source can be represented by
/// Conversions where neither the min nor the max of $source can be represented by
/// $target, but max/min of the target can be represented by the source.
macro_rules! test_impl_try_from_signed_to_unsigned_err {
($fn_name:ident, $source:ty, $target:ty) => {

View File

@ -548,7 +548,7 @@ impl<'a> LabelText<'a> {
}
/// Renders text as string suitable for a label in a .dot file.
/// This includes quotes or suitable delimeters.
/// This includes quotes or suitable delimiters.
pub fn to_dot_string(&self) -> String {
match self {
&LabelStr(ref s) => format!("\"{}\"", s.escape_default()),

View File

@ -111,7 +111,7 @@ impl fmt::Display for TokenStream {
/// `quote!(..)` accepts arbitrary tokens and expands into a `TokenStream` describing the input.
/// For example, `quote!(a + b)` will produce an expression that, when evaluated, constructs
/// constructs the `TokenStream` `[Word("a"), Op('+', Alone), Word("b")]`.
/// the `TokenStream` `[Word("a"), Op('+', Alone), Word("b")]`.
///
/// Unquoting is done with `$`, and works by taking the single next ident as the unquoted term.
/// To quote `$` itself, use `$$`.

View File

@ -87,7 +87,7 @@ pub enum NestedVisitorMap<'this, 'tcx: 'this> {
/// Do not visit nested item-like things, but visit nested things
/// that are inside of an item-like.
///
/// **This is the most common choice.** A very commmon pattern is
/// **This is the most common choice.** A very common pattern is
/// to use `visit_all_item_likes()` as an outer loop,
/// and to have the visitor that visits the contents of each item
/// using this setting.

View File

@ -158,6 +158,11 @@ enum ParamMode {
Optional
}
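/// The `NodeId` and corresponding `HirId` produced when lowering a single AST
/// node; callers that only need the plain `NodeId` project it out via `.node_id`.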
struct LoweredNodeId {
node_id: NodeId,
hir_id: hir::HirId,
}
impl<'a> LoweringContext<'a> {
fn lower_crate(mut self, c: &Crate) -> hir::Crate {
/// Full-crate AST visitor that inserts into a fresh
@ -281,11 +286,14 @@ impl<'a> LoweringContext<'a> {
fn lower_node_id_generic<F>(&mut self,
ast_node_id: NodeId,
alloc_hir_id: F)
-> NodeId
-> LoweredNodeId
where F: FnOnce(&mut Self) -> hir::HirId
{
if ast_node_id == DUMMY_NODE_ID {
return ast_node_id;
return LoweredNodeId {
node_id: DUMMY_NODE_ID,
hir_id: hir::DUMMY_HIR_ID,
}
}
let min_size = ast_node_id.as_usize() + 1;
@ -294,12 +302,22 @@ impl<'a> LoweringContext<'a> {
self.node_id_to_hir_id.resize(min_size, hir::DUMMY_HIR_ID);
}
if self.node_id_to_hir_id[ast_node_id] == hir::DUMMY_HIR_ID {
// Generate a new HirId
self.node_id_to_hir_id[ast_node_id] = alloc_hir_id(self);
}
let existing_hir_id = self.node_id_to_hir_id[ast_node_id];
ast_node_id
if existing_hir_id == hir::DUMMY_HIR_ID {
// Generate a new HirId
let hir_id = alloc_hir_id(self);
self.node_id_to_hir_id[ast_node_id] = hir_id;
LoweredNodeId {
node_id: ast_node_id,
hir_id,
}
} else {
LoweredNodeId {
node_id: ast_node_id,
hir_id: existing_hir_id,
}
}
}
fn with_hir_id_owner<F>(&mut self, owner: NodeId, f: F)
@ -326,7 +344,7 @@ impl<'a> LoweringContext<'a> {
/// actually used in the HIR, as that would trigger an assertion in the
/// HirIdValidator later on, which makes sure that all NodeIds got mapped
/// properly. Calling the method twice with the same NodeId is fine though.
fn lower_node_id(&mut self, ast_node_id: NodeId) -> NodeId {
fn lower_node_id(&mut self, ast_node_id: NodeId) -> LoweredNodeId {
self.lower_node_id_generic(ast_node_id, |this| {
let &mut (def_index, ref mut local_id_counter) = this.current_hir_id_owner
.last_mut()
@ -343,7 +361,7 @@ impl<'a> LoweringContext<'a> {
fn lower_node_id_with_owner(&mut self,
ast_node_id: NodeId,
owner: NodeId)
-> NodeId {
-> LoweredNodeId {
self.lower_node_id_generic(ast_node_id, |this| {
let local_id_counter = this.item_local_id_counters
.get_mut(&owner)
@ -379,7 +397,7 @@ impl<'a> LoweringContext<'a> {
id
}
fn next_id(&mut self) -> NodeId {
fn next_id(&mut self) -> LoweredNodeId {
self.lower_node_id(self.sess.next_node_id())
}
@ -408,6 +426,7 @@ impl<'a> LoweringContext<'a> {
format: codemap::CompilerDesugaring(Symbol::intern(reason)),
span: Some(span),
allow_internal_unstable: true,
allow_internal_unsafe: false,
},
});
span.ctxt = SyntaxContext::empty().apply_mark(mark);
@ -531,7 +550,7 @@ impl<'a> LoweringContext<'a> {
match destination {
Some((id, label_ident)) => {
let target = if let Def::Label(loop_id) = self.expect_full_def(id) {
hir::LoopIdResult::Ok(self.lower_node_id(loop_id))
hir::LoopIdResult::Ok(self.lower_node_id(loop_id).node_id)
} else {
hir::LoopIdResult::Err(hir::LoopIdError::UnresolvedLabel)
};
@ -548,7 +567,7 @@ impl<'a> LoweringContext<'a> {
hir::Destination {
ident: None,
target_id: hir::ScopeTarget::Loop(
loop_id.map(|id| Ok(self.lower_node_id(id)))
loop_id.map(|id| Ok(self.lower_node_id(id).node_id))
.unwrap_or(Err(hir::LoopIdError::OutsideLoopScope))
.into())
}
@ -571,7 +590,7 @@ impl<'a> LoweringContext<'a> {
fn lower_ty_binding(&mut self, b: &TypeBinding) -> hir::TypeBinding {
hir::TypeBinding {
id: self.lower_node_id(b.id),
id: self.lower_node_id(b.id).node_id,
name: self.lower_ident(b.ident),
ty: self.lower_ty(&b.ty),
span: b.span,
@ -608,7 +627,7 @@ impl<'a> LoweringContext<'a> {
return self.lower_ty(ty);
}
TyKind::Path(ref qself, ref path) => {
let id = self.lower_node_id(t.id);
let id = self.lower_node_id(t.id).node_id;
let qpath = self.lower_qpath(t.id, qself, path, ParamMode::Explicit);
return self.ty_path(id, t.span, qpath);
}
@ -658,7 +677,7 @@ impl<'a> LoweringContext<'a> {
};
P(hir::Ty {
id: self.lower_node_id(t.id),
id: self.lower_node_id(t.id).node_id,
node: kind,
span: t.span,
})
@ -770,7 +789,7 @@ impl<'a> LoweringContext<'a> {
// Otherwise, the base path is an implicit `Self` type path,
// e.g. `Vec` in `Vec::new` or `<I as Iterator>::Item` in
// `<I as Iterator>::Item::default`.
let new_id = self.next_id();
let new_id = self.next_id().node_id;
self.ty_path(new_id, p.span, hir::QPath::Resolved(qself, path))
};
@ -794,7 +813,7 @@ impl<'a> LoweringContext<'a> {
}
// Wrap the associated extension in another type node.
let new_id = self.next_id();
let new_id = self.next_id().node_id;
ty = self.ty_path(new_id, p.span, qpath);
}
@ -898,8 +917,10 @@ impl<'a> LoweringContext<'a> {
}
fn lower_local(&mut self, l: &Local) -> P<hir::Local> {
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(l.id);
P(hir::Local {
id: self.lower_node_id(l.id),
id: node_id,
hir_id,
ty: l.ty.as_ref().map(|t| self.lower_ty(t)),
pat: self.lower_pat(&l.pat),
init: l.init.as_ref().map(|e| P(self.lower_expr(e))),
@ -917,8 +938,10 @@ impl<'a> LoweringContext<'a> {
}
fn lower_arg(&mut self, arg: &Arg) -> hir::Arg {
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(arg.id);
hir::Arg {
id: self.lower_node_id(arg.id),
id: node_id,
hir_id,
pat: self.lower_pat(&arg.pat),
}
}
@ -981,7 +1004,7 @@ impl<'a> LoweringContext<'a> {
}
hir::TyParam {
id: self.lower_node_id(tp.id),
id: self.lower_node_id(tp.id).node_id,
name,
bounds,
default: tp.default.as_ref().map(|x| self.lower_ty(x)),
@ -999,7 +1022,7 @@ impl<'a> LoweringContext<'a> {
fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
hir::Lifetime {
id: self.lower_node_id(l.id),
id: self.lower_node_id(l.id).node_id,
name: self.lower_ident(l.ident),
span: l.span,
}
@ -1071,7 +1094,7 @@ impl<'a> LoweringContext<'a> {
fn lower_where_clause(&mut self, wc: &WhereClause) -> hir::WhereClause {
hir::WhereClause {
id: self.lower_node_id(wc.id),
id: self.lower_node_id(wc.id).node_id,
predicates: wc.predicates
.iter()
.map(|predicate| self.lower_where_predicate(predicate))
@ -1110,7 +1133,7 @@ impl<'a> LoweringContext<'a> {
ref rhs_ty,
span}) => {
hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
id: self.lower_node_id(id),
id: self.lower_node_id(id).node_id,
lhs_ty: self.lower_ty(lhs_ty),
rhs_ty: self.lower_ty(rhs_ty),
span,
@ -1126,16 +1149,16 @@ impl<'a> LoweringContext<'a> {
.enumerate()
.map(|f| self.lower_struct_field(f))
.collect(),
self.lower_node_id(id))
self.lower_node_id(id).node_id)
}
VariantData::Tuple(ref fields, id) => {
hir::VariantData::Tuple(fields.iter()
.enumerate()
.map(|f| self.lower_struct_field(f))
.collect(),
self.lower_node_id(id))
self.lower_node_id(id).node_id)
}
VariantData::Unit(id) => hir::VariantData::Unit(self.lower_node_id(id)),
VariantData::Unit(id) => hir::VariantData::Unit(self.lower_node_id(id).node_id),
}
}
@ -1146,7 +1169,7 @@ impl<'a> LoweringContext<'a> {
};
hir::TraitRef {
path,
ref_id: self.lower_node_id(p.ref_id),
ref_id: self.lower_node_id(p.ref_id).node_id,
}
}
@ -1161,7 +1184,7 @@ impl<'a> LoweringContext<'a> {
fn lower_struct_field(&mut self, (index, f): (usize, &StructField)) -> hir::StructField {
hir::StructField {
span: f.span,
id: self.lower_node_id(f.id),
id: self.lower_node_id(f.id).node_id,
name: self.lower_ident(match f.ident {
Some(ident) => ident,
// FIXME(jseyfried) positional field hygiene
@ -1210,8 +1233,11 @@ impl<'a> LoweringContext<'a> {
}
}
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(b.id);
P(hir::Block {
id: self.lower_node_id(b.id),
id: node_id,
hir_id,
stmts: stmts.into(),
expr,
rules: self.lower_block_check_mode(&b.rules),
@ -1261,7 +1287,7 @@ impl<'a> LoweringContext<'a> {
hir::Visibility::Restricted {
path: path.clone(),
// We are allocating a new NodeId here
id: this.next_id(),
id: this.next_id().node_id,
}
}
};
@ -1399,7 +1425,7 @@ impl<'a> LoweringContext<'a> {
fn lower_trait_item(&mut self, i: &TraitItem) -> hir::TraitItem {
self.with_parent_def(i.id, |this| {
hir::TraitItem {
id: this.lower_node_id(i.id),
id: this.lower_node_id(i.id).node_id,
name: this.lower_ident(i.ident),
attrs: this.lower_attrs(&i.attrs),
node: match i.node {
@ -1460,7 +1486,7 @@ impl<'a> LoweringContext<'a> {
fn lower_impl_item(&mut self, i: &ImplItem) -> hir::ImplItem {
self.with_parent_def(i.id, |this| {
hir::ImplItem {
id: this.lower_node_id(i.id),
id: this.lower_node_id(i.id).node_id,
name: this.lower_ident(i.ident),
attrs: this.lower_attrs(&i.attrs),
vis: this.lower_visibility(&i.vis, None),
@ -1552,7 +1578,7 @@ impl<'a> LoweringContext<'a> {
});
Some(hir::Item {
id: self.lower_node_id(i.id),
id: self.lower_node_id(i.id).node_id,
name,
attrs,
node,
@ -1564,7 +1590,7 @@ impl<'a> LoweringContext<'a> {
fn lower_foreign_item(&mut self, i: &ForeignItem) -> hir::ForeignItem {
self.with_parent_def(i.id, |this| {
hir::ForeignItem {
id: this.lower_node_id(i.id),
id: this.lower_node_id(i.id).node_id,
name: i.ident.name,
attrs: this.lower_attrs(&i.attrs),
node: match i.node {
@ -1642,8 +1668,11 @@ impl<'a> LoweringContext<'a> {
}
fn lower_pat(&mut self, p: &Pat) -> P<hir::Pat> {
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(p.id);
P(hir::Pat {
id: self.lower_node_id(p.id),
id: node_id,
hir_id,
node: match p.node {
PatKind::Wild => hir::PatKind::Wild,
PatKind::Ident(ref binding_mode, pth1, ref sub) => {
@ -1825,7 +1854,7 @@ impl<'a> LoweringContext<'a> {
let call_move_val_init =
hir::StmtSemi(
make_call(self, &move_val_init, hir_vec![ptr, pop_unsafe_expr]),
self.next_id());
self.next_id().node_id);
let call_move_val_init = respan(e.span, call_move_val_init);
let place = self.expr_ident(e.span, place_ident, place_binding);
@ -1895,11 +1924,15 @@ impl<'a> LoweringContext<'a> {
// wrap the if-let expr in a block
let span = els.span;
let els = P(self.lower_expr(els));
let id = self.next_id();
let LoweredNodeId {
node_id,
hir_id,
} = self.next_id();
let blk = P(hir::Block {
stmts: hir_vec![],
expr: Some(els),
id,
id: node_id,
hir_id,
rules: hir::DefaultBlock,
span,
targeted_by_break: false,
@ -2009,8 +2042,11 @@ impl<'a> LoweringContext<'a> {
let struct_path = self.std_path(unstable_span, &struct_path, is_unit);
let struct_path = hir::QPath::Resolved(None, P(struct_path));
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id);
return hir::Expr {
id: self.lower_node_id(e.id),
id: node_id,
hir_id,
node: if is_unit {
hir::ExprPath(struct_path)
} else {
@ -2265,7 +2301,7 @@ impl<'a> LoweringContext<'a> {
hir::MatchSource::ForLoopDesugar),
ThinVec::new()))
};
let match_stmt = respan(e.span, hir::StmtExpr(match_expr, self.next_id()));
let match_stmt = respan(e.span, hir::StmtExpr(match_expr, self.next_id().node_id));
let next_expr = P(self.expr_ident(e.span, next_ident, next_pat.id));
@ -2285,7 +2321,7 @@ impl<'a> LoweringContext<'a> {
let body_block = self.with_loop_scope(e.id,
|this| this.lower_block(body, false));
let body_expr = P(self.expr_block(body_block, ThinVec::new()));
let body_stmt = respan(e.span, hir::StmtExpr(body_expr, self.next_id()));
let body_stmt = respan(e.span, hir::StmtExpr(body_expr, self.next_id().node_id));
let loop_block = P(self.block_all(e.span,
hir_vec![next_let,
@ -2297,8 +2333,10 @@ impl<'a> LoweringContext<'a> {
// `[opt_ident]: loop { ... }`
let loop_expr = hir::ExprLoop(loop_block, self.lower_opt_sp_ident(opt_ident),
hir::LoopSource::ForLoop);
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id);
let loop_expr = P(hir::Expr {
id: self.lower_node_id(e.id),
id: node_id,
hir_id,
node: loop_expr,
span: e.span,
attrs: ThinVec::new(),
@ -2437,8 +2475,11 @@ impl<'a> LoweringContext<'a> {
ExprKind::Mac(_) => panic!("Shouldn't exist here"),
};
let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id);
hir::Expr {
id: self.lower_node_id(e.id),
id: node_id,
hir_id,
node: kind,
span: e.span,
attrs: e.attrs.clone(),
@ -2451,7 +2492,7 @@ impl<'a> LoweringContext<'a> {
node: hir::StmtDecl(P(Spanned {
node: hir::DeclLocal(self.lower_local(l)),
span: s.span,
}), self.lower_node_id(s.id)),
}), self.lower_node_id(s.id).node_id),
span: s.span,
},
StmtKind::Item(ref it) => {
@ -2462,22 +2503,22 @@ impl<'a> LoweringContext<'a> {
node: hir::DeclItem(item_id),
span: s.span,
}), id.take()
.map(|id| self.lower_node_id(id))
.unwrap_or_else(|| self.next_id())),
.map(|id| self.lower_node_id(id).node_id)
.unwrap_or_else(|| self.next_id().node_id)),
span: s.span,
}).collect();
}
StmtKind::Expr(ref e) => {
Spanned {
node: hir::StmtExpr(P(self.lower_expr(e)),
self.lower_node_id(s.id)),
self.lower_node_id(s.id).node_id),
span: s.span,
}
}
StmtKind::Semi(ref e) => {
Spanned {
node: hir::StmtSemi(P(self.lower_expr(e)),
self.lower_node_id(s.id)),
self.lower_node_id(s.id).node_id),
span: s.span,
}
}
@ -2508,9 +2549,9 @@ impl<'a> LoweringContext<'a> {
hir::Visibility::Restricted {
path: P(self.lower_path(id, path, ParamMode::Explicit, true)),
id: if let Some(owner) = explicit_owner {
self.lower_node_id_with_owner(id, owner)
self.lower_node_id_with_owner(id, owner).node_id
} else {
self.lower_node_id(id)
self.lower_node_id(id).node_id
}
}
}
@ -2652,8 +2693,10 @@ impl<'a> LoweringContext<'a> {
}
fn expr(&mut self, span: Span, node: hir::Expr_, attrs: ThinVec<Attribute>) -> hir::Expr {
let LoweredNodeId { node_id, hir_id } = self.next_id();
hir::Expr {
id: self.next_id(),
id: node_id,
hir_id,
node,
span,
attrs,
@ -2666,17 +2709,20 @@ impl<'a> LoweringContext<'a> {
pat: P<hir::Pat>,
source: hir::LocalSource)
-> hir::Stmt {
let LoweredNodeId { node_id, hir_id } = self.next_id();
let local = P(hir::Local {
pat,
ty: None,
init: ex,
id: self.next_id(),
id: node_id,
hir_id,
span: sp,
attrs: ThinVec::new(),
source,
});
let decl = respan(sp, hir::DeclLocal(local));
respan(sp, hir::StmtDecl(P(decl), self.next_id()))
respan(sp, hir::StmtDecl(P(decl), self.next_id().node_id))
}
fn stmt_let(&mut self, sp: Span, mutbl: bool, ident: Name, ex: P<hir::Expr>)
@ -2696,10 +2742,13 @@ impl<'a> LoweringContext<'a> {
fn block_all(&mut self, span: Span, stmts: hir::HirVec<hir::Stmt>, expr: Option<P<hir::Expr>>)
-> hir::Block {
let LoweredNodeId { node_id, hir_id } = self.next_id();
hir::Block {
stmts,
expr,
id: self.next_id(),
id: node_id,
hir_id,
rules: hir::DefaultBlock,
span,
targeted_by_break: false,
@ -2743,18 +2792,22 @@ impl<'a> LoweringContext<'a> {
fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingAnnotation)
-> P<hir::Pat> {
let id = self.next_id();
let LoweredNodeId { node_id, hir_id } = self.next_id();
let parent_def = self.parent_def.unwrap();
let def_id = {
let defs = self.resolver.definitions();
let def_path_data = DefPathData::Binding(name);
let def_index = defs
.create_def_with_parent(parent_def, id, def_path_data, REGULAR_SPACE, Mark::root());
let def_index = defs.create_def_with_parent(parent_def,
node_id,
def_path_data,
REGULAR_SPACE,
Mark::root());
DefId::local(def_index)
};
P(hir::Pat {
id,
id: node_id,
hir_id,
node: hir::PatKind::Binding(bm,
def_id,
Spanned {
@ -2771,8 +2824,10 @@ impl<'a> LoweringContext<'a> {
}
fn pat(&mut self, span: Span, pat: hir::PatKind) -> P<hir::Pat> {
let LoweredNodeId { node_id, hir_id } = self.next_id();
P(hir::Pat {
id: self.next_id(),
id: node_id,
hir_id,
node: pat,
span,
})
@ -2801,11 +2856,13 @@ impl<'a> LoweringContext<'a> {
rule: hir::BlockCheckMode,
attrs: ThinVec<Attribute>)
-> hir::Expr {
let id = self.next_id();
let LoweredNodeId { node_id, hir_id } = self.next_id();
let block = P(hir::Block {
rules: rule,
span,
id,
id: node_id,
hir_id,
stmts,
expr: Some(expr),
targeted_by_break: false,
@ -2830,7 +2887,7 @@ impl<'a> LoweringContext<'a> {
// The original ID is taken by the `PolyTraitRef`,
// so the `Ty` itself needs a different one.
id = self.next_id();
id = self.next_id().node_id;
hir::TyTraitObject(hir_vec![principal], self.elided_lifetime(span))
} else {
@ -2844,7 +2901,7 @@ impl<'a> LoweringContext<'a> {
fn elided_lifetime(&mut self, span: Span) -> hir::Lifetime {
hir::Lifetime {
id: self.next_id(),
id: self.next_id().node_id,
span,
name: keywords::Invalid.name()
}
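Throughout the lowering hunks above, `self.lower_node_id(...)` now returns a `LoweredNodeId` that is destructured into a `node_id` and a `hir_id`, and both are stored on the freshly built HIR node. A minimal sketch of that shape, using simplified stand-in types rather than the real rustc definitions (the id-allocation logic here is invented for illustration):

```rust
// Simplified stand-ins for the real rustc types; for illustration only.
#[derive(Clone, Copy, Debug, PartialEq)]
struct NodeId(u32);

#[derive(Clone, Copy, Debug, PartialEq)]
struct HirId { owner: u32, local_id: u32 }

// Mirrors the shape of rustc's `LoweredNodeId`: one lowering step yields both ids.
struct LoweredNodeId { node_id: NodeId, hir_id: HirId }

struct LoweringContext {
    current_owner: u32,
    next_local_id: u32,
}

impl LoweringContext {
    // Hand out the next item-local id within the current owner (invented logic).
    fn lower_node_id(&mut self, node_id: NodeId) -> LoweredNodeId {
        let hir_id = HirId { owner: self.current_owner, local_id: self.next_local_id };
        self.next_local_id += 1;
        LoweredNodeId { node_id, hir_id }
    }
}

fn main() {
    let mut cx = LoweringContext { current_owner: 7, next_local_id: 0 };
    // The destructuring pattern used throughout the hunks above:
    let LoweredNodeId { node_id, hir_id } = cx.lower_node_id(NodeId(42));
    println!("node_id = {:?}, hir_id = {:?}", node_id, hir_id);
}
```

Callers that only need the old behaviour keep appending `.node_id`, which is why so many one-line changes above read `self.lower_node_id(id).node_id`.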

View File

@ -434,18 +434,22 @@ impl Definitions {
DefPath::make(LOCAL_CRATE, index, |p| self.def_key(p))
}
#[inline]
pub fn opt_def_index(&self, node: ast::NodeId) -> Option<DefIndex> {
self.node_to_def_index.get(&node).cloned()
}
#[inline]
pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option<DefId> {
self.opt_def_index(node).map(DefId::local)
}
#[inline]
pub fn local_def_id(&self, node: ast::NodeId) -> DefId {
self.opt_local_def_id(node).unwrap()
}
#[inline]
pub fn as_local_node_id(&self, def_id: DefId) -> Option<ast::NodeId> {
if def_id.krate == LOCAL_CRATE {
let space_index = def_id.index.address_space().index();
@ -461,10 +465,27 @@ impl Definitions {
}
}
#[inline]
pub fn node_to_hir_id(&self, node_id: ast::NodeId) -> hir::HirId {
self.node_to_hir_id[node_id]
}
pub fn find_node_for_hir_id(&self, hir_id: hir::HirId) -> ast::NodeId {
self.node_to_hir_id
.iter()
.position(|x| *x == hir_id)
.map(|idx| ast::NodeId::new(idx))
.unwrap()
}
#[inline]
pub fn def_index_to_hir_id(&self, def_index: DefIndex) -> hir::HirId {
let space_index = def_index.address_space().index();
let array_index = def_index.as_array_index();
let node_id = self.def_index_to_node[space_index][array_index];
self.node_to_hir_id[node_id]
}
/// Add a definition with a parent definition.
pub fn create_root_def(&mut self,
crate_name: &str,
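The new `find_node_for_hir_id` above inverts the `node_to_hir_id` table with a linear `position()` scan, which is presumably acceptable because it only backs diagnostics. A self-contained sketch of the forward table plus the linear reverse lookup, with a plain `Vec` standing in for rustc's index maps:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct NodeId(usize);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct HirId { owner: u32, local_id: u32 }

struct Definitions {
    // Forward table: indexed by NodeId, like the diff's `node_to_hir_id`.
    node_to_hir_id: Vec<HirId>,
}

impl Definitions {
    fn node_to_hir_id(&self, node: NodeId) -> HirId {
        self.node_to_hir_id[node.0]
    }

    // Reverse lookup by linear scan, mirroring `find_node_for_hir_id`.
    // O(n), which is fine on cold error-reporting paths.
    fn find_node_for_hir_id(&self, hir_id: HirId) -> NodeId {
        self.node_to_hir_id
            .iter()
            .position(|x| *x == hir_id)
            .map(NodeId)
            .unwrap()
    }
}

fn main() {
    let defs = Definitions {
        node_to_hir_id: vec![
            HirId { owner: 0, local_id: 0 },
            HirId { owner: 0, local_id: 1 },
            HirId { owner: 1, local_id: 0 },
        ],
    };
    let hir_id = defs.node_to_hir_id(NodeId(2));
    assert_eq!(defs.find_node_for_hir_id(hir_id), NodeId(2));
}
```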

View File

@ -250,7 +250,7 @@ pub struct Map<'hir> {
pub forest: &'hir Forest,
/// Same as the dep_graph in forest, just available with one fewer
/// deref. This is a gratuitious micro-optimization.
/// deref. This is a gratuitous micro-optimization.
pub dep_graph: DepGraph,
/// NodeIds are sequential integers from 0, so we can be
@ -359,6 +359,7 @@ impl<'hir> Map<'hir> {
}
}
#[inline]
pub fn definitions(&self) -> &Definitions {
&self.definitions
}
@ -379,6 +380,7 @@ impl<'hir> Map<'hir> {
self.definitions.def_path(def_id.index)
}
#[inline]
pub fn local_def_id(&self, node: NodeId) -> DefId {
self.opt_local_def_id(node).unwrap_or_else(|| {
bug!("local_def_id: no entry for `{}`, which has a map of `{:?}`",
@ -386,14 +388,31 @@ impl<'hir> Map<'hir> {
})
}
#[inline]
pub fn opt_local_def_id(&self, node: NodeId) -> Option<DefId> {
self.definitions.opt_local_def_id(node)
}
#[inline]
pub fn as_local_node_id(&self, def_id: DefId) -> Option<NodeId> {
self.definitions.as_local_node_id(def_id)
}
#[inline]
pub fn node_to_hir_id(&self, node_id: NodeId) -> HirId {
self.definitions.node_to_hir_id(node_id)
}
#[inline]
pub fn def_index_to_hir_id(&self, def_index: DefIndex) -> HirId {
self.definitions.def_index_to_hir_id(def_index)
}
#[inline]
pub fn def_index_to_node_id(&self, def_index: DefIndex) -> NodeId {
self.definitions.as_local_node_id(DefId::local(def_index)).unwrap()
}
fn entry_count(&self) -> usize {
self.map.len()
}

View File

@ -129,9 +129,11 @@ pub const CRATE_HIR_ID: HirId = HirId {
pub const DUMMY_HIR_ID: HirId = HirId {
owner: CRATE_DEF_INDEX,
local_id: ItemLocalId(!0)
local_id: DUMMY_ITEM_LOCAL_ID,
};
pub const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId(!0);
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
pub struct Lifetime {
pub id: NodeId,
@ -496,7 +498,7 @@ impl Crate {
&self.impl_items[&id]
}
/// Visits all items in the crate in some determinstic (but
/// Visits all items in the crate in some deterministic (but
/// unspecified) order. If you just need to process every item,
/// but don't care about nesting, this method is the best choice.
///
@ -547,6 +549,7 @@ pub struct Block {
/// without a semicolon, if any
pub expr: Option<P<Expr>>,
pub id: NodeId,
pub hir_id: HirId,
/// Distinguishes between `unsafe { ... }` and `{ ... }`
pub rules: BlockCheckMode,
pub span: Span,
@ -560,6 +563,7 @@ pub struct Block {
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
pub struct Pat {
pub id: NodeId,
pub hir_id: HirId,
pub node: PatKind,
pub span: Span,
}
@ -897,6 +901,7 @@ pub struct Local {
/// Initializer expression to set the value, if any
pub init: Option<P<Expr>>,
pub id: NodeId,
pub hir_id: HirId,
pub span: Span,
pub attrs: ThinVec<Attribute>,
pub source: LocalSource,
@ -987,6 +992,7 @@ pub struct Expr {
pub span: Span,
pub node: Expr_,
pub attrs: ThinVec<Attribute>,
pub hir_id: HirId,
}
impl fmt::Debug for Expr {
@ -1430,6 +1436,7 @@ pub struct InlineAsm {
pub struct Arg {
pub pat: P<Pat>,
pub id: NodeId,
pub hir_id: HirId,
}
/// Represents the header (not the body) of a function declaration
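The structural change in this file is that `Block`, `Pat`, `Local`, `Expr` and `Arg` now carry a `hir_id` alongside the old `id`, where a `HirId` names a node as an item-local id inside an owning item and `DUMMY_ITEM_LOCAL_ID` reserves `!0` as a sentinel. A toy model of those ids (stand-in types, not the real definitions; the concrete values are made up):

```rust
// Simplified stand-ins for the ids in the hunk above; not the real rustc types.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DefIndex(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ItemLocalId(u32);

// A HirId names a node as "item-local id N inside owner item O".
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct HirId {
    owner: DefIndex,
    local_id: ItemLocalId,
}

// Sentinel values, analogous to `DUMMY_ITEM_LOCAL_ID = ItemLocalId(!0)`.
const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId(!0);
const CRATE_DEF_INDEX: DefIndex = DefIndex(0);
const DUMMY_HIR_ID: HirId = HirId {
    owner: CRATE_DEF_INDEX,
    local_id: DUMMY_ITEM_LOCAL_ID,
};

fn main() {
    // A real id and the dummy never collide, because !0 is reserved.
    let real = HirId { owner: DefIndex(3), local_id: ItemLocalId(0) };
    assert_ne!(real, DUMMY_HIR_ID);
    println!("dummy local_id = {:?}", DUMMY_HIR_ID.local_id);
}
```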

View File

@ -14,7 +14,7 @@ use hir::map::DefPathHash;
use ich::{self, CachingCodemapView};
use session::config::DebugInfoLevel::NoDebugInfo;
use ty;
use util::nodemap::NodeMap;
use util::nodemap::{NodeMap, ItemLocalMap};
use std::hash as std_hash;
use std::collections::{HashMap, HashSet, BTreeMap};
@ -358,6 +358,18 @@ pub fn hash_stable_nodemap<'a, 'tcx, 'gcx, V, W>(
});
}
pub fn hash_stable_itemlocalmap<'a, 'tcx, 'gcx, V, W>(
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>,
map: &ItemLocalMap<V>)
where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
W: StableHasherResult,
{
hash_stable_hashmap(hcx, hasher, map, |_, local_id| {
*local_id
});
}
pub fn hash_stable_btreemap<'a, 'tcx, 'gcx, K, V, SK, F, W>(
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
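`hash_stable_itemlocalmap` simply reuses `hash_stable_hashmap` with the `ItemLocalId` itself as the stable key. The general problem these helpers solve is hashing a `HashMap` independently of its iteration order; one common way to do that, shown below as a rough sketch rather than the actual rustc implementation, is to project every key to an orderable stable key, sort, and hash in that order (`DefaultHasher` stands in for the real `StableHasher`):

```rust
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash a HashMap deterministically: project each key to a stable, orderable
// key, sort the entries by it, then feed them to the hasher in that order.
fn hash_map_stably<K, V, SK, F>(map: &HashMap<K, V>, hasher: &mut DefaultHasher, to_stable_key: F)
where
    V: Hash,
    SK: Ord + Hash,
    F: Fn(&K) -> SK,
{
    let mut entries: Vec<(SK, &V)> = map.iter().map(|(k, v)| (to_stable_key(k), v)).collect();
    entries.sort_by(|a, b| a.0.cmp(&b.0));
    for (sk, v) in entries {
        sk.hash(hasher);
        v.hash(hasher);
    }
}

fn main() {
    let mut m = HashMap::new();
    m.insert("b", 2u32);
    m.insert("a", 1u32);

    let mut h1 = DefaultHasher::new();
    hash_map_stably(&m, &mut h1, |k| k.to_string());
    let mut h2 = DefaultHasher::new();
    hash_map_stably(&m, &mut h2, |k| k.to_string());
    // The digest no longer depends on the map's internal ordering.
    assert_eq!(h1.finish(), h2.finish());
}
```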

View File

@ -359,6 +359,7 @@ impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::B
ref stmts,
ref expr,
id,
hir_id: _,
rules,
span,
targeted_by_break,
@ -423,6 +424,7 @@ impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::P
let hir::Pat {
id,
hir_id: _,
ref node,
ref span
} = *self;
@ -504,6 +506,7 @@ impl_stable_hash_for!(struct hir::Local {
ty,
init,
id,
hir_id,
span,
attrs,
source
@ -551,6 +554,7 @@ impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::E
hcx.while_hashing_hir_bodies(true, |hcx| {
let hir::Expr {
id,
hir_id: _,
ref span,
ref node,
ref attrs
@ -1023,7 +1027,8 @@ impl_stable_hash_for!(enum hir::Stmt_ {
impl_stable_hash_for!(struct hir::Arg {
pat,
id
id,
hir_id
});
impl_stable_hash_for!(struct hir::Body {

View File

@ -11,7 +11,7 @@
//! This module contains `HashStable` implementations for various data types
//! from rustc::ty in no particular order.
use ich::{self, StableHashingContext, NodeIdHashingMode};
use ich::StableHashingContext;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
use std::hash as std_hash;
@ -624,68 +624,6 @@ impl_stable_hash_for!(struct ty::ExistentialProjection<'tcx> {
ty
});
impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for ty::TypeckTables<'gcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::TypeckTables {
ref type_dependent_defs,
ref node_types,
ref node_substs,
ref adjustments,
ref pat_binding_modes,
ref upvar_capture_map,
ref closure_tys,
ref closure_kinds,
ref generator_interiors,
ref generator_sigs,
ref liberated_fn_sigs,
ref fru_field_types,
ref cast_kinds,
ref used_trait_imports,
tainted_by_errors,
ref free_region_map,
} = *self;
hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
ich::hash_stable_nodemap(hcx, hasher, type_dependent_defs);
ich::hash_stable_nodemap(hcx, hasher, node_types);
ich::hash_stable_nodemap(hcx, hasher, node_substs);
ich::hash_stable_nodemap(hcx, hasher, adjustments);
ich::hash_stable_nodemap(hcx, hasher, pat_binding_modes);
ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| {
let ty::UpvarId {
var_id,
closure_expr_id
} = *up_var_id;
let var_def_id = hcx.tcx().hir.local_def_id(var_id);
let closure_def_id = hcx.tcx().hir.local_def_id(closure_expr_id);
(hcx.def_path_hash(var_def_id), hcx.def_path_hash(closure_def_id))
});
ich::hash_stable_nodemap(hcx, hasher, closure_tys);
ich::hash_stable_nodemap(hcx, hasher, closure_kinds);
ich::hash_stable_nodemap(hcx, hasher, generator_interiors);
ich::hash_stable_nodemap(hcx, hasher, generator_sigs);
ich::hash_stable_nodemap(hcx, hasher, liberated_fn_sigs);
ich::hash_stable_nodemap(hcx, hasher, fru_field_types);
ich::hash_stable_nodemap(hcx, hasher, cast_kinds);
ich::hash_stable_hashset(hcx, hasher, used_trait_imports, |hcx, def_id| {
hcx.def_path_hash(*def_id)
});
tainted_by_errors.hash_stable(hcx, hasher);
free_region_map.hash_stable(hcx, hasher);
})
}
}
impl_stable_hash_for!(enum ty::fast_reject::SimplifiedType {
BoolSimplifiedType,
CharSimplifiedType,

View File

@ -14,7 +14,7 @@ pub use self::fingerprint::Fingerprint;
pub use self::caching_codemap_view::CachingCodemapView;
pub use self::hcx::{StableHashingContext, NodeIdHashingMode, hash_stable_hashmap,
hash_stable_hashset, hash_stable_nodemap,
hash_stable_btreemap};
hash_stable_btreemap, hash_stable_itemlocalmap};
mod fingerprint;
mod caching_codemap_view;
mod hcx;

View File

@ -169,7 +169,7 @@ impl<'a, 'gcx, 'tcx> At<'a, 'gcx, 'tcx> {
}
/// Sets the "trace" values that will be used for
/// error-repporting, but doesn't actually perform any operation
/// error-reporting, but doesn't actually perform any operation
/// yet (this is useful when you want to set the trace using
/// distinct values from those you wish to operate upon).
pub fn trace<T>(self,

View File

@ -913,7 +913,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
}
infer::UpvarRegion(ref upvar_id, _) => {
format!(" for capture of `{}` by closure",
self.tcx.local_var_name_str(upvar_id.var_id).to_string())
self.tcx.local_var_name_str_def_index(upvar_id.var_id))
}
};

View File

@ -8,13 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::{self, Local, Pat, Body};
use hir::{self, Local, Pat, Body, HirId};
use hir::intravisit::{self, Visitor, NestedVisitorMap};
use infer::InferCtxt;
use infer::type_variable::TypeVariableOrigin;
use ty::{self, Ty, TyInfer, TyVar};
use syntax::ast::NodeId;
use syntax_pos::Span;
struct FindLocalByTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
@ -26,7 +24,7 @@ struct FindLocalByTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
}
impl<'a, 'gcx, 'tcx> FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
fn node_matches_type(&mut self, node_id: NodeId) -> bool {
fn node_matches_type(&mut self, node_id: HirId) -> bool {
let ty_opt = self.infcx.in_progress_tables.and_then(|tables| {
tables.borrow().node_id_to_type_opt(node_id)
});
@ -56,7 +54,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
}
fn visit_local(&mut self, local: &'gcx Local) {
if self.found_local_pattern.is_none() && self.node_matches_type(local.id) {
if self.found_local_pattern.is_none() && self.node_matches_type(local.hir_id) {
self.found_local_pattern = Some(&*local.pat);
}
intravisit::walk_local(self, local);
@ -64,7 +62,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
fn visit_body(&mut self, body: &'gcx Body) {
for argument in &body.arguments {
if self.found_arg_pattern.is_none() && self.node_matches_type(argument.id) {
if self.found_arg_pattern.is_none() && self.node_matches_type(argument.hir_id) {
self.found_arg_pattern = Some(&*argument.pat);
}
}

View File

@ -45,8 +45,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
err.span_note(span,
&format!("...so that closure can access `{}`",
self.tcx
.local_var_name_str(upvar_id.var_id)
.to_string()));
.local_var_name_str_def_index(upvar_id.var_id)));
}
infer::InfStackClosure(span) => {
err.span_note(span, "...so that closure does not outlive its stack frame");
@ -176,18 +175,19 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
E0313,
"lifetime of borrowed pointer outlives lifetime \
of captured variable `{}`...",
self.tcx.local_var_name_str(upvar_id.var_id));
self.tcx
.local_var_name_str_def_index(upvar_id.var_id));
self.tcx.note_and_explain_region(&mut err,
"...the borrowed pointer is valid for ",
sub,
"...");
self.tcx
.note_and_explain_region(&mut err,
&format!("...but `{}` is only valid for ",
self.tcx
.local_var_name_str(upvar_id.var_id)),
sup,
"");
.note_and_explain_region(
&mut err,
&format!("...but `{}` is only valid for ",
self.tcx.local_var_name_str_def_index(upvar_id.var_id)),
sup,
"");
err
}
infer::InfStackClosure(span) => {

View File

@ -46,7 +46,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
.iter()
.enumerate()
.filter_map(|(index, arg)| {
let ty = tables.borrow().node_id_to_type(arg.id);
let ty = tables.borrow().node_id_to_type(arg.hir_id);
let mut found_anon_region = false;
let new_arg_ty = self.tcx
.fold_regions(&ty, &mut false, |r, _| if *r == *anon_region {

View File

@ -589,7 +589,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
(result, map)
}
/// Searches the region constriants created since `snapshot` was started
/// Searches the region constraints created since `snapshot` was started
/// and checks to determine whether any of the skolemized regions created
/// in `skol_map` would "escape" -- meaning that they are related to
/// other regions in some way. If so, the higher-ranked subtyping doesn't

View File

@ -46,7 +46,7 @@ pub trait LatticeDir<'f, 'gcx: 'f+'tcx, 'tcx: 'f> : TypeRelation<'f, 'gcx, 'tcx>
// the LUB/GLB of `a` and `b` as appropriate.
//
// Subtle hack: ordering *may* be significant here. This method
// relates `v` to `a` first, which may help us to avoid unecessary
// relates `v` to `a` first, which may help us to avoid unnecessary
// type variable obligations. See caller for details.
fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
}

View File

@ -358,8 +358,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> {
impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> {
/// Used only by `rustc_typeck` during body type-checking/inference,
/// will initialize `in_progress_tables` with fresh `TypeckTables`.
pub fn with_fresh_in_progress_tables(mut self) -> Self {
self.fresh_tables = Some(RefCell::new(ty::TypeckTables::empty()));
pub fn with_fresh_in_progress_tables(mut self, table_owner: DefId) -> Self {
self.fresh_tables = Some(RefCell::new(ty::TypeckTables::empty(Some(table_owner))));
self
}
@ -1331,9 +1331,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
{
if let Some(tables) = self.in_progress_tables {
if let Some(id) = self.tcx.hir.as_local_node_id(def_id) {
let hir_id = self.tcx.hir.node_to_hir_id(id);
return tables.borrow()
.closure_kinds
.get(&id)
.closure_kinds()
.get(hir_id)
.cloned()
.map(|(kind, _)| kind);
}
@ -1353,7 +1354,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
pub fn fn_sig(&self, def_id: DefId) -> ty::PolyFnSig<'tcx> {
if let Some(tables) = self.in_progress_tables {
if let Some(id) = self.tcx.hir.as_local_node_id(def_id) {
if let Some(&ty) = tables.borrow().closure_tys.get(&id) {
let hir_id = self.tcx.hir.node_to_hir_id(id);
if let Some(&ty) = tables.borrow().closure_tys().get(hir_id) {
return ty;
}
}

View File

@ -128,7 +128,7 @@ pub enum UndoLogEntry<'tcx> {
/// We added the given `given`
AddGiven(Region<'tcx>, ty::RegionVid),
/// We added a GLB/LUB "combinaton variable"
/// We added a GLB/LUB "combination variable"
AddCombination(CombineMapType, TwoRegions<'tcx>),
/// During skolemization, we sometimes purge entries from the undo

View File

@ -111,8 +111,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx>
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
if !t.needs_infer() {
if !t.needs_infer() && !ty::keep_local(&t) {
t // micro-optimize -- if there is nothing in this type that this fold affects...
// ^ we need to have the `keep_local` check to un-default
// defaulted tuples.
} else {
let t = self.infcx.shallow_resolve(t);
match t.sty {
@ -131,6 +133,12 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx>
ty::TyInfer(_) => {
bug!("Unexpected type in full type resolver: {:?}", t);
}
ty::TyTuple(tys, true) => {
// Un-default defaulted tuples - we are going to a
// different infcx, and the default will just cause
// pollution.
self.tcx().intern_tup(tys, false)
}
_ => {
t.super_fold_with(self)
}

View File

@ -69,7 +69,7 @@ pub struct LintStore {
/// is true if the lint group was added by a plugin.
lint_groups: FxHashMap<&'static str, (Vec<LintId>, bool)>,
/// Extra info for future incompatibility lints, descibing the
/// Extra info for future incompatibility lints, describing the
/// issue or RFC that caused the incompatibility.
future_incompatible: FxHashMap<LintId, FutureIncompatibleInfo>,
}
@ -986,7 +986,7 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut cx = LateContext {
tcx,
tables: &ty::TypeckTables::empty(),
tables: &ty::TypeckTables::empty(None),
param_env: ty::ParamEnv::empty(Reveal::UserFacing),
access_levels,
lint_sess: LintSession::new(&tcx.sess.lint_store),

View File

@ -94,8 +94,8 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
}
}
fn lookup_and_handle_method(&mut self, id: ast::NodeId) {
self.check_def_id(self.tables.type_dependent_defs[&id].def_id());
fn lookup_and_handle_method(&mut self, id: hir::HirId) {
self.check_def_id(self.tables.type_dependent_defs()[id].def_id());
}
fn handle_field_access(&mut self, lhs: &hir::Expr, name: ast::Name) {
@ -119,7 +119,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, def: Def,
pats: &[codemap::Spanned<hir::FieldPat>]) {
let variant = match self.tables.node_id_to_type(lhs.id).sty {
let variant = match self.tables.node_id_to_type(lhs.hir_id).sty {
ty::TyAdt(adt, _) => adt.variant_of_def(def),
_ => span_bug!(lhs.span, "non-ADT in struct pattern")
};
@ -235,11 +235,11 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> {
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
match expr.node {
hir::ExprPath(ref qpath @ hir::QPath::TypeRelative(..)) => {
let def = self.tables.qpath_def(qpath, expr.id);
let def = self.tables.qpath_def(qpath, expr.hir_id);
self.handle_definition(def);
}
hir::ExprMethodCall(..) => {
self.lookup_and_handle_method(expr.id);
self.lookup_and_handle_method(expr.hir_id);
}
hir::ExprField(ref lhs, ref name) => {
self.handle_field_access(&lhs, name.node);
@ -282,7 +282,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> {
self.handle_field_pattern_match(pat, path.def, fields);
}
PatKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => {
let def = self.tables.qpath_def(qpath, pat.id);
let def = self.tables.qpath_def(qpath, pat.hir_id);
self.handle_definition(def);
}
_ => ()
@ -425,7 +425,7 @@ fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let mut symbol_visitor = MarkSymbolVisitor {
worklist,
tcx,
tables: &ty::TypeckTables::empty(),
tables: &ty::TypeckTables::empty(None),
live_symbols: box FxHashSet(),
struct_has_extern_repr: false,
ignore_non_const_paths: false,

View File

@ -165,7 +165,7 @@ impl<'a, 'tcx> Visitor<'tcx> for EffectCheckVisitor<'a, 'tcx> {
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
match expr.node {
hir::ExprMethodCall(..) => {
let def_id = self.tables.type_dependent_defs[&expr.id].def_id();
let def_id = self.tables.type_dependent_defs()[expr.hir_id].def_id();
let sig = self.tcx.fn_sig(def_id);
debug!("effect: method call case, signature is {:?}",
sig);
@ -262,7 +262,7 @@ impl<'a, 'tcx> Visitor<'tcx> for EffectCheckVisitor<'a, 'tcx> {
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut visitor = EffectCheckVisitor {
tcx,
tables: &ty::TypeckTables::empty(),
tables: &ty::TypeckTables::empty(None),
body_id: hir::BodyId { node_id: ast::CRATE_NODE_ID },
unsafe_context: UnsafeContext::new(SafeContext),
};

View File

@ -296,7 +296,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
debug!("consume_body(body={:?})", body);
for arg in &body.arguments {
let arg_ty = return_if_err!(self.mc.node_ty(arg.pat.id));
let arg_ty = return_if_err!(self.mc.node_ty(arg.pat.hir_id));
let fn_body_scope_r = self.tcx().node_scope_region(body.value.id);
let arg_cmt = self.mc.cat_rvalue(
@ -541,7 +541,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
}
ty::TyError => { }
_ => {
let def_id = self.mc.tables.type_dependent_defs[&call.id].def_id();
let def_id = self.mc.tables.type_dependent_defs()[call.hir_id].def_id();
match OverloadedCallType::from_method_id(self.tcx(), def_id) {
FnMutOverloadedCall => {
let call_scope_r = self.tcx().node_scope_region(call.id);
@ -801,7 +801,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
pat);
return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| {
if let PatKind::Binding(..) = pat.node {
let bm = *self.mc.tables.pat_binding_modes.get(&pat.id)
let bm = *self.mc.tables.pat_binding_modes().get(pat.hir_id)
.expect("missing binding mode");
match bm {
ty::BindByReference(..) =>
@ -827,10 +827,11 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| {
if let PatKind::Binding(_, def_id, ..) = pat.node {
debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode);
let bm = *mc.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode");
let bm = *mc.tables.pat_binding_modes().get(pat.hir_id)
.expect("missing binding mode");
// pat_ty: the type of the binding being produced.
let pat_ty = return_if_err!(mc.node_ty(pat.id));
let pat_ty = return_if_err!(mc.node_ty(pat.hir_id));
// Each match binding is effectively an assignment to the
// binding being produced.
@ -867,7 +868,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
PatKind::Struct(ref qpath, ..) => qpath,
_ => return
};
let def = mc.tables.qpath_def(qpath, pat.id);
let def = mc.tables.qpath_def(qpath, pat.hir_id);
match def {
Def::Variant(variant_did) |
Def::VariantCtor(variant_did, ..) => {
@ -891,10 +892,13 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
self.tcx().with_freevars(closure_expr.id, |freevars| {
for freevar in freevars {
let def_id = freevar.def.def_id();
let id_var = self.tcx().hir.as_local_node_id(def_id).unwrap();
let upvar_id = ty::UpvarId { var_id: id_var,
closure_expr_id: closure_expr.id };
let var_def_id = freevar.def.def_id();
debug_assert!(var_def_id.is_local());
let closure_def_id = self.tcx().hir.local_def_id(closure_expr.id);
let upvar_id = ty::UpvarId {
var_id: var_def_id.index,
closure_expr_id: closure_def_id.index
};
let upvar_capture = self.mc.tables.upvar_capture(upvar_id);
let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
fn_decl_span,
@ -927,8 +931,9 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
-> mc::McResult<mc::cmt<'tcx>> {
// Create the cmt for the variable being borrowed, from the
// caller's perspective
let var_id = self.tcx().hir.as_local_node_id(upvar_def.def_id()).unwrap();
let var_ty = self.mc.node_ty(var_id)?;
let var_node_id = self.tcx().hir.as_local_node_id(upvar_def.def_id()).unwrap();
let var_hir_id = self.tcx().hir.node_to_hir_id(var_node_id);
let var_ty = self.mc.node_ty(var_hir_id)?;
self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def)
}
}
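The capture hunks above stop keying `UpvarId` by `NodeId`s and instead use the `DefIndex` of the captured variable and of the enclosing closure. A hedged sketch of that keying, with simplified stand-in id types and an invented `UpvarCapture` enum as the map value:

```rust
use std::collections::HashMap;

// Simplified stand-ins for the ids involved; not the real rustc definitions.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DefIndex(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DefId { krate: u32, index: DefIndex }

// After the change above, an UpvarId is keyed by the DefIndex of the
// captured variable and of the closure, instead of by NodeIds.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct UpvarId {
    var_id: DefIndex,
    closure_expr_id: DefIndex,
}

#[derive(Clone, Copy, Debug)]
enum UpvarCapture { ByValue, ByRef }

fn main() {
    let var_def_id = DefId { krate: 0, index: DefIndex(11) };
    let closure_def_id = DefId { krate: 0, index: DefIndex(42) };

    // Both ids are expected to be local, so only the index part is kept.
    assert_eq!(var_def_id.krate, closure_def_id.krate);
    let upvar_id = UpvarId {
        var_id: var_def_id.index,
        closure_expr_id: closure_def_id.index,
    };

    let mut upvar_capture_map: HashMap<UpvarId, UpvarCapture> = HashMap::new();
    upvar_capture_map.insert(upvar_id, UpvarCapture::ByValue);
    println!("{:?}", upvar_capture_map[&upvar_id]);
}
```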

View File

@ -146,13 +146,13 @@ impl<'a, 'tcx> Visitor<'tcx> for ExprVisitor<'a, 'tcx> {
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
let def = if let hir::ExprPath(ref qpath) = expr.node {
self.tables.qpath_def(qpath, expr.id)
self.tables.qpath_def(qpath, expr.hir_id)
} else {
Def::Err
};
if let Def::Fn(did) = def {
if self.def_id_is_transmute(did) {
let typ = self.tables.node_id_to_type(expr.id);
let typ = self.tables.node_id_to_type(expr.hir_id);
let sig = typ.fn_sig(self.tcx);
let from = sig.inputs().skip_binder()[0];
let to = *sig.output().skip_binder();

View File

@ -70,7 +70,7 @@ pub use self::Note::*;
use self::Aliasability::*;
use middle::region::RegionMaps;
use hir::def_id::DefId;
use hir::def_id::{DefId, DefIndex};
use hir::map as hir_map;
use infer::InferCtxt;
use hir::def::{Def, CtorKind};
@ -190,7 +190,7 @@ pub type cmt<'tcx> = Rc<cmt_<'tcx>>;
pub enum ImmutabilityBlame<'tcx> {
ImmLocal(ast::NodeId),
ClosureEnv(ast::NodeId),
ClosureEnv(DefIndex),
LocalDeref(ast::NodeId),
AdtFieldDeref(&'tcx ty::AdtDef, &'tcx ty::FieldDef)
}
@ -334,7 +334,9 @@ impl MutabilityCategory {
let ret = match tcx.hir.get(id) {
hir_map::NodeLocal(p) => match p.node {
PatKind::Binding(..) => {
let bm = *tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
let bm = *tables.pat_binding_modes()
.get(p.hir_id)
.expect("missing binding mode");
if bm == ty::BindByValue(hir::MutMutable) {
McDeclared
} else {
@ -435,7 +437,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
}
fn resolve_type_vars_or_error(&self,
id: ast::NodeId,
id: hir::HirId,
ty: Option<Ty<'tcx>>)
-> McResult<Ty<'tcx>> {
match ty {
@ -451,33 +453,41 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
// FIXME
None if self.is_tainted_by_errors() => Err(()),
None => {
let id = self.tcx.hir.definitions().find_node_for_hir_id(id);
bug!("no type for node {}: {} in mem_categorization",
id, self.tcx.hir.node_to_string(id));
}
}
}
pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
self.resolve_type_vars_or_error(id, self.tables.node_id_to_type_opt(id))
pub fn node_ty(&self,
hir_id: hir::HirId)
-> McResult<Ty<'tcx>> {
self.resolve_type_vars_or_error(hir_id,
self.tables.node_id_to_type_opt(hir_id))
}
pub fn expr_ty(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
self.resolve_type_vars_or_error(expr.id, self.tables.expr_ty_opt(expr))
self.resolve_type_vars_or_error(expr.hir_id, self.tables.expr_ty_opt(expr))
}
pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
self.resolve_type_vars_or_error(expr.id, self.tables.expr_ty_adjusted_opt(expr))
self.resolve_type_vars_or_error(expr.hir_id, self.tables.expr_ty_adjusted_opt(expr))
}
fn pat_ty(&self, pat: &hir::Pat) -> McResult<Ty<'tcx>> {
let base_ty = self.node_ty(pat.id)?;
let base_ty = self.node_ty(pat.hir_id)?;
// FIXME (Issue #18207): This code detects whether we are
// looking at a `ref x`, and if so, figures out what the type
// *being borrowed* is. But ideally we would put in a more
// fundamental fix to this conflated use of the node id.
let ret_ty = match pat.node {
PatKind::Binding(..) => {
let bm = *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode");
let bm = *self.tables
.pat_binding_modes()
.get(pat.hir_id)
.expect("missing binding mode");
if let ty::BindByReference(_) = bm {
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
@ -604,7 +614,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
}
hir::ExprPath(ref qpath) => {
let def = self.tables.qpath_def(qpath, expr.id);
let def = self.tables.qpath_def(qpath, expr.hir_id);
self.cat_def(expr.id, expr.span, expr_ty, def)
}
@ -643,7 +653,13 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
Ok(self.cat_rvalue_node(id, span, expr_ty))
}
Def::Static(_, mutbl) => {
Def::Static(def_id, mutbl) => {
// `#[thread_local]` statics may not outlive the current function.
for attr in &self.tcx.get_attrs(def_id)[..] {
if attr.check_name("thread_local") {
return Ok(self.cat_rvalue_node(id, span, expr_ty));
}
}
Ok(Rc::new(cmt_ {
id:id,
span:span,
@ -684,6 +700,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
fn_node_id: ast::NodeId)
-> McResult<cmt<'tcx>>
{
let fn_hir_id = self.tcx.hir.node_to_hir_id(fn_node_id);
// An upvar can have up to 3 components. We translate first to a
// `Categorization::Upvar`, which is itself a fiction -- it represents the reference to the
// field from the environment.
@ -707,7 +725,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
// FnMut | copied -> &'env mut | upvar -> &'env mut -> &'up bk
// FnOnce | copied | upvar -> &'up bk
let kind = match self.tables.closure_kinds.get(&fn_node_id) {
let kind = match self.tables.closure_kinds().get(fn_hir_id) {
Some(&(kind, _)) => kind,
None => {
let ty = self.node_ty(fn_node_id)?;
@ -718,9 +736,15 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
}
};
let upvar_id = ty::UpvarId { var_id,
closure_expr_id: fn_node_id };
let var_ty = self.node_ty(var_id)?;
let closure_expr_def_index = self.tcx.hir.local_def_id(fn_node_id).index;
let var_def_index = self.tcx.hir.local_def_id(var_id).index;
let upvar_id = ty::UpvarId {
var_id: var_def_index,
closure_expr_id: closure_expr_def_index
};
let var_hir_id = self.tcx.hir.node_to_hir_id(var_id);
let var_ty = self.node_ty(var_hir_id)?;
// Mutability of original variable itself
let var_mutbl = MutabilityCategory::from_local(self.tcx, self.tables, var_id);
@ -755,8 +779,6 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
// If this is a by-ref capture, then the upvar we loaded is
// actually a reference, so we have to add an implicit deref
// for that.
let upvar_id = ty::UpvarId { var_id,
closure_expr_id: fn_node_id };
let upvar_capture = self.tables.upvar_capture(upvar_id);
let cmt_result = match upvar_capture {
ty::UpvarCapture::ByValue => {
@ -794,7 +816,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
// The environment of a closure is guaranteed to
// outlive any bindings introduced in the body of the
// closure itself.
scope: self.tcx.hir.local_def_id(upvar_id.closure_expr_id),
scope: DefId::local(upvar_id.closure_expr_id),
bound_region: ty::BrEnv
}));
@ -1130,7 +1152,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
match pat.node {
PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => {
let def = self.tables.qpath_def(qpath, pat.id);
let def = self.tables.qpath_def(qpath, pat.hir_id);
let (cmt, expected_len) = match def {
Def::Err => {
debug!("access to unresolvable pattern {:?}", pat);
@ -1167,7 +1189,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
PatKind::Struct(ref qpath, ref field_pats, _) => {
// {f1: p1, ..., fN: pN}
let def = self.tables.qpath_def(qpath, pat.id);
let def = self.tables.qpath_def(qpath, pat.hir_id);
let cmt = match def {
Def::Err => {
debug!("access to unresolvable pattern {:?}", pat);

View File

@ -107,10 +107,10 @@ impl<'a, 'tcx> Visitor<'tcx> for ReachableContext<'a, 'tcx> {
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
let def = match expr.node {
hir::ExprPath(ref qpath) => {
Some(self.tables.qpath_def(qpath, expr.id))
Some(self.tables.qpath_def(qpath, expr.hir_id))
}
hir::ExprMethodCall(..) => {
Some(self.tables.type_dependent_defs[&expr.id])
Some(self.tables.type_dependent_defs()[expr.hir_id])
}
_ => None
};
@ -296,6 +296,9 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
hir::ImplItemKind::Type(_) => {}
}
}
hir_map::NodeExpr(&hir::Expr { node: hir::ExprClosure(.., body, _), .. }) => {
self.visit_nested_body(body);
}
// Nothing to recurse on for these
hir_map::NodeForeignItem(_) |
hir_map::NodeVariant(_) |
@ -375,7 +378,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) ->
});
let mut reachable_context = ReachableContext {
tcx,
tables: &ty::TypeckTables::empty(),
tables: &ty::TypeckTables::empty(None),
reachable_symbols: NodeSet(),
worklist: Vec::new(),
any_library,

View File

@ -764,7 +764,7 @@ impl<'tcx> Debug for TerminatorKind<'tcx> {
impl<'tcx> TerminatorKind<'tcx> {
/// Write the "head" part of the terminator; that is, its name and the data it uses to pick the
/// successor basic block, if any. The only information not inlcuded is the list of possible
/// successor basic block, if any. The only information not included is the list of possible
/// successors, which may be rendered differently between the text and the graphviz format.
pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
use self::TerminatorKind::*;

View File

@ -112,7 +112,7 @@ pub struct Session {
/// Map from imported macro spans (which consist of
/// the localized span for the macro body) to the
/// macro name and defintion span in the source crate.
/// macro name and definition span in the source crate.
pub imported_macro_spans: RefCell<HashMap<Span, (String, Span)>>,
incr_comp_session: RefCell<IncrCompSession>,
@ -828,7 +828,7 @@ pub fn compile_result_from_err_count(err_count: usize) -> CompileResult {
#[inline(never)]
pub fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! {
// this wrapper mostly exists so I don't have to write a fully
// qualified path of None::<Span> inside the bug!() macro defintion
// qualified path of None::<Span> inside the bug!() macro definition
opt_span_bug_fmt(file, line, None::<Span>, args);
}

View File

@ -111,8 +111,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
}
}
// returns if `cond` not occuring implies that `error` does not occur - i.e. that
// `error` occuring implies that `cond` occurs.
// returns if `cond` not occurring implies that `error` does not occur - i.e. that
// `error` occurring implies that `cond` occurs.
fn error_implies(&self,
cond: &ty::Predicate<'tcx>,
error: &ty::Predicate<'tcx>)
@ -683,7 +683,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
// Additional context information explaining why the closure only implements
// a particular trait.
if let Some(tables) = self.in_progress_tables {
match tables.borrow().closure_kinds.get(&node_id) {
let tables = tables.borrow();
let closure_hir_id = self.tcx.hir.node_to_hir_id(node_id);
match tables.closure_kinds().get(closure_hir_id) {
Some(&(ty::ClosureKind::FnOnce, Some((span, name)))) => {
err.span_note(span, &format!(
"closure is `FnOnce` because it moves the \

View File

@ -1422,7 +1422,7 @@ impl<'tcx> ProjectionCache<'tcx> {
}
/// Try to start normalize `key`; returns an error if
/// normalization already occured (this error corresponds to a
/// normalization already occurred (this error corresponds to a
/// cache hit, so it's actually a good thing).
fn try_start(&mut self, key: ty::ProjectionTy<'tcx>)
-> Result<(), ProjectionCacheEntry<'tcx>> {

View File

@ -31,7 +31,7 @@ use util::nodemap::{DefIdMap, FxHashMap};
///
/// - Parent extraction. In particular, the graph can give you the *immediate*
/// parents of a given specializing impl, which is needed for extracting
/// default items amongst other thigns. In the simple "chain" rule, every impl
/// default items amongst other things. In the simple "chain" rule, every impl
/// has at most one parent.
pub struct Graph {
// all impls have a parent; the "root" impls have as their parent the def_id
@ -95,7 +95,7 @@ impl<'a, 'gcx, 'tcx> Children {
}
/// Attempt to insert an impl into this set of children, while comparing for
/// specialiation relationships.
/// specialization relationships.
fn insert(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId,
@ -206,7 +206,7 @@ impl<'a, 'gcx, 'tcx> Graph {
// if the reference itself contains an earlier error (e.g., due to a
// resolution failure), then we just insert the impl at the top level of
// the graph and claim that there's no overlap (in order to supress
// the graph and claim that there's no overlap (in order to suppress
// bogus errors).
if trait_ref.references_error() {
debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \

View File

@ -29,7 +29,7 @@ use ty::subst::Substs;
/// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is
/// `false`.
///
/// 2. A thin-to-fat coercon involves unsizing the underlying data. We start
/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
/// with a thin pointer, deref a number of times, unsize the underlying data,
/// then autoref. The 'unsize' phase may change a fixed length array to a
/// dynamically sized one, a concrete object to a trait object, or statically
@ -52,7 +52,7 @@ use ty::subst::Substs;
/// that case, we have the pointer we need coming in, so there are no
/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
/// At some point, of course, `Box` should move out of the compiler, in which
/// case this is analogous to transformating a struct. E.g., Box<[i32; 4]> ->
/// case this is analogous to transforming a struct. E.g., Box<[i32; 4]> ->
/// Box<[i32]> is an `Adjust::Unsize` with the target `Box<[i32]>`.
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub struct Adjustment<'tcx> {

View File

@ -14,12 +14,13 @@ use dep_graph::DepGraph;
use errors::DiagnosticBuilder;
use session::Session;
use middle;
use hir::TraitMap;
use hir::{TraitMap};
use hir::def::{Def, ExportMap};
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use hir::map as hir_map;
use hir::map::DefPathHash;
use lint::{self, Lint};
use ich::{self, StableHashingContext, NodeIdHashingMode};
use middle::free_region::FreeRegionMap;
use middle::lang_items;
use middle::resolve_lifetime;
@ -42,15 +43,18 @@ use ty::inhabitedness::DefIdForest;
use ty::maps;
use ty::steal::Steal;
use ty::BindingMode;
use util::nodemap::{NodeMap, NodeSet, DefIdSet};
use util::nodemap::{NodeMap, NodeSet, DefIdSet, ItemLocalMap};
use util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::accumulate_vec::AccumulateVec;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
use arena::{TypedArena, DroplessArena};
use rustc_data_structures::indexed_vec::IndexVec;
use std::borrow::Borrow;
use std::cell::{Cell, RefCell};
use std::cmp::Ordering;
use std::collections::hash_map::{self, Entry};
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::Deref;
@ -207,58 +211,155 @@ pub struct CommonTypes<'tcx> {
pub re_erased: Region<'tcx>,
}
pub struct LocalTableInContext<'a, V: 'a> {
local_id_root: Option<DefId>,
data: &'a ItemLocalMap<V>
}
/// Validate that the given HirId (respectively its `local_id` part) can be
/// safely used as a key in the tables of a TypeckTable. For that to be
/// the case, the HirId must have the same `owner` as all the other IDs in
/// this table (signified by `local_id_root`). Otherwise the HirId
/// would be in a different frame of reference and using its `local_id`
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
fn validate_hir_id_for_typeck_tables(local_id_root: Option<DefId>,
hir_id: hir::HirId,
mut_access: bool) {
if cfg!(debug_assertions) {
if let Some(local_id_root) = local_id_root {
if hir_id.owner != local_id_root.index {
ty::tls::with(|tcx| {
let node_id = tcx.hir
.definitions()
.find_node_for_hir_id(hir_id);
bug!("node {} with HirId::owner {:?} cannot be placed in \
TypeckTables with local_id_root {:?}",
tcx.hir.node_to_string(node_id),
DefId::local(hir_id.owner),
local_id_root)
});
}
} else {
// We use "Null Object" TypeckTables in some of the analysis passes.
// These are just expected to be empty and their `local_id_root` is
// `None`. Therefore we cannot verify whether a given `HirId` would
// be a valid key for the given table. Instead we make sure that
// nobody tries to write to such a Null Object table.
if mut_access {
bug!("access to invalid TypeckTables")
}
}
}
}
impl<'a, V> LocalTableInContext<'a, V> {
pub fn contains_key(&self, id: hir::HirId) -> bool {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.data.contains_key(&id.local_id)
}
pub fn get(&self, id: hir::HirId) -> Option<&V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.data.get(&id.local_id)
}
pub fn iter(&self) -> hash_map::Iter<hir::ItemLocalId, V> {
self.data.iter()
}
}
impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
type Output = V;
fn index(&self, key: hir::HirId) -> &V {
self.get(key).expect("LocalTableInContext: key not found")
}
}
pub struct LocalTableInContextMut<'a, V: 'a> {
local_id_root: Option<DefId>,
data: &'a mut ItemLocalMap<V>
}
impl<'a, V> LocalTableInContextMut<'a, V> {
pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.get_mut(&id.local_id)
}
pub fn entry(&mut self, id: hir::HirId) -> Entry<hir::ItemLocalId, V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.entry(id.local_id)
}
pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.insert(id.local_id, val)
}
pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.remove(&id.local_id)
}
}
#[derive(RustcEncodable, RustcDecodable)]
pub struct TypeckTables<'tcx> {
/// The HirId::owner all ItemLocalIds in this table are relative to.
pub local_id_root: Option<DefId>,
/// Resolved definitions for `<T>::X` associated paths and
/// method calls, including those of overloaded operators.
pub type_dependent_defs: NodeMap<Def>,
type_dependent_defs: ItemLocalMap<Def>,
/// Stores the types for various nodes in the AST. Note that this table
/// is not guaranteed to be populated until after typeck. See
/// typeck::check::fn_ctxt for details.
pub node_types: NodeMap<Ty<'tcx>>,
node_types: ItemLocalMap<Ty<'tcx>>,
/// Stores the type parameters which were substituted to obtain the type
/// of this node. This only applies to nodes that refer to entities
/// parameterized by type parameters, such as generic fns, types, or
/// other items.
pub node_substs: NodeMap<&'tcx Substs<'tcx>>,
node_substs: ItemLocalMap<&'tcx Substs<'tcx>>,
pub adjustments: NodeMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
// Stores the actual binding mode for all instances of hir::BindingAnnotation.
pub pat_binding_modes: NodeMap<BindingMode>,
pat_binding_modes: ItemLocalMap<BindingMode>,
/// Borrows
pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
/// Records the type of each closure.
pub closure_tys: NodeMap<ty::PolyFnSig<'tcx>>,
closure_tys: ItemLocalMap<ty::PolyFnSig<'tcx>>,
/// Records the kind of each closure and the span and name of the variable
/// that caused the closure to be this kind.
pub closure_kinds: NodeMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>,
closure_kinds: ItemLocalMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>,
pub generator_sigs: NodeMap<Option<ty::GenSig<'tcx>>>,
pub generator_sigs: ItemLocalMap<Option<ty::GenSig<'tcx>>>,
pub generator_interiors: NodeMap<ty::GeneratorInterior<'tcx>>,
pub generator_interiors: ItemLocalMap<ty::GeneratorInterior<'tcx>>,
/// For each fn, records the "liberated" types of its arguments
/// and return type. Liberated means that all bound regions
/// (including late-bound regions) are replaced with free
/// equivalents. This table is not used in trans (since regions
/// are erased there) and hence is not serialized to metadata.
pub liberated_fn_sigs: NodeMap<ty::FnSig<'tcx>>,
liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
/// For each FRU expression, record the normalized types of the fields
/// of the struct - this is needed because it is non-trivial to
/// normalize while preserving regions. This table is used only in
/// MIR construction and hence is not serialized to metadata.
pub fru_field_types: NodeMap<Vec<Ty<'tcx>>>,
fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>,
/// Maps a cast expression to its kind. This is keyed on the
/// *from* expression of the cast, not the cast itself.
pub cast_kinds: NodeMap<ty::cast::CastKind>,
cast_kinds: ItemLocalMap<ty::cast::CastKind>,
/// Set of trait imports actually used in the method resolution.
/// This is used for warning unused imports.
@ -275,21 +376,22 @@ pub struct TypeckTables<'tcx> {
}
impl<'tcx> TypeckTables<'tcx> {
pub fn empty() -> TypeckTables<'tcx> {
pub fn empty(local_id_root: Option<DefId>) -> TypeckTables<'tcx> {
TypeckTables {
type_dependent_defs: NodeMap(),
node_types: FxHashMap(),
node_substs: NodeMap(),
adjustments: NodeMap(),
pat_binding_modes: NodeMap(),
local_id_root,
type_dependent_defs: ItemLocalMap(),
node_types: ItemLocalMap(),
node_substs: ItemLocalMap(),
adjustments: ItemLocalMap(),
pat_binding_modes: ItemLocalMap(),
upvar_capture_map: FxHashMap(),
generator_sigs: NodeMap(),
generator_interiors: NodeMap(),
closure_tys: NodeMap(),
closure_kinds: NodeMap(),
liberated_fn_sigs: NodeMap(),
fru_field_types: NodeMap(),
cast_kinds: NodeMap(),
generator_sigs: ItemLocalMap(),
generator_interiors: ItemLocalMap(),
closure_tys: ItemLocalMap(),
closure_kinds: ItemLocalMap(),
liberated_fn_sigs: ItemLocalMap(),
fru_field_types: ItemLocalMap(),
cast_kinds: ItemLocalMap(),
used_trait_imports: DefIdSet(),
tainted_by_errors: false,
free_region_map: FreeRegionMap::new(),
@ -297,41 +399,87 @@ impl<'tcx> TypeckTables<'tcx> {
}
/// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
pub fn qpath_def(&self, qpath: &hir::QPath, id: NodeId) -> Def {
pub fn qpath_def(&self, qpath: &hir::QPath, id: hir::HirId) -> Def {
match *qpath {
hir::QPath::Resolved(_, ref path) => path.def,
hir::QPath::TypeRelative(..) => {
self.type_dependent_defs.get(&id).cloned().unwrap_or(Def::Err)
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.type_dependent_defs.get(&id.local_id).cloned().unwrap_or(Def::Err)
}
}
}
pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> {
pub fn type_dependent_defs(&self) -> LocalTableInContext<Def> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.type_dependent_defs
}
}
pub fn type_dependent_defs_mut(&mut self) -> LocalTableInContextMut<Def> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.type_dependent_defs
}
}
pub fn node_types(&self) -> LocalTableInContext<Ty<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.node_types
}
}
pub fn node_types_mut(&mut self) -> LocalTableInContextMut<Ty<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.node_types
}
}
pub fn node_id_to_type(&self, id: hir::HirId) -> Ty<'tcx> {
match self.node_id_to_type_opt(id) {
Some(ty) => ty,
None => {
bug!("node_id_to_type: no type for node `{}`",
tls::with(|tcx| tcx.hir.node_to_string(id)))
tls::with(|tcx| {
let id = tcx.hir.definitions().find_node_for_hir_id(id);
tcx.hir.node_to_string(id)
}))
}
}
}
pub fn node_id_to_type_opt(&self, id: NodeId) -> Option<Ty<'tcx>> {
self.node_types.get(&id).cloned()
pub fn node_id_to_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_types.get(&id.local_id).cloned()
}
pub fn node_substs(&self, id: NodeId) -> &'tcx Substs<'tcx> {
self.node_substs.get(&id).cloned().unwrap_or(Substs::empty())
pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<&'tcx Substs<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.node_substs
}
}
pub fn node_substs(&self, id: hir::HirId) -> &'tcx Substs<'tcx> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_substs.get(&id.local_id).cloned().unwrap_or(Substs::empty())
}
pub fn node_substs_opt(&self, id: hir::HirId) -> Option<&'tcx Substs<'tcx>> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_substs.get(&id.local_id).cloned()
}
// Returns the type of a pattern as a monotype. Like @expr_ty, this function
// doesn't provide type parameter substitutions.
pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> {
self.node_id_to_type(pat.id)
self.node_id_to_type(pat.hir_id)
}
pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option<Ty<'tcx>> {
self.node_id_to_type_opt(pat.id)
self.node_id_to_type_opt(pat.hir_id)
}
// Returns the type of an expression as a monotype.
@ -345,16 +493,32 @@ impl<'tcx> TypeckTables<'tcx> {
// ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
// instead of "fn(ty) -> T with T = isize".
pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> {
self.node_id_to_type(expr.id)
self.node_id_to_type(expr.hir_id)
}
pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
self.node_id_to_type_opt(expr.id)
self.node_id_to_type_opt(expr.hir_id)
}
pub fn adjustments(&self) -> LocalTableInContext<Vec<ty::adjustment::Adjustment<'tcx>>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.adjustments
}
}
pub fn adjustments_mut(&mut self)
-> LocalTableInContextMut<Vec<ty::adjustment::Adjustment<'tcx>>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.adjustments
}
}
pub fn expr_adjustments(&self, expr: &hir::Expr)
-> &[ty::adjustment::Adjustment<'tcx>] {
self.adjustments.get(&expr.id).map_or(&[], |a| &a[..])
validate_hir_id_for_typeck_tables(self.local_id_root, expr.hir_id, false);
self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
}
/// Returns the type of `expr`, considering any `Adjustment`
@ -379,15 +543,169 @@ impl<'tcx> TypeckTables<'tcx> {
return false;
}
match self.type_dependent_defs.get(&expr.id) {
match self.type_dependent_defs().get(expr.hir_id) {
Some(&Def::Method(_)) => true,
_ => false
}
}
pub fn pat_binding_modes(&self) -> LocalTableInContext<BindingMode> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.pat_binding_modes
}
}
pub fn pat_binding_modes_mut(&mut self)
-> LocalTableInContextMut<BindingMode> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.pat_binding_modes
}
}
pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
self.upvar_capture_map[&upvar_id]
}
pub fn closure_tys(&self) -> LocalTableInContext<ty::PolyFnSig<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.closure_tys
}
}
pub fn closure_tys_mut(&mut self)
-> LocalTableInContextMut<ty::PolyFnSig<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.closure_tys
}
}
pub fn closure_kinds(&self) -> LocalTableInContext<(ty::ClosureKind,
Option<(Span, ast::Name)>)> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.closure_kinds
}
}
pub fn closure_kinds_mut(&mut self)
-> LocalTableInContextMut<(ty::ClosureKind, Option<(Span, ast::Name)>)> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.closure_kinds
}
}
pub fn liberated_fn_sigs(&self) -> LocalTableInContext<ty::FnSig<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.liberated_fn_sigs
}
}
pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<ty::FnSig<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.liberated_fn_sigs
}
}
pub fn fru_field_types(&self) -> LocalTableInContext<Vec<Ty<'tcx>>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.fru_field_types
}
}
pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<Vec<Ty<'tcx>>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.fru_field_types
}
}
pub fn cast_kinds(&self) -> LocalTableInContext<ty::cast::CastKind> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.cast_kinds
}
}
pub fn cast_kinds_mut(&mut self) -> LocalTableInContextMut<ty::cast::CastKind> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.cast_kinds
}
}
}
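
The accessors added above all follow one pattern: each per-item table is keyed by an item-local id, and every lookup goes through a `LocalTableInContext` wrapper that first checks the queried `HirId` against the table's `local_id_root`. A minimal, standalone sketch of that pattern, with `Owner`, `LocalId`, and `HirId` as simplified stand-ins rather than the real rustc types:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Owner(u32);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct LocalId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct HirId {
    owner: Owner,
    local_id: LocalId,
}

struct LocalTableInContext<'a, V: 'a> {
    local_id_root: Owner,
    data: &'a HashMap<LocalId, V>,
}

impl<'a, V> LocalTableInContext<'a, V> {
    fn get(&self, id: HirId) -> Option<&'a V> {
        // Mirrors the intent of `validate_hir_id_for_typeck_tables`: a lookup
        // with an id from a different owner is a bug, not merely a miss.
        assert_eq!(id.owner, self.local_id_root,
                   "HirId from the wrong owner used to index these tables");
        self.data.get(&id.local_id)
    }
}

struct Tables {
    local_id_root: Owner,
    node_types: HashMap<LocalId, &'static str>,
}

impl Tables {
    fn node_types<'a>(&'a self) -> LocalTableInContext<'a, &'static str> {
        LocalTableInContext {
            local_id_root: self.local_id_root,
            data: &self.node_types,
        }
    }
}

fn main() {
    let mut node_types = HashMap::new();
    node_types.insert(LocalId(0), "i32");
    let tables = Tables { local_id_root: Owner(7), node_types };

    let id = HirId { owner: Owner(7), local_id: LocalId(0) };
    assert_eq!(tables.node_types().get(id), Some(&"i32"));
}
```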
impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for TypeckTables<'gcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::TypeckTables {
local_id_root,
ref type_dependent_defs,
ref node_types,
ref node_substs,
ref adjustments,
ref pat_binding_modes,
ref upvar_capture_map,
ref closure_tys,
ref closure_kinds,
ref liberated_fn_sigs,
ref fru_field_types,
ref cast_kinds,
ref used_trait_imports,
tainted_by_errors,
ref free_region_map,
} = *self;
hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
ich::hash_stable_itemlocalmap(hcx, hasher, type_dependent_defs);
ich::hash_stable_itemlocalmap(hcx, hasher, node_types);
ich::hash_stable_itemlocalmap(hcx, hasher, node_substs);
ich::hash_stable_itemlocalmap(hcx, hasher, adjustments);
ich::hash_stable_itemlocalmap(hcx, hasher, pat_binding_modes);
ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| {
let ty::UpvarId {
var_id,
closure_expr_id
} = *up_var_id;
let local_id_root =
local_id_root.expect("trying to hash invalid TypeckTables");
let var_def_id = DefId {
krate: local_id_root.krate,
index: var_id,
};
let closure_def_id = DefId {
krate: local_id_root.krate,
index: closure_expr_id,
};
(hcx.def_path_hash(var_def_id), hcx.def_path_hash(closure_def_id))
});
ich::hash_stable_itemlocalmap(hcx, hasher, closure_tys);
ich::hash_stable_itemlocalmap(hcx, hasher, closure_kinds);
ich::hash_stable_itemlocalmap(hcx, hasher, liberated_fn_sigs);
ich::hash_stable_itemlocalmap(hcx, hasher, fru_field_types);
ich::hash_stable_itemlocalmap(hcx, hasher, cast_kinds);
ich::hash_stable_hashset(hcx, hasher, used_trait_imports, |hcx, def_id| {
hcx.def_path_hash(*def_id)
});
tainted_by_errors.hash_stable(hcx, hasher);
free_region_map.hash_stable(hcx, hasher);
})
}
}
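
One detail worth noting in the `hash_stable` impl above: every field of `TypeckTables` is destructured explicitly, with no `..`, so adding a field later becomes a compile error until the new field is hashed here too. A tiny standalone sketch of the same trick, using a made-up `Tables` struct and an ad-hoc hash rather than the real stable-hashing machinery:

```rust
struct Tables {
    types: Vec<u32>,
    tainted: bool,
}

fn hash_all(t: &Tables) -> u64 {
    // Adding a field to `Tables` makes this pattern non-exhaustive, so the
    // code stops compiling until the new field is handled here.
    let Tables { ref types, tainted } = *t;
    let mut h = if tainted { 1 } else { 0 };
    for &x in types {
        h = h.wrapping_mul(31).wrapping_add(x as u64);
    }
    h
}

fn main() {
    let t = Tables { types: vec![1, 2, 3], tainted: false };
    println!("hash = {}", hash_all(&t));
}
```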
impl<'tcx> CommonTypes<'tcx> {
@ -1201,7 +1519,7 @@ macro_rules! direct_interners {
}
}
fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool {
pub fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool {
x.has_type_flags(ty::TypeFlags::KEEP_IN_LOCAL_TCX)
}

View File

@ -158,7 +158,10 @@ impl FlagComputation {
self.add_ty(m.ty);
}
&ty::TyTuple(ref ts, _) => {
&ty::TyTuple(ref ts, is_default) => {
if is_default {
self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX);
}
self.add_tys(&ts[..]);
}
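
Taken together, the two hunks above rely on flag propagation: a tuple whose type was defaulted sets `KEEP_IN_LOCAL_TCX` in its flags, composite types union the flags of their components, and the newly public `keep_local` just tests that bit. A simplified, self-contained sketch of that mechanism; the `Ty` enum and flag value below are invented for the example:

```rust
const KEEP_IN_LOCAL_TCX: u32 = 1 << 0;

enum Ty {
    Int,
    // The `bool` plays the role of the `is_default` marker matched above.
    Tuple(Vec<Ty>, bool),
}

fn flags(ty: &Ty) -> u32 {
    match *ty {
        Ty::Int => 0,
        Ty::Tuple(ref elems, is_default) => {
            let mut f = if is_default { KEEP_IN_LOCAL_TCX } else { 0 };
            for elem in elems {
                f |= flags(elem); // flags of components bubble up
            }
            f
        }
    }
}

fn keep_local(ty: &Ty) -> bool {
    flags(ty) & KEEP_IN_LOCAL_TCX != 0
}

fn main() {
    let concrete = Ty::Tuple(vec![Ty::Int], false);
    let defaulted = Ty::Tuple(vec![], true);
    assert!(!keep_local(&concrete));
    assert!(keep_local(&defaulted));
}
```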

View File

@ -24,7 +24,7 @@ use ty::{DefId, DefIdTree};
#[derive(Clone)]
pub struct DefIdForest {
/// The minimal set of DefIds required to represent the whole set.
/// If A and B are DefIds in the DefIdForest, and A is a desecendant
/// If A and B are DefIds in the DefIdForest, and A is a descendant
/// of B, then only B will be in root_ids.
/// We use a SmallVec here because (for its use for caching inhabitedness)
/// it's rare that this will contain even two ids.
@ -61,7 +61,7 @@ impl<'a, 'gcx, 'tcx> DefIdForest {
self.root_ids.is_empty()
}
/// Test whether the forest conains a given DefId.
/// Test whether the forest contains a given DefId.
pub fn contains(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
id: DefId) -> bool

View File

@ -125,7 +125,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// If possible, this pushes a global path resolving to `external_def_id` that is visible
/// from at least one local module and returns true. If the crate defining `external_def_id` is
/// declared with an `extern crate`, the path is guarenteed to use the `extern crate`.
/// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
pub fn try_push_visible_item_path<T>(self, buffer: &mut T, external_def_id: DefId) -> bool
where T: ItemPathBuffer
{

View File

@ -594,7 +594,7 @@ macro_rules! define_maps {
}
// FIXME(eddyb) Get more valid Span's on queries.
// def_span guard is necesary to prevent a recursive loop,
// def_span guard is necessary to prevent a recursive loop,
// default_span calls def_span query internally.
if span == DUMMY_SP && stringify!($name) != "def_span" {
span = key.default_span(tcx)

View File

@ -77,7 +77,7 @@ pub use self::sty::TypeVariants::*;
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
pub use self::context::{TyCtxt, GlobalArenas, tls};
pub use self::context::{TyCtxt, GlobalArenas, tls, keep_local};
pub use self::context::{Lift, TypeckTables};
pub use self::instance::{Instance, InstanceDef};
@ -575,8 +575,8 @@ impl<T> Slice<T> {
/// by the upvar) and the id of the closure expression.
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct UpvarId {
pub var_id: NodeId,
pub closure_expr_id: NodeId,
pub var_id: DefIndex,
pub closure_expr_id: DefIndex,
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)]
@ -1986,6 +1986,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
}
}
pub fn local_var_name_str_def_index(self, def_index: DefIndex) -> InternedString {
let node_id = self.hir.as_local_node_id(DefId::local(def_index)).unwrap();
self.local_var_name_str(node_id)
}
pub fn expr_is_lval(self, expr: &hir::Expr) -> bool {
match expr.node {
hir::ExprPath(hir::QPath::Resolved(_, ref path)) => {

View File

@ -224,7 +224,7 @@ pub trait MemoizationMap {
type Key: Clone;
type Value: Clone;
/// If `key` is present in the map, return the valuee,
/// If `key` is present in the map, return the value,
/// otherwise invoke `op` and store the value in the map.
///
/// NB: if the receiver is a `DepTrackingMap`, special care is
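
The doc comment above describes the memoization contract: look `key` up, and on a miss compute the value once with `op` and cache it. A minimal standalone sketch of that contract follows; this `Memo` type is illustrative only and does not model the dependency tracking that `DepTrackingMap` adds on top:

```rust
use std::collections::HashMap;
use std::hash::Hash;

struct Memo<K, V> {
    map: HashMap<K, V>,
}

impl<K: Eq + Hash + Clone, V: Clone> Memo<K, V> {
    fn new() -> Self {
        Memo { map: HashMap::new() }
    }

    // If `key` is present, return the cached value; otherwise invoke `op`
    // exactly once and store the result.
    fn memoize<F: FnOnce() -> V>(&mut self, key: K, op: F) -> V {
        if let Some(v) = self.map.get(&key) {
            return v.clone();
        }
        let v = op();
        self.map.insert(key, v.clone());
        v
    }
}

fn main() {
    let mut memo = Memo::new();
    let a = memo.memoize("answer", || 6 * 7);
    let b = memo.memoize("answer", || unreachable!("cached on second call"));
    assert_eq!((a, b), (42, 42));
}
```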

View File

@ -13,6 +13,7 @@
#![allow(non_snake_case)]
use hir::def_id::DefId;
use hir::ItemLocalId;
use syntax::ast;
pub use rustc_data_structures::fx::FxHashMap;
@ -20,12 +21,14 @@ pub use rustc_data_structures::fx::FxHashSet;
pub type NodeMap<T> = FxHashMap<ast::NodeId, T>;
pub type DefIdMap<T> = FxHashMap<DefId, T>;
pub type ItemLocalMap<T> = FxHashMap<ItemLocalId, T>;
pub type NodeSet = FxHashSet<ast::NodeId>;
pub type DefIdSet = FxHashSet<DefId>;
pub fn NodeMap<T>() -> NodeMap<T> { FxHashMap() }
pub fn DefIdMap<T>() -> DefIdMap<T> { FxHashMap() }
pub fn ItemLocalMap<T>() -> ItemLocalMap<T> { FxHashMap() }
pub fn NodeSet() -> NodeSet { FxHashSet() }
pub fn DefIdSet() -> DefIdSet { FxHashSet() }
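
The aliases above pair each map type with a same-named constructor function (hence the `allow(non_snake_case)` at the top of the file), so call sites can write `ItemLocalMap()` without spelling out the hasher. A small usage sketch, with std's `HashMap` standing in for `FxHashMap` and `u32` for `hir::ItemLocalId` so it runs on its own:

```rust
#![allow(non_snake_case)]
use std::collections::HashMap;

type ItemLocalId = u32; // stand-in for hir::ItemLocalId

// Type alias and constructor fn share a name; types and values live in
// different namespaces, so this compiles.
pub type ItemLocalMap<T> = HashMap<ItemLocalId, T>;
pub fn ItemLocalMap<T>() -> ItemLocalMap<T> { HashMap::new() }

fn main() {
    let mut node_types: ItemLocalMap<&'static str> = ItemLocalMap();
    node_types.insert(0, "u8");
    assert_eq!(node_types[&0], "u8");
}
```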

View File

@ -905,9 +905,9 @@ impl<'tcx> fmt::Display for ty::TyS<'tcx> {
impl fmt::Debug for ty::UpvarId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "UpvarId({};`{}`;{})",
write!(f, "UpvarId({:?};`{}`;{:?})",
self.var_id,
ty::tls::with(|tcx| tcx.local_var_name_str(self.var_id)),
ty::tls::with(|tcx| tcx.local_var_name_str_def_index(self.var_id)),
self.closure_expr_id)
}
}

View File

@ -79,6 +79,7 @@ impl<'a> Folder for ExpandAllocatorDirectives<'a> {
format: MacroAttribute(Symbol::intern(name)),
span: None,
allow_internal_unstable: true,
allow_internal_unsafe: false,
}
});
let span = Span {

View File

@ -472,7 +472,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
if new_loan.span == old_loan.span {
// Both borrows are happening in the same place
// Meaning the borrow is occuring in a loop
// Meaning the borrow is occurring in a loop
err.span_label(
new_loan.span,
format!("mutable borrow starts here in previous \

View File

@ -153,7 +153,9 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> {
}
fn decl_without_init(&mut self, id: ast::NodeId, _span: Span) {
let ty = self.bccx.tables.node_id_to_type(id);
let ty = self.bccx
.tables
.node_id_to_type(self.bccx.tcx.hir.node_to_hir_id(id));
gather_moves::gather_decl(self.bccx, &self.move_data, id, ty);
}
}
@ -445,7 +447,8 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
}
None
}
LpUpvar(ty::UpvarId{ var_id: local_id, closure_expr_id: _ }) => {
LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => {
let local_id = self.tcx().hir.def_index_to_node_id(var_id);
self.tcx().used_mut_nodes.borrow_mut().insert(local_id);
None
}

View File

@ -93,11 +93,11 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &Vec<Move
}
}
if let NoteClosureEnv(upvar_id) = error.move_from.note {
err.span_label(bccx.tcx.hir.span(upvar_id.var_id),
let var_node_id = bccx.tcx.hir.def_index_to_node_id(upvar_id.var_id);
err.span_label(bccx.tcx.hir.span(var_node_id),
"captured outer variable");
}
err.emit();
}
}

View File

@ -27,7 +27,7 @@ use rustc::middle::dataflow::DataFlowContext;
use rustc::middle::dataflow::BitwiseOperator;
use rustc::middle::dataflow::DataFlowOperator;
use rustc::middle::dataflow::KillFrom;
use rustc::hir::def_id::DefId;
use rustc::hir::def_id::{DefId, DefIndex};
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
@ -324,8 +324,9 @@ pub enum LoanPathElem<'tcx> {
LpInterior(Option<DefId>, InteriorKind),
}
pub fn closure_to_block(closure_id: ast::NodeId,
tcx: TyCtxt) -> ast::NodeId {
fn closure_to_block(closure_id: DefIndex,
tcx: TyCtxt) -> ast::NodeId {
let closure_id = tcx.hir.def_index_to_node_id(closure_id);
match tcx.hir.get(closure_id) {
hir_map::NodeExpr(expr) => match expr.node {
hir::ExprClosure(.., body_id, _, _) => {
@ -597,8 +598,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
let need_note = match lp.ty.sty {
ty::TypeVariants::TyClosure(id, _) => {
let node_id = self.tcx.hir.as_local_node_id(id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
if let Some(&(ty::ClosureKind::FnOnce, Some((span, name)))) =
self.tables.closure_kinds.get(&node_id)
self.tables.closure_kinds().get(hir_id)
{
err.span_note(span, &format!(
"closure cannot be invoked more than once because \
@ -1044,7 +1046,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
} else {
"consider changing this closure to take self by mutable reference"
};
err.span_help(self.tcx.hir.span(id), help);
let node_id = self.tcx.hir.def_index_to_node_id(id);
err.span_help(self.tcx.hir.span(node_id), help);
err
}
_ => {
@ -1100,8 +1103,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
};
match pat.node {
hir::PatKind::Binding(..) =>
*self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"),
hir::PatKind::Binding(..) => {
*self.tables
.pat_binding_modes()
.get(pat.hir_id)
.expect("missing binding mode")
}
_ => bug!("local is not a binding: {:?}", pat)
}
}
@ -1248,7 +1255,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
_ => bug!()
};
if kind == ty::ClosureKind::Fn {
db.span_help(self.tcx.hir.span(upvar_id.closure_expr_id),
let closure_node_id =
self.tcx.hir.def_index_to_node_id(upvar_id.closure_expr_id);
db.span_help(self.tcx.hir.span(closure_node_id),
"consider changing this closure to take \
self by mutable reference");
}
@ -1281,7 +1290,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
loan_path: &LoanPath<'tcx>,
out: &mut String) {
match loan_path.kind {
LpUpvar(ty::UpvarId{ var_id: id, closure_expr_id: _ }) |
LpUpvar(ty::UpvarId { var_id: id, closure_expr_id: _ }) => {
out.push_str(&self.tcx.local_var_name_str_def_index(id));
}
LpVar(id) => {
out.push_str(&self.tcx.local_var_name_str(id));
}
@ -1419,8 +1430,11 @@ impl<'tcx> fmt::Debug for LoanPath<'tcx> {
}
LpUpvar(ty::UpvarId{ var_id, closure_expr_id }) => {
let s = ty::tls::with(|tcx| tcx.hir.node_to_string(var_id));
write!(f, "$({} captured by id={})", s, closure_expr_id)
let s = ty::tls::with(|tcx| {
let var_node_id = tcx.hir.def_index_to_node_id(var_id);
tcx.hir.node_to_string(var_node_id)
});
write!(f, "$({} captured by id={:?})", s, closure_expr_id)
}
LpDowncast(ref lp, variant_def_id) => {
@ -1451,7 +1465,10 @@ impl<'tcx> fmt::Display for LoanPath<'tcx> {
}
LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => {
let s = ty::tls::with(|tcx| tcx.hir.node_to_user_string(var_id));
let s = ty::tls::with(|tcx| {
let var_node_id = tcx.hir.def_index_to_node_id(var_id);
tcx.hir.node_to_string(var_node_id)
});
write!(f, "$({} captured by closure)", s)
}
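
A recurring step in the borrowck changes above is mapping the new `DefIndex`-based `UpvarId` fields back to a `NodeId` before consulting NodeId-keyed side tables (spans, `used_mut_nodes`, pretty-printing). A toy sketch of working across the two id spaces; `DefIndex`, `NodeId`, and the lookup maps here are simplified stand-ins for the real hir map:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DefIndex(u32);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct NodeId(u32);

struct Map {
    def_to_node: HashMap<DefIndex, NodeId>,
    spans: HashMap<NodeId, (u32, u32)>,
}

impl Map {
    fn def_index_to_node_id(&self, idx: DefIndex) -> NodeId {
        self.def_to_node[&idx]
    }
    fn span(&self, id: NodeId) -> (u32, u32) {
        self.spans[&id]
    }
}

fn main() {
    let mut def_to_node = HashMap::new();
    def_to_node.insert(DefIndex(3), NodeId(17));
    let mut spans = HashMap::new();
    spans.insert(NodeId(17), (120, 128));
    let map = Map { def_to_node, spans };

    // Convert first, then consult the NodeId-keyed table.
    let var_node_id = map.def_index_to_node_id(DefIndex(3));
    assert_eq!(map.span(var_node_id), (120, 128));
}
```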

View File

@ -188,7 +188,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> {
// Then, if the match has no arms, check whether the scrutinee
// is uninhabited.
let pat_ty = self.tables.node_id_to_type(scrut.id);
let pat_ty = self.tables.node_id_to_type(scrut.hir_id);
let module = self.tcx.hir.get_module_parent(scrut.id);
if inlined_arms.is_empty() {
let scrutinee_is_uninhabited = if self.tcx.sess.features.borrow().never_type {
@ -217,7 +217,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> {
.flat_map(|arm| &arm.0)
.map(|pat| vec![pat.0])
.collect();
let scrut_ty = self.tables.node_id_to_type(scrut.id);
let scrut_ty = self.tables.node_id_to_type(scrut.hir_id);
check_exhaustive(cx, scrut_ty, scrut.span, &matrix);
})
}
@ -269,7 +269,11 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> {
fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) {
pat.walk(|p| {
if let PatKind::Binding(_, _, name, None) = p.node {
let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
let bm = *cx.tables
.pat_binding_modes()
.get(p.hir_id)
.expect("missing binding mode");
if bm != ty::BindByValue(hir::MutImmutable) {
// Nothing to check.
return true;
@ -458,7 +462,11 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor,
let mut by_ref_span = None;
for pat in pats {
pat.each_binding(|_, id, span, _path| {
let bm = *cx.tables.pat_binding_modes.get(&id).expect("missing binding mode");
let hir_id = cx.tcx.hir.node_to_hir_id(id);
let bm = *cx.tables
.pat_binding_modes()
.get(hir_id)
.expect("missing binding mode");
if let ty::BindByReference(..) = bm {
by_ref_span = Some(span);
}
@ -491,10 +499,13 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor,
for pat in pats {
pat.walk(|p| {
if let PatKind::Binding(_, _, _, ref sub) = p.node {
let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
let bm = *cx.tables
.pat_binding_modes()
.get(p.hir_id)
.expect("missing binding mode");
match bm {
ty::BindByValue(..) => {
let pat_ty = cx.tables.node_id_to_type(p.id);
let pat_ty = cx.tables.node_id_to_type(p.hir_id);
if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) {
check_move(p, sub.as_ref().map(|p| &**p));
}

Some files were not shown because too many files have changed in this diff.