diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs
index 9ff52496dab..8b35b064eb3 100644
--- a/src/librustc/session/config.rs
+++ b/src/librustc/session/config.rs
@@ -1711,7 +1711,7 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches)
     let codegen_units = codegen_units.unwrap_or_else(|| {
         match opt_level {
-            // If we're compiling at `-O0` then default to 32 codegen units.
+            // If we're compiling at `-O0` then default to 16 codegen units.
             // The number here shouldn't matter too too much as debug mode
             // builds don't rely on performance at all, meaning that lost
             // opportunities for inlining through multiple codegen units is
             // a non-issue.
@@ -1729,7 +1729,21 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches)
             // unit takes *too* long to build we'll be guaranteed that all
             // cpus will finish pretty closely to one another and we should
             // make relatively optimal use of system resources
-            OptLevel::No => 32,
+            //
+            // Another note worth mentioning here, however, is that this number
+            // isn't *too* high. When codegen units are increased we currently
+            // have to codegen `#[inline]` functions into each codegen unit,
+            // which means the more codegen units we use the more code we may
+            // be generating. In other words, increasing codegen units may
+            // increase the overall work the compiler does. If we don't have
+            // enough cores to make up for this loss then increasing the number
+            // of codegen units could become an overall loss!
+            //
+            // As a result we choose a hopefully conservative value of 16,
+            // which should be more than the number of cpus on most hardware
+            // compiling Rust but also not so high that 2-4 core machines lose
+            // too much compile time.
+            OptLevel::No => 16,
 
             // All other optimization levels default use one codegen unit,
             // the historical default in Rust for a Long Time.
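For readers skimming the patch, here is a minimal, self-contained sketch of the defaulting behavior it produces. The `default_codegen_units` helper and the trimmed-down `OptLevel` enum are illustrative stand-ins, not rustc's actual internals (the real enum carries additional variants, e.g. for size optimization):

```rust
// Simplified stand-in for rustc's internal `OptLevel` enum.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum OptLevel {
    No,         // -C opt-level=0
    Default,    // -C opt-level=2
    Aggressive, // -C opt-level=3
}

// Mirrors the `unwrap_or_else` fallback in the patch: an explicit
// `-C codegen-units=N` always wins; otherwise debug builds get 16
// units (favoring parallel codegen across cpus) and optimized
// builds get 1 (favoring cross-unit inlining).
fn default_codegen_units(explicit: Option<usize>, opt_level: OptLevel) -> usize {
    explicit.unwrap_or_else(|| match opt_level {
        OptLevel::No => 16,
        _ => 1,
    })
}

fn main() {
    assert_eq!(default_codegen_units(None, OptLevel::No), 16);
    assert_eq!(default_codegen_units(None, OptLevel::Aggressive), 1);
    // A user-supplied value overrides either default.
    assert_eq!(default_codegen_units(Some(4), OptLevel::No), 4);
}
```

On the command line the override is `rustc -C codegen-units=N`; the patch only changes what happens when that flag is absent, and only at `-O0`.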