Auto merge of #45205 - rkruppe:saturating-casts, r=eddyb

Saturating casts between integers and floats

Introduces a new flag, `-Z saturating-float-casts`, which makes code generation for int->float and float->int casts safe (`undef`-free), implementing [the saturating semantics laid out by @jorendorff](https://github.com/rust-lang/rust/issues/10184#issuecomment-299229143) for float->int casts and overflowing to infinity for `u128::MAX` -> `f32`.
Constant evaluation in trans was changed to behave like HIR const eval already did, i.e., saturate for u128->f32 and report an error for problematic float->int casts.
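For illustration, here is a minimal pure-Rust sketch of the float->int semantics described above, specialized to `f32 -> i32` (the free-standing `saturating_f32_to_i32` helper is purely expository; the patch itself emits the equivalent LLVM IR during codegen):

```rust
use std::i32;

// Expository sketch of the saturating semantics: clamp to the target range, map NaN to 0.
fn saturating_f32_to_i32(x: f32) -> i32 {
    // NaN has no meaningful integer counterpart, so it becomes 0.
    if x.is_nan() {
        return 0;
    }
    // i32::MAX as f32 rounds up to 2^31, so anything >= it (including +inf)
    // is out of range on the high side and saturates to i32::MAX.
    if x >= i32::MAX as f32 {
        return i32::MAX;
    }
    // i32::MIN = -2^31 is exactly representable as f32; anything below it
    // (including -inf) saturates to i32::MIN.
    if x < i32::MIN as f32 {
        return i32::MIN;
    }
    // Everything else truncates towards zero and fits into i32.
    x as i32
}
```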

Many thanks to @eddyb, whose APFloat port simplified many parts of this patch, and made HIR constant evaluation recognize dangerous float casts as mentioned above.
Also thanks to @ActuallyaDeviloper, whose branchless implementation served as inspiration for this one.

cc #10184 #41799
fixes #45134
bors 2017-11-08 17:27:56 +00:00
commit 7ca430df71
11 changed files with 535 additions and 16 deletions

src/Cargo.lock generated

@@ -1869,6 +1869,7 @@ dependencies = [
"rustc 0.0.0",
"rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_allocator 0.0.0",
"rustc_apfloat 0.0.0",
"rustc_back 0.0.0",
"rustc_const_math 0.0.0",
"rustc_data_structures 0.0.0",


@@ -1135,6 +1135,9 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"control whether #[inline] functions are in all cgus"),
tls_model: Option<String> = (None, parse_opt_string, [TRACKED],
"choose the TLS model to use (rustc --print tls-models for details)"),
saturating_float_casts: bool = (false, parse_bool, [TRACKED],
"make casts between integers and floats safe: clip out-of-range inputs to the min/max \
integer or to infinity respectively, and turn `NaN` into 0 when casting to integers"),
}
pub fn default_lib_output() -> CrateType {


@@ -96,7 +96,7 @@ impl Status {
}
impl<T> StatusAnd<T> {
fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
StatusAnd {
status: self.status,
value: f(self.value),
@@ -378,7 +378,7 @@ pub trait Float
fn from_bits(input: u128) -> Self;
fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> {
if input < 0 {
Self::from_u128_r(-input as u128, -round).map(|r| -r)
Self::from_u128_r(input.wrapping_neg() as u128, -round).map(|r| -r)
} else {
Self::from_u128_r(input as u128, round)
}


@@ -203,3 +203,11 @@ impl ::std::ops::Neg for ConstFloat {
ConstFloat { bits, ty: self.ty }
}
}
/// This is `f32::MAX + (0.5 ULP)` as an integer. Numbers greater or equal to this
/// are rounded to infinity when converted to `f32`.
///
/// NB: Computed as maximum significand with an extra 1 bit added (for the half ULP)
/// shifted by the maximum exponent (accounting for normalization).
pub const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
<< (Single::MAX_EXP - Single::PRECISION as i16);
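As a sanity check (an illustrative standalone snippet, not part of the patch), plugging in the `Single` parameters of the apfloat port, assumed here to be `PRECISION = 24` and `MAX_EXP = 127`, yields exactly `f32::MAX + 0.5 ULP = 2^128 - 2^103`:

```rust
// f32::MAX = (2 - 2^-23) * 2^127 = 2^128 - 2^104 and its ULP is 2^104,
// so f32::MAX + 0.5 ULP = 2^128 - 2^103.
const PRECISION: u32 = 24; // assumed value of Single::PRECISION
const MAX_EXP: u32 = 127;  // assumed value of Single::MAX_EXP

const MAX_F32_PLUS_HALF_ULP: u128 =
    ((1u128 << (PRECISION + 1)) - 1) << (MAX_EXP - PRECISION);

fn main() {
    // 2^128 - 2^103: the smallest u128 that rounds up to infinity as f32.
    assert_eq!(MAX_F32_PLUS_HALF_ULP, 0xffffff80_00000000_00000000_00000000);
}
```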


@@ -19,6 +19,7 @@ owning_ref = "0.3.3"
rustc-demangle = "0.1.4"
rustc = { path = "../librustc" }
rustc_allocator = { path = "../librustc_allocator" }
rustc_apfloat = { path = "../librustc_apfloat" }
rustc_back = { path = "../librustc_back" }
rustc_const_math = { path = "../librustc_const_math" }
rustc_data_structures = { path = "../librustc_data_structures" }


@@ -24,6 +24,7 @@
#![feature(custom_attribute)]
#![allow(unused_attributes)]
#![feature(i128_type)]
#![feature(i128)]
#![feature(libc)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
@@ -43,6 +44,7 @@ extern crate libc;
extern crate owning_ref;
#[macro_use] extern crate rustc;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_back;
extern crate rustc_data_structures;
extern crate rustc_incremental;


@@ -11,7 +11,7 @@
use llvm::{self, ValueRef};
use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind};
use rustc_const_math::ConstInt::*;
use rustc_const_math::{ConstInt, ConstMathErr};
use rustc_const_math::{ConstInt, ConstMathErr, MAX_F32_PLUS_HALF_ULP};
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::traits;
@@ -21,6 +21,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::subst::{Kind, Substs, Subst};
use rustc_apfloat::{ieee, Float, Status};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {adt, base, machine};
use abi::{self, Abi};
@@ -689,20 +690,18 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
}
(CastTy::Int(_), CastTy::Float) => {
if signed {
llvm::LLVMConstSIToFP(llval, ll_t_out.to_ref())
} else {
llvm::LLVMConstUIToFP(llval, ll_t_out.to_ref())
}
cast_const_int_to_float(self.ccx, llval, signed, ll_t_out)
}
(CastTy::Float, CastTy::Float) => {
llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
}
(CastTy::Float, CastTy::Int(IntTy::I)) => {
llvm::LLVMConstFPToSI(llval, ll_t_out.to_ref())
cast_const_float_to_int(self.ccx, &operand,
true, ll_t_out, span)
}
(CastTy::Float, CastTy::Int(_)) => {
llvm::LLVMConstFPToUI(llval, ll_t_out.to_ref())
cast_const_float_to_int(self.ccx, &operand,
false, ll_t_out, span)
}
(CastTy::Ptr(_), CastTy::Ptr(_)) |
(CastTy::FnPtr, CastTy::Ptr(_)) |
@@ -955,6 +954,64 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
unsafe fn cast_const_float_to_int(ccx: &CrateContext,
operand: &Const,
signed: bool,
int_ty: Type,
span: Span) -> ValueRef {
let llval = operand.llval;
let float_bits = match operand.ty.sty {
ty::TyFloat(fty) => fty.bit_width(),
_ => bug!("cast_const_float_to_int: operand not a float"),
};
// Note: this breaks if llval is a complex constant expression rather than a simple constant.
// One way that might happen would be if addresses could be turned into integers in constant
// expressions, but that doesn't appear to be possible?
// In any case, an ICE is better than producing undef.
let llval_bits = consts::bitcast(llval, Type::ix(ccx, float_bits as u64));
let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
panic!("could not get bits of constant float {:?}",
Value(llval));
});
let int_width = int_ty.int_width() as usize;
// Try to convert, but report an error for overflow and NaN. This matches HIR const eval.
let cast_result = match float_bits {
32 if signed => ieee::Single::from_bits(bits).to_i128(int_width).map(|v| v as u128),
64 if signed => ieee::Double::from_bits(bits).to_i128(int_width).map(|v| v as u128),
32 => ieee::Single::from_bits(bits).to_u128(int_width),
64 => ieee::Double::from_bits(bits).to_u128(int_width),
n => bug!("unsupported float width {}", n),
};
if cast_result.status.contains(Status::INVALID_OP) {
let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
err.report(ccx.tcx(), span, "expression");
}
C_big_integral(int_ty, cast_result.value)
}
unsafe fn cast_const_int_to_float(ccx: &CrateContext,
llval: ValueRef,
signed: bool,
float_ty: Type) -> ValueRef {
// Note: this breaks if llval is a complex constant expression rather than a simple constant.
// One way that might happen would be if addresses could be turned into integers in constant
// expressions, but that doesn't appear to be possible?
// In any case, an ICE is better than producing undef.
let value = const_to_opt_u128(llval, signed).unwrap_or_else(|| {
panic!("could not get z128 value of constant integer {:?}",
Value(llval));
});
if signed {
llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
} else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
// We're casting to f32 and the value is >= f32::MAX + 0.5 ULP -> round up to infinity.
let infinity_bits = C_u32(ccx, ieee::Single::INFINITY.to_bits() as u32);
consts::bitcast(infinity_bits, float_ty)
} else {
llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
}
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_constant(&mut self,
bcx: &Builder<'a, 'tcx>,


@@ -15,11 +15,15 @@ use rustc::ty::layout::{Layout, LayoutTyper};
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use rustc_const_math::MAX_F32_PLUS_HALF_ULP;
use std::{u128, i128};
use base;
use builder::Builder;
use callee;
use common::{self, val_ty, C_bool, C_i32, C_null, C_usize, C_uint};
use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
use consts;
use adt;
use machine;
use monomorphize;
@@ -333,14 +337,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.ptrtoint(llval, ll_t_out),
(CastTy::Int(_), CastTy::Ptr(_)) =>
bcx.inttoptr(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) if signed =>
bcx.sitofp(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) =>
bcx.uitofp(llval, ll_t_out),
cast_int_to_float(&bcx, signed, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(IntTy::I)) =>
bcx.fptosi(llval, ll_t_out),
cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(_)) =>
bcx.fptoui(llval, ll_t_out),
cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
_ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
};
OperandValue::Immediate(newval)
@@ -815,3 +817,158 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
bcx.ccx.get_intrinsic(&name)
}
fn cast_int_to_float(bcx: &Builder,
signed: bool,
x: ValueRef,
int_ty: Type,
float_ty: Type) -> ValueRef {
// Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
// It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
// LLVM's uitofp produces undef in those cases, so we manually check for that case.
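// (Example: u128::MAX = 2^128 - 1 lies above f32::MAX = 2^128 - 2^104 and rounds up to 2^128,
// so the mathematically correct result of the cast is +infinity.)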
let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
if is_u128_to_f32 && bcx.sess().opts.debugging_opts.saturating_float_casts {
// All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
// and for everything else LLVM's uitofp works just fine.
let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP);
let overflow = bcx.icmp(llvm::IntUGE, x, max);
let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
let infinity = consts::bitcast(infinity_bits, float_ty);
bcx.select(overflow, infinity, bcx.uitofp(x, float_ty))
} else {
if signed {
bcx.sitofp(x, float_ty)
} else {
bcx.uitofp(x, float_ty)
}
}
}
fn cast_float_to_int(bcx: &Builder,
signed: bool,
x: ValueRef,
float_ty: Type,
int_ty: Type) -> ValueRef {
let fptosui_result = if signed {
bcx.fptosi(x, int_ty)
} else {
bcx.fptoui(x, int_ty)
};
if !bcx.sess().opts.debugging_opts.saturating_float_casts {
return fptosui_result;
}
// LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
// destination integer type after rounding towards zero. This `undef` value can cause UB in
// safe code (see issue #10184), so we implement a saturating conversion on top of it:
// Semantically, the mathematical value of the input is rounded towards zero to the next
// mathematical integer, and then the result is clamped into the range of the destination
// integer type. Positive and negative infinity are mapped to the maximum and minimum value of
// the destination integer type. NaN is mapped to 0.
//
// Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
// a value representable in int_ty.
// They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
// Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
// int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
// representable. Note that this only works if float_ty's exponent range is sufficiently large.
// f16 or 256 bit integers would break this property. Right now the smallest float type is f32
// with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
// On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
// we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
// This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
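// For example, with float_ty = f32 (24 significand bits) and int_ty = i32:
// f_min = -2147483648.0, because i32::MIN = -2^31 is exactly representable, while
// f_max = 2147483520.0 = 2^31 - 2^7, i.e., i32::MAX rounded towards zero.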
fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: Type) -> (u128, u128) {
let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits())
}
fn int_max(signed: bool, int_ty: Type) -> u128 {
let shift_amount = 128 - int_ty.int_width();
if signed {
i128::MAX as u128 >> shift_amount
} else {
u128::MAX >> shift_amount
}
}
fn int_min(signed: bool, int_ty: Type) -> i128 {
if signed {
i128::MIN >> (128 - int_ty.int_width())
} else {
0
}
}
let float_bits_to_llval = |bits| {
let bits_llval = match float_ty.float_width() {
32 => C_u32(bcx.ccx, bits as u32),
64 => C_u64(bcx.ccx, bits as u64),
n => bug!("unsupported float width {}", n),
};
consts::bitcast(bits_llval, float_ty)
};
let (f_min, f_max) = match float_ty.float_width() {
32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
n => bug!("unsupported float width {}", n),
};
let f_min = float_bits_to_llval(f_min);
let f_max = float_bits_to_llval(f_max);
// To implement saturation, we perform the following steps:
//
// 1. Cast x to an integer with fpto[su]i. This may result in undef.
// 2. Compare x to f_min and f_max, and use the comparison results to select:
// a) int_ty::MIN if x < f_min or x is NaN
// b) int_ty::MAX if x > f_max
// c) the result of fpto[su]i otherwise
// 3. If x is NaN, return 0; otherwise return the result of step 2.
//
// This avoids returning undef, because values in the range [f_min, f_max] by definition fit into the
// destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
// undef does not introduce any non-determinism either.
// More importantly, the above procedure correctly implements saturating conversion.
// Proof (sketch):
// If x is NaN, 0 is returned by definition.
// Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
// This yields three cases to consider:
// (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
// saturating conversion for inputs in that range.
// (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
// (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
// than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
// is correct.
// (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
// int_ty::MIN and therefore the return value of int_ty::MIN is correct.
// QED.
// Step 1 was already performed above.
// Step 2: We use two comparisons and two selects, with %s1 being the result:
// %less_or_nan = fcmp ult %x, %f_min
// %greater = fcmp olt %x, %f_max
// %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
// %s1 = select %greater, int_ty::MAX, %s0
// Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
// operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
// becomes int_ty::MIN if x is NaN.
// Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
// negation, and the negation can be merged into the select. Therefore, it is not necessarily any
// more expensive than an ordered ("normal") comparison. Whether these optimizations will be
// performed is ultimately up to the backend, but at least x86 does perform them.
let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
let int_max = C_big_integral(int_ty, int_max(signed, int_ty));
let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128);
let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
let s1 = bcx.select(greater, int_max, s0);
// Step 3: NaN replacement.
// For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
// Therefore we only need to execute this step for signed integer types.
if signed {
// LLVM has no isNaN predicate, so we use (x == x) instead
bcx.select(bcx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
} else {
s1
}
}
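As a reading aid, here is a pure-Rust model of the lowering described above, specialized to `f32 -> i32` (illustrative only: `F_MIN`/`F_MAX` stand in for the `compute_clamp_bounds` results, and the real implementation emits the LLVM IR shown in the comments rather than calling a helper like this):

```rust
use std::i32;

// Clamp bounds for (f32, i32): i32::MIN is exactly representable, while
// i32::MAX rounds towards zero to 2^31 - 128 (see compute_clamp_bounds).
const F_MIN: f32 = -2147483648.0;
const F_MAX: f32 = 2147483520.0;

fn saturating_cast_model(x: f32) -> i32 {
    // Step 2: the two comparisons. `!(x >= F_MIN)` models the *unordered*
    // less-than (fcmp ult): it is also true when x is NaN.
    let less_or_nan = !(x >= F_MIN);
    let greater = x > F_MAX;

    // Step 1's fpto[su]i result is only kept when x lies in [F_MIN, F_MAX].
    // (In the real lowering the out-of-range result is undef and gets
    // discarded by the selects; in current Rust the `as` cast itself is
    // defined, so this model is safe either way.)
    let s0 = if less_or_nan { i32::MIN } else { x as i32 };
    let s1 = if greater { i32::MAX } else { s0 };

    // Step 3: map NaN to 0. (x == x) is false only for NaN, mirroring the
    // fcmp oeq used for signed destinations.
    if x == x { s1 } else { 0 }
}
```

Spot checks against the run-pass test added below: NaN -> 0, +inf -> i32::MAX, -inf -> i32::MIN, and 2147483520.0 -> 2147483520.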


@@ -0,0 +1,65 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
// This file tests that we don't generate any code for saturation if
// -Z saturating-float-casts is not enabled.
#![crate_type = "lib"]
#![feature(i128_type)]
// CHECK-LABEL: @f32_to_u32
#[no_mangle]
pub fn f32_to_u32(x: f32) -> u32 {
// CHECK: fptoui
// CHECK-NOT: fcmp
// CHECK-NOT: icmp
// CHECK-NOT: select
x as u32
}
// CHECK-LABEL: @f32_to_i32
#[no_mangle]
pub fn f32_to_i32(x: f32) -> i32 {
// CHECK: fptosi
// CHECK-NOT: fcmp
// CHECK-NOT: icmp
// CHECK-NOT: select
x as i32
}
#[no_mangle]
pub fn f64_to_u16(x: f64) -> u16 {
// CHECK-NOT: fcmp
// CHECK-NOT: icmp
// CHECK-NOT: select
x as u16
}
// CHECK-LABEL: @i32_to_f64
#[no_mangle]
pub fn i32_to_f64(x: i32) -> f64 {
// CHECK: sitofp
// CHECK-NOT: fcmp
// CHECK-NOT: icmp
// CHECK-NOT: select
x as f64
}
// CHECK-LABEL: @u128_to_f32
#[no_mangle]
pub fn u128_to_f32(x: u128) -> f32 {
// CHECK: uitofp
// CHECK-NOT: fcmp
// CHECK-NOT: icmp
// CHECK-NOT: select
x as f32
}


@@ -0,0 +1,61 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(i128_type)]
#![allow(const_err)] // this test is only about hard errors
use std::{f32, f64};
// Forces evaluation of constants, triggering hard error
fn force<T>(_: T) {}
fn main() {
{ const X: u16 = -1. as u16; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = -100. as u128; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = f32::NAN as i8; force(X); } //~ ERROR constant evaluation error
{ const X: i32 = f32::NAN as i32; force(X); } //~ ERROR constant evaluation error
{ const X: u64 = f32::NAN as u64; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = f32::NAN as u128; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = f32::INFINITY as i8; force(X); } //~ ERROR constant evaluation error
{ const X: u32 = f32::INFINITY as u32; force(X); } //~ ERROR constant evaluation error
{ const X: i128 = f32::INFINITY as i128; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = f32::INFINITY as u128; force(X); } //~ ERROR constant evaluation error
{ const X: u8 = f32::NEG_INFINITY as u8; force(X); } //~ ERROR constant evaluation error
{ const X: u16 = f32::NEG_INFINITY as u16; force(X); } //~ ERROR constant evaluation error
{ const X: i64 = f32::NEG_INFINITY as i64; force(X); } //~ ERROR constant evaluation error
{ const X: i128 = f32::NEG_INFINITY as i128; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = f64::NAN as i8; force(X); } //~ ERROR constant evaluation error
{ const X: i32 = f64::NAN as i32; force(X); } //~ ERROR constant evaluation error
{ const X: u64 = f64::NAN as u64; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = f64::NAN as u128; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = f64::INFINITY as i8; force(X); } //~ ERROR constant evaluation error
{ const X: u32 = f64::INFINITY as u32; force(X); } //~ ERROR constant evaluation error
{ const X: i128 = f64::INFINITY as i128; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = f64::INFINITY as u128; force(X); } //~ ERROR constant evaluation error
{ const X: u8 = f64::NEG_INFINITY as u8; force(X); } //~ ERROR constant evaluation error
{ const X: u16 = f64::NEG_INFINITY as u16; force(X); } //~ ERROR constant evaluation error
{ const X: i64 = f64::NEG_INFINITY as i64; force(X); } //~ ERROR constant evaluation error
{ const X: i128 = f64::NEG_INFINITY as i128; force(X); } //~ ERROR constant evaluation error
{ const X: u8 = 256. as u8; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = -129. as i8; force(X); } //~ ERROR constant evaluation error
{ const X: i8 = 128. as i8; force(X); } //~ ERROR constant evaluation error
{ const X: i32 = 2147483648. as i32; force(X); } //~ ERROR constant evaluation error
{ const X: i32 = -2147483904. as i32; force(X); } //~ ERROR constant evaluation error
{ const X: u32 = 4294967296. as u32; force(X); } //~ ERROR constant evaluation error
{ const X: u128 = 1e40 as u128; force(X); } //~ ERROR constant evaluation error
{ const X: i128 = 1e40 as i128; force(X); } //~ ERROR constant evaluation error
}


@@ -0,0 +1,164 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z saturating-float-casts
#![feature(test, i128, i128_type, stmt_expr_attributes)]
#![deny(overflowing_literals)]
extern crate test;
use std::{f32, f64};
use std::{u8, i8, u16, i16, u32, i32, u64, i64};
#[cfg(not(target_os="emscripten"))]
use std::{u128, i128};
use test::black_box;
macro_rules! test {
($val:expr, $src_ty:ident -> $dest_ty:ident, $expected:expr) => (
// black_box disables constant evaluation to test run-time conversions:
assert_eq!(black_box::<$src_ty>($val) as $dest_ty, $expected,
"run-time {} -> {}", stringify!($src_ty), stringify!($dest_ty));
);
($fval:expr, f* -> $ity:ident, $ival:expr) => (
test!($fval, f32 -> $ity, $ival);
test!($fval, f64 -> $ity, $ival);
)
}
// This macro tests const eval in addition to run-time evaluation.
// If and when saturating casts are adopted, this macro should be merged with test!() to ensure
// that run-time and const eval agree on inputs that currently trigger a const eval error.
macro_rules! test_c {
($val:expr, $src_ty:ident -> $dest_ty:ident, $expected:expr) => ({
test!($val, $src_ty -> $dest_ty, $expected);
{
const X: $src_ty = $val;
const Y: $dest_ty = X as $dest_ty;
assert_eq!(Y, $expected,
"const eval {} -> {}", stringify!($src_ty), stringify!($dest_ty));
}
});
($fval:expr, f* -> $ity:ident, $ival:expr) => (
test!($fval, f32 -> $ity, $ival);
test!($fval, f64 -> $ity, $ival);
)
}
macro_rules! common_fptoi_tests {
($fty:ident -> $($ity:ident)+) => ({ $(
test!($fty::NAN, $fty -> $ity, 0);
test!($fty::INFINITY, $fty -> $ity, $ity::MAX);
test!($fty::NEG_INFINITY, $fty -> $ity, $ity::MIN);
// These two tests are not solely float->int tests; in particular, the latter relies on
// `u128::MAX as f32` not being UB. But that's okay: since this file tests int->float
// as well, the test is just slightly misplaced.
test!($ity::MIN as $fty, $fty -> $ity, $ity::MIN);
test!($ity::MAX as $fty, $fty -> $ity, $ity::MAX);
test_c!(0., $fty -> $ity, 0);
test_c!($fty::MIN_POSITIVE, $fty -> $ity, 0);
test!(-0.9, $fty -> $ity, 0);
test_c!(1., $fty -> $ity, 1);
test_c!(42., $fty -> $ity, 42);
)+ });
(f* -> $($ity:ident)+) => ({
common_fptoi_tests!(f32 -> $($ity)+);
common_fptoi_tests!(f64 -> $($ity)+);
})
}
macro_rules! fptoui_tests {
($fty: ident -> $($ity: ident)+) => ({ $(
test!(-0., $fty -> $ity, 0);
test!(-$fty::MIN_POSITIVE, $fty -> $ity, 0);
test!(-0.99999994, $fty -> $ity, 0);
test!(-1., $fty -> $ity, 0);
test!(-100., $fty -> $ity, 0);
test!(#[allow(overflowing_literals)] -1e50, $fty -> $ity, 0);
test!(#[allow(overflowing_literals)] -1e130, $fty -> $ity, 0);
)+ });
(f* -> $($ity:ident)+) => ({
fptoui_tests!(f32 -> $($ity)+);
fptoui_tests!(f64 -> $($ity)+);
})
}
pub fn main() {
common_fptoi_tests!(f* -> i8 i16 i32 i64 u8 u16 u32 u64);
fptoui_tests!(f* -> u8 u16 u32 u64);
// FIXME emscripten does not support i128
#[cfg(not(target_os="emscripten"))] {
common_fptoi_tests!(f* -> i128 u128);
fptoui_tests!(f* -> u128);
}
// The following tests cover edge cases for some integer types.
// # u8
test_c!(254., f* -> u8, 254);
test!(256., f* -> u8, 255);
// # i8
test_c!(-127., f* -> i8, -127);
test!(-129., f* -> i8, -128);
test_c!(126., f* -> i8, 126);
test!(128., f* -> i8, 127);
// # i32
// -2147483648. is i32::MIN (exactly)
test_c!(-2147483648., f* -> i32, i32::MIN);
// 2147483648. is i32::MAX rounded up
test!(2147483648., f32 -> i32, 2147483647);
// With 24 significand bits, floats with magnitude in [2^30 + 1, 2^31] are rounded to
// multiples of 2^7. Therefore, nextDown(round(i32::MAX)) is 2^31 - 128:
test_c!(2147483520., f32 -> i32, 2147483520);
// Similarly, nextUp(i32::MIN) is i32::MIN + 2^7 and nextDown(i32::MIN) is i32::MIN - 2^8
test!(-2147483904., f* -> i32, i32::MIN);
test_c!(-2147483520., f* -> i32, -2147483520);
// # u32
// round(MAX) and nextUp(round(MAX))
test_c!(4294967040., f* -> u32, 4294967040);
test!(4294967296., f* -> u32, 4294967295);
// # u128
#[cfg(not(target_os="emscripten"))]
{
// float->int:
test_c!(f32::MAX, f32 -> u128, 0xffffff00000000000000000000000000);
// nextDown(f32::MAX) = 2^128 - 2 * 2^104
const SECOND_LARGEST_F32: f32 = 340282326356119256160033759537265639424.;
test_c!(SECOND_LARGEST_F32, f32 -> u128, 0xfffffe00000000000000000000000000);
// int->float:
// f32::MAX - 0.5 ULP and smaller should be rounded down
test_c!(0xfffffe00000000000000000000000000, u128 -> f32, SECOND_LARGEST_F32);
test_c!(0xfffffe7fffffffffffffffffffffffff, u128 -> f32, SECOND_LARGEST_F32);
test_c!(0xfffffe80000000000000000000000000, u128 -> f32, SECOND_LARGEST_F32);
// numbers within < 0.5 ULP of f32::MAX should be rounded to f32::MAX
test_c!(0xfffffe80000000000000000000000001, u128 -> f32, f32::MAX);
test_c!(0xfffffeffffffffffffffffffffffffff, u128 -> f32, f32::MAX);
test_c!(0xffffff00000000000000000000000000, u128 -> f32, f32::MAX);
test_c!(0xffffff00000000000000000000000001, u128 -> f32, f32::MAX);
test_c!(0xffffff7fffffffffffffffffffffffff, u128 -> f32, f32::MAX);
// f32::MAX + 0.5 ULP and greater should be rounded to infinity
test_c!(0xffffff80000000000000000000000000, u128 -> f32, f32::INFINITY);
test_c!(0xffffff80000000f00000000000000000, u128 -> f32, f32::INFINITY);
test_c!(0xffffff87ffffffffffffffff00000001, u128 -> f32, f32::INFINITY);
// u128->f64 should not be affected by the u128->f32 checks
test_c!(0xffffff80000000000000000000000000, u128 -> f64,
340282356779733661637539395458142568448.0);
test_c!(u128::MAX, u128 -> f64, 340282366920938463463374607431768211455.0);
}
}