Auto merge of #29316 - GBGamer:change-unchecked-div-generic, r=eddyb

Similar to the simd intrinsics. I believe this is a better solution than #29288, and I could implement it as well for overflowing_add/sub/mul. Also rename from udiv/sdiv to div, and same for rem.
This commit is contained in:
bors 2015-11-01 07:03:09 +00:00
commit a5fbb3a25f
8 changed files with 597 additions and 396 deletions

View File

@ -511,114 +511,172 @@ extern "rust-intrinsic" {
pub fn roundf64(x: f64) -> f64;
/// Returns the number of bits set in a `u8`.
#[cfg(stage0)]
pub fn ctpop8(x: u8) -> u8;
/// Returns the number of bits set in a `u16`.
#[cfg(stage0)]
pub fn ctpop16(x: u16) -> u16;
/// Returns the number of bits set in a `u32`.
#[cfg(stage0)]
pub fn ctpop32(x: u32) -> u32;
/// Returns the number of bits set in a `u64`.
#[cfg(stage0)]
pub fn ctpop64(x: u64) -> u64;
/// Returns the number of bits set in an integer type `T`
#[cfg(not(stage0))]
pub fn ctpop<T>(x: T) -> T;
/// Returns the number of leading bits unset in a `u8`.
#[cfg(stage0)]
pub fn ctlz8(x: u8) -> u8;
/// Returns the number of leading bits unset in a `u16`.
#[cfg(stage0)]
pub fn ctlz16(x: u16) -> u16;
/// Returns the number of leading bits unset in a `u32`.
#[cfg(stage0)]
pub fn ctlz32(x: u32) -> u32;
/// Returns the number of leading bits unset in a `u64`.
#[cfg(stage0)]
pub fn ctlz64(x: u64) -> u64;
/// Returns the number of leading bits unset in an integer type `T`
#[cfg(not(stage0))]
pub fn ctlz<T>(x: T) -> T;
/// Returns the number of trailing bits unset in a `u8`.
#[cfg(stage0)]
pub fn cttz8(x: u8) -> u8;
/// Returns the number of trailing bits unset in a `u16`.
#[cfg(stage0)]
pub fn cttz16(x: u16) -> u16;
/// Returns the number of trailing bits unset in a `u32`.
#[cfg(stage0)]
pub fn cttz32(x: u32) -> u32;
/// Returns the number of trailing bits unset in a `u64`.
#[cfg(stage0)]
pub fn cttz64(x: u64) -> u64;
/// Returns the number of trailing bits unset in an integer type `T`
#[cfg(not(stage0))]
pub fn cttz<T>(x: T) -> T;
/// Reverses the bytes in a `u16`.
#[cfg(stage0)]
pub fn bswap16(x: u16) -> u16;
/// Reverses the bytes in a `u32`.
#[cfg(stage0)]
pub fn bswap32(x: u32) -> u32;
/// Reverses the bytes in a `u64`.
#[cfg(stage0)]
pub fn bswap64(x: u64) -> u64;
/// Reverses the bytes in an integer type `T`.
#[cfg(not(stage0))]
pub fn bswap<T>(x: T) -> T;
/// Performs checked `i8` addition.
#[cfg(stage0)]
pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` addition.
#[cfg(stage0)]
pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` addition.
#[cfg(stage0)]
pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` addition.
#[cfg(stage0)]
pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` addition.
#[cfg(stage0)]
pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` addition.
#[cfg(stage0)]
pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` addition.
#[cfg(stage0)]
pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` addition.
#[cfg(stage0)]
pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);
/// Performs checked integer addition.
#[cfg(not(stage0))]
pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
/// Performs checked `i8` subtraction.
#[cfg(stage0)]
pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` subtraction.
#[cfg(stage0)]
pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` subtraction.
#[cfg(stage0)]
pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` subtraction.
#[cfg(stage0)]
pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` subtraction.
#[cfg(stage0)]
pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` subtraction.
#[cfg(stage0)]
pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` subtraction.
#[cfg(stage0)]
pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` subtraction.
#[cfg(stage0)]
pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);
/// Performs checked integer subtraction
#[cfg(not(stage0))]
pub fn sub_with_overflow<T>(x: T, y: T) -> (T, bool);
/// Performs checked `i8` multiplication.
#[cfg(stage0)]
pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` multiplication.
#[cfg(stage0)]
pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` multiplication.
#[cfg(stage0)]
pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` multiplication.
#[cfg(stage0)]
pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` multiplication.
#[cfg(stage0)]
pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` multiplication.
#[cfg(stage0)]
pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` multiplication.
#[cfg(stage0)]
pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` multiplication.
#[cfg(stage0)]
pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
/// Returns (a + b) mod 2^N, where N is the width of N in bits.
/// Performs checked integer multiplication
#[cfg(not(stage0))]
pub fn mul_with_overflow<T>(x: T, y: T) -> (T, bool);
/// Performs an unchecked division, resulting in undefined behavior
/// where y = 0 or x = `T::min_value()` and y = -1
#[cfg(not(stage0))]
pub fn unchecked_div<T>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked division, resulting in
/// undefined behavior where y = 0 or x = `T::min_value()` and y = -1
#[cfg(not(stage0))]
pub fn unchecked_rem<T>(x: T, y: T) -> T;
/// Returns (a + b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_add<T>(a: T, b: T) -> T;
/// Returns (a - b) mod 2^N, where N is the width of N in bits.
/// Returns (a - b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_sub<T>(a: T, b: T) -> T;
/// Returns (a * b) mod 2^N, where N is the width of N in bits.
/// Returns (a * b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_mul<T>(a: T, b: T) -> T;
/// Performs an unchecked signed division, which results in undefined behavior,
/// in cases where y == 0, or x == isize::MIN and y == -1
pub fn unchecked_sdiv<T>(x: T, y: T) -> T;
/// Performs an unchecked unsigned division, which results in undefined behavior,
/// in cases where y == 0
pub fn unchecked_udiv<T>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked signed division, which results in
/// undefined behavior, in cases where y == 0, or x == isize::MIN and y == -1
pub fn unchecked_srem<T>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked unsigned division, which results in
/// undefined behavior, in cases where y == 0
pub fn unchecked_urem<T>(x: T, y: T) -> T;
/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
pub fn discriminant_value<T>(v: &T) -> u64;

View File

@ -55,6 +55,7 @@ macro_rules! impl_full_ops {
($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
$(
impl FullOps for $ty {
#[cfg(stage0)]
fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
// this cannot overflow, the output is between 0 and 2*2^nbits - 1
// FIXME will LLVM optimize this into ADC or similar???
@ -62,6 +63,16 @@ macro_rules! impl_full_ops {
let (v, carry2) = unsafe { $addfn(v, if carry {1} else {0}) };
(carry1 || carry2, v)
}
#[cfg(not(stage0))]
fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
// this cannot overflow, the output is between 0 and 2*2^nbits - 1
// FIXME will LLVM optimize this into ADC or similar???
let (v, carry1) = unsafe { intrinsics::add_with_overflow(self, other) };
let (v, carry2) = unsafe {
intrinsics::add_with_overflow(v, if carry {1} else {0})
};
(carry1 || carry2, v)
}
fn full_mul(self, other: $ty, carry: $ty) -> ($ty, $ty) {
// this cannot overflow, the output is between 0 and 2^nbits * (2^nbits - 1)

View File

@ -103,6 +103,11 @@ macro_rules! zero_one_impl_float {
}
zero_one_impl_float! { f32 f64 }
// Stage0-only shim: byte-swapping a single byte changes nothing, so this is
// the identity function. It is `unsafe` purely for signature parity with the
// real `bswap` intrinsics. Delete this once it becomes unused.
#[cfg(stage0)]
unsafe fn bswap8(byte: u8) -> u8 { byte }
macro_rules! checked_op {
($U:ty, $op:path, $x:expr, $y:expr) => {{
let (result, overflowed) = unsafe { $op($x as $U, $y as $U) };
@ -110,10 +115,6 @@ macro_rules! checked_op {
}}
}
/// Swapping a single byte is a no-op. This is marked as `unsafe` for
/// consistency with the other `bswap` intrinsics.
unsafe fn bswap8(x: u8) -> u8 { x }
// `Int` + `SignedInt` implemented for signed integers
macro_rules! int_impl {
($ActualT:ty, $UnsignedT:ty, $BITS:expr,
@ -611,54 +612,110 @@ macro_rules! int_impl {
}
#[lang = "i8"]
#[cfg(stage0)]
impl i8 {
int_impl! { i8, u8, 8,
intrinsics::i8_add_with_overflow,
intrinsics::i8_sub_with_overflow,
intrinsics::i8_mul_with_overflow }
}
#[lang = "i8"]
#[cfg(not(stage0))]
impl i8 {
int_impl! { i8, u8, 8,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "i16"]
#[cfg(stage0)]
impl i16 {
int_impl! { i16, u16, 16,
intrinsics::i16_add_with_overflow,
intrinsics::i16_sub_with_overflow,
intrinsics::i16_mul_with_overflow }
}
#[lang = "i16"]
#[cfg(not(stage0))]
impl i16 {
int_impl! { i16, u16, 16,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "i32"]
#[cfg(stage0)]
impl i32 {
int_impl! { i32, u32, 32,
intrinsics::i32_add_with_overflow,
intrinsics::i32_sub_with_overflow,
intrinsics::i32_mul_with_overflow }
}
#[lang = "i32"]
#[cfg(not(stage0))]
impl i32 {
int_impl! { i32, u32, 32,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "i64"]
#[cfg(stage0)]
impl i64 {
int_impl! { i64, u64, 64,
intrinsics::i64_add_with_overflow,
intrinsics::i64_sub_with_overflow,
intrinsics::i64_mul_with_overflow }
}
#[lang = "i64"]
#[cfg(not(stage0))]
impl i64 {
int_impl! { i64, u64, 64,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
#[cfg(stage0)]
impl isize {
int_impl! { i32, u32, 32,
intrinsics::i32_add_with_overflow,
intrinsics::i32_sub_with_overflow,
intrinsics::i32_mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
#[cfg(not(stage0))]
impl isize {
int_impl! { i32, u32, 32,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
#[cfg(stage0)]
impl isize {
int_impl! { i64, u64, 64,
intrinsics::i64_add_with_overflow,
intrinsics::i64_sub_with_overflow,
intrinsics::i64_mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
#[cfg(not(stage0))]
impl isize {
int_impl! { i64, u64, 64,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
// `Int` + `UnsignedInt` implemented for unsigned integers
macro_rules! uint_impl {
@ -744,17 +801,8 @@ macro_rules! uint_impl {
unsafe { $ctlz(self as $ActualT) as u32 }
}
/// Returns the number of trailing zeros in the binary representation
/// of `self`.
///
/// # Examples
///
/// ```rust
/// let n = 0b0101000u16;
///
/// assert_eq!(n.trailing_zeros(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
#[inline]
pub fn trailing_zeros(self) -> u32 {
// As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
@ -772,6 +820,35 @@ macro_rules! uint_impl {
}
}
}
/// Returns the number of trailing zeros in the binary representation
/// of `self`.
///
/// # Examples
///
/// ```rust
/// let n = 0b0101000u16;
///
/// assert_eq!(n.trailing_zeros(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(stage0))]
#[inline]
pub fn trailing_zeros(self) -> u32 {
// As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
// emits two conditional moves on x86_64. By promoting the value to
// u16 and setting bit 8, we get better code without any conditional
// operations.
// FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284)
// pending, remove this workaround once LLVM generates better code
// for cttz8.
unsafe {
if $BITS == 8 {
intrinsics::cttz(self as u16 | 0x100) as u32
} else {
intrinsics::cttz(self) as u32
}
}
}
/// Shifts the bits to the left by a specified amount, `n`,
/// wrapping the truncated bits to the end of the resulting integer.
@ -1163,6 +1240,7 @@ macro_rules! uint_impl {
}
#[lang = "u8"]
#[cfg(stage0)]
impl u8 {
uint_impl! { u8, 8,
intrinsics::ctpop8,
@ -1173,8 +1251,21 @@ impl u8 {
intrinsics::u8_sub_with_overflow,
intrinsics::u8_mul_with_overflow }
}
#[lang = "u8"]
#[cfg(not(stage0))]
impl u8 {
uint_impl! { u8, 8,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "u16"]
#[cfg(stage0)]
impl u16 {
uint_impl! { u16, 16,
intrinsics::ctpop16,
@ -1185,8 +1276,21 @@ impl u16 {
intrinsics::u16_sub_with_overflow,
intrinsics::u16_mul_with_overflow }
}
#[lang = "u16"]
#[cfg(not(stage0))]
impl u16 {
uint_impl! { u16, 16,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "u32"]
#[cfg(stage0)]
impl u32 {
uint_impl! { u32, 32,
intrinsics::ctpop32,
@ -1197,9 +1301,21 @@ impl u32 {
intrinsics::u32_sub_with_overflow,
intrinsics::u32_mul_with_overflow }
}
#[lang = "u32"]
#[cfg(not(stage0))]
impl u32 {
uint_impl! { u32, 32,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[lang = "u64"]
#[cfg(stage0)]
impl u64 {
uint_impl! { u64, 64,
intrinsics::ctpop64,
@ -1210,9 +1326,22 @@ impl u64 {
intrinsics::u64_sub_with_overflow,
intrinsics::u64_mul_with_overflow }
}
#[lang = "u64"]
#[cfg(not(stage0))]
impl u64 {
uint_impl! { u64, 64,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
#[cfg(stage0)]
impl usize {
uint_impl! { u32, 32,
intrinsics::ctpop32,
@ -1223,9 +1352,23 @@ impl usize {
intrinsics::u32_sub_with_overflow,
intrinsics::u32_mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
#[cfg(not(stage0))]
impl usize {
uint_impl! { u32, 32,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
#[cfg(stage0)]
impl usize {
uint_impl! { u64, 64,
intrinsics::ctpop64,
@ -1236,6 +1379,19 @@ impl usize {
intrinsics::u64_sub_with_overflow,
intrinsics::u64_mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
#[cfg(not(stage0))]
impl usize {
uint_impl! { u64, 64,
intrinsics::ctpop,
intrinsics::ctlz,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
intrinsics::sub_with_overflow,
intrinsics::mul_with_overflow }
}
/// Used for representing the classification of floating point numbers
#[derive(Copy, Clone, PartialEq, Debug)]

View File

@ -12,23 +12,35 @@
#![unstable(feature = "wrapping", reason = "may be removed or relocated",
issue = "27755")]
#[cfg(stage0)]
pub use intrinsics::{
u8_add_with_overflow, i8_add_with_overflow,
u16_add_with_overflow, i16_add_with_overflow,
u32_add_with_overflow, i32_add_with_overflow,
u64_add_with_overflow, i64_add_with_overflow,
u8_sub_with_overflow, i8_sub_with_overflow,
u16_sub_with_overflow, i16_sub_with_overflow,
u32_sub_with_overflow, i32_sub_with_overflow,
u64_sub_with_overflow, i64_sub_with_overflow,
u8_mul_with_overflow, i8_mul_with_overflow,
u16_mul_with_overflow, i16_mul_with_overflow,
u32_mul_with_overflow, i32_mul_with_overflow,
u64_mul_with_overflow, i64_mul_with_overflow,
};
#[cfg(not(stage0))]
pub use intrinsics::{
add_with_overflow,
sub_with_overflow,
mul_with_overflow,
};
use super::Wrapping;
use ops::*;
use intrinsics::{i8_add_with_overflow, u8_add_with_overflow};
use intrinsics::{i16_add_with_overflow, u16_add_with_overflow};
use intrinsics::{i32_add_with_overflow, u32_add_with_overflow};
use intrinsics::{i64_add_with_overflow, u64_add_with_overflow};
use intrinsics::{i8_sub_with_overflow, u8_sub_with_overflow};
use intrinsics::{i16_sub_with_overflow, u16_sub_with_overflow};
use intrinsics::{i32_sub_with_overflow, u32_sub_with_overflow};
use intrinsics::{i64_sub_with_overflow, u64_sub_with_overflow};
use intrinsics::{i8_mul_with_overflow, u8_mul_with_overflow};
use intrinsics::{i16_mul_with_overflow, u16_mul_with_overflow};
use intrinsics::{i32_mul_with_overflow, u32_mul_with_overflow};
use intrinsics::{i64_mul_with_overflow, u64_mul_with_overflow};
use ::{i8,i16,i32,i64};
pub trait OverflowingOps {
@ -191,23 +203,47 @@ macro_rules! signed_overflowing_impl {
($($t:ident)*) => ($(
impl OverflowingOps for $t {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _add_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: $t) -> ($t, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _sub_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _mul_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: $t) -> ($t, bool) {
@ -253,23 +289,47 @@ macro_rules! unsigned_overflowing_impl {
($($t:ident)*) => ($(
impl OverflowingOps for $t {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _add_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: $t) -> ($t, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _sub_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _mul_with_overflow)(self, rhs)
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: $t) -> ($t, bool) {
@ -305,6 +365,7 @@ unsigned_overflowing_impl! { u8 u16 u32 u64 }
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for usize {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_add_with_overflow(self as u64, rhs as u64);
@ -312,6 +373,14 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_sub_with_overflow(self as u64, rhs as u64);
@ -319,6 +388,14 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_mul_with_overflow(self as u64, rhs as u64);
@ -326,6 +403,13 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_div(rhs as u64);
(r as usize, f)
@ -355,6 +439,7 @@ impl OverflowingOps for usize {
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for usize {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_add_with_overflow(self as u32, rhs as u32);
@ -362,6 +447,14 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_sub_with_overflow(self as u32, rhs as u32);
@ -369,6 +462,14 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_mul_with_overflow(self as u32, rhs as u32);
@ -376,6 +477,13 @@ impl OverflowingOps for usize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_div(rhs as u32);
(r as usize, f)
@ -405,6 +513,7 @@ impl OverflowingOps for usize {
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for isize {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_add_with_overflow(self as i64, rhs as i64);
@ -412,6 +521,14 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_sub_with_overflow(self as i64, rhs as i64);
@ -419,6 +536,14 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_mul_with_overflow(self as i64, rhs as i64);
@ -426,6 +551,13 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: isize) -> (isize, bool) {
let (r, f) = (self as i64).overflowing_div(rhs as i64);
(r as isize, f)
@ -455,6 +587,7 @@ impl OverflowingOps for isize {
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for isize {
#[inline(always)]
#[cfg(stage0)]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_add_with_overflow(self as i32, rhs as i32);
@ -462,6 +595,14 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
add_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_sub_with_overflow(self as i32, rhs as i32);
@ -469,6 +610,14 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
sub_with_overflow(self, rhs)
}
}
#[inline(always)]
#[cfg(stage0)]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_mul_with_overflow(self as i32, rhs as i32);
@ -476,6 +625,13 @@ impl OverflowingOps for isize {
}
}
#[inline(always)]
#[cfg(not(stage0))]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
mul_with_overflow(self, rhs)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: isize) -> (isize, bool) {
let (r, f) = (self as i32).overflowing_div(rhs as i32);
(r as isize, f)

View File

@ -79,12 +79,10 @@ Transmute with two differently sized types was attempted. Erroneous code
example:
```
extern "rust-intrinsic" {
pub fn ctpop8(x: u8) -> u8;
}
fn takes_u8(_: u8) {}
fn main() {
unsafe { ctpop8(::std::mem::transmute(0u16)); }
unsafe { takes_u8(::std::mem::transmute(0u16)); }
// error: transmute called with differently sized types
}
```
@ -92,14 +90,12 @@ fn main() {
Please use types with same size or use the expected type directly. Example:
```
extern "rust-intrinsic" {
pub fn ctpop8(x: u8) -> u8;
}
fn takes_u8(_: u8) {}
fn main() {
unsafe { ctpop8(::std::mem::transmute(0i8)); } // ok!
unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok!
// or:
unsafe { ctpop8(0u8); } // ok!
unsafe { takes_u8(0u8); } // ok!
}
```
"##,
@ -118,5 +114,4 @@ Example:
let x = &[0, 1, 2][2]; // ok
```
"##,
}

View File

@ -89,13 +89,6 @@ pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Opti
"nearbyintf64" => "llvm.nearbyint.f64",
"roundf32" => "llvm.round.f32",
"roundf64" => "llvm.round.f64",
"ctpop8" => "llvm.ctpop.i8",
"ctpop16" => "llvm.ctpop.i16",
"ctpop32" => "llvm.ctpop.i32",
"ctpop64" => "llvm.ctpop.i64",
"bswap16" => "llvm.bswap.i16",
"bswap32" => "llvm.bswap.i32",
"bswap64" => "llvm.bswap.i64",
"assume" => "llvm.assume",
_ => return None
};
@ -589,217 +582,63 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
C_nil(ccx)
},
(_, "ctlz8") => count_zeros_intrinsic(bcx,
"llvm.ctlz.i8",
llargs[0],
call_debug_location),
(_, "ctlz16") => count_zeros_intrinsic(bcx,
"llvm.ctlz.i16",
llargs[0],
call_debug_location),
(_, "ctlz32") => count_zeros_intrinsic(bcx,
"llvm.ctlz.i32",
llargs[0],
call_debug_location),
(_, "ctlz64") => count_zeros_intrinsic(bcx,
"llvm.ctlz.i64",
llargs[0],
call_debug_location),
(_, "cttz8") => count_zeros_intrinsic(bcx,
"llvm.cttz.i8",
llargs[0],
call_debug_location),
(_, "cttz16") => count_zeros_intrinsic(bcx,
"llvm.cttz.i16",
llargs[0],
call_debug_location),
(_, "cttz32") => count_zeros_intrinsic(bcx,
"llvm.cttz.i32",
llargs[0],
call_debug_location),
(_, "cttz64") => count_zeros_intrinsic(bcx,
"llvm.cttz.i64",
llargs[0],
call_debug_location),
(_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
(_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
(_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
(_, "unchecked_div") | (_, "unchecked_rem") => {
let sty = &arg_tys[0].sty;
match int_type_width_signed(sty, ccx) {
Some((width, signed)) =>
match &*name {
"ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
llargs[0], call_debug_location),
"cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
llargs[0], call_debug_location),
"ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, None, call_debug_location),
"bswap" => {
if width == 8 {
llargs[0] // byte swap a u8/i8 is just a no-op
} else {
Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&llargs, None, call_debug_location)
}
}
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3], width);
with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
call_debug_location)
},
"overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
"unchecked_div" =>
if signed {
SDiv(bcx, llargs[0], llargs[1], call_debug_location)
} else {
UDiv(bcx, llargs[0], llargs[1], call_debug_location)
},
"unchecked_rem" =>
if signed {
SRem(bcx, llargs[0], llargs[1], call_debug_location)
} else {
URem(bcx, llargs[0], llargs[1], call_debug_location)
},
_ => unreachable!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess, call_info.span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
C_null(llret_ty)
}
}
(_, "i8_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.sadd.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i16_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.sadd.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i32_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.sadd.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i64_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.sadd.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
},
(_, "u8_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.uadd.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u16_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.uadd.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u32_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.uadd.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u64_add_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.uadd.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i8_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.ssub.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i16_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.ssub.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i32_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.ssub.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i64_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.ssub.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u8_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.usub.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u16_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.usub.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u32_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.usub.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u64_sub_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.usub.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i8_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.smul.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i16_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.smul.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i32_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.smul.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "i64_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.smul.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u8_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.umul.with.overflow.i8",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u16_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.umul.with.overflow.i16",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u32_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.umul.with.overflow.i32",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "u64_mul_with_overflow") =>
with_overflow_intrinsic(bcx,
"llvm.umul.with.overflow.i64",
llargs[0],
llargs[1],
llresult,
call_debug_location),
(_, "unchecked_udiv") => UDiv(bcx, llargs[0], llargs[1], call_debug_location),
(_, "unchecked_sdiv") => SDiv(bcx, llargs[0], llargs[1], call_debug_location),
(_, "unchecked_urem") => URem(bcx, llargs[0], llargs[1], call_debug_location),
(_, "unchecked_srem") => SRem(bcx, llargs[0], llargs[1], call_debug_location),
(_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
(_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
(_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),
(_, "return_address") => {
if !fcx.caller_expects_out_pointer {
@ -1174,7 +1013,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
fn count_zeros_intrinsic(bcx: Block,
name: &'static str,
name: &str,
val: ValueRef,
call_debug_location: DebugLoc)
-> ValueRef {
@ -1184,7 +1023,7 @@ fn count_zeros_intrinsic(bcx: Block,
}
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
name: &'static str,
name: &str,
a: ValueRef,
b: ValueRef,
out: ValueRef,
@ -1716,3 +1555,39 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
}
bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
}
// Returns the bit-width of an integer `TypeVariant` together with its
// signedness (`true` for signed), or `None` when `sty` is not an integer
// type. `isize`/`usize` are resolved to the target's pointer width, which
// must be 32 or 64 bits; any other width panics.
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
                               -> Option<(u64, bool)> {
    use rustc::middle::ty::{TyInt, TyUint};
    let width_and_sign = match *sty {
        TyInt(t) => {
            let bits = match t {
                ast::TyIs => {
                    // Pointer-sized signed integer: ask the target spec.
                    match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                        "32" => 32,
                        "64" => 64,
                        tws => panic!("Unsupported target word size for isize: {}", tws),
                    }
                },
                ast::TyI8 => 8,
                ast::TyI16 => 16,
                ast::TyI32 => 32,
                ast::TyI64 => 64,
            };
            (bits, true)
        },
        TyUint(t) => {
            let bits = match t {
                ast::TyUs => {
                    // Pointer-sized unsigned integer: ask the target spec.
                    match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                        "32" => 32,
                        "64" => 64,
                        tws => panic!("Unsupported target word size for usize: {}", tws),
                    }
                },
                ast::TyU8 => 8,
                ast::TyU16 => 16,
                ast::TyU32 => 32,
                ast::TyU64 => 64,
            };
            (bits, false)
        },
        // Not an integer type at all.
        _ => return None,
    };
    Some(width_and_sign)
}

View File

@ -254,60 +254,19 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
"nearbyintf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64),
"roundf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32),
"roundf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64),
"ctpop8" => (0, vec!( tcx.types.u8 ), tcx.types.u8),
"ctpop16" => (0, vec!( tcx.types.u16 ), tcx.types.u16),
"ctpop32" => (0, vec!( tcx.types.u32 ), tcx.types.u32),
"ctpop64" => (0, vec!( tcx.types.u64 ), tcx.types.u64),
"ctlz8" => (0, vec!( tcx.types.u8 ), tcx.types.u8),
"ctlz16" => (0, vec!( tcx.types.u16 ), tcx.types.u16),
"ctlz32" => (0, vec!( tcx.types.u32 ), tcx.types.u32),
"ctlz64" => (0, vec!( tcx.types.u64 ), tcx.types.u64),
"cttz8" => (0, vec!( tcx.types.u8 ), tcx.types.u8),
"cttz16" => (0, vec!( tcx.types.u16 ), tcx.types.u16),
"cttz32" => (0, vec!( tcx.types.u32 ), tcx.types.u32),
"cttz64" => (0, vec!( tcx.types.u64 ), tcx.types.u64),
"bswap16" => (0, vec!( tcx.types.u16 ), tcx.types.u16),
"bswap32" => (0, vec!( tcx.types.u32 ), tcx.types.u32),
"bswap64" => (0, vec!( tcx.types.u64 ), tcx.types.u64),
"volatile_load" =>
(1, vec!( tcx.mk_imm_ptr(param(ccx, 0)) ), param(ccx, 0)),
"volatile_store" =>
(1, vec!( tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0) ), tcx.mk_nil()),
"i8_add_with_overflow" | "i8_sub_with_overflow" | "i8_mul_with_overflow" =>
(0, vec!(tcx.types.i8, tcx.types.i8),
tcx.mk_tup(vec!(tcx.types.i8, tcx.types.bool))),
"ctpop" | "ctlz" | "cttz" | "bswap" => (1, vec!(param(ccx, 0)), param(ccx, 0)),
"i16_add_with_overflow" | "i16_sub_with_overflow" | "i16_mul_with_overflow" =>
(0, vec!(tcx.types.i16, tcx.types.i16),
tcx.mk_tup(vec!(tcx.types.i16, tcx.types.bool))),
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" =>
(1, vec!(param(ccx, 0), param(ccx, 0)),
tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
"i32_add_with_overflow" | "i32_sub_with_overflow" | "i32_mul_with_overflow" =>
(0, vec!(tcx.types.i32, tcx.types.i32),
tcx.mk_tup(vec!(tcx.types.i32, tcx.types.bool))),
"i64_add_with_overflow" | "i64_sub_with_overflow" | "i64_mul_with_overflow" =>
(0, vec!(tcx.types.i64, tcx.types.i64),
tcx.mk_tup(vec!(tcx.types.i64, tcx.types.bool))),
"u8_add_with_overflow" | "u8_sub_with_overflow" | "u8_mul_with_overflow" =>
(0, vec!(tcx.types.u8, tcx.types.u8),
tcx.mk_tup(vec!(tcx.types.u8, tcx.types.bool))),
"u16_add_with_overflow" | "u16_sub_with_overflow" | "u16_mul_with_overflow" =>
(0, vec!(tcx.types.u16, tcx.types.u16),
tcx.mk_tup(vec!(tcx.types.u16, tcx.types.bool))),
"u32_add_with_overflow" | "u32_sub_with_overflow" | "u32_mul_with_overflow"=>
(0, vec!(tcx.types.u32, tcx.types.u32),
tcx.mk_tup(vec!(tcx.types.u32, tcx.types.bool))),
"u64_add_with_overflow" | "u64_sub_with_overflow" | "u64_mul_with_overflow" =>
(0, vec!(tcx.types.u64, tcx.types.u64),
tcx.mk_tup(vec!(tcx.types.u64, tcx.types.bool))),
"unchecked_udiv" | "unchecked_sdiv" | "unchecked_urem" | "unchecked_srem" =>
"unchecked_div" | "unchecked_rem" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
"overflowing_add" | "overflowing_sub" | "overflowing_mul" =>

View File

@ -14,24 +14,10 @@
mod rusti {
extern "rust-intrinsic" {
pub fn ctpop8(x: u8) -> u8;
pub fn ctpop16(x: u16) -> u16;
pub fn ctpop32(x: u32) -> u32;
pub fn ctpop64(x: u64) -> u64;
pub fn ctlz8(x: u8) -> u8;
pub fn ctlz16(x: u16) -> u16;
pub fn ctlz32(x: u32) -> u32;
pub fn ctlz64(x: u64) -> u64;
pub fn cttz8(x: u8) -> u8;
pub fn cttz16(x: u16) -> u16;
pub fn cttz32(x: u32) -> u32;
pub fn cttz64(x: u64) -> u64;
pub fn bswap16(x: u16) -> u16;
pub fn bswap32(x: u32) -> u32;
pub fn bswap64(x: u64) -> u64;
pub fn ctpop<T>(x: T) -> T;
pub fn ctlz<T>(x: T) -> T;
pub fn cttz<T>(x: T) -> T;
pub fn bswap<T>(x: T) -> T;
}
}
@ -39,78 +25,83 @@ pub fn main() {
unsafe {
use rusti::*;
assert_eq!(ctpop8(0), 0);
assert_eq!(ctpop16(0), 0);
assert_eq!(ctpop32(0), 0);
assert_eq!(ctpop64(0), 0);
assert_eq!(ctpop(0u8), 0); assert_eq!(ctpop(0i8), 0);
assert_eq!(ctpop(0u16), 0); assert_eq!(ctpop(0i16), 0);
assert_eq!(ctpop(0u32), 0); assert_eq!(ctpop(0i32), 0);
assert_eq!(ctpop(0u64), 0); assert_eq!(ctpop(0i64), 0);
assert_eq!(ctpop8(1), 1);
assert_eq!(ctpop16(1), 1);
assert_eq!(ctpop32(1), 1);
assert_eq!(ctpop64(1), 1);
assert_eq!(ctpop(1u8), 1); assert_eq!(ctpop(1i8), 1);
assert_eq!(ctpop(1u16), 1); assert_eq!(ctpop(1i16), 1);
assert_eq!(ctpop(1u32), 1); assert_eq!(ctpop(1i32), 1);
assert_eq!(ctpop(1u64), 1); assert_eq!(ctpop(1i64), 1);
assert_eq!(ctpop8(10), 2);
assert_eq!(ctpop16(10), 2);
assert_eq!(ctpop32(10), 2);
assert_eq!(ctpop64(10), 2);
assert_eq!(ctpop(10u8), 2); assert_eq!(ctpop(10i8), 2);
assert_eq!(ctpop(10u16), 2); assert_eq!(ctpop(10i16), 2);
assert_eq!(ctpop(10u32), 2); assert_eq!(ctpop(10i32), 2);
assert_eq!(ctpop(10u64), 2); assert_eq!(ctpop(10i64), 2);
assert_eq!(ctpop8(100), 3);
assert_eq!(ctpop16(100), 3);
assert_eq!(ctpop32(100), 3);
assert_eq!(ctpop64(100), 3);
assert_eq!(ctpop(100u8), 3); assert_eq!(ctpop(100i8), 3);
assert_eq!(ctpop(100u16), 3); assert_eq!(ctpop(100i16), 3);
assert_eq!(ctpop(100u32), 3); assert_eq!(ctpop(100i32), 3);
assert_eq!(ctpop(100u64), 3); assert_eq!(ctpop(100i64), 3);
assert_eq!(ctpop8(-1), 8);
assert_eq!(ctpop16(-1), 16);
assert_eq!(ctpop32(-1), 32);
assert_eq!(ctpop64(-1), 64);
assert_eq!(ctpop(-1u8), 8); assert_eq!(ctpop(-1i8), 8);
assert_eq!(ctpop(-1u16), 16); assert_eq!(ctpop(-1i16), 16);
assert_eq!(ctpop(-1u32), 32); assert_eq!(ctpop(-1i32), 32);
assert_eq!(ctpop(-1u64), 64); assert_eq!(ctpop(-1i64), 64);
assert_eq!(ctlz8(0), 8);
assert_eq!(ctlz16(0), 16);
assert_eq!(ctlz32(0), 32);
assert_eq!(ctlz64(0), 64);
assert_eq!(ctlz(0u8), 8); assert_eq!(ctlz(0i8), 8);
assert_eq!(ctlz(0u16), 16); assert_eq!(ctlz(0i16), 16);
assert_eq!(ctlz(0u32), 32); assert_eq!(ctlz(0i32), 32);
assert_eq!(ctlz(0u64), 64); assert_eq!(ctlz(0i64), 64);
assert_eq!(ctlz8(1), 7);
assert_eq!(ctlz16(1), 15);
assert_eq!(ctlz32(1), 31);
assert_eq!(ctlz64(1), 63);
assert_eq!(ctlz(1u8), 7); assert_eq!(ctlz(1i8), 7);
assert_eq!(ctlz(1u16), 15); assert_eq!(ctlz(1i16), 15);
assert_eq!(ctlz(1u32), 31); assert_eq!(ctlz(1i32), 31);
assert_eq!(ctlz(1u64), 63); assert_eq!(ctlz(1i64), 63);
assert_eq!(ctlz8(10), 4);
assert_eq!(ctlz16(10), 12);
assert_eq!(ctlz32(10), 28);
assert_eq!(ctlz64(10), 60);
assert_eq!(ctlz(10u8), 4); assert_eq!(ctlz(10i8), 4);
assert_eq!(ctlz(10u16), 12); assert_eq!(ctlz(10i16), 12);
assert_eq!(ctlz(10u32), 28); assert_eq!(ctlz(10i32), 28);
assert_eq!(ctlz(10u64), 60); assert_eq!(ctlz(10i64), 60);
assert_eq!(ctlz8(100), 1);
assert_eq!(ctlz16(100), 9);
assert_eq!(ctlz32(100), 25);
assert_eq!(ctlz64(100), 57);
assert_eq!(ctlz(100u8), 1); assert_eq!(ctlz(100i8), 1);
assert_eq!(ctlz(100u16), 9); assert_eq!(ctlz(100i16), 9);
assert_eq!(ctlz(100u32), 25); assert_eq!(ctlz(100i32), 25);
assert_eq!(ctlz(100u64), 57); assert_eq!(ctlz(100i64), 57);
assert_eq!(cttz8(-1), 0);
assert_eq!(cttz16(-1), 0);
assert_eq!(cttz32(-1), 0);
assert_eq!(cttz64(-1), 0);
assert_eq!(cttz(-1u8), 0); assert_eq!(cttz(-1i8), 0);
assert_eq!(cttz(-1u16), 0); assert_eq!(cttz(-1i16), 0);
assert_eq!(cttz(-1u32), 0); assert_eq!(cttz(-1i32), 0);
assert_eq!(cttz(-1u64), 0); assert_eq!(cttz(-1i64), 0);
assert_eq!(cttz8(0), 8);
assert_eq!(cttz16(0), 16);
assert_eq!(cttz32(0), 32);
assert_eq!(cttz64(0), 64);
assert_eq!(cttz(0u8), 8); assert_eq!(cttz(0i8), 8);
assert_eq!(cttz(0u16), 16); assert_eq!(cttz(0i16), 16);
assert_eq!(cttz(0u32), 32); assert_eq!(cttz(0i32), 32);
assert_eq!(cttz(0u64), 64); assert_eq!(cttz(0i64), 64);
assert_eq!(cttz8(1), 0);
assert_eq!(cttz16(1), 0);
assert_eq!(cttz32(1), 0);
assert_eq!(cttz64(1), 0);
assert_eq!(cttz(1u8), 0); assert_eq!(cttz(1i8), 0);
assert_eq!(cttz(1u16), 0); assert_eq!(cttz(1i16), 0);
assert_eq!(cttz(1u32), 0); assert_eq!(cttz(1i32), 0);
assert_eq!(cttz(1u64), 0); assert_eq!(cttz(1i64), 0);
assert_eq!(cttz8(10), 1);
assert_eq!(cttz16(10), 1);
assert_eq!(cttz32(10), 1);
assert_eq!(cttz64(10), 1);
assert_eq!(cttz(10u8), 1); assert_eq!(cttz(10i8), 1);
assert_eq!(cttz(10u16), 1); assert_eq!(cttz(10i16), 1);
assert_eq!(cttz(10u32), 1); assert_eq!(cttz(10i32), 1);
assert_eq!(cttz(10u64), 1); assert_eq!(cttz(10i64), 1);
assert_eq!(cttz8(100), 2);
assert_eq!(cttz16(100), 2);
assert_eq!(cttz32(100), 2);
assert_eq!(cttz64(100), 2);
assert_eq!(cttz(100u8), 2); assert_eq!(cttz(100i8), 2);
assert_eq!(cttz(100u16), 2); assert_eq!(cttz(100i16), 2);
assert_eq!(cttz(100u32), 2); assert_eq!(cttz(100i32), 2);
assert_eq!(cttz(100u64), 2); assert_eq!(cttz(100i64), 2);
assert_eq!(bswap16(0x0A0B), 0x0B0A);
assert_eq!(bswap32(0x0ABBCC0D), 0x0DCCBB0A);
assert_eq!(bswap64(0x0122334455667708), 0x0877665544332201);
assert_eq!(bswap(0x0Au8), 0x0A); // no-op
assert_eq!(bswap(0x0Ai8), 0x0A); // no-op
assert_eq!(bswap(0x0A0Bu16), 0x0B0A);
assert_eq!(bswap(0x0A0Bi16), 0x0B0A);
assert_eq!(bswap(0x0ABBCC0Du32), 0x0DCCBB0A);
assert_eq!(bswap(0x0ABBCC0Di32), 0x0DCCBB0A);
assert_eq!(bswap(0x0122334455667708u64), 0x0877665544332201);
assert_eq!(bswap(0x0122334455667708i64), 0x0877665544332201);
}
}