diff --git a/src/librustc/middle/trans/intrinsic.rs b/src/librustc/middle/trans/intrinsic.rs
index 3a0083ba5c6..86fde5d821a 100644
--- a/src/librustc/middle/trans/intrinsic.rs
+++ b/src/librustc/middle/trans/intrinsic.rs
@@ -129,7 +129,7 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         RetVoid(bcx);
     }
 
-    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, tp_ty: ty::t) {
+    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool, tp_ty: ty::t) {
         let ccx = bcx.ccx();
         let lltp_ty = type_of::type_of(ccx, tp_ty);
         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
@@ -154,13 +154,12 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
         let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p(ccx));
         let count = get_param(decl, first_real_arg + 2);
-        let volatile = C_i1(ccx, false);
         let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, volatile], []);
+        Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
         RetVoid(bcx);
     }
 
-    fn memset_intrinsic(bcx: &Block, tp_ty: ty::t) {
+    fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t) {
         let ccx = bcx.ccx();
         let lltp_ty = type_of::type_of(ccx, tp_ty);
         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
@@ -176,9 +175,8 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
         let val = get_param(decl, first_real_arg + 1);
         let count = get_param(decl, first_real_arg + 2);
-        let volatile = C_i1(ccx, false);
         let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, volatile], []);
+        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
         RetVoid(bcx);
     }
 
@@ -466,11 +464,15 @@ pub fn trans_intrinsic(ccx: &CrateContext,
             let lladdr = InBoundsGEP(bcx, ptr, [offset]);
             Ret(bcx, lladdr);
         }
-        "copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, *substs.tys.get(0))
-        }
-        "copy_memory" => copy_intrinsic(bcx, true, *substs.tys.get(0)),
-        "set_memory" => memset_intrinsic(bcx, *substs.tys.get(0)),
+        "copy_nonoverlapping_memory" => copy_intrinsic(bcx, false, false, *substs.tys.get(0)),
+        "copy_memory" => copy_intrinsic(bcx, true, false, *substs.tys.get(0)),
+        "set_memory" => memset_intrinsic(bcx, false, *substs.tys.get(0)),
+
+        "volatile_copy_nonoverlapping_memory" =>
+            copy_intrinsic(bcx, false, true, *substs.tys.get(0)),
+        "volatile_copy_memory" => copy_intrinsic(bcx, true, true, *substs.tys.get(0)),
+        "volatile_set_memory" => memset_intrinsic(bcx, true, *substs.tys.get(0)),
+
        "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
        "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
        "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index dc5b4f6d520..9664ad9fe55 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -4127,7 +4127,8 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
                  mutbl: ast::MutImmutable
              }))
           }
-          "copy_nonoverlapping_memory" => {
+          "copy_memory" | "copy_nonoverlapping_memory" |
+          "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
            (1,
             vec!(
                ty::mk_ptr(tcx, ty::mt {
@@ -4142,22 +4143,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
             ),
             ty::mk_nil())
          }
-          "copy_memory" => {
-            (1,
-             vec!(
-                ty::mk_ptr(tcx, ty::mt {
-                    ty: param(ccx, 0),
-                    mutbl: ast::MutMutable
-                }),
-                ty::mk_ptr(tcx, ty::mt {
-                    ty: param(ccx, 0),
-                    mutbl: ast::MutImmutable
-                }),
-                ty::mk_uint()
-             ),
-             ty::mk_nil())
-          }
-          "set_memory" => {
+          "set_memory" | "volatile_set_memory" => {
            (1,
             vec!(
                ty::mk_ptr(tcx, ty::mt {
diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs
index 7f02ab28342..1b419e59a70 100644
--- a/src/libstd/intrinsics.rs
+++ b/src/libstd/intrinsics.rs
@@ -261,10 +261,6 @@ extern "rust-intrinsic" {
     /// Execute a breakpoint trap, for inspection by a debugger.
     pub fn breakpoint();
 
-    pub fn volatile_load<T>(src: *T) -> T;
-    pub fn volatile_store<T>(dst: *mut T, val: T);
-
-
     /// The size of a type in bytes.
     ///
     /// This is the exact number of bytes in memory taken up by a
@@ -338,6 +334,33 @@ extern "rust-intrinsic" {
     /// `min_align_of::<T>()`
     pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
 
+    /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
+    /// size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`.
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
+
+    /// Perform a volatile load from the `src` pointer.
+    pub fn volatile_load<T>(src: *T) -> T;
+    /// Perform a volatile store to the `dst` pointer.
+    pub fn volatile_store<T>(dst: *mut T, val: T);
+
     pub fn sqrtf32(x: f32) -> f32;
     pub fn sqrtf64(x: f64) -> f64;