auto merge of #11173 : whitequark/rust/master, r=thestinger

This PR adds `std::unstable::intrinsics::{volatile_load, volatile_store}`, which map to LLVM's `load volatile` and `store volatile` operations, respectively.

This would fix #11172.

I have addressed several uncertainties with this PR in the line comments.
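For illustration, a minimal end-to-end use of the new intrinsics (essentially the run-make test added below; note they live under `std::unstable::intrinsics`):

```rust
use std::unstable::intrinsics::{volatile_load, volatile_store};

fn main() {
    unsafe {
        let mut i: int = 1;
        // Lowers to `store volatile` and `load volatile`; the optimizer
        // may not elide these or reorder them across other volatile ops.
        volatile_store(&mut i, 2);
        assert_eq!(volatile_load(&i), 2);
    }
}
```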
Committed by bors on 2013-12-31 15:06:55 -08:00, commit 1dcc986d52.
8 changed files with 103 additions and 0 deletions.


@@ -787,6 +787,10 @@ pub mod llvm {
    pub fn LLVMIsTailCall(CallInst: ValueRef) -> Bool;
    pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool);

    /* Operations on load/store instructions (only) */
    pub fn LLVMGetVolatile(MemoryAccessInst: ValueRef) -> Bool;
    pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);

    /* Operations on phi nodes */
    pub fn LLVMAddIncoming(PhiNode: ValueRef,
                           IncomingValues: *ValueRef,


@@ -349,6 +349,13 @@ pub fn Load(cx: &Block, PointerVal: ValueRef) -> ValueRef {
    }
}

pub fn VolatileLoad(cx: &Block, PointerVal: ValueRef) -> ValueRef {
    unsafe {
        if cx.unreachable.get() { return llvm::LLVMGetUndef(Type::nil().to_ref()); }
        B(cx).volatile_load(PointerVal)
    }
}

pub fn AtomicLoad(cx: &Block, PointerVal: ValueRef, order: AtomicOrdering) -> ValueRef {
    unsafe {
        let ccx = cx.fcx.ccx;

@@ -383,6 +390,11 @@ pub fn Store(cx: &Block, Val: ValueRef, Ptr: ValueRef) {
    B(cx).store(Val, Ptr)
}

pub fn VolatileStore(cx: &Block, Val: ValueRef, Ptr: ValueRef) {
    if cx.unreachable.get() { return; }
    B(cx).volatile_store(Val, Ptr)
}

pub fn AtomicStore(cx: &Block, Val: ValueRef, Ptr: ValueRef, order: AtomicOrdering) {
    if cx.unreachable.get() { return; }
    B(cx).atomic_store(Val, Ptr, order)


@@ -449,6 +449,15 @@ impl Builder {
        }
    }

    pub fn volatile_load(&self, ptr: ValueRef) -> ValueRef {
        self.count_insn("load.volatile");
        unsafe {
            let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
            llvm::LLVMSetVolatile(insn, lib::llvm::True);
            insn
        }
    }

    pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
        self.count_insn("load.atomic");
        unsafe {

@@ -488,6 +497,18 @@ impl Builder {
        }
    }

    pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
        debug!("Store {} -> {}",
               self.ccx.tn.val_to_str(val),
               self.ccx.tn.val_to_str(ptr));
        assert!(is_not_null(self.llbuilder));
        self.count_insn("store.volatile");
        unsafe {
            let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            llvm::LLVMSetVolatile(insn, lib::llvm::True);
        }
    }

    pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
        debug!("Store {} -> {}",
               self.ccx.tn.val_to_str(val),


@@ -73,6 +73,23 @@ pub fn trans_intrinsic(ccx: @CrateContext,
        }
    }

    fn volatile_load_intrinsic(bcx: @Block) {
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let src = get_param(bcx.fcx.llfn, first_real_arg);
        let val = VolatileLoad(bcx, src);
        Ret(bcx, val);
    }

    fn volatile_store_intrinsic(bcx: @Block) {
        let first_real_arg = bcx.fcx.arg_pos(0u);
        let dst = get_param(bcx.fcx.llfn, first_real_arg);
        let val = get_param(bcx.fcx.llfn, first_real_arg + 1);
        VolatileStore(bcx, val, dst);
        RetVoid(bcx);
    }

    fn copy_intrinsic(bcx: @Block, allow_overlap: bool, tp_ty: ty::t) {
        let ccx = bcx.ccx();
        let lltp_ty = type_of::type_of(ccx, tp_ty);

@@ -480,6 +497,9 @@ pub fn trans_intrinsic(ccx: @CrateContext,
        "bswap32" => simple_llvm_intrinsic(bcx, "llvm.bswap.i32", 1),
        "bswap64" => simple_llvm_intrinsic(bcx, "llvm.bswap.i64", 1),
        "volatile_load" => volatile_load_intrinsic(bcx),
        "volatile_store" => volatile_store_intrinsic(bcx),
        "i8_add_with_overflow" =>
            with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8", output_type),
        "i16_add_with_overflow" =>


@@ -4226,6 +4226,11 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: @ast::foreign_item) {
        "bswap32" => (0, ~[ ty::mk_i32() ], ty::mk_i32()),
        "bswap64" => (0, ~[ ty::mk_i64() ], ty::mk_i64()),
        "volatile_load" =>
            (1, ~[ ty::mk_imm_ptr(tcx, param(ccx, 0)) ], param(ccx, 0)),
        "volatile_store" =>
            (1, ~[ ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], ty::mk_nil()),
        "i8_add_with_overflow" | "i8_sub_with_overflow" | "i8_mul_with_overflow" =>
            (0, ~[ty::mk_i8(), ty::mk_i8()],
             ty::mk_tup(tcx, ~[ty::mk_i8(), ty::mk_bool()])),
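Read at the language level, the two typeck entries above each admit one type parameter and check against the following signatures (matching the declarations added to the intrinsics module below):

```rust
extern "rust-intrinsic" {
    // (1, ~[ mk_imm_ptr(T) ], T): one type param, takes *T, returns T.
    fn volatile_load<T>(src: *T) -> T;
    // (1, ~[ mk_mut_ptr(T), T ], nil): takes (*mut T, T), returns ().
    fn volatile_store<T>(dst: *mut T, val: T);
}
```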


@@ -12,6 +12,15 @@
The corresponding definitions are in librustc/middle/trans/foreign.rs.

# Volatiles

The volatile intrinsics provide operations intended to act on I/O
memory, which are guaranteed not to be reordered by the compiler
across other volatile intrinsics. See the LLVM documentation on
[volatile].

[volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses

# Atomics

The atomic intrinsics provide common atomic operations on machine

@@ -179,6 +188,9 @@ extern "rust-intrinsic" {
    /// Execute a breakpoint trap, for inspection by a debugger.
    pub fn breakpoint();

    #[cfg(not(stage0))] pub fn volatile_load<T>(src: *T) -> T;
    #[cfg(not(stage0))] pub fn volatile_store<T>(dst: *mut T, val: T);

    /// Atomic compare and exchange, sequentially consistent.
    pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire ordering.
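The doc comment above frames these as operations on I/O memory; a sketch of the intended style of use, with a hypothetical memory-mapped register address (illustration only, not part of this PR):

```rust
use std::unstable::intrinsics::volatile_store;

// Hypothetical MMIO register address; real addresses are hardware-specific.
static UART_TX: uint = 0x1000_0000;

unsafe fn uart_write_byte(b: u8) {
    // A plain store here could legally be coalesced or dropped by the
    // optimizer; volatile_store forces one visible store to the device.
    volatile_store(UART_TX as *mut u8, b);
}
```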


@@ -0,0 +1,10 @@
-include ../tools.mk

all:
	# The tests must pass...
	$(RUSTC) main.rs
	$(call RUN,main)
	# ... and the loads/stores must not be optimized out.
	$(RUSTC) main.rs --emit-llvm -S
	grep "load volatile" $(TMPDIR)/main.ll
	grep "store volatile" $(TMPDIR)/main.ll


@@ -0,0 +1,19 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::unstable::intrinsics::{volatile_load, volatile_store};

pub fn main() {
    unsafe {
        let mut i : int = 1;
        volatile_store(&mut i, 2);
        assert_eq!(volatile_load(&i), 2);
    }
}