From 7c5ea621560d55b00f046b2b474bb7ce554a4d73 Mon Sep 17 00:00:00 2001
From: Scott Olson
Date: Sat, 5 Mar 2016 00:48:23 -0600
Subject: [PATCH] Move memory module to its own file.

---
 src/interpreter.rs | 166 +--------------------------------------------
 src/lib.rs         |   1 +
 src/memory.rs      | 160 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 163 insertions(+), 164 deletions(-)
 create mode 100644 src/memory.rs

diff --git a/src/interpreter.rs b/src/interpreter.rs
index 9dc35703c7b..2d97de1032d 100644
--- a/src/interpreter.rs
+++ b/src/interpreter.rs
@@ -16,172 +16,10 @@ use std::iter;
 use syntax::ast::Attribute;
 use syntax::attr::AttrMetaMethods;
 
+use memory::{self, Pointer, Repr, Allocation};
+
 const TRACE_EXECUTION: bool = true;
 
-mod memory {
-    use byteorder;
-    use byteorder::ByteOrder;
-    use rustc::middle::ty;
-    use std::collections::HashMap;
-    use std::mem;
-    use std::ops::Add;
-    use std::ptr;
-    use super::{EvalError, EvalResult};
-
-    pub struct Memory {
-        next_id: u64,
-        alloc_map: HashMap<u64, Allocation>,
-    }
-
-    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
-    pub struct AllocId(u64);
-
-    // TODO(tsion): Shouldn't clone Allocation. (Audit the rest of the code.)
-    #[derive(Clone, Debug)]
-    pub struct Allocation {
-        pub bytes: Vec<u8>,
-        // TODO(tsion): relocations
-        // TODO(tsion): undef mask
-    }
-
-    #[derive(Clone, Debug, PartialEq, Eq)]
-    pub struct Pointer {
-        pub alloc_id: AllocId,
-        pub offset: usize,
-        pub repr: Repr,
-    }
-
-    #[derive(Clone, Debug, PartialEq, Eq)]
-    pub struct FieldRepr {
-        pub offset: usize,
-        pub repr: Repr,
-    }
-
-    #[derive(Clone, Debug, PartialEq, Eq)]
-    pub enum Repr {
-        Int,
-        Aggregate {
-            size: usize,
-            fields: Vec<FieldRepr>,
-        },
-    }
-
-    impl Memory {
-        pub fn new() -> Self {
-            Memory { next_id: 0, alloc_map: HashMap::new() }
-        }
-
-        pub fn allocate_raw(&mut self, size: usize) -> AllocId {
-            let id = AllocId(self.next_id);
-            let alloc = Allocation { bytes: vec![0; size] };
-            self.alloc_map.insert(self.next_id, alloc);
-            self.next_id += 1;
-            id
-        }
-
-        pub fn allocate(&mut self, repr: Repr) -> Pointer {
-            Pointer {
-                alloc_id: self.allocate_raw(repr.size()),
-                offset: 0,
-                repr: repr,
-            }
-        }
-
-        pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
-            self.alloc_map.get(&id.0).ok_or(EvalError::DanglingPointerDeref)
-        }
-
-        pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
-            self.alloc_map.get_mut(&id.0).ok_or(EvalError::DanglingPointerDeref)
-        }
-
-        fn get_bytes(&self, ptr: &Pointer, size: usize) -> EvalResult<&[u8]> {
-            let alloc = try!(self.get(ptr.alloc_id));
-            try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
-            Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
-        }
-
-        fn get_bytes_mut(&mut self, ptr: &Pointer, size: usize) -> EvalResult<&mut [u8]> {
-            let alloc = try!(self.get_mut(ptr.alloc_id));
-            try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
-            Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
-        }
-
-        pub fn copy(&mut self, src: &Pointer, dest: &Pointer, size: usize) -> EvalResult<()> {
-            let src_bytes = try!(self.get_bytes_mut(src, size)).as_mut_ptr();
-            let dest_bytes = try!(self.get_bytes_mut(dest, size)).as_mut_ptr();
-
-            // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
-            // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
-            // `dest` could possibly overlap.
-            unsafe {
-                if src.alloc_id == dest.alloc_id {
-                    ptr::copy(src_bytes, dest_bytes, size);
-                } else {
-                    ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
-                }
-            }
-
-            Ok(())
-        }
-
-        pub fn read_int(&self, ptr: &Pointer) -> EvalResult<i64> {
-            let bytes = try!(self.get_bytes(ptr, Repr::Int.size()));
-            Ok(byteorder::NativeEndian::read_i64(bytes))
-        }
-
-        pub fn write_int(&mut self, ptr: &Pointer, n: i64) -> EvalResult<()> {
-            let bytes = try!(self.get_bytes_mut(ptr, Repr::Int.size()));
-            Ok(byteorder::NativeEndian::write_i64(bytes, n))
-        }
-    }
-
-    impl Allocation {
-        fn check_bytes(&self, start: usize, end: usize) -> EvalResult<()> {
-            if start >= self.bytes.len() || end > self.bytes.len() {
-                return Err(EvalError::PointerOutOfBounds);
-            }
-            Ok(())
-        }
-    }
-
-    impl Pointer {
-        pub fn offset(&self, i: usize) -> Self {
-            Pointer { offset: self.offset + i, ..self.clone() }
-        }
-    }
-
-    impl Repr {
-        // TODO(tsion): Cache these outputs.
-        pub fn from_ty(ty: ty::Ty) -> Self {
-            match ty.sty {
-                ty::TyInt(_) => Repr::Int,
-
-                ty::TyTuple(ref fields) => {
-                    let mut size = 0;
-                    let fields = fields.iter().map(|ty| {
-                        let repr = Repr::from_ty(ty);
-                        let old_size = size;
-                        size += repr.size();
-                        FieldRepr { offset: old_size, repr: repr }
-                    }).collect();
-                    Repr::Aggregate { size: size, fields: fields }
-                },
-
-                _ => unimplemented!(),
-            }
-        }
-
-        pub fn size(&self) -> usize {
-            match *self {
-                Repr::Int => mem::size_of::<i64>(),
-                Repr::Aggregate { size, .. } => size,
-            }
-        }
-    }
-}
-use self::memory::{Pointer, Repr, Allocation};
-
 #[derive(Clone, Debug)]
 pub enum EvalError {
     DanglingPointerDeref,
diff --git a/src/lib.rs b/src/lib.rs
index c7be3a0340c..036b87c2c7f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -6,3 +6,4 @@ extern crate rustc_mir;
 extern crate syntax;
 
 pub mod interpreter;
+mod memory;
diff --git a/src/memory.rs b/src/memory.rs
new file mode 100644
index 00000000000..0fe2b735671
--- /dev/null
+++ b/src/memory.rs
@@ -0,0 +1,160 @@
+use byteorder;
+use byteorder::ByteOrder;
+use rustc::middle::ty;
+use std::collections::HashMap;
+use std::mem;
+use std::ptr;
+
+use interpreter::{EvalError, EvalResult};
+
+pub struct Memory {
+    next_id: u64,
+    alloc_map: HashMap<u64, Allocation>,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct AllocId(u64);
+
+// TODO(tsion): Shouldn't clone Allocation. (Audit the rest of the code.)
+#[derive(Clone, Debug)]
+pub struct Allocation {
+    pub bytes: Vec<u8>,
+    // TODO(tsion): relocations
+    // TODO(tsion): undef mask
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Pointer {
+    pub alloc_id: AllocId,
+    pub offset: usize,
+    pub repr: Repr,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct FieldRepr {
+    pub offset: usize,
+    pub repr: Repr,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Repr {
+    Int,
+    Aggregate {
+        size: usize,
+        fields: Vec<FieldRepr>,
+    },
+}
+
+impl Memory {
+    pub fn new() -> Self {
+        Memory { next_id: 0, alloc_map: HashMap::new() }
+    }
+
+    pub fn allocate_raw(&mut self, size: usize) -> AllocId {
+        let id = AllocId(self.next_id);
+        let alloc = Allocation { bytes: vec![0; size] };
+        self.alloc_map.insert(self.next_id, alloc);
+        self.next_id += 1;
+        id
+    }
+
+    pub fn allocate(&mut self, repr: Repr) -> Pointer {
+        Pointer {
+            alloc_id: self.allocate_raw(repr.size()),
+            offset: 0,
+            repr: repr,
+        }
+    }
+
+    pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
+        self.alloc_map.get(&id.0).ok_or(EvalError::DanglingPointerDeref)
+    }
+
+    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
+        self.alloc_map.get_mut(&id.0).ok_or(EvalError::DanglingPointerDeref)
+    }
+
+    fn get_bytes(&self, ptr: &Pointer, size: usize) -> EvalResult<&[u8]> {
+        let alloc = try!(self.get(ptr.alloc_id));
+        try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
+        Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
+    }
+
+    fn get_bytes_mut(&mut self, ptr: &Pointer, size: usize) -> EvalResult<&mut [u8]> {
+        let alloc = try!(self.get_mut(ptr.alloc_id));
+        try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
+        Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
+    }
+
+    pub fn copy(&mut self, src: &Pointer, dest: &Pointer, size: usize) -> EvalResult<()> {
+        let src_bytes = try!(self.get_bytes_mut(src, size)).as_mut_ptr();
+        let dest_bytes = try!(self.get_bytes_mut(dest, size)).as_mut_ptr();
+
+        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        unsafe {
+            if src.alloc_id == dest.alloc_id {
+                ptr::copy(src_bytes, dest_bytes, size);
+            } else {
+                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn read_int(&self, ptr: &Pointer) -> EvalResult<i64> {
+        let bytes = try!(self.get_bytes(ptr, Repr::Int.size()));
+        Ok(byteorder::NativeEndian::read_i64(bytes))
+    }
+
+    pub fn write_int(&mut self, ptr: &Pointer, n: i64) -> EvalResult<()> {
+        let bytes = try!(self.get_bytes_mut(ptr, Repr::Int.size()));
+        Ok(byteorder::NativeEndian::write_i64(bytes, n))
+    }
+}
+
+impl Allocation {
+    fn check_bytes(&self, start: usize, end: usize) -> EvalResult<()> {
+        if start >= self.bytes.len() || end > self.bytes.len() {
+            return Err(EvalError::PointerOutOfBounds);
+        }
+        Ok(())
+    }
+}
+
+impl Pointer {
+    pub fn offset(&self, i: usize) -> Self {
+        Pointer { offset: self.offset + i, ..self.clone() }
+    }
+}
+
+impl Repr {
+    // TODO(tsion): Cache these outputs.
+    pub fn from_ty(ty: ty::Ty) -> Self {
+        match ty.sty {
+            ty::TyInt(_) => Repr::Int,
+
+            ty::TyTuple(ref fields) => {
+                let mut size = 0;
+                let fields = fields.iter().map(|ty| {
+                    let repr = Repr::from_ty(ty);
+                    let old_size = size;
+                    size += repr.size();
+                    FieldRepr { offset: old_size, repr: repr }
+                }).collect();
+                Repr::Aggregate { size: size, fields: fields }
+            },
+
+            _ => unimplemented!(),
+        }
+    }
+
+    pub fn size(&self) -> usize {
+        match *self {
+            Repr::Int => mem::size_of::<i64>(),
+            Repr::Aggregate { size, .. } => size,
+        }
+    }
+}
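Illustrative addendum, not part of the patch: a hypothetical smoke test for the relocated `memory` API, sketched against the code in the diff above. It assumes `EvalResult` is the `Result<T, EvalError>` alias from `interpreter.rs` and that the function lives inside this crate, since `lib.rs` declares `mod memory;` as private; the function name is made up for illustration.

    use memory::{Memory, Repr};

    fn memory_smoke_test() {
        let mut mem = Memory::new();

        // Two separate integer allocations, each backed by its own Allocation.
        let a = mem.allocate(Repr::Int);
        let b = mem.allocate(Repr::Int);

        // Write through one pointer, copy the bytes across allocations, read back.
        mem.write_int(&a, 42).unwrap();
        mem.copy(&a, &b, Repr::Int.size()).unwrap();
        assert_eq!(mem.read_int(&b).unwrap(), 42);
    }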