parameterize `BitVector` and `BitMatrix` by their index types

Niko Matsakis 2018-07-22 19:23:39 +03:00
parent a54401ebcc
commit 145155dc96
16 changed files with 119 additions and 96 deletions
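
For context on the shape of the change at call sites: the element type is now part of the bit set's type, so the `.index()` conversions scattered through the files below disappear and cross-domain mix-ups become type errors. A minimal sketch of the new style (illustrative only; it assumes the in-tree `rustc::mir` and `rustc_data_structures` crates at this revision, and `mark_visited` is a hypothetical helper, not code from this commit):

use rustc::mir::BasicBlock;
use rustc_data_structures::bitvec::BitVector;

// Hypothetical helper showing the typed API introduced by this commit.
fn mark_visited(visited: &mut BitVector<BasicBlock>, bb: BasicBlock) -> bool {
    // Before: `visited.insert(bb.index())`; any usize was accepted, so an
    // index from another domain (say, a Local) also compiled silently.
    // After: the set only accepts `BasicBlock` values.
    visited.insert(bb)
}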

View File

@ -9,7 +9,6 @@
// except according to those terms.
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::Idx;
use super::*;
@ -33,7 +32,7 @@ use super::*;
#[derive(Clone)]
pub struct Preorder<'a, 'tcx: 'a> {
mir: &'a Mir<'tcx>,
visited: BitVector,
visited: BitVector<BasicBlock>,
worklist: Vec<BasicBlock>,
}
@ -58,7 +57,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
while let Some(idx) = self.worklist.pop() {
if !self.visited.insert(idx.index()) {
if !self.visited.insert(idx) {
continue;
}
@ -107,7 +106,7 @@ impl<'a, 'tcx> ExactSizeIterator for Preorder<'a, 'tcx> {}
/// A Postorder traversal of this graph is `D B C A` or `D C B A`
pub struct Postorder<'a, 'tcx: 'a> {
mir: &'a Mir<'tcx>,
visited: BitVector,
visited: BitVector<BasicBlock>,
visit_stack: Vec<(BasicBlock, Successors<'a>)>
}
@ -123,7 +122,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
let data = &po.mir[root];
if let Some(ref term) = data.terminator {
po.visited.insert(root.index());
po.visited.insert(root);
po.visit_stack.push((root, term.successors()));
po.traverse_successor();
}
@ -190,8 +189,8 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
break;
};
if self.visited.insert(bb.index()) {
if let Some(ref term) = self.mir[bb].terminator {
if self.visited.insert(bb) {
if let Some(term) = &self.mir[bb].terminator {
self.visit_stack.push((bb, term.successors()));
}
}
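
The reason a single `insert` call can drive both traversals above is its contract ("Returns true if the bit has changed", per the doc comment further down): a freshly queued block gets visited, an already-seen one is skipped. A generic sketch of that worklist pattern (hypothetical helper, not part of the commit; assumes the in-tree `rustc_data_structures` crate):

use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::Idx;

// Hypothetical helper: drain a worklist, visiting each node at most once.
fn drain_once<N: Idx, F: FnMut(N)>(
    worklist: &mut Vec<N>,
    visited: &mut BitVector<N>,
    mut visit: F,
) {
    while let Some(node) = worklist.pop() {
        // `insert` returns false when the bit was already set, so revisits
        // are filtered without a separate `contains` check.
        if !visited.insert(node) {
            continue;
        }
        visit(node);
    }
}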

View File

@ -65,7 +65,7 @@ pub fn create_mir_scopes(cx: &CodegenCx, mir: &Mir, debug_context: &FunctionDebu
let mut has_variables = BitVector::new(mir.source_scopes.len());
for var in mir.vars_iter() {
let decl = &mir.local_decls[var];
has_variables.insert(decl.visibility_scope.index());
has_variables.insert(decl.visibility_scope);
}
// Instantiate all scopes.
@ -79,7 +79,7 @@ pub fn create_mir_scopes(cx: &CodegenCx, mir: &Mir, debug_context: &FunctionDebu
fn make_mir_scope(cx: &CodegenCx,
mir: &Mir,
has_variables: &BitVector,
has_variables: &BitVector<SourceScope>,
debug_context: &FunctionDebugContextData,
scope: SourceScope,
scopes: &mut IndexVec<SourceScope, MirDebugScope>) {
@ -102,7 +102,7 @@ fn make_mir_scope(cx: &CodegenCx,
return;
};
if !has_variables.contains(scope.index()) {
if !has_variables.contains(scope) {
// Do not create a DIScope if there are no variables
// defined in this MIR Scope, to avoid debuginfo bloat.

View File

@ -22,7 +22,7 @@ use rustc::ty::layout::LayoutOf;
use type_of::LayoutLlvmExt;
use super::FunctionCx;
pub fn non_ssa_locals<'a, 'tcx>(fx: &FunctionCx<'a, 'tcx>) -> BitVector {
pub fn non_ssa_locals<'a, 'tcx>(fx: &FunctionCx<'a, 'tcx>) -> BitVector<mir::Local> {
let mir = fx.mir;
let mut analyzer = LocalAnalyzer::new(fx);
@ -54,7 +54,7 @@ pub fn non_ssa_locals<'a, 'tcx>(fx: &FunctionCx<'a, 'tcx>) -> BitVector {
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
fx: &'mir FunctionCx<'a, 'tcx>,
dominators: Dominators<mir::BasicBlock>,
non_ssa_locals: BitVector,
non_ssa_locals: BitVector<mir::Local>,
// The location of the first visited direct assignment to each
// local, or an invalid location (out of bounds `block` index).
first_assignment: IndexVec<mir::Local, Location>
@ -90,7 +90,7 @@ impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
fn not_ssa(&mut self, local: mir::Local) {
debug!("marking {:?} as non-SSA", local);
self.non_ssa_locals.insert(local.index());
self.non_ssa_locals.insert(local);
}
fn assign(&mut self, local: mir::Local, location: Location) {

View File

@ -268,7 +268,7 @@ pub fn codegen_mir<'a, 'tcx: 'a>(
let debug_scope = fx.scopes[decl.visibility_scope];
let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == FullDebugInfo;
if !memory_locals.contains(local.index()) && !dbg {
if !memory_locals.contains(local) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
return LocalRef::new_operand(bx.cx, layout);
}
@ -291,7 +291,7 @@ pub fn codegen_mir<'a, 'tcx: 'a>(
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
} else if memory_locals.contains(local.index()) {
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
} else {
@ -415,7 +415,7 @@ fn create_funclets<'a, 'tcx>(
fn arg_local_refs<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
fx: &FunctionCx<'a, 'tcx>,
scopes: &IndexVec<mir::SourceScope, debuginfo::MirDebugScope>,
memory_locals: &BitVector)
memory_locals: &BitVector<mir::Local>)
-> Vec<LocalRef<'tcx>> {
let mir = fx.mir;
let tcx = bx.tcx();
@ -487,7 +487,7 @@ fn arg_local_refs<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
llarg_idx += 1;
}
if arg_scope.is_none() && !memory_locals.contains(local.index()) {
if arg_scope.is_none() && !memory_locals.contains(local) {
// We don't have to cast or keep the argument in the alloca.
// FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
// of putting everything in allocas just so we can use llvm.dbg.declare.

View File

@ -17,16 +17,18 @@ const WORD_BITS: usize = 128;
/// A very simple BitVector type.
#[derive(Clone, Debug, PartialEq)]
pub struct BitVector {
pub struct BitVector<C: Idx> {
data: Vec<Word>,
marker: PhantomData<C>,
}
impl BitVector {
impl<C: Idx> BitVector<C> {
#[inline]
pub fn new(num_bits: usize) -> BitVector {
pub fn new(num_bits: usize) -> BitVector<C> {
let num_words = words(num_bits);
BitVector {
data: vec![0; num_words],
marker: PhantomData,
}
}
@ -42,14 +44,14 @@ impl BitVector {
}
#[inline]
pub fn contains(&self, bit: usize) -> bool {
pub fn contains(&self, bit: C) -> bool {
let (word, mask) = word_mask(bit);
(self.data[word] & mask) != 0
}
/// Returns true if the bit has changed.
#[inline]
pub fn insert(&mut self, bit: usize) -> bool {
pub fn insert(&mut self, bit: C) -> bool {
let (word, mask) = word_mask(bit);
let data = &mut self.data[word];
let value = *data;
@ -60,7 +62,7 @@ impl BitVector {
/// Returns true if the bit has changed.
#[inline]
pub fn remove(&mut self, bit: usize) -> bool {
pub fn remove(&mut self, bit: C) -> bool {
let (word, mask) = word_mask(bit);
let data = &mut self.data[word];
let value = *data;
@ -70,7 +72,7 @@ impl BitVector {
}
#[inline]
pub fn merge(&mut self, all: &BitVector) -> bool {
pub fn merge(&mut self, all: &BitVector<C>) -> bool {
assert!(self.data.len() == all.data.len());
let mut changed = false;
for (i, j) in self.data.iter_mut().zip(&all.data) {
@ -84,7 +86,7 @@ impl BitVector {
}
#[inline]
pub fn grow(&mut self, num_bits: usize) {
pub fn grow(&mut self, num_bits: C) {
let num_words = words(num_bits);
if self.data.len() < num_words {
self.data.resize(num_words, 0)
@ -93,24 +95,26 @@ impl BitVector {
/// Iterates over indexes of set bits in a sorted order
#[inline]
pub fn iter<'a>(&'a self) -> BitVectorIter<'a> {
pub fn iter<'a>(&'a self) -> BitVectorIter<'a, C> {
BitVectorIter {
iter: self.data.iter(),
current: 0,
idx: 0,
marker: PhantomData,
}
}
}
pub struct BitVectorIter<'a> {
pub struct BitVectorIter<'a, C: Idx> {
iter: ::std::slice::Iter<'a, Word>,
current: Word,
idx: usize,
marker: PhantomData<C>
}
impl<'a> Iterator for BitVectorIter<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
impl<'a, C: Idx> Iterator for BitVectorIter<'a, C> {
type Item = C;
fn next(&mut self) -> Option<C> {
while self.current == 0 {
self.current = if let Some(&i) = self.iter.next() {
if i == 0 {
@ -128,7 +132,7 @@ impl<'a> Iterator for BitVectorIter<'a> {
self.current >>= offset;
self.current >>= 1; // shift otherwise overflows for 0b1000_0000_…_0000
self.idx += offset + 1;
return Some(self.idx - 1);
return Some(C::new(self.idx - 1));
}
fn size_hint(&self) -> (usize, Option<usize>) {
@ -137,8 +141,8 @@ impl<'a> Iterator for BitVectorIter<'a> {
}
}
impl FromIterator<bool> for BitVector {
fn from_iter<I>(iter: I) -> BitVector
impl<C: Idx> FromIterator<bool> for BitVector<C> {
fn from_iter<I>(iter: I) -> BitVector<C>
where
I: IntoIterator<Item = bool>,
{
@ -150,10 +154,10 @@ impl FromIterator<bool> for BitVector {
let mut bv = BitVector::new(len);
for (idx, val) in iter.enumerate() {
if idx > len {
bv.grow(idx);
bv.grow(C::new(idx));
}
if val {
bv.insert(idx);
bv.insert(C::new(idx));
}
}
@ -165,25 +169,28 @@ impl FromIterator<bool> for BitVector {
/// one gigantic bitvector. In other words, it is as if you have
/// `rows` bitvectors, each of length `columns`.
#[derive(Clone, Debug)]
pub struct BitMatrix {
pub struct BitMatrix<R: Idx, C: Idx> {
columns: usize,
vector: Vec<Word>,
phantom: PhantomData<(R, C)>,
}
impl BitMatrix {
impl<R: Idx, C: Idx> BitMatrix<R, C> {
/// Create a new `rows x columns` matrix, initially empty.
pub fn new(rows: usize, columns: usize) -> BitMatrix {
pub fn new(rows: usize, columns: usize) -> BitMatrix<R, C> {
// For every element, we need one bit for every other
// element. Round up to an even number of words.
let words_per_row = words(columns);
BitMatrix {
columns,
vector: vec![0; rows * words_per_row],
phantom: PhantomData,
}
}
/// The range of bits for a given row.
fn range(&self, row: usize) -> (usize, usize) {
fn range(&self, row: R) -> (usize, usize) {
let row = row.index();
let words_per_row = words(self.columns);
let start = row * words_per_row;
(start, start + words_per_row)
@ -193,7 +200,7 @@ impl BitMatrix {
/// `column` to the bitset for `row`.
///
/// Returns true if this changed the matrix, and false otherwise.
pub fn add(&mut self, row: usize, column: usize) -> bool {
pub fn add(&mut self, row: R, column: R) -> bool {
let (start, _) = self.range(row);
let (word, mask) = word_mask(column);
let vector = &mut self.vector[..];
@ -207,7 +214,7 @@ impl BitMatrix {
/// the matrix cell at `(row, column)` true? Put yet another way,
/// if the matrix represents (transitive) reachability, can
/// `row` reach `column`?
pub fn contains(&self, row: usize, column: usize) -> bool {
pub fn contains(&self, row: R, column: R) -> bool {
let (start, _) = self.range(row);
let (word, mask) = word_mask(column);
(self.vector[start + word] & mask) != 0
@ -217,7 +224,7 @@ impl BitMatrix {
/// is an O(n) operation where `n` is the number of elements
/// (somewhat independent from the actual size of the
/// intersection, in particular).
pub fn intersection(&self, a: usize, b: usize) -> Vec<usize> {
pub fn intersection(&self, a: R, b: R) -> Vec<C> {
let (a_start, a_end) = self.range(a);
let (b_start, b_end) = self.range(b);
let mut result = Vec::with_capacity(self.columns);
@ -228,7 +235,7 @@ impl BitMatrix {
break;
}
if v & 0x1 != 0 {
result.push(base * WORD_BITS + bit);
result.push(C::new(base * WORD_BITS + bit));
}
v >>= 1;
}
@ -243,7 +250,7 @@ impl BitMatrix {
/// you have an edge `write -> read`, because in that case
/// `write` can reach everything that `read` can (and
/// potentially more).
pub fn merge(&mut self, read: usize, write: usize) -> bool {
pub fn merge(&mut self, read: R, write: R) -> bool {
let (read_start, read_end) = self.range(read);
let (write_start, write_end) = self.range(write);
let vector = &mut self.vector[..];
@ -259,12 +266,13 @@ impl BitMatrix {
/// Iterates through all the columns set to true in a given row of
/// the matrix.
pub fn iter<'a>(&'a self, row: usize) -> BitVectorIter<'a> {
pub fn iter<'a>(&'a self, row: R) -> BitVectorIter<'a, C> {
let (start, end) = self.range(row);
BitVectorIter {
iter: self.vector[start..end].iter(),
current: 0,
idx: 0,
marker: PhantomData,
}
}
}
@ -278,8 +286,7 @@ where
C: Idx,
{
columns: usize,
vector: IndexVec<R, BitVector>,
marker: PhantomData<C>,
vector: IndexVec<R, BitVector<C>>,
}
impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
@ -288,7 +295,6 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
Self {
columns,
vector: IndexVec::new(),
marker: PhantomData,
}
}
@ -300,7 +306,7 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
let columns = self.columns;
self.vector
.ensure_contains_elem(row, || BitVector::new(columns));
self.vector[row].insert(column.index())
self.vector[row].insert(column)
}
/// Do the bits from `row` contain `column`? Put another way, is
@ -308,7 +314,7 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
/// if the matrix represents (transitive) reachability, can
/// `row` reach `column`?
pub fn contains(&self, row: R, column: C) -> bool {
self.vector.get(row).map_or(false, |r| r.contains(column.index()))
self.vector.get(row).map_or(false, |r| r.contains(column))
}
/// Add the bits from row `read` to the bits from row `write`,
@ -331,7 +337,7 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
}
/// Merge a row, `from`, into the `into` row.
pub fn merge_into(&mut self, into: R, from: &BitVector) -> bool {
pub fn merge_into(&mut self, into: R, from: &BitVector<C>) -> bool {
let columns = self.columns;
self.vector
.ensure_contains_elem(into, || BitVector::new(columns));
@ -346,22 +352,27 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
/// Iterates through all the columns set to true in a given row of
/// the matrix.
pub fn iter<'a>(&'a self, row: R) -> impl Iterator<Item = C> + 'a {
self.vector.get(row).into_iter().flat_map(|r| r.iter().map(|n| C::new(n)))
self.vector.get(row).into_iter().flat_map(|r| r.iter())
}
/// Iterates through each row and the accompanying bit set.
pub fn iter_enumerated<'a>(&'a self) -> impl Iterator<Item = (R, &'a BitVector)> + 'a {
pub fn iter_enumerated<'a>(&'a self) -> impl Iterator<Item = (R, &'a BitVector<C>)> + 'a {
self.vector.iter_enumerated()
}
pub fn row(&self, row: R) -> Option<&BitVector<C>> {
self.vector.get(row)
}
}
#[inline]
fn words(elements: usize) -> usize {
(elements + WORD_BITS - 1) / WORD_BITS
fn words<C: Idx>(elements: C) -> usize {
(elements.index() + WORD_BITS - 1) / WORD_BITS
}
#[inline]
fn word_mask(index: usize) -> (usize, Word) {
fn word_mask<C: Idx>(index: C) -> (usize, Word) {
let index = index.index();
let word = index / WORD_BITS;
let mask = 1 << (index % WORD_BITS);
(word, mask)
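
To make the parameterization concrete, here is a small usage sketch against the signatures above (illustrative only; it compiles only against the in-tree `rustc_data_structures` crate at this revision, and `Block` is a made-up index type, not one of rustc's):

use rustc_data_structures::bitvec::{BitMatrix, BitVector};
use rustc_data_structures::indexed_vec::Idx;

// A made-up index domain; `Idx` only needs `new` and `index` plus the
// derivable bounds, as defined in indexed_vec.rs.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Block(usize);

impl Idx for Block {
    fn new(idx: usize) -> Self { Block(idx) }
    fn index(self) -> usize { self.0 }
}

fn demo() {
    let mut visited: BitVector<Block> = BitVector::new(8);
    assert!(visited.insert(Block(3)));    // true: the bit changed
    assert!(!visited.insert(Block(3)));   // false: already set
    assert!(visited.contains(Block(3)));
    visited.grow(Block(16));              // `grow` now takes the index type

    // Iteration yields the index type rather than raw usizes.
    let set: Vec<Block> = visited.iter().collect();
    assert_eq!(set, vec![Block(3)]);

    // `BitMatrix` carries a row type and a column type.
    let mut reach: BitMatrix<Block, Block> = BitMatrix::new(4, 4);
    reach.add(Block(0), Block(1));
    reach.add(Block(1), Block(2));
    reach.merge(Block(1), Block(0));      // OR row 1 into row 0
    assert!(reach.contains(Block(0), Block(2)));
}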

View File

@ -348,7 +348,7 @@ where
{
graph: &'g Graph<N, E>,
stack: Vec<NodeIndex>,
visited: BitVector,
visited: BitVector<usize>,
direction: Direction,
}

View File

@ -25,7 +25,13 @@ use rustc_serialize as serialize;
/// (purpose: avoid mixing indexes for different bitvector domains.)
pub trait Idx: Copy + 'static + Ord + Debug + Hash {
fn new(idx: usize) -> Self;
fn index(self) -> usize;
fn increment_by(&mut self, amount: usize) {
let v = self.index() + amount;
*self = Self::new(v);
}
}
impl Idx for usize {
@ -504,8 +510,8 @@ impl<I: Idx, T> IndexVec<I, T> {
}
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
self.raw.swap(a, b)
pub fn swap(&mut self, a: I, b: I) {
self.raw.swap(a.index(), b.index())
}
#[inline]
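
The new `increment_by` default method and the typed `swap` both exist to support `make_local_map` in the `SimplifyLocals` pass further down. A small sketch of their behavior (hypothetical `Slot` index type; assumes the in-tree crate):

use rustc_data_structures::indexed_vec::{Idx, IndexVec};

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Slot(usize);

impl Idx for Slot {
    fn new(idx: usize) -> Self { Slot(idx) }
    fn index(self) -> usize { self.0 }
}

fn demo() {
    // `increment_by` round-trips through usize via `index` and `new`.
    let mut cursor = Slot::new(0);
    cursor.increment_by(3);
    assert_eq!(cursor, Slot(3));

    // `swap` now takes the index type instead of raw usizes.
    let mut v: IndexVec<Slot, &str> = IndexVec::new();
    v.push("a");
    v.push("b");
    v.swap(Slot(0), Slot(1));
    assert_eq!(v[Slot(0)], "b");
}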

View File

@ -39,7 +39,7 @@ pub struct TransitiveRelation<T: Clone + Debug + Eq + Hash> {
// are added with new elements. Perhaps better would be to ask the
// user for a batch of edges to minimize this effect, but I
// already wrote the code this way. :P -nmatsakis
closure: Lock<Option<BitMatrix>>,
closure: Lock<Option<BitMatrix<usize, usize>>>,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable, Debug)]
@ -354,7 +354,7 @@ impl<T: Clone + Debug + Eq + Hash> TransitiveRelation<T> {
}
fn with_closure<OP, R>(&self, op: OP) -> R
where OP: FnOnce(&BitMatrix) -> R
where OP: FnOnce(&BitMatrix<usize, usize>) -> R
{
let mut closure_cell = self.closure.borrow_mut();
let mut closure = closure_cell.take();
@ -366,7 +366,7 @@ impl<T: Clone + Debug + Eq + Hash> TransitiveRelation<T> {
result
}
fn compute_closure(&self) -> BitMatrix {
fn compute_closure(&self) -> BitMatrix<usize, usize> {
let mut matrix = BitMatrix::new(self.elements.len(),
self.elements.len());
let mut changed = true;
@ -396,7 +396,7 @@ impl<T: Clone + Debug + Eq + Hash> TransitiveRelation<T> {
/// - Input: `[a, b, x]`. Output: `[a, x]`.
/// - Input: `[b, a, x]`. Output: `[b, a, x]`.
/// - Input: `[a, x, b, y]`. Output: `[a, x]`.
fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix) {
fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix<usize, usize>) {
let mut i = 0;
while i < candidates.len() {
let candidate_i = candidates[i];
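
The `BitMatrix<usize, usize>` instantiation is the square case: rows and columns range over the same element set, and `usize` itself implements `Idx`. For orientation, here is a sketch in the spirit of `compute_closure`, written against the `add`/`merge` API shown earlier (the loop body is not visible in this hunk, so treat this as a plausible reconstruction; `closure_of` and its edge-list input are hypothetical, not the `TransitiveRelation` internals):

use rustc_data_structures::bitvec::BitMatrix;

// Hypothetical helper: reachability over `n` elements from an edge list,
// iterating `merge` until a fixed point is reached.
fn closure_of(n: usize, edges: &[(usize, usize)]) -> BitMatrix<usize, usize> {
    let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(n, n);
    let mut changed = true;
    while changed {
        changed = false;
        for &(source, target) in edges {
            // record the direct edge source -> target
            changed |= matrix.add(source, target);
            // source also reaches everything target reaches:
            // OR row `target` into row `source`.
            changed |= matrix.merge(target, source);
        }
    }
    matrix
}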

View File

@ -222,12 +222,12 @@ impl<N: Idx> RegionValues<N> {
/// Iterates through each row and the accompanying bit set.
pub fn iter_enumerated<'a>(
&'a self
) -> impl Iterator<Item = (N, &'a BitVector)> + 'a {
) -> impl Iterator<Item = (N, &'a BitVector<RegionElementIndex>)> + 'a {
self.matrix.iter_enumerated()
}
/// Merge a row, `from`, originating in another `RegionValues` into the `into` row.
pub fn merge_into(&mut self, into: N, from: &BitVector) -> bool {
pub fn merge_into(&mut self, into: N, from: &BitVector<RegionElementIndex>) -> bool {
self.matrix.merge_into(into, from)
}

View File

@ -487,7 +487,7 @@ enum TestKind<'tcx> {
// test the branches of enum
Switch {
adt_def: &'tcx ty::AdtDef,
variants: BitVector,
variants: BitVector<usize>,
},
// test the branches of enum

View File

@ -149,7 +149,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn add_variants_to_switch<'pat>(&mut self,
test_place: &Place<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
variants: &mut BitVector)
variants: &mut BitVector<usize>)
-> bool
{
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) {

View File

@ -231,7 +231,7 @@ pub struct InliningMap<'tcx> {
// Contains one bit per mono item in the `targets` field. That bit
// is true if that mono item needs to be inlined into every CGU.
inlines: BitVector,
inlines: BitVector<usize>,
}
impl<'tcx> InliningMap<'tcx> {
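
The `usize` parameter here reflects that `inlines` is indexed by position in the parallel `targets` list rather than by a dedicated index type. A minimal sketch of that parallel-flag pattern (hypothetical `Targets` type, not the real `InliningMap`; assumes the in-tree crate):

use rustc_data_structures::bitvec::BitVector;

// Hypothetical mirror of the pattern: one flag bit per entry of a Vec.
struct Targets<T> {
    items: Vec<T>,
    inlines: BitVector<usize>, // bit i set <=> items[i] is inlined everywhere
}

impl<T> Targets<T> {
    fn new() -> Self {
        Targets { items: Vec::new(), inlines: BitVector::new(0) }
    }

    fn record(&mut self, item: T, inline_everywhere: bool) {
        let i = self.items.len();
        self.items.push(item);
        self.inlines.grow(i + 1);
        if inline_everywhere {
            self.inlines.insert(i);
        }
    }

    fn inlined_everywhere(&self, i: usize) -> bool {
        self.inlines.contains(i)
    }
}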

View File

@ -292,8 +292,10 @@ fn make_generator_state_argument_indirect<'a, 'tcx>(
DerefArgVisitor.visit_mir(mir);
}
fn replace_result_variable<'tcx>(ret_ty: Ty<'tcx>,
mir: &mut Mir<'tcx>) -> Local {
fn replace_result_variable<'tcx>(
ret_ty: Ty<'tcx>,
mir: &mut Mir<'tcx>,
) -> Local {
let source_info = source_info(mir);
let new_ret = LocalDecl {
mutability: Mutability::Mut,
@ -306,7 +308,7 @@ fn replace_result_variable<'tcx>(ret_ty: Ty<'tcx>,
};
let new_ret_local = Local::new(mir.local_decls.len());
mir.local_decls.push(new_ret);
mir.local_decls.swap(0, new_ret_local.index());
mir.local_decls.swap(RETURN_PLACE, new_ret_local);
RenameLocalVisitor {
from: RETURN_PLACE,

View File

@ -116,7 +116,7 @@ struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
param_env: ty::ParamEnv<'tcx>,
local_qualif: IndexVec<Local, Option<Qualif>>,
qualif: Qualif,
const_fn_arg_vars: BitVector,
const_fn_arg_vars: BitVector<Local>,
temp_promotion_state: IndexVec<Local, TempState>,
promotion_candidates: Vec<Candidate>
}
@ -344,7 +344,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> {
// Make sure there are no extra unassigned variables.
self.qualif = Qualif::NOT_CONST;
for index in mir.vars_iter() {
if !self.const_fn_arg_vars.contains(index.index()) {
if !self.const_fn_arg_vars.contains(index) {
debug!("unassigned variable {:?}", index);
self.assign(&Place::Local(index), Location {
block: bb,
@ -1021,7 +1021,7 @@ This does not pose a problem by itself because they can't be accessed directly."
// Check the allowed const fn argument forms.
if let (Mode::ConstFn, &Place::Local(index)) = (self.mode, dest) {
if self.mir.local_kind(index) == LocalKind::Var &&
self.const_fn_arg_vars.insert(index.index()) &&
self.const_fn_arg_vars.insert(index) &&
!self.tcx.sess.features_untracked().const_let {
// Direct use of an argument is permitted.

View File

@ -11,7 +11,6 @@
use rustc::ty::TyCtxt;
use rustc::mir::*;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
use util::patch::MirPatch;
@ -42,9 +41,12 @@ impl MirPass for RemoveNoopLandingPads {
}
impl RemoveNoopLandingPads {
fn is_nop_landing_pad(&self, bb: BasicBlock, mir: &Mir, nop_landing_pads: &BitVector)
-> bool
{
fn is_nop_landing_pad(
&self,
bb: BasicBlock,
mir: &Mir,
nop_landing_pads: &BitVector<BasicBlock>,
) -> bool {
for stmt in &mir[bb].statements {
match stmt.kind {
StatementKind::ReadForMatch(_) |
@ -79,8 +81,8 @@ impl RemoveNoopLandingPads {
TerminatorKind::SwitchInt { .. } |
TerminatorKind::FalseEdges { .. } |
TerminatorKind::FalseUnwind { .. } => {
terminator.successors().all(|succ| {
nop_landing_pads.contains(succ.index())
terminator.successors().all(|&succ| {
nop_landing_pads.contains(succ)
})
},
TerminatorKind::GeneratorDrop |
@ -117,7 +119,7 @@ impl RemoveNoopLandingPads {
for bb in postorder {
debug!(" processing {:?}", bb);
for target in mir[bb].terminator_mut().successors_mut() {
if *target != resume_block && nop_landing_pads.contains(target.index()) {
if *target != resume_block && nop_landing_pads.contains(*target) {
debug!(" folding noop jump to {:?} to resume block", target);
*target = resume_block;
jumps_folded += 1;
@ -138,7 +140,7 @@ impl RemoveNoopLandingPads {
let is_nop_landing_pad = self.is_nop_landing_pad(bb, mir, &nop_landing_pads);
if is_nop_landing_pad {
nop_landing_pads.insert(bb.index());
nop_landing_pads.insert(bb);
}
debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
}

View File

@ -288,15 +288,15 @@ impl MirPass for SimplifyLocals {
let mut marker = DeclMarker { locals: BitVector::new(mir.local_decls.len()) };
marker.visit_mir(mir);
// Return pointer and arguments are always live
marker.locals.insert(RETURN_PLACE.index());
marker.locals.insert(RETURN_PLACE);
for arg in mir.args_iter() {
marker.locals.insert(arg.index());
marker.locals.insert(arg);
}
// We may need to keep dead user variables live for debuginfo.
if tcx.sess.opts.debuginfo == FullDebugInfo {
for local in mir.vars_iter() {
marker.locals.insert(local.index());
marker.locals.insert(local);
}
}
@ -308,35 +308,38 @@ impl MirPass for SimplifyLocals {
}
/// Construct the mapping while swapping unused stuff out from the `vec`.
fn make_local_map<'tcx, I: Idx, V>(vec: &mut IndexVec<I, V>, mask: BitVector) -> Vec<usize> {
let mut map: Vec<usize> = ::std::iter::repeat(!0).take(vec.len()).collect();
let mut used = 0;
fn make_local_map<'tcx, V>(
vec: &mut IndexVec<Local, V>,
mask: BitVector<Local>,
) -> IndexVec<Local, Option<Local>> {
let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*vec);
let mut used = Local::new(0);
for alive_index in mask.iter() {
map[alive_index] = used;
map[alive_index] = Some(used);
if alive_index != used {
vec.swap(alive_index, used);
}
used += 1;
used.increment_by(1);
}
vec.truncate(used);
vec.truncate(used.index());
map
}
struct DeclMarker {
pub locals: BitVector,
pub locals: BitVector<Local>,
}
impl<'tcx> Visitor<'tcx> for DeclMarker {
fn visit_local(&mut self, local: &Local, ctx: PlaceContext<'tcx>, _: Location) {
// ignore these altogether, they get removed along with their otherwise unused decls.
if ctx != PlaceContext::StorageLive && ctx != PlaceContext::StorageDead {
self.locals.insert(local.index());
self.locals.insert(*local);
}
}
}
struct LocalUpdater {
map: Vec<usize>,
map: IndexVec<Local, Option<Local>>,
}
impl<'tcx> MutVisitor<'tcx> for LocalUpdater {
@ -345,7 +348,7 @@ impl<'tcx> MutVisitor<'tcx> for LocalUpdater {
data.statements.retain(|stmt| {
match stmt.kind {
StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
self.map[l.index()] != !0
self.map[l].is_some()
}
_ => true
}
@ -353,6 +356,6 @@ impl<'tcx> MutVisitor<'tcx> for LocalUpdater {
self.super_basic_block_data(block, data);
}
fn visit_local(&mut self, l: &mut Local, _: PlaceContext<'tcx>, _: Location) {
*l = Local::new(self.map[l.index()]);
*l = self.map[*l].unwrap();
}
}
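
To see what the reworked `make_local_map` produces, here is a worked example with hand-written inputs (illustrative only; it assumes being in the same module as the private `make_local_map`, and the decl contents are stand-in strings rather than real `LocalDecl`s):

use rustc::mir::Local;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};

fn demo() {
    // Five locals, of which _0, _2 and _4 are marked live.
    let mut decls: IndexVec<Local, &str> =
        ["a", "b", "c", "d", "e"].iter().cloned().collect();
    let mut live: BitVector<Local> = BitVector::new(decls.len());
    live.insert(Local::new(0));
    live.insert(Local::new(2));
    live.insert(Local::new(4));

    let map = make_local_map(&mut decls, live);

    // Dead locals map to None; live ones are renumbered densely,
    // and `decls` is compacted in place via the typed `swap`/`truncate`.
    assert_eq!(map[Local::new(0)], Some(Local::new(0)));
    assert_eq!(map[Local::new(2)], Some(Local::new(1)));
    assert_eq!(map[Local::new(3)], None);
    assert_eq!(decls.len(), 3);
}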