rt: Remove the cycle collector

Patrick Walton 2012-09-25 12:13:20 -07:00
parent ac822a52be
commit f813f519a5
6 changed files with 0 additions and 610 deletions

View File

@@ -71,7 +71,6 @@ RUNTIME_CS_$(1) := \
rt/rust_kernel.cpp \
rt/rust_shape.cpp \
rt/rust_abi.cpp \
rt/rust_cc.cpp \
rt/rust_debug.cpp \
rt/rust_box_annihilator.cpp \
rt/memory_region.cpp \

View File

@@ -1,572 +0,0 @@
// Rust cycle collector. Temporary, but will probably stick around for some
// time until LLVM's GC infrastructure is more mature.
#include <map>
#include <set>
#include <vector>
#include <ios>
#include "rust_globals.h"
#include "rust_cc.h"
#include "rust_debug.h"
#include "rust_shape.h"
#include "rust_task.h"
// The number of allocations Rust code performs before performing cycle
// collection.
#define RUST_CC_FREQUENCY 5000
using namespace std;
void
annihilate_box(rust_task *task, rust_opaque_box *box);
namespace cc {
// Internal reference count computation
typedef std::map<rust_opaque_box*,uintptr_t> irc_map;
class irc : public shape::data<irc,shape::ptr> {
friend class shape::data<irc,shape::ptr>;
irc_map &ircs;
irc(const irc &other, const shape::ptr &in_dp)
: shape::data<irc,shape::ptr>(other.task, other.align, other.sp,
other.tables, in_dp),
ircs(other.ircs) {}
irc(const irc &other,
const uint8_t *in_sp,
const rust_shape_tables *in_tables = NULL)
: shape::data<irc,shape::ptr>(other.task,
other.align,
in_sp,
in_tables ? in_tables : other.tables,
other.dp),
ircs(other.ircs) {}
irc(const irc &other,
const uint8_t *in_sp,
const rust_shape_tables *in_tables,
shape::ptr in_dp)
: shape::data<irc,shape::ptr>(other.task,
other.align,
in_sp,
in_tables,
in_dp),
ircs(other.ircs) {}
irc(rust_task *in_task,
bool in_align,
const uint8_t *in_sp,
const rust_shape_tables *in_tables,
uint8_t *in_data,
irc_map &in_ircs)
: shape::data<irc,shape::ptr>(in_task, in_align, in_sp,
in_tables, shape::ptr(in_data)),
ircs(in_ircs) {}
void walk_vec2(bool is_pod, std::pair<uint8_t *,uint8_t *> data_range) {
// There can't be any outbound pointers from pod.
if (is_pod)
return;
irc sub(*this, shape::ptr(data_range.first));
shape::ptr data_end = sub.end_dp = shape::ptr(data_range.second);
while (sub.dp < data_end) {
sub.walk_reset();
// FIXME (#2669): shouldn't this be 'sub.align = true;'?
align = true;
}
}
void walk_vec2(bool is_pod) {
if (shape::get_dp<void *>(dp) == NULL)
return;
walk_vec2(is_pod, get_vec_data_range(dp));
}
void walk_unboxed_vec2(bool is_pod) {
walk_vec2(is_pod, get_unboxed_vec_data_range(dp));
}
void walk_slice2(bool is_pod, bool is_str) {
walk_vec2(is_pod, get_slice_data_range(is_str, dp));
}
void walk_fixedvec2(uint16_t n_elts, size_t elt_sz, bool is_pod) {
walk_vec2(is_pod, get_fixedvec_data_range(n_elts, elt_sz, dp));
}
void walk_tag2(shape::tag_info &tinfo, uint32_t tag_variant) {
shape::data<irc,shape::ptr>::walk_variant1(tinfo, tag_variant);
}
void walk_box2() {
// the box ptr can be NULL for env ptrs in closures and data
// not fully initialized
rust_opaque_box *box = *(rust_opaque_box**)dp;
if (box)
shape::data<irc,shape::ptr>::walk_box_contents1();
}
void walk_uniq2() {
rust_opaque_box *box = *(rust_opaque_box**)dp;
if (box)
shape::data<irc,shape::ptr>::walk_uniq_contents1();
}
void walk_rptr2() {
shape::data<irc,shape::ptr>::walk_rptr_contents1();
}
void walk_fn2(char code) {
switch (code) {
case shape::SHAPE_BOX_FN: {
shape::bump_dp<void*>(dp); // skip over the code ptr
walk_box2(); // walk over the environment ptr
break;
}
case shape::SHAPE_BARE_FN: // Does not close over data.
case shape::SHAPE_STACK_FN: // Not reachable from heap.
case shape::SHAPE_UNIQ_FN: break; /* Can only close over sendable
* (and hence acyclic) data */
default: abort();
}
}
void walk_trait2() {
walk_box2();
}
void walk_tydesc2(char) {
}
void walk_res2(const shape::rust_fn *dtor, const uint8_t *end_sp) {
while (this->sp != end_sp) {
this->walk();
align = true;
}
}
void walk_subcontext2(irc &sub) { sub.walk(); }
void walk_uniq_contents2(irc &sub) { sub.walk(); }
void walk_rptr_contents2(irc &sub) { sub.walk(); }
void walk_box_contents2(irc &sub) {
maybe_record_irc();
// Do not traverse the contents of this box; it's in the allocation
// somewhere, so we're guaranteed to come back to it (if we haven't
// traversed it already).
}
void maybe_record_irc() {
rust_opaque_box *box_ptr = *(rust_opaque_box **) dp;
if (!box_ptr)
return;
// Bump the internal reference count of the box.
if (ircs.find(box_ptr) == ircs.end()) {
LOG(task, gc,
"setting internal reference count for %p to 1",
box_ptr);
ircs[box_ptr] = 1;
} else {
uintptr_t newcount = ircs[box_ptr] + 1;
LOG(task, gc,
"bumping internal reference count for %p to %lu",
box_ptr, newcount);
ircs[box_ptr] = newcount;
}
}
void walk_struct2(const uint8_t *end_sp) {
while (this->sp != end_sp) {
this->walk();
align = true;
}
}
void walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
const std::pair<const uint8_t *,const uint8_t *>
variant_ptr_and_end);
template<typename T>
inline void walk_number2() { /* no-op */ }
public:
static void compute_ircs(rust_task *task, irc_map &ircs);
};
void
irc::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
const std::pair<const uint8_t *,const uint8_t *>
variant_ptr_and_end) {
irc sub(*this, variant_ptr_and_end.first);
assert(variant_id < 256); // FIXME (#2666): Temporary sanity check.
const uint8_t *variant_end = variant_ptr_and_end.second;
while (sub.sp < variant_end) {
sub.walk();
align = true;
}
}
void
irc::compute_ircs(rust_task *task, irc_map &ircs) {
boxed_region *boxed = &task->boxed;
for (rust_opaque_box *box = boxed->first_live_alloc();
box != NULL;
box = box->next) {
type_desc *tydesc = box->td;
uint8_t *body = (uint8_t*) box_body(box);
LOG(task, gc,
"determining internal ref counts: "
"box=%p tydesc=%p body=%p",
box, tydesc, body);
shape::arena arena;
irc irc(task, true, tydesc->shape, tydesc->shape_tables,
body, ircs);
irc.walk();
}
}
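The pass above visits every live box on the task's box list and, through maybe_record_irc, counts how many references to each box originate from inside other boxes. A minimal sketch of that counting step over a hypothetical toy heap (toy_box and its explicit field list stand in for the shape-driven traversal):

#include <cstddef>
#include <map>
#include <stdint.h>
#include <vector>

struct toy_box { std::vector<toy_box*> fields; uintptr_t ref_count; };

// Accumulate, for each box, the number of references that originate from
// other boxes; this is the information irc::compute_ircs builds in its irc_map.
void compute_internal_refs(const std::vector<toy_box*> &heap,
                           std::map<toy_box*, uintptr_t> &ircs) {
    for (size_t i = 0; i < heap.size(); ++i)
        for (size_t j = 0; j < heap[i]->fields.size(); ++j)
            if (heap[i]->fields[j])
                ++ircs[heap[i]->fields[j]];   // one more heap-internal reference
}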
// Root finding
void
find_roots(rust_task *task, irc_map &ircs,
std::vector<rust_opaque_box *> &roots) {
boxed_region *boxed = &task->boxed;
for (rust_opaque_box *box = boxed->first_live_alloc();
box != NULL;
box = box->next) {
uintptr_t ref_count = box->ref_count;
uintptr_t irc;
if (ircs.find(box) != ircs.end())
irc = ircs[box];
else
irc = 0;
if (irc < ref_count) {
// This allocation must be a root, because the internal reference
// count is smaller than the total reference count.
LOG(task, gc,"root found: %p, irc %lu, ref count %lu",
box, irc, ref_count);
roots.push_back(box);
} else {
LOG(task, gc, "nonroot found: %p, irc %lu, ref count %lu",
box, irc, ref_count);
assert(irc == ref_count && "Internal reference count must be "
"less than or equal to the total reference count!");
}
}
}
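A concrete reading of the test above, with invented numbers: if a box has ref_count 2 but only 1 reference was found inside the box heap, the remaining reference must come from outside the heap (a stack frame or task-local slot), so the box is a root; if the counts are equal, every reference to it is heap-internal and it can only be reached through some root.

#include <stdint.h>

// The comparison find_roots performs, isolated for illustration:
inline bool is_root(uintptr_t ref_count, uintptr_t heap_internal_refs) {
    return heap_internal_refs < ref_count;
}

// is_root(2, 1) == true   (one reference lives outside the box heap)
// is_root(1, 1) == false  (all references come from other boxes)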
// Marking
class mark : public shape::data<mark,shape::ptr> {
friend class shape::data<mark,shape::ptr>;
std::set<rust_opaque_box *> &marked;
mark(const mark &other, const shape::ptr &in_dp)
: shape::data<mark,shape::ptr>(other.task, other.align, other.sp,
other.tables, in_dp),
marked(other.marked) {}
mark(const mark &other,
const uint8_t *in_sp,
const rust_shape_tables *in_tables = NULL)
: shape::data<mark,shape::ptr>(other.task,
other.align,
in_sp,
in_tables ? in_tables : other.tables,
other.dp),
marked(other.marked) {}
mark(const mark &other,
const uint8_t *in_sp,
const rust_shape_tables *in_tables,
shape::ptr in_dp)
: shape::data<mark,shape::ptr>(other.task,
other.align,
in_sp,
in_tables,
in_dp),
marked(other.marked) {}
mark(rust_task *in_task,
bool in_align,
const uint8_t *in_sp,
const rust_shape_tables *in_tables,
uint8_t *in_data,
std::set<rust_opaque_box*> &in_marked)
: shape::data<mark,shape::ptr>(in_task, in_align, in_sp,
in_tables, shape::ptr(in_data)),
marked(in_marked) {}
void walk_vec2(bool is_pod, std::pair<uint8_t *,uint8_t *> data_range) {
// There can't be any outbound pointers from pod.
if (is_pod)
return;
if (data_range.second - data_range.first > 100000)
abort(); // FIXME (#2666): Temporary sanity check.
mark sub(*this, shape::ptr(data_range.first));
shape::ptr data_end = sub.end_dp = shape::ptr(data_range.second);
while (sub.dp < data_end) {
sub.walk_reset();
align = true;
}
}
void walk_vec2(bool is_pod) {
if (shape::get_dp<void *>(dp) == NULL)
return;
walk_vec2(is_pod, get_vec_data_range(dp));
}
void walk_unboxed_vec2(bool is_pod) {
walk_vec2(is_pod, get_unboxed_vec_data_range(dp));
}
void walk_slice2(bool is_pod, bool is_str) {
walk_vec2(is_pod, get_slice_data_range(is_str, dp));
}
void walk_fixedvec2(uint16_t n_elts, size_t elt_sz, bool is_pod) {
walk_vec2(is_pod, get_fixedvec_data_range(n_elts, elt_sz, dp));
}
void walk_tag2(shape::tag_info &tinfo, uint32_t tag_variant) {
shape::data<mark,shape::ptr>::walk_variant1(tinfo, tag_variant);
}
void walk_box2() {
// the box ptr can be NULL for env ptrs in closures and data
// not fully initialized
rust_opaque_box *box = *(rust_opaque_box**)dp;
if (box)
shape::data<mark,shape::ptr>::walk_box_contents1();
}
void walk_uniq2() {
rust_opaque_box *box = *(rust_opaque_box**)dp;
if (box)
shape::data<mark,shape::ptr>::walk_uniq_contents1();
}
void walk_rptr2() {
shape::data<mark,shape::ptr>::walk_rptr_contents1();
}
void walk_fn2(char code) {
switch (code) {
case shape::SHAPE_BOX_FN: {
shape::bump_dp<void*>(dp); // skip over the code ptr
walk_box2(); // walk over the environment ptr
break;
}
case shape::SHAPE_BARE_FN: // Does not close over data.
case shape::SHAPE_STACK_FN: // Not reachable from heap.
case shape::SHAPE_UNIQ_FN: break; /* Can only close over sendable
* (and hence acyclic) data */
default: abort();
}
}
void walk_res2(const shape::rust_fn *dtor, const uint8_t *end_sp) {
while (this->sp != end_sp) {
this->walk();
align = true;
}
}
void walk_trait2() {
walk_box2();
}
void walk_tydesc2(char) {
}
void walk_subcontext2(mark &sub) { sub.walk(); }
void walk_uniq_contents2(mark &sub) { sub.walk(); }
void walk_rptr_contents2(mark &sub) { sub.walk(); }
void walk_box_contents2(mark &sub) {
rust_opaque_box *box_ptr = *(rust_opaque_box **) dp;
if (!box_ptr)
return;
if (marked.find(box_ptr) != marked.end())
return; // Skip to avoid chasing cycles.
marked.insert(box_ptr);
sub.walk();
}
void walk_struct2(const uint8_t *end_sp) {
while (this->sp != end_sp) {
this->walk();
align = true;
}
}
void walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
const std::pair<const uint8_t *,const uint8_t *>
variant_ptr_and_end);
template<typename T>
inline void walk_number2() { /* no-op */ }
public:
static void do_mark(rust_task *task,
const std::vector<rust_opaque_box *> &roots,
std::set<rust_opaque_box*> &marked);
};
void
mark::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
const std::pair<const uint8_t *,const uint8_t *>
variant_ptr_and_end) {
mark sub(*this, variant_ptr_and_end.first);
assert(variant_id < 256); // FIXME (#2666): Temporary sanity check.
const uint8_t *variant_end = variant_ptr_and_end.second;
while (sub.sp < variant_end) {
sub.walk();
align = true;
}
}
void
mark::do_mark(rust_task *task,
const std::vector<rust_opaque_box *> &roots,
std::set<rust_opaque_box *> &marked) {
std::vector<rust_opaque_box *>::const_iterator
begin(roots.begin()),
end(roots.end());
while (begin != end) {
rust_opaque_box *box = *begin;
if (marked.find(box) == marked.end()) {
marked.insert(box);
const type_desc *tydesc = box->td;
LOG(task, gc, "marking: %p, tydesc=%p", box, tydesc);
uint8_t *p = (uint8_t*) box_body(box);
shape::arena arena;
mark mark(task, true, tydesc->shape, tydesc->shape_tables,
p, marked);
mark.walk();
}
++begin;
}
}
void
do_sweep(rust_task *task,
const std::set<rust_opaque_box*> &marked) {
boxed_region *boxed = &task->boxed;
rust_opaque_box *box = boxed->first_live_alloc();
while (box != NULL) {
// save next ptr as we may be freeing box
rust_opaque_box *box_next = box->next;
if (marked.find(box) == marked.end()) {
LOG(task, gc, "object is part of a cycle: %p", box);
annihilate_box(task, box);
}
box = box_next;
}
}
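Marking and sweeping together implement the usual reachability argument: everything reachable from a root survives, and whatever remains is kept alive only by references among the boxes themselves, i.e. by cycles. A self-contained sketch of the two phases, using an explicit worklist instead of the recursive shape walk (toy_box is a hypothetical stand-in for the runtime's boxes):

#include <cstddef>
#include <set>
#include <vector>

struct toy_box { std::vector<toy_box*> fields; };

// Phase 1: mark everything reachable from the roots.
void mark_reachable(const std::vector<toy_box*> &roots,
                    std::set<toy_box*> &marked) {
    std::vector<toy_box*> worklist(roots.begin(), roots.end());
    while (!worklist.empty()) {
        toy_box *box = worklist.back();
        worklist.pop_back();
        if (!box || !marked.insert(box).second)
            continue;                     // NULL or already marked: skip
        worklist.insert(worklist.end(), box->fields.begin(), box->fields.end());
    }
}

// Phase 2: anything unmarked is unreachable from the roots, i.e. cycle garbage.
void sweep(std::vector<toy_box*> &heap, const std::set<toy_box*> &marked) {
    std::vector<toy_box*> survivors;
    for (size_t i = 0; i < heap.size(); ++i) {
        if (marked.count(heap[i]))
            survivors.push_back(heap[i]);
        else
            delete heap[i];               // the runtime calls annihilate_box()
    }
    heap.swap(survivors);
}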
void
do_cc(rust_task *task) {
LOG(task, gc, "cc");
irc_map ircs;
irc::compute_ircs(task, ircs);
std::vector<rust_opaque_box*> roots;
find_roots(task, ircs, roots);
std::set<rust_opaque_box*> marked;
mark::do_mark(task, roots, marked);
do_sweep(task, marked);
}
void
do_final_cc(rust_task *task) {
do_cc(task);
boxed_region *boxed = &task->boxed;
for (rust_opaque_box *box = boxed->first_live_alloc();
box != NULL;
box = box->next) {
cerr << "Unreclaimed object found at " << (void*) box << ": ";
const type_desc *td = box->td;
shape::arena arena;
shape::log log(task, true, td->shape, td->shape_tables,
(uint8_t*)box_body(box), cerr);
log.walk();
cerr << "\n";
}
}
void
maybe_cc(rust_task *task) {
static debug::flag zeal("RUST_CC_ZEAL");
if (*zeal) {
do_cc(task);
return;
}
// FIXME (#1498): depressingly, due to alignment bugs the whole file is
// disabled presently unless you're doing testing. Remove the whole thing
// when we transition to using a visitor for GC/CC.
#if 0
if (task->cc_counter++ > RUST_CC_FREQUENCY) {
task->cc_counter = 0;
do_cc(task);
}
#endif
}
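Two triggers are visible above: the RUST_CC_ZEAL debug flag (presumably set through the environment) forces a collection on every call, while the intended every-N-allocations trigger sits behind #if 0 because of the alignment bugs tracked in issue #1498. The disabled branch amounts to the following helper, assuming this file's includes; cc_counter is the per-task field this commit also removes:

// Sketch of the disabled allocation-count trigger:
void maybe_collect(rust_task *task) {
    if (task->cc_counter++ > RUST_CC_FREQUENCY) {   // RUST_CC_FREQUENCY == 5000
        task->cc_counter = 0;
        cc::do_cc(task);
    }
}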
} // end namespace cc
//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

View File

@@ -1,30 +0,0 @@
// Rust cycle collector. Temporary, but will probably stick around for some
// time until LLVM's GC infrastructure is more mature.
#ifndef RUST_CC_H
#define RUST_CC_H
struct rust_task;
namespace cc {
void do_cc(rust_task *task);
// performs a cycle coll then asserts that there is nothing left
void do_final_cc(rust_task *task);
void maybe_cc(rust_task *task);
} // end namespace cc
#endif
//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
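The header keeps the interface to three entry points: do_cc runs one collection, do_final_cc runs one and then reports anything still live, and maybe_cc is the cheap hook intended for hot paths. A hypothetical caller, purely to illustrate the intended usage; the real call sites are elsewhere in the runtime, including the files changed later in this commit:

#include "rust_cc.h"

struct rust_task;   // supplied by the runtime

// Hypothetical teardown hook, for illustration only:
void teardown_boxes(rust_task *task) {
    cc::do_final_cc(task);   // collect cycles, then log any unreclaimed boxes
}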

View File

@@ -6,7 +6,6 @@
#include <algorithm>
#include "rust_task.h"
#include "rust_cc.h"
#include "rust_env.h"
#include "rust_port.h"
#include "rust_globals.h"
@@ -28,7 +27,6 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
boxed(sched_loop->kernel->env, &local_region),
local_region(&sched_loop->local_region),
unwinding(false),
cc_counter(0),
total_stack_sz(0),
task_local_data(NULL),
task_local_data_cleanup(NULL),

View File

@@ -223,8 +223,6 @@ rust_task : public kernel_owned<rust_task>
bool propagate_failure;
uint32_t cc_counter;
debug::task_debug_info debug;
// The amount of stack we're using, excluding red zones

View File

@@ -8,7 +8,6 @@
#include "rust_globals.h"
#include "rust_task.h"
#include "rust_cc.h"
#include "rust_sched_loop.h"
#include "rust_unwind.h"
#include "rust_upcall.h"
@@ -225,8 +224,6 @@ upcall_s_malloc(s_malloc_args *args) {
LOG_UPCALL_ENTRY(task);
LOG(task, mem, "upcall malloc(0x%" PRIxPTR ")", args->td);
cc::maybe_cc(task);
// FIXME--does this have to be calloc? (Issue #2682)
rust_opaque_box *box = task->boxed.calloc(args->td, args->size);
void *body = box_body(box);
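This last hunk shows the allocation-side hook being removed: upcall_s_malloc called cc::maybe_cc before carving out a new shared box, so the collector, when it was enabled at all, was driven by allocation pressure. Stripped of the upcall plumbing, the removed ordering looked roughly like this (the wrapper name and signature are invented for the sketch):

// Sketch of the shared-box allocation path before this commit:
rust_opaque_box *alloc_shared_box(rust_task *task, type_desc *td, size_t size) {
    cc::maybe_cc(task);                    // the call this commit removes
    return task->boxed.calloc(td, size);   // as in upcall_s_malloc above
}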