gcc/libgo/runtime/mcentral.c
Ian Lance Taylor 75791bab05 runtime: use -fgo-c-header to build C header file
Use the new -fgo-c-header option to build a header file for the Go
    runtime code in libgo/go/runtime, and use the new header file in the C
    runtime code in libgo/runtime.  This will ensure that the Go code and C
    code share the same data structures as we convert the runtime from C to
    Go.
    
    The new file libgo/go/runtime/runtime2.go is copied from the Go 1.7
    release, and then edited to remove unnecessary data structures and
    modify others for use with libgo.
    
    The new file libgo/go/runtime/mcache.go is an initial version of the
    same file in the Go 1.7 release, and will be replaced by the Go 1.7
    file when we convert to the new memory allocator.
    
    The new file libgo/go/runtime/type.go describes the gccgo version of the
    reflection data structures, and replaces the Go 1.7 runtime file which
    describes the gc version of those structures.
    
    Using the new header file means changing a number of struct fields to
    use Go naming conventions (that is, no underscores) and renaming
    constants to have a leading underscore so that they are not exported
    from the Go package.  These names were updated in the C code.
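    
    For illustration (hypothetical names, not quoted from the diff): a
    struct field spelled size_class becomes sizeclass under the Go
    convention, and a constant such as Grunnable becomes _Grunnable so
    that it is not exported from the runtime package.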
    
    The C code was also changed to drop the thread-local variable m, as was
    done some time ago in the gc sources.  Now the m field is always
    accessed using g->m, where g is the single remaining thread-local
    variable.  This in turn required some adjustments to set g->m correctly
    in all cases.
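    
    Schematically (a minimal sketch, not quoted from the diff):
    
        // before: m was its own thread-local variable
        m->mcache = c;
        // after: m is reached through the thread-local g
        g->m->mcache = c;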
    
    Also pass the new -fgo-compiling-runtime option when compiling the
    runtime package, although that option doesn't do anything yet.
    
    Reviewed-on: https://go-review.googlesource.com/28051

From-SVN: r239872
2016-08-30 21:07:47 +00:00


// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
// so that it is faster to move those lists between MCaches and MCentrals.
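//
// A rough sketch of the MCentral layout (malloc.h has the authoritative
// declaration; this summary is illustrative):
//
//	struct MCentral
//	{
//		Lock;			// protects the lists and counters below
//		int32	sizeclass;	// size class this central list serves
//		MSpan	nonempty;	// list of spans with free objects
//		MSpan	empty;		// list of spans with no free objects
//		int32	nfree;		// total free objects in nonempty spans
//	};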
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
static void MCentral_Free(MCentral *c, MLink *v);
static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);
// Initialize a single central free list.
void
runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
c->sizeclass = sizeclass;
runtime_MSpanList_Init(&c->nonempty);
runtime_MSpanList_Init(&c->empty);
}
// Allocate a span to use in an MCache.
MSpan*
runtime_MCentral_CacheSpan(MCentral *c)
{
MSpan *s;
int32 cap, n;
uint32 sg;
runtime_lock(c);
sg = runtime_mheap.sweepgen;
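
	// Sweep generation protocol, as used by the checks below (this
	// summary follows the convention documented in the runtime):
	//	s->sweepgen == sg-2: the span needs sweeping
	//	s->sweepgen == sg-1: the span is being swept
	//	s->sweepgen == sg:   the span is swept and ready to use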
retry:
	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could have been moved to heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		goto havespan;
	}

	for(s = c->empty.next; s != &c->empty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(&c->empty, s);
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could be moved to nonempty or heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}

	// Replenish central list if empty.
	if(!MCentral_Grow(c)) {
		runtime_unlock(c);
		return nil;
	}
	goto retry;

havespan:
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n == 0)
		runtime_throw("empty span");
	if(s->freelist == nil)
		runtime_throw("freelist empty");
	c->nfree -= n;
	runtime_MSpanList_Remove(s);
	runtime_MSpanList_InsertBack(&c->empty, s);
	s->incache = true;
	runtime_unlock(c);
	return s;
}
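
// Usage sketch (hypothetical caller, not code from this file): an
// MCache refill path might do roughly
//
//	s = runtime_MCentral_CacheSpan(c);
//	// ... allocate objects from s->freelist ...
//	runtime_MCentral_UncacheSpan(c, s);
//
// where c is the MCentral for the desired size class.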

// Return span from an MCache.
void
runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	MLink *v;
	int32 cap, n;

	runtime_lock(c);

	s->incache = false;

	// Move any explicitly freed items from the freebuf to the freelist.
	while((v = s->freebuf) != nil) {
		s->freebuf = v->next;
		runtime_markfreed(v);
		v->next = s->freelist;
		s->freelist = v;
		s->ref--;
	}

	if(s->ref == 0) {
		// Free back to heap. Unlikely, but possible.
		MCentral_ReturnToHeap(c, s); // unlocks c
		return;
	}

	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		c->nfree += n;
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}
	runtime_unlock(c);
}

// Free the list of objects back into the central free list c.
// Called from runtime_free.
void
runtime_MCentral_FreeList(MCentral *c, MLink *start)
{
	MLink *next;

	runtime_lock(c);
	for(; start != nil; start = next) {
		next = start->next;
		MCentral_Free(c, start);
	}
	runtime_unlock(c);
}

// Helper: free one object back into the central free list.
// Caller must hold lock on c on entry. Holds lock on exit.
static void
MCentral_Free(MCentral *c, MLink *v)
{
	MSpan *s;

	// Find span for v.
	s = runtime_MHeap_Lookup(&runtime_mheap, v);
	if(s == nil || s->ref == 0)
		runtime_throw("invalid free");
	if(s->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("free into unswept span");

	// If the span is currently being used unsynchronized by an MCache,
	// we can't modify the freelist. Add to the freebuf instead. The
	// items will get moved to the freelist when the span is returned
	// by the MCache.
	if(s->incache) {
		v->next = s->freebuf;
		s->freebuf = v;
		return;
	}

	// Move span to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the object to span's free list.
	runtime_markfreed(v);
	v->next = s->freelist;
	s->freelist = v;
	s->ref--;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		MCentral_ReturnToHeap(c, s); // unlocks c
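		// ReturnToHeap released c's lock; reacquire it because
		// MCentral_Free must hold the lock when it returns.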
		runtime_lock(c);
	}
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
// the latest generation.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime_throw("freespan into cached span");
	runtime_lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

	if(s->ref != 0) {
		runtime_unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}

void
runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
{
	int32 size;
	int32 npages;

	npages = runtime_class_to_allocnpages[sizeclass];
	size = runtime_class_to_size[sizeclass];
	*npagesp = npages;
	*sizep = size;
	*nobj = (npages << PageShift) / size;
}

// Fetch a new span from the heap and
// carve into objects for the free list.
static bool
MCentral_Grow(MCentral *c)
{
	int32 i, n, npages;
	uintptr size;
	MLink **tailp, *v;
	byte *p;
	MSpan *s;

	runtime_unlock(c);
	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
	if(s == nil) {
		// TODO(rsc): Log out of memory
		runtime_lock(c);
		return false;
	}

	// Carve span into sequence of blocks.
	tailp = &s->freelist;
	p = (byte*)(s->start << PageShift);
	s->limit = (uintptr)(p + size*n);
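
	// The loop below threads the span's memory into a singly linked
	// free list, s->freelist -> block 0 -> block 1 -> ... -> nil,
	// advancing tailp so that each block's next pointer is set to
	// the block that follows it.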
for(i=0; i<n; i++) {
v = (MLink*)p;
*tailp = v;
tailp = &v->next;
p += size;
}
*tailp = nil;
runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
runtime_lock(c);
c->nfree += n;
runtime_MSpanList_Insert(&c->nonempty, s);
return true;
}

// Return s to the heap. s must be unused (s->ref == 0). Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime_throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(c);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
}