// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
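
// Growth of the allspans array above: the capacity starts at 64 kB worth of
// pointers and then grows by 3/2 each time it fills. For example, assuming
// sizeof(MSpan*) == 8 (a 64-bit target), the first allocation holds
// 64*1024/8 == 8192 span pointers and the next resize would allocate 12288
// entries. The old backing array is deliberately leaked rather than freed
// when the sweeper may still be iterating over it (h->allspans == sweepspans).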

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	MStats *pmstats;
	uint32 i;

	pmstats = mstats();
	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++) {
		runtime_MSpanList_Init(&h->free[i]);
		runtime_MSpanList_Init(&h->busy[i]);
	}
	runtime_MSpanList_Init(&h->freelarge);
	runtime_MSpanList_Init(&h->busylarge);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr pagesize;
	uintptr n;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
	h->spans_mapped = n;
}
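
// Sizing of the spans map above: one h->spans[] pointer is kept per heap page
// in use, so the number of bytes to map is
//   (arena_used - arena_start) / PageSize * sizeof(h->spans[0])
// rounded up to both the runtime page size and the system page size so that
// SysMap always operates on whole pages. For example, assuming PageSize ==
// 4096 and 8-byte pointers, growing the arena by 1 MB maps roughly another
// 2 kB of the spans array (rounded up to one system page).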

// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in the process of sweeping
		break;
	}
	return n;
}
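
// Note on the sweepgen values tested above (this mirrors the convention the
// sweeper uses elsewhere in this runtime): relative to the heap's current
// sweepgen h->sweepgen,
//   s->sweepgen == h->sweepgen - 2  means the span needs sweeping,
//   s->sweepgen == h->sweepgen - 1  means the span is being swept,
//   s->sweepgen == h->sweepgen      means the span has been swept.
// The CAS from sg-2 to sg-1 is what lets this function claim a span without
// racing with the background sweeper.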

// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
static void
MHeap_Reclaim(MHeap *h, uintptr npage)
{
	uintptr reclaimed, n;

	// First try to sweep busy spans with large objects of size >= npage;
	// this has a good chance of reclaiming the necessary space.
	for(n=npage; n < nelem(h->busy); n++) {
		if(MHeap_ReclaimList(h, &h->busy[n], npage))
			return; // Bingo!
	}

	// Then -- even larger objects.
	if(MHeap_ReclaimList(h, &h->busylarge, npage))
		return; // Bingo!

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed = 0;
	for(n=0; n < npage && n < nelem(h->busy); n++) {
		reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
		if(reclaimed >= npage)
			return;
	}

	// Now sweep everything that is not yet swept.
	runtime_unlock(h);
	for(;;) {
		n = runtime_sweepone();
		if(n == (uintptr)-1) // all spans are swept
			break;
		reclaimed += n;
		if(reclaimed >= npage)
			break;
	}
	runtime_lock(h);
}
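
// The reclaim order above is largest-first on purpose: a single busy span of
// npage or more pages can satisfy the request outright, whereas the smaller
// size classes each contribute only part of the request and so have to be
// accumulated in reclaimed. Only when the per-size-class lists run dry does
// the loop fall back to runtime_sweepone(), which sweeps arbitrary spans.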

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MStats *pmstats;
	MSpan *s;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		pmstats->heap_inuse += npage<<PageShift;
		if(large) {
			pmstats->heap_objects++;
			pmstats->heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}

static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats()->heap_idle -= s->npages<<PageShift;
	mstats()->heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}
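
// A note on the trim path above: when the chosen span is larger than needed,
// the tail is split off into a fresh MSpan t, its boundary entries in
// h->spans are fixed up, and t is handed to MHeap_FreeLocked so it coalesces
// back into the free lists like any other freed span. Marking t MSpanInUse
// first is what makes that call legal, since FreeLocked refuses to free a
// span that is not in use. t inherits needzero and unusedsince from s so no
// information about the trimmed pages is lost.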

// Allocate a span of at least npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->freelarge, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
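
// Worked example for BestFit: given a free-large list holding spans of 3, 8,
// and 5 pages (in that order) and a request for npage == 4, the 3-page span
// is skipped, the 8-page span becomes the provisional best, and the 5-page
// span then replaces it because it is smaller while still large enough. Ties
// on size are broken by the lower start address, which keeps allocations
// packed toward the beginning of the arena.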

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
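
// Rounding in MHeap_Grow above: (npage+15)&~15 rounds the request up to a
// multiple of 16 pages, which is the "64kB" of the comment assuming 4 kB
// pages (16*4kB == 64kB); with a different PageShift the multiple of 16
// pages still holds, only the byte figure changes. The request is then
// raised to HeapAllocChunk if it is smaller, and retried at the exact size
// when the padded request cannot be satisfied.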

// Look up the span at the given address.
// Address is guaranteed to be in the map
// and is guaranteed to be the start or end of a span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in the map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (uintptr)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	MStats *pmstats;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	pmstats->heap_inuse -= s->npages<<PageShift;
	if(acct) {
		pmstats->heap_alloc -= s->npages<<PageShift;
		pmstats->heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats()->heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		s->needzero |= t->needzero;
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		s->needzero |= t->needzero;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->freelarge, s);
}
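
// Coalescing in MHeap_FreeLocked relies on the page-indexed h->spans map:
// h->spans[p-1] is the span owning the page just before s, and
// h->spans[p+s->npages] the span owning the page just after it (once s has
// absorbed any left neighbor). A neighbor is merged only if it is not
// MSpanInUse, and after each merge only the boundary entries of h->spans are
// rewritten, since lookups for a free span only ever consult its first and
// last page (see the comment at the top of this file).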

static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}

static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats()->heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}
	return sumreleased;
}
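
// The rounding at the end of scavengelist matters when the system page size
// is larger than the runtime's PageSize (for example 64 kB system pages
// paired with small runtime pages, as seen on some PowerPC configurations):
// the advise-style SysUnused call must cover whole system pages, so the
// span's byte range is shrunk inward to system-page boundaries and may end
// up empty, in which case nothing is handed back even though heap_released
// has already been credited for the span.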

static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->freelarge, now, limit);

	if(runtime_debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
			mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
	}
}

// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	int64 unixnow;
	uint32 k;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		runtime_lock(h);
		unixnow = runtime_unixnanotime();
		if(unixnow - mstats()->last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger cannot block other goroutines;
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
		}
		now = runtime_nanotime();
		scavenge(k, now, limit);
		runtime_unlock(h);
	}
}
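
// Timing of the scavenger loop above, with the constants as written: it
// wakes every tick = min(forcegc, limit)/2 = 60 seconds, forces a GC if none
// has completed within the last 2 minutes (so the unused-since stamps are
// based on reasonably fresh data), and returns to the OS any span that has
// sat unused for more than 5 minutes.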

void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(2); // force GC and do eager sweep
	runtime_lock(&runtime_mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->incache = false;
	span->elemsize = 0;
	span->state = MSpanDead;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
	span->speciallock.key = 0;
	span->specials = nil;
	span->needzero = 0;
	span->freebuf = nil;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}

void
runtime_MSpanList_InsertBack(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_InsertBack %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_InsertBack");
	}
	span->next = list;
	span->prev = list->prev;
	span->next->prev = span;
	span->prev->next = span;
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
static bool
addspecial(void *p, Special *s)
{
	MSpan *span;
	Special **t, *x;
	uintptr offset;
	byte kind;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("addspecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses the specials list without locks, and it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);
	kind = s->kind;

	runtime_lock(&span->speciallock);

	// Find splice point, check for existing record.
	t = &span->specials;
	while((x = *t) != nil) {
		if(offset == x->offset && kind == x->kind) {
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return false; // already exists
		}
		if(offset < x->offset || (offset == x->offset && kind < x->kind))
			break;
		t = &x->next;
	}
	// Splice in record, fill in offset.
	s->offset = offset;
	s->next = x;
	*t = s;
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return true;
}
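
// The specials list walked above is kept sorted by (offset, kind). That gives
// addspecial a cheap duplicate check, lets removespecial stop at the first
// matching offset, and lets freeallspecials collect every special belonging
// to an object with a single in-order scan over [offset, offset+size).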

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
static Special*
removespecial(void *p, byte kind)
{
	MSpan *span;
	Special *s, **t;
	uintptr offset;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("removespecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses the specials list without locks, and it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);

	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if(offset == s->offset && kind == s->kind) {
			*t = s->next;
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return s;
		}
		t = &s->next;
	}
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return nil;
}

// Adds a finalizer to the object p. Returns true if it succeeded.
bool
runtime_addfinalizer(void *p, FuncVal *f, const FuncType *ft, const PtrType *ot)
{
	SpecialFinalizer *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialFinalizer;
	s->fn = f;
	s->ft = ft;
	s->ot = ot;
	if(addspecial(p, s))
		return true;

	// There was an old finalizer
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
	return false;
}

// Removes the finalizer (if any) from the object p.
void
runtime_removefinalizer(void *p)
{
	SpecialFinalizer *s;

	s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
	if(s == nil)
		return; // there wasn't a finalizer to remove
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
}

// Set the heap profile bucket associated with p to b.
void
runtime_setprofilebucket(void *p, Bucket *b)
{
	SpecialProfile *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialProfile;
	s->b = b;
	if(!addspecial(p, s))
		runtime_throw("setprofilebucket: profile already set");
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
// Returns true if we should keep working on deallocating p.
bool
runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
{
	SpecialFinalizer *sf;
	SpecialProfile *sp;

	switch(s->kind) {
	case KindSpecialFinalizer:
		sf = (SpecialFinalizer*)s;
		runtime_queuefinalizer(p, sf->fn, sf->ft, sf->ot);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, sf);
		runtime_unlock(&runtime_mheap.speciallock);
		return false; // don't free p until finalizer is done
	case KindSpecialProfile:
		sp = (SpecialProfile*)s;
		runtime_MProf_Free(sp->b, size, freed);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
		runtime_unlock(&runtime_mheap.speciallock);
		return true;
	default:
		runtime_throw("bad special kind");
		return true;
	}
}

// Free all special records for p.
void
runtime_freeallspecials(MSpan *span, void *p, uintptr size)
{
	Special *s, **t, *list;
	uintptr offset;

	if(span->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("runtime: freeallspecials: unswept span");
	// First collect all specials into the list, then free them.
	// This is required to avoid deadlock between span->speciallock and proflock.
	list = nil;
	offset = (uintptr)p - (span->start << PageShift);
	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		if(offset + size <= s->offset)
			break;
		if(offset <= s->offset) {
			*t = s->next;
			s->next = list;
			list = s;
		} else
			t = &s->next;
	}
	runtime_unlock(&span->speciallock);

	while(list != nil) {
		s = list;
		list = s->next;
		if(!runtime_freespecial(s, p, size, true))
			runtime_throw("can't explicitly free an object with a finalizer");
	}
}

// Split an allocated span into two equal parts.
void
runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
{
	MSpan *t;
	MCentral *c;
	uintptr i;
	uintptr npages;
	PageID p;

	if(s->state != MSpanInUse)
		runtime_throw("MHeap_SplitSpan on a free span");
	if(s->sizeclass != 0 && s->ref != 1)
		runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
	npages = s->npages;

	// remove the span from whatever list it is in now
	if(s->sizeclass > 0) {
		// must be in h->central[x].mempty
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		runtime_MSpanList_Remove(s);
		runtime_unlock(c);
		runtime_lock(h);
	} else {
		// must be in h->busy/busylarge
		runtime_lock(h);
		runtime_MSpanList_Remove(s);
	}
	// heap is locked now

	if(npages == 1) {
		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
		s->ref = 2;
		s->sizeclass = runtime_SizeToClass(PageSize/2);
		s->elemsize = PageSize/2;
	} else {
		// convert span of n>1 pages into two spans of n/2 pages each.
		if((s->npages & 1) != 0)
			runtime_throw("MHeap_SplitSpan on an odd size span");

		// compute position in h->spans
		p = s->start;
		p -= (uintptr)h->arena_start >> PageShift;

		// Allocate a new span for the first half.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start, npages/2);
		t->limit = (uintptr)((t->start + npages/2) << PageShift);
		t->state = MSpanInUse;
		t->elemsize = npages << (PageShift - 1);
		t->sweepgen = s->sweepgen;
		if(t->elemsize <= MaxSmallSize) {
			t->sizeclass = runtime_SizeToClass(t->elemsize);
			t->ref = 1;
		}

		// the old span holds the second half.
		s->start += npages/2;
		s->npages = npages/2;
		s->elemsize = npages << (PageShift - 1);
		if(s->elemsize <= MaxSmallSize) {
			s->sizeclass = runtime_SizeToClass(s->elemsize);
			s->ref = 1;
		}

		// update span lookup table
		for(i = p; i < p + npages/2; i++)
			h->spans[i] = t;
	}

	// place the span into a new list
	if(s->sizeclass > 0) {
		runtime_unlock(h);
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		// swept spans are at the end of the list
		runtime_MSpanList_InsertBack(&c->mempty, s);
		runtime_unlock(c);
	} else {
		// Swept spans are at the end of lists.
		if(s->npages < nelem(h->free))
			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
		else
			runtime_MSpanList_InsertBack(&h->busylarge, s);
		runtime_unlock(h);
	}
}
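
// Arithmetic of the split above: for npages > 1 the new span t takes the
// first npages/2 pages and the original span s keeps the second half, so
// both end up with elemsize == npages << (PageShift-1), i.e. exactly half
// the original span's byte size. Only the first half's entries in h->spans
// need rewriting, because s keeps its old final-page entry and its start
// simply moves forward by npages/2.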