// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral holds two sets of mspans: those with free objects (c.partial)
// and those that are completely allocated (c.full).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
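	//
	// A worked example (illustrative numbers): with sweepgen = 4, swept
	// spans live in partial[4/2%2] = partial[0] and unswept spans in
	// partial[1]; after the next GC cycle sweepgen = 6, so the two sets
	// swap roles.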
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}

// Allocate a span to use in an mcache.
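// The lookup tries, in order: partially-filled swept spans, partially-filled
// unswept spans, and full unswept spans (sweeping them to uncover free slots),
// before falling back to growing a fresh span from the heap.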
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	sl := newSweepLocker()
	sg := sl.sweepGen

	// Try partial swept spans first.
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Now try partial unswept spans.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.partialUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it and use it.
			s.sweep(true)
			sl.dispose()
			goto havespan
		}
		// We failed to get ownership of the span, which means it's being or
		// has been swept by an asynchronous sweeper that just couldn't remove it
		// from the unswept list. That sweeper took ownership of the span and
		// responsibility for either freeing it to the heap or putting it on the
		// right swept list. Either way, we should just ignore it (and it's unsafe
		// for us to do anything else).
	}
	// Now try full unswept spans, sweeping them and putting them into the
	// right list if we fail to get a span.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.fullUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it.
			s.sweep(true)
			// Check if there's any free space.
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				sl.dispose()
				goto havespan
			}
			// Add it to the swept list, because sweeping didn't give us any free space.
			c.fullSwept(sg).push(s.mspan)
		}
		// See comment for partial unswept spans.
	}
	sl.dispose()
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
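	// Each of the checks below catches a span with nothing left to
	// allocate: n == 0 and allocCount == nelems both mean every object is
	// allocated, and freeindex == nelems means the allocator has scanned
	// past the last object.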
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64
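	// For example (illustrative numbers): with freeindex = 70,
	// freeByteBase = 70 &^ 63 = 64 and whichByte = 64/8 = 8, so the cache
	// is refilled starting at allocBits byte 8 and then shifted right by
	// 70 % 64 = 6 bits, leaving bit 0 of allocCache corresponding to
	// object 70.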

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
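	// (sweepgen values are documented on mspan in mheap.go: sg+1 means the
	// span was cached before this sweep cycle began and still needs
	// sweeping, which is the stale case handled below.)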

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s, _ := mheap_.alloc(npages, c.spanclass, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
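	// For example (illustrative numbers): a one-page (8 KiB) span with an
	// element size of 48 bytes yields n = 8192/48 = 170 objects.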
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}