// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || (!ios && arm64) || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || arm64be || alpha || sparc64 || ia64
// +build amd64 !ios,arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x arm64be alpha sparc64 ia64

// See mpagealloc_32bit.go for why ios/arm64 is excluded here.

package runtime

import "unsafe"

const (
	// The number of levels in the radix tree.
	summaryLevels = 5

	// Constants for testing.
	pageAlloc32Bit = 0
	pageAlloc64Bit = 1

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 13
)

// levelBits is the number of bits in the radix for a given level in the super summary
// structure.
//
// The sum of all the entries of levelBits plus logPallocChunkBytes should
// equal heapAddrBits, since the leaf level indexes chunks, not bytes.
var levelBits = [summaryLevels]uint{
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}
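
// As a worked example (a sketch assuming heapAddrBits = 48,
// logPallocChunkBytes = 22, summaryL0Bits = 14, and summaryLevelBits = 3,
// their values on most 64-bit platforms), the bit budget is:
//
//	14 + 3 + 3 + 3 + 3 = 26 = 48 - 22
//
// so the radix levels together index every pallocChunkBytes-sized chunk
// in the address space.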

// levelShift is the number of bits to shift to acquire the radix for a given level
// in the super summary structure.
//
// With levelShift, one can compute the index of the summary at level l related to a
// pointer p by doing:
//
//	p >> levelShift[l]
var levelShift = [summaryLevels]uint{
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
}
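
// A minimal sketch of the index computation described above (summaryIndex is
// a hypothetical helper for illustration, not part of the runtime):
//
//	// summaryIndex returns the index of the level-l summary covering p.
//	func summaryIndex(p uintptr, l int) int {
//		return int(p >> levelShift[l])
//	}
//
// At the leaf level (l = summaryLevels-1) the shift works out to
// logPallocChunkBytes, so the result is the index of the chunk containing p.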

// levelLogPages is log2 the maximum number of runtime pages in the address space
// a summary in the given level represents.
//
// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
var levelLogPages = [summaryLevels]uint{
	logPallocChunkPages + 4*summaryLevelBits,
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}
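
// For instance, assuming logPallocChunkPages = 9 and summaryLevelBits = 3
// (the usual values), a leaf summary covers 2^9 = 512 pages (one chunk),
// its parent covers 2^12 pages, and a level-0 summary covers 2^21 pages.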

// sysInit performs architecture-dependent initialization of fields
// in pageAlloc. pageAlloc should be uninitialized except for sysStat
// if any runtime statistic should be updated.
func (p *pageAlloc) sysInit() {
	// Reserve memory for each level. This will get mapped in
	// as R/W by setArenas.
	for l, shift := range levelShift {
		entries := 1 << (heapAddrBits - shift)

		// Reserve b bytes of memory anywhere in the address space.
		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		r := sysReserve(nil, b)
		if r == nil {
			throw("failed to reserve page summary memory")
		}

		// Put this reservation into a slice.
		sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
	}
}
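
// As a rough worked example (assuming heapAddrBits = 48 and pallocSumBytes = 8):
// the leaf level has 1<<26 entries, so sysInit reserves 512 MiB for it, 64 MiB
// for the next level up, and so on, down to 128 KiB for the root, about 585 MiB
// in total. This is only an address-space reservation; sysGrow commits pages
// lazily as the heap grows.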

// sysGrow performs architecture-dependent operations on heap
// growth for the page allocator, such as mapping in new memory
// for summaries. It also updates the length of the slices in
// p.summary.
//
// base is the base of the newly-added heap memory and limit is
// the first address past the end of the newly-added heap memory.
// Both must be aligned to pallocChunkBytes.
//
// The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// addrRangeToSummaryRange converts a range of addresses into a range
	// of summary indices which must be mapped to support those addresses
	// in the summary range.
	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
	}

	// summaryRangeToSumAddrRange converts a range of indices in any
	// level of p.summary into page-aligned addresses which cover that
	// range of indices.
	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		base := unsafe.Pointer(&p.summary[level][0])
		return addrRange{
			offAddr{uintptr(add(base, baseOffset))},
			offAddr{uintptr(add(base, limitOffset))},
		}
	}

	// addrRangeToSumAddrRange is a convenience function that converts
	// an address range r to the address range of the given summary level
	// that stores the summaries for r.
	addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
		sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
		return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
	}

	// Find the first inUse index which is strictly greater than base.
	//
	// Because this function will never be asked to remap the same memory
	// twice, this index is effectively the index at which we would insert
	// this new growth, and base will never overlap/be contained within
	// any existing range.
	//
	// This will be used to look at what memory in the summary array is already
	// mapped before and after this new range.
	inUseIndex := p.inUse.findSucc(base)
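
	// For example (hypothetical, chunk-aligned addresses): if p.inUse.ranges
	// holds [0x400000, 0x800000) and [0x1000000, 0x1400000), then for
	// base = 0xc00000 findSucc returns 1. The new growth slots in between the
	// two existing ranges, and ranges[0] and ranges[1] are exactly the
	// neighbors whose already-mapped summary memory must be pruned below.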

	// Walk up the radix tree and map summaries in as needed.
	for l := range p.summary {
		// Figure out what part of the summary array this new address space needs.
		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))

		// Update the summary slices with a new upper-bound. This ensures
		// we get tight bounds checks on at least the top bound.
		//
		// We must do this regardless of whether we map new memory.
		if needIdxLimit > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:needIdxLimit]
		}

		// Compute the needed address range in the summary array for level l.
		need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)

		// Prune need down to what needs to be newly mapped. Some parts of it may
		// already be mapped by what inUse describes due to page alignment requirements
		// for mapping. prune's invariants are guaranteed by the fact that this
		// function will never be asked to remap the same memory twice.
		if inUseIndex > 0 {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
		}
		if inUseIndex < len(p.inUse.ranges) {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
		}
		// It's possible that after our pruning above, there's nothing new to map.
		if need.size() == 0 {
			continue
		}
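
		// For instance, if physPageSize is 4 KiB and the summaries for an
		// adjacent, already-grown range share a physical page with need,
		// that page was already mapped when the neighbor grew; subtracting
		// the neighbor's summary address range keeps sysMap from being
		// called twice on the same page.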

		// Map and commit need.
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
		sysUsed(unsafe.Pointer(need.base.addr()), need.size())
	}
}