// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	. "runtime"
	"testing"
	"time"
	"unsafe"
)

func TestMemStats(t *testing.T) {
	t.Skip("skipping test with gccgo")
	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// Everything except HeapReleased, HeapIdle, and NumGC,
	// because they indeed can be 0.
	if st.Alloc == 0 || st.TotalAlloc == 0 || st.Sys == 0 || st.Lookups == 0 ||
		st.Mallocs == 0 || st.Frees == 0 || st.HeapAlloc == 0 || st.HeapSys == 0 ||
		st.HeapInuse == 0 || st.HeapObjects == 0 || st.StackInuse == 0 ||
		st.StackSys == 0 || st.MSpanInuse == 0 || st.MSpanSys == 0 || st.MCacheInuse == 0 ||
		st.MCacheSys == 0 || st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 ||
		st.NextGC == 0 || st.NumForcedGC == 0 {
		t.Fatalf("Zero value: %+v", *st)
	}

	if st.Alloc > 1e10 || st.TotalAlloc > 1e11 || st.Sys > 1e10 || st.Lookups > 1e10 ||
		st.Mallocs > 1e10 || st.Frees > 1e10 || st.HeapAlloc > 1e10 || st.HeapSys > 1e10 ||
		st.HeapIdle > 1e10 || st.HeapInuse > 1e10 || st.HeapObjects > 1e10 || st.StackInuse > 1e10 ||
		st.StackSys > 1e10 || st.MSpanInuse > 1e10 || st.MSpanSys > 1e10 || st.MCacheInuse > 1e10 ||
		st.MCacheSys > 1e10 || st.BuckHashSys > 1e10 || st.GCSys > 1e10 || st.OtherSys > 1e10 ||
		st.NextGC > 1e10 || st.NumGC > 1e9 || st.NumForcedGC > 1e9 || st.PauseTotalNs > 1e11 {
		t.Fatalf("Insanely high value (overflow?): %+v", *st)
	}

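	// Check that Sys is the total of the per-category XxxSys stats below.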
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

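	// PauseEnd is a circular buffer; the most recent GC's end time
	// lives at index (NumGC+255)%len(PauseEnd).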
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

func TestStringConcatenationAllocs(t *testing.T) {
	t.Skip("skipping test with gccgo")
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

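// TestTinyAlloc checks that the tiny allocator packs small pointer-free
// allocations together: of 16 separate new(byte) results, at least two
// should fall into the same 8-byte-aligned chunk.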
func TestTinyAlloc(t *testing.T) {
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

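// mallocSink keeps the XOR of the allocated pointers reachable so the
// compiler cannot optimize the benchmark allocations away.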
var mallocSink uintptr

func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

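// The MallocTypeInfo benchmarks allocate structs containing pointers, so
// unlike the plain int64 allocations above, these allocations carry pointer
// type information for the garbage collector.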
func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

var n = flag.Int("n", 1000, "number of goroutines")

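// The following benchmarks measure GC time (via benchHelper) while *n
// goroutines sit blocked in different channel-receive styles: a two-case
// select, a plain blocking receive, and a for-range loop.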
func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

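// benchHelper starts n goroutines running read on per-goroutine channels,
// then on each iteration wakes them all, lets them block again, and times a
// single GC while they are parked.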
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

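// BenchmarkGoroutineIdle measures GC time with *n goroutines parked on a
// channel receive for the whole benchmark.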
func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}