libgo: update to almost the 1.14.2 release
Update to edea4a79e8d7dea2456b688f492c8af33d381dc2, which is likely to be approximately the 1.14.2 release.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/227377
parent 749bd22ddc
commit 52fa80f853
@@ -1,4 +1,4 @@
7a62a49e62c090118fa003d9265c5f5e2090c4f9
4a31d064fd6996f64b620104e849292af8f25e12

The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
@@ -1,4 +1,4 @@
20a838ab94178c55bc4dc23ddc332fce8545a493
edea4a79e8d7dea2456b688f492c8af33d381dc2

The first line of this file holds the git revision number of the
last merge done from the master library sources.
@@ -1 +1 @@
go1.14
go1.14.2
@@ -2265,7 +2265,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
// Translate to zero-length array instead.
count = 0
}
sub := c.loadType(dt.Type, pos, key)
sub := c.Type(dt.Type, pos)
t.Align = sub.Align
t.Go = &ast.ArrayType{
Len: c.intExpr(count),
@@ -2410,7 +2410,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
c.ptrs[key] = append(c.ptrs[key], t)

case *dwarf.QualType:
t1 := c.loadType(dt.Type, pos, key)
t1 := c.Type(dt.Type, pos)
t.Size = t1.Size
t.Align = t1.Align
t.Go = t1.Go
@@ -2494,7 +2494,13 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
}
name := c.Ident("_Ctype_" + dt.Name)
goIdent[name.Name] = name
sub := c.loadType(dt.Type, pos, key)
akey := ""
if c.anonymousStructTypedef(dt) {
// only load type recursively for typedefs of anonymous
// structs, see issues 37479 and 37621.
akey = key
}
sub := c.loadType(dt.Type, pos, akey)
if c.badPointerTypedef(dt) {
// Treat this typedef as a uintptr.
s := *sub
@@ -3015,6 +3021,13 @@ func fieldPrefix(fld []*ast.Field) string {
return prefix
}

// anonymousStructTypedef reports whether dt is a C typedef for an anonymous
// struct.
func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
st, ok := dt.Type.(*dwarf.StructType)
return ok && st.StructName == ""
}

// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
// A typedef is bad if C code sometimes stores non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
@@ -22,6 +22,7 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/modload"
"cmd/go/internal/str"
"cmd/go/internal/work"
)

@@ -438,7 +439,7 @@ func (g *Generator) exec(words []string) {
cmd.Stderr = os.Stderr
// Run the command in the package directory.
cmd.Dir = g.dir
cmd.Env = append(cfg.OrigEnv, g.env...)
cmd.Env = str.StringList(cfg.OrigEnv, g.env)
err := cmd.Run()
if err != nil {
g.errorf("running %q: %s", words[0], err)
@@ -1142,7 +1142,7 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {

cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = a.Package.Dir
cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv)
cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv[:len(cfg.OrigEnv):len(cfg.OrigEnv)])
cmd.Stdout = stdout
cmd.Stderr = stdout

@@ -1224,6 +1224,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {
if len(out) == 0 {
fmt.Fprintf(cmd.Stdout, "%s\n", err)
}
// NOTE(golang.org/issue/37555): test2json reports that a test passes
// unless "FAIL" is printed at the beginning of a line. The test may not
// actually print that if it panics, exits, or terminates abnormally,
// so we print it here. We can't always check whether it was printed
// because some tests need stdout to be a terminal (golang.org/issue/34791),
// not a pipe.
// TODO(golang.org/issue/29062): tests that exit with status 0 without
// printing a final result should fail.
fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t)
}
@@ -213,6 +213,9 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID {
} else if cfg.BuildTrimpath && p.Module != nil {
fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
}
if p.Module != nil {
fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
}
fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
fmt.Fprintf(h, "import %q\n", p.ImportPath)
fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
@@ -9,6 +9,13 @@ go mod edit -go=1.9
grep 'go 1.9' go.mod
go build

# Reverting the version should force a rebuild and error instead of using
# the cached 1.9 build. (https://golang.org/issue/37804)
go mod edit -go=1.8
! go build
stderr 'type aliases only supported as of'

-- go.mod --
module m
go 1.8
@@ -153,6 +153,7 @@ var pkgDeps = map[string][]string{
"internal/syscall/unix": {"L0", "syscall"},
"internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"internal/syscall/execenv": {"L0", "syscall", "internal/syscall/windows", "unicode/utf16"},
"time": {
// "L0" without the "io" package:
"errors",
@@ -170,10 +171,10 @@ var pkgDeps = map[string][]string{
"internal/cfg": {"L0"},
"internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows", "internal/syscall/unix"},
"internal/testlog": {"L0"},
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/syscall/execenv", "internal/testlog"},
"path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
"io/ioutil": {"L2", "os", "path/filepath", "time"},
"os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
"os/exec": {"L2", "os", "context", "path/filepath", "syscall", "internal/syscall/execenv"},
"os/signal": {"L2", "os", "syscall"},

// OS enables basic operating system functionality,
19 libgo/go/internal/syscall/execenv/execenv_default.go (new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !windows

package execenv

import "syscall"

// Default will return the default environment
// variables based on the process attributes
// provided.
//
// Defaults to syscall.Environ() on all platforms
// other than Windows.
func Default(sys *syscall.SysProcAttr) ([]string, error) {
return syscall.Environ(), nil
}
@@ -1,8 +1,10 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package os
// +build windows

package execenv

import (
"internal/syscall/windows"
@@ -11,9 +13,17 @@ import (
"unsafe"
)

func environForSysProcAttr(sys *syscall.SysProcAttr) (env []string, err error) {
// Default will return the default environment
// variables based on the process attributes
// provided.
//
// If the process attributes contain a token, then
// the environment variables will be sourced from
// the defaults for that user token, otherwise they
// will be sourced from syscall.Environ().
func Default(sys *syscall.SysProcAttr) (env []string, err error) {
if sys == nil || sys.Token == 0 {
return Environ(), nil
return syscall.Environ(), nil
}
var block *uint16
err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
@@ -1,13 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !windows

package os

import "syscall"

func environForSysProcAttr(sys *syscall.SysProcAttr) ([]string, error) {
return Environ(), nil
}
@@ -24,6 +24,7 @@ import (
"bytes"
"context"
"errors"
"internal/syscall/execenv"
"io"
"os"
"path/filepath"
@@ -222,11 +223,11 @@ func interfaceEqual(a, b interface{}) bool {
return a == b
}

func (c *Cmd) envv() []string {
func (c *Cmd) envv() ([]string, error) {
if c.Env != nil {
return c.Env
return c.Env, nil
}
return os.Environ()
return execenv.Default(c.SysProcAttr)
}

func (c *Cmd) argv() []string {
@@ -413,11 +414,15 @@ func (c *Cmd) Start() error {
}
c.childFiles = append(c.childFiles, c.ExtraFiles...)

var err error
envv, err := c.envv()
if err != nil {
return err
}

c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
Dir: c.Dir,
Files: c.childFiles,
Env: addCriticalEnv(dedupEnv(c.envv())),
Env: addCriticalEnv(dedupEnv(envv)),
Sys: c.SysProcAttr,
})
if err != nil {
@@ -7,6 +7,7 @@
package os

import (
"internal/syscall/execenv"
"runtime"
"syscall"
)
@@ -39,7 +40,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e
Sys: attr.Sys,
}
if sysattr.Env == nil {
sysattr.Env, err = environForSysProcAttr(sysattr.Sys)
sysattr.Env, err = execenv.Default(sysattr.Sys)
if err != nil {
return nil, err
}
@@ -176,9 +176,19 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
// Note: this function must match the compiler generated
// functions exactly. See issue 37716.
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
return memhash(p, h, t.size)
// Handle ptr sizes specially, see issue 37086.
switch t.size {
case 4:
return memhash32(p, h)
case 8:
return memhash64(p, h)
default:
return memhash(p, h, t.size)
}
}
switch t.kind & kindMask {
case kindFloat32:
@@ -10,8 +10,10 @@ import "unsafe"

func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
// Check that (*[n]elem)(p) is appropriately aligned.
// Note that we allow unaligned pointers if the types they point to contain
// no pointers themselves. See issue 37298.
// TODO(mdempsky): What about fieldAlign?
if uintptr(p)&(uintptr(elem.align)-1) != 0 {
if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 {
throw("checkptr: unsafe pointer conversion")
}
@@ -28,7 +28,8 @@ func TestCheckPtr(t *testing.T) {
cmd string
want string
}{
{"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"},
{"CheckPtrAlignmentPtr", "fatal error: checkptr: unsafe pointer conversion\n"},
{"CheckPtrAlignmentNoPtr", ""},
{"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"},
{"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"},
{"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"},
@@ -42,6 +43,12 @@ func TestCheckPtr(t *testing.T) {
if err != nil {
t.Log(err)
}
if tc.want == "" {
if len(got) > 0 {
t.Errorf("output:\n%s\nwant no output", got)
}
return
}
if !strings.HasPrefix(string(got), tc.want) {
t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want)
}
@@ -6,6 +6,7 @@ package runtime_test

import (
"fmt"
"os"
"reflect"
"runtime"
"testing"
@@ -281,3 +282,122 @@ func TestDeferForFuncWithNoExit(t *testing.T) {
for {
}
}

// Test case approximating issue #37664, where a recursive function (interpreter)
// may do repeated recovers/re-panics until it reaches the frame where the panic
// can actually be handled. The recurseFnPanicRec() function is testing that there
// are no stale defer structs on the defer chain after the interpreter() sequence,
// by writing a bunch of 0xffffffffs into several recursive stack frames, and then
// doing a single panic-recover which would invoke any such stale defer structs.
func TestDeferWithRepeatedRepanics(t *testing.T) {
interpreter(0, 6, 2)
recurseFnPanicRec(0, 10)
interpreter(0, 5, 1)
recurseFnPanicRec(0, 10)
interpreter(0, 6, 3)
recurseFnPanicRec(0, 10)
}

func interpreter(level int, maxlevel int, rec int) {
defer func() {
e := recover()
if e == nil {
return
}
if level != e.(int) {
//fmt.Fprintln(os.Stderr, "re-panicing, level", level)
panic(e)
}
//fmt.Fprintln(os.Stderr, "Recovered, level", level)
}()
if level+1 < maxlevel {
interpreter(level+1, maxlevel, rec)
} else {
//fmt.Fprintln(os.Stderr, "Initiating panic")
panic(rec)
}
}

func recurseFnPanicRec(level int, maxlevel int) {
defer func() {
recover()
}()
recurseFn(level, maxlevel)
}

func recurseFn(level int, maxlevel int) {
a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}
if level+1 < maxlevel {
// Need this print statement to keep a around. '_ = a[4]' doesn't do it.
fmt.Fprintln(os.Stderr, "recurseFn", level, a[4])
recurseFn(level+1, maxlevel)
} else {
panic("recurseFn panic")
}
}

// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is
// mistakenly held, and that struct keeps a pointer to a stack-allocated defer
// struct, and that stack-allocated struct gets overwritten or the stack gets
// moved, so a memory error happens on GC.
func TestIssue37688(t *testing.T) {
for j := 0; j < 10; j++ {
g2()
g3()
}
}

type foo struct {
}

func (f *foo) method1() {
fmt.Fprintln(os.Stderr, "method1")
}

func (f *foo) method2() {
fmt.Fprintln(os.Stderr, "method2")
}

func g2() {
var a foo
ap := &a
// The loop forces this defer to be heap-allocated and the remaining two
// to be stack-allocated.
for i := 0; i < 1; i++ {
defer ap.method1()
}
defer ap.method2()
defer ap.method1()
ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9)
// Try to get the stack to be be moved by growing it too large, so
// existing stack-allocated defer becomes invalid.
rec1(2000)
}

func g3() {
// Mix up the stack layout by adding in an extra function frame
g2()
}

func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) {
defer ap.method1()

// Make a defer that has a very large set of args, hence big size for the
// defer record for the open-coded frame (which means it won't use the
// defer pool)
defer func(ap *foo, a, b, c, d, e, f, g, h, i int) {
if v := recover(); v != nil {
fmt.Fprintln(os.Stderr, "did recover")
}
fmt.Fprintln(os.Stderr, "debug", ap, a, b, c, d, e, f, g, h)
}(ap, a, b, c, d, e, f, g, h, i)
panic("ff1 panic")
}

func rec1(max int) {
if max > 0 {
rec1(max - 1)
} else {
fmt.Fprintln(os.Stderr, "finished recursion", max)
}
}
@@ -945,4 +945,29 @@ func SemNwait(addr *uint32) uint32 {
return atomic.Load(&root.nwait)
}

// MapHashCheck computes the hash of the key k for the map m, twice.
// Method 1 uses the built-in hasher for the map.
// Method 2 uses the typehash function (the one used by reflect).
// Returns the two hash values, which should always be equal.
func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
// Unpack m.
mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
mh := (*hmap)(efaceOf(&m).data)

// Unpack k.
kt := efaceOf(&k)._type
var p unsafe.Pointer
if isDirectIface(kt) {
q := efaceOf(&k).data
p = unsafe.Pointer(&q)
} else {
p = efaceOf(&k).data
}

// Compute the hash functions.
x := mt.hasher(noescape(p), uintptr(mh.hash0))
y := typehash(kt, noescape(p), uintptr(mh.hash0))
return x, y
}

var Pusestackmaps = &usestackmaps
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/rand"
"reflect"
. "runtime"
"strings"
"testing"
@@ -48,6 +49,54 @@ func TestMemHash64Equality(t *testing.T) {
}
}

func TestCompilerVsRuntimeHash(t *testing.T) {
// Test to make sure the compiler's hash function and the runtime's hash function agree.
// See issue 37716.
for _, m := range []interface{}{
map[bool]int{},
map[int8]int{},
map[uint8]int{},
map[int16]int{},
map[uint16]int{},
map[int32]int{},
map[uint32]int{},
map[int64]int{},
map[uint64]int{},
map[int]int{},
map[uint]int{},
map[uintptr]int{},
map[*byte]int{},
map[chan int]int{},
map[unsafe.Pointer]int{},
map[float32]int{},
map[float64]int{},
map[complex64]int{},
map[complex128]int{},
map[string]int{},
//map[interface{}]int{},
//map[interface{F()}]int{},
map[[8]uint64]int{},
map[[8]string]int{},
map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128
map[struct{ a, b, _, d int32 }]int{},
map[struct {
a, b int32
c float32
d, e [8]byte
}]int{},
map[struct {
a int16
b int64
}]int{},
} {
k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key
x, y := MapHashCheck(m, k)
if x != y {
t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m)
}
}
}

// Smhasher is a torture test for hash functions.
// https://code.google.com/p/smhasher/
// This code is a port of some of the Smhasher tests to Go.
@@ -771,32 +771,40 @@ func gcSetTriggerRatio(triggerRatio float64) {
goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
}

// If we let triggerRatio go too low, then if the application
// is allocating very rapidly we might end up in a situation
// where we're allocating black during a nearly always-on GC.
// The result of this is a growing heap and ultimately an
// increase in RSS. By capping us at a point >0, we're essentially
// saying that we're OK using more CPU during the GC to prevent
// this growth in RSS.
//
// The current constant was chosen empirically: given a sufficiently
// fast/scalable allocator with 48 Ps that could drive the trigger ratio
// to <0.05, this constant causes applications to retain the same peak
// RSS compared to not having this allocator.
const minTriggerRatio = 0.6

// Set the trigger ratio, capped to reasonable bounds.
if triggerRatio < minTriggerRatio {
// This can happen if the mutator is allocating very
// quickly or the GC is scanning very slowly.
triggerRatio = minTriggerRatio
} else if gcpercent >= 0 {
if gcpercent >= 0 {
scalingFactor := float64(gcpercent) / 100
// Ensure there's always a little margin so that the
// mutator assist ratio isn't infinity.
maxTriggerRatio := 0.95 * float64(gcpercent) / 100
maxTriggerRatio := 0.95 * scalingFactor
if triggerRatio > maxTriggerRatio {
triggerRatio = maxTriggerRatio
}

// If we let triggerRatio go too low, then if the application
// is allocating very rapidly we might end up in a situation
// where we're allocating black during a nearly always-on GC.
// The result of this is a growing heap and ultimately an
// increase in RSS. By capping us at a point >0, we're essentially
// saying that we're OK using more CPU during the GC to prevent
// this growth in RSS.
//
// The current constant was chosen empirically: given a sufficiently
// fast/scalable allocator with 48 Ps that could drive the trigger ratio
// to <0.05, this constant causes applications to retain the same peak
// RSS compared to not having this allocator.
minTriggerRatio := 0.6 * scalingFactor
if triggerRatio < minTriggerRatio {
triggerRatio = minTriggerRatio
}
} else if triggerRatio < 0 {
// gcpercent < 0, so just make sure we're not getting a negative
// triggerRatio. This case isn't expected to happen in practice,
// and doesn't really matter because if gcpercent < 0 then we won't
// ever consume triggerRatio further on in this function, but let's
// just be defensive here; the triggerRatio being negative is almost
// certainly undesirable.
triggerRatio = 0
}
memstats.triggerRatio = triggerRatio
@@ -244,15 +244,6 @@ func genAMD64() {

// TODO: MXCSR register?

// Apparently, the signal handling code path in darwin kernel leaves
// the upper bits of Y registers in a dirty state, which causes
// many SSE operations (128-bit and narrower) become much slower.
// Clear the upper bits to get to a clean state. See issue #37174.
// It is safe here as Go code don't use the upper bits of Y registers.
p("#ifdef GOOS_darwin")
p("VZEROUPPER")
p("#endif")

p("PUSHQ BP")
p("MOVQ SP, BP")
p("// Save flags before clobbering them")
@@ -261,6 +252,18 @@ func genAMD64() {
p("ADJSP $%d", l.stack)
p("// But vet doesn't know ADJSP, so suppress vet stack checking")
p("NOP SP")

// Apparently, the signal handling code path in darwin kernel leaves
// the upper bits of Y registers in a dirty state, which causes
// many SSE operations (128-bit and narrower) become much slower.
// Clear the upper bits to get to a clean state. See issue #37174.
// It is safe here as Go code don't use the upper bits of Y registers.
p("#ifdef GOOS_darwin")
p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0")
p("JE 2(PC)")
p("VZEROUPPER")
p("#endif")

l.save()
p("CALL ·asyncPreempt2(SB)")
l.restore()
@@ -379,6 +382,7 @@ func genMIPS(_64bit bool) {
sub := "SUB"
r28 := "R28"
regsize := 4
softfloat := "GOMIPS_softfloat"
if _64bit {
mov = "MOVV"
movf = "MOVD"
@@ -386,6 +390,7 @@ func genMIPS(_64bit bool) {
sub = "SUBV"
r28 = "RSB"
regsize = 8
softfloat = "GOMIPS64_softfloat"
}

// Add integer registers R1-R22, R24-R25, R28
@@ -408,28 +413,36 @@ func genMIPS(_64bit bool) {
mov+" LO, R1\n"+mov+" R1, %d(R29)",
mov+" %d(R29), R1\n"+mov+" R1, LO",
regsize)

// Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant)
l.addSpecial(
var lfp = layout{sp: "R29", stack: l.stack}
lfp.addSpecial(
mov+" FCR31, R1\n"+mov+" R1, %d(R29)",
mov+" %d(R29), R1\n"+mov+" R1, FCR31",
regsize)
// Add floating point registers F0-F31.
for i := 0; i <= 31; i++ {
reg := fmt.Sprintf("F%d", i)
l.add(movf, reg, regsize)
lfp.add(movf, reg, regsize)
}

// allocate frame, save PC of interrupted instruction (in LR)
p(mov+" R31, -%d(R29)", l.stack)
p(sub+" $%d, R29", l.stack)
p(mov+" R31, -%d(R29)", lfp.stack)
p(sub+" $%d, R29", lfp.stack)

l.save()
p("#ifndef %s", softfloat)
lfp.save()
p("#endif")
p("CALL ·asyncPreempt2(SB)")
p("#ifndef %s", softfloat)
lfp.restore()
p("#endif")
l.restore()

p(mov+" %d(R29), R31", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R29), R23") // load PC to REGTMP
p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R29), R23") // load PC to REGTMP
p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
p("JMP (R23)")
}
@@ -218,10 +218,13 @@ func panicmem() {
// pfn is a C function pointer.
// arg is a value to pass to pfn.
func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
gp := getg()
d := newdefer()
if d._panic != nil {
throw("deferproc: d.panic != nil after newdefer")
}
d.link = gp._defer
gp._defer = d
d.frame = frame
d.panicStack = getg()._panic
d.pfn = pfn
@@ -300,8 +303,6 @@ func newdefer() *_defer {
}
}
d.heap = true
d.link = gp._defer
gp._defer = d
return d
}

@@ -1175,6 +1176,12 @@ func startpanic_m() bool {
}
}

// throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
//
// TODO(austin): Remove this after Go 1.15 when we remove the
// mlockGsignal workaround.
var throwReportQuirk func()

var didothers bool
var deadlock mutex

@@ -1221,6 +1228,10 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {

printDebugLog()

if throwReportQuirk != nil {
throwReportQuirk()
}

return docrash
}
@@ -68,7 +68,8 @@ Search:
if len(m.freeStk) < len(stk) {
m.freeStk = make([]uintptr, 1024)
}
e.stk = m.freeStk[:len(stk)]
// Limit cap to prevent append from clobbering freeStk.
e.stk = m.freeStk[:len(stk):len(stk)]
m.freeStk = m.freeStk[len(stk):]

for j := range stk {
@@ -1192,16 +1192,37 @@ func TestTryAdd(t *testing.T) {
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "recursive_inlined_funcs",
name: "bug38096",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
// count (data[2]) == 0 && len(stk) == 1 is an overflow
// entry. The "stk" entry is actually the count.
4, 0, 0, 4242,
},
wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
wantSamples: []*profile.Sample{
{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// If a function is called recursively then it must not be
// inlined in the caller.
//
// N.B. We're generating an impossible profile here, with a
// recursive inlineCallee call. This is simulating a non-Go
// function that looks like an inlined Go function other than
// its recursive property. See pcDeck.tryAdd.
name: "recursive_func_is_not_inlined",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
4, 0, 40, inlinedCalleePtr,
},
wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
// inlinedCaller shows up here because
// runtime_expandFinalInlineFrame adds it to the stack frame.
wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
},
}, {
name: "truncated_stack_trace_later",
@@ -1222,12 +1243,36 @@ func TestTryAdd(t *testing.T) {
4, 0, 70, inlinedCalleePtr,
5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
},
wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
{"runtime/pprof.inlinedCallee"},
wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// We can recover the inlined caller from a truncated stack.
name: "truncated_stack_trace_only",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCalleePtr,
},
wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// The same location is used for duplicated stacks.
name: "truncated_stack_trace_twice",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCalleePtr,
5, 0, 80, inlinedCallerPtr, inlinedCalleePtr,
},
wantLocs: [][]string{
{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"},
{"runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
},
}}
@@ -335,7 +335,10 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
// overflow record
count = uint64(stk[0])
stk = []uint64{
uint64(funcPC(lostProfileEvent)),
// gentraceback guarantees that PCs in the
// stack can be unconditionally decremented and
// still be valid, so we must do the same.
uint64(funcPC(lostProfileEvent) + 1),
}
}
b.m.lookup(stk, tag).count += int64(count)
@@ -397,6 +400,10 @@ func (b *profileBuilder) build() {
// It may emit to b.pb, so there must be no message encoding in progress.
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
b.deck.reset()

// The last frame might be truncated. Recover lost inline frames.
stk = runtime_expandFinalInlineFrame(stk)

for len(stk) > 0 {
addr := stk[0]
if l, ok := b.locs[addr]; ok {
@@ -408,22 +415,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
// then, record the cached location.
locs = append(locs, l.id)

// The stk may be truncated due to the stack depth limit
// (e.g. See maxStack and maxCPUProfStack in runtime) or
// bugs in runtime. Avoid the crash in either case.
// TODO(hyangah): The correct fix may require using the exact
// pcs as the key for b.locs cache management instead of just
// relying on the very first pc. We are late in the go1.14 dev
// cycle, so this is a workaround with little code change.
if len(l.pcs) > len(stk) {
stk = nil
// TODO(hyangah): would be nice if we can enable
// debug print out on demand and report the problematic
// cached location entry and stack traces. Do we already
// have such facility to utilize (e.g. GODEBUG)?
} else {
stk = stk[len(l.pcs):] // skip the matching pcs.
}
// Skip the matching pcs.
//
// Even if stk was truncated due to the stack depth
// limit, expandFinalInlineFrame above has already
// fixed the truncation, ensuring it is long enough.
stk = stk[len(l.pcs):]
continue
}

@@ -440,9 +437,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
stk = stk[1:]
continue
}
// add failed because this addr is not inlined with
// the existing PCs in the deck. Flush the deck and retry to
// handle this pc.
// add failed because this addr is not inlined with the
// existing PCs in the deck. Flush the deck and retry handling
// this pc.
if id := b.emitLocation(); id > 0 {
locs = append(locs, id)
}
@@ -476,8 +473,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
// the fake pcs and restore the inlined and entry functions. Inlined functions
// have the following properties:
// Frame's Func is nil (note: also true for non-Go functions), and
// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
// Frame's Name does not match its entry function frame's name.
// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and
// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive).
//
// As reading and processing the pcs in a stack trace one by one (from leaf to the root),
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
@@ -499,8 +496,8 @@ func (d *pcDeck) reset() {
// to the deck. If it fails the caller needs to flush the deck and retry.
func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
if existing := len(d.pcs); existing > 0 {
// 'frames' are all expanded from one 'pc' and represent all inlined functions
// so we check only the last one.
// 'd.frames' are all expanded from one 'pc' and represent all
// inlined functions so we check only the last one.
newFrame := frames[0]
last := d.frames[existing-1]
if last.Func != nil { // the last frame can't be inlined. Flush.
@@ -424,3 +424,16 @@ func TestFakeMapping(t *testing.T) {
}
}
}

// Make sure the profiler can handle an empty stack trace.
// See issue 37967.
func TestEmptyStack(t *testing.T) {
b := []uint64{
3, 0, 500, // hz = 500
3, 0, 10, // 10 samples with an empty stack trace
}
_, err := translateCPUProfile(b)
if err != nil {
t.Fatalf("translating profile: %v", err)
}
}
@@ -9,6 +9,9 @@ import (
"unsafe"
)

// runtime_expandFinalInlineFrame is defined in runtime/symtab.go.
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr

// runtime_setProfLabel is defined in runtime/proflabel.go.
func runtime_setProfLabel(labels unsafe.Pointer)
@@ -593,6 +593,10 @@ type m struct {
// requested, but fails. Accessed atomically.
preemptGen uint32

// Whether this is a pending preemption signal on this M.
// Accessed atomically.
signalPending uint32

dlogPerM

mOS
@@ -343,6 +343,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt, sigpc uintptr) {

// Acknowledge the preemption.
atomic.Xadd(&gp.m.preemptGen, 1)
atomic.Store(&gp.m.signalPending, 0)
}

// gccgo-specific definition.
@@ -4,6 +4,10 @@

package runtime

import (
_ "unsafe" // for go:linkname
)

// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
@@ -108,6 +112,33 @@ func (ci *Frames) Next() (frame Frame, more bool) {
return frame, more
}

//go:noescape
// pcInlineCallers is written in C.
func pcInlineCallers(pc uintptr, locbuf *location, max int32) int32

// runtime_expandFinalInlineFrame expands the final pc in stk to include all
// "callers" if pc is inline.
//
//go:linkname runtime_expandFinalInlineFrame runtime..z2fpprof.runtime_expandFinalInlineFrame
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
if len(stk) == 0 {
return stk
}
pc := stk[len(stk)-1]
tracepc := pc - 1

var locbuf [_TracebackMaxFrames]location
n := pcInlineCallers(tracepc, &locbuf[0], int32(len(locbuf)))

// Returning the same PC several times causes Frame.Next to do
// the right thing.
for i := int32(1); i < n; i++ {
stk = append(stk, pc)
}

return stk
}

// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
11 libgo/go/runtime/testdata/testprog/checkptr.go (vendored)
@@ -7,18 +7,25 @@ package main
import "unsafe"

func init() {
register("CheckPtrAlignment", CheckPtrAlignment)
register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr)
register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr)
register("CheckPtrArithmetic", CheckPtrArithmetic)
register("CheckPtrSize", CheckPtrSize)
register("CheckPtrSmall", CheckPtrSmall)
}

func CheckPtrAlignment() {
func CheckPtrAlignmentNoPtr() {
var x [2]int64
p := unsafe.Pointer(&x[0])
sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1))
}

func CheckPtrAlignmentPtr() {
var x [2]int64
p := unsafe.Pointer(&x[0])
sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1))
}

func CheckPtrArithmetic() {
var x int
i := uintptr(unsafe.Pointer(&x))
@@ -73,36 +73,26 @@ type timer struct {
// timerNoStatus -> timerWaiting
// anything else -> panic: invalid value
// deltimer:
// timerWaiting -> timerDeleted
// timerWaiting -> timerModifying -> timerDeleted
// timerModifiedEarlier -> timerModifying -> timerDeleted
// timerModifiedLater -> timerDeleted
// timerModifiedLater -> timerModifying -> timerDeleted
// timerNoStatus -> do nothing
// timerDeleted -> do nothing
// timerRemoving -> do nothing
// timerRemoved -> do nothing
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
// timerModifying -> panic: concurrent deltimer/modtimer calls
// timerModifying -> wait until status changes
// modtimer:
// timerWaiting -> timerModifying -> timerModifiedXX
// timerModifiedXX -> timerModifying -> timerModifiedYY
// timerNoStatus -> timerWaiting
// timerRemoved -> timerWaiting
// timerNoStatus -> timerModifying -> timerWaiting
// timerRemoved -> timerModifying -> timerWaiting
// timerDeleted -> timerModifying -> timerModifiedXX
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
// timerRemoving -> wait until status changes
// timerDeleted -> panic: concurrent modtimer/deltimer calls
// timerModifying -> panic: concurrent modtimer calls
// resettimer:
// timerNoStatus -> timerWaiting
// timerRemoved -> timerWaiting
// timerDeleted -> timerModifying -> timerModifiedXX
// timerRemoving -> wait until status changes
// timerRunning -> wait until status changes
// timerWaiting -> panic: resettimer called on active timer
// timerMoving -> panic: resettimer called on active timer
// timerModifiedXX -> panic: resettimer called on active timer
// timerModifying -> panic: resettimer called on active timer
// timerModifying -> wait until status changes
// cleantimers (looks in P's timer heap):
// timerDeleted -> timerRemoving -> timerRemoved
// timerModifiedXX -> timerMoving -> timerWaiting
@@ -250,32 +240,24 @@ func addtimer(t *timer) {
t.when = maxWhen
}
if t.status != timerNoStatus {
badTimer()
throw("addtimer called with initialized timer")
}
t.status = timerWaiting

addInitializedTimer(t)
}

// addInitializedTimer adds an initialized timer to the current P.
func addInitializedTimer(t *timer) {
when := t.when

pp := getg().m.p.ptr()
lock(&pp.timersLock)
ok := cleantimers(pp) && doaddtimer(pp, t)
cleantimers(pp)
doaddtimer(pp, t)
unlock(&pp.timersLock)
if !ok {
badTimer()
}

wakeNetPoller(when)
}

// doaddtimer adds t to the current P's heap.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func doaddtimer(pp *p, t *timer) bool {
func doaddtimer(pp *p, t *timer) {
// Timers rely on the network poller, so make sure the poller
// has started.
if netpollInited == 0 {
@@ -288,12 +270,11 @@ func doaddtimer(pp *p, t *timer) bool {
t.pp.set(pp)
i := len(pp.timers)
pp.timers = append(pp.timers, t)
ok := siftupTimer(pp.timers, i)
siftupTimer(pp.timers, i)
if t == pp.timers[0] {
atomic.Store64(&pp.timer0When, uint64(t.when))
}
atomic.Xadd(&pp.numTimers, 1)
return ok
}

// deltimer deletes the timer t. It may be on some other P, so we can't
@@ -304,22 +285,42 @@ func deltimer(t *timer) bool {
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting, timerModifiedLater:
tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerDeleted) {
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp := acquirem()
if atomic.Cas(&t.status, s, timerModifying) {
// Must fetch t.pp before changing status,
// as cleantimers in another goroutine
// can clear t.pp of a timerDeleted timer.
tpp := t.pp.ptr()
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
} else {
releasem(mp)
}
case timerModifiedEarlier:
tpp := t.pp.ptr()
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp := acquirem()
if atomic.Cas(&t.status, s, timerModifying) {
// Must fetch t.pp before setting status
// to timerDeleted.
tpp := t.pp.ptr()
atomic.Xadd(&tpp.adjustTimers, -1)
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
} else {
releasem(mp)
}
case timerDeleted, timerRemoving, timerRemoved:
// Timer was already run.
@@ -334,7 +335,8 @@ func deltimer(t *timer) bool {
return false
case timerModifying:
// Simultaneous calls to deltimer and modtimer.
badTimer()
// Wait for the other call to complete.
osyield()
default:
badTimer()
}
@@ -345,7 +347,7 @@ func deltimer(t *timer) bool {
// We are locked on the P when this is called.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func dodeltimer(pp *p, i int) bool {
func dodeltimer(pp *p, i int) {
if t := pp.timers[i]; t.pp.ptr() != pp {
throw("dodeltimer: wrong P")
} else {
@@ -357,29 +359,23 @@ func dodeltimer(pp *p, i int) bool {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
ok := true
if i != last {
// Moving to i may have moved the last timer to a new parent,
// so sift up to preserve the heap guarantee.
if !siftupTimer(pp.timers, i) {
ok = false
}
if !siftdownTimer(pp.timers, i) {
ok = false
}
siftupTimer(pp.timers, i)
siftdownTimer(pp.timers, i)
}
if i == 0 {
updateTimer0When(pp)
}
atomic.Xadd(&pp.numTimers, -1)
return ok
}

// dodeltimer0 removes timer 0 from the current P's heap.
// We are locked on the P when this is called.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func dodeltimer0(pp *p) bool {
func dodeltimer0(pp *p) {
if t := pp.timers[0]; t.pp.ptr() != pp {
throw("dodeltimer0: wrong P")
} else {
@@ -391,13 +387,11 @@ func dodeltimer0(pp *p) bool {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
ok := true
if last > 0 {
ok = siftdownTimer(pp.timers, 0)
siftdownTimer(pp.timers, 0)
}
updateTimer0When(pp)
atomic.Xadd(&pp.numTimers, -1)
return ok
}

// modtimer modifies an existing timer.
@@ -409,30 +403,47 @@ func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg in

status := uint32(timerNoStatus)
wasRemoved := false
var mp *m
loop:
for {
switch status = atomic.Load(&t.status); status {
case timerWaiting, timerModifiedEarlier, timerModifiedLater:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()
if atomic.Cas(&t.status, status, timerModifying) {
break loop
}
releasem(mp)
case timerNoStatus, timerRemoved:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()

// Timer was already run and t is no longer in a heap.
// Act like addtimer.
if atomic.Cas(&t.status, status, timerWaiting) {
if atomic.Cas(&t.status, status, timerModifying) {
wasRemoved = true
break loop
}
releasem(mp)
case timerDeleted:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()
if atomic.Cas(&t.status, status, timerModifying) {
atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
break loop
}
releasem(mp)
case timerRunning, timerRemoving, timerMoving:
// The timer is being run or moved, by a different P.
// Wait for it to complete.
osyield()
case timerDeleted:
// Simultaneous calls to modtimer and deltimer.
badTimer()
case timerModifying:
// Multiple simultaneous calls to modtimer.
badTimer()
// Wait for the other call to complete.
osyield()
default:
badTimer()
}
@@ -445,7 +456,15 @@ loop:

if wasRemoved {
t.when = when
addInitializedTimer(t)
pp := getg().m.p.ptr()
lock(&pp.timersLock)
doaddtimer(pp, t)
unlock(&pp.timersLock)
if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
badTimer()
}
releasem(mp)
wakeNetPoller(when)
} else {
// The timer is in some other P's heap, so we can't change
// the when field. If we did, the other P's heap would
@@ -462,7 +481,6 @@ loop:
// Update the adjustTimers field. Subtract one if we
// are removing a timerModifiedEarlier, add one if we
// are adding a timerModifiedEarlier.
tpp := t.pp.ptr()
adjust := int32(0)
if status == timerModifiedEarlier {
adjust--
@@ -471,13 +489,14 @@ loop:
adjust++
}
if adjust != 0 {
atomic.Xadd(&tpp.adjustTimers, adjust)
atomic.Xadd(&t.pp.ptr().adjustTimers, adjust)
}

// Set the new status of the timer.
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
releasem(mp)

// If the new status is earlier, wake up the poller.
if newStatus == timerModifiedEarlier {
@@ -486,67 +505,22 @@ loop:
}
}

// resettimer resets an existing inactive timer to turn it into an active timer,
// with a new time for when the timer should fire.
// resettimer resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// This should be called instead of addtimer if the timer value has been,
// or may have been, used previously.
func resettimer(t *timer, when int64) {
if when < 0 {
when = maxWhen
}

for {
switch s := atomic.Load(&t.status); s {
case timerNoStatus, timerRemoved:
if atomic.Cas(&t.status, s, timerWaiting) {
t.when = when
addInitializedTimer(t)
return
}
case timerDeleted:
tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerModifying) {
t.nextwhen = when
newStatus := uint32(timerModifiedLater)
if when < t.when {
newStatus = timerModifiedEarlier
atomic.Xadd(&t.pp.ptr().adjustTimers, 1)
}
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
atomic.Xadd(&tpp.deletedTimers, -1)
if newStatus == timerModifiedEarlier {
wakeNetPoller(when)
}
return
}
case timerRemoving:
// Wait for the removal to complete.
osyield()
case timerRunning:
// Even though the timer should not be active,
// we can see timerRunning if the timer function
// permits some other goroutine to call resettimer.
// Wait until the run is complete.
osyield()
case timerWaiting, timerModifying, timerModifiedEarlier, timerModifiedLater, timerMoving:
// Called resettimer on active timer.
badTimer()
default:
badTimer()
}
}
modtimer(t, when, t.period, t.f, t.arg, t.seq)
}

// cleantimers cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down addtimer. Reports whether no timer problems were found.
// The caller must have locked the timers for pp.
func cleantimers(pp *p) bool {
func cleantimers(pp *p) {
for {
if len(pp.timers) == 0 {
return true
return
}
t := pp.timers[0]
if t.pp.ptr() != pp {
@@ -557,11 +531,9 @@ func cleantimers(pp *p) bool {
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
if !dodeltimer0(pp) {
return false
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
return false
badTimer()
}
atomic.Xadd(&pp.deletedTimers, -1)
case timerModifiedEarlier, timerModifiedLater:
@@ -571,21 +543,17 @@ func cleantimers(pp *p) bool {
// Now we can change the when field.
t.when = t.nextwhen
// Move t to the right position.
if !dodeltimer0(pp) {
return false
}
if !doaddtimer(pp, t) {
return false
}
dodeltimer0(pp)
doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
return false
badTimer()
}
default:
// Head of timers does not need adjustment.
return true
return
}
}
}
@@ -601,9 +569,7 @@ func moveTimers(pp *p, timers []*timer) {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
t.pp = 0
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
break loop
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
@@ -611,9 +577,7 @@ func moveTimers(pp *p, timers []*timer) {
}
t.when = t.nextwhen
t.pp = 0
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
@ -667,9 +631,7 @@ loop:
|
||||
switch s := atomic.Load(&t.status); s {
|
||||
case timerDeleted:
|
||||
if atomic.Cas(&t.status, s, timerRemoving) {
|
||||
if !dodeltimer(pp, i) {
|
||||
badTimer()
|
||||
}
|
||||
dodeltimer(pp, i)
|
||||
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
|
||||
badTimer()
|
||||
}
|
||||
@ -685,9 +647,7 @@ loop:
|
||||
// We don't add it back yet because the
|
||||
// heap manipulation could cause our
|
||||
// loop to skip some other timer.
|
||||
if !dodeltimer(pp, i) {
|
||||
badTimer()
|
||||
}
|
||||
dodeltimer(pp, i)
|
||||
moved = append(moved, t)
|
||||
if s == timerModifiedEarlier {
|
||||
if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 {
|
||||
@ -723,9 +683,7 @@ loop:
// back to the timer heap.
func addAdjustedTimers(pp *p, moved []*timer) {
for _, t := range moved {
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
@ -779,9 +737,7 @@ func runtimer(pp *p, now int64) int64 {
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
if !dodeltimer0(pp) {
badTimer()
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
@ -795,12 +751,8 @@ func runtimer(pp *p, now int64) int64 {
continue
}
t.when = t.nextwhen
if !dodeltimer0(pp) {
badTimer()
}
if !doaddtimer(pp, t) {
badTimer()
}
dodeltimer0(pp)
doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
@ -838,18 +790,14 @@ func runOneTimer(pp *p, t *timer, now int64) {
// Leave in heap but adjust next time to fire.
delta := t.when - now
t.when += t.period * (1 + -delta/t.period)
if !siftdownTimer(pp.timers, 0) {
badTimer()
}
siftdownTimer(pp.timers, 0)
if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
badTimer()
}
updateTimer0When(pp)
} else {
// Remove from heap.
if !dodeltimer0(pp) {
badTimer()
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
badTimer()
}
@ -1053,9 +1001,9 @@ func timeSleepUntil() (int64, *p) {
// "panic holding locks" message. Instead, we panic while not
// holding a lock.

func siftupTimer(t []*timer, i int) bool {
func siftupTimer(t []*timer, i int) {
if i >= len(t) {
return false
badTimer()
}
when := t[i].when
tmp := t[i]
@ -1070,13 +1018,12 @@ func siftupTimer(t []*timer, i int) bool {
if tmp != t[i] {
t[i] = tmp
}
return true
}

func siftdownTimer(t []*timer, i int) bool {
func siftdownTimer(t []*timer, i int) {
n := len(t)
if i >= n {
return false
badTimer()
}
when := t[i].when
tmp := t[i]
@ -1111,7 +1058,6 @@ func siftdownTimer(t []*timer, i int) bool {
if tmp != t[i] {
t[i] = tmp
}
return true
}

// badTimer is called if the timer data structures have been corrupted,
@ -1119,5 +1065,5 @@ func siftdownTimer(t []*timer, i int) bool {
// panicing due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
panic(errorString("racy use of timers"))
throw("timer data corruption")
}

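The runtime/time.go hunks above all apply one simplification: helpers such as siftupTimer, siftdownTimer, dodeltimer0 and doaddtimer used to report heap corruption through a bool that every caller had to check; after this change the checks live inside the helpers, which call badTimer (now a throw rather than a panic) directly. A standalone sketch of that pattern follows, with illustrative names only, not the runtime source, assuming the same quaternary heap layout as runtime/time.go:

package main

import "fmt"

type timer struct{ when int64 }

// badTimer stands in for the runtime's fatal path; the real code calls throw.
func badTimer() {
    panic("timer data corruption")
}

// siftupTimer, new style: no bool result. It validates its argument and fails
// hard on corruption instead of returning false for every caller to check.
func siftupTimer(t []*timer, i int) {
    if i >= len(t) {
        badTimer()
    }
    for i > 0 {
        p := (i - 1) / 4 // parent in a 4-ary heap
        if t[i].when >= t[p].when {
            break
        }
        t[i], t[p] = t[p], t[i]
        i = p
    }
}

func main() {
    h := []*timer{{when: 5}, {when: 3}}
    siftupTimer(h, 1)
    fmt.Println(h[0].when, h[1].when) // 3 5
}
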
@ -963,16 +963,15 @@ func tRunner(t *T, fn func(t *T)) {
t.Logf("cleanup panicked with %v", r)
}
// Flush the output log up to the root before dying.
t.mu.Lock()
root := &t.common
for ; root.parent != nil; root = root.parent {
for root := &t.common; root.parent != nil; root = root.parent {
root.mu.Lock()
root.duration += time.Since(root.start)
fmt.Fprintf(root.parent.w, "--- FAIL: %s (%s)\n", root.name, fmtDuration(root.duration))
d := root.duration
root.mu.Unlock()
root.flushToParent("--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
}
root.parent.mu.Lock()
io.Copy(root.parent.w, bytes.NewReader(root.output))
}
panic(err)
}

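The tRunner hunk above replaces the ad-hoc Fprintf/io.Copy flushing with per-node locking plus flushToParent: each node's fields are read and updated under its own mutex, the lock is released, and only then is the failure line written. A minimal sketch of that lock-then-write ordering, using illustrative types rather than the testing package itself:

package main

import (
    "fmt"
    "os"
    "sync"
    "time"
)

type node struct {
    mu       sync.Mutex
    name     string
    start    time.Time
    duration time.Duration
}

// flushFailure updates the duration under the lock, copies what it needs,
// unlocks, and performs the slow write afterwards so no I/O runs while the
// mutex is held.
func (n *node) flushFailure() {
    n.mu.Lock()
    n.duration += time.Since(n.start)
    d := n.duration
    n.mu.Unlock()
    fmt.Fprintf(os.Stdout, "--- FAIL: %s (%s)\n", n.name, d)
}

func main() {
    n := &node{name: "TestExample", start: time.Now()}
    n.flushFailure()
}
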
@ -9,7 +9,6 @@ import (
"encoding/gob"
"encoding/json"
"fmt"
"internal/race"
"math/big"
"math/rand"
"os"
@ -1393,23 +1392,11 @@ func TestReadFileLimit(t *testing.T) {
}

// Issue 25686: hard crash on concurrent timer access.
// Issue 37400: panic with "racy use of timers"
// This test deliberately invokes a race condition.
// We are testing that we don't crash with "fatal error: panic holding locks".
// We are testing that we don't crash with "fatal error: panic holding locks",
// and that we also don't panic.
func TestConcurrentTimerReset(t *testing.T) {
if race.Enabled {
t.Skip("skipping test under race detector")
}

// We expect this code to panic rather than crash.
// Don't worry if it doesn't panic.
catch := func(i int) {
if e := recover(); e != nil {
t.Logf("panic in goroutine %d, as expected, with %q", i, e)
} else {
t.Logf("no panic in goroutine %d", i)
}
}

const goroutines = 8
const tries = 1000
var wg sync.WaitGroup
@ -1418,7 +1405,6 @@ func TestConcurrentTimerReset(t *testing.T) {
for i := 0; i < goroutines; i++ {
go func(i int) {
defer wg.Done()
defer catch(i)
for j := 0; j < tries; j++ {
timer.Reset(Hour + Duration(i*j))
}
@ -1426,3 +1412,25 @@ func TestConcurrentTimerReset(t *testing.T) {
}
wg.Wait()
}

// Issue 37400: panic with "racy use of timers".
func TestConcurrentTimerResetStop(t *testing.T) {
const goroutines = 8
const tries = 1000
var wg sync.WaitGroup
wg.Add(goroutines * 2)
timer := NewTimer(Hour)
for i := 0; i < goroutines; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < tries; j++ {
timer.Reset(Hour + Duration(i*j))
}
}(i)
go func(i int) {
defer wg.Done()
timer.Stop()
}(i)
}
wg.Wait()
}

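TestConcurrentTimerResetStop above races Reset against Stop on purpose to confirm the runtime no longer panics or crashes. Regular code should still reuse a Timer the way the time package documents it; a minimal runnable sketch of that pattern:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTimer(time.Hour)

    // Stop the timer and, if it had already fired, drain the channel
    // before Reset, as the time.Timer documentation recommends.
    if !t.Stop() {
        <-t.C
    }
    t.Reset(10 * time.Millisecond)

    fmt.Println(<-t.C)
}
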
@ -123,6 +123,7 @@ internal/poll
internal/race
internal/reflectlite
internal/singleflight
internal/syscall/execenv
internal/syscall/unix
internal/testenv
internal/testlog

33 libgo/misc/cgo/testgodefs/testdata/issue37479.go (vendored, new file)
@ -0,0 +1,33 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build ignore

package main

/*
typedef struct A A;

typedef struct {
struct A *next;
struct A **prev;
} N;

struct A
{
N n;
};

typedef struct B
{
A* a;
} B;
*/
import "C"

type N C.N

type A C.A

type B C.B

23 libgo/misc/cgo/testgodefs/testdata/issue37621.go (vendored, new file)
@ -0,0 +1,23 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build ignore

package main

/*
struct tt {
long long a;
long long b;
};

struct s {
struct tt ts[3];
};
*/
import "C"

type TT C.struct_tt

type S C.struct_s

8 libgo/misc/cgo/testgodefs/testdata/main.go (vendored)
@ -11,5 +11,13 @@ var v2 = v1.L
// Test that P, Q, and R all point to byte.
var v3 = Issue8478{P: (*byte)(nil), Q: (**byte)(nil), R: (***byte)(nil)}

// Test that N, A and B are fully defined
var v4 = N{}
var v5 = A{}
var v6 = B{}

// Test that S is fully defined
var v7 = S{}

func main() {
}

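The testdata files above are inputs for cgo's -godefs mode, which the test drives via go tool cgo -godefs to turn the C.xxx aliases into plain Go definitions. Roughly, and only as an illustration rather than the checked-in generated output, the issue37621.go input comes out shaped like this:

package main

import "fmt"

// Approximate shape of the -godefs output for issue37621.go: the C structs
// become plain Go structs, with long long mapped to int64 and field names
// exported. Exact names and padding are the tool's business.
type TT struct {
    A int64
    B int64
}

type S struct {
    Ts [3]TT
}

func main() {
    var v S
    v.Ts[0] = TT{A: 1, B: 2}
    fmt.Println(v)
}
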
@ -21,6 +21,8 @@ var filePrefixes = []string{
"anonunion",
"issue8478",
"fieldtypedef",
"issue37479",
"issue37621",
}

func TestGoDefs(t *testing.T) {

@ -364,3 +364,39 @@ runtime_callersRaw (uintptr *pcbuf, int32 m)
return data.index;
}

/* runtime_pcInlineCallers returns the inline stack of calls for a PC.
This is like runtime_callers, but instead of doing a backtrace,
just finds the information for a single PC value. */

int32 runtime_pcInlineCallers (uintptr, Location *, int32)
__asm__ (GOSYM_PREFIX "runtime.pcInlineCallers");

int32
runtime_pcInlineCallers (uintptr pc, Location *locbuf, int32 m)
{
struct callers_data data;
struct backtrace_state *state;
int32 i;

data.locbuf = locbuf;
data.skip = 0;
data.index = 0;
data.max = m;
data.keep_thunks = false;
data.saw_sigtramp = 0;
runtime_xadd (&__go_runtime_in_callers, 1);
state = __go_get_backtrace_state ();
backtrace_pcinfo (state, pc, callback, error_callback, &data);
runtime_xadd (&__go_runtime_in_callers, -1);

/* Try to use backtrace_syminfo to fill in missing names. See
runtime_callers. */
for (i = 0; i < data.index; ++i)
{
if (locbuf[i].function.len == 0 && locbuf[i].pc != 0)
backtrace_syminfo (state, locbuf[i].pc, __go_syminfo_fnname_callback,
error_callback, &locbuf[i].function);
}

return data.index;
}