libgo: update to Go 1.15.3 release

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/265717

parent 2b3e722a3c
commit 668894d7b5
@@ -1,4 +1,4 @@
-957591b8a054b692d92203a2420851689875f9c5
+be0d2cc2df9f98d967c242594838f86362dae2e7
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
@@ -1,4 +1,4 @@
-9706f510a5e2754595d716bd64be8375997311fb
+1984ee00048b63eacd2155cd6d74a2d13e998272
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
@@ -1 +1 @@
-go1.15.2
+go1.15.3
@@ -227,19 +227,26 @@ func IndexAny(s []byte, chars string) int {
             continue
         }
         r, width = utf8.DecodeRune(s[i:])
-        if r == utf8.RuneError {
-            for _, r = range chars {
-                if r == utf8.RuneError {
+        if r != utf8.RuneError {
+            // r is 2 to 4 bytes
+            if len(chars) == width {
+                if chars == string(r) {
+                    return i
+                }
+                continue
+            }
+            // Use bytealg.IndexString for performance if available.
+            if bytealg.MaxLen >= width {
+                if bytealg.IndexString(chars, string(r)) >= 0 {
                     return i
                 }
+                continue
             }
-            continue
         }
-        // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes
-        // package should not import the strings package, use bytealg.IndexString
-        // instead. And this does not seem to lose much performance.
-        if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 {
-            return i
+        for _, ch := range chars {
+            if r == ch {
+                return i
+            }
         }
     }
     return -1
@@ -304,19 +311,26 @@ func LastIndexAny(s []byte, chars string) int {
         }
         r, size := utf8.DecodeLastRune(s[:i])
         i -= size
-        if r == utf8.RuneError {
-            for _, r = range chars {
-                if r == utf8.RuneError {
+        if r != utf8.RuneError {
+            // r is 2 to 4 bytes
+            if len(chars) == size {
+                if chars == string(r) {
+                    return i
+                }
+                continue
+            }
+            // Use bytealg.IndexString for performance if available.
+            if bytealg.MaxLen >= size {
+                if bytealg.IndexString(chars, string(r)) >= 0 {
                     return i
                 }
+                continue
             }
-            continue
         }
-        // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes
-        // package should not import the strings package, use bytealg.IndexString
-        // instead. And this does not seem to lose much performance.
-        if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 {
-            return i
+        for _, ch := range chars {
+            if r == ch {
+                return i
+            }
         }
     }
     return -1
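Both hunks replace the unconditional bytealg.IndexString call with guarded fast paths and a plain rune-by-rune fallback, so invalid UTF-8 in s is compared against chars directly instead of being routed through IndexString. A small standalone sketch of the resulting behavior (index values in the comments are byte offsets; nothing beyond the standard library is assumed):

package main

import (
    "bytes"
    "fmt"
)

func main() {
    // A valid multi-byte rune takes the fast paths: direct comparison when
    // len(chars) matches the rune's encoded width, bytealg.IndexString when
    // the platform provides it, and a linear scan over chars otherwise.
    fmt.Println(bytes.IndexAny([]byte("héllo, wörld"), "öü")) // 9

    // An invalid byte decodes as utf8.RuneError; the rewritten fallback
    // compares it rune-by-rune against chars, so a literal U+FFFD in chars
    // matches the bad byte and nothing else does.
    fmt.Println(bytes.IndexAny([]byte("a\xffb"), "\ufffd")) // 1
    fmt.Println(bytes.IndexAny([]byte("a\xffb"), "xyz"))    // -1
}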
@@ -2459,6 +2459,18 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
         tt := *t
         tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
         tt.Go = c.Ident("struct{}")
+        if dt.Kind == "struct" {
+            // We don't know what the representation of this struct is, so don't let
+            // anyone allocate one on the Go side. As a side effect of this annotation,
+            // pointers to this type will not be considered pointers in Go. They won't
+            // get writebarrier-ed or adjusted during a stack copy. This should handle
+            // all the cases badPointerTypedef used to handle, but hopefully will
+            // continue to work going forward without any more need for cgo changes.
+            tt.NotInHeap = true
+            // TODO: we should probably do the same for unions. Unions can't live
+            // on the Go heap, right? It currently doesn't work for unions because
+            // they are defined as a type alias for struct{}, not a defined type.
+        }
         typedef[name.Name] = &tt
         break
     }
@@ -2529,6 +2541,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
     }
     t.Go = name
     t.BadPointer = sub.BadPointer
+    t.NotInHeap = sub.NotInHeap
     if unionWithPointer[sub.Go] {
         unionWithPointer[t.Go] = true
     }
@@ -2539,6 +2552,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
         tt := *t
         tt.Go = sub.Go
         tt.BadPointer = sub.BadPointer
+        tt.NotInHeap = sub.NotInHeap
         typedef[name.Name] = &tt
     }
@@ -3047,6 +3061,7 @@ func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
 // non-pointers in this type.
 // TODO: Currently our best solution is to find these manually and list them as
 // they come up. A better solution is desired.
+// Note: DEPRECATED. There is now a better solution. Search for NotInHeap in this file.
 func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool {
     if c.badCFType(dt) {
         return true
@@ -151,7 +151,8 @@ type Type struct {
     Go         ast.Expr
     EnumValues map[string]int64
     Typedef    string
-    BadPointer bool
+    BadPointer bool // this pointer type should be represented as a uintptr (deprecated)
+    NotInHeap  bool // this type should have a go:notinheap annotation
 }
 
 // A FuncType collects information about a function type in both the C and Go worlds.
@@ -113,6 +113,9 @@ func (p *Package) writeDefs() {
     sort.Strings(typedefNames)
     for _, name := range typedefNames {
         def := typedef[name]
+        if def.NotInHeap {
+            fmt.Fprintf(fgo2, "//go:notinheap\n")
+        }
         fmt.Fprintf(fgo2, "type %s ", name)
         // We don't have source info for these types, so write them out without source info.
         // Otherwise types would look like:
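Taken together, the cgo hunks mean an incomplete struct typedef now produces a placeholder type that can never be allocated from Go. A sketch of the generated output for a hypothetical typedef (illustrative only, not verbatim cgo output; the _Ctype_ naming follows cgo's usual scheme):

// For C source containing:
//
//    typedef struct S S;
//
// cgo now emits roughly:
//
//    //go:notinheap
//    type _Ctype_struct_S struct{}
//
// Because the type is notinheap, Go code cannot allocate values of it,
// and the GC, write barrier, and stack copier all ignore pointers to it,
// which is the property the badPointerTypedef list used to approximate.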
@@ -153,3 +153,20 @@ func SetFromGOFLAGS(flags *flag.FlagSet) {
         }
     }
 }
+
+// InGOFLAGS returns whether GOFLAGS contains the given flag, such as "-mod".
+func InGOFLAGS(flag string) bool {
+    for _, goflag := range GOFLAGS() {
+        name := goflag
+        if strings.HasPrefix(name, "--") {
+            name = name[1:]
+        }
+        if i := strings.Index(name, "="); i >= 0 {
+            name = name[:i]
+        }
+        if name == flag {
+            return true
+        }
+    }
+    return false
+}
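The helper normalizes each GOFLAGS entry before comparing: one leading dash is stripped from "--flag" forms and any "=value" suffix is cut. A standalone re-implementation for illustration (the real function reads the environment via base.GOFLAGS(); this sketch takes the slice as a parameter):

package main

import (
    "fmt"
    "strings"
)

// inGoflags mirrors the parsing in base.InGOFLAGS.
func inGoflags(goflags []string, flag string) bool {
    for _, goflag := range goflags {
        name := goflag
        if strings.HasPrefix(name, "--") {
            name = name[1:] // "--mod" and "-mod" are the same flag
        }
        if i := strings.Index(name, "="); i >= 0 {
            name = name[:i] // drop any "=value" suffix
        }
        if name == flag {
            return true
        }
    }
    return false
}

func main() {
    flags := []string{"--mod=vendor", "-v"}
    fmt.Println(inGoflags(flags, "-mod")) // true: "--mod=vendor" normalizes to "-mod"
    fmt.Println(inGoflags(flags, "-m"))   // false: prefix overlap is not a match
}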
@@ -53,7 +53,14 @@ var (
 
 func runVersion(cmd *base.Command, args []string) {
     if len(args) == 0 {
-        if *versionM || *versionV {
+        // If any of this command's flags were passed explicitly, error
+        // out, because they only make sense with arguments.
+        //
+        // Don't error if the flags came from GOFLAGS, since that can be
+        // a reasonable use case. For example, imagine GOFLAGS=-v to
+        // turn "verbose mode" on for all Go commands, which should not
+        // break "go version".
+        if (!base.InGOFLAGS("-m") && *versionM) || (!base.InGOFLAGS("-v") && *versionV) {
             fmt.Fprintf(os.Stderr, "go version: flags can only be used with arguments\n")
             base.SetExitStatus(2)
             return
@@ -254,34 +254,18 @@ func buildModeInit() {
     case "":
         // ok
     case "readonly", "vendor", "mod":
-        if !cfg.ModulesEnabled && !inGOFLAGS("-mod") {
+        if !cfg.ModulesEnabled && !base.InGOFLAGS("-mod") {
             base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod)
         }
     default:
         base.Fatalf("-mod=%s not supported (can be '', 'mod', 'readonly', or 'vendor')", cfg.BuildMod)
     }
     if !cfg.ModulesEnabled {
-        if cfg.ModCacheRW && !inGOFLAGS("-modcacherw") {
+        if cfg.ModCacheRW && !base.InGOFLAGS("-modcacherw") {
             base.Fatalf("build flag -modcacherw only valid when using modules")
         }
-        if cfg.ModFile != "" && !inGOFLAGS("-mod") {
+        if cfg.ModFile != "" && !base.InGOFLAGS("-mod") {
             base.Fatalf("build flag -modfile only valid when using modules")
         }
     }
 }
-
-func inGOFLAGS(flag string) bool {
-    for _, goflag := range base.GOFLAGS() {
-        name := goflag
-        if strings.HasPrefix(name, "--") {
-            name = name[1:]
-        }
-        if i := strings.Index(name, "="); i >= 0 {
-            name = name[:i]
-        }
-        if name == flag {
-            return true
-        }
-    }
-    return false
-}
libgo/go/cmd/go/testdata/script/version.txt (vendored, 6 lines changed)
@@ -9,6 +9,12 @@ stderr 'with arguments'
 ! go version -v
 stderr 'with arguments'
 
+# Neither of the two flags above should be an issue via GOFLAGS.
+env GOFLAGS='-m -v'
+go version
+stdout '^go version'
+env GOFLAGS=
+
 env GO111MODULE=on
 # Skip the builds below if we are running in short mode.
 [short] skip
@@ -17,42 +17,8 @@ func Index(a, b []byte) int {
 
 // IndexString returns the index of the first instance of b in a, or -1 if b is not present in a.
 // Requires 2 <= len(b) <= MaxLen.
-func IndexString(s, substr string) int {
-    // This is a partial copy of strings.Index, here because bytes.IndexAny and bytes.LastIndexAny
-    // call bytealg.IndexString. Some platforms have an optimized assembly version of this function.
-    // This implementation is used for those that do not. Although the pure Go implementation here
-    // works for the case of len(b) > MaxLen, we do not require that its assembly implementation also
-    // supports the case of len(b) > MaxLen. And we do not guarantee that this function supports the
-    // case of len(b) > MaxLen.
-    n := len(substr)
-    c0 := substr[0]
-    c1 := substr[1]
-    i := 0
-    t := len(s) - n + 1
-    fails := 0
-    for i < t {
-        if s[i] != c0 {
-            o := IndexByteString(s[i:t], c0)
-            if o < 0 {
-                return -1
-            }
-            i += o
-        }
-        if s[i+1] == c1 && s[i:i+n] == substr {
-            return i
-        }
-        i++
-        fails++
-        if fails >= 4+i>>4 && i < t {
-            // See comment in src/bytes/bytes.go.
-            j := IndexRabinKarp(s[i:], substr)
-            if j < 0 {
-                return -1
-            }
-            return i + j
-        }
-    }
-    return -1
+func IndexString(a, b string) int {
+    panic("unimplemented")
 }
 
 // Cutover reports the number of failures of IndexByte we should tolerate
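Reverting to the upstream panic body is safe because, after the bytes changes above, every caller checks bytealg.MaxLen before calling IndexString, and MaxLen is 0 on builds without an assembly implementation. A standalone illustration of that guard (hypothetical names; deliberately not the real internal/bytealg):

package main

import "fmt"

const maxLen = 0 // analog of bytealg.MaxLen on a generic build

func indexString(a, b string) int {
    panic("unimplemented") // unreachable while maxLen == 0
}

func findSubstring(chars, r string) int {
    if maxLen >= len(r) {
        return indexString(chars, r) // only taken with assembly support
    }
    // Generic builds fall back to a plain scan.
    for i := 0; i+len(r) <= len(chars); i++ {
        if chars[i:i+len(r)] == r {
            return i
        }
    }
    return -1
}

func main() {
    fmt.Println(findSubstring("äö", "ö")) // 2 (byte offset), no panic
}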
@@ -152,7 +152,7 @@ func (fd *FD) Read(p []byte) (int, error) {
         p = p[:maxRW]
     }
     for {
-        n, err := ignoringEINTR(syscall.Read, fd.Sysfd, p)
+        n, err := ignoringEINTR(func() (int, error) { return syscall.Read(fd.Sysfd, p) })
         if err != nil {
             n = 0
             if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -264,7 +264,7 @@ func (fd *FD) Write(p []byte) (int, error) {
         if fd.IsStream && max-nn > maxRW {
             max = nn + maxRW
         }
-        n, err := ignoringEINTR(syscall.Write, fd.Sysfd, p[nn:max])
+        n, err := ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p[nn:max]) })
         if n > 0 {
             nn += n
         }
@@ -423,7 +423,7 @@ func (fd *FD) ReadDirent(buf []byte) (int, error) {
     }
     defer fd.decref()
     for {
-        n, err := ignoringEINTR(syscall.ReadDirent, fd.Sysfd, buf)
+        n, err := ignoringEINTR(func() (int, error) { return syscall.ReadDirent(fd.Sysfd, buf) })
         if err != nil {
             n = 0
             if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -514,7 +514,7 @@ func (fd *FD) WriteOnce(p []byte) (int, error) {
         return 0, err
     }
     defer fd.writeUnlock()
-    return ignoringEINTR(syscall.Write, fd.Sysfd, p)
+    return ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p) })
 }
 
 // RawRead invokes the user-defined function f for a read operation.
@@ -562,9 +562,9 @@ func (fd *FD) RawWrite(f func(uintptr) bool) error {
 // installed without setting SA_RESTART. None of these are the common case,
 // but there are enough of them that it seems that we can't avoid
 // an EINTR loop.
-func ignoringEINTR(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
+func ignoringEINTR(fn func() (int, error)) (int, error) {
     for {
-        n, err := fn(fd, p)
+        n, err := fn()
         if err != syscall.EINTR {
             return n, err
         }
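The refactoring changes ignoringEINTR from a (function, fd, buffer) triple to a single closure, so one helper serves Read, Write on a sub-slice, and ReadDirent without threading their differing arguments through. A minimal runnable sketch of the new shape (the simulated syscall is hypothetical):

package main

import (
    "fmt"
    "syscall"
)

// ignoringEINTR, as reshaped above: retry fn while it fails with EINTR.
func ignoringEINTR(fn func() (int, error)) (int, error) {
    for {
        n, err := fn()
        if err != syscall.EINTR {
            return n, err
        }
    }
}

func main() {
    calls := 0
    n, err := ignoringEINTR(func() (int, error) {
        calls++
        if calls < 3 {
            return 0, syscall.EINTR // simulate an interrupted syscall
        }
        return 42, nil
    })
    fmt.Println(n, err, calls) // 42 <nil> 3
}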
@@ -267,6 +267,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
     gp.waiting = mysg
     gp.param = nil
     c.sendq.enqueue(mysg)
+    // Signal to anyone trying to shrink our stack that we're about
+    // to park on a channel. The window between when this G's status
+    // changes and when we set gp.activeStackChans is not safe for
+    // stack shrinking.
+    atomic.Store8(&gp.parkingOnChan, 1)
     gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
     // Ensure the value being sent is kept alive until the
     // receiver copies it out. The sudog has a pointer to the
@@ -586,6 +591,11 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
     mysg.c = c
     gp.param = nil
     c.recvq.enqueue(mysg)
+    // Signal to anyone trying to shrink our stack that we're about
+    // to park on a channel. The window between when this G's status
+    // changes and when we set gp.activeStackChans is not safe for
+    // stack shrinking.
+    atomic.Store8(&gp.parkingOnChan, 1)
     gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
 
     // someone woke us up
@@ -663,7 +673,19 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
 func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
     // There are unlocked sudogs that point into gp's stack. Stack
     // copying must lock the channels of those sudogs.
+    // Set activeStackChans here instead of before we try parking
+    // because we could self-deadlock in stack growth on the
+    // channel lock.
     gp.activeStackChans = true
+    // Mark that it's safe for stack shrinking to occur now,
+    // because any thread acquiring this G's stack for shrinking
+    // is guaranteed to observe activeStackChans after this store.
+    atomic.Store8(&gp.parkingOnChan, 0)
+    // Make sure we unlock after setting activeStackChans and
+    // unsetting parkingOnChan. The moment we unlock chanLock
+    // we risk gp getting readied by a channel operation and
+    // so gp could continue running before everything before
+    // the unlock is visible (even to gp itself).
     unlock((*mutex)(chanLock))
     return true
 }
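The protocol is: raise parkingOnChan before the sudog becomes visible, publish activeStackChans in the park commit, then lower the flag, so any thread that reads parkingOnChan == 0 is guaranteed to observe activeStackChans. A toy, single-goroutine analog of the ordering (hypothetical names; deliberately not the runtime's code):

package main

import (
    "fmt"
    "sync/atomic"
)

type gState struct {
    parkingOnChan    uint32 // analog of gp.parkingOnChan
    activeStackChans bool   // analog of gp.activeStackChans
}

func park(g *gState, enqueue func()) {
    atomic.StoreUint32(&g.parkingOnChan, 1) // unsafe window opens
    enqueue()                               // sudog becomes visible to other threads
    g.activeStackChans = true               // park commit publishes this...
    atomic.StoreUint32(&g.parkingOnChan, 0) // ...before the window closes
}

func safeToShrink(g *gState) bool {
    // Mid-park (flag raised) means sudog pointers may still move.
    return atomic.LoadUint32(&g.parkingOnChan) == 0
}

func main() {
    g := &gState{}
    park(g, func() {})
    fmt.Println(safeToShrink(g), g.activeStackChans) // true true
}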
@@ -628,6 +628,62 @@ func TestShrinkStackDuringBlockedSend(t *testing.T) {
     <-done
 }
 
+func TestNoShrinkStackWhileParking(t *testing.T) {
+    // The goal of this test is to trigger a "racy sudog adjustment"
+    // throw. Basically, there's a window between when a goroutine
+    // becomes available for preemption for stack scanning (and thus,
+    // stack shrinking) but before the goroutine has fully parked on a
+    // channel. See issue 40641 for more details on the problem.
+    //
+    // The way we try to induce this failure is to set up two
+    // goroutines: a sender and a reciever that communicate across
+    // a channel. We try to set up a situation where the sender
+    // grows its stack temporarily then *fully* blocks on a channel
+    // often. Meanwhile a GC is triggered so that we try to get a
+    // mark worker to shrink the sender's stack and race with the
+    // sender parking.
+    //
+    // Unfortunately the race window here is so small that we
+    // either need a ridiculous number of iterations, or we add
+    // "usleep(1000)" to park_m, just before the unlockf call.
+    const n = 10
+    send := func(c chan<- int, done chan struct{}) {
+        for i := 0; i < n; i++ {
+            c <- i
+            // Use lots of stack briefly so that
+            // the GC is going to want to shrink us
+            // when it scans us. Make sure not to
+            // do any function calls otherwise
+            // in order to avoid us shrinking ourselves
+            // when we're preempted.
+            stackGrowthRecursive(20)
+        }
+        done <- struct{}{}
+    }
+    recv := func(c <-chan int, done chan struct{}) {
+        for i := 0; i < n; i++ {
+            // Sleep here so that the sender always
+            // fully blocks.
+            time.Sleep(10 * time.Microsecond)
+            <-c
+        }
+        done <- struct{}{}
+    }
+    for i := 0; i < n*20; i++ {
+        c := make(chan int)
+        done := make(chan struct{})
+        go recv(c, done)
+        go send(c, done)
+        // Wait a little bit before triggering
+        // the GC to make sure the sender and
+        // reciever have gotten into their groove.
+        time.Sleep(50 * time.Microsecond)
+        runtime.GC()
+        <-done
+        <-done
+    }
+}
+
 func TestSelectDuplicateChannel(t *testing.T) {
     // This test makes sure we can queue a G on
     // the same channel multiple times.
@@ -355,7 +355,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
     }
 
     for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-        pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+        chunk := mheap_.pages.tryChunkOf(i)
+        if chunk == nil {
+            continue
+        }
+        pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
         slow.HeapReleased += uint64(pg) * pageSize
     }
     for _, p := range allp {
@@ -752,11 +756,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
     ci := chunkIdx(i)
-    l2 := (*pageAlloc)(p).chunks[ci.l1()]
-    if l2 == nil {
-        return nil
-    }
-    return (*PallocData)(&l2[ci.l2()])
+    return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }
 
 // AddrRange represents a range over addresses.
@@ -896,7 +896,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
     lock(&mheap_.lock)
 chunkLoop:
     for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-        chunk := mheap_.pages.chunkOf(i)
+        chunk := mheap_.pages.tryChunkOf(i)
+        if chunk == nil {
+            continue
+        }
         for j := 0; j < pallocChunkPages/64; j++ {
             // Run over each 64-bit bitmap section and ensure
             // scavenged is being cleared properly on allocation.
@@ -977,10 +980,9 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
 }
 
 func MSpanCountAlloc(bits []byte) int {
-    s := mspan{
-        nelems:     uintptr(len(bits) * 8),
-        gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])),
-    }
+    s := (*mspan)(mheap_.spanalloc.alloc())
+    s.nelems = uintptr(len(bits) * 8)
+    s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
     return s.countAlloc()
 }
@@ -220,3 +220,13 @@ func TestBitwiseContended(t *testing.T) {
         }
     }
 }
+
+func TestStorepNoWB(t *testing.T) {
+    var p [2]*int
+    for i := range p {
+        atomic.StorepNoWB(unsafe.Pointer(&p[i]), unsafe.Pointer(new(int)))
+    }
+    if p[0] == p[1] {
+        t.Error("Bad escape analysis of StorepNoWB")
+    }
+}
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
     s.scav.scavLWM = maxSearchAddr
 }
 
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+    l2 := s.chunks[ci.l1()]
+    if l2 == nil {
+        return nil
+    }
+    return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
 //
 // The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
     return &s.chunks[ci.l1()][ci.l2()]
 }
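tryChunkOf is the nil-safe sibling of chunkOf: the chunks map is a two-level sparse array whose level-2 blocks may be unmapped. A runnable analog of the access pattern (toy sizes, hypothetical names; not the runtime's types):

package main

import "fmt"

// sparse mimics pageAlloc.chunks: level-1 slots point to level-2 blocks
// that may be nil until mapped.
type sparse struct {
    l1 [4]*[4]int
}

// tryGet mirrors tryChunkOf: returns nil for unmapped blocks.
func (s *sparse) tryGet(i int) *int {
    l2 := s.l1[i/4]
    if l2 == nil {
        return nil
    }
    return &l2[i%4]
}

func main() {
    var s sparse
    s.l1[0] = &[4]int{10, 11, 12, 13}
    for i := 0; i < 8; i++ {
        if p := s.tryGet(i); p != nil {
            fmt.Println(i, *p)
        }
        // Indices 4-7 fall in a nil block and are skipped, which is how
        // ReadMemStatsSlow and CheckScavengedBitsCleared now iterate.
    }
}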
@@ -529,9 +529,17 @@ func BenchmarkPingPongHog(b *testing.B) {
     <-done
 }
 
+var padData [128]uint64
+
 func stackGrowthRecursive(i int) {
     var pad [128]uint64
-    if i != 0 && pad[0] == 0 {
+    pad = padData
+    for j := range pad {
+        if pad[j] != 0 {
+            return
+        }
+    }
+    if i != 0 {
         stackGrowthRecursive(i - 1)
     }
 }
@@ -450,6 +450,10 @@ type g struct {
     // copying needs to acquire channel locks to protect these
     // areas of the stack.
     activeStackChans bool
+    // parkingOnChan indicates that the goroutine is about to
+    // park on a chansend or chanrecv. Used to signal an unsafe point
+    // for stack shrinking. It's a boolean value, but is updated atomically.
+    parkingOnChan uint8
 
     raceignore     int8 // ignore race detection events
     sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
@@ -940,11 +944,6 @@ type _defer struct {
 
 // panics
 // This is the gccgo version.
-//
-// This is marked go:notinheap because _panic values must only ever
-// live on the stack.
-//
-//go:notinheap
 type _panic struct {
     // The next entry in the stack.
     link *_panic
@@ -7,6 +7,7 @@ package runtime
 // This file contains the implementation of Go select statements.
 
 import (
+    "runtime/internal/atomic"
     "unsafe"
 )
 
@@ -72,7 +73,20 @@ func selunlock(scases []scase, lockorder []uint16) {
 func selparkcommit(gp *g, _ unsafe.Pointer) bool {
     // There are unlocked sudogs that point into gp's stack. Stack
     // copying must lock the channels of those sudogs.
+    // Set activeStackChans here instead of before we try parking
+    // because we could self-deadlock in stack growth on a
+    // channel lock.
     gp.activeStackChans = true
+    // Mark that it's safe for stack shrinking to occur now,
+    // because any thread acquiring this G's stack for shrinking
+    // is guaranteed to observe activeStackChans after this store.
+    atomic.Store8(&gp.parkingOnChan, 0)
+    // Make sure we unlock after setting activeStackChans and
+    // unsetting parkingOnChan. The moment we unlock any of the
+    // channel locks we risk gp getting readied by a channel operation
+    // and so gp could continue running before everything before the
+    // unlock is visible (even to gp itself).
+
     // This must not access gp's stack (see gopark). In
     // particular, it must not access the *hselect. That's okay,
     // because by the time this is called, gp.waiting has all
@@ -313,6 +327,11 @@ loop:
 
     // wait for someone to wake us up
     gp.param = nil
+    // Signal to anyone trying to shrink our stack that we're about
+    // to park on a channel. The window between when this G's status
+    // changes and when we set gp.activeStackChans is not safe for
+    // stack shrinking.
+    atomic.Store8(&gp.parkingOnChan, 1)
     gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
     gp.activeStackChans = false
 
@@ -252,6 +252,7 @@ func TestTraceSymbolize(t *testing.T) {
     {trace.EvGoSysCall, []frame{
         {"syscall.read", 0},
         {"syscall.Read", 0},
+        {"internal/poll.(*FD).Read.func1", 0},
         {"internal/poll.ignoringEINTR", 0},
         {"internal/poll.(*FD).Read", 0},
         {"os.(*File).read", 0},
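The extra expected frame is a direct consequence of the internal/poll change above: wrapping the syscall in a closure adds a "….func1" frame between the method and ignoringEINTR. A runnable sketch of that call shape (illustrative names, not internal/poll itself):

package main

import (
    "fmt"
    "runtime"
)

func ignoringEINTR(fn func() (int, error)) (int, error) { return fn() }

type fd struct{}

func (f *fd) Read() (int, error) {
    return ignoringEINTR(func() (int, error) {
        // A stack captured here shows main.(*fd).Read.func1 above
        // main.ignoringEINTR, mirroring the frames the trace test expects.
        pc := make([]uintptr, 8)
        n := runtime.Callers(1, pc)
        frames := runtime.CallersFrames(pc[:n])
        for {
            fr, more := frames.Next()
            fmt.Println(fr.Function)
            if !more {
                break
            }
        }
        return 0, nil
    })
}

func main() {
    (&fd{}).Read()
}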
@@ -242,7 +242,7 @@ func (b *B) run1() bool {
     if b.skipped {
         tag = "SKIP"
     }
-    if b.chatty && (len(b.output) > 0 || b.finished) {
+    if b.chatty != nil && (len(b.output) > 0 || b.finished) {
         b.trimOutput()
         fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
     }
@@ -523,10 +523,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
     }
     main := &B{
         common: common{
-            name:   "Main",
-            w:      os.Stdout,
-            chatty: *chatty,
-            bench:  true,
+            name:  "Main",
+            w:     os.Stdout,
+            bench: true,
         },
         importPath: importPath,
         benchFunc: func(b *B) {
@@ -537,6 +536,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
         benchTime: benchTime,
         context:   ctx,
     }
+    if Verbose() {
+        main.chatty = newChattyPrinter(main.w)
+    }
     main.runN(1)
     return !main.failed
 }
@@ -549,7 +551,7 @@ func (ctx *benchContext) processBench(b *B) {
     benchName := benchmarkName(b.name, procs)
 
     // If it's chatty, we've already printed this information.
-    if !b.chatty {
+    if b.chatty == nil {
         fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
     }
     // Recompute the running time for all but the first iteration.
@@ -576,7 +578,7 @@ func (ctx *benchContext) processBench(b *B) {
         continue
     }
     results := r.String()
-    if b.chatty {
+    if b.chatty != nil {
         fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
     }
     if *benchmarkMemory || b.showAllocResult {
@@ -639,7 +641,7 @@ func (b *B) Run(name string, f func(b *B)) bool {
         atomic.StoreInt32(&sub.hasSub, 1)
     }
 
-    if b.chatty {
+    if b.chatty != nil {
         labelsOnce.Do(func() {
             fmt.Printf("goos: %s\n", runtime.GOOS)
             fmt.Printf("goarch: %s\n", runtime.GOARCH)
@@ -483,10 +483,12 @@ func TestTRun(t *T) {
             signal: make(chan bool),
             name:   "Test",
             w:      buf,
-            chatty: tc.chatty,
         },
         context: ctx,
     }
+    if tc.chatty {
+        root.chatty = newChattyPrinter(root.w)
+    }
     ok := root.Run(tc.desc, tc.f)
     ctx.release()
@@ -665,11 +667,13 @@ func TestBRun(t *T) {
             signal: make(chan bool),
             name:   "root",
             w:      buf,
-            chatty: tc.chatty,
         },
         benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
         benchTime: benchTimeFlag{d: 1 * time.Microsecond},
     }
+    if tc.chatty {
+        root.chatty = newChattyPrinter(root.w)
+    }
     root.runN(1)
     if ok != !tc.failed {
         t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
@@ -741,9 +745,13 @@ func TestParallelSub(t *T) {
     }
 }
 
-type funcWriter func([]byte) (int, error)
+type funcWriter struct {
+    write func([]byte) (int, error)
+}
 
-func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) }
+func (fw *funcWriter) Write(b []byte) (int, error) {
+    return fw.write(b)
+}
 
 func TestRacyOutput(t *T) {
     var runs int32 // The number of running Writes
@@ -761,9 +769,10 @@ func TestRacyOutput(t *T) {
 
     var wg sync.WaitGroup
     root := &T{
-        common:  common{w: funcWriter(raceDetector), chatty: true},
+        common:  common{w: &funcWriter{raceDetector}},
         context: newTestContext(1, newMatcher(regexp.MatchString, "", "")),
     }
+    root.chatty = newChattyPrinter(root.w)
     root.Run("", func(t *T) {
         for i := 0; i < 100; i++ {
             wg.Add(1)
@@ -325,7 +325,6 @@ var (
     cpuListStr *string
     parallel   *int
     testlog    *string
-    printer    *testPrinter
 
     haveExamples bool // are there examples?
@@ -335,55 +334,45 @@ var (
     numFailed uint32 // number of test failures
 )
 
-type testPrinter struct {
-    chatty bool
-
+type chattyPrinter struct {
+    w          io.Writer
     lastNameMu sync.Mutex // guards lastName
     lastName   string     // last printed test name in chatty mode
 }
 
-func newTestPrinter(chatty bool) *testPrinter {
-    return &testPrinter{
-        chatty: chatty,
-    }
+func newChattyPrinter(w io.Writer) *chattyPrinter {
+    return &chattyPrinter{w: w}
 }
 
-func (p *testPrinter) Print(testName, out string) {
-    p.Fprint(os.Stdout, testName, out)
-}
-
-func (p *testPrinter) Fprint(w io.Writer, testName, out string) {
+// Updatef prints a message about the status of the named test to w.
+//
+// The formatted message must include the test name itself.
+func (p *chattyPrinter) Updatef(testName, format string, args ...interface{}) {
     p.lastNameMu.Lock()
     defer p.lastNameMu.Unlock()
 
-    if !p.chatty ||
-        strings.HasPrefix(out, "--- PASS: ") ||
-        strings.HasPrefix(out, "--- FAIL: ") ||
-        strings.HasPrefix(out, "--- SKIP: ") ||
-        strings.HasPrefix(out, "=== RUN   ") ||
-        strings.HasPrefix(out, "=== CONT  ") ||
-        strings.HasPrefix(out, "=== PAUSE ") {
-        // If we're buffering test output (!p.chatty), we don't really care which
-        // test is emitting which line so long as they are serialized.
-        //
-        // If the message already implies an association with a specific new test,
-        // we don't need to check what the old test name was or log an extra CONT
-        // line for it. (We're updating it anyway, and the current message already
-        // includes the test name.)
-        p.lastName = testName
-        fmt.Fprint(w, out)
-        return
-    }
+    // Since the message already implies an association with a specific new test,
+    // we don't need to check what the old test name was or log an extra CONT line
+    // for it. (We're updating it anyway, and the current message already includes
+    // the test name.)
+    p.lastName = testName
+    fmt.Fprintf(p.w, format, args...)
+}
+
+// Printf prints a message, generated by the named test, that does not
+// necessarily mention that tests's name itself.
+func (p *chattyPrinter) Printf(testName, format string, args ...interface{}) {
+    p.lastNameMu.Lock()
+    defer p.lastNameMu.Unlock()
 
     if p.lastName == "" {
         p.lastName = testName
     } else if p.lastName != testName {
-        // Always printed as-is, with 0 decoration or indentation. So, we skip
-        // printing to w.
-        fmt.Printf("=== CONT  %s\n", testName)
+        fmt.Fprintf(p.w, "=== CONT  %s\n", testName)
         p.lastName = testName
     }
 
-    fmt.Fprint(w, out)
+    fmt.Fprintf(p.w, format, args...)
 }
 
 // The maximum number of stack frames to go through when skipping helper functions for
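The split into Updatef and Printf replaces the old prefix-sniffing Fprint: Updatef is for status lines that already name their test ("=== RUN", "--- PASS"), while Printf is for ordinary log output and interposes an "=== CONT" line whenever the emitting test changed. A minimal re-implementation of the contract for illustration (the real type is unexported in testing):

package main

import (
    "fmt"
    "io"
    "os"
    "sync"
)

type chattyPrinter struct {
    w        io.Writer
    mu       sync.Mutex
    lastName string
}

// Updatef: the formatted message must include the test name itself.
func (p *chattyPrinter) Updatef(testName, format string, args ...interface{}) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.lastName = testName
    fmt.Fprintf(p.w, format, args...)
}

// Printf: ordinary output; announce a test change with "=== CONT".
func (p *chattyPrinter) Printf(testName, format string, args ...interface{}) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if p.lastName == "" {
        p.lastName = testName
    } else if p.lastName != testName {
        fmt.Fprintf(p.w, "=== CONT  %s\n", testName)
        p.lastName = testName
    }
    fmt.Fprintf(p.w, format, args...)
}

func main() {
    p := &chattyPrinter{w: os.Stdout}
    p.Updatef("TestA", "=== RUN   %s\n", "TestA")
    p.Printf("TestA", "    log line from TestA\n")
    p.Printf("TestB", "    log line from TestB\n") // prints === CONT TestB first
}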
@@ -405,12 +394,12 @@ type common struct {
     cleanupName string    // Name of the cleanup function.
     cleanupPc   []uintptr // The stack trace at the point where Cleanup was called.
 
-    chatty     bool   // A copy of the chatty flag.
-    bench      bool   // Whether the current test is a benchmark.
-    finished   bool   // Test function has completed.
-    hasSub     int32  // Written atomically.
-    raceErrors int    // Number of races detected during test.
-    runner     string // Function name of tRunner running the test.
+    chatty     *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.
+    bench      bool           // Whether the current test is a benchmark.
+    finished   bool           // Test function has completed.
+    hasSub     int32          // Written atomically.
+    raceErrors int            // Number of races detected during test.
+    runner     string         // Function name of tRunner running the test.
 
     parent   *common
     level    int // Nesting depth of test or benchmark.
@@ -572,12 +561,31 @@ func (c *common) flushToParent(testName, format string, args ...interface{}) {
     p.mu.Lock()
     defer p.mu.Unlock()
 
-    printer.Fprint(p.w, testName, fmt.Sprintf(format, args...))
-
     c.mu.Lock()
     defer c.mu.Unlock()
-    io.Copy(p.w, bytes.NewReader(c.output))
-    c.output = c.output[:0]
+
+    if len(c.output) > 0 {
+        format += "%s"
+        args = append(args[:len(args):len(args)], c.output)
+        c.output = c.output[:0] // but why?
+    }
+
+    if c.chatty != nil && p.w == c.chatty.w {
+        // We're flushing to the actual output, so track that this output is
+        // associated with a specific test (and, specifically, that the next output
+        // is *not* associated with that test).
+        //
+        // Moreover, if c.output is non-empty it is important that this write be
+        // atomic with respect to the output of other tests, so that we don't end up
+        // with confusing '=== CONT' lines in the middle of our '--- PASS' block.
+        // Neither humans nor cmd/test2json can parse those easily.
+        // (See https://golang.org/issue/40771.)
+        c.chatty.Updatef(testName, format, args...)
+    } else {
+        // We're flushing to the output buffer of the parent test, which will
+        // itself follow a test-name header when it is finally flushed to stdout.
+        fmt.Fprintf(p.w, format, args...)
+    }
 }
 
 type indenter struct {
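Folding the buffered test output into the format string means the status header and the test's captured output reach the writer in one Fprintf call, so concurrent tests cannot interleave "=== CONT" lines into the middle of a "--- PASS" block. A sketch of just that folding step (illustrative values):

package main

import "fmt"

func main() {
    format := "--- %s: %s (%s)\n"
    args := []interface{}{"PASS", "TestFoo", "0.01s"}
    output := []byte("    foo_test.go:12: some log line\n")
    if len(output) > 0 {
        // Same trick as flushToParent: append the output as one more
        // %s operand so header and body form a single atomic write.
        format += "%s"
        args = append(args[:len(args):len(args)], output)
    }
    fmt.Printf(format, args...)
}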
@@ -746,13 +754,13 @@ func (c *common) logDepth(s string, depth int) {
         }
         panic("Log in goroutine after " + c.name + " has completed")
     } else {
-        if c.chatty {
+        if c.chatty != nil {
             if c.bench {
                 // Benchmarks don't print === CONT, so we should skip the test
                 // printer and just print straight to stdout.
                 fmt.Print(c.decorate(s, depth+1))
             } else {
-                printer.Print(c.name, c.decorate(s, depth+1))
+                c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1))
             }
 
             return
@@ -1019,34 +1027,22 @@ func (t *T) Parallel() {
     t.parent.sub = append(t.parent.sub, t)
     t.raceErrors += race.Errors()
 
-    if t.chatty {
-        // Print directly to root's io.Writer so there is no delay.
-        root := t.parent
-        for ; root.parent != nil; root = root.parent {
-        }
-        root.mu.Lock()
+    if t.chatty != nil {
         // Unfortunately, even though PAUSE indicates that the named test is *no
         // longer* running, cmd/test2json interprets it as changing the active test
         // for the purpose of log parsing. We could fix cmd/test2json, but that
         // won't fix existing deployments of third-party tools that already shell
         // out to older builds of cmd/test2json — so merely fixing cmd/test2json
         // isn't enough for now.
-        printer.Fprint(root.w, t.name, fmt.Sprintf("=== PAUSE %s\n", t.name))
-        root.mu.Unlock()
+        t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
     }
 
     t.signal <- true   // Release calling test.
     <-t.parent.barrier // Wait for the parent test to complete.
     t.context.waitParallel()
 
-    if t.chatty {
-        // Print directly to root's io.Writer so there is no delay.
-        root := t.parent
-        for ; root.parent != nil; root = root.parent {
-        }
-        root.mu.Lock()
-        printer.Fprint(root.w, t.name, fmt.Sprintf("=== CONT  %s\n", t.name))
-        root.mu.Unlock()
+    if t.chatty != nil {
+        t.chatty.Updatef(t.name, "=== CONT  %s\n", t.name)
     }
 
     t.start = time.Now()
@@ -1197,14 +1193,8 @@ func (t *T) Run(name string, f func(t *T)) bool {
     }
     t.w = indenter{&t.common}
 
-    if t.chatty {
-        // Print directly to root's io.Writer so there is no delay.
-        root := t.parent
-        for ; root.parent != nil; root = root.parent {
-        }
-        root.mu.Lock()
-        printer.Fprint(root.w, t.name, fmt.Sprintf("=== RUN   %s\n", t.name))
-        root.mu.Unlock()
+    if t.chatty != nil {
+        t.chatty.Updatef(t.name, "=== RUN   %s\n", t.name)
     }
     // Instead of reducing the running count of this test before calling the
     // tRunner and increasing it afterwards, we rely on tRunner keeping the
@@ -1369,8 +1359,6 @@ func (m *M) Run() (code int) {
         flag.Parse()
     }
 
-    printer = newTestPrinter(Verbose())
-
     if *parallel < 1 {
         fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")
         flag.Usage()
@@ -1415,7 +1403,7 @@ func (t *T) report() {
     format := "--- %s: %s (%s)\n"
     if t.Failed() {
         t.flushToParent(t.name, format, "FAIL", t.name, dstr)
-    } else if t.chatty {
+    } else if t.chatty != nil {
         if t.Skipped() {
             t.flushToParent(t.name, format, "SKIP", t.name, dstr)
         } else {
@@ -1476,10 +1464,12 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT
             signal:  make(chan bool),
             barrier: make(chan bool),
             w:       os.Stdout,
-            chatty:  *chatty,
         },
         context: ctx,
     }
+    if Verbose() {
+        t.chatty = newChattyPrinter(t.w)
+    }
     tRunner(t, func(t *T) {
         for _, test := range tests {
             t.Run(test.Name, test.F)
libgo/misc/cgo/test/testdata/issue41761.go (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+typedef struct S S;
+*/
+import "C"
+
+import (
+    "cgotest/issue41761a"
+    "testing"
+)
+
+func test41761(t *testing.T) {
+    var x issue41761a.T
+    _ = (*C.struct_S)(x.X)
+}
libgo/misc/cgo/test/testdata/issue41761a/a.go (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue41761a
+
+/*
+typedef struct S S;
+*/
+import "C"
+
+type T struct {
+    X *C.S
+}