libgo: update to Go1.15rc2 release
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/247517
parent a72e938d71
commit 10c8507372
@@ -1,4 +1,4 @@
f45afedf90ac9af8f03d7d4515e952cbd724953a
307665073fce992ea8112f74b91954e770afcc70

The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
@@ -1,4 +1,4 @@
3e8f6b0791a670e52d25d76813d669daa68acfb4
c4f8cb43caf0bcd0c730d7d04a3fce129393cecc

The first line of this file holds the git revision number of the
last merge done from the master library sources.
@@ -1 +1 @@
go1.15rc1
go1.15rc2
@@ -128,7 +128,9 @@ func (p *Package) writeDefs() {
// Moreover, empty file name makes compile emit no source debug info at all.
var buf bytes.Buffer
noSourceConf.Fprint(&buf, fset, def.Go)
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) {
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) ||
strings.HasPrefix(name, "_Ctype_enum_") ||
strings.HasPrefix(name, "_Ctype_union_") {
// This typedef is of the form `typedef a b` and should be an alias.
fmt.Fprintf(fgo2, "= ")
}
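The cgo change above makes typedefs of tagged enum and union types come out as Go type aliases rather than distinct defined types (issue 40494). A rough sketch of what that enables, not taken from the commit; the C declarations and names here are invented for illustration:

package main

/*
typedef enum Color { RED, GREEN } Color;
void paint(enum Color c) {}
*/
import "C"

func main() {
	// With the typedef emitted as an alias of the tagged type, values of
	// C.Color and C.enum_Color are interchangeable, so both calls compile.
	C.paint(C.Color(C.RED))
	C.paint(C.enum_Color(C.GREEN))
}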
@@ -239,11 +239,25 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta
err = &NoGoError{Package: p}
}

// Take only the first error from a scanner.ErrorList. PackageError only
// has room for one position, so we report the first error with a position
// instead of all of the errors without a position.
var pos string
var isScanErr bool
if scanErr, ok := err.(scanner.ErrorList); ok && len(scanErr) > 0 {
isScanErr = true // For stack push/pop below.

scanPos := scanErr[0].Pos
scanPos.Filename = base.ShortPath(scanPos.Filename)
pos = scanPos.String()
err = errors.New(scanErr[0].Msg)
}

// Report the error on the importing package if the problem is with the import declaration
// for example, if the package doesn't exist or if the import path is malformed.
// On the other hand, don't include a position if the problem is with the imported package,
// for example there are no Go files (NoGoError), or there's a problem in the imported
// package's source files themselves.
// package's source files themselves (scanner errors).
//
// TODO(matloob): Perhaps make each of those the errors in the first group
// (including modload.ImportMissingError, and the corresponding
@@ -254,22 +268,11 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta
// to make it easier to check for them? That would save us from having to
// move the modload errors into this package to avoid a package import cycle,
// and from having to export an error type for the errors produced in build.
if !isMatchErr && nogoErr != nil {
if !isMatchErr && (nogoErr != nil || isScanErr) {
stk.Push(path)
defer stk.Pop()
}

// Take only the first error from a scanner.ErrorList. PackageError only
// has room for one position, so we report the first error with a position
// instead of all of the errors without a position.
var pos string
if scanErr, ok := err.(scanner.ErrorList); ok && len(scanErr) > 0 {
scanPos := scanErr[0].Pos
scanPos.Filename = base.ShortPath(scanPos.Filename)
pos = scanPos.String()
err = errors.New(scanErr[0].Msg)
}

p.Error = &PackageError{
ImportStack: stk.Copy(),
Pos: pos,
@@ -154,7 +154,7 @@ func Sign(privateKey PrivateKey, message []byte) []byte {
return signature
}

func signGeneric(signature, privateKey, message []byte) {
func sign(signature, privateKey, message []byte) {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
@@ -201,10 +201,6 @@ func signGeneric(signature, privateKey, message []byte) {
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return verify(publicKey, message, sig)
}

func verifyGeneric(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
@@ -1,15 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// -build !s390x

package ed25519

func sign(signature, privateKey, message []byte) {
signGeneric(signature, privateKey, message)
}

func verify(publicKey PublicKey, message, sig []byte) bool {
return verifyGeneric(publicKey, message, sig)
}
@@ -1,53 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore_for_gccgo

package ed25519

import (
"internal/cpu"
"strconv"
)

//go:noescape
func kdsaSign(message, signature, privateKey []byte) bool

//go:noescape
func kdsaVerify(message, signature, publicKey []byte) bool

// sign does a check to see if hardware has Edwards Curve instruction available.
// If it does, use the hardware implementation. Otherwise, use the generic version.
func sign(signature, privateKey, message []byte) {
if cpu.S390X.HasEDDSA {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}

ret := kdsaSign(message, signature, privateKey[:32])
if !ret {
panic("ed25519: kdsa sign has a failure")
}
return
}
signGeneric(signature, privateKey, message)
}

// verify does a check to see if hardware has Edwards Curve instruction available.
// If it does, use the hardware implementation for eddsa verfication. Otherwise, the generic
// version is used
func verify(publicKey PublicKey, message, sig []byte) bool {
if cpu.S390X.HasEDDSA {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}

if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}

return kdsaVerify(message, sig, publicKey)
}
return verifyGeneric(publicKey, message, sig)
}
@@ -26,14 +26,6 @@ func (zeroReader) Read(buf []byte) (int, error) {
return len(buf), nil
}

// signGenericWrapper is identical to Sign except that it unconditionally calls signGeneric directly
// rather than going through the sign function that might call assembly code.
func signGenericWrapper(privateKey PrivateKey, msg []byte) []byte {
sig := make([]byte, SignatureSize)
signGeneric(sig, privateKey, msg)
return sig
}

func TestUnmarshalMarshal(t *testing.T) {
pub, _, _ := GenerateKey(rand.Reader)
@@ -53,33 +45,22 @@ func TestUnmarshalMarshal(t *testing.T) {
}

func TestSignVerify(t *testing.T) {
t.Run("Generic", func(t *testing.T) { testSignVerify(t, signGenericWrapper, verifyGeneric) })
t.Run("Native", func(t *testing.T) { testSignVerify(t, Sign, Verify) })
}

func testSignVerify(t *testing.T, signImpl func(privateKey PrivateKey, message []byte) []byte,
verifyImpl func(publicKey PublicKey, message, sig []byte) bool) {
var zero zeroReader
public, private, _ := GenerateKey(zero)

message := []byte("test message")
sig := signImpl(private, message)
if !verifyImpl(public, message, sig) {
sig := Sign(private, message)
if !Verify(public, message, sig) {
t.Errorf("valid signature rejected")
}

wrongMessage := []byte("wrong message")
if verifyImpl(public, wrongMessage, sig) {
if Verify(public, wrongMessage, sig) {
t.Errorf("signature of different message accepted")
}
}

func TestCryptoSigner(t *testing.T) {
t.Run("Generic", func(t *testing.T) { testCryptoSigner(t, verifyGeneric) })
t.Run("Native", func(t *testing.T) { testCryptoSigner(t, Verify) })
}

func testCryptoSigner(t *testing.T, verifyImpl func(publicKey PublicKey, message, sig []byte) bool) {
var zero zeroReader
public, private, _ := GenerateKey(zero)
@@ -102,7 +83,7 @@ func testCryptoSigner(t *testing.T, verifyImpl func(publicKey PublicKey, message
t.Fatalf("error from Sign(): %s", err)
}

if !verifyImpl(public, message, signature) {
if !Verify(public, message, signature) {
t.Errorf("Verify failed on signature from Sign()")
}
}
@@ -130,12 +111,6 @@ func TestEqual(t *testing.T) {
}

func TestGolden(t *testing.T) {
t.Run("Generic", func(t *testing.T) { testGolden(t, signGenericWrapper, verifyGeneric) })
t.Run("Native", func(t *testing.T) { testGolden(t, Sign, Verify) })
}

func testGolden(t *testing.T, signImpl func(privateKey PrivateKey, message []byte) []byte,
verifyImpl func(publicKey PublicKey, message, sig []byte) bool) {
// sign.input.gz is a selection of test cases from
// https://ed25519.cr.yp.to/python/sign.input
testDataZ, err := os.Open("testdata/sign.input.gz")
@@ -177,12 +152,12 @@ func testGolden(t *testing.T, signImpl func(privateKey PrivateKey, message []byt
copy(priv[:], privBytes)
copy(priv[32:], pubKey)

sig2 := signImpl(priv[:], msg)
sig2 := Sign(priv[:], msg)
if !bytes.Equal(sig, sig2[:]) {
t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2)
}

if !verifyImpl(pubKey, msg, sig2) {
if !Verify(pubKey, msg, sig2) {
t.Errorf("signature failed to verify on line %d", lineNo)
}

@@ -206,11 +181,6 @@ func testGolden(t *testing.T, signImpl func(privateKey PrivateKey, message []byt
}

func TestMalleability(t *testing.T) {
t.Run("Generic", func(t *testing.T) { testMalleability(t, verifyGeneric) })
t.Run("Native", func(t *testing.T) { testMalleability(t, Verify) })
}

func testMalleability(t *testing.T, verifyImpl func(publicKey PublicKey, message, sig []byte) bool) {
// https://tools.ietf.org/html/rfc8032#section-5.1.7 adds an additional test
// that s be in [0, order). This prevents someone from adding a multiple of
// order to s and obtaining a second valid signature for the same message.
@@ -229,7 +199,7 @@ func testMalleability(t *testing.T, verifyImpl func(publicKey PublicKey, message
0xb1, 0x08, 0xc3, 0xbd, 0xae, 0x36, 0x9e, 0xf5, 0x49, 0xfa,
}

if verifyImpl(publicKey, msg, sig) {
if Verify(publicKey, msg, sig) {
t.Fatal("non-canonical signature accepted")
}
}
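The net effect of the ed25519 hunks above is that gccgo always goes through the portable implementation behind the same exported API, and the tests now exercise only Sign and Verify. For orientation, a minimal usage sketch of that public API (standard library crypto/ed25519; not code from this commit):

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// Generate a key pair, sign a message, and verify the signature.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("test message")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}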
@@ -106,13 +106,13 @@ var overflow = errors.New("binary: varint overflows a 64-bit integer")
func ReadUvarint(r io.ByteReader) (uint64, error) {
var x uint64
var s uint
for i := 0; ; i++ {
for i := 0; i < MaxVarintLen64; i++ {
b, err := r.ReadByte()
if err != nil {
return x, err
}
if b < 0x80 {
if i > 9 || i == 9 && b > 1 {
if i == 9 && b > 1 {
return x, overflow
}
return x | uint64(b)<<s, nil
@@ -120,6 +120,7 @@ func ReadUvarint(r io.ByteReader) (uint64, error) {
x |= uint64(b&0x7f) << s
s += 7
}
return x, overflow
}

// ReadVarint reads an encoded signed integer from r and returns it as an int64.
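The change above bounds ReadUvarint's loop at MaxVarintLen64, so a stream of continuation bytes now produces the overflow error after at most ten bytes instead of being consumed indefinitely. A small illustrative sketch of that behavior (not part of the commit), using the same 11-byte input as the new test:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Eleven bytes that all have the continuation bit set: an over-long,
	// invalid varint. ReadUvarint now stops after MaxVarintLen64 bytes and
	// reports overflow rather than reading on.
	bad := bytes.NewReader([]byte{
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	})
	x, err := binary.ReadUvarint(bad)
	fmt.Println(x, err)    // 18446744073709551615 binary: varint overflows a 64-bit integer
	fmt.Println(bad.Len()) // 1: the eleventh byte was never read
}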
@@ -121,21 +121,27 @@ func TestBufferTooSmall(t *testing.T) {
}
}

func testOverflow(t *testing.T, buf []byte, n0 int, err0 error) {
func testOverflow(t *testing.T, buf []byte, x0 uint64, n0 int, err0 error) {
x, n := Uvarint(buf)
if x != 0 || n != n0 {
t.Errorf("Uvarint(%v): got x = %d, n = %d; want 0, %d", buf, x, n, n0)
}

x, err := ReadUvarint(bytes.NewReader(buf))
if x != 0 || err != err0 {
t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want 0, %s", buf, x, err, err0)
r := bytes.NewReader(buf)
len := r.Len()
x, err := ReadUvarint(r)
if x != x0 || err != err0 {
t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want %d, %s", buf, x, err, x0, err0)
}
if read := len - r.Len(); read > MaxVarintLen64 {
t.Errorf("ReadUvarint(%v): read more than MaxVarintLen64 bytes, got %d", buf, read)
}
}

func TestOverflow(t *testing.T) {
testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, -10, overflow)
testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, -13, overflow)
testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, 0, -10, overflow)
testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, 0, -13, overflow)
testOverflow(t, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 1<<64-1, 0, overflow) // 11 bytes, should overflow
}

func TestNonCanonicalZero(t *testing.T) {
@@ -100,7 +100,7 @@ type Transport struct {
idleLRU connLRU

reqMu sync.Mutex
reqCanceler map[*Request]func(error)
reqCanceler map[cancelKey]func(error)

altMu sync.Mutex // guards changing altProto only
altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
@@ -273,6 +273,13 @@ type Transport struct {
ForceAttemptHTTP2 bool
}

// A cancelKey is the key of the reqCanceler map.
// We wrap the *Request in this type since we want to use the original request,
// not any transient one created by roundTrip.
type cancelKey struct {
req *Request
}

func (t *Transport) writeBufferSize() int {
if t.WriteBufferSize > 0 {
return t.WriteBufferSize
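The cancelKey introduced above keys the canceler map by the caller's original *Request: roundTrip may substitute an internal copy (for example when it rewinds the body), but Transport.CancelRequest is still invoked with the request the caller holds. A rough sketch of the caller-facing behavior this preserves (illustrative only; the URL is a placeholder, and CancelRequest remains deprecated in favor of request contexts):

package main

import (
	"bytes"
	"net/http"
	"time"
)

func main() {
	tr := &http.Transport{}
	// A request with a body may be rewound and re-issued internally, but
	// cancellation is still keyed by this original request value.
	req, _ := http.NewRequest("GET", "http://example.com/", bytes.NewBuffer([]byte{0}))

	go func() {
		time.Sleep(100 * time.Millisecond)
		tr.CancelRequest(req) // deprecated API, shown only to match the fix above
	}()

	if resp, err := tr.RoundTrip(req); err == nil {
		resp.Body.Close()
	}
}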
@@ -433,9 +440,10 @@ func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
// optional extra headers to write and stores any error to return
// from roundTrip.
type transportRequest struct {
*Request // original request, not to be mutated
extra Header // extra headers to write, or nil
trace *httptrace.ClientTrace // optional
*Request // original request, not to be mutated
extra Header // extra headers to write, or nil
trace *httptrace.ClientTrace // optional
cancelKey cancelKey

mu sync.Mutex // guards err
err error // first setError value for mapRoundTripError to consider
@@ -512,6 +520,7 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
}

origReq := req
cancelKey := cancelKey{origReq}
req = setupRewindBody(req)

if altRT := t.alternateRoundTripper(req); altRT != nil {
@@ -546,7 +555,7 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
}

// treq gets modified by roundTrip, so we need to recreate for each retry.
treq := &transportRequest{Request: req, trace: trace}
treq := &transportRequest{Request: req, trace: trace, cancelKey: cancelKey}
cm, err := t.connectMethodForRequest(treq)
if err != nil {
req.closeBody()
@@ -559,7 +568,7 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
// to send it requests.
pconn, err := t.getConn(treq, cm)
if err != nil {
t.setReqCanceler(req, nil)
t.setReqCanceler(cancelKey, nil)
req.closeBody()
return nil, err
}
@@ -567,7 +576,7 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
var resp *Response
if pconn.alt != nil {
// HTTP/2 path.
t.setReqCanceler(req, nil) // not cancelable with CancelRequest
t.setReqCanceler(cancelKey, nil) // not cancelable with CancelRequest
resp, err = pconn.alt.RoundTrip(req)
} else {
resp, err = pconn.roundTrip(treq)
@@ -753,14 +762,14 @@ func (t *Transport) CloseIdleConnections() {
// cancelable context instead. CancelRequest cannot cancel HTTP/2
// requests.
func (t *Transport) CancelRequest(req *Request) {
t.cancelRequest(req, errRequestCanceled)
t.cancelRequest(cancelKey{req}, errRequestCanceled)
}

// Cancel an in-flight request, recording the error value.
func (t *Transport) cancelRequest(req *Request, err error) {
func (t *Transport) cancelRequest(key cancelKey, err error) {
t.reqMu.Lock()
cancel := t.reqCanceler[req]
delete(t.reqCanceler, req)
cancel := t.reqCanceler[key]
delete(t.reqCanceler, key)
t.reqMu.Unlock()
if cancel != nil {
cancel(err)
@@ -1093,16 +1102,16 @@ func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool {
return removed
}

func (t *Transport) setReqCanceler(r *Request, fn func(error)) {
func (t *Transport) setReqCanceler(key cancelKey, fn func(error)) {
t.reqMu.Lock()
defer t.reqMu.Unlock()
if t.reqCanceler == nil {
t.reqCanceler = make(map[*Request]func(error))
t.reqCanceler = make(map[cancelKey]func(error))
}
if fn != nil {
t.reqCanceler[r] = fn
t.reqCanceler[key] = fn
} else {
delete(t.reqCanceler, r)
delete(t.reqCanceler, key)
}
}

@@ -1110,17 +1119,17 @@ func (t *Transport) setReqCanceler(r *Request, fn func(error)) {
// for the request, we don't set the function and return false.
// Since CancelRequest will clear the canceler, we can use the return value to detect if
// the request was canceled since the last setReqCancel call.
func (t *Transport) replaceReqCanceler(r *Request, fn func(error)) bool {
func (t *Transport) replaceReqCanceler(key cancelKey, fn func(error)) bool {
t.reqMu.Lock()
defer t.reqMu.Unlock()
_, ok := t.reqCanceler[r]
_, ok := t.reqCanceler[key]
if !ok {
return false
}
if fn != nil {
t.reqCanceler[r] = fn
t.reqCanceler[key] = fn
} else {
delete(t.reqCanceler, r)
delete(t.reqCanceler, key)
}
return true
}
@@ -1324,12 +1333,12 @@ func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persi
// set request canceler to some non-nil function so we
// can detect whether it was cleared between now and when
// we enter roundTrip
t.setReqCanceler(req, func(error) {})
t.setReqCanceler(treq.cancelKey, func(error) {})
return pc, nil
}

cancelc := make(chan error, 1)
t.setReqCanceler(req, func(err error) { cancelc <- err })
t.setReqCanceler(treq.cancelKey, func(err error) { cancelc <- err })

// Queue for permission to dial.
t.queueForDial(w)
@@ -2078,7 +2087,7 @@ func (pc *persistConn) readLoop() {
}

if !hasBody || bodyWritable {
pc.t.setReqCanceler(rc.req, nil)
pc.t.setReqCanceler(rc.cancelKey, nil)

// Put the idle conn back into the pool before we send the response
// so if they process it quickly and make another request, they'll
@@ -2151,7 +2160,7 @@ func (pc *persistConn) readLoop() {
// reading the response body. (or for cancellation or death)
select {
case bodyEOF := <-waitForBodyRead:
pc.t.setReqCanceler(rc.req, nil) // before pc might return to idle pool
pc.t.setReqCanceler(rc.cancelKey, nil) // before pc might return to idle pool
alive = alive &&
bodyEOF &&
!pc.sawEOF &&
@@ -2165,7 +2174,7 @@ func (pc *persistConn) readLoop() {
pc.t.CancelRequest(rc.req)
case <-rc.req.Context().Done():
alive = false
pc.t.cancelRequest(rc.req, rc.req.Context().Err())
pc.t.cancelRequest(rc.cancelKey, rc.req.Context().Err())
case <-pc.closech:
alive = false
}
@@ -2408,9 +2417,10 @@ type responseAndError struct {
}

type requestAndChan struct {
_ incomparable
req *Request
ch chan responseAndError // unbuffered; always send in select on callerGone
_ incomparable
req *Request
cancelKey cancelKey
ch chan responseAndError // unbuffered; always send in select on callerGone

// whether the Transport (as opposed to the user client code)
// added the Accept-Encoding gzip header. If the Transport
@@ -2472,7 +2482,7 @@ var (

func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
testHookEnterRoundTrip()
if !pc.t.replaceReqCanceler(req.Request, pc.cancelRequest) {
if !pc.t.replaceReqCanceler(req.cancelKey, pc.cancelRequest) {
pc.t.putOrCloseIdleConn(pc)
return nil, errRequestCanceled
}
@@ -2524,7 +2534,7 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err

defer func() {
if err != nil {
pc.t.setReqCanceler(req.Request, nil)
pc.t.setReqCanceler(req.cancelKey, nil)
}
}()

@@ -2540,6 +2550,7 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
resc := make(chan responseAndError)
pc.reqch <- requestAndChan{
req: req.Request,
cancelKey: req.cancelKey,
ch: resc,
addedGzip: requestedGzip,
continueCh: continueCh,
@@ -2591,10 +2602,10 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
}
return re.res, nil
case <-cancelChan:
pc.t.CancelRequest(req.Request)
pc.t.cancelRequest(req.cancelKey, errRequestCanceled)
cancelChan = nil
case <-ctxDoneChan:
pc.t.cancelRequest(req.Request, req.Context().Err())
pc.t.cancelRequest(req.cancelKey, req.Context().Err())
cancelChan = nil
ctxDoneChan = nil
}
@@ -2368,6 +2368,50 @@ func TestTransportCancelRequest(t *testing.T) {
}
}

func testTransportCancelRequestInDo(t *testing.T, body io.Reader) {
setParallel(t)
defer afterTest(t)
if testing.Short() {
t.Skip("skipping test in -short mode")
}
unblockc := make(chan bool)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
<-unblockc
}))
defer ts.Close()
defer close(unblockc)

c := ts.Client()
tr := c.Transport.(*Transport)

donec := make(chan bool)
req, _ := NewRequest("GET", ts.URL, body)
go func() {
defer close(donec)
c.Do(req)
}()
start := time.Now()
timeout := 10 * time.Second
for time.Since(start) < timeout {
time.Sleep(100 * time.Millisecond)
tr.CancelRequest(req)
select {
case <-donec:
return
default:
}
}
t.Errorf("Do of canceled request has not returned after %v", timeout)
}

func TestTransportCancelRequestInDo(t *testing.T) {
testTransportCancelRequestInDo(t, nil)
}

func TestTransportCancelRequestWithBodyInDo(t *testing.T) {
testTransportCancelRequestInDo(t, bytes.NewBuffer([]byte{0}))
}

func TestTransportCancelRequestInDial(t *testing.T) {
defer afterTest(t)
if testing.Short() {
@@ -1,3 +1,7 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !goexperiment.staticlockranking

package runtime
@@ -1,3 +1,7 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build goexperiment.staticlockranking

package runtime
@@ -233,16 +233,12 @@ type pageAlloc struct {

// The address to start an allocation search with. It must never
// point to any memory that is not contained in inUse, i.e.
// inUse.contains(searchAddr) must always be true.
// inUse.contains(searchAddr.addr()) must always be true. The one
// exception to this rule is that it may take on the value of
// maxOffAddr to indicate that the heap is exhausted.
//
// When added with arenaBaseOffset, we guarantee that
// all valid heap addresses (when also added with
// arenaBaseOffset) below this value are allocated and
// not worth searching.
//
// Note that adding in arenaBaseOffset transforms addresses
// to a new address space with a linear view of the full address
// space on architectures with segmented address spaces.
// We guarantee that all valid heap addresses below this value
// are allocated and not worth searching.
searchAddr offAddr

// start and end represent the chunk indices
@@ -518,6 +514,30 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
return uintptr(scav) * pageSize
}

// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// s.mheapLock must be held.
func (s *pageAlloc) findMappedAddr(addr offAddr) offAddr {
// If we're not in a test, validate first by checking mheap_.arenas.
// This is a fast path which is only safe to use outside of testing.
ai := arenaIndex(addr.addr())
if s.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
vAddr, ok := s.inUse.findAddrGreaterEqual(addr.addr())
if ok {
return offAddr{vAddr}
} else {
// The candidate search address is greater than any
// known address, which means we definitely have no
// free memory left.
return maxOffAddr
}
}
return addr
}

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
@@ -526,6 +546,7 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
//
// find also computes and returns a candidate s.searchAddr, which may or
// may not prune more of the address space than s.searchAddr already does.
// This candidate is always a valid s.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
@@ -695,7 +716,7 @@ nextLevel:
// We found a sufficiently large run of free pages straddling
// some boundary, so compute the address and return it.
addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
return addr, firstFree.base
return addr, s.findMappedAddr(firstFree.base)
}
if l == 0 {
// We're at level zero, so that means we've exhausted our search.
@@ -741,7 +762,7 @@ nextLevel:
// found an even narrower free window.
searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
return addr, firstFree.base
return addr, s.findMappedAddr(firstFree.base)
}

// alloc allocates npages worth of memory from the page heap, returning the base
@@ -612,6 +612,63 @@ func TestPageAllocAlloc(t *testing.T) {
baseChunkIdx + chunkIdxBigJump: {{0, PallocChunkPages}},
},
}

// Test to check for issue #40191. Essentially, the candidate searchAddr
// discovered by find may not point to mapped memory, so we need to handle
// that explicitly.
//
// chunkIdxSmallOffset is an offset intended to be used within chunkIdxBigJump.
// It is far enough within chunkIdxBigJump that the summaries at the beginning
// of an address range the size of chunkIdxBigJump will not be mapped in.
const chunkIdxSmallOffset = 0x503
tests["DiscontiguousBadSearchAddr"] = test{
before: map[ChunkIdx][]BitRange{
// The mechanism for the bug involves three chunks, A, B, and C, which are
// far apart in the address space. In particular, B is chunkIdxBigJump +
// chunkIdxSmalloffset chunks away from B, and C is 2*chunkIdxBigJump chunks
// away from A. A has 1 page free, B has several (NOT at the end of B), and
// C is totally free.
// Note that B's free memory must not be at the end of B because the fast
// path in the page allocator will check if the searchAddr even gives us
// enough space to place the allocation in a chunk before accessing the
// summary.
BaseChunkIdx + chunkIdxBigJump*0: {{0, PallocChunkPages - 1}},
BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {
{0, PallocChunkPages - 10},
{PallocChunkPages - 1, 1},
},
BaseChunkIdx + chunkIdxBigJump*2: {},
},
scav: map[ChunkIdx][]BitRange{
BaseChunkIdx + chunkIdxBigJump*0: {},
BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {},
BaseChunkIdx + chunkIdxBigJump*2: {},
},
hits: []hit{
// We first allocate into A to set the page allocator's searchAddr to the
// end of that chunk. That is the only purpose A serves.
{1, PageBase(BaseChunkIdx, PallocChunkPages-1), 0},
// Then, we make a big allocation that doesn't fit into B, and so must be
// fulfilled by C.
//
// On the way to fulfilling the allocation into C, we estimate searchAddr
// using the summary structure, but that will give us a searchAddr of
// B's base address minus chunkIdxSmallOffset chunks. These chunks will
// not be mapped.
{100, PageBase(baseChunkIdx+chunkIdxBigJump*2, 0), 0},
// Now we try to make a smaller allocation that can be fulfilled by B.
// In an older implementation of the page allocator, this will segfault,
// because this last allocation will first try to access the summary
// for B's base address minus chunkIdxSmallOffset chunks in the fast path,
// and this will not be mapped.
{9, PageBase(baseChunkIdx+chunkIdxBigJump*1+chunkIdxSmallOffset, PallocChunkPages-10), 0},
},
after: map[ChunkIdx][]BitRange{
BaseChunkIdx + chunkIdxBigJump*0: {{0, PallocChunkPages}},
BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {{0, PallocChunkPages}},
BaseChunkIdx + chunkIdxBigJump*2: {{0, 100}},
},
}
}
for name, v := range tests {
v := v
@@ -188,6 +188,25 @@ func (a *addrRanges) findSucc(addr uintptr) int {
return len(a.ranges)
}

// findAddrGreaterEqual returns the smallest address represented by a
// that is >= addr. Thus, if the address is represented by a,
// then it returns addr. The second return value indicates whether
// such an address exists for addr in a. That is, if addr is larger than
// any address known to a, the second return value will be false.
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
i := a.findSucc(addr)
if i == 0 {
return a.ranges[0].base.addr(), true
}
if a.ranges[i-1].contains(addr) {
return addr, true
}
if i < len(a.ranges) {
return a.ranges[i].base.addr(), true
}
return 0, false
}

// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
i := a.findSucc(addr)
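findAddrGreaterEqual above is a small successor lookup over a sorted set of address ranges: the address itself if it lies inside a range, otherwise the base of the next range, otherwise "not found". A standalone sketch of the same idea over a plain slice, with sort.Search standing in for the runtime's findSucc (illustrative only; these are not the runtime's types or names):

package main

import (
	"fmt"
	"sort"
)

type addrRange struct{ lo, hi uintptr } // half-open interval [lo, hi)

// findAddrGreaterEqual returns the smallest address covered by ranges
// that is >= addr, mirroring the runtime helper quoted above.
func findAddrGreaterEqual(ranges []addrRange, addr uintptr) (uintptr, bool) {
	// Index of the first range whose base is strictly greater than addr.
	i := sort.Search(len(ranges), func(i int) bool { return ranges[i].lo > addr })
	if i > 0 && addr < ranges[i-1].hi {
		return addr, true // addr lies inside the preceding range
	}
	if i < len(ranges) {
		return ranges[i].lo, true // the next range's base is the successor
	}
	return 0, false // addr is beyond every range
}

func main() {
	r := []addrRange{{0x1000, 0x2000}, {0x8000, 0x9000}}
	fmt.Println(findAddrGreaterEqual(r, 0x1800)) // 6144 true: inside the first range
	fmt.Println(findAddrGreaterEqual(r, 0x3000)) // 32768 true: base of the next range
	fmt.Println(findAddrGreaterEqual(r, 0xa000)) // 0 false: beyond every range
}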
@@ -176,7 +176,7 @@ func main(unsafe.Pointer) {

if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
systemstack(func() {
newm(sysmon, nil)
newm(sysmon, nil, -1)
})
}

@@ -567,7 +567,7 @@ func schedinit() {

mallocinit()
fastrandinit() // must run before mcommoninit
mcommoninit(_g_.m)
mcommoninit(_g_.m, -1)
cpuinit() // must run before alginit
alginit() // maps must not be used before this call

@@ -633,7 +633,22 @@ func checkmcount() {
}
}

func mcommoninit(mp *m) {
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
if sched.mnext+1 < sched.mnext {
throw("runtime: thread ID overflow")
}
id := sched.mnext
sched.mnext++
checkmcount()
return id
}

// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
_g_ := getg()

// g0 stack won't make sense for user (and is not necessary unwindable).
@@ -642,12 +657,12 @@ func mcommoninit(mp *m) {
}

lock(&sched.lock)
if sched.mnext+1 < sched.mnext {
throw("runtime: thread ID overflow")

if id >= 0 {
mp.id = id
} else {
mp.id = mReserveID()
}
mp.id = sched.mnext
sched.mnext++
checkmcount()

mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
@@ -1052,7 +1067,7 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
notewakeup(&mp.park)
} else {
// Start M to run P. Do not start another M below.
newm(nil, p)
newm(nil, p, -1)
}
}

@@ -1379,12 +1394,13 @@ func runSafePointFn() {
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows _p_.
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
_g_ := getg()
acquirem() // disable GC because it can be called from sysmon
if _g_.m.p == 0 {
@@ -1413,7 +1429,7 @@ func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointe

mp = new(m)
mp.mstartfn = fn
mcommoninit(mp)
mcommoninit(mp, id)

mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
mp.g0.m = mp
@@ -1540,7 +1556,7 @@ func oneNewExtraM() {
// The sched.pc will never be returned to, but setting it to
// goexit makes clear to the traceback routines where
// the goroutine stack ends.
mp, g0SP, g0SPSize := allocm(nil, nil, true)
mp, g0SP, g0SPSize := allocm(nil, nil, -1, true)
gp := malg(true, false, nil, nil)
// malg returns status as _Gidle. Change to _Gdead before
// adding to allg where GC can see it. We use _Gdead to hide
@@ -1715,9 +1731,11 @@ var newmHandoff struct {
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
//go:nowritebarrierrec
func newm(fn func(), _p_ *p) {
mp, _, _ := allocm(_p_, fn, false)
func newm(fn func(), _p_ *p, id int64) {
mp, _, _ := allocm(_p_, fn, id, false)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
@@ -1770,7 +1788,7 @@ func startTemplateThread() {
releasem(mp)
return
}
newm(templateThread, nil)
newm(templateThread, nil, -1)
releasem(mp)
}

@@ -1865,16 +1883,31 @@ func startm(_p_ *p, spinning bool) {
}
}
mp := mget()
unlock(&sched.lock)
if mp == nil {
// No M is available, we must drop sched.lock and call newm.
// However, we already own a P to assign to the M.
//
// Once sched.lock is released, another G (e.g., in a syscall),
// could find no idle P while checkdead finds a runnable G but
// no running M's because this new M hasn't started yet, thus
// throwing in an apparent deadlock.
//
// Avoid this situation by pre-allocating the ID for the new M,
// thus marking it as 'running' before we drop sched.lock. This
// new M will eventually run the scheduler to execute any
// queued G's.
id := mReserveID()
unlock(&sched.lock)

var fn func()
if spinning {
// The caller incremented nmspinning, so set m.spinning in the new M.
fn = mspinning
}
newm(fn, _p_)
newm(fn, _p_, id)
return
}
unlock(&sched.lock)
if mp.spinning {
throw("startm: m is spinning")
}
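The startm change above reserves the new M's ID while sched.lock is still held, so checkdead already counts the M as running before the lock is dropped and the (possibly slow) thread creation happens. A generic sketch of that pattern outside the runtime (all names here are illustrative, not runtime APIs):

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mu     sync.Mutex
	nextID int64
	live   int64 // workers that exist or are about to exist
}

// spawn reserves an ID and bumps the live count while holding the lock,
// then creates the worker after unlocking, so a concurrent check that
// reads p.live under the lock never sees "no workers" in between.
func (p *pool) spawn(work func(id int64)) {
	p.mu.Lock()
	id := p.nextID
	p.nextID++
	p.live++ // counted as running before the lock is dropped
	p.mu.Unlock()

	go work(id) // possibly slow startup happens outside the lock
}

func main() {
	var p pool
	done := make(chan int64, 1)
	p.spawn(func(id int64) { done <- id })
	fmt.Println(<-done) // 0
}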
@@ -4897,7 +4930,9 @@ func runqputbatch(pp *p, q *gQueue, qsize int) {

atomic.StoreRel(&pp.runqtail, t)
if !q.empty() {
lock(&sched.lock)
globrunqputbatch(q, int32(qsize))
unlock(&sched.lock)
}
}

@@ -1,3 +1,7 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !goexperiment.staticlockranking

package sync
@@ -1,3 +1,7 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build goexperiment.staticlockranking

package sync
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.

// Package testing provides support for automated testing of Go packages.
// It is intended to be used in concert with the ``go test'' command, which automates
// It is intended to be used in concert with the "go test" command, which automates
// execution of any function of the form
// func TestXxx(*testing.T)
// where Xxx does not start with a lowercase letter. The function name
@@ -14,8 +14,8 @@
// To write a new test suite, create a file whose name ends _test.go that
// contains the TestXxx functions as described here. Put the file in the same
// package as the one being tested. The file will be excluded from regular
// package builds but will be included when the ``go test'' command is run.
// For more detail, run ``go help test'' and ``go help testflag''.
// package builds but will be included when the "go test" command is run.
// For more detail, run "go help test" and "go help testflag".
//
// A simple test function looks like this:
//
@@ -901,6 +901,12 @@ typedef struct S32579 { unsigned char data[1]; } S32579;
// issue 38649
// Test that #define'd type aliases work.
#define netbsd_gid unsigned int

// issue 40494
// Inconsistent handling of tagged enum and union types.
enum Enum40494 { X_40494 };
union Union40494 { int x; };
void issue40494(enum Enum40494 e, union Union40494* up) {}
*/
import "C"

@@ -2204,3 +2210,10 @@ var issue38649 C.netbsd_gid = 42
// issue 39877

var issue39877 *C.void = nil

// issue 40494
// No runtime test; just make sure it compiles.

func Issue40494() {
C.issue40494(C.enum_Enum40494(C.X_40494), (*C.union_Union40494)(nil))
}
@@ -462,6 +462,7 @@ func TestTrivialExecutable(t *testing.T) {
run(t, "trivial executable", "../../bin/trivial")
AssertIsLinkedTo(t, "../../bin/trivial", soname)
AssertHasRPath(t, "../../bin/trivial", gorootInstallDir)
checkSize(t, "../../bin/trivial", 100000) // it is 19K on linux/amd64, 100K should be enough
}

// Build a trivial program in PIE mode that links against the shared runtime and check it runs.
@@ -470,6 +471,18 @@ func TestTrivialExecutablePIE(t *testing.T) {
run(t, "trivial executable", "./trivial.pie")
AssertIsLinkedTo(t, "./trivial.pie", soname)
AssertHasRPath(t, "./trivial.pie", gorootInstallDir)
checkSize(t, "./trivial.pie", 100000) // it is 19K on linux/amd64, 100K should be enough
}

// Check that the file size does not exceed a limit.
func checkSize(t *testing.T, f string, limit int64) {
fi, err := os.Stat(f)
if err != nil {
t.Fatalf("stat failed: %v", err)
}
if sz := fi.Size(); sz > limit {
t.Errorf("file too large: got %d, want <= %d", sz, limit)
}
}

// Build a division test program and check it runs.