libgo: update to Go 1.14.6 release

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/243317
Ian Lance Taylor 2020-07-17 12:30:51 -07:00
parent f1b6e46c41
commit d5dfd4793f
25 changed files with 1137 additions and 822 deletions

View File

@ -1,4 +1,4 @@
9703ad5fa23ca63062cb403bd12bc7da4d7845bd
2d105e65cca6b536320284273353b7c640b12c5f
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.

View File

@ -1,4 +1,4 @@
83b181c68bf332ac7948f145f33d128377a09c42
edfd6f28486017dcb136cd3f3ec252706d4b326e
The first line of this file holds the git revision number of the
last merge done from the master library sources.

View File

@ -1 +1 @@
go1.14.4
go1.14.6

View File

@ -3956,45 +3956,6 @@ func TestCgoFlagContainsSpace(t *testing.T) {
tg.grepStderrNot(`"-L[^"]+c flags".*"-L[^"]+c flags"`, "found too many quoted ld flags")
}
// Issue 9737: verify that GOARM and GO386 affect the computed build ID.
func TestBuildIDContainsArchModeEnv(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
var tg *testgoData
testWith := func(before, after func()) func(*testing.T) {
return func(t *testing.T) {
tg = testgo(t)
defer tg.cleanup()
tg.tempFile("src/mycmd/x.go", `package main
func main() {}`)
tg.setenv("GOPATH", tg.path("."))
tg.cd(tg.path("src/mycmd"))
tg.setenv("GOOS", "linux")
before()
tg.run("install", "mycmd")
after()
tg.wantStale("mycmd", "stale dependency", "should be stale after environment variable change")
}
}
t.Run("386", testWith(func() {
tg.setenv("GOARCH", "386")
tg.setenv("GO386", "387")
}, func() {
tg.setenv("GO386", "sse2")
}))
t.Run("arm", testWith(func() {
tg.setenv("GOARCH", "arm")
tg.setenv("GOARM", "5")
}, func() {
tg.setenv("GOARM", "7")
}))
}
func TestListTests(t *testing.T) {
tooSlow(t)
var tg *testgoData

View File

@ -0,0 +1,75 @@
go test -cpu=1 -run=X/Y -bench=X/Y -count=2 -v testregexp
# Test the following:
# TestX is run, twice
stdout -count=2 '^=== RUN TestX$'
stdout -count=2 '^ x_test.go:6: LOG: X running$'
# TestX/Y is run, twice
stdout -count=2 '^=== RUN TestX/Y$'
stdout -count=2 '^ x_test.go:8: LOG: Y running$'
# TestXX is run, twice
stdout -count=2 '^=== RUN TestXX$'
stdout -count=2 '^ z_test.go:10: LOG: XX running'
# TestZ is not run
! stdout '^=== RUN TestZ$'
# BenchmarkX is run with N=1 once, only to discover what sub-benchmarks it has,
# and should not print a final summary line.
stdout -count=1 '^ x_test.go:13: LOG: X running N=1$'
! stdout '^\s+BenchmarkX: x_test.go:13: LOG: X running N=\d\d+'
! stdout 'BenchmarkX\s+\d+'
# Same for BenchmarkXX.
stdout -count=1 '^ z_test.go:18: LOG: XX running N=1$'
! stdout '^ z_test.go:18: LOG: XX running N=\d\d+'
! stdout 'BenchmarkXX\s+\d+'
# BenchmarkX/Y is run in full twice due to -count=2.
# "Run in full" means that it runs for approximately the default benchtime,
# but may cap out at N=1e9.
# We don't actually care what the final iteration count is, but it should be
# a large number, and the last iteration count prints right before the results.
stdout -count=2 '^ x_test.go:15: LOG: Y running N=[1-9]\d{4,}\nBenchmarkX/Y\s+\d+'
-- testregexp/x_test.go --
package x
import "testing"
func TestX(t *testing.T) {
t.Logf("LOG: X running")
t.Run("Y", func(t *testing.T) {
t.Logf("LOG: Y running")
})
}
func BenchmarkX(b *testing.B) {
b.Logf("LOG: X running N=%d", b.N)
b.Run("Y", func(b *testing.B) {
b.Logf("LOG: Y running N=%d", b.N)
})
}
-- testregexp/z_test.go --
package x
import "testing"
func TestZ(t *testing.T) {
t.Logf("LOG: Z running")
}
func TestXX(t *testing.T) {
t.Logf("LOG: XX running")
}
func BenchmarkZ(b *testing.B) {
b.Logf("LOG: Z running N=%d", b.N)
}
func BenchmarkXX(b *testing.B) {
b.Logf("LOG: XX running N=%d", b.N)
}

View File

@ -88,6 +88,9 @@ func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) e
switch status {
case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
return CertificateInvalidError{c, Expired, ""}
case syscall.CERT_TRUST_IS_NOT_VALID_FOR_USAGE:
return CertificateInvalidError{c, IncompatibleUsage, ""}
// TODO(filippo): surface more error statuses.
default:
return UnknownAuthorityError{c, nil, nil}
}
@ -138,11 +141,19 @@ func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContex
return nil
}
// windowsExtKeyUsageOIDs are the C NUL-terminated string representations of the
// OIDs for use with the Windows API.
var windowsExtKeyUsageOIDs = make(map[ExtKeyUsage][]byte, len(extKeyUsageOIDs))
func init() {
for _, eku := range extKeyUsageOIDs {
windowsExtKeyUsageOIDs[eku.extKeyUsage] = []byte(eku.oid.String() + "\x00")
}
}
// systemVerify is like Verify, except that it uses CryptoAPI calls
// to build certificate chains and verify them.
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
hasDNSName := opts != nil && len(opts.DNSName) > 0
storeCtx, err := createStoreContext(c, opts)
if err != nil {
return nil, err
@ -152,17 +163,26 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
para := new(syscall.CertChainPara)
para.Size = uint32(unsafe.Sizeof(*para))
// If there's a DNSName set in opts, assume we're verifying
// a certificate from a TLS server.
if hasDNSName {
oids := []*byte{
&syscall.OID_PKIX_KP_SERVER_AUTH[0],
// Both IE and Chrome allow certificates with
// Server Gated Crypto as well. Some certificates
// in the wild require them.
&syscall.OID_SERVER_GATED_CRYPTO[0],
&syscall.OID_SGC_NETSCAPE[0],
keyUsages := opts.KeyUsages
if len(keyUsages) == 0 {
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
}
oids := make([]*byte, 0, len(keyUsages))
for _, eku := range keyUsages {
if eku == ExtKeyUsageAny {
oids = nil
break
}
if oid, ok := windowsExtKeyUsageOIDs[eku]; ok {
oids = append(oids, &oid[0])
}
// Like the standard verifier, accept SGC EKUs as equivalent to ServerAuth.
if eku == ExtKeyUsageServerAuth {
oids = append(oids, &syscall.OID_SERVER_GATED_CRYPTO[0])
oids = append(oids, &syscall.OID_SGC_NETSCAPE[0])
}
}
if oids != nil {
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
para.RequestedUsage.Usage.Length = uint32(len(oids))
para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
@ -208,7 +228,7 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
return nil, err
}
if hasDNSName {
if opts != nil && len(opts.DNSName) > 0 {
err = checkChainSSLServerPolicy(c, chainCtx, opts)
if err != nil {
return nil, err
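
For reference, the init function added above converts each extended key usage OID into the NUL-terminated C string form that the Windows CryptoAPI expects. A minimal standalone sketch of that transformation (illustrative only, not part of the patch; the OID shown is the standard id-kp-serverAuth value):

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	serverAuth := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
	// Same shape as the new init() above: dotted-decimal string plus a trailing NUL.
	cOID := []byte(serverAuth.String() + "\x00")
	fmt.Printf("%q\n", cOID) // "1.3.6.1.5.5.7.3.1\x00"
}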

View File

@ -188,23 +188,32 @@ var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificat
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options.
type VerifyOptions struct {
DNSName string
// DNSName, if set, is checked against the leaf certificate with
// Certificate.VerifyHostname or the platform verifier.
DNSName string
// Intermediates is an optional pool of certificates that are not trust
// anchors, but can be used to form a chain from the leaf certificate to a
// root certificate.
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
// Certificate chains are required to nest these extended key usage values.
// (This matches the Windows CryptoAPI behavior, but not the spec.)
// Roots is the set of trusted root certificates the leaf certificate needs
// to chain up to. If nil, the system roots or the platform verifier are used.
Roots *CertPool
// CurrentTime is used to check the validity of all certificates in the
// chain. If zero, the current time is used.
CurrentTime time.Time
// KeyUsages specifies which Extended Key Usage values are acceptable. A
// chain is accepted if it allows any of the listed values. An empty list
// means ExtKeyUsageServerAuth. To accept any key usage, include ExtKeyUsageAny.
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
// zero, a sensible default is used. This limit prevents pathological
// certificates from consuming excessive amounts of CPU time when
// validating.
// validating. It does not apply to the platform verifier.
MaxConstraintComparisions int
}
@ -717,8 +726,9 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
// needed. If successful, it returns one or more chains where the first
// element of the chain is c and the last element is from opts.Roots.
//
// If opts.Roots is nil and system roots are unavailable the returned error
// will be of type SystemRootsError.
// If opts.Roots is nil, the platform verifier might be used, and
// verification details might differ from what is described below. If system
// roots are unavailable the returned error will be of type SystemRootsError.
//
// Name constraints in the intermediates will be applied to all names claimed
// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
@ -726,9 +736,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
// the name being validated. Note that DirectoryName constraints are not
// supported.
//
// Extended Key Usage values are enforced down a chain, so an intermediate or
// root that enumerates EKUs prevents a leaf from asserting an EKU not in that
// list.
// Extended Key Usage values are enforced nested down a chain, so an intermediate
// or root that enumerates EKUs prevents a leaf from asserting an EKU not in that
// list. (While this is not specified, it is common practice in order to limit
// the types of certificates a CA can issue.)
//
// WARNING: this function doesn't do any revocation checking.
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
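
A caller-side illustration of the options documented above, as a sketch that assumes the leaf and intermediates come from an already-parsed TLS chain (the function and variable names here are hypothetical):

package certcheck

import (
	"crypto/x509"
	"time"
)

// verifyServerCert exercises the documented fields: DNSName for host name
// checking, an optional Intermediates pool, a nil Roots field so the system
// roots or the platform verifier are used, and KeyUsages restricted to
// server authentication (also the default when the slice is empty).
func verifyServerCert(leaf *x509.Certificate, intermediates []*x509.Certificate, host string) ([][]*x509.Certificate, error) {
	pool := x509.NewCertPool()
	for _, ic := range intermediates {
		pool.AddCert(ic)
	}
	opts := x509.VerifyOptions{
		DNSName:       host,
		Intermediates: pool,
		CurrentTime:   time.Now(), // the zero value would also mean "now"
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	return leaf.Verify(opts)
}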

File diff suppressed because it is too large.

View File

@ -255,12 +255,9 @@ type ConnBeginTx interface {
// SessionResetter may be implemented by Conn to allow drivers to reset the
// session state associated with the connection and to signal a bad connection.
type SessionResetter interface {
// ResetSession is called while a connection is in the connection
// pool. No queries will run on this connection until this method returns.
//
// If the connection is bad this should return driver.ErrBadConn to prevent
// the connection from being returned to the connection pool. Any other
// error will be discarded.
// ResetSession is called prior to executing a query on the connection
// if the connection has been used before. If the driver returns ErrBadConn
// the connection is discarded.
ResetSession(ctx context.Context) error
}
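
A minimal driver-side sketch of the contract documented above, under the assumption of a hypothetical driver package; only the reset-related methods are shown. IsValid matches the validator interface that database/sql checks for in this release (formalized as driver.Validator in Go 1.15):

package mydriver

import (
	"context"
	"database/sql/driver"
)

// conn is a hypothetical driver connection. Prepare, Close, and Begin are
// elided; only session handling is sketched.
type conn struct {
	bad   bool // set when the underlying connection is known to be broken
	dirty bool // set when per-session state (e.g. temp tables) must be cleared
}

// ResetSession is called before the connection is reused for a new query.
// Returning driver.ErrBadConn makes database/sql discard the connection.
func (c *conn) ResetSession(ctx context.Context) error {
	if c.bad {
		return driver.ErrBadConn
	}
	c.dirty = false // clear per-session state
	return nil
}

// IsValid reports whether the connection may re-enter the pool at all;
// returning false keeps a known-bad connection out of circulation.
func (c *conn) IsValid() bool {
	return !c.bad
}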

View File

@ -390,12 +390,19 @@ func setStrictFakeConnClose(t *testing.T) {
func (c *fakeConn) ResetSession(ctx context.Context) error {
c.dirtySession = false
c.currTx = nil
if c.isBad() {
return driver.ErrBadConn
}
return nil
}
var _ validator = (*fakeConn)(nil)
func (c *fakeConn) IsValid() bool {
return !c.isBad()
}
func (c *fakeConn) Close() (err error) {
drv := fdriver.(*fakeDriver)
defer func() {
@ -728,6 +735,9 @@ var hookExecBadConn func() bool
func (s *fakeStmt) Exec(args []driver.Value) (driver.Result, error) {
panic("Using ExecContext")
}
var errFakeConnSessionDirty = errors.New("fakedb: session is dirty")
func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
if s.panic == "Exec" {
panic(s.panic)
@ -740,7 +750,7 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d
return nil, driver.ErrBadConn
}
if s.c.isDirtyAndMark() {
return nil, errors.New("fakedb: session is dirty")
return nil, errFakeConnSessionDirty
}
err := checkSubsetTypes(s.c.db.allowAny, args)
@ -854,7 +864,7 @@ func (s *fakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (
return nil, driver.ErrBadConn
}
if s.c.isDirtyAndMark() {
return nil, errors.New("fakedb: session is dirty")
return nil, errFakeConnSessionDirty
}
err := checkSubsetTypes(s.c.db.allowAny, args)
@ -887,6 +897,37 @@ func (s *fakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (
}
}
}
if s.table == "tx_status" && s.colName[0] == "tx_status" {
txStatus := "autocommit"
if s.c.currTx != nil {
txStatus = "transaction"
}
cursor := &rowsCursor{
parentMem: s.c,
posRow: -1,
rows: [][]*row{
[]*row{
{
cols: []interface{}{
txStatus,
},
},
},
},
cols: [][]string{
[]string{
"tx_status",
},
},
colType: [][]string{
[]string{
"string",
},
},
errPos: -1,
}
return cursor, nil
}
t.mu.Lock()

View File

@ -421,7 +421,6 @@ type DB struct {
// It is closed during db.Close(). The close tells the connectionOpener
// goroutine to exit.
openerCh chan struct{}
resetterCh chan *driverConn
closed bool
dep map[finalCloser]depSet
lastPut map[*driverConn]string // stacktrace of last conn's put; debug only
@ -458,10 +457,10 @@ type driverConn struct {
sync.Mutex // guards following
ci driver.Conn
needReset bool // The connection session should be reset before use if true.
closed bool
finalClosed bool // ci.Close has been called
openStmt map[*driverStmt]bool
lastErr error // lastError captures the result of the session resetter.
// guarded by db.mu
inUse bool
@ -486,6 +485,41 @@ func (dc *driverConn) expired(timeout time.Duration) bool {
return dc.createdAt.Add(timeout).Before(nowFunc())
}
// resetSession checks if the driver connection needs the
// session to be reset and if required, resets it.
func (dc *driverConn) resetSession(ctx context.Context) error {
dc.Lock()
defer dc.Unlock()
if !dc.needReset {
return nil
}
if cr, ok := dc.ci.(driver.SessionResetter); ok {
return cr.ResetSession(ctx)
}
return nil
}
// validator was introduced for Go1.15, but backported to Go1.14.
type validator interface {
IsValid() bool
}
// validateConnection checks if the connection is valid and can
// still be used. It also marks the session for reset if required.
func (dc *driverConn) validateConnection(needsReset bool) bool {
dc.Lock()
defer dc.Unlock()
if needsReset {
dc.needReset = true
}
if cv, ok := dc.ci.(validator); ok {
return cv.IsValid()
}
return true
}
// prepareLocked prepares the query on dc. When cg == nil the dc must keep track of
// the prepared statements in a pool.
func (dc *driverConn) prepareLocked(ctx context.Context, cg stmtConnGrabber, query string) (*driverStmt, error) {
@ -511,19 +545,6 @@ func (dc *driverConn) prepareLocked(ctx context.Context, cg stmtConnGrabber, que
return ds, nil
}
// resetSession resets the connection session and sets the lastErr
// that is checked before returning the connection to another query.
//
// resetSession assumes that the embedded mutex is locked when the connection
// was returned to the pool. This unlocks the mutex.
func (dc *driverConn) resetSession(ctx context.Context) {
defer dc.Unlock() // In case of panic.
if dc.closed { // Check if the database has been closed.
return
}
dc.lastErr = dc.ci.(driver.SessionResetter).ResetSession(ctx)
}
// the dc.db's Mutex is held.
func (dc *driverConn) closeDBLocked() func() error {
dc.Lock()
@ -713,14 +734,12 @@ func OpenDB(c driver.Connector) *DB {
db := &DB{
connector: c,
openerCh: make(chan struct{}, connectionRequestQueueSize),
resetterCh: make(chan *driverConn, 50),
lastPut: make(map[*driverConn]string),
connRequests: make(map[uint64]chan connRequest),
stop: cancel,
}
go db.connectionOpener(ctx)
go db.connectionResetter(ctx)
return db
}
@ -1058,23 +1077,6 @@ func (db *DB) connectionOpener(ctx context.Context) {
}
}
// connectionResetter runs in a separate goroutine to reset connections async
// to exported API.
func (db *DB) connectionResetter(ctx context.Context) {
for {
select {
case <-ctx.Done():
close(db.resetterCh)
for dc := range db.resetterCh {
dc.Unlock()
}
return
case dc := <-db.resetterCh:
dc.resetSession(ctx)
}
}
}
// Open one new connection
func (db *DB) openNewConnection(ctx context.Context) {
// maybeOpenNewConnections has already executed db.numOpen++ before it sent
@ -1155,14 +1157,13 @@ func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn
conn.Close()
return nil, driver.ErrBadConn
}
// Lock around reading lastErr to ensure the session resetter finished.
conn.Lock()
err := conn.lastErr
conn.Unlock()
if err == driver.ErrBadConn {
// Reset the session if required.
if err := conn.resetSession(ctx); err == driver.ErrBadConn {
conn.Close()
return nil, driver.ErrBadConn
}
return conn, nil
}
@ -1204,18 +1205,22 @@ func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn
if !ok {
return nil, errDBClosed
}
if ret.err == nil && ret.conn.expired(lifetime) {
// Only check if the connection is expired if the strategy is cachedOrNewConns.
// If we require a new connection, just re-use the connection without looking
// at the expiry time. If it is expired, it will be checked when it is placed
// back into the connection pool.
// This prioritizes giving a valid connection to a client over the exact connection
// lifetime, which could expire exactly after this point anyway.
if strategy == cachedOrNewConn && ret.err == nil && ret.conn.expired(lifetime) {
ret.conn.Close()
return nil, driver.ErrBadConn
}
if ret.conn == nil {
return nil, ret.err
}
// Lock around reading lastErr to ensure the session resetter finished.
ret.conn.Lock()
err := ret.conn.lastErr
ret.conn.Unlock()
if err == driver.ErrBadConn {
// Reset the session if required.
if err := ret.conn.resetSession(ctx); err == driver.ErrBadConn {
ret.conn.Close()
return nil, driver.ErrBadConn
}
@ -1275,13 +1280,23 @@ const debugGetPut = false
// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
func (db *DB) putConn(dc *driverConn, err error, resetSession bool) {
if err != driver.ErrBadConn {
if !dc.validateConnection(resetSession) {
err = driver.ErrBadConn
}
}
db.mu.Lock()
if !dc.inUse {
db.mu.Unlock()
if debugGetPut {
fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
}
panic("sql: connection returned that was never out")
}
if err != driver.ErrBadConn && dc.expired(db.maxLifetime) {
err = driver.ErrBadConn
}
if debugGetPut {
db.lastPut[dc] = stack()
}
@ -1305,41 +1320,13 @@ func (db *DB) putConn(dc *driverConn, err error, resetSession bool) {
if putConnHook != nil {
putConnHook(db, dc)
}
if db.closed {
// Connections do not need to be reset if they will be closed.
// Prevents writing to resetterCh after the DB has closed.
resetSession = false
}
if resetSession {
if _, resetSession = dc.ci.(driver.SessionResetter); resetSession {
// Lock the driverConn here so it isn't released until
// the connection is reset.
// The lock must be taken before the connection is put into
// the pool to prevent it from being taken out before it is reset.
dc.Lock()
}
}
added := db.putConnDBLocked(dc, nil)
db.mu.Unlock()
if !added {
if resetSession {
dc.Unlock()
}
dc.Close()
return
}
if !resetSession {
return
}
select {
default:
// If the resetterCh is blocking then mark the connection
// as bad and continue on.
dc.lastErr = driver.ErrBadConn
dc.Unlock()
case db.resetterCh <- dc:
}
}
// Satisfy a connRequest or put the driverConn in the idle pool and return true
@ -1701,7 +1688,11 @@ func (db *DB) begin(ctx context.Context, opts *TxOptions, strategy connReuseStra
// beginDC starts a transaction. The provided dc must be valid and ready to use.
func (db *DB) beginDC(ctx context.Context, dc *driverConn, release func(error), opts *TxOptions) (tx *Tx, err error) {
var txi driver.Tx
keepConnOnRollback := false
withLock(dc, func() {
_, hasSessionResetter := dc.ci.(driver.SessionResetter)
_, hasConnectionValidator := dc.ci.(validator)
keepConnOnRollback = hasSessionResetter && hasConnectionValidator
txi, err = ctxDriverBegin(ctx, opts, dc.ci)
})
if err != nil {
@ -1713,12 +1704,13 @@ func (db *DB) beginDC(ctx context.Context, dc *driverConn, release func(error),
// The cancel function in Tx will be called after done is set to true.
ctx, cancel := context.WithCancel(ctx)
tx = &Tx{
db: db,
dc: dc,
releaseConn: release,
txi: txi,
cancel: cancel,
ctx: ctx,
db: db,
dc: dc,
releaseConn: release,
txi: txi,
cancel: cancel,
keepConnOnRollback: keepConnOnRollback,
ctx: ctx,
}
go tx.awaitDone()
return tx, nil
@ -1980,6 +1972,11 @@ type Tx struct {
// Use atomic operations on value when checking value.
done int32
// keepConnOnRollback is true if the driver knows
// how to reset the connection's session and if need be discard
// the connection.
keepConnOnRollback bool
// All Stmts prepared for this transaction. These will be closed after the
// transaction has been committed or rolled back.
stmts struct {
@ -2005,7 +2002,10 @@ func (tx *Tx) awaitDone() {
// transaction is closed and the resources are released. This
// rollback does nothing if the transaction has already been
// committed or rolled back.
tx.rollback(true)
// Do not discard the connection if the connection knows
// how to reset the session.
discardConnection := !tx.keepConnOnRollback
tx.rollback(discardConnection)
}
func (tx *Tx) isDone() bool {
@ -2016,14 +2016,10 @@ func (tx *Tx) isDone() bool {
// that has already been committed or rolled back.
var ErrTxDone = errors.New("sql: transaction has already been committed or rolled back")
// close returns the connection to the pool and
// must only be called by Tx.rollback or Tx.Commit.
func (tx *Tx) close(err error) {
tx.cancel()
tx.closemu.Lock()
defer tx.closemu.Unlock()
// closeLocked returns the connection to the pool and
// must only be called by Tx.rollback or Tx.Commit while
// closemu is Locked and tx already canceled.
func (tx *Tx) closeLocked(err error) {
tx.releaseConn(err)
tx.dc = nil
tx.txi = nil
@ -2090,6 +2086,15 @@ func (tx *Tx) Commit() error {
if !atomic.CompareAndSwapInt32(&tx.done, 0, 1) {
return ErrTxDone
}
// Cancel the Tx to release any active R-closemu locks.
// This is safe to do because tx.done has already transitioned
// from 0 to 1. Hold the W-closemu lock prior to rollback
// to ensure no other connection has an active query.
tx.cancel()
tx.closemu.Lock()
defer tx.closemu.Unlock()
var err error
withLock(tx.dc, func() {
err = tx.txi.Commit()
@ -2097,16 +2102,31 @@ func (tx *Tx) Commit() error {
if err != driver.ErrBadConn {
tx.closePrepared()
}
tx.close(err)
tx.closeLocked(err)
return err
}
var rollbackHook func()
// rollback aborts the transaction and optionally forces the pool to discard
// the connection.
func (tx *Tx) rollback(discardConn bool) error {
if !atomic.CompareAndSwapInt32(&tx.done, 0, 1) {
return ErrTxDone
}
if rollbackHook != nil {
rollbackHook()
}
// Cancel the Tx to release any active R-closemu locks.
// This is safe to do because tx.done has already transitioned
// from 0 to 1. Hold the W-closemu lock prior to rollback
// to ensure no other connection has an active query.
tx.cancel()
tx.closemu.Lock()
defer tx.closemu.Unlock()
var err error
withLock(tx.dc, func() {
err = tx.txi.Rollback()
@ -2117,7 +2137,7 @@ func (tx *Tx) rollback(discardConn bool) error {
if discardConn {
err = driver.ErrBadConn
}
tx.close(err)
tx.closeLocked(err)
return err
}
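
At the database/sql API level, the invariant these changes (and the new TestTxCannotCommitAfterRollback below) enforce is that a rolled-back transaction can never commit. A small usage sketch, assuming a *sql.DB supplied by the application:

package txdemo

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
)

// rollbackThenCommit demonstrates the invariant: after Rollback (or a
// rollback triggered by context cancellation), Commit reports sql.ErrTxDone
// rather than committing work outside the transaction.
func rollbackThenCommit(ctx context.Context, db *sql.DB) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := tx.Rollback(); err != nil {
		return err
	}
	if err := tx.Commit(); !errors.Is(err, sql.ErrTxDone) {
		return fmt.Errorf("expected sql.ErrTxDone after rollback, got %v", err)
	}
	return nil
}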

View File

@ -80,6 +80,11 @@ func newTestDBConnector(t testing.TB, fc *fakeConnector, name string) *DB {
exec(t, db, "CREATE|magicquery|op=string,millis=int32")
exec(t, db, "INSERT|magicquery|op=sleep,millis=10")
}
if name == "tx_status" {
// Magic table name and column, known by fakedb_test.go.
exec(t, db, "CREATE|tx_status|tx_status=string")
exec(t, db, "INSERT|tx_status|tx_status=invalid")
}
return db
}
@ -437,6 +442,7 @@ func TestTxContextWait(t *testing.T) {
}
t.Fatal(err)
}
tx.keepConnOnRollback = false
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
@ -449,6 +455,35 @@ func TestTxContextWait(t *testing.T) {
waitForFree(t, db, 5*time.Second, 0)
}
// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
// the final connection.
func TestTxContextWaitNoDiscard(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
defer cancel()
tx, err := db.BeginTx(ctx, nil)
if err != nil {
// Guard against the context being canceled before BeginTx completes.
if err == context.DeadlineExceeded {
t.Skip("tx context canceled prior to first use")
}
t.Fatal(err)
}
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
// after this and close the rows and return an error.
_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
if err != context.DeadlineExceeded {
t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
}
waitForFree(t, db, 5*time.Second, 1)
}
// TestUnsupportedOptions checks that the database fails when a driver that
// doesn't implement ConnBeginTx is used with non-default options and an
// un-cancellable context.
@ -1525,6 +1560,37 @@ func TestConnTx(t *testing.T) {
}
}
// TestConnIsValid verifies that a database connection that should be discarded,
// is actually discarded and does not re-enter the connection pool.
// If the IsValid method from *fakeConn is removed, this test will fail.
func TestConnIsValid(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
db.SetMaxOpenConns(1)
ctx := context.Background()
c, err := db.Conn(ctx)
if err != nil {
t.Fatal(err)
}
err = c.Raw(func(raw interface{}) error {
dc := raw.(*fakeConn)
dc.stickyBad = true
return nil
})
if err != nil {
t.Fatal(err)
}
c.Close()
if len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {
t.Fatal("bad connection returned to pool; expected bad connection to be discarded")
}
}
// Tests fix for issue 2542, that we release a lock when querying on
// a closed connection.
func TestIssue2542Deadlock(t *testing.T) {
@ -2658,6 +2724,159 @@ func TestManyErrBadConn(t *testing.T) {
}
}
// Issue 34755: Ensure that a Tx cannot commit after a rollback.
func TestTxCannotCommitAfterRollback(t *testing.T) {
db := newTestDB(t, "tx_status")
defer closeDB(t, db)
// First check query reporting is correct.
var txStatus string
err := db.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
if err != nil {
t.Fatal(err)
}
if g, w := txStatus, "autocommit"; g != w {
t.Fatalf("tx_status=%q, wanted %q", g, w)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tx, err := db.BeginTx(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Ignore dirty session for this test.
// A failing test should trigger the dirty session flag as well,
// but that isn't exactly what this should test for.
tx.txi.(*fakeTx).c.skipDirtySession = true
defer tx.Rollback()
err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
if err != nil {
t.Fatal(err)
}
if g, w := txStatus, "transaction"; g != w {
t.Fatalf("tx_status=%q, wanted %q", g, w)
}
// 1. Begin a transaction.
// 2. (A) Start a query, (B) begin Tx rollback through a ctx cancel.
// 3. Check if 2.A has committed in Tx (pass) or outside of Tx (fail).
sendQuery := make(chan struct{})
hookTxGrabConn = func() {
cancel()
<-sendQuery
}
rollbackHook = func() {
close(sendQuery)
}
defer func() {
hookTxGrabConn = nil
rollbackHook = nil
}()
err = tx.QueryRow("SELECT|tx_status|tx_status|").Scan(&txStatus)
if err != nil {
// A failure here would be expected if skipDirtySession was not set to true above.
t.Fatal(err)
}
if g, w := txStatus, "transaction"; g != w {
t.Fatalf("tx_status=%q, wanted %q", g, w)
}
}
// Issue32530 encounters an issue where a connection may
// expire right after it comes out of a used connection pool
// even when a new connection is requested.
func TestConnExpiresFreshOutOfPool(t *testing.T) {
execCases := []struct {
expired bool
badReset bool
}{
{false, false},
{true, false},
{false, true},
}
t0 := time.Unix(1000000, 0)
offset := time.Duration(0)
offsetMu := sync.RWMutex{}
nowFunc = func() time.Time {
offsetMu.RLock()
defer offsetMu.RUnlock()
return t0.Add(offset)
}
defer func() { nowFunc = time.Now }()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db := newTestDB(t, "magicquery")
defer closeDB(t, db)
db.SetMaxOpenConns(1)
for _, ec := range execCases {
ec := ec
name := fmt.Sprintf("expired=%t,badReset=%t", ec.expired, ec.badReset)
t.Run(name, func(t *testing.T) {
db.clearAllConns(t)
db.SetMaxIdleConns(1)
db.SetConnMaxLifetime(10 * time.Second)
conn, err := db.conn(ctx, alwaysNewConn)
if err != nil {
t.Fatal(err)
}
afterPutConn := make(chan struct{})
waitingForConn := make(chan struct{})
go func() {
conn, err := db.conn(ctx, alwaysNewConn)
if err != nil {
t.Fatal(err)
}
db.putConn(conn, err, false)
close(afterPutConn)
}()
go func() {
for {
db.mu.Lock()
ct := len(db.connRequests)
db.mu.Unlock()
if ct > 0 {
close(waitingForConn)
return
}
time.Sleep(10 * time.Millisecond)
}
}()
<-waitingForConn
offsetMu.Lock()
if ec.expired {
offset = 11 * time.Second
} else {
offset = time.Duration(0)
}
offsetMu.Unlock()
conn.ci.(*fakeConn).stickyBad = ec.badReset
db.putConn(conn, err, true)
<-afterPutConn
})
}
}
// TestIssue20575 ensures the Rows from query does not block
// closing a transaction. Ensure Rows is closed while closing a transaction.
func TestIssue20575(t *testing.T) {

View File

@ -213,9 +213,6 @@ type decodeState struct {
savedError error
useNumber bool
disallowUnknownFields bool
// safeUnquote is the number of current string literal bytes that don't
// need to be unquoted. When negative, no bytes need unquoting.
safeUnquote int
}
// readIndex returns the position of the last byte read.
@ -317,27 +314,13 @@ func (d *decodeState) rescanLiteral() {
Switch:
switch data[i-1] {
case '"': // string
// safeUnquote is initialized at -1, which means that all bytes
// checked so far can be unquoted at a later time with no work
// at all. When reaching the closing '"', if safeUnquote is
// still -1, all bytes can be unquoted with no work. Otherwise,
// only those bytes up until the first '\\' or non-ascii rune
// can be safely unquoted.
safeUnquote := -1
for ; i < len(data); i++ {
if c := data[i]; c == '\\' {
if safeUnquote < 0 { // first unsafe byte
safeUnquote = int(i - d.off)
}
switch data[i] {
case '\\':
i++ // escaped char
} else if c == '"' {
d.safeUnquote = safeUnquote
case '"':
i++ // tokenize the closing quote too
break Switch
} else if c >= utf8.RuneSelf {
if safeUnquote < 0 { // first unsafe byte
safeUnquote = int(i - d.off)
}
}
}
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
@ -691,7 +674,7 @@ func (d *decodeState) object(v reflect.Value) error {
start := d.readIndex()
d.rescanLiteral()
item := d.data[start:d.readIndex()]
key, ok := d.unquoteBytes(item)
key, ok := unquoteBytes(item)
if !ok {
panic(phasePanicMsg)
}
@ -892,7 +875,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
return nil
}
s, ok := d.unquoteBytes(item)
s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
@ -943,7 +926,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
case '"': // string
s, ok := d.unquoteBytes(item)
s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
@ -1103,7 +1086,7 @@ func (d *decodeState) objectInterface() map[string]interface{} {
start := d.readIndex()
d.rescanLiteral()
item := d.data[start:d.readIndex()]
key, ok := d.unquote(item)
key, ok := unquote(item)
if !ok {
panic(phasePanicMsg)
}
@ -1152,7 +1135,7 @@ func (d *decodeState) literalInterface() interface{} {
return c == 't'
case '"': // string
s, ok := d.unquote(item)
s, ok := unquote(item)
if !ok {
panic(phasePanicMsg)
}
@ -1195,33 +1178,40 @@ func getu4(s []byte) rune {
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different than for Go, so cannot use strconv.Unquote.
// The first byte in s must be '"'.
func (d *decodeState) unquote(s []byte) (t string, ok bool) {
s, ok = d.unquoteBytes(s)
func unquote(s []byte) (t string, ok bool) {
s, ok = unquoteBytes(s)
t = string(s)
return
}
func (d *decodeState) unquoteBytes(s []byte) (t []byte, ok bool) {
// We already know that s[0] == '"'. However, we don't know that the
// closing quote exists in all cases, such as when the string is nested
// via the ",string" option.
if len(s) < 2 || s[len(s)-1] != '"' {
func unquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// If there are no unusual characters, no unquoting is needed, so return
// a slice of the original bytes.
r := d.safeUnquote
if r == -1 {
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError && size == 1 {
break
}
r += size
}
if r == len(s) {
return s, true
}
// Only perform up to one safe unquote for each re-scanned string
// literal. In some edge cases, the decoder unquotes a literal a second
// time, even after another literal has been re-scanned. Thus, only the
// first unquote can safely use safeUnquote.
d.safeUnquote = 0
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
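
The rewritten unquoteBytes above first scans for any byte that would require real unquoting work and otherwise returns a slice of the original bytes. A standalone sketch of that fast-path check (illustrative; needsUnquote is not a real encoding/json function):

package main

import (
	"fmt"
	"unicode/utf8"
)

// needsUnquote reports whether a quoted JSON literal contains an escape, a
// control character, or invalid UTF-8, i.e. whether the slow path is needed.
func needsUnquote(s []byte) bool {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return true // not even a well-formed quoted literal
	}
	s = s[1 : len(s)-1]
	for i := 0; i < len(s); {
		c := s[i]
		if c == '\\' || c == '"' || c < ' ' {
			return true
		}
		if c < utf8.RuneSelf {
			i++
			continue
		}
		r, size := utf8.DecodeRune(s[i:])
		if r == utf8.RuneError && size == 1 {
			return true
		}
		i += size
	}
	return false
}

func main() {
	fmt.Println(needsUnquote([]byte(`"plain ascii"`)))   // false: fast path, no copy needed
	fmt.Println(needsUnquote([]byte(`"needs \n work"`))) // true: escape present
}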

View File

@ -2459,4 +2459,20 @@ func TestUnmarshalRescanLiteralMangledUnquote(t *testing.T) {
if t1 != t2 {
t.Errorf("Marshal and Unmarshal roundtrip mismatch: want %q got %q", t1, t2)
}
// See golang.org/issues/39555.
input := map[textUnmarshalerString]string{"FOO": "", `"`: ""}
encoded, err := Marshal(input)
if err != nil {
t.Fatalf("Marshal unexpected error: %v", err)
}
var got map[textUnmarshalerString]string
if err := Unmarshal(encoded, &got); err != nil {
t.Fatalf("Unmarshal unexpected error: %v", err)
}
want := map[textUnmarshalerString]string{"foo": "", `"`: ""}
if !reflect.DeepEqual(want, got) {
t.Fatalf("Unexpected roundtrip result:\nwant: %q\ngot: %q", want, got)
}
}

View File

@ -801,6 +801,7 @@ var printVerbs = []printVerb{
{'g', sharpNumFlag, argFloat | argComplex},
{'G', sharpNumFlag, argFloat | argComplex},
{'o', sharpNumFlag, argInt | argPointer},
{'O', sharpNumFlag, argInt | argPointer},
{'p', "-#", argPointer},
{'q', " -+.0#", argRune | argInt | argString},
{'s', " -+.0", argString},
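
The new table entry teaches the printf check about the %O verb, which fmt has supported since Go 1.13 for formatting integers in base 8 with a 0o prefix. A quick illustration:

package main

import "fmt"

func main() {
	fmt.Printf("%o %O\n", 64, 64) // prints "100 0o100"
}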

View File

@ -411,6 +411,7 @@ func checkIfNoneMatch(w ResponseWriter, r *Request) condResult {
}
if buf[0] == ',' {
buf = buf[1:]
continue
}
if buf[0] == '*' {
return condFalse

View File

@ -849,6 +849,15 @@ func TestServeContent(t *testing.T) {
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"if_none_match_malformed": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `,`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,

View File

@ -425,6 +425,16 @@ type response struct {
wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
wantsClose bool // HTTP request has Connection "close"
// canWriteContinue is a boolean value accessed as an atomic int32
// that says whether or not a 100 Continue header can be written
// to the connection.
// writeContinueMu must be held while writing the header.
// These two fields together synchronize the body reader
// (the expectContinueReader, which wants to write 100 Continue)
// against the main writer.
canWriteContinue atomicBool
writeContinueMu sync.Mutex
w *bufio.Writer // buffers output in chunks to chunkWriter
cw chunkWriter
@ -515,6 +525,7 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
@ -877,21 +888,27 @@ type expectContinueReader struct {
resp *response
readCloser io.ReadCloser
closed bool
sawEOF bool
sawEOF atomicBool
}
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
if ecr.closed {
return 0, ErrBodyReadAfterClose
}
if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
ecr.resp.wroteContinue = true
ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
ecr.resp.conn.bufw.Flush()
w := ecr.resp
if !w.wroteContinue && w.canWriteContinue.isSet() && !w.conn.hijacked() {
w.wroteContinue = true
w.writeContinueMu.Lock()
if w.canWriteContinue.isSet() {
w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
w.conn.bufw.Flush()
w.canWriteContinue.setFalse()
}
w.writeContinueMu.Unlock()
}
n, err = ecr.readCloser.Read(p)
if err == io.EOF {
ecr.sawEOF = true
ecr.sawEOF.setTrue()
}
return
}
@ -1315,7 +1332,7 @@ func (cw *chunkWriter) writeHeader(p []byte) {
// because we don't know if the next bytes on the wire will be
// the body-following-the-timer or the subsequent request.
// See Issue 11549.
if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
w.closeAfterReply = true
}
@ -1565,6 +1582,17 @@ func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err er
}
return 0, ErrHijacked
}
if w.canWriteContinue.isSet() {
// Body reader wants to write 100 Continue but hasn't yet.
// Tell it not to. The store must be done while holding the lock
// because the lock makes sure that there is not an active write
// this very moment.
w.writeContinueMu.Lock()
w.canWriteContinue.setFalse()
w.writeContinueMu.Unlock()
}
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
@ -1876,6 +1904,7 @@ func (c *conn) serve(ctx context.Context) {
if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
// Wrap the Body reader with one that replies on the connection
req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
w.canWriteContinue.setTrue()
}
} else if req.Header.get("Expect") != "" {
w.sendExpectationFailed()
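
The comments above describe the synchronization between the handler's first write and the body reader that may still want to send a 100 Continue: a flag read atomically on the fast path, re-checked under a mutex before the guarded write, and cleared under that same mutex to revoke permission. A generic sketch of the pattern with illustrative names (not the actual net/http internals):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type gate struct {
	allowed int32 // read atomically; 1 while the side action is still permitted
	mu      sync.Mutex
}

// tryAct performs the side action only if permission has not been revoked,
// mirroring how expectContinueReader.Read writes the 100 Continue above.
func (g *gate) tryAct() {
	if atomic.LoadInt32(&g.allowed) == 0 {
		return // fast path: already revoked
	}
	g.mu.Lock()
	defer g.mu.Unlock()
	if atomic.LoadInt32(&g.allowed) == 1 {
		fmt.Println("side action performed (e.g. writing 100 Continue)")
		atomic.StoreInt32(&g.allowed, 0) // at most once
	}
}

// revoke withdraws permission; taking the mutex guarantees no side action is
// in flight at that very moment, mirroring response.write above.
func (g *gate) revoke() {
	g.mu.Lock()
	atomic.StoreInt32(&g.allowed, 0)
	g.mu.Unlock()
}

func main() {
	g := &gate{allowed: 1}
	g.tryAct() // permitted: acts once and clears the flag
	g.revoke() // safe to call concurrently with tryAct
	g.tryAct() // fast path: nothing happens
}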

View File

@ -789,6 +789,11 @@ var loop1, loop2 Loop
var loopy1, loopy2 Loopy
var cycleMap1, cycleMap2, cycleMap3 map[string]interface{}
type structWithSelfPtr struct {
p *structWithSelfPtr
s string
}
func init() {
loop1 = &loop2
loop2 = &loop1
@ -845,6 +850,7 @@ var deepEqualTests = []DeepEqualTest{
{[]float64{math.NaN()}, self{}, true},
{map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
{map[float64]float64{math.NaN(): 1}, self{}, true},
{&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false},
// Nil vs empty: not the same.
{[]int{}, []int(nil), false},

View File

@ -45,8 +45,20 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
}
if hard(v1, v2) {
addr1 := v1.ptr
addr2 := v2.ptr
// For a Ptr or Map value, we need to check flagIndir,
// which we do by calling the pointer method.
// For Slice or Interface, flagIndir is always set,
// and using v.ptr suffices.
ptrval := func(v Value) unsafe.Pointer {
switch v.Kind() {
case Ptr, Map:
return v.pointer()
default:
return v.ptr
}
}
addr1 := ptrval(v1)
addr2 := ptrval(v2)
if uintptr(addr1) > uintptr(addr2) {
// Canonicalize order to reduce number of entries in visited.
// Assumes non-moving garbage collector.

View File

@ -2477,6 +2477,7 @@ func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
}
// Note: this type must agree with runtime.bitvector.
type bitVector struct {
n uint32 // number of bits
data []byte

View File

@ -2175,6 +2175,7 @@ func NewAt(typ Type, p unsafe.Pointer) Value {
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
// target must be initialized memory (or nil).
func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
if v.flag&flagMethod != 0 {
v = makeMethodValue(context, v)

View File

@ -526,6 +526,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
name: "Main",
w: os.Stdout,
chatty: *chatty,
bench: true,
},
importPath: importPath,
benchFunc: func(b *B) {
@ -559,6 +560,7 @@ func (ctx *benchContext) processBench(b *B) {
name: b.name,
w: b.w,
chatty: b.chatty,
bench: true,
},
benchFunc: b.benchFunc,
benchTime: b.benchTime,
@ -624,6 +626,7 @@ func (b *B) Run(name string, f func(b *B)) bool {
creator: pc[:n],
w: b.w,
chatty: b.chatty,
bench: true,
},
importPath: b.importPath,
benchFunc: f,

View File

@ -438,8 +438,6 @@ func TestTRun(t *T) {
}, {
// A chatty test should always log with fmt.Print, even if the
// parent test has completed.
// TODO(deklerk) Capture the log of fmt.Print and assert that the
// subtest message is not lost.
desc: "log in finished sub test with chatty",
ok: false,
chatty: true,
@ -477,35 +475,37 @@ func TestTRun(t *T) {
},
}}
for _, tc := range testCases {
ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", ""))
buf := &bytes.Buffer{}
root := &T{
common: common{
signal: make(chan bool),
name: "Test",
w: buf,
chatty: tc.chatty,
},
context: ctx,
}
ok := root.Run(tc.desc, tc.f)
ctx.release()
t.Run(tc.desc, func(t *T) {
ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", ""))
buf := &bytes.Buffer{}
root := &T{
common: common{
signal: make(chan bool),
name: "Test",
w: buf,
chatty: tc.chatty,
},
context: ctx,
}
ok := root.Run(tc.desc, tc.f)
ctx.release()
if ok != tc.ok {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok)
}
if ok != !root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
if ctx.running != 0 || ctx.numWaiting != 0 {
t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
if ok != tc.ok {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok)
}
if ok != !root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
if ctx.running != 0 || ctx.numWaiting != 0 {
t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
})
}
}
@ -655,43 +655,45 @@ func TestBRun(t *T) {
},
}}
for _, tc := range testCases {
var ok bool
buf := &bytes.Buffer{}
// This is almost like the Benchmark function, except that we override
// the benchtime and catch the failure result of the subbenchmark.
root := &B{
common: common{
signal: make(chan bool),
name: "root",
w: buf,
chatty: tc.chatty,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
benchTime: benchTimeFlag{d: 1 * time.Microsecond},
}
root.runN(1)
if ok != !tc.failed {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
}
if !ok != root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
// All tests are run as subtests
if root.result.N != 1 {
t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
t.Run(tc.desc, func(t *T) {
var ok bool
buf := &bytes.Buffer{}
// This is almost like the Benchmark function, except that we override
// the benchtime and catch the failure result of the subbenchmark.
root := &B{
common: common{
signal: make(chan bool),
name: "root",
w: buf,
chatty: tc.chatty,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
benchTime: benchTimeFlag{d: 1 * time.Microsecond},
}
root.runN(1)
if ok != !tc.failed {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
}
if !ok != root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
// All tests are run as subtests
if root.result.N != 1 {
t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
})
}
}
func makeRegexp(s string) string {
s = regexp.QuoteMeta(s)
s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d:`)
s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d\d?:`)
s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`)
return s
}

View File

@ -320,6 +320,7 @@ var (
cpuListStr *string
parallel *int
testlog *string
printer *testPrinter
haveExamples bool // are there examples?
@ -329,6 +330,48 @@ var (
numFailed uint32 // number of test failures
)
type testPrinter struct {
chatty bool
lastNameMu sync.Mutex // guards lastName
lastName string // last printed test name in chatty mode
}
func newTestPrinter(chatty bool) *testPrinter {
return &testPrinter{
chatty: chatty,
}
}
func (p *testPrinter) Print(testName, out string) {
p.Fprint(os.Stdout, testName, out)
}
func (p *testPrinter) Fprint(w io.Writer, testName, out string) {
p.lastNameMu.Lock()
defer p.lastNameMu.Unlock()
if !p.chatty ||
strings.HasPrefix(out, "--- PASS") ||
strings.HasPrefix(out, "--- FAIL") ||
strings.HasPrefix(out, "=== CONT") ||
strings.HasPrefix(out, "=== RUN") {
p.lastName = testName
fmt.Fprint(w, out)
return
}
if p.lastName == "" {
p.lastName = testName
} else if p.lastName != testName {
// Always printed as-is, with 0 decoration or indentation. So, we skip
// printing to w.
fmt.Printf("=== CONT %s\n", testName)
p.lastName = testName
}
fmt.Fprint(w, out)
}
// The maximum number of stack frames to go through when skipping helper functions for
// the purpose of decorating log messages.
const maxStackLen = 50
@ -347,10 +390,11 @@ type common struct {
cleanup func() // optional function to be called at the end of the test
chatty bool // A copy of the chatty flag.
bench bool // Whether the current test is a benchmark.
finished bool // Test function has completed.
hasSub int32 // written atomically
raceErrors int // number of races detected during test
runner string // function name of tRunner running the test
hasSub int32 // Written atomically.
raceErrors int // Number of races detected during test.
runner string // Function name of tRunner running the test.
parent *common
level int // Nesting depth of test or benchmark.
@ -480,9 +524,6 @@ func (c *common) decorate(s string, skip int) string {
buf := new(strings.Builder)
// Every line is indented at least 4 spaces.
buf.WriteString(" ")
if c.chatty {
fmt.Fprintf(buf, "%s: ", c.name)
}
fmt.Fprintf(buf, "%s:%d: ", file, line)
lines := strings.Split(s, "\n")
if l := len(lines); l > 1 && lines[l-1] == "" {
@ -501,12 +542,12 @@ func (c *common) decorate(s string, skip int) string {
// flushToParent writes c.output to the parent after first writing the header
// with the given format and arguments.
func (c *common) flushToParent(format string, args ...interface{}) {
func (c *common) flushToParent(testName, format string, args ...interface{}) {
p := c.parent
p.mu.Lock()
defer p.mu.Unlock()
fmt.Fprintf(p.w, format, args...)
printer.Fprint(p.w, testName, fmt.Sprintf(format, args...))
c.mu.Lock()
defer c.mu.Unlock()
@ -680,7 +721,14 @@ func (c *common) logDepth(s string, depth int) {
panic("Log in goroutine after " + c.name + " has completed")
} else {
if c.chatty {
fmt.Print(c.decorate(s, depth+1))
if c.bench {
// Benchmarks don't print === CONT, so we should skip the test
// printer and just print straight to stdout.
fmt.Print(c.decorate(s, depth+1))
} else {
printer.Print(c.name, c.decorate(s, depth+1))
}
return
}
c.output = append(c.output, c.decorate(s, depth+1)...)
@ -909,7 +957,7 @@ func (t *T) Parallel() {
for ; root.parent != nil; root = root.parent {
}
root.mu.Lock()
fmt.Fprintf(root.w, "=== CONT %s\n", t.name)
printer.Fprint(root.w, t.name, fmt.Sprintf("=== CONT %s\n", t.name))
root.mu.Unlock()
}
@ -968,7 +1016,7 @@ func tRunner(t *T, fn func(t *T)) {
root.duration += time.Since(root.start)
d := root.duration
root.mu.Unlock()
root.flushToParent("--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
}
@ -1067,7 +1115,7 @@ func (t *T) Run(name string, f func(t *T)) bool {
for ; root.parent != nil; root = root.parent {
}
root.mu.Lock()
fmt.Fprintf(root.w, "=== RUN %s\n", t.name)
printer.Fprint(root.w, t.name, fmt.Sprintf("=== RUN %s\n", t.name))
root.mu.Unlock()
}
// Instead of reducing the running count of this test before calling the
@ -1215,6 +1263,8 @@ func (m *M) Run() int {
flag.Parse()
}
printer = newTestPrinter(Verbose())
if *parallel < 1 {
fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")
flag.Usage()
@ -1254,12 +1304,12 @@ func (t *T) report() {
dstr := fmtDuration(t.duration)
format := "--- %s: %s (%s)\n"
if t.Failed() {
t.flushToParent(format, "FAIL", t.name, dstr)
t.flushToParent(t.name, format, "FAIL", t.name, dstr)
} else if t.chatty {
if t.Skipped() {
t.flushToParent(format, "SKIP", t.name, dstr)
t.flushToParent(t.name, format, "SKIP", t.name, dstr)
} else {
t.flushToParent(format, "PASS", t.name, dstr)
t.flushToParent(t.name, format, "PASS", t.name, dstr)
}
}
}