libgo: Update to weekly.2012-03-04 release.

From-SVN: r185010
commit 593f74bbab (parent 46402cbe0b)
Author: Ian Lance Taylor
Date:   2012-03-06 17:57:23 +00:00

147 changed files with 3080 additions and 2022 deletions

View File

@ -1,4 +1,4 @@
96bd78e7d35e
f4470a54e6db
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.

View File

@ -658,10 +658,17 @@ go_net_sock_file = go/net/sock_linux.go
go_net_sockopt_file = go/net/sockopt_linux.go
go_net_sockoptip_file = go/net/sockoptip_linux.go
else
if LIBGO_IS_FREEBSD
go_net_cgo_file = go/net/cgo_bsd.go
go_net_sock_file = go/net/sock_bsd.go
go_net_sockopt_file = go/net/sockopt_bsd.go
go_net_sockoptip_file = go/net/sockoptip_bsd.go
go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_freebsd.go
else
go_net_cgo_file = go/net/cgo_bsd.go
go_net_sock_file = go/net/sock_bsd.go
go_net_sockopt_file = go/net/sockopt_bsd.go
go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_netbsd.go
endif
endif
endif
endif
@ -704,6 +711,7 @@ go_net_files = \
go/net/ipsock.go \
go/net/ipsock_posix.go \
go/net/lookup_unix.go \
go/net/mac.go \
go/net/net.go \
go/net/parse.go \
go/net/pipe.go \
@ -1126,8 +1134,7 @@ go_go_ast_files = \
go/go/ast/walk.go
go_go_build_files = \
go/go/build/build.go \
go/go/build/dir.go \
go/go/build/path.go \
go/go/build/doc.go \
syslist.go
go_go_doc_files = \
go/go/doc/comment.go \

View File

@ -1012,19 +1012,23 @@ go_mime_files = \
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_newpollserver_file = go/net/newpollserver.go
@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_newpollserver_file = go/net/newpollserver.go
@LIBGO_IS_RTEMS_TRUE@go_net_newpollserver_file = go/net/newpollserver_rtems.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_cgo_file = go/net/cgo_bsd.go
@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_cgo_file = go/net/cgo_bsd.go
@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_cgo_file = go/net/cgo_bsd.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_cgo_file = go/net/cgo_linux.go
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_cgo_file = go/net/cgo_linux.go
@LIBGO_IS_LINUX_TRUE@go_net_cgo_file = go/net/cgo_linux.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go
@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go
@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sock_file = go/net/sock_linux.go
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sock_file = go/net/sock_linux.go
@LIBGO_IS_LINUX_TRUE@go_net_sock_file = go/net/sock_linux.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockopt_file = go/net/sockopt_bsd.go
@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockopt_file = go/net/sockopt_bsd.go
@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockopt_file = go/net/sockopt_bsd.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sockopt_file = go/net/sockopt_linux.go
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sockopt_file = go/net/sockopt_linux.go
@LIBGO_IS_LINUX_TRUE@go_net_sockopt_file = go/net/sockopt_linux.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockoptip_file = go/net/sockoptip_bsd.go
@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_netbsd.go
@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_freebsd.go
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sockoptip_file = go/net/sockoptip_linux.go
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sockoptip_file = go/net/sockoptip_linux.go
@LIBGO_IS_LINUX_TRUE@go_net_sockoptip_file = go/net/sockoptip_linux.go
@ -1055,6 +1059,7 @@ go_net_files = \
go/net/ipsock.go \
go/net/ipsock_posix.go \
go/net/lookup_unix.go \
go/net/mac.go \
go/net/net.go \
go/net/parse.go \
go/net/pipe.go \
@ -1467,8 +1472,7 @@ go_go_ast_files = \
go_go_build_files = \
go/go/build/build.go \
go/go/build/dir.go \
go/go/build/path.go \
go/go/build/doc.go \
syslist.go
go_go_doc_files = \

View File

@ -169,48 +169,21 @@ func (r *checksumReader) Read(b []byte) (n int, err error) {
func (r *checksumReader) Close() error { return r.rc.Close() }
func readFileHeader(f *File, r io.Reader) error {
var b [fileHeaderLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
return err
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != fileHeaderSignature {
return ErrFormat
}
f.ReaderVersion = c.Uint16(b[4:6])
f.Flags = c.Uint16(b[6:8])
f.Method = c.Uint16(b[8:10])
f.ModifiedTime = c.Uint16(b[10:12])
f.ModifiedDate = c.Uint16(b[12:14])
f.CRC32 = c.Uint32(b[14:18])
f.CompressedSize = c.Uint32(b[18:22])
f.UncompressedSize = c.Uint32(b[22:26])
filenameLen := int(c.Uint16(b[26:28]))
extraLen := int(c.Uint16(b[28:30]))
d := make([]byte, filenameLen+extraLen)
if _, err := io.ReadFull(r, d); err != nil {
return err
}
f.Name = string(d[:filenameLen])
f.Extra = d[filenameLen:]
return nil
}
// findBodyOffset does the minimum work to verify the file has a header
// and returns the file body offset.
func (f *File) findBodyOffset() (int64, error) {
r := io.NewSectionReader(f.zipr, f.headerOffset, f.zipsize-f.headerOffset)
var b [fileHeaderLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
var buf [fileHeaderLen]byte
if _, err := io.ReadFull(r, buf[:]); err != nil {
return 0, err
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != fileHeaderSignature {
b := readBuf(buf[:])
if sig := b.uint32(); sig != fileHeaderSignature {
return 0, ErrFormat
}
filenameLen := int(c.Uint16(b[26:28]))
extraLen := int(c.Uint16(b[28:30]))
b = b[22:] // skip over most of the header
filenameLen := int(b.uint16())
extraLen := int(b.uint16())
return int64(fileHeaderLen + filenameLen + extraLen), nil
}
@ -218,30 +191,29 @@ func (f *File) findBodyOffset() (int64, error) {
// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
// and ErrFormat if it doesn't find a valid header signature.
func readDirectoryHeader(f *File, r io.Reader) error {
var b [directoryHeaderLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
var buf [directoryHeaderLen]byte
if _, err := io.ReadFull(r, buf[:]); err != nil {
return err
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != directoryHeaderSignature {
b := readBuf(buf[:])
if sig := b.uint32(); sig != directoryHeaderSignature {
return ErrFormat
}
f.CreatorVersion = c.Uint16(b[4:6])
f.ReaderVersion = c.Uint16(b[6:8])
f.Flags = c.Uint16(b[8:10])
f.Method = c.Uint16(b[10:12])
f.ModifiedTime = c.Uint16(b[12:14])
f.ModifiedDate = c.Uint16(b[14:16])
f.CRC32 = c.Uint32(b[16:20])
f.CompressedSize = c.Uint32(b[20:24])
f.UncompressedSize = c.Uint32(b[24:28])
filenameLen := int(c.Uint16(b[28:30]))
extraLen := int(c.Uint16(b[30:32]))
commentLen := int(c.Uint16(b[32:34]))
// startDiskNumber := c.Uint16(b[34:36]) // Unused
// internalAttributes := c.Uint16(b[36:38]) // Unused
f.ExternalAttrs = c.Uint32(b[38:42])
f.headerOffset = int64(c.Uint32(b[42:46]))
f.CreatorVersion = b.uint16()
f.ReaderVersion = b.uint16()
f.Flags = b.uint16()
f.Method = b.uint16()
f.ModifiedTime = b.uint16()
f.ModifiedDate = b.uint16()
f.CRC32 = b.uint32()
f.CompressedSize = b.uint32()
f.UncompressedSize = b.uint32()
filenameLen := int(b.uint16())
extraLen := int(b.uint16())
commentLen := int(b.uint16())
b = b[4:] // skipped start disk number and internal attributes (2x uint16)
f.ExternalAttrs = b.uint32()
f.headerOffset = int64(b.uint32())
d := make([]byte, filenameLen+extraLen+commentLen)
if _, err := io.ReadFull(r, d); err != nil {
return err
@ -253,30 +225,30 @@ func readDirectoryHeader(f *File, r io.Reader) error {
}
func readDataDescriptor(r io.Reader, f *File) error {
var b [dataDescriptorLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
var buf [dataDescriptorLen]byte
if _, err := io.ReadFull(r, buf[:]); err != nil {
return err
}
c := binary.LittleEndian
f.CRC32 = c.Uint32(b[:4])
f.CompressedSize = c.Uint32(b[4:8])
f.UncompressedSize = c.Uint32(b[8:12])
b := readBuf(buf[:])
f.CRC32 = b.uint32()
f.CompressedSize = b.uint32()
f.UncompressedSize = b.uint32()
return nil
}
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
// look for directoryEndSignature in the last 1k, then in the last 65k
var b []byte
var buf []byte
for i, bLen := range []int64{1024, 65 * 1024} {
if bLen > size {
bLen = size
}
b = make([]byte, int(bLen))
if _, err := r.ReadAt(b, size-bLen); err != nil && err != io.EOF {
buf = make([]byte, int(bLen))
if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
return nil, err
}
if p := findSignatureInBlock(b); p >= 0 {
b = b[p:]
if p := findSignatureInBlock(buf); p >= 0 {
buf = buf[p:]
break
}
if i == 1 || bLen == size {
@ -285,16 +257,21 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error)
}
// read header into struct
c := binary.LittleEndian
d := new(directoryEnd)
d.diskNbr = c.Uint16(b[4:6])
d.dirDiskNbr = c.Uint16(b[6:8])
d.dirRecordsThisDisk = c.Uint16(b[8:10])
d.directoryRecords = c.Uint16(b[10:12])
d.directorySize = c.Uint32(b[12:16])
d.directoryOffset = c.Uint32(b[16:20])
d.commentLen = c.Uint16(b[20:22])
d.comment = string(b[22 : 22+int(d.commentLen)])
b := readBuf(buf[4:]) // skip signature
d := &directoryEnd{
diskNbr: b.uint16(),
dirDiskNbr: b.uint16(),
dirRecordsThisDisk: b.uint16(),
directoryRecords: b.uint16(),
directorySize: b.uint32(),
directoryOffset: b.uint32(),
commentLen: b.uint16(),
}
l := int(d.commentLen)
if l > len(b) {
return nil, errors.New("zip: invalid comment length")
}
d.comment = string(b[:l])
return d, nil
}
@ -311,3 +288,17 @@ func findSignatureInBlock(b []byte) int {
}
return -1
}
type readBuf []byte
func (b *readBuf) uint16() uint16 {
v := binary.LittleEndian.Uint16(*b)
*b = (*b)[2:]
return v
}
func (b *readBuf) uint32() uint32 {
v := binary.LittleEndian.Uint32(*b)
*b = (*b)[4:]
return v
}
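For readers new to this refactoring, the readBuf cursor above replaces the repeated binary.LittleEndian slice indexing in the old reader code. A minimal standalone sketch of the same pattern (readBuf is unexported in archive/zip, so the copy below is illustrative only; the byte values are made up):

package main

import (
    "encoding/binary"
    "fmt"
)

type readBuf []byte

func (b *readBuf) uint16() uint16 {
    v := binary.LittleEndian.Uint16(*b)
    *b = (*b)[2:]
    return v
}

func (b *readBuf) uint32() uint32 {
    v := binary.LittleEndian.Uint32(*b)
    *b = (*b)[4:]
    return v
}

func main() {
    // A little-endian record: a uint16 followed by a uint32 (values made up).
    b := readBuf{0x02, 0x01, 0x06, 0x05, 0x04, 0x03}
    fmt.Printf("%#x %#x\n", b.uint16(), b.uint32()) // 0x102 0x3040506
}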

View File

@ -165,7 +165,7 @@ func readTestZip(t *testing.T, zt ZipTest) {
t.Errorf("%s: comment=%q, want %q", zt.Name, z.Comment, zt.Comment)
}
if len(z.File) != len(zt.File) {
t.Errorf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File))
t.Fatalf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File))
}
// test read of each file

View File

@ -100,16 +100,6 @@ type directoryEnd struct {
comment string
}
func recoverError(errp *error) {
if e := recover(); e != nil {
if err, ok := e.(error); ok {
*errp = err
return
}
panic(e)
}
}
// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: http://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx

View File

@ -37,10 +37,10 @@ func NewWriter(w io.Writer) *Writer {
// Close finishes writing the zip file by writing the central directory.
// It does not (and can not) close the underlying writer.
func (w *Writer) Close() (err error) {
func (w *Writer) Close() error {
if w.last != nil && !w.last.closed {
if err = w.last.close(); err != nil {
return
if err := w.last.close(); err != nil {
return err
}
w.last = nil
}
@ -49,43 +49,55 @@ func (w *Writer) Close() (err error) {
}
w.closed = true
defer recoverError(&err)
// write central directory
start := w.cw.count
for _, h := range w.dir {
write(w.cw, uint32(directoryHeaderSignature))
write(w.cw, h.CreatorVersion)
write(w.cw, h.ReaderVersion)
write(w.cw, h.Flags)
write(w.cw, h.Method)
write(w.cw, h.ModifiedTime)
write(w.cw, h.ModifiedDate)
write(w.cw, h.CRC32)
write(w.cw, h.CompressedSize)
write(w.cw, h.UncompressedSize)
write(w.cw, uint16(len(h.Name)))
write(w.cw, uint16(len(h.Extra)))
write(w.cw, uint16(len(h.Comment)))
write(w.cw, uint16(0)) // disk number start
write(w.cw, uint16(0)) // internal file attributes
write(w.cw, h.ExternalAttrs)
write(w.cw, h.offset)
writeBytes(w.cw, []byte(h.Name))
writeBytes(w.cw, h.Extra)
writeBytes(w.cw, []byte(h.Comment))
var buf [directoryHeaderLen]byte
b := writeBuf(buf[:])
b.uint32(uint32(directoryHeaderSignature))
b.uint16(h.CreatorVersion)
b.uint16(h.ReaderVersion)
b.uint16(h.Flags)
b.uint16(h.Method)
b.uint16(h.ModifiedTime)
b.uint16(h.ModifiedDate)
b.uint32(h.CRC32)
b.uint32(h.CompressedSize)
b.uint32(h.UncompressedSize)
b.uint16(uint16(len(h.Name)))
b.uint16(uint16(len(h.Extra)))
b.uint16(uint16(len(h.Comment)))
b = b[4:] // skip disk number start and internal file attr (2x uint16)
b.uint32(h.ExternalAttrs)
b.uint32(h.offset)
if _, err := w.cw.Write(buf[:]); err != nil {
return err
}
if _, err := io.WriteString(w.cw, h.Name); err != nil {
return err
}
if _, err := w.cw.Write(h.Extra); err != nil {
return err
}
if _, err := io.WriteString(w.cw, h.Comment); err != nil {
return err
}
}
end := w.cw.count
// write end record
write(w.cw, uint32(directoryEndSignature))
write(w.cw, uint16(0)) // disk number
write(w.cw, uint16(0)) // disk number where directory starts
write(w.cw, uint16(len(w.dir))) // number of entries this disk
write(w.cw, uint16(len(w.dir))) // number of entries total
write(w.cw, uint32(end-start)) // size of directory
write(w.cw, uint32(start)) // start of directory
write(w.cw, uint16(0)) // size of comment
var buf [directoryEndLen]byte
b := writeBuf(buf[:])
b.uint32(uint32(directoryEndSignature))
b = b[4:] // skip over disk number and first disk number (2x uint16)
b.uint16(uint16(len(w.dir))) // number of entries this disk
b.uint16(uint16(len(w.dir))) // number of entries total
b.uint32(uint32(end - start)) // size of directory
b.uint32(uint32(start)) // start of directory
// skipped size of comment (always zero)
if _, err := w.cw.Write(buf[:]); err != nil {
return err
}
return w.cw.w.(*bufio.Writer).Flush()
}
@ -152,22 +164,28 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
return fw, nil
}
func writeHeader(w io.Writer, h *FileHeader) (err error) {
defer recoverError(&err)
write(w, uint32(fileHeaderSignature))
write(w, h.ReaderVersion)
write(w, h.Flags)
write(w, h.Method)
write(w, h.ModifiedTime)
write(w, h.ModifiedDate)
write(w, h.CRC32)
write(w, h.CompressedSize)
write(w, h.UncompressedSize)
write(w, uint16(len(h.Name)))
write(w, uint16(len(h.Extra)))
writeBytes(w, []byte(h.Name))
writeBytes(w, h.Extra)
return nil
func writeHeader(w io.Writer, h *FileHeader) error {
var buf [fileHeaderLen]byte
b := writeBuf(buf[:])
b.uint32(uint32(fileHeaderSignature))
b.uint16(h.ReaderVersion)
b.uint16(h.Flags)
b.uint16(h.Method)
b.uint16(h.ModifiedTime)
b.uint16(h.ModifiedDate)
b.uint32(h.CRC32)
b.uint32(h.CompressedSize)
b.uint32(h.UncompressedSize)
b.uint16(uint16(len(h.Name)))
b.uint16(uint16(len(h.Extra)))
if _, err := w.Write(buf[:]); err != nil {
return err
}
if _, err := io.WriteString(w, h.Name); err != nil {
return err
}
_, err := w.Write(h.Extra)
return err
}
type fileWriter struct {
@ -188,13 +206,13 @@ func (w *fileWriter) Write(p []byte) (int, error) {
return w.rawCount.Write(p)
}
func (w *fileWriter) close() (err error) {
func (w *fileWriter) close() error {
if w.closed {
return errors.New("zip: file closed twice")
}
w.closed = true
if err = w.comp.Close(); err != nil {
return
if err := w.comp.Close(); err != nil {
return err
}
// update FileHeader
@ -204,12 +222,13 @@ func (w *fileWriter) close() (err error) {
fh.UncompressedSize = uint32(w.rawCount.count)
// write data descriptor
defer recoverError(&err)
write(w.zipw, fh.CRC32)
write(w.zipw, fh.CompressedSize)
write(w.zipw, fh.UncompressedSize)
return nil
var buf [dataDescriptorLen]byte
b := writeBuf(buf[:])
b.uint32(fh.CRC32)
b.uint32(fh.CompressedSize)
b.uint32(fh.UncompressedSize)
_, err := w.zipw.Write(buf[:])
return err
}
type countWriter struct {
@ -231,18 +250,14 @@ func (w nopCloser) Close() error {
return nil
}
func write(w io.Writer, data interface{}) {
if err := binary.Write(w, binary.LittleEndian, data); err != nil {
panic(err)
}
type writeBuf []byte
func (b *writeBuf) uint16(v uint16) {
binary.LittleEndian.PutUint16(*b, v)
*b = (*b)[2:]
}
func writeBytes(w io.Writer, b []byte) {
n, err := w.Write(b)
if err != nil {
panic(err)
}
if n != len(b) {
panic(io.ErrShortWrite)
}
func (b *writeBuf) uint32(v uint32) {
binary.LittleEndian.PutUint32(*b, v)
*b = (*b)[4:]
}
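The writer side mirrors the reader: a writeBuf cursor fills a fixed-size stack buffer that is then written with a single Write call, replacing the reflection-based write/writeBytes helpers and their panic/recover error handling. A standalone sketch of the pattern (again, writeBuf is unexported in archive/zip; the field values below are made up):

package main

import (
    "encoding/binary"
    "fmt"
)

type writeBuf []byte

func (b *writeBuf) uint16(v uint16) {
    binary.LittleEndian.PutUint16(*b, v)
    *b = (*b)[2:]
}

func (b *writeBuf) uint32(v uint32) {
    binary.LittleEndian.PutUint32(*b, v)
    *b = (*b)[4:]
}

func main() {
    var buf [12]byte // e.g. the 3*uint32 data descriptor
    b := writeBuf(buf[:])
    b.uint32(0xDEADBEEF) // CRC32 (made-up value)
    b.uint32(1024)       // compressed size
    b.uint32(4096)       // uncompressed size
    fmt.Printf("% x\n", buf)
}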

View File

@ -23,7 +23,6 @@ var (
ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
ErrBufferFull = errors.New("bufio: buffer full")
ErrNegativeCount = errors.New("bufio: negative count")
errInternal = errors.New("bufio: internal error")
)
// Buffered input.

View File

@ -7,7 +7,7 @@
package ecdsa
// References:
// [NSA]: Suite B implementor's guide to FIPS 186-3,
// [NSA]: Suite B implementer's guide to FIPS 186-3,
// http://www.nsa.gov/ia/_files/ecdsa.pdf
// [SECG]: SECG, SEC1
// http://www.secg.org/download/aid-780/sec1-v2.pdf

View File

@ -5,11 +5,9 @@
package tls
/*
// Note: We disable -Werror here because the code in this file uses a deprecated API to stay
// compatible with both Mac OS X 10.6 and 10.7. Using a deprecated function on Darwin generates
// a warning.
#cgo CFLAGS: -Wno-error -Wno-deprecated-declarations
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
@ -40,26 +38,12 @@ int FetchPEMRoots(CFDataRef *pemRoots) {
continue;
}
// SecKeychainImportExport is deprecated in >= OS X 10.7, and has been replaced by
// SecItemExport. If we're built on a host with a Lion SDK, this code gets conditionally
// included in the output, also for binaries meant for 10.6.
//
// To make sure that we run on both Mac OS X 10.6 and 10.7 we use weak linking
// and check whether SecItemExport is available before we attempt to call it. On
// 10.6, this won't be the case, and we'll fall back to calling SecKeychainItemExport.
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
if (SecItemExport) {
err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
} else
#endif
if (data == NULL) {
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {

View File

@ -135,8 +135,8 @@ func (c *Certificate) isValid(certType int, opts *VerifyOptions) error {
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or chains where the first element of
// the chain is c and the last element is from opts.Roots.
// needed. If successful, it returns one or more chains where the first
// element of the chain is c and the last element is from opts.Roots.
//
// WARNING: this doesn't do any revocation checking.
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {

View File

@ -153,7 +153,7 @@ const (
//
// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
//
// md5WithRSAEncryption OBJECT IDENTIFER ::= { pkcs-1 4 }
// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
//
// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
//
@ -172,9 +172,9 @@ const (
//
// RFC 5758 3.1 DSA Signature Algorithms
//
// dsaWithSha356 OBJECT IDENTIFER ::= {
// dsaWithSha256 OBJECT IDENTIFIER ::= {
// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
// algorithms(4) id-dsa-with-sha2(3) 2}
// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
//
var (
oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}

View File

@ -29,17 +29,13 @@ type ByteOrder interface {
String() string
}
// This is byte instead of struct{} so that it can be compared,
// allowing, e.g., order == binary.LittleEndian.
type unused byte
// LittleEndian is the little-endian implementation of ByteOrder.
var LittleEndian littleEndian
// BigEndian is the big-endian implementation of ByteOrder.
var BigEndian bigEndian
type littleEndian unused
type littleEndian struct{}
func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }
@ -79,7 +75,7 @@ func (littleEndian) String() string { return "LittleEndian" }
func (littleEndian) GoString() string { return "binary.LittleEndian" }
type bigEndian unused
type bigEndian struct{}
func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 }
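Dropping the unused byte type works because struct{} values are themselves comparable in Go, so expressions like order == binary.LittleEndian still compile and behave as before. A small sketch illustrating that (nothing here is specific to this commit beyond the comparison):

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    var order binary.ByteOrder = binary.LittleEndian
    fmt.Println(order == binary.LittleEndian)                 // true: struct{} values compare fine
    fmt.Println(binary.BigEndian.Uint16([]byte{0x01, 0x02}))  // 258, i.e. 0x0102
}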

View File

@ -1455,11 +1455,14 @@ func TestFuzz(t *testing.T) {
func TestFuzzRegressions(t *testing.T) {
// An instance triggering a type name of length ~102 GB.
testFuzz(t, 1328492090837718000, 100, new(float32))
// An instance triggering a type name of 1.6 GB.
// Commented out because it takes 5m to run.
//testFuzz(t, 1330522872628565000, 100, new(int))
}
func testFuzz(t *testing.T, seed int64, n int, input ...interface{}) {
t.Logf("seed=%d n=%d\n", seed, n)
for _, e := range input {
t.Logf("seed=%d n=%d e=%T", seed, n, e)
rng := rand.New(rand.NewSource(seed))
for i := 0; i < n; i++ {
encFuzzDec(rng, e)

View File

@ -3,14 +3,15 @@
// license that can be found in the LICENSE file.
// Delete the next line to include in the gob package.
// +build gob-debug
// +build ignore
package gob
// This file is not normally included in the gob package. Used only for debugging the package itself.
// Add debug.go to the files listed in the Makefile to add Debug to the gob package.
// Except for reading uints, it is an implementation of a reader that is independent of
// the one implemented by Decoder.
// To enable the Debug function, delete the +build ignore line above and do
// go install
import (
"bytes"

View File

@ -392,12 +392,12 @@ func decUint8Slice(i *decInstr, state *decoderState, p unsafe.Pointer) {
}
p = *(*unsafe.Pointer)(p)
}
n := int(state.decodeUint())
if n < 0 {
errorf("negative length decoding []byte")
n := state.decodeUint()
if n > uint64(state.b.Len()) {
errorf("length of []byte exceeds input size (%d bytes)", n)
}
slice := (*[]uint8)(p)
if cap(*slice) < n {
if uint64(cap(*slice)) < n {
*slice = make([]uint8, n)
} else {
*slice = (*slice)[0:n]
@ -417,7 +417,11 @@ func decString(i *decInstr, state *decoderState, p unsafe.Pointer) {
}
p = *(*unsafe.Pointer)(p)
}
b := make([]byte, state.decodeUint())
n := state.decodeUint()
if n > uint64(state.b.Len()) {
errorf("string length exceeds input size (%d bytes)", n)
}
b := make([]byte, n)
state.b.Read(b)
// It would be a shame to do the obvious thing here,
// *(*string)(p) = string(b)
@ -647,7 +651,11 @@ func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
// decodeSlice decodes a slice and stores the slice header through p.
// Slices are encoded as an unsigned length followed by the elements.
func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) {
n := int(uintptr(state.decodeUint()))
nr := state.decodeUint()
if nr > uint64(state.b.Len()) {
errorf("length of slice exceeds input size (%d elements)", nr)
}
n := int(nr)
if indir > 0 {
up := unsafe.Pointer(p)
if *(*unsafe.Pointer)(up) == nil {
@ -702,6 +710,9 @@ func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p ui
*(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
return
}
if len(name) > 1024 {
errorf("name too long (%d bytes): %.20q...", len(name), name)
}
// The concrete type must be registered.
typ, ok := nameToConcreteType[name]
if !ok {
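The decoder changes above all follow one pattern: a decoded length is checked against the bytes actually remaining in the input before anything is allocated, so a corrupt or hostile stream can no longer force a huge allocation. A minimal sketch of that check in isolation (the helper name and error text below are illustrative, not gob's own):

package main

import (
    "bytes"
    "fmt"
)

// readByteSlice refuses lengths larger than the remaining input, mirroring
// the n > uint64(state.b.Len()) guards added in decode.go.
func readByteSlice(in *bytes.Buffer, n uint64) ([]byte, error) {
    if n > uint64(in.Len()) {
        return nil, fmt.Errorf("length %d exceeds input size (%d bytes)", n, in.Len())
    }
    b := make([]byte, n)
    in.Read(b)
    return b, nil
}

func main() {
    in := bytes.NewBufferString("hello")
    fmt.Println(readByteSlice(in, 1<<40)) // rejected instead of allocating about a terabyte
}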

View File

@ -7,6 +7,7 @@
package main
// Need to compile package gob with debug.go to build this program.
// See comments in debug.go for how to do this.
import (
"encoding/gob"

View File

@ -709,7 +709,7 @@ func TestGobPtrSlices(t *testing.T) {
t.Fatal("decode:", err)
}
if !reflect.DeepEqual(in, out) {
t.Fatal("got %v; wanted %v", out, in)
t.Fatalf("got %v; wanted %v", out, in)
}
}

View File

@ -239,16 +239,6 @@ func TestEscape(t *testing.T) {
}
}
func TestHTMLEscape(t *testing.T) {
b, err := MarshalForHTML("foobarbaz<>&quux")
if err != nil {
t.Fatalf("MarshalForHTML error: %v", err)
}
if !bytes.Equal(b, []byte(`"foobarbaz\u003c\u003e\u0026quux"`)) {
t.Fatalf("Unexpected encoding of \"<>&\": %s", b)
}
}
// WrongString is a struct that's misusing the ,string modifier.
type WrongString struct {
Message string `json:"result,string"`

View File

@ -123,17 +123,6 @@ func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
return buf.Bytes(), nil
}
// MarshalForHTML is like Marshal but applies HTMLEscape to the output.
func MarshalForHTML(v interface{}) ([]byte, error) {
b, err := Marshal(v)
if err != nil {
return nil, err
}
var buf bytes.Buffer
HTMLEscape(&buf, b)
return buf.Bytes(), nil
}
// HTMLEscape appends to dst the JSON-encoded src with <, >, and &
// characters inside string literals changed to \u003c, \u003e, \u0026
// so that the JSON will be safe to embed inside HTML <script> tags.
@ -200,11 +189,6 @@ func (e *MarshalerError) Error() string {
return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}
type interfaceOrPtrValue interface {
IsNil() bool
Elem() reflect.Value
}
var hex = "0123456789abcdef"
// An encodeState encodes JSON into a bytes.Buffer.
@ -276,7 +260,7 @@ func (e *encodeState) reflectValueQuoted(v reflect.Value, quoted bool) {
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = Compact(&e.Buffer, b)
err = compact(&e.Buffer, b, true)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
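With MarshalForHTML deleted, callers that relied on it can compose the two pieces that remain exported, which is exactly what the removed helper's body did. A hedged sketch (the sample string comes from the deleted test):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

func main() {
    b, err := json.Marshal("foobarbaz<>&quux")
    if err != nil {
        panic(err)
    }
    var buf bytes.Buffer
    json.HTMLEscape(&buf, b) // escape <, >, & for embedding in HTML <script> tags
    fmt.Println(buf.String()) // "foobarbaz\u003c\u003e\u0026quux"
}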

View File

@ -167,3 +167,22 @@ func TestRefValMarshal(t *testing.T) {
t.Errorf("got %q, want %q", got, want)
}
}
// C implements Marshaler and returns unescaped JSON.
type C int
func (C) MarshalJSON() ([]byte, error) {
return []byte(`"<&>"`), nil
}
func TestMarshalerEscaping(t *testing.T) {
var c C
const want = `"\u003c\u0026\u003e"`
b, err := Marshal(c)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if got := string(b); got != want {
t.Errorf("got %q, want %q", got, want)
}
}

View File

@ -9,11 +9,24 @@ import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
v := scan.step(&scan, int(c))
if v >= scanSkipSpace {
if v == scanError {

View File

@ -136,12 +136,12 @@ type NamePrecedence struct {
type XMLNameWithTag struct {
XMLName Name `xml:"InXMLNameTag"`
Value string ",chardata"
Value string `xml:",chardata"`
}
type XMLNameWithoutTag struct {
XMLName Name
Value string ",chardata"
Value string `xml:",chardata"`
}
type NameInField struct {
@ -532,9 +532,9 @@ var marshalTests = []struct {
InFieldName: "D",
},
ExpectXML: `<Parent>` +
`<InTag><Value>A</Value></InTag>` +
`<InXMLName><Value>B</Value></InXMLName>` +
`<InXMLNameTag><Value>C</Value></InXMLNameTag>` +
`<InTag>A</InTag>` +
`<InXMLName>B</InXMLName>` +
`<InXMLNameTag>C</InXMLNameTag>` +
`<InFieldName>D</InFieldName>` +
`</Parent>`,
MarshalOnly: true,
@ -548,9 +548,9 @@ var marshalTests = []struct {
InFieldName: "D",
},
ExpectXML: `<Parent>` +
`<InTag><Value>A</Value></InTag>` +
`<FromNameVal><Value>B</Value></FromNameVal>` +
`<InXMLNameTag><Value>C</Value></InXMLNameTag>` +
`<InTag>A</InTag>` +
`<FromNameVal>B</FromNameVal>` +
`<InXMLNameTag>C</InXMLNameTag>` +
`<InFieldName>D</InFieldName>` +
`</Parent>`,
UnmarshalOnly: true,
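The corrected struct tags rely on the ",chardata" option, which makes the tagged field the element's character data rather than a nested child element, matching the updated ExpectXML strings above. A small sketch (the Note type below is made up for illustration):

package main

import (
    "encoding/xml"
    "fmt"
)

type Note struct {
    XMLName xml.Name `xml:"Note"`
    Value   string   `xml:",chardata"`
}

func main() {
    out, err := xml.Marshal(Note{Value: "A"})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // <Note>A</Note>
}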

View File

@ -34,6 +34,8 @@ The flags are:
Verbose mode.
Debugging flags:
-comments
Parse comments (ignored if -ast not set).
-ast
Print AST (disables concurrent parsing).
-trace

View File

@ -27,8 +27,9 @@ var (
allErrors = flag.Bool("e", false, "print all (including spurious) errors")
// debugging support
printTrace = flag.Bool("trace", false, "print parse trace")
printAST = flag.Bool("ast", false, "print AST")
parseComments = flag.Bool("comments", false, "parse comments (ignored if -ast not set)")
printTrace = flag.Bool("trace", false, "print parse trace")
printAST = flag.Bool("ast", false, "print AST")
)
var exitCode = 0
@ -73,6 +74,9 @@ func parse(fset *token.FileSet, filename string, src []byte) *ast.File {
if *allErrors {
mode |= parser.SpuriousErrors
}
if *parseComments && *printAST {
mode |= parser.ParseComments
}
if *printTrace {
mode |= parser.Trace
}
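The reordered flags map onto go/parser mode bits: -comments only has an effect when the AST is printed, which is why parser.ParseComments is or'ed in only when both flags are set. A minimal sketch of that wiring outside the tool (the file contents below are hypothetical):

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
)

func main() {
    printAST, parseComments := true, true // stand-ins for the -ast and -comments flags
    var mode parser.Mode
    if parseComments && printAST {
        mode |= parser.ParseComments
    }
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "example.go", "// a comment\npackage p\n", mode)
    if err != nil {
        fmt.Println(err)
        return
    }
    ast.Print(fset, f) // comments appear in the dump only when ParseComments is set
}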

View File

@ -110,7 +110,7 @@ func (s *nodeStack) top() *Node {
return nil
}
// index returns the index of the top-most occurence of n in the stack, or -1
// index returns the index of the top-most occurrence of n in the stack, or -1
// if n is not present.
func (s *nodeStack) index(n *Node) int {
for i := len(*s) - 1; i >= 0; i-- {

View File

@ -18,17 +18,17 @@ package norm
// has the form:
// <header> <decomp_byte>* [<tccc> [<lccc>]]
// The header contains the number of bytes in the decomposition (excluding this
// length byte). The two most significant bits of this lenght byte correspond
// length byte). The two most significant bits of this length byte correspond
// to bit 2 and 3 of qcIfo (see below). The byte sequence itself starts at v+1.
// The byte sequence is followed by a trailing and leading CCC if the values
// for these are not zero. The value of v determines which ccc are appended
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
// the seqence is followed by a trailing ccc, and for v >= firstLeadingCC
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
// there is an additional leading ccc.
const (
qcInfoMask = 0xF // to clear all but the relevant bits in a qcInfo
headerLenMask = 0x3F // extract the lenght value from the header byte
headerLenMask = 0x3F // extract the length value from the header byte
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)
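A tiny worked example of the header byte layout described above, using the two masks from this file; the header value 0x83 is made up:

package main

import "fmt"

const (
    headerLenMask   = 0x3F // low six bits: decomposition length
    headerFlagsMask = 0xC0 // top two bits: qcInfo flag bits
)

func main() {
    header := byte(0x83) // 0b10000011, a hypothetical header byte
    fmt.Println(header&headerLenMask, (header&headerFlagsMask)>>6) // length 3, flag bits 0b10
}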

View File

@ -75,7 +75,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer {
}
// AddFromString parses a string that contains comma-separated values
// specifing hosts that should use the bypass proxy. Each value is either an
// specifying hosts that should use the bypass proxy. Each value is either an
// IP address, a CIDR range, a zone (*.example.com) or a hostname
// (localhost). A best effort is made to parse the string and errors are
// ignored.

View File

@ -18,6 +18,7 @@ import (
"os"
"path/filepath"
"strconv"
"strings"
"text/scanner"
)
@ -39,11 +40,14 @@ func findPkg(path string) (filename, id string) {
switch path[0] {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
tree, pkg, err := build.FindTree(path)
if err != nil {
bp, _ := build.Import(path, "", build.FindOnly)
if bp.PkgObj == "" {
return
}
noext = filepath.Join(tree.PkgDir(), pkg)
noext = bp.PkgObj
if strings.HasSuffix(noext, ".a") {
noext = noext[:len(noext)-2]
}
case '.':
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
@ -742,7 +746,7 @@ func (p *gcParser) parseVarDecl() {
}
// FuncBody = "{" ... "}" .
//
//
func (p *gcParser) parseFuncBody() {
p.expect('{')
for i := 1; i > 0; p.next() {
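The findPkg change above switches the importer from the old build.FindTree API to build.Import with FindOnly, which locates a package's directory and installed object without reading any source files. A standalone sketch of that call (using "fmt" as an example import path):

package main

import (
    "fmt"
    "go/build"
)

func main() {
    bp, err := build.Import("fmt", "", build.FindOnly)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(bp.Dir)    // source directory under GOROOT
    fmt.Println(bp.PkgObj) // installed .a file, e.g. .../pkg/<goos>_<goarch>/fmt.a
}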

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// PACKAGE UNDER CONSTRUCTION. ANY AND ALL PARTS MAY CHANGE.
// Package types declares the types used to represent Go types.
// Package types declares the types used to represent Go types
// (UNDER CONSTRUCTION). ANY AND ALL PARTS MAY CHANGE.
//
package types

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains printing suppport for ASTs.
// This file contains printing support for ASTs.
package ast

View File

@ -2,10 +2,948 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package build provides tools for building Go packages.
package build
import "errors"
import (
"bytes"
"errors"
"fmt"
"go/ast"
"go/doc"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
pathpkg "path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"unicode"
)
// A Context specifies the supporting context for a build.
type Context struct {
GOARCH string // target architecture
GOOS string // target operating system
GOROOT string // Go root
GOPATH string // Go path
CgoEnabled bool // whether cgo can be used
BuildTags []string // additional tags to recognize in +build lines
UseAllFiles bool // use files regardless of +build lines, file names
Gccgo bool // assume use of gccgo when computing object paths
// By default, Import uses the operating system's file system calls
// to read directories and files. To read from other sources,
// callers can set the following functions. They all have default
// behaviors that use the local file system, so clients need only set
// the functions whose behaviors they wish to change.
// JoinPath joins the sequence of path fragments into a single path.
// If JoinPath is nil, Import uses filepath.Join.
JoinPath func(elem ...string) string
// SplitPathList splits the path list into a slice of individual paths.
// If SplitPathList is nil, Import uses filepath.SplitList.
SplitPathList func(list string) []string
// IsAbsPath reports whether path is an absolute path.
// If IsAbsPath is nil, Import uses filepath.IsAbs.
IsAbsPath func(path string) bool
// IsDir reports whether the path names a directory.
// If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
IsDir func(path string) bool
// HasSubdir reports whether dir is a subdirectory of
// (perhaps multiple levels below) root.
// If so, HasSubdir sets rel to a slash-separated path that
// can be joined to root to produce a path equivalent to dir.
// If HasSubdir is nil, Import uses an implementation built on
// filepath.EvalSymlinks.
HasSubdir func(root, dir string) (rel string, ok bool)
// ReadDir returns a slice of os.FileInfo, sorted by Name,
// describing the content of the named directory.
// If ReadDir is nil, Import uses io.ReadDir.
ReadDir func(dir string) (fi []os.FileInfo, err error)
// OpenFile opens a file (not a directory) for reading.
// If OpenFile is nil, Import uses os.Open.
OpenFile func(path string) (r io.ReadCloser, err error)
}
// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
func (ctxt *Context) joinPath(elem ...string) string {
if f := ctxt.JoinPath; f != nil {
return f(elem...)
}
return filepath.Join(elem...)
}
// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
func (ctxt *Context) splitPathList(s string) []string {
if f := ctxt.SplitPathList; f != nil {
return f(s)
}
return filepath.SplitList(s)
}
// isAbsPath calls ctxt.IsAbsSPath (if not nil) or else filepath.IsAbs.
func (ctxt *Context) isAbsPath(path string) bool {
if f := ctxt.IsAbsPath; f != nil {
return f(path)
}
return filepath.IsAbs(path)
}
// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
func (ctxt *Context) isDir(path string) bool {
if f := ctxt.IsDir; f != nil {
return f(path)
}
fi, err := os.Stat(path)
return err == nil && fi.IsDir()
}
// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
// the local file system to answer the question.
func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
if f := ctxt.HasSubdir; f != nil {
return f(root, dir)
}
if p, err := filepath.EvalSymlinks(root); err == nil {
root = p
}
if p, err := filepath.EvalSymlinks(dir); err == nil {
dir = p
}
const sep = string(filepath.Separator)
root = filepath.Clean(root)
if !strings.HasSuffix(root, sep) {
root += sep
}
dir = filepath.Clean(dir)
if !strings.HasPrefix(dir, root) {
return "", false
}
return filepath.ToSlash(dir[len(root):]), true
}
// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
func (ctxt *Context) readDir(path string) ([]os.FileInfo, error) {
if f := ctxt.ReadDir; f != nil {
return f(path)
}
return ioutil.ReadDir(path)
}
// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
if fn := ctxt.OpenFile; fn != nil {
return fn(path)
}
f, err := os.Open(path)
if err != nil {
return nil, err // nil interface
}
return f, nil
}
// isFile determines whether path is a file by trying to open it.
// It reuses openFile instead of adding another function to the
// list in Context.
func (ctxt *Context) isFile(path string) bool {
f, err := ctxt.openFile(path)
if err != nil {
return false
}
f.Close()
return true
}
// gopath returns the list of Go path directories.
func (ctxt *Context) gopath() []string {
var all []string
for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
if p == "" || p == ctxt.GOROOT {
// Empty paths are uninteresting.
// If the path is the GOROOT, ignore it.
// People sometimes set GOPATH=$GOROOT, which is useless
// but would cause us to find packages with import paths
// like "pkg/math".
// Do not get confused by this common mistake.
continue
}
all = append(all, p)
}
return all
}
// SrcDirs returns a list of package source root directories.
// It draws from the current Go root and Go path but omits directories
// that do not exist.
func (ctxt *Context) SrcDirs() []string {
var all []string
if ctxt.GOROOT != "" {
dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg")
if ctxt.isDir(dir) {
all = append(all, dir)
}
}
for _, p := range ctxt.gopath() {
dir := ctxt.joinPath(p, "src")
if ctxt.isDir(dir) {
all = append(all, dir)
}
}
return all
}
// Default is the default Context for builds.
// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
var Default Context = defaultContext()
var cgoEnabled = map[string]bool{
"darwin/386": true,
"darwin/amd64": true,
"linux/386": true,
"linux/amd64": true,
"freebsd/386": true,
"freebsd/amd64": true,
"windows/386": true,
"windows/amd64": true,
}
func defaultContext() Context {
var c Context
c.GOARCH = envOr("GOARCH", runtime.GOARCH)
c.GOOS = envOr("GOOS", runtime.GOOS)
c.GOROOT = runtime.GOROOT()
c.GOPATH = envOr("GOPATH", "")
switch os.Getenv("CGO_ENABLED") {
case "1":
c.CgoEnabled = true
case "0":
c.CgoEnabled = false
default:
c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH]
}
return c
}
func envOr(name, def string) string {
s := os.Getenv(name)
if s == "" {
return def
}
return s
}
// An ImportMode controls the behavior of the Import method.
type ImportMode uint
const (
// If FindOnly is set, Import stops after locating the directory
// that should contain the sources for a package. It does not
// read any files in the directory.
FindOnly ImportMode = 1 << iota
// If AllowBinary is set, Import can be satisfied by a compiled
// package object without corresponding sources.
AllowBinary
)
// A Package describes the Go package found in a directory.
type Package struct {
Dir string // directory containing package sources
Name string // package name
Doc string // documentation synopsis
ImportPath string // import path of package ("" if unknown)
Root string // root of Go tree where this package lives
SrcRoot string // package source root directory ("" if unknown)
PkgRoot string // package install root directory ("" if unknown)
BinDir string // command install directory ("" if unknown)
Goroot bool // package found in Go root
PkgObj string // installed .a file
// Source files
GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
CgoFiles []string // .go source files that import "C"
CFiles []string // .c source files
HFiles []string // .h source files
SFiles []string // .s source files
// Cgo directives
CgoPkgConfig []string // Cgo pkg-config directives
CgoCFLAGS []string // Cgo CFLAGS directives
CgoLDFLAGS []string // Cgo LDFLAGS directives
// Dependency information
Imports []string // imports from GoFiles, CgoFiles
ImportPos map[string][]token.Position // line information for Imports
// Test information
TestGoFiles []string // _test.go files in package
TestImports []string // imports from TestGoFiles
TestImportPos map[string][]token.Position // line information for TestImports
XTestGoFiles []string // _test.go files outside package
XTestImports []string // imports from XTestGoFiles
XTestImportPos map[string][]token.Position // line information for XTestImports
}
// IsCommand reports whether the package is considered a
// command to be installed (not just a library).
// Packages named "main" are treated as commands.
func (p *Package) IsCommand() bool {
return p.Name == "main"
}
// ImportDir is like Import but processes the Go package found in
// the named directory.
func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
return ctxt.Import(".", dir, mode)
}
// Import returns details about the Go package named by the import path,
// interpreting local import paths relative to the src directory. If the path
// is a local import path naming a package that can be imported using a
// standard import path, the returned package will set p.ImportPath to
// that path.
//
// In the directory containing the package, .go, .c, .h, and .s files are
// considered part of the package except for:
//
// - .go files in package documentation
// - files starting with _ or .
// - files with build constraints not satisfied by the context
//
// If an error occurs, Import returns a non-nil error also returns a non-nil
// *Package containing partial information.
//
func (ctxt *Context) Import(path string, src string, mode ImportMode) (*Package, error) {
p := &Package{
ImportPath: path,
}
var pkga string
if ctxt.Gccgo {
dir, elem := pathpkg.Split(p.ImportPath)
pkga = "pkg/gccgo/" + dir + "lib" + elem + ".a"
} else {
pkga = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + "/" + p.ImportPath + ".a"
}
binaryOnly := false
if IsLocalImport(path) {
if src == "" {
return p, fmt.Errorf("import %q: import relative to unknown directory", path)
}
if !ctxt.isAbsPath(path) {
p.Dir = ctxt.joinPath(src, path)
}
// Determine canonical import path, if any.
if ctxt.GOROOT != "" {
root := ctxt.joinPath(ctxt.GOROOT, "src", "pkg")
if sub, ok := ctxt.hasSubdir(root, p.Dir); ok {
p.Goroot = true
p.ImportPath = sub
p.Root = ctxt.GOROOT
goto Found
}
}
all := ctxt.gopath()
for i, root := range all {
rootsrc := ctxt.joinPath(root, "src")
if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok {
// We found a potential import path for dir,
// but check that using it wouldn't find something
// else first.
if ctxt.GOROOT != "" {
if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
goto Found
}
}
for _, earlyRoot := range all[:i] {
if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
goto Found
}
}
// sub would not name some other directory instead of this one.
// Record it.
p.ImportPath = sub
p.Root = root
goto Found
}
}
// It's okay that we didn't find a root containing dir.
// Keep going with the information we have.
} else {
if strings.HasPrefix(path, "/") {
return p, fmt.Errorf("import %q: cannot import absolute path", path)
}
// Determine directory from import path.
if ctxt.GOROOT != "" {
dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", path)
isDir := ctxt.isDir(dir)
binaryOnly = !isDir && mode&AllowBinary != 0 && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
if isDir || binaryOnly {
p.Dir = dir
p.Goroot = true
p.Root = ctxt.GOROOT
goto Found
}
}
for _, root := range ctxt.gopath() {
dir := ctxt.joinPath(root, "src", path)
isDir := ctxt.isDir(dir)
binaryOnly = !isDir && mode&AllowBinary != 0 && ctxt.isFile(ctxt.joinPath(root, pkga))
if isDir || binaryOnly {
p.Dir = dir
p.Root = root
goto Found
}
}
return p, fmt.Errorf("import %q: cannot find package", path)
}
Found:
if p.Root != "" {
if p.Goroot {
p.SrcRoot = ctxt.joinPath(p.Root, "src", "pkg")
} else {
p.SrcRoot = ctxt.joinPath(p.Root, "src")
}
p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
p.BinDir = ctxt.joinPath(p.Root, "bin")
p.PkgObj = ctxt.joinPath(p.Root, pkga)
}
if mode&FindOnly != 0 {
return p, nil
}
if binaryOnly && (mode&AllowBinary) != 0 {
return p, nil
}
dirs, err := ctxt.readDir(p.Dir)
if err != nil {
return p, err
}
var Sfiles []string // files with ".S" (capital S)
var firstFile string
imported := make(map[string][]token.Position)
testImported := make(map[string][]token.Position)
xTestImported := make(map[string][]token.Position)
fset := token.NewFileSet()
for _, d := range dirs {
if d.IsDir() {
continue
}
name := d.Name()
if strings.HasPrefix(name, "_") ||
strings.HasPrefix(name, ".") {
continue
}
if !ctxt.UseAllFiles && !ctxt.goodOSArchFile(name) {
continue
}
i := strings.LastIndex(name, ".")
if i < 0 {
i = len(name)
}
ext := name[i:]
switch ext {
case ".go", ".c", ".s", ".h", ".S":
// tentatively okay
default:
// skip
continue
}
filename := ctxt.joinPath(p.Dir, name)
f, err := ctxt.openFile(filename)
if err != nil {
return p, err
}
data, err := ioutil.ReadAll(f)
f.Close()
if err != nil {
return p, fmt.Errorf("read %s: %v", filename, err)
}
// Look for +build comments to accept or reject the file.
if !ctxt.UseAllFiles && !ctxt.shouldBuild(data) {
continue
}
// Going to save the file. For non-Go files, can stop here.
switch ext {
case ".c":
p.CFiles = append(p.CFiles, name)
continue
case ".h":
p.HFiles = append(p.HFiles, name)
continue
case ".s":
p.SFiles = append(p.SFiles, name)
continue
case ".S":
Sfiles = append(Sfiles, name)
continue
}
pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
if err != nil {
return p, err
}
pkg := string(pf.Name.Name)
if pkg == "documentation" {
continue
}
isTest := strings.HasSuffix(name, "_test.go")
isXTest := false
if isTest && strings.HasSuffix(pkg, "_test") {
isXTest = true
pkg = pkg[:len(pkg)-len("_test")]
}
if p.Name == "" {
p.Name = pkg
firstFile = name
} else if pkg != p.Name {
return p, fmt.Errorf("found packages %s (%s) and %s (%s) in %s", p.Name, firstFile, pkg, name, p.Dir)
}
if pf.Doc != nil && p.Doc == "" {
p.Doc = doc.Synopsis(pf.Doc.Text())
}
// Record imports and information about cgo.
isCgo := false
for _, decl := range pf.Decls {
d, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, dspec := range d.Specs {
spec, ok := dspec.(*ast.ImportSpec)
if !ok {
continue
}
quoted := string(spec.Path.Value)
path, err := strconv.Unquote(quoted)
if err != nil {
log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
}
if isXTest {
xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos()))
} else if isTest {
testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
} else {
imported[path] = append(imported[path], fset.Position(spec.Pos()))
}
if path == "C" {
if isTest {
return p, fmt.Errorf("use of cgo in test %s not supported", filename)
}
cg := spec.Doc
if cg == nil && len(d.Specs) == 1 {
cg = d.Doc
}
if cg != nil {
if err := ctxt.saveCgo(filename, p, cg); err != nil {
return p, err
}
}
isCgo = true
}
}
}
if isCgo {
if ctxt.CgoEnabled {
p.CgoFiles = append(p.CgoFiles, name)
}
} else if isXTest {
p.XTestGoFiles = append(p.XTestGoFiles, name)
} else if isTest {
p.TestGoFiles = append(p.TestGoFiles, name)
} else {
p.GoFiles = append(p.GoFiles, name)
}
}
if p.Name == "" {
return p, fmt.Errorf("no Go source files in %s", p.Dir)
}
p.Imports, p.ImportPos = cleanImports(imported)
p.TestImports, p.TestImportPos = cleanImports(testImported)
p.XTestImports, p.XTestImportPos = cleanImports(xTestImported)
// add the .S files only if we are using cgo
// (which means gcc will compile them).
// The standard assemblers expect .s files.
if len(p.CgoFiles) > 0 {
p.SFiles = append(p.SFiles, Sfiles...)
sort.Strings(p.SFiles)
}
return p, nil
}
func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) {
all := make([]string, 0, len(m))
for path := range m {
all = append(all, path)
}
sort.Strings(all)
return all, m
}
// Import is shorthand for Default.Import.
func Import(path, src string, mode ImportMode) (*Package, error) {
return Default.Import(path, src, mode)
}
// ImportDir is shorthand for Default.ImportDir.
func ImportDir(dir string, mode ImportMode) (*Package, error) {
return Default.ImportDir(dir, mode)
}
var slashslash = []byte("//")
// shouldBuild reports whether it is okay to use this file,
// The rule is that in the file's leading run of // comments
// and blank lines, which must be followed by a blank line
// (to avoid including a Go package clause doc comment),
// lines beginning with '// +build' are taken as build directives.
//
// The file is accepted only if each such line lists something
// matching the file. For example:
//
// // +build windows linux
//
// marks the file as applicable only on Windows and Linux.
//
func (ctxt *Context) shouldBuild(content []byte) bool {
// Pass 1. Identify leading run of // comments and blank lines,
// which must be followed by a blank line.
end := 0
p := content
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
if len(line) == 0 { // Blank line
end = cap(content) - cap(line) // &line[0] - &content[0]
continue
}
if !bytes.HasPrefix(line, slashslash) { // Not comment line
break
}
}
content = content[:end]
// Pass 2. Process each line in the run.
p = content
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
if bytes.HasPrefix(line, slashslash) {
line = bytes.TrimSpace(line[len(slashslash):])
if len(line) > 0 && line[0] == '+' {
// Looks like a comment +line.
f := strings.Fields(string(line))
if f[0] == "+build" {
ok := false
for _, tok := range f[1:] {
if ctxt.match(tok) {
ok = true
break
}
}
if !ok {
return false // this one doesn't match
}
}
}
}
}
return true // everything matches
}
// saveCgo saves the information from the #cgo lines in the import "C" comment.
// These lines set CFLAGS and LDFLAGS and pkg-config directives that affect
// the way cgo's C code is built.
//
// TODO(rsc): This duplicates code in cgo.
// Once the dust settles, remove this code from cgo.
func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
text := cg.Text()
for _, line := range strings.Split(text, "\n") {
orig := line
// Line is
// #cgo [GOOS/GOARCH...] LDFLAGS: stuff
//
line = strings.TrimSpace(line)
if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
continue
}
// Split at colon.
line = strings.TrimSpace(line[4:])
i := strings.Index(line, ":")
if i < 0 {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
line, argstr := line[:i], line[i+1:]
// Parse GOOS/GOARCH stuff.
f := strings.Fields(line)
if len(f) < 1 {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
cond, verb := f[:len(f)-1], f[len(f)-1]
if len(cond) > 0 {
ok := false
for _, c := range cond {
if ctxt.match(c) {
ok = true
break
}
}
if !ok {
continue
}
}
args, err := splitQuoted(argstr)
if err != nil {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
for _, arg := range args {
if !safeName(arg) {
return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
}
}
switch verb {
case "CFLAGS":
di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
case "LDFLAGS":
di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
case "pkg-config":
di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
default:
return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
}
}
return nil
}
var safeBytes = []byte("+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:")
func safeName(s string) bool {
if s == "" {
return false
}
for i := 0; i < len(s); i++ {
if c := s[i]; c < 0x80 && bytes.IndexByte(safeBytes, c) < 0 {
return false
}
}
return true
}
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
// a b:"c d" 'e''f' "g\""
//
// Would be parsed as:
//
// []string{"a", "b:c d", "ef", `g"`}
//
func splitQuoted(s string) (r []string, err error) {
var args []string
arg := make([]rune, len(s))
escaped := false
quoted := false
quote := '\x00'
i := 0
for _, rune := range s {
switch {
case escaped:
escaped = false
case rune == '\\':
escaped = true
continue
case quote != '\x00':
if rune == quote {
quote = '\x00'
continue
}
case rune == '"' || rune == '\'':
quoted = true
quote = rune
continue
case unicode.IsSpace(rune):
if quoted || i > 0 {
quoted = false
args = append(args, string(arg[:i]))
i = 0
}
continue
}
arg[i] = rune
i++
}
if quoted || i > 0 {
args = append(args, string(arg[:i]))
}
if quote != 0 {
err = errors.New("unclosed quote")
} else if escaped {
err = errors.New("unfinished escaping")
}
return args, err
}
// match returns true if the name is one of:
//
// $GOOS
// $GOARCH
// cgo (if cgo is enabled)
// !cgo (if cgo is disabled)
// tag (if tag is listed in ctxt.BuildTags)
// !tag (if tag is not listed in ctxt.BuildTags)
// a slash-separated list of any of these
//
func (ctxt *Context) match(name string) bool {
if name == "" {
return false
}
if i := strings.Index(name, ","); i >= 0 {
// comma-separated list
return ctxt.match(name[:i]) && ctxt.match(name[i+1:])
}
if strings.HasPrefix(name, "!!") { // bad syntax, reject always
return false
}
if strings.HasPrefix(name, "!") { // negation
return !ctxt.match(name[1:])
}
// Tags must be letters, digits, underscores.
// Unlike in Go identifiers, all digits is fine (e.g., "386").
for _, c := range name {
if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' {
return false
}
}
// special tags
if ctxt.CgoEnabled && name == "cgo" {
return true
}
if name == ctxt.GOOS || name == ctxt.GOARCH {
return true
}
// other tags
for _, tag := range ctxt.BuildTags {
if tag == name {
return true
}
}
return false
}
// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
// suffix which does not match the current system.
// The recognized name formats are:
//
// name_$(GOOS).*
// name_$(GOARCH).*
// name_$(GOOS)_$(GOARCH).*
// name_$(GOOS)_test.*
// name_$(GOARCH)_test.*
// name_$(GOOS)_$(GOARCH)_test.*
//
func (ctxt *Context) goodOSArchFile(name string) bool {
if dot := strings.Index(name, "."); dot != -1 {
name = name[:dot]
}
l := strings.Split(name, "_")
if n := len(l); n > 0 && l[n-1] == "test" {
l = l[:n-1]
}
n := len(l)
if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
return l[n-2] == ctxt.GOOS && l[n-1] == ctxt.GOARCH
}
if n >= 1 && knownOS[l[n-1]] {
return l[n-1] == ctxt.GOOS
}
if n >= 1 && knownArch[l[n-1]] {
return l[n-1] == ctxt.GOARCH
}
return true
}
var knownOS = make(map[string]bool)
var knownArch = make(map[string]bool)
func init() {
for _, v := range strings.Fields(goosList) {
knownOS[v] = true
}
for _, v := range strings.Fields(goarchList) {
knownArch[v] = true
}
}
// ToolDir is the directory containing build tools.
var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
// IsLocalImport reports whether the import path is
// a local import path, like ".", "..", "./foo", or "../foo".
func IsLocalImport(path string) bool {
return path == "." || path == ".." ||
strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
}
// ArchChar returns the architecture character for the given goarch.
// For example, ArchChar("amd64") returns "6".

View File

@ -5,83 +5,14 @@
package build
import (
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"testing"
)
func sortstr(x []string) []string {
sort.Strings(x)
return x
}
var buildPkgs = []struct {
dir string
info *DirInfo
}{
{
"go/build/pkgtest",
&DirInfo{
GoFiles: []string{"pkgtest.go"},
SFiles: []string{"sqrt_" + runtime.GOARCH + ".s"},
Package: "pkgtest",
Imports: []string{"bytes"},
TestImports: []string{"fmt", "pkgtest"},
TestGoFiles: sortstr([]string{"sqrt_test.go", "sqrt_" + runtime.GOARCH + "_test.go"}),
XTestGoFiles: []string{"xsqrt_test.go"},
},
},
{
"go/build/cmdtest",
&DirInfo{
GoFiles: []string{"main.go"},
Package: "main",
Imports: []string{"go/build/pkgtest"},
TestImports: []string{},
},
},
{
"go/build/cgotest",
&DirInfo{
CgoFiles: ifCgo([]string{"cgotest.go"}),
CFiles: []string{"cgotest.c"},
HFiles: []string{"cgotest.h"},
Imports: []string{"C", "unsafe"},
TestImports: []string{},
Package: "cgotest",
},
},
}
func ifCgo(x []string) []string {
if DefaultContext.CgoEnabled {
return x
}
return nil
}
func TestBuild(t *testing.T) {
for _, tt := range buildPkgs {
tree := Path[0] // Goroot
dir := filepath.Join(tree.SrcDir(), tt.dir)
info, err := ScanDir(dir)
if err != nil {
t.Errorf("ScanDir(%#q): %v", tt.dir, err)
continue
}
// Don't bother testing import positions.
tt.info.ImportPos, tt.info.TestImportPos = info.ImportPos, info.TestImportPos
if !reflect.DeepEqual(info, tt.info) {
t.Errorf("ScanDir(%#q) = %#v, want %#v\n", tt.dir, info, tt.info)
continue
}
}
}
func TestMatch(t *testing.T) {
ctxt := DefaultContext
ctxt := Default
what := "default"
match := func(tag string) {
if !ctxt.match(tag) {
@ -106,3 +37,40 @@ func TestMatch(t *testing.T) {
match(runtime.GOOS + "," + runtime.GOARCH + ",!bar")
nomatch(runtime.GOOS + "," + runtime.GOARCH + ",bar")
}
func TestDotSlashImport(t *testing.T) {
p, err := ImportDir("testdata/other", 0)
if err != nil {
t.Fatal(err)
}
if len(p.Imports) != 1 || p.Imports[0] != "./file" {
t.Fatalf("testdata/other: Imports=%v, want [./file]", p.Imports)
}
p1, err := Import("./file", "testdata/other", 0)
if err != nil {
t.Fatal(err)
}
if p1.Name != "file" {
t.Fatalf("./file: Name=%q, want %q", p1.Name, "file")
}
dir := filepath.Clean("testdata/other/file") // Clean to use \ on Windows
if p1.Dir != dir {
t.Fatalf("./file: Dir=%q, want %q", p1.Name, dir)
}
}
func TestLocalDirectory(t *testing.T) {
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
p, err := ImportDir(cwd, 0)
if err != nil {
t.Fatal(err)
}
if p.ImportPath != "go/build" {
t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "go/build")
}
}

View File

@ -1,19 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgotest
/*
char* greeting = "hello, world";
*/
// #include "cgotest.h"
import "C"
import "unsafe"
var Greeting = C.GoString(C.greeting)
func DoAdd(x, y int) (sum int) {
C.Add(C.int(x), C.int(y), (*C.int)(unsafe.Pointer(&sum)))
return
}

View File

@ -1,12 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "go/build/pkgtest"
func main() {
pkgtest.Foo()
print(int(pkgtest.Sqrt(9)))
}

View File

@ -1,705 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bytes"
"errors"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"unicode"
)
// A Context specifies the supporting context for a build.
type Context struct {
GOARCH string // target architecture
GOOS string // target operating system
CgoEnabled bool // whether cgo can be used
BuildTags []string // additional tags to recognize in +build lines
UseAllFiles bool // use files regardless of +build lines, file names
// By default, ScanDir uses the operating system's
// file system calls to read directories and files.
// Callers can override those calls to provide other
// ways to read data by setting ReadDir and ReadFile.
// ScanDir does not make any assumptions about the
// format of the strings dir and file: they can be
// slash-separated, backslash-separated, even URLs.
// ReadDir returns a slice of os.FileInfo, sorted by Name,
// describing the content of the named directory.
// The dir argument is the argument to ScanDir.
// If ReadDir is nil, ScanDir uses ioutil.ReadDir.
ReadDir func(dir string) (fi []os.FileInfo, err error)
// ReadFile returns the content of the file named file
// in the directory named dir. The dir argument is the
// argument to ScanDir, and the file argument is the
// Name field from an os.FileInfo returned by ReadDir.
// The returned path is the full name of the file, to be
// used in error messages.
//
// If ReadFile is nil, ScanDir uses filepath.Join(dir, file)
// as the path and ioutil.ReadFile to read the data.
ReadFile func(dir, file string) (path string, content []byte, err error)
}
func (ctxt *Context) readDir(dir string) ([]os.FileInfo, error) {
if f := ctxt.ReadDir; f != nil {
return f(dir)
}
return ioutil.ReadDir(dir)
}
func (ctxt *Context) readFile(dir, file string) (string, []byte, error) {
if f := ctxt.ReadFile; f != nil {
return f(dir, file)
}
p := filepath.Join(dir, file)
content, err := ioutil.ReadFile(p)
return p, content, err
}
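// Hypothetical sketch (not in the source): overriding ReadFile as described in
// the Context comments above, serving contents from memory. A fully in-memory
// scan would override ReadDir the same way; the file name and contents are made up.
func exampleReadFileOverride() {
	files := map[string]string{"x.go": "package x\n"}
	ctxt := DefaultContext
	ctxt.ReadFile = func(dir, file string) (string, []byte, error) {
		src, ok := files[file]
		if !ok {
			return "", nil, errors.New("file not found")
		}
		return filepath.Join(dir, file), []byte(src), nil
	}
	_ = ctxt
}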
// The DefaultContext is the default Context for builds.
// It uses the GOARCH and GOOS environment variables
// if set, or else the compiled code's GOARCH and GOOS.
var DefaultContext Context = defaultContext()
var cgoEnabled = map[string]bool{
"darwin/386": true,
"darwin/amd64": true,
"linux/386": true,
"linux/amd64": true,
"freebsd/386": true,
"freebsd/amd64": true,
"windows/386": true,
"windows/amd64": true,
}
func defaultContext() Context {
var c Context
c.GOARCH = envOr("GOARCH", runtime.GOARCH)
c.GOOS = envOr("GOOS", runtime.GOOS)
s := os.Getenv("CGO_ENABLED")
switch s {
case "1":
c.CgoEnabled = true
case "0":
c.CgoEnabled = false
default:
c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH]
}
return c
}
func envOr(name, def string) string {
s := os.Getenv(name)
if s == "" {
return def
}
return s
}
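// Hypothetical sketch (not in the source): inspecting the derived defaults.
// With CGO_ENABLED unset, CgoEnabled falls back to the cgoEnabled table above.
func exampleDefaultContext() {
	c := DefaultContext
	fmt.Printf("GOOS=%s GOARCH=%s cgo=%v\n", c.GOOS, c.GOARCH, c.CgoEnabled)
}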
type DirInfo struct {
Package string // Name of package in dir
PackageComment *ast.CommentGroup // Package comments from GoFiles
ImportPath string // Import path of package in dir
Imports []string // All packages imported by GoFiles
ImportPos map[string][]token.Position // Source code location of imports
// Source files
GoFiles []string // .go files in dir (excluding CgoFiles, TestGoFiles, XTestGoFiles)
HFiles []string // .h files in dir
CFiles []string // .c files in dir
SFiles []string // .s (and, when using cgo, .S) files in dir
CgoFiles []string // .go files that import "C"
// Cgo directives
CgoPkgConfig []string // Cgo pkg-config directives
CgoCFLAGS []string // Cgo CFLAGS directives
CgoLDFLAGS []string // Cgo LDFLAGS directives
// Test information
TestGoFiles []string // _test.go files in package
XTestGoFiles []string // _test.go files outside package
TestImports []string // All packages imported by (X)TestGoFiles
TestImportPos map[string][]token.Position
}
func (d *DirInfo) IsCommand() bool {
// TODO(rsc): This is at least a little bogus.
return d.Package == "main"
}
// ScanDir calls DefaultContext.ScanDir.
func ScanDir(dir string) (info *DirInfo, err error) {
return DefaultContext.ScanDir(dir)
}
// TODO(rsc): Move this comment to a more appropriate place.
// ScanDir returns a structure with details about the Go package
// found in the given directory.
//
// Most .go, .c, .h, and .s files in the directory are considered part
// of the package. The exceptions are:
//
// - .go files in package main (unless no other package is found)
// - .go files in package documentation
// - files starting with _ or .
// - files with build constraints not satisfied by the context
//
// Build Constraints
//
// A build constraint is a line comment beginning with the directive +build
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
// they must appear near the top of the file, preceded
// only by blank lines and other line comments.
//
// A build constraint is evaluated as the OR of space-separated options;
// each option evaluates as the AND of its comma-separated terms;
// and each term is an alphanumeric word or, preceded by !, its negation.
// That is, the build constraint:
//
// // +build linux,386 darwin,!cgo
//
// corresponds to the boolean formula:
//
// (linux AND 386) OR (darwin AND (NOT cgo))
//
// During a particular build, the following words are satisfied:
//
// - the target operating system, as spelled by runtime.GOOS
// - the target architecture, as spelled by runtime.GOARCH
// - "cgo", if ctxt.CgoEnabled is true
// - any additional words listed in ctxt.BuildTags
//
// If a file's name, after stripping the extension and a possible _test suffix,
// matches *_GOOS, *_GOARCH, or *_GOOS_GOARCH for any known operating
// system and architecture values, then the file is considered to have an implicit
// build constraint requiring those terms.
//
// Examples
//
// To keep a file from being considered for the build:
//
// // +build ignore
//
// (any other unsatisfied word will work as well, but ``ignore'' is conventional.)
//
// To build a file only when using cgo, and only on Linux and OS X:
//
// // +build linux,cgo darwin,cgo
//
// Such a file is usually paired with another file implementing the
// default functionality for other systems, which in this case would
// carry the constraint:
//
// // +build !linux !darwin !cgo
//
// Naming a file dns_windows.go will cause it to be included only when
// building the package for Windows; similarly, math_386.s will be included
// only when building the package for 32-bit x86.
//
func (ctxt *Context) ScanDir(dir string) (info *DirInfo, err error) {
dirs, err := ctxt.readDir(dir)
if err != nil {
return nil, err
}
var Sfiles []string // files with ".S" (capital S)
var di DirInfo
var firstFile string
imported := make(map[string][]token.Position)
testImported := make(map[string][]token.Position)
fset := token.NewFileSet()
for _, d := range dirs {
if d.IsDir() {
continue
}
name := d.Name()
if strings.HasPrefix(name, "_") ||
strings.HasPrefix(name, ".") {
continue
}
if !ctxt.UseAllFiles && !ctxt.goodOSArchFile(name) {
continue
}
ext := path.Ext(name)
switch ext {
case ".go", ".c", ".s", ".h", ".S":
// tentatively okay
default:
// skip
continue
}
filename, data, err := ctxt.readFile(dir, name)
if err != nil {
return nil, err
}
// Look for +build comments to accept or reject the file.
if !ctxt.UseAllFiles && !ctxt.shouldBuild(data) {
continue
}
// Going to save the file. For non-Go files, can stop here.
switch ext {
case ".c":
di.CFiles = append(di.CFiles, name)
continue
case ".h":
di.HFiles = append(di.HFiles, name)
continue
case ".s":
di.SFiles = append(di.SFiles, name)
continue
case ".S":
Sfiles = append(Sfiles, name)
continue
}
pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
if err != nil {
return nil, err
}
pkg := string(pf.Name.Name)
if pkg == "documentation" {
continue
}
isTest := strings.HasSuffix(name, "_test.go")
if isTest && strings.HasSuffix(pkg, "_test") {
pkg = pkg[:len(pkg)-len("_test")]
}
if di.Package == "" {
di.Package = pkg
firstFile = name
} else if pkg != di.Package {
return nil, fmt.Errorf("%s: found packages %s (%s) and %s (%s)", dir, di.Package, firstFile, pkg, name)
}
if pf.Doc != nil {
if di.PackageComment != nil {
di.PackageComment.List = append(di.PackageComment.List, pf.Doc.List...)
} else {
di.PackageComment = pf.Doc
}
}
// Record imports and information about cgo.
isCgo := false
for _, decl := range pf.Decls {
d, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, dspec := range d.Specs {
spec, ok := dspec.(*ast.ImportSpec)
if !ok {
continue
}
quoted := string(spec.Path.Value)
path, err := strconv.Unquote(quoted)
if err != nil {
log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
}
if isTest {
testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
} else {
imported[path] = append(imported[path], fset.Position(spec.Pos()))
}
if path == "C" {
if isTest {
return nil, fmt.Errorf("%s: use of cgo in test not supported", filename)
}
cg := spec.Doc
if cg == nil && len(d.Specs) == 1 {
cg = d.Doc
}
if cg != nil {
if err := ctxt.saveCgo(filename, &di, cg); err != nil {
return nil, err
}
}
isCgo = true
}
}
}
if isCgo {
if ctxt.CgoEnabled {
di.CgoFiles = append(di.CgoFiles, name)
}
} else if isTest {
if pkg == string(pf.Name.Name) {
di.TestGoFiles = append(di.TestGoFiles, name)
} else {
di.XTestGoFiles = append(di.XTestGoFiles, name)
}
} else {
di.GoFiles = append(di.GoFiles, name)
}
}
if di.Package == "" {
return nil, fmt.Errorf("%s: no Go source files", dir)
}
di.Imports = make([]string, len(imported))
di.ImportPos = imported
i := 0
for p := range imported {
di.Imports[i] = p
i++
}
di.TestImports = make([]string, len(testImported))
di.TestImportPos = testImported
i = 0
for p := range testImported {
di.TestImports[i] = p
i++
}
// add the .S files only if we are using cgo
// (which means gcc will compile them).
// The standard assemblers expect .s files.
if len(di.CgoFiles) > 0 {
di.SFiles = append(di.SFiles, Sfiles...)
sort.Strings(di.SFiles)
}
// File name lists are sorted because ReadDir sorts.
sort.Strings(di.Imports)
sort.Strings(di.TestImports)
return &di, nil
}
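// Hypothetical sketch (not in the source) of calling this API; the directory
// path is made up and error handling is reduced to a bare return.
func exampleScanDir() {
	info, err := ScanDir("/path/to/pkg")
	if err != nil {
		return
	}
	fmt.Println(info.Package, info.GoFiles, info.CgoFiles, info.Imports)
}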
var slashslash = []byte("//")
// shouldBuild reports whether it is okay to use this file.
// The rule is that in the file's leading run of // comments
// and blank lines, which must be followed by a blank line
// (to avoid including a Go package clause doc comment),
// lines beginning with '// +build' are taken as build directives.
//
// The file is accepted only if each such line lists something
// matching the file. For example:
//
// // +build windows linux
//
// marks the file as applicable only on Windows and Linux.
//
func (ctxt *Context) shouldBuild(content []byte) bool {
// Pass 1. Identify leading run of // comments and blank lines,
// which must be followed by a blank line.
end := 0
p := content
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
if len(line) == 0 { // Blank line
end = cap(content) - cap(line) // &line[0] - &content[0]
continue
}
if !bytes.HasPrefix(line, slashslash) { // Not comment line
break
}
}
content = content[:end]
// Pass 2. Process each line in the run.
p = content
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
if bytes.HasPrefix(line, slashslash) {
line = bytes.TrimSpace(line[len(slashslash):])
if len(line) > 0 && line[0] == '+' {
// Looks like a comment +line.
f := strings.Fields(string(line))
if f[0] == "+build" {
ok := false
for _, tok := range f[1:] {
if ctxt.match(tok) {
ok = true
break
}
}
if !ok {
return false // this one doesn't match
}
}
}
}
}
return true // everything matches
}
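// Hypothetical sketch (not in the source): a file header that is accepted only
// when the context matches one of the listed constraint terms.
func exampleShouldBuild(ctxt *Context) {
	src := []byte("// Copyright notice.\n// +build linux darwin\n\npackage foo\n")
	ok := ctxt.shouldBuild(src) // true only if ctxt matches "linux" or "darwin"
	_ = ok
}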
// saveCgo saves the information from the #cgo lines in the import "C" comment.
// These lines set CFLAGS and LDFLAGS and pkg-config directives that affect
// the way cgo's C code is built.
//
// TODO(rsc): This duplicates code in cgo.
// Once the dust settles, remove this code from cgo.
func (ctxt *Context) saveCgo(filename string, di *DirInfo, cg *ast.CommentGroup) error {
text := cg.Text()
for _, line := range strings.Split(text, "\n") {
orig := line
// Line is
// #cgo [GOOS/GOARCH...] LDFLAGS: stuff
//
line = strings.TrimSpace(line)
if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
continue
}
// Split at colon.
line = strings.TrimSpace(line[4:])
i := strings.Index(line, ":")
if i < 0 {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
line, argstr := line[:i], line[i+1:]
// Parse GOOS/GOARCH stuff.
f := strings.Fields(line)
if len(f) < 1 {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
cond, verb := f[:len(f)-1], f[len(f)-1]
if len(cond) > 0 {
ok := false
for _, c := range cond {
if ctxt.match(c) {
ok = true
break
}
}
if !ok {
continue
}
}
args, err := splitQuoted(argstr)
if err != nil {
return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
}
for _, arg := range args {
if !safeName(arg) {
return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
}
}
switch verb {
case "CFLAGS":
di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
case "LDFLAGS":
di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
case "pkg-config":
di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
default:
return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
}
}
return nil
}
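// Hypothetical sketch (not in the source): the cgo comment below adds "-lm" to
// di.CgoLDFLAGS only when ctxt.match("linux") is true; the file name is made up.
func exampleSaveCgo(ctxt *Context) {
	var di DirInfo
	cg := &ast.CommentGroup{List: []*ast.Comment{
		{Text: "// #cgo linux LDFLAGS: -lm"},
	}}
	if err := ctxt.saveCgo("x.go", &di, cg); err == nil {
		_ = di.CgoLDFLAGS // ["-lm"] on a linux context, empty otherwise
	}
}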
var safeBytes = []byte("+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:")
func safeName(s string) bool {
if s == "" {
return false
}
for i := 0; i < len(s); i++ {
if c := s[i]; c < 0x80 && bytes.IndexByte(safeBytes, c) < 0 {
return false
}
}
return true
}
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed, err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
// a b:"c d" 'e''f' "g\""
//
// Would be parsed as:
//
// []string{"a", "b:c d", "ef", `g"`}
//
func splitQuoted(s string) (r []string, err error) {
var args []string
arg := make([]rune, len(s))
escaped := false
quoted := false
quote := '\x00'
i := 0
for _, rune := range s {
switch {
case escaped:
escaped = false
case rune == '\\':
escaped = true
continue
case quote != '\x00':
if rune == quote {
quote = '\x00'
continue
}
case rune == '"' || rune == '\'':
quoted = true
quote = rune
continue
case unicode.IsSpace(rune):
if quoted || i > 0 {
quoted = false
args = append(args, string(arg[:i]))
i = 0
}
continue
}
arg[i] = rune
i++
}
if quoted || i > 0 {
args = append(args, string(arg[:i]))
}
if quote != 0 {
err = errors.New("unclosed quote")
} else if escaped {
err = errors.New("unfinished escaping")
}
return args, err
}
// match returns true if the name is one of:
//
// $GOOS
// $GOARCH
// cgo (if cgo is enabled)
// !cgo (if cgo is disabled)
// tag (if tag is listed in ctxt.BuildTags)
// !tag (if tag is not listed in ctxt.BuildTags)
// a comma-separated list of any of these
//
func (ctxt *Context) match(name string) bool {
if name == "" {
return false
}
if i := strings.Index(name, ","); i >= 0 {
// comma-separated list
return ctxt.match(name[:i]) && ctxt.match(name[i+1:])
}
if strings.HasPrefix(name, "!!") { // bad syntax, reject always
return false
}
if strings.HasPrefix(name, "!") { // negation
return !ctxt.match(name[1:])
}
// Tags must be letters, digits, underscores.
// Unlike in Go identifiers, all digits is fine (e.g., "386").
for _, c := range name {
if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' {
return false
}
}
// special tags
if ctxt.CgoEnabled && name == "cgo" {
return true
}
if name == ctxt.GOOS || name == ctxt.GOARCH {
return true
}
// other tags
for _, tag := range ctxt.BuildTags {
if tag == name {
return true
}
}
return false
}
// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
// suffix which does not match the current system.
// The recognized name formats are:
//
// name_$(GOOS).*
// name_$(GOARCH).*
// name_$(GOOS)_$(GOARCH).*
// name_$(GOOS)_test.*
// name_$(GOARCH)_test.*
// name_$(GOOS)_$(GOARCH)_test.*
//
func (ctxt *Context) goodOSArchFile(name string) bool {
if dot := strings.Index(name, "."); dot != -1 {
name = name[:dot]
}
l := strings.Split(name, "_")
if n := len(l); n > 0 && l[n-1] == "test" {
l = l[:n-1]
}
n := len(l)
if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
return l[n-2] == ctxt.GOOS && l[n-1] == ctxt.GOARCH
}
if n >= 1 && knownOS[l[n-1]] {
return l[n-1] == ctxt.GOOS
}
if n >= 1 && knownArch[l[n-1]] {
return l[n-1] == ctxt.GOARCH
}
return true
}
var knownOS = make(map[string]bool)
var knownArch = make(map[string]bool)
func init() {
for _, v := range strings.Fields(goosList) {
knownOS[v] = true
}
for _, v := range strings.Fields(goarchList) {
knownArch[v] = true
}
}

libgo/go/go/build/doc.go
View File

@ -0,0 +1,109 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package build gathers information about Go packages.
//
// Go Path
//
// The Go path is a list of directory trees containing Go source code.
// It is consulted to resolve imports that cannot be found in the standard
// Go tree. The default path is the value of the GOPATH environment
// variable, interpreted as a path list appropriate to the operating system
// (on Unix, the variable is a colon-separated string;
// on Windows, a semicolon-separated string;
// on Plan 9, a list).
//
// Each directory listed in the Go path must have a prescribed structure:
//
// The src/ directory holds source code. The path below 'src' determines
// the import path or executable name.
//
// The pkg/ directory holds installed package objects.
// As in the Go tree, each target operating system and
// architecture pair has its own subdirectory of pkg
// (pkg/GOOS_GOARCH).
//
// If DIR is a directory listed in the Go path, a package with
// source in DIR/src/foo/bar can be imported as "foo/bar" and
// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
//
// The bin/ directory holds compiled commands.
// Each command is named for its source directory, but only
// using the final element, not the entire path. That is, the
// command with source in DIR/src/foo/quux is installed into
// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
// so that you can add DIR/bin to your PATH to get at the
// installed commands.
//
// Here's an example directory layout:
//
// GOPATH=/home/user/gocode
//
// /home/user/gocode/
// src/
// foo/
// bar/ (go code in package bar)
// x.go
// quux/ (go code in package main)
// y.go
// bin/
// quux (installed command)
// pkg/
// linux_amd64/
// foo/
// bar.a (installed package object)
//
// Build Constraints
//
// A build constraint is a line comment beginning with the directive +build
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
// they must appear near the top of the file, preceded
// only by blank lines and other line comments.
//
// A build constraint is evaluated as the OR of space-separated options;
// each option evaluates as the AND of its comma-separated terms;
// and each term is an alphanumeric word or, preceded by !, its negation.
// That is, the build constraint:
//
// // +build linux,386 darwin,!cgo
//
// corresponds to the boolean formula:
//
// (linux AND 386) OR (darwin AND (NOT cgo))
//
// During a particular build, the following words are satisfied:
//
// - the target operating system, as spelled by runtime.GOOS
// - the target architecture, as spelled by runtime.GOARCH
// - "cgo", if ctxt.CgoEnabled is true
// - any additional words listed in ctxt.BuildTags
//
// If a file's name, after stripping the extension and a possible _test suffix,
// matches *_GOOS, *_GOARCH, or *_GOOS_GOARCH for any known operating
// system and architecture values, then the file is considered to have an implicit
// build constraint requiring those terms.
//
// To keep a file from being considered for the build:
//
// // +build ignore
//
// (any other unsatisfied word will work as well, but ``ignore'' is conventional.)
//
// To build a file only when using cgo, and only on Linux and OS X:
//
// // +build linux,cgo darwin,cgo
//
// Such a file is usually paired with another file implementing the
// default functionality for other systems, which in this case would
// carry the constraint:
//
// // +build !linux !darwin !cgo
//
// Naming a file dns_windows.go will cause it to be included only when
// building the package for Windows; similarly, math_386.s will be included
// only when building the package for 32-bit x86.
//
package build
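// A hypothetical sketch (not in the source) of using the package as described
// above; the import path "foo/bar" is made up, error handling is a bare return,
// and `import "fmt"` is assumed.
func examplePackageUse() {
	p, err := Import("foo/bar", "", 0) // resolved against GOROOT and the Go path
	if err != nil {
		return
	}
	fmt.Println(p.Dir, p.ImportPath, p.Name, p.Imports)
}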

View File

@ -1,182 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
)
// ToolDir is the directory containing build tools.
var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
// Path is a validated list of Trees derived from $GOROOT and $GOPATH at init.
var Path []*Tree
// Tree describes a Go source tree, either $GOROOT or one from $GOPATH.
type Tree struct {
Path string
Goroot bool
}
func newTree(p string) (*Tree, error) {
if !filepath.IsAbs(p) {
return nil, errors.New("must be absolute")
}
ep, err := filepath.EvalSymlinks(p)
if err != nil {
return nil, err
}
return &Tree{Path: ep}, nil
}
// SrcDir returns the tree's package source directory.
func (t *Tree) SrcDir() string {
if t.Goroot {
return filepath.Join(t.Path, "src", "pkg")
}
return filepath.Join(t.Path, "src")
}
// PkgDir returns the tree's package object directory.
func (t *Tree) PkgDir() string {
goos, goarch := runtime.GOOS, runtime.GOARCH
if e := os.Getenv("GOOS"); e != "" {
goos = e
}
if e := os.Getenv("GOARCH"); e != "" {
goarch = e
}
return filepath.Join(t.Path, "pkg", goos+"_"+goarch)
}
// BinDir returns the tree's binary executable directory.
func (t *Tree) BinDir() string {
if t.Goroot {
if gobin := os.Getenv("GOBIN"); gobin != "" {
return filepath.Clean(gobin)
}
}
return filepath.Join(t.Path, "bin")
}
// HasSrc returns whether the given package's
// source can be found inside this Tree.
func (t *Tree) HasSrc(pkg string) bool {
fi, err := os.Stat(filepath.Join(t.SrcDir(), pkg))
if err != nil {
return false
}
return fi.IsDir()
}
// HasPkg returns whether the given package's
// object file can be found inside this Tree.
func (t *Tree) HasPkg(pkg string) bool {
fi, err := os.Stat(filepath.Join(t.PkgDir(), pkg+".a"))
if err != nil {
return false
}
return !fi.IsDir()
}
var (
ErrNotFound = errors.New("package could not be found locally")
ErrTreeNotFound = errors.New("no valid GOROOT or GOPATH could be found")
)
// FindTree takes an import or filesystem path and returns the
// tree where the package source should be and the package import path.
func FindTree(path string) (tree *Tree, pkg string, err error) {
if isLocalPath(path) {
if path, err = filepath.Abs(path); err != nil {
return
}
if path, err = filepath.EvalSymlinks(path); err != nil {
return
}
for _, t := range Path {
tpath := t.SrcDir() + string(filepath.Separator)
if !filepath.HasPrefix(path, tpath) {
continue
}
tree = t
pkg = filepath.ToSlash(path[len(tpath):])
return
}
err = fmt.Errorf("path %q not inside a GOPATH", path)
return
}
tree = defaultTree
pkg = filepath.ToSlash(path)
for _, t := range Path {
if t.HasSrc(pkg) {
tree = t
return
}
}
if tree == nil {
err = ErrTreeNotFound
} else {
err = ErrNotFound
}
return
}
// isLocalPath returns whether the given path is local (/foo ./foo ../foo . ..)
// Windows paths that start with a drive letter (c:\foo c:foo) are considered local.
func isLocalPath(s string) bool {
const sep = string(filepath.Separator)
return s == "." || s == ".." ||
filepath.HasPrefix(s, sep) ||
filepath.HasPrefix(s, "."+sep) || filepath.HasPrefix(s, ".."+sep) ||
filepath.VolumeName(s) != ""
}
var (
// argument lists used by the build's gc and ld methods
gcImportArgs []string
ldImportArgs []string
// default tree for remote packages
defaultTree *Tree
)
// set up Path: parse and validate GOROOT and GOPATH variables
func init() {
root := runtime.GOROOT()
t, err := newTree(root)
if err == nil {
t.Goroot = true
Path = []*Tree{t}
}
for _, p := range filepath.SplitList(os.Getenv("GOPATH")) {
if p == "" {
continue
}
t, err := newTree(p)
if err != nil {
continue
}
Path = append(Path, t)
gcImportArgs = append(gcImportArgs, "-I", t.PkgDir())
ldImportArgs = append(ldImportArgs, "-L", t.PkgDir())
// select first GOPATH entry as default
if defaultTree == nil {
defaultTree = t
}
}
// use GOROOT if no valid GOPATH specified
if defaultTree == nil && len(Path) > 0 {
defaultTree = Path[0]
}
}

View File

@ -1,13 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgtest
import "bytes"
func Foo() *bytes.Buffer {
return nil
}
func Sqrt(x float64) float64

View File

@ -1 +0,0 @@
package pkgtest

View File

@ -1 +0,0 @@
package pkgtest

View File

@ -1 +0,0 @@
package pkgtest

View File

@ -1,9 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgtest
import "fmt"
var _ = fmt.Printf

View File

@ -1,9 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgtest_test
import "pkgtest"
var _ = pkgtest.Foo

View File

@ -55,7 +55,7 @@ var tests = []GoodFileTest{
func TestGoodOSArch(t *testing.T) {
for _, test := range tests {
if DefaultContext.goodOSArchFile(test.name) != test.result {
if Default.goodOSArchFile(test.name) != test.result {
t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
}
}

View File

@ -0,0 +1,5 @@
// Test data - not compiled.
package file
func F() {}

View File

@ -0,0 +1,11 @@
// Test data - not compiled.
package main
import (
"./file"
)
func main() {
file.F()
}

View File

@ -432,6 +432,17 @@ func (r *reader) readFile(src *ast.File) {
r.readValue(d)
case token.TYPE:
// types are handled individually
if len(d.Specs) == 1 && !d.Lparen.IsValid() {
// common case: single declaration w/o parentheses
// (if a single declaration is parenthesized,
// create a new fake declaration below, so that
// go/doc type declarations always appear w/o
// parentheses)
if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
r.readType(d, s)
}
break
}
for _, spec := range d.Specs {
if s, ok := spec.(*ast.TypeSpec); ok {
// use an individual (possibly fake) declaration
@ -439,8 +450,13 @@ func (r *reader) readFile(src *ast.File) {
// gets to (re-)use the declaration documentation
// if there's none associated with the spec itself
fake := &ast.GenDecl{
Doc: d.Doc,
TokPos: d.Pos(),
Doc: d.Doc,
// don't use the existing TokPos because it
// will lead to the wrong selection range for
// the fake declaration if there is more
// than one type in the group (this affects
// src/cmd/godoc/godoc.go's posLink_urlFunc)
TokPos: s.Pos(),
Tok: token.TYPE,
Specs: []ast.Spec{s},
}
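// Illustrative only (not part of this change): the two declaration shapes the
// branch above distinguishes. A single unparenthesized declaration is read
// directly, while each spec in a grouped declaration is wrapped in a fake
// GenDecl whose TokPos is the spec's own position.
type exampleSingle struct{} // handled by the len(d.Specs) == 1 fast path
type (
	exampleU struct{} // each spec gets its own fake declaration
	exampleV struct{}
)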

View File

@ -14,6 +14,9 @@ import (
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
@ -1913,6 +1916,17 @@ func (p *parser) parseStmt() (s ast.Stmt) {
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
func isValidImport(lit string) bool {
const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
for _, r := range s {
if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
return false
}
}
return s != ""
}
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
@ -1929,6 +1943,9 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
var path *ast.BasicLit
if p.tok == token.STRING {
if !isValidImport(p.lit) {
p.error(p.pos, "invalid import path: "+p.lit)
}
path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
} else {

View File

@ -5,6 +5,7 @@
package parser
import (
"fmt"
"go/ast"
"go/token"
"os"
@ -204,3 +205,48 @@ func TestVarScope(t *testing.T) {
}
}
}
var imports = map[string]bool{
`"a"`: true,
"`a`": true,
`"a/b"`: true,
`"a.b"`: true,
`"m\x61th"`: true,
`"greek/αβ"`: true,
`""`: false,
// Each of these pairs tests both `` vs "" strings
// and also use of invalid characters spelled out as
// escape sequences and written directly.
// For example `"\x00"` tests import "\x00"
// while "`\x00`" tests import `<actual-NUL-byte>`.
`"\x00"`: false,
"`\x00`": false,
`"\x7f"`: false,
"`\x7f`": false,
`"a!"`: false,
"`a!`": false,
`"a b"`: false,
"`a b`": false,
`"a\\b"`: false,
"`a\\b`": false,
"\"`a`\"": false,
"`\"a\"`": false,
`"\x80\x80"`: false,
"`\x80\x80`": false,
`"\xFFFD"`: false,
"`\xFFFD`": false,
}
func TestImports(t *testing.T) {
for path, isValid := range imports {
src := fmt.Sprintf("package p; import %s", path)
_, err := ParseFile(fset, "", src, 0)
switch {
case err != nil && isValid:
t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
case err == nil && !isValid:
t.Errorf("ParseFile(%s): got no error; expected one", src)
}
}
}

View File

@ -12,6 +12,7 @@ import (
"bytes"
"go/ast"
"go/token"
"unicode/utf8"
)
// Other formatting issues:
@ -82,46 +83,37 @@ func (p *printer) setComment(g *ast.CommentGroup) {
type exprListMode uint
const (
blankStart exprListMode = 1 << iota // print a blank before a non-empty list
blankEnd // print a blank after a non-empty list
commaSep // elements are separated by commas
commaTerm // list is optionally terminated by a comma
noIndent // no extra indentation in multi-line lists
commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
noIndent // no extra indentation in multi-line lists
)
// Sets multiLine to true if the identifier list spans multiple lines.
// If indent is set, a multi-line identifier list is indented after the
// first linebreak encountered.
func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) {
func (p *printer) identList(list []*ast.Ident, indent bool) {
// convert into an expression list so we can re-use exprList formatting
xlist := make([]ast.Expr, len(list))
for i, x := range list {
xlist[i] = x
}
mode := commaSep
var mode exprListMode
if !indent {
mode |= noIndent
mode = noIndent
}
p.exprList(token.NoPos, xlist, 1, mode, multiLine, token.NoPos)
p.exprList(token.NoPos, xlist, 1, mode, token.NoPos)
}
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
// lines.
// expressions.
//
// TODO(gri) Consider rewriting this to be independent of []ast.Expr
// so that we can use the algorithm for any kind of list
// (e.g., pass list via a channel over which to range).
func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, multiLine *bool, next0 token.Pos) {
func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos) {
if len(list) == 0 {
return
}
if mode&blankStart != 0 {
p.print(blank)
}
prev := p.posFor(prev0)
next := p.posFor(next0)
line := p.lineFor(list[0].Pos())
@ -131,17 +123,11 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// all list entries on a single line
for i, x := range list {
if i > 0 {
if mode&commaSep != 0 {
// use position of expression following the comma as
// comma position for correct comment placement
p.print(x.Pos(), token.COMMA)
}
p.print(blank)
// use position of expression following the comma as
// comma position for correct comment placement
p.print(x.Pos(), token.COMMA, blank)
}
p.expr0(x, depth, multiLine)
}
if mode&blankEnd != 0 {
p.print(blank)
p.expr0(x, depth)
}
return
}
@ -161,7 +147,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
prevBreak := -1 // index of last expression that was followed by a linebreak
if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) {
ws = ignore
*multiLine = true
prevBreak = 0
}
@ -215,15 +200,13 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
if i > 0 {
needsLinebreak := prevLine < line && prevLine > 0 && line > 0
if mode&commaSep != 0 {
// use position of expression following the comma as
// comma position for correct comment placement, but
// only if the expression is on the same line
if !needsLinebreak {
p.print(x.Pos())
}
p.print(token.COMMA)
// use position of expression following the comma as
// comma position for correct comment placement, but
// only if the expression is on the same line
if !needsLinebreak {
p.print(x.Pos())
}
p.print(token.COMMA)
needsBlank := true
if needsLinebreak {
// lines are broken using newlines so comments remain aligned
@ -231,7 +214,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// the same line in which case formfeed is used
if p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) {
ws = ignore
*multiLine = true
prevBreak = i
needsBlank = false // we got a line break instead
}
@ -245,11 +227,11 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// we have a key:value expression that fits onto one line and
// is in a list with more than one entry: use a column for the
// key such that consecutive entries can align if possible
p.expr(pair.Key, multiLine)
p.expr(pair.Key)
p.print(pair.Colon, token.COLON, vtab)
p.expr(pair.Value, multiLine)
p.expr(pair.Value)
} else {
p.expr0(x, depth, multiLine)
p.expr0(x, depth)
}
}
@ -264,18 +246,13 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
return
}
if mode&blankEnd != 0 {
p.print(blank)
}
if ws == ignore && mode&noIndent == 0 {
// unindent if we indented
p.print(unindent)
}
}
// Sets multiLine to true if the parameter list spans multiple lines.
func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
func (p *printer) parameters(fields *ast.FieldList) {
p.print(fields.Opening, token.LPAREN)
if len(fields.List) > 0 {
prevLine := p.lineFor(fields.Opening)
@ -306,7 +283,6 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) {
// break line if the opening "(" or previous parameter ended on a different line
ws = ignore
*multiLine = true
} else if i > 0 {
p.print(blank)
}
@ -318,11 +294,11 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
// again at the end (and still ws == indent). Thus, a subsequent indent
// by a linebreak call after a type, or in the next multi-line identList
// will do the right thing.
p.identList(par.Names, ws == indent, multiLine)
p.identList(par.Names, ws == indent)
p.print(blank)
}
// parameter type
p.expr(par.Type, multiLine)
p.expr(par.Type)
prevLine = parLineEnd
}
// if the closing ")" is on a separate line from the last parameter,
@ -339,27 +315,26 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
p.print(fields.Closing, token.RPAREN)
}
// Sets multiLine to true if the signature spans multiple lines.
func (p *printer) signature(params, result *ast.FieldList, multiLine *bool) {
p.parameters(params, multiLine)
func (p *printer) signature(params, result *ast.FieldList) {
p.parameters(params)
n := result.NumFields()
if n > 0 {
p.print(blank)
if n == 1 && result.List[0].Names == nil {
// single anonymous result; no ()'s
p.expr(result.List[0].Type, multiLine)
p.expr(result.List[0].Type)
return
}
p.parameters(result, multiLine)
p.parameters(result)
}
}
func identListSize(list []*ast.Ident, maxSize int) (size int) {
for i, x := range list {
if i > 0 {
size += 2 // ", "
size += len(", ")
}
size += len(x.Name)
size += utf8.RuneCountInString(x.Name)
if size >= maxSize {
break
}
@ -389,6 +364,10 @@ func (p *printer) setLineComment(text string) {
p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
}
func (p *printer) isMultiLine(n ast.Node) bool {
return p.lineFor(n.End())-p.lineFor(n.Pos()) > 1
}
func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
lbrace := fields.Opening
list := fields.List
@ -412,12 +391,12 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
// no comments so no need for comma position
p.print(token.COMMA, blank)
}
p.expr(x, ignoreMultiLine)
p.expr(x)
}
if len(f.Names) > 0 {
p.print(blank)
}
p.expr(f.Type, ignoreMultiLine)
p.expr(f.Type)
p.print(blank, rbrace, token.RBRACE)
return
}
@ -435,23 +414,22 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
if len(list) == 1 {
sep = blank
}
var ml bool
newSection := false
for i, f := range list {
if i > 0 {
p.linebreak(p.lineFor(f.Pos()), 1, ignore, ml)
p.linebreak(p.lineFor(f.Pos()), 1, ignore, newSection)
}
ml = false
extraTabs := 0
p.setComment(f.Doc)
if len(f.Names) > 0 {
// named fields
p.identList(f.Names, false, &ml)
p.identList(f.Names, false)
p.print(sep)
p.expr(f.Type, &ml)
p.expr(f.Type)
extraTabs = 1
} else {
// anonymous field
p.expr(f.Type, &ml)
p.expr(f.Type)
extraTabs = 2
}
if f.Tag != nil {
@ -459,7 +437,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
p.print(sep)
}
p.print(sep)
p.expr(f.Tag, &ml)
p.expr(f.Tag)
extraTabs = 0
}
if f.Comment != nil {
@ -468,6 +446,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
}
p.setComment(f.Comment)
}
newSection = p.isMultiLine(f)
}
if isIncomplete {
if len(list) > 0 {
@ -479,22 +458,22 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
} else { // interface
var ml bool
newSection := false
for i, f := range list {
if i > 0 {
p.linebreak(p.lineFor(f.Pos()), 1, ignore, ml)
p.linebreak(p.lineFor(f.Pos()), 1, ignore, newSection)
}
ml = false
p.setComment(f.Doc)
if ftyp, isFtyp := f.Type.(*ast.FuncType); isFtyp {
// method
p.expr(f.Names[0], &ml)
p.signature(ftyp.Params, ftyp.Results, &ml)
p.expr(f.Names[0])
p.signature(ftyp.Params, ftyp.Results)
} else {
// embedded interface
p.expr(f.Type, &ml)
p.expr(f.Type)
}
p.setComment(f.Comment)
newSection = p.isMultiLine(f)
}
if isIncomplete {
if len(list) > 0 {
@ -635,15 +614,14 @@ func reduceDepth(depth int) int {
// cutoff is 6 (always use spaces) in Normal mode
// and 4 (never use spaces) in Compact mode.
//
// Sets multiLine to true if the binary expression spans multiple lines.
func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiLine *bool) {
func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
prec := x.Op.Precedence()
if prec < prec1 {
// parenthesis needed
// Note: The parser inserts an ast.ParenExpr node; thus this case
// can only occur if the AST is created in a different way.
p.print(token.LPAREN)
p.expr0(x, reduceDepth(depth), multiLine) // parentheses undo one level of depth
p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
p.print(token.RPAREN)
return
}
@ -651,7 +629,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
printBlank := prec < cutoff
ws := indent
p.expr1(x.X, prec, depth+diffPrec(x.X, prec), multiLine)
p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
if printBlank {
p.print(blank)
}
@ -663,14 +641,13 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
// in the source
if p.linebreak(yline, 1, ws, true) {
ws = ignore
*multiLine = true
printBlank = false // no blank after line break
}
}
if printBlank {
p.print(blank)
}
p.expr1(x.Y, prec+1, depth+1, multiLine)
p.expr1(x.Y, prec+1, depth+1)
if ws == ignore {
p.print(unindent)
}
@ -681,8 +658,7 @@ func isBinary(expr ast.Expr) bool {
return ok
}
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
p.print(expr.Pos())
switch x := expr.(type) {
@ -697,12 +673,12 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.internalError("depth < 1:", depth)
depth = 1
}
p.binaryExpr(x, prec1, cutoff(x, depth), depth, multiLine)
p.binaryExpr(x, prec1, cutoff(x, depth), depth)
case *ast.KeyValueExpr:
p.expr(x.Key, multiLine)
p.expr(x.Key)
p.print(x.Colon, token.COLON, blank)
p.expr(x.Value, multiLine)
p.expr(x.Value)
case *ast.StarExpr:
const prec = token.UnaryPrec
@ -710,12 +686,12 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
// parenthesis needed
p.print(token.LPAREN)
p.print(token.MUL)
p.expr(x.X, multiLine)
p.expr(x.X)
p.print(token.RPAREN)
} else {
// no parenthesis needed
p.print(token.MUL)
p.expr(x.X, multiLine)
p.expr(x.X)
}
case *ast.UnaryExpr:
@ -723,7 +699,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
if prec < prec1 {
// parenthesis needed
p.print(token.LPAREN)
p.expr(x, multiLine)
p.expr(x)
p.print(token.RPAREN)
} else {
// no parenthesis needed
@ -732,42 +708,41 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
// TODO(gri) Remove this code if it cannot be reached.
p.print(blank)
}
p.expr1(x.X, prec, depth, multiLine)
p.expr1(x.X, prec, depth)
}
case *ast.BasicLit:
p.print(x)
case *ast.FuncLit:
p.expr(x.Type, multiLine)
p.funcBody(x.Body, p.distance(x.Type.Pos(), p.pos), true, multiLine)
p.expr(x.Type)
p.funcBody(x.Body, p.distance(x.Type.Pos(), p.pos), true)
case *ast.ParenExpr:
if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
// don't print parentheses around an already parenthesized expression
// TODO(gri) consider making this more general and incorporate precedence levels
p.expr0(x.X, reduceDepth(depth), multiLine) // parentheses undo one level of depth
p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
} else {
p.print(token.LPAREN)
p.expr0(x.X, reduceDepth(depth), multiLine) // parentheses undo one level of depth
p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
p.print(x.Rparen, token.RPAREN)
}
case *ast.SelectorExpr:
p.expr1(x.X, token.HighestPrec, depth, multiLine)
p.expr1(x.X, token.HighestPrec, depth)
p.print(token.PERIOD)
if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
p.print(indent, newline, x.Sel.Pos(), x.Sel, unindent)
*multiLine = true
} else {
p.print(x.Sel.Pos(), x.Sel)
}
case *ast.TypeAssertExpr:
p.expr1(x.X, token.HighestPrec, depth, multiLine)
p.expr1(x.X, token.HighestPrec, depth)
p.print(token.PERIOD, token.LPAREN)
if x.Type != nil {
p.expr(x.Type, multiLine)
p.expr(x.Type)
} else {
p.print(token.TYPE)
}
@ -775,17 +750,17 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.IndexExpr:
// TODO(gri): should treat[] like parentheses and undo one level of depth
p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.expr1(x.X, token.HighestPrec, 1)
p.print(x.Lbrack, token.LBRACK)
p.expr0(x.Index, depth+1, multiLine)
p.expr0(x.Index, depth+1)
p.print(x.Rbrack, token.RBRACK)
case *ast.SliceExpr:
// TODO(gri): should treat[] like parentheses and undo one level of depth
p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.expr1(x.X, token.HighestPrec, 1)
p.print(x.Lbrack, token.LBRACK)
if x.Low != nil {
p.expr0(x.Low, depth+1, multiLine)
p.expr0(x.Low, depth+1)
}
// blanks around ":" if both sides exist and either side is a binary expression
if depth <= 1 && x.Low != nil && x.High != nil && (isBinary(x.Low) || isBinary(x.High)) {
@ -794,7 +769,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(token.COLON)
}
if x.High != nil {
p.expr0(x.High, depth+1, multiLine)
p.expr0(x.High, depth+1)
}
p.print(x.Rbrack, token.RBRACK)
@ -802,21 +777,26 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
if len(x.Args) > 1 {
depth++
}
p.expr1(x.Fun, token.HighestPrec, depth, multiLine)
p.expr1(x.Fun, token.HighestPrec, depth)
p.print(x.Lparen, token.LPAREN)
p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen)
if x.Ellipsis.IsValid() {
p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis)
p.print(x.Ellipsis, token.ELLIPSIS)
if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
p.print(token.COMMA, formfeed)
}
} else {
p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen)
}
p.print(x.Rparen, token.RPAREN)
case *ast.CompositeLit:
// composite literal elements that are composite literals themselves may have the type omitted
if x.Type != nil {
p.expr1(x.Type, token.HighestPrec, depth, multiLine)
p.expr1(x.Type, token.HighestPrec, depth)
}
p.print(x.Lbrace, token.LBRACE)
p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace)
p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace)
// do not insert extra line breaks because of comments before
// the closing '}' as it might break the code if there is no
// trailing ','
@ -825,16 +805,16 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.Ellipsis:
p.print(token.ELLIPSIS)
if x.Elt != nil {
p.expr(x.Elt, multiLine)
p.expr(x.Elt)
}
case *ast.ArrayType:
p.print(token.LBRACK)
if x.Len != nil {
p.expr(x.Len, multiLine)
p.expr(x.Len)
}
p.print(token.RBRACK)
p.expr(x.Elt, multiLine)
p.expr(x.Elt)
case *ast.StructType:
p.print(token.STRUCT)
@ -842,7 +822,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.FuncType:
p.print(token.FUNC)
p.signature(x.Params, x.Results, multiLine)
p.signature(x.Params, x.Results)
case *ast.InterfaceType:
p.print(token.INTERFACE)
@ -850,9 +830,9 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.MapType:
p.print(token.MAP, token.LBRACK)
p.expr(x.Key, multiLine)
p.expr(x.Key)
p.print(token.RBRACK)
p.expr(x.Value, multiLine)
p.expr(x.Value)
case *ast.ChanType:
switch x.Dir {
@ -864,7 +844,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(token.CHAN, token.ARROW)
}
p.print(blank)
p.expr(x.Value, multiLine)
p.expr(x.Value)
default:
panic("unreachable")
@ -873,14 +853,13 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
return
}
func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) {
p.expr1(x, token.LowestPrec, depth, multiLine)
func (p *printer) expr0(x ast.Expr, depth int) {
p.expr1(x, token.LowestPrec, depth)
}
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr(x ast.Expr, multiLine *bool) {
func (p *printer) expr(x ast.Expr) {
const depth = 1
p.expr1(x, token.LowestPrec, depth, multiLine)
p.expr1(x, token.LowestPrec, depth)
}
// ----------------------------------------------------------------------------
@ -894,13 +873,13 @@ func (p *printer) stmtList(list []ast.Stmt, _indent int, nextIsRBrace bool) {
if _indent > 0 {
p.print(indent)
}
var multiLine bool
multiLine := false
for i, s := range list {
// _indent == 0 only for lists of switch/select case clauses;
// in those cases each clause is a new section
p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || _indent == 0 || multiLine)
multiLine = false
p.stmt(s, nextIsRBrace && i == len(list)-1, &multiLine)
p.stmt(s, nextIsRBrace && i == len(list)-1)
multiLine = p.isMultiLine(s)
}
if _indent > 0 {
p.print(unindent)
@ -957,25 +936,25 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po
if init == nil && post == nil {
// no semicolons required
if expr != nil {
p.expr(stripParens(expr), ignoreMultiLine)
p.expr(stripParens(expr))
needsBlank = true
}
} else {
// all semicolons required
// (they are not separators, print them explicitly)
if init != nil {
p.stmt(init, false, ignoreMultiLine)
p.stmt(init, false)
}
p.print(token.SEMICOLON, blank)
if expr != nil {
p.expr(stripParens(expr), ignoreMultiLine)
p.expr(stripParens(expr))
needsBlank = true
}
if isForStmt {
p.print(token.SEMICOLON, blank)
needsBlank = false
if post != nil {
p.stmt(post, false, ignoreMultiLine)
p.stmt(post, false)
needsBlank = true
}
}
@ -985,8 +964,7 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po
}
}
// Sets multiLine to true if the statements spans multiple lines.
func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
p.print(stmt.Pos())
switch s := stmt.(type) {
@ -994,7 +972,7 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print("BadStmt")
case *ast.DeclStmt:
p.decl(s.Decl, multiLine)
p.decl(s.Decl)
case *ast.EmptyStmt:
// nothing to do
@ -1004,7 +982,7 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
// is applied before the line break if there is no comment
// between (see writeWhitespace)
p.print(unindent)
p.expr(s.Label, multiLine)
p.expr(s.Label)
p.print(s.Colon, token.COLON, indent)
if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
if !nextIsRBrace {
@ -1014,21 +992,21 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
} else {
p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
}
p.stmt(s.Stmt, nextIsRBrace, multiLine)
p.stmt(s.Stmt, nextIsRBrace)
case *ast.ExprStmt:
const depth = 1
p.expr0(s.X, depth, multiLine)
p.expr0(s.X, depth)
case *ast.SendStmt:
const depth = 1
p.expr0(s.Chan, depth, multiLine)
p.expr0(s.Chan, depth)
p.print(blank, s.Arrow, token.ARROW, blank)
p.expr0(s.Value, depth, multiLine)
p.expr0(s.Value, depth)
case *ast.IncDecStmt:
const depth = 1
p.expr0(s.X, depth+1, multiLine)
p.expr0(s.X, depth+1)
p.print(s.TokPos, s.Tok)
case *ast.AssignStmt:
@ -1036,56 +1014,55 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
depth++
}
p.exprList(s.Pos(), s.Lhs, depth, commaSep, multiLine, s.TokPos)
p.print(blank, s.TokPos, s.Tok)
p.exprList(s.TokPos, s.Rhs, depth, blankStart|commaSep, multiLine, token.NoPos)
p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos)
p.print(blank, s.TokPos, s.Tok, blank)
p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos)
case *ast.GoStmt:
p.print(token.GO, blank)
p.expr(s.Call, multiLine)
p.expr(s.Call)
case *ast.DeferStmt:
p.print(token.DEFER, blank)
p.expr(s.Call, multiLine)
p.expr(s.Call)
case *ast.ReturnStmt:
p.print(token.RETURN)
if s.Results != nil {
p.exprList(s.Pos(), s.Results, 1, blankStart|commaSep, multiLine, token.NoPos)
p.print(blank)
p.exprList(s.Pos(), s.Results, 1, 0, token.NoPos)
}
case *ast.BranchStmt:
p.print(s.Tok)
if s.Label != nil {
p.print(blank)
p.expr(s.Label, multiLine)
p.expr(s.Label)
}
case *ast.BlockStmt:
p.block(s, 1)
*multiLine = true
case *ast.IfStmt:
p.print(token.IF)
p.controlClause(false, s.Init, s.Cond, nil)
p.block(s.Body, 1)
*multiLine = true
if s.Else != nil {
p.print(blank, token.ELSE, blank)
switch s.Else.(type) {
case *ast.BlockStmt, *ast.IfStmt:
p.stmt(s.Else, nextIsRBrace, ignoreMultiLine)
p.stmt(s.Else, nextIsRBrace)
default:
p.print(token.LBRACE, indent, formfeed)
p.stmt(s.Else, true, ignoreMultiLine)
p.stmt(s.Else, true)
p.print(unindent, formfeed, token.RBRACE)
}
}
case *ast.CaseClause:
if s.List != nil {
p.print(token.CASE)
p.exprList(s.Pos(), s.List, 1, blankStart|commaSep, multiLine, s.Colon)
p.print(token.CASE, blank)
p.exprList(s.Pos(), s.List, 1, 0, s.Colon)
} else {
p.print(token.DEFAULT)
}
@ -1096,25 +1073,23 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print(token.SWITCH)
p.controlClause(false, s.Init, s.Tag, nil)
p.block(s.Body, 0)
*multiLine = true
case *ast.TypeSwitchStmt:
p.print(token.SWITCH)
if s.Init != nil {
p.print(blank)
p.stmt(s.Init, false, ignoreMultiLine)
p.stmt(s.Init, false)
p.print(token.SEMICOLON)
}
p.print(blank)
p.stmt(s.Assign, false, ignoreMultiLine)
p.stmt(s.Assign, false)
p.print(blank)
p.block(s.Body, 0)
*multiLine = true
case *ast.CommClause:
if s.Comm != nil {
p.print(token.CASE, blank)
p.stmt(s.Comm, false, ignoreMultiLine)
p.stmt(s.Comm, false)
} else {
p.print(token.DEFAULT)
}
@ -1129,29 +1104,26 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print(body.Lbrace, token.LBRACE, body.Rbrace, token.RBRACE)
} else {
p.block(body, 0)
*multiLine = true
}
case *ast.ForStmt:
p.print(token.FOR)
p.controlClause(true, s.Init, s.Cond, s.Post)
p.block(s.Body, 1)
*multiLine = true
case *ast.RangeStmt:
p.print(token.FOR, blank)
p.expr(s.Key, multiLine)
p.expr(s.Key)
if s.Value != nil {
// use position of value following the comma as
// comma position for correct comment placement
p.print(s.Value.Pos(), token.COMMA, blank)
p.expr(s.Value, multiLine)
p.expr(s.Value)
}
p.print(blank, s.TokPos, s.Tok, blank, token.RANGE, blank)
p.expr(stripParens(s.X), multiLine)
p.expr(stripParens(s.X))
p.print(blank)
p.block(s.Body, 1)
*multiLine = true
default:
panic("unreachable")
@ -1228,20 +1200,20 @@ func keepTypeColumn(specs []ast.Spec) []bool {
return m
}
func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine *bool) {
func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool) {
p.setComment(s.Doc)
p.identList(s.Names, doIndent, multiLine) // always present
p.identList(s.Names, doIndent) // always present
extraTabs := 3
if s.Type != nil || keepType {
p.print(vtab)
extraTabs--
}
if s.Type != nil {
p.expr(s.Type, multiLine)
p.expr(s.Type)
}
if s.Values != nil {
p.print(vtab, token.ASSIGN)
p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
p.print(vtab, token.ASSIGN, blank)
p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
extraTabs--
}
if s.Comment != nil {
@ -1255,17 +1227,16 @@ func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine
// The parameter n is the number of specs in the group. If doIndent is set,
// multi-line identifier lists in the spec are indented when the first
// linebreak is encountered.
// Sets multiLine to true if the spec spans multiple lines.
//
func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
switch s := spec.(type) {
case *ast.ImportSpec:
p.setComment(s.Doc)
if s.Name != nil {
p.expr(s.Name, multiLine)
p.expr(s.Name)
p.print(blank)
}
p.expr(s.Path, multiLine)
p.expr(s.Path)
p.setComment(s.Comment)
p.print(s.EndPos)
@ -1274,26 +1245,26 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
p.internalError("expected n = 1; got", n)
}
p.setComment(s.Doc)
p.identList(s.Names, doIndent, multiLine) // always present
p.identList(s.Names, doIndent) // always present
if s.Type != nil {
p.print(blank)
p.expr(s.Type, multiLine)
p.expr(s.Type)
}
if s.Values != nil {
p.print(blank, token.ASSIGN)
p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
p.print(blank, token.ASSIGN, blank)
p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
}
p.setComment(s.Comment)
case *ast.TypeSpec:
p.setComment(s.Doc)
p.expr(s.Name, multiLine)
p.expr(s.Name)
if n == 1 {
p.print(blank)
} else {
p.print(vtab)
}
p.expr(s.Type, multiLine)
p.expr(s.Type)
p.setComment(s.Comment)
default:
@ -1301,8 +1272,7 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
}
}
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
func (p *printer) genDecl(d *ast.GenDecl) {
p.setComment(d.Doc)
p.print(d.Pos(), d.Tok, blank)
@ -1315,32 +1285,31 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
// two or more grouped const/var declarations:
// determine if the type column must be kept
keepType := keepTypeColumn(d.Specs)
var ml bool
newSection := false
for i, s := range d.Specs {
if i > 0 {
p.linebreak(p.lineFor(s.Pos()), 1, ignore, ml)
p.linebreak(p.lineFor(s.Pos()), 1, ignore, newSection)
}
ml = false
p.valueSpec(s.(*ast.ValueSpec), keepType[i], false, &ml)
p.valueSpec(s.(*ast.ValueSpec), keepType[i], false)
newSection = p.isMultiLine(s)
}
} else {
var ml bool
newSection := false
for i, s := range d.Specs {
if i > 0 {
p.linebreak(p.lineFor(s.Pos()), 1, ignore, ml)
p.linebreak(p.lineFor(s.Pos()), 1, ignore, newSection)
}
ml = false
p.spec(s, n, false, &ml)
p.spec(s, n, false)
newSection = p.isMultiLine(s)
}
}
p.print(unindent, formfeed)
*multiLine = true
}
p.print(d.Rparen, token.RPAREN)
} else {
// single declaration
p.spec(d.Specs[0], 1, true, multiLine)
p.spec(d.Specs[0], 1, true)
}
}
@ -1404,8 +1373,7 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
return headerSize+bodySize <= maxSize
}
// Sets multiLine to true if the function body spans multiple lines.
func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLine *bool) {
func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool) {
if b == nil {
return
}
@ -1422,7 +1390,7 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi
if i > 0 {
p.print(token.SEMICOLON, blank)
}
p.stmt(s, i == len(b.List)-1, ignoreMultiLine)
p.stmt(s, i == len(b.List)-1)
}
p.print(blank)
}
@ -1432,7 +1400,6 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi
p.print(blank)
p.block(b, 1)
*multiLine = true
}
// distance returns the column difference between from and to if both
@ -1446,28 +1413,26 @@ func (p *printer) distance(from0 token.Pos, to token.Position) int {
return infinity
}
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) {
func (p *printer) funcDecl(d *ast.FuncDecl) {
p.setComment(d.Doc)
p.print(d.Pos(), token.FUNC, blank)
if d.Recv != nil {
p.parameters(d.Recv, multiLine) // method: print receiver
p.parameters(d.Recv) // method: print receiver
p.print(blank)
}
p.expr(d.Name, multiLine)
p.signature(d.Type.Params, d.Type.Results, multiLine)
p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false, multiLine)
p.expr(d.Name)
p.signature(d.Type.Params, d.Type.Results)
p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false)
}
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) decl(decl ast.Decl, multiLine *bool) {
func (p *printer) decl(decl ast.Decl) {
switch d := decl.(type) {
case *ast.BadDecl:
p.print(d.Pos(), "BadDecl")
case *ast.GenDecl:
p.genDecl(d, multiLine)
p.genDecl(d)
case *ast.FuncDecl:
p.funcDecl(d, multiLine)
p.funcDecl(d)
default:
panic("unreachable")
}
@ -1490,7 +1455,7 @@ func declToken(decl ast.Decl) (tok token.Token) {
func (p *printer) file(src *ast.File) {
p.setComment(src.Doc)
p.print(src.Pos(), token.PACKAGE, blank)
p.expr(src.Name, ignoreMultiLine)
p.expr(src.Name)
if len(src.Decls) > 0 {
tok := token.ILLEGAL
@ -1509,7 +1474,7 @@ func (p *printer) file(src *ast.File) {
min = 2
}
p.linebreak(p.lineFor(d.Pos()), min, ignore, false)
p.decl(d, ignoreMultiLine)
p.decl(d)
}
}
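
A rough usage sketch of the go/printer entry points whose signatures change above (the source string below is made up for illustration):

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	src := "package main\nfunc main() { println(1 +  2) }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Fprint drives the AST through the decl/stmt/expr printing routines shown above.
	if err := printer.Fprint(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}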

View File

@ -34,9 +34,6 @@ const (
unindent = whiteSpace('<')
)
// Use ignoreMultiLine if the multiLine information is not important.
var ignoreMultiLine = new(bool)
// A pmode value represents the current printer mode.
type pmode int
@ -280,10 +277,9 @@ func (p *printer) writeString(pos token.Position, s string, isLit bool) {
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and isKeyword indicates if the
// next item is a keyword.
// a group of comments (or nil), and tok is the next token.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, isKeyword bool) {
func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, tok token.Token) {
if len(p.output) == 0 {
// the comment is the first item to be printed - don't write any whitespace
return
@ -338,38 +334,41 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *as
// comment on a different line:
// separate with at least one line break
droppedLinebreak := false
if prev == nil {
// first comment of a comment group
j := 0
for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore any horizontal whitespace before line breaks
p.wsbuf[i] = ignore
j := 0
for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore any horizontal whitespace before line breaks
p.wsbuf[i] = ignore
continue
case indent:
// apply pending indentation
continue
case unindent:
// if this is not the last unindent, apply it
// as it is (likely) belonging to the last
// construct (e.g., a multi-line expression list)
// and is not part of closing a block
if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
continue
case indent:
// apply pending indentation
continue
case unindent:
// if the next token is a keyword, apply the outdent
// if it appears that the comment is aligned with the
// keyword; otherwise assume the outdent is part of a
// closing block and stop (this scenario appears with
// comments before a case label where the comments
// apply to the next case instead of the current one)
if isKeyword && pos.Column == next.Column {
continue
}
case newline, formfeed:
// TODO(gri): may want to keep formfeed info in some cases
p.wsbuf[i] = ignore
droppedLinebreak = true
}
j = i
break
// if the next token is not a closing }, apply the unindent
// if it appears that the comment is aligned with the
// token; otherwise assume the unindent is part of a
// closing block and stop (this scenario appears with
// comments before a case label where the comments
// apply to the next case instead of the current one)
if tok != token.RBRACE && pos.Column == next.Column {
continue
}
case newline, formfeed:
p.wsbuf[i] = ignore
droppedLinebreak = prev == nil // record only if first comment of a group
}
p.writeWhitespace(j)
j = i
break
}
p.writeWhitespace(j)
// determine number of linebreaks before the comment
n := 0
@ -678,7 +677,7 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (wro
var last *ast.Comment
for p.commentBefore(next) {
for _, c := range p.comment.List {
p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok.IsKeyword())
p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok)
p.writeComment(c)
last = c
}
@ -1011,18 +1010,18 @@ func (p *printer) printNode(node interface{}) error {
// format node
switch n := node.(type) {
case ast.Expr:
p.expr(n, ignoreMultiLine)
p.expr(n)
case ast.Stmt:
// A labeled statement will un-indent to position the
// label. Set indent to 1 so we don't get indent "underflow".
if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
p.indent = 1
}
p.stmt(n, false, ignoreMultiLine)
p.stmt(n, false)
case ast.Decl:
p.decl(n, ignoreMultiLine)
p.decl(n)
case ast.Spec:
p.spec(n, 1, false, ignoreMultiLine)
p.spec(n, 1, false)
case *ast.File:
p.file(n)
default:

View File

@ -154,15 +154,12 @@ var data = []entry{
}
func TestFiles(t *testing.T) {
for i, e := range data {
for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden, e.mode)
// TODO(gri) check that golden is idempotent
//check(t, golden, golden, e.mode)
if testing.Short() && i >= 3 {
break
}
}
}

View File

@ -168,6 +168,91 @@ func typeswitch(x interface{}) {
// this comment should not be indented
}
//
// Indentation of comments after possibly indented multi-line constructs
// (test cases for issue 3147).
//
func _() {
s := 1 +
2
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
_ = 0
}
// Test case from issue 3147.
func f() {
templateText := "a" + // A
"b" + // B
"c" // C
// should be aligned with f()
f()
}
// Modified test case from issue 3147.
func f() {
templateText := "a" + // A
"b" + // B
"c" // C
// may not be aligned with f() (source is not aligned)
f()
}
//
// Test cases for alignment of lines in general comments.
//
func _() {
/* freestanding comment
aligned line

View File

@ -171,6 +171,91 @@ func typeswitch(x interface{}) {
// this comment should not be indented
}
//
// Indentation of comments after possibly indented multi-line constructs
// (test cases for issue 3147).
//
func _() {
s := 1 +
2
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
}
func _() {
s := 1 +
2 // comment
// should be indented like s
_ = 0
}
func _() {
s := 1 +
2
// should be indented like s
_ = 0
}
// Test case from issue 3147.
func f() {
templateText := "a" + // A
"b" + // B
"c" // C
// should be aligned with f()
f()
}
// Modified test case from issue 3147.
func f() {
templateText := "a" + // A
"b" + // B
"c" // C
// may not be aligned with f() (source is not aligned)
f()
}
//
// Test cases for alignment of lines in general comments.
//
func _() {
/* freestanding comment
aligned line

View File

@ -83,13 +83,13 @@ import (
// more import examples
import (
"xxx"
"much longer name" // comment
"short name" // comment
"much_longer_name" // comment
"short_name" // comment
)
import (
_ "xxx"
"much longer name" // comment
"much_longer_name" // comment
)
import (
@ -500,7 +500,7 @@ type _ struct {
type _ struct {
a, b,
c, d int // this line should be indented
c, d int // this line should be indented
u, v, w, x float // this line should be indented
p, q,
r, s float // this line should be indented
@ -562,7 +562,7 @@ var a2, b2,
var (
a3, b3,
c3, d3 int // this line should be indented
c3, d3 int // this line should be indented
a4, b4, c4 int // this line should be indented
)

View File

@ -84,13 +84,13 @@ import (
// more import examples
import (
"xxx"
"much longer name" // comment
"short name" // comment
"much_longer_name" // comment
"short_name" // comment
)
import (
_ "xxx"
"much longer name" // comment
"much_longer_name" // comment
)
import (

View File

@ -625,3 +625,25 @@ func f() {
log.Fatal(err)
}
}
// Handle multi-line argument lists ending in ... correctly.
// Was issue 3130.
func _() {
_ = append(s, a...)
_ = append(
s, a...)
_ = append(s,
a...)
_ = append(
s,
a...)
_ = append(s, a...,
)
_ = append(s,
a...,
)
_ = append(
s,
a...,
)
}

View File

@ -654,3 +654,25 @@ func f() {
log.Fatal(err)
}
}
// Handle multi-line argument lists ending in ... correctly.
// Was issue 3130.
func _() {
_ = append(s, a...)
_ = append(
s, a...)
_ = append(s,
a...)
_ = append(
s,
a...)
_ = append(s, a...,
)
_ = append(s,
a...,
)
_ = append(
s,
a...,
)
}

View File

@ -625,3 +625,25 @@ func f() {
log.Fatal(err)
}
}
// Handle multi-line argument lists ending in ... correctly.
// Was issue 3130.
func _() {
_ = append(s, a...)
_ = append(
s, a...)
_ = append(s,
a...)
_ = append(
s,
a...)
_ = append(s, a...,
)
_ = append(s,
a...,
)
_ = append(
s,
a...,
)
}

View File

@ -52,7 +52,7 @@ type parser struct {
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
// Ordinary identifer scopes
// Ordinary identifier scopes
pkgScope *ast.Scope // pkgScope.Outer == nil
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved identifiers

View File

@ -8,6 +8,82 @@ var expr bool
func use(x interface{}) {}
// Formatting of multi-line return statements.
func _f() {
return
return x, y, z
return T{}
return T{1, 2, 3},
x, y, z
return T{1, 2, 3},
x, y,
z
return T{1,
2,
3}
return T{1,
2,
3,
}
return T{
1,
2,
3}
return T{
1,
2,
3,
}
return T{
1,
T{1, 2, 3},
3,
}
return T{
1,
T{1,
2, 3},
3,
}
return T{
1,
T{1,
2,
3},
3,
}
return T{
1,
2,
},
nil
return T{
1,
2,
},
T{
x: 3,
y: 4,
},
nil
return x + y +
z
return func() {}
return func() {
_ = 0
}, T{
1, 2,
}
return func() {
_ = 0
}
return func() T {
return T{
1, 2,
}
}
}
// Formatting of if-statement headers.
func _() {
if true {

View File

@ -8,6 +8,82 @@ var expr bool
func use(x interface{}) {}
// Formatting of multi-line return statements.
func _f() {
return
return x, y, z
return T{}
return T{1, 2, 3},
x, y, z
return T{1, 2, 3},
x, y,
z
return T{1,
2,
3}
return T{1,
2,
3,
}
return T{
1,
2,
3}
return T{
1,
2,
3,
}
return T{
1,
T{1, 2, 3},
3,
}
return T{
1,
T{1,
2, 3},
3,
}
return T{
1,
T{1,
2,
3},
3,
}
return T{
1,
2,
},
nil
return T{
1,
2,
},
T{
x: 3,
y: 4,
},
nil
return x + y +
z
return func() {}
return func() {
_ = 0
}, T{
1, 2,
}
return func() {
_ = 0
}
return func() T {
return T {
1, 2,
}
}
}
// Formatting of if-statement headers.
func _() {
if true {}

View File

@ -19,7 +19,7 @@ to parse and execute HTML templates safely.
tmpl, err := template.New("name").Parse(...)
// Error checking elided
err = tmpl.Execute(out, "Foo", data)
err = tmpl.Execute(out, data)
If successful, tmpl will now be injection-safe. Otherwise, err is an error
defined in the docs for ErrorCode.
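
A minimal sketch of the Parse/Execute flow described in the doc text above (the template text and data value are illustrative):

package main

import (
	"html/template"
	"os"
)

func main() {
	tmpl, err := template.New("name").Parse("Hello, {{.}}!\n")
	if err != nil {
		panic(err)
	}
	// Execute takes only the writer and the data; contextual escaping is applied automatically.
	if err := tmpl.Execute(os.Stdout, "<World>"); err != nil {
		panic(err)
	}
	// Output: Hello, &lt;World&gt;!
}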

View File

@ -593,7 +593,7 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
}
}
for j := i; j < end; j++ {
if s[j] == '<' && !bytes.HasPrefix(s[j:], doctypeBytes) {
if s[j] == '<' && !bytes.HasPrefix(bytes.ToUpper(s[j:]), doctypeBytes) {
b.Write(s[written:j])
b.WriteString("&lt;")
written = j + 1

View File

@ -223,14 +223,14 @@ func TestEscape(t *testing.T) {
`<button onclick='alert(&quot;\x3cHello\x3e&quot;)'>`,
},
{
"badMarshaller",
"badMarshaler",
`<button onclick='alert(1/{{.B}}in numbers)'>`,
`<button onclick='alert(1/ /* json: error calling MarshalJSON for type *template.badMarshaler: invalid character &#39;f&#39; looking for beginning of object key string */null in numbers)'>`,
},
{
"jsMarshaller",
"jsMarshaler",
`<button onclick='alert({{.M}})'>`,
`<button onclick='alert({&#34;&lt;foo&gt;&#34;:&#34;O&#39;Reilly&#34;})'>`,
`<button onclick='alert({&#34;\u003cfoo\u003e&#34;:&#34;O&#39;Reilly&#34;})'>`,
},
{
"jsStrNotUnderEscaped",
@ -431,6 +431,11 @@ func TestEscape(t *testing.T) {
"<!DOCTYPE html>Hello, World!",
"<!DOCTYPE html>Hello, World!",
},
{
"HTML doctype not case-insensitive",
"<!doCtYPE htMl>Hello, World!",
"<!doCtYPE htMl>Hello, World!",
},
{
"No doctype injection",
`<!{{"DOCTYPE"}}`,

View File

@ -134,7 +134,7 @@ var htmlNospaceNormReplacementTable = []string{
'`': "&#96;",
}
// htmlReplacer returns s with runes replaced acccording to replacementTable
// htmlReplacer returns s with runes replaced according to replacementTable
// and when badRunes is true, certain bad runes are allowed through unescaped.
func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
written, b := 0, new(bytes.Buffer)

View File

@ -6,6 +6,10 @@
// Its primary job is to wrap existing implementations of such primitives,
// such as those in package os, into shared public interfaces that
// abstract the functionality, plus some other related primitives.
//
// Because these interfaces and primitives wrap lower-level operations with
// various implementations, unless otherwise informed clients should not
// assume they are safe for parallel execution.
package io
import (
@ -156,6 +160,9 @@ type WriterTo interface {
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source.
type ReaderAt interface {
ReadAt(p []byte, off int64) (n int, err error)
}
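
A small sketch of the parallel-ReadAt guarantee documented above, using strings.Reader (which implements io.ReaderAt); the offsets and data are arbitrary:

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	r := strings.NewReader("hello, world")
	var wg sync.WaitGroup
	for _, off := range []int64{0, 7} {
		wg.Add(1)
		go func(off int64) {
			defer wg.Done()
			buf := make([]byte, 5)
			// ReadAt neither uses nor moves a seek offset, so these calls may run in parallel.
			n, err := r.ReadAt(buf, off)
			fmt.Println(off, string(buf[:n]), err)
		}(off)
	}
	wg.Wait()
}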

View File

@ -175,6 +175,10 @@ func (w *PipeWriter) CloseWithError(err error) error {
// with code expecting an io.Writer.
// Reads on one end are matched with writes on the other,
// copying data directly between the two; there is no internal buffering.
// It is safe to call Read and Write in parallel with each other or with
// Close. Close will complete once pending I/O is done. Parallel calls to
// Read, and parallel calls to Write, are also safe:
// the individual calls will be gated sequentially.
func Pipe() (*PipeReader, *PipeWriter) {
p := new(pipe)
p.rwait.L = &p.l
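
A minimal sketch of the Read/Write pairing described above (the payload is arbitrary):

package main

import (
	"fmt"
	"io"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		// Each Write blocks until its data has been consumed by a Read.
		pw.Write([]byte("through the pipe"))
		pw.Close()
	}()
	buf := make([]byte, 64)
	n, _ := pr.Read(buf)
	fmt.Println(string(buf[:n]))
}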

View File

@ -27,11 +27,11 @@ const (
// Max is the largest finite value representable by the type.
// SmallestNonzero is the smallest positive, non-zero value representable by the type.
const (
MaxFloat32 = 3.40282346638528859811704183484516925440e+38 /* 2**127 * (2**24 - 1) / 2**23 */
SmallestNonzeroFloat32 = 1.401298464324817070923729583289916131280e-45 /* 1 / 2**(127 - 1 + 23) */
MaxFloat32 = 3.40282346638528859811704183484516925440e+38 // 2**127 * (2**24 - 1) / 2**23
SmallestNonzeroFloat32 = 1.401298464324817070923729583289916131280e-45 // 1 / 2**(127 - 1 + 23)
MaxFloat64 = 1.797693134862315708145274237317043567981e+308 /* 2**1023 * (2**53 - 1) / 2**52 */
SmallestNonzeroFloat64 = 4.940656458412465441765687928682213723651e-324 /* 1 / 2**(1023 - 1 + 52) */
MaxFloat64 = 1.797693134862315708145274237317043567981e+308 // 2**1023 * (2**53 - 1) / 2**52
SmallestNonzeroFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52)
)
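
These limits behave as documented; a quick sketch (the values in the comments are approximate):

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.MaxFloat64)             // ~1.8e+308, the largest finite float64
	fmt.Println(math.SmallestNonzeroFloat64) // ~4.9e-324, the smallest positive float64
	big := math.MaxFloat64
	fmt.Println(big * 2) // +Inf: the product exceeds the largest finite value
}
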
// Integer limit values.

View File

@ -69,7 +69,7 @@ func resolveNetAddr(op, net, addr string) (afnet string, a Addr, err error) {
//
// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only),
// "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4"
// (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket".
// (IPv4-only), "ip6" (IPv6-only), "unix" and "unixpacket".
//
// For TCP and UDP networks, addresses have the form host:port.
// If host is a literal IPv6 address, it must be enclosed
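
A short sketch of Dial with one of the network names listed above (the host and request are illustrative):

package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "golang.org:80") // "tcp" address in host:port form
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Fprintf(conn, "HEAD / HTTP/1.0\r\nHost: golang.org\r\n\r\n")
	status, _ := bufio.NewReader(conn).ReadString('\n')
	fmt.Print(status)
}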

View File

@ -5,6 +5,8 @@
package net
import (
"flag"
"regexp"
"runtime"
"testing"
"time"
@ -128,3 +130,82 @@ func TestSelfConnect(t *testing.T) {
}
}
}
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")
type DialErrorTest struct {
Net string
Raddr string
Pattern string
}
var dialErrorTests = []DialErrorTest{
{
"datakit", "mh/astro/r70",
"dial datakit mh/astro/r70: unknown network datakit",
},
{
"tcp", "127.0.0.1:☺",
"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
},
{
"tcp", "no-such-name.google.com.:80",
"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
},
{
"tcp", "no-such-name.no-such-top-level-domain.:80",
"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
},
{
"tcp", "no-such-name:80",
`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
},
{
"tcp", "mh/astro/r70:http",
"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
},
{
"unix", "/etc/file-not-found",
"dial unix /etc/file-not-found: no such file or directory",
},
{
"unix", "/etc/",
"dial unix /etc/: (permission denied|socket operation on non-socket|connection refused)",
},
{
"unixpacket", "/etc/file-not-found",
"dial unixpacket /etc/file-not-found: no such file or directory",
},
{
"unixpacket", "/etc/",
"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
},
}
var duplicateErrorPattern = `dial (.*) dial (.*)`
func TestDialError(t *testing.T) {
if !*runErrorTest {
t.Logf("test disabled; use -run_error_test to enable")
return
}
for i, tt := range dialErrorTests {
c, err := Dial(tt.Net, tt.Raddr)
if c != nil {
c.Close()
}
if err == nil {
t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
continue
}
s := err.Error()
match, _ := regexp.MatchString(tt.Pattern, s)
if !match {
t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
}
match, _ = regexp.MatchString(duplicateErrorPattern, s)
if match {
t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
}
}
}

View File

@ -42,9 +42,8 @@ func doDial(t *testing.T, network, addr string) {
}
func TestLookupCNAME(t *testing.T) {
if testing.Short() {
// Don't use external network.
t.Logf("skipping external network test during -short")
if testing.Short() || !*testExternal {
t.Logf("skipping test to avoid external network")
return
}
cname, err := LookupCNAME("www.google.com")
@ -67,9 +66,8 @@ var googleaddrsipv4 = []string{
}
func TestDialGoogleIPv4(t *testing.T) {
if testing.Short() {
// Don't use external network.
t.Logf("skipping external network test during -short")
if testing.Short() || !*testExternal {
t.Logf("skipping test to avoid external network")
return
}
@ -124,9 +122,8 @@ var googleaddrsipv6 = []string{
}
func TestDialGoogleIPv6(t *testing.T) {
if testing.Short() {
// Don't use external network.
t.Logf("skipping external network test during -short")
if testing.Short() || !*testExternal {
t.Logf("skipping test to avoid external network")
return
}
// Only run tcp6 if the kernel will take it.

View File

@ -144,6 +144,7 @@ func Serve(handler http.Handler) error {
bufw: bufio.NewWriter(os.Stdout),
}
handler.ServeHTTP(rw, req)
rw.Write(nil) // make sure a response is sent
if err = rw.bufw.Flush(); err != nil {
return err
}

View File

@ -41,6 +41,7 @@ func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string
// Make a map to hold the test map that the CGI returns.
m := make(map[string]string)
m["_body"] = rw.Body.String()
linesRead := 0
readlines:
for {

View File

@ -51,6 +51,22 @@ func TestHostingOurselves(t *testing.T) {
}
}
// Test that a child handler only writing headers works.
func TestChildOnlyHeaders(t *testing.T) {
h := &Handler{
Path: os.Args[0],
Root: "/test.go",
Args: []string{"-test.run=TestBeChildCGIProcess"},
}
expectedMap := map[string]string{
"_body": "",
}
replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap)
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
}
}
// Note: not actually a test.
func TestBeChildCGIProcess(t *testing.T) {
if os.Getenv("REQUEST_METHOD") == "" {
@ -59,8 +75,11 @@ func TestBeChildCGIProcess(t *testing.T) {
}
Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("X-Test-Header", "X-Test-Value")
fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
req.ParseForm()
if req.FormValue("no-body") == "1" {
return
}
fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
for k, vv := range req.Form {
for _, v := range vv {
fmt.Fprintf(rw, "param-%s=%s\n", k, v)

View File

@ -152,12 +152,19 @@ func TestFileServerCleans(t *testing.T) {
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer os.RemoveAll(tempDir)
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
@ -172,6 +179,7 @@ func TestFileServerImplicitLeadingSlash(t *testing.T) {
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {

View File

@ -13,6 +13,7 @@ import (
"net"
"net/http"
"os"
"sync"
)
// A Server is an HTTP server listening on a system-chosen port on the
@ -25,6 +26,10 @@ type Server struct {
// Config may be changed after calling NewUnstartedServer and
// before Start or StartTLS.
Config *http.Server
// wg counts the number of outstanding HTTP requests on this server.
// Close blocks until all requests are finished.
wg sync.WaitGroup
}
// historyListener keeps track of all connections that it's ever
@ -93,6 +98,7 @@ func (s *Server) Start() {
}
s.Listener = &historyListener{s.Listener, make([]net.Conn, 0)}
s.URL = "http://" + s.Listener.Addr().String()
s.wrapHandler()
go s.Config.Serve(s.Listener)
if *serve != "" {
fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
@ -118,9 +124,21 @@ func (s *Server) StartTLS() {
s.Listener = &historyListener{tlsListener, make([]net.Conn, 0)}
s.URL = "https://" + s.Listener.Addr().String()
s.wrapHandler()
go s.Config.Serve(s.Listener)
}
func (s *Server) wrapHandler() {
h := s.Config.Handler
if h == nil {
h = http.DefaultServeMux
}
s.Config.Handler = &waitGroupHandler{
s: s,
h: h,
}
}
// NewTLSServer starts and returns a new Server using TLS.
// The caller should call Close when finished, to shut it down.
func NewTLSServer(handler http.Handler) *Server {
@ -129,9 +147,11 @@ func NewTLSServer(handler http.Handler) *Server {
return ts
}
// Close shuts down the server.
// Close shuts down the server and blocks until all outstanding
// requests on this server have completed.
func (s *Server) Close() {
s.Listener.Close()
s.wg.Wait()
}
// CloseClientConnections closes any currently open HTTP connections
@ -146,6 +166,20 @@ func (s *Server) CloseClientConnections() {
}
}
// waitGroupHandler wraps a handler, incrementing and decrementing a
// sync.WaitGroup on each request, to enable Server.Close to block
// until outstanding requests are finished.
type waitGroupHandler struct {
s *Server
h http.Handler // non-nil
}
func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.s.wg.Add(1)
defer h.s.wg.Done() // a defer, in case ServeHTTP below panics
h.h.ServeHTTP(w, r)
}
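
A brief sketch of the Server lifecycle this change affects; with the wrapped handler, Close returns only after in-flight requests like the one below have completed (the handler is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	}))
	res, err := http.Get(ts.URL)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Print(string(body))
	ts.Close() // blocks until outstanding requests have finished
}
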
// localhostCert is a PEM-encoded TLS cert with SAN DNS names
// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
// of ASN.1 time).

View File

@ -12,6 +12,7 @@ import (
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
@ -59,6 +60,19 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
}
}
// Since we're using the actual Transport code to write the request,
// switch to http so the Transport doesn't try to do an SSL
// negotiation with our dumpConn and its bytes.Buffer & pipe.
// The wire format for https and http are the same, anyway.
reqSend := req
if req.URL.Scheme == "https" {
reqSend = new(http.Request)
*reqSend = *req
reqSend.URL = new(url.URL)
*reqSend.URL = *req.URL
reqSend.URL.Scheme = "http"
}
// Use the actual Transport code to record what we would send
// on the wire, but not using TCP. Use a Transport with a
// customer dialer that returns a fake net.Conn that waits
@ -79,7 +93,7 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
},
}
_, err := t.RoundTrip(req)
_, err := t.RoundTrip(reqSend)
req.Body = save
if err != nil {

View File

@ -71,6 +71,18 @@ var dumpTests = []dumpTest{
"User-Agent: Go http package\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
// Test that an https URL doesn't try to do an SSL negotiation
// with a bytes.Buffer and hang with all goroutines not
// runnable.
{
Req: *mustNewRequest("GET", "https://example.com/foo", nil),
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
"Host: example.com\r\n" +
"User-Agent: Go http package\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
}
func TestDumpRequest(t *testing.T) {
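
A sketch of the case this change covers: dumping an https request no longer attempts a TLS handshake against the in-memory connection (the URL and body are made up):

package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"strings"
)

func main() {
	req, err := http.NewRequest("POST", "https://example.com/upload", strings.NewReader("hi"))
	if err != nil {
		panic(err)
	}
	dump, err := httputil.DumpRequestOut(req, true) // wire format is the same as for http
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", dump)
}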

View File

@ -383,7 +383,7 @@ func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
// Make sure body is fully consumed, even if user does not call body.Close
if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to
// it should return the error that its first invokation
// it should return the error that its first invocation
// returned.
err = lastbody.Close()
if err != nil {

View File

@ -14,14 +14,6 @@ func isSeparator(c byte) bool {
return false
}
func isSpace(c byte) bool {
switch c {
case ' ', '\t', '\r', '\n':
return true
}
return false
}
func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
func isChar(c byte) bool { return 0 <= c && c <= 127 }

View File

@ -129,9 +129,10 @@ func TestSniffWriteSize(t *testing.T) {
}))
defer ts.Close()
for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} {
_, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size))
res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size))
if err != nil {
t.Fatalf("size %d: %v", size, err)
}
res.Body.Close()
}
}

View File

@ -43,6 +43,7 @@ const (
StatusUnsupportedMediaType = 415
StatusRequestedRangeNotSatisfiable = 416
StatusExpectationFailed = 417
StatusTeapot = 418
StatusInternalServerError = 500
StatusNotImplemented = 501
@ -90,6 +91,7 @@ var statusText = map[int]string{
StatusUnsupportedMediaType: "Unsupported Media Type",
StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
StatusExpectationFailed: "Expectation Failed",
StatusTeapot: "I'm a teapot",
StatusInternalServerError: "Internal Server Error",
StatusNotImplemented: "Not Implemented",
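
The new constant is wired into StatusText like the others; a one-line sketch:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	fmt.Println(http.StatusTeapot, http.StatusText(http.StatusTeapot)) // 418 I'm a teapot
}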

View File

@ -383,7 +383,7 @@ func fixTransferEncoding(requestMethod string, header Header) ([]string, error)
// chunked encoding must always come first.
for _, encoding := range encodings {
encoding = strings.ToLower(strings.TrimSpace(encoding))
// "identity" encoding is not recored
// "identity" encoding is not recorded
if encoding == "identity" {
break
}

View File

@ -76,7 +76,9 @@ type Transport struct {
// ProxyFromEnvironment returns the URL of the proxy to use for a
// given request, as indicated by the environment variables
// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
// Either URL or an error is returned.
// An error is returned if the proxy environment is invalid.
// A nil URL and nil error are returned if no proxy is defined in the
// environment, or a proxy should not be used for the given request.
func ProxyFromEnvironment(req *Request) (*url.URL, error) {
proxy := getenvEitherCase("HTTP_PROXY")
if proxy == "" {
@ -86,7 +88,7 @@ func ProxyFromEnvironment(req *Request) (*url.URL, error) {
return nil, nil
}
proxyURL, err := url.Parse(proxy)
if err != nil {
if err != nil || proxyURL.Scheme == "" {
if u, err := url.Parse("http://" + proxy); err == nil {
proxyURL = u
err = nil
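
A small sketch of the documented behavior (the request URL is illustrative); with no proxy variables set, both return values are nil:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	tr := &http.Transport{Proxy: http.ProxyFromEnvironment}
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := tr.Proxy(req)
	if proxyURL == nil && err == nil {
		fmt.Println("no proxy configured for this request")
		return
	}
	fmt.Println(proxyURL, err)
}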

View File

@ -16,6 +16,7 @@ import (
. "net/http"
"net/http/httptest"
"net/url"
"os"
"runtime"
"strconv"
"strings"
@ -727,6 +728,36 @@ func TestTransportAltProto(t *testing.T) {
}
}
var proxyFromEnvTests = []struct {
env string
wanturl string
wanterr error
}{
{"127.0.0.1:8080", "http://127.0.0.1:8080", nil},
{"http://127.0.0.1:8080", "http://127.0.0.1:8080", nil},
{"https://127.0.0.1:8080", "https://127.0.0.1:8080", nil},
{"", "<nil>", nil},
}
func TestProxyFromEnvironment(t *testing.T) {
os.Setenv("HTTP_PROXY", "")
os.Setenv("http_proxy", "")
os.Setenv("NO_PROXY", "")
os.Setenv("no_proxy", "")
for i, tt := range proxyFromEnvTests {
os.Setenv("HTTP_PROXY", tt.env)
req, _ := NewRequest("GET", "http://example.com", nil)
url, err := ProxyFromEnvironment(req)
if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.wanterr); g != e {
t.Errorf("%d. got error = %q, want %q", i, g, e)
continue
}
if got := fmt.Sprintf("%s", url); got != tt.wanturl {
t.Errorf("%d. got URL = %q, want %q", i, url, tt.wanturl)
}
}
}
// rgz is a gzip quine that uncompresses to itself.
var rgz = []byte{
0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,

View File

@ -108,7 +108,6 @@ func DateServer(rw http.ResponseWriter, req *http.Request) {
fmt.Fprintf(rw, "fork/exec: %s\n", err)
return
}
defer p.Release()
io.Copy(rw, r)
wait, err := p.Wait(0)
if err != nil {

View File

@ -6,11 +6,7 @@
package net
import (
"bytes"
"errors"
"fmt"
)
import "errors"
var (
errInvalidInterface = errors.New("net: invalid interface")
@ -20,77 +16,6 @@ var (
errNoSuchMulticastInterface = errors.New("net: no such multicast interface")
)
// A HardwareAddr represents a physical hardware address.
type HardwareAddr []byte
func (a HardwareAddr) String() string {
var buf bytes.Buffer
for i, b := range a {
if i > 0 {
buf.WriteByte(':')
}
fmt.Fprintf(&buf, "%02x", b)
}
return buf.String()
}
// ParseMAC parses s as an IEEE 802 MAC-48, EUI-48, or EUI-64 using one of the
// following formats:
// 01:23:45:67:89:ab
// 01:23:45:67:89:ab:cd:ef
// 01-23-45-67-89-ab
// 01-23-45-67-89-ab-cd-ef
// 0123.4567.89ab
// 0123.4567.89ab.cdef
func ParseMAC(s string) (hw HardwareAddr, err error) {
if len(s) < 14 {
goto error
}
if s[2] == ':' || s[2] == '-' {
if (len(s)+1)%3 != 0 {
goto error
}
n := (len(s) + 1) / 3
if n != 6 && n != 8 {
goto error
}
hw = make(HardwareAddr, n)
for x, i := 0, 0; i < n; i++ {
var ok bool
if hw[i], ok = xtoi2(s[x:], s[2]); !ok {
goto error
}
x += 3
}
} else if s[4] == '.' {
if (len(s)+1)%5 != 0 {
goto error
}
n := 2 * (len(s) + 1) / 5
if n != 6 && n != 8 {
goto error
}
hw = make(HardwareAddr, n)
for x, i := 0, 0; i < n; i += 2 {
var ok bool
if hw[i], ok = xtoi2(s[x:x+2], 0); !ok {
goto error
}
if hw[i+1], ok = xtoi2(s[x+2:], s[4]); !ok {
goto error
}
x += 5
}
} else {
goto error
}
return hw, nil
error:
return nil, errors.New("invalid MAC address: " + s)
}
// Interface represents a mapping between network interface name
// and index. It also represents network interface facility
// information.

View File

@ -6,8 +6,6 @@ package net
import (
"bytes"
"reflect"
"strings"
"testing"
)
@ -96,46 +94,3 @@ func testMulticastAddrs(t *testing.T, ifmat []Addr) {
}
}
}
var mactests = []struct {
in string
out HardwareAddr
err string
}{
{"01:23:45:67:89:AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"01-23-45-67-89-AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"0123.4567.89AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"ab:cd:ef:AB:CD:EF", HardwareAddr{0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef}, ""},
{"01.02.03.04.05.06", nil, "invalid MAC address"},
{"01:02:03:04:05:06:", nil, "invalid MAC address"},
{"x1:02:03:04:05:06", nil, "invalid MAC address"},
{"01002:03:04:05:06", nil, "invalid MAC address"},
{"01:02003:04:05:06", nil, "invalid MAC address"},
{"01:02:03004:05:06", nil, "invalid MAC address"},
{"01:02:03:04005:06", nil, "invalid MAC address"},
{"01:02:03:04:05006", nil, "invalid MAC address"},
{"01-02:03:04:05:06", nil, "invalid MAC address"},
{"01:02-03-04-05-06", nil, "invalid MAC address"},
{"0123:4567:89AF", nil, "invalid MAC address"},
{"0123-4567-89AF", nil, "invalid MAC address"},
{"01:23:45:67:89:AB:CD:EF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
{"01-23-45-67-89-AB-CD-EF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
{"0123.4567.89AB.CDEF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
}
func match(err error, s string) bool {
if s == "" {
return err == nil
}
return err != nil && strings.Contains(err.Error(), s)
}
func TestParseMAC(t *testing.T) {
for _, tt := range mactests {
out, err := ParseMAC(tt.in)
if !reflect.DeepEqual(out, tt.out) || !match(err, tt.err) {
t.Errorf("ParseMAC(%q) = %v, %v, want %v, %v", tt.in, out, err, tt.out,
tt.err)
}
}
}

View File

@ -76,7 +76,7 @@ func lookupProtocol(name string) (proto int, err error) {
}
func lookupHost(host string) (addrs []string, err error) {
// Use /net/cs insead of /net/dns because cs knows about
// Use /net/cs instead of /net/dns because cs knows about
// host names in local network (e.g. from /lib/ndb/local)
lines, err := queryCS("tcp", host, "1")
if err != nil {

View File

@ -12,7 +12,7 @@ import (
"testing"
)
var testExternal = flag.Bool("external", false, "allow use of external networks during test")
var testExternal = flag.Bool("external", true, "allow use of external networks during long test")
func TestGoogleSRV(t *testing.T) {
if testing.Short() || !*testExternal {
@ -78,3 +78,40 @@ func TestGoogleDNSAddr(t *testing.T) {
t.Errorf("no results")
}
}
var revAddrTests = []struct {
Addr string
Reverse string
ErrPrefix string
}{
{"1.2.3.4", "4.3.2.1.in-addr.arpa.", ""},
{"245.110.36.114", "114.36.110.245.in-addr.arpa.", ""},
{"::ffff:12.34.56.78", "78.56.34.12.in-addr.arpa.", ""},
{"::1", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", ""},
{"1::", "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.", ""},
{"1234:567::89a:bcde", "e.d.c.b.a.9.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
{"1234:567:fefe:bcbc:adad:9e4a:89a:bcde", "e.d.c.b.a.9.8.0.a.4.e.9.d.a.d.a.c.b.c.b.e.f.e.f.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
{"1.2.3", "", "unrecognized address"},
{"1.2.3.4.5", "", "unrecognized address"},
{"1234:567:bcbca::89a:bcde", "", "unrecognized address"},
{"1234:567::bcbc:adad::89a:bcde", "", "unrecognized address"},
}
func TestReverseAddress(t *testing.T) {
for i, tt := range revAddrTests {
a, err := reverseaddr(tt.Addr)
if len(tt.ErrPrefix) > 0 && err == nil {
t.Errorf("#%d: expected %q, got <nil> (error)", i, tt.ErrPrefix)
continue
}
if len(tt.ErrPrefix) == 0 && err != nil {
t.Errorf("#%d: expected <nil>, got %q (error)", i, err)
}
if err != nil && err.(*DNSError).Err != tt.ErrPrefix {
t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, err.(*DNSError).Err)
}
if a != tt.Reverse {
t.Errorf("#%d: expected %q, got %q (reverse address)", i, tt.Reverse, a)
}
}
}

libgo/go/net/mac.go (new file, 84 lines)
View File

@ -0,0 +1,84 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MAC address manipulations
package net
import (
"bytes"
"errors"
"fmt"
)
// A HardwareAddr represents a physical hardware address.
type HardwareAddr []byte
func (a HardwareAddr) String() string {
var buf bytes.Buffer
for i, b := range a {
if i > 0 {
buf.WriteByte(':')
}
fmt.Fprintf(&buf, "%02x", b)
}
return buf.String()
}
// ParseMAC parses s as an IEEE 802 MAC-48, EUI-48, or EUI-64 using one of the
// following formats:
// 01:23:45:67:89:ab
// 01:23:45:67:89:ab:cd:ef
// 01-23-45-67-89-ab
// 01-23-45-67-89-ab-cd-ef
// 0123.4567.89ab
// 0123.4567.89ab.cdef
func ParseMAC(s string) (hw HardwareAddr, err error) {
if len(s) < 14 {
goto error
}
if s[2] == ':' || s[2] == '-' {
if (len(s)+1)%3 != 0 {
goto error
}
n := (len(s) + 1) / 3
if n != 6 && n != 8 {
goto error
}
hw = make(HardwareAddr, n)
for x, i := 0, 0; i < n; i++ {
var ok bool
if hw[i], ok = xtoi2(s[x:], s[2]); !ok {
goto error
}
x += 3
}
} else if s[4] == '.' {
if (len(s)+1)%5 != 0 {
goto error
}
n := 2 * (len(s) + 1) / 5
if n != 6 && n != 8 {
goto error
}
hw = make(HardwareAddr, n)
for x, i := 0, 0; i < n; i += 2 {
var ok bool
if hw[i], ok = xtoi2(s[x:x+2], 0); !ok {
goto error
}
if hw[i+1], ok = xtoi2(s[x+2:], s[4]); !ok {
goto error
}
x += 5
}
} else {
goto error
}
return hw, nil
error:
return nil, errors.New("invalid MAC address: " + s)
}
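
A quick sketch of the new API using one of the accepted formats listed above:

package main

import (
	"fmt"
	"net"
)

func main() {
	hw, err := net.ParseMAC("01:23:45:67:89:ab")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(hw, len(hw)) // 01:23:45:67:89:ab 6 (HardwareAddr.String and a 6-byte EUI-48)
}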

libgo/go/net/mac_test.go (new file, 54 lines)
View File

@ -0,0 +1,54 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"reflect"
"strings"
"testing"
)
var mactests = []struct {
in string
out HardwareAddr
err string
}{
{"01:23:45:67:89:AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"01-23-45-67-89-AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"0123.4567.89AB", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab}, ""},
{"ab:cd:ef:AB:CD:EF", HardwareAddr{0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef}, ""},
{"01.02.03.04.05.06", nil, "invalid MAC address"},
{"01:02:03:04:05:06:", nil, "invalid MAC address"},
{"x1:02:03:04:05:06", nil, "invalid MAC address"},
{"01002:03:04:05:06", nil, "invalid MAC address"},
{"01:02003:04:05:06", nil, "invalid MAC address"},
{"01:02:03004:05:06", nil, "invalid MAC address"},
{"01:02:03:04005:06", nil, "invalid MAC address"},
{"01:02:03:04:05006", nil, "invalid MAC address"},
{"01-02:03:04:05:06", nil, "invalid MAC address"},
{"01:02-03-04-05-06", nil, "invalid MAC address"},
{"0123:4567:89AF", nil, "invalid MAC address"},
{"0123-4567-89AF", nil, "invalid MAC address"},
{"01:23:45:67:89:AB:CD:EF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
{"01-23-45-67-89-AB-CD-EF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
{"0123.4567.89AB.CDEF", HardwareAddr{1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, ""},
}
func match(err error, s string) bool {
if s == "" {
return err == nil
}
return err != nil && strings.Contains(err.Error(), s)
}
func TestParseMAC(t *testing.T) {
for _, tt := range mactests {
out, err := ParseMAC(tt.in)
if !reflect.DeepEqual(out, tt.out) || !match(err, tt.err) {
t.Errorf("ParseMAC(%q) = %v, %v, want %v, %v", tt.in, out, err, tt.out,
tt.err)
}
}
}

View File

@ -5,130 +5,12 @@
package net
import (
"flag"
"io"
"regexp"
"runtime"
"testing"
"time"
)
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")
type DialErrorTest struct {
Net string
Raddr string
Pattern string
}
var dialErrorTests = []DialErrorTest{
{
"datakit", "mh/astro/r70",
"dial datakit mh/astro/r70: unknown network datakit",
},
{
"tcp", "127.0.0.1:☺",
"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
},
{
"tcp", "no-such-name.google.com.:80",
"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
},
{
"tcp", "no-such-name.no-such-top-level-domain.:80",
"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
},
{
"tcp", "no-such-name:80",
`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
},
{
"tcp", "mh/astro/r70:http",
"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
},
{
"unix", "/etc/file-not-found",
"dial unix /etc/file-not-found: [nN]o such file or directory",
},
{
"unix", "/etc/",
"dial unix /etc/: ([pP]ermission denied|socket operation on non-socket|connection refused)",
},
{
"unixpacket", "/etc/file-not-found",
"dial unixpacket /etc/file-not-found: no such file or directory",
},
{
"unixpacket", "/etc/",
"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
},
}
var duplicateErrorPattern = `dial (.*) dial (.*)`
func TestDialError(t *testing.T) {
if !*runErrorTest {
t.Logf("test disabled; use --run_error_test to enable")
return
}
for i, tt := range dialErrorTests {
c, err := Dial(tt.Net, tt.Raddr)
if c != nil {
c.Close()
}
if err == nil {
t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
continue
}
s := err.Error()
match, _ := regexp.MatchString(tt.Pattern, s)
if !match {
t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
}
match, _ = regexp.MatchString(duplicateErrorPattern, s)
if match {
t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
}
}
}
var revAddrTests = []struct {
Addr string
Reverse string
ErrPrefix string
}{
{"1.2.3.4", "4.3.2.1.in-addr.arpa.", ""},
{"245.110.36.114", "114.36.110.245.in-addr.arpa.", ""},
{"::ffff:12.34.56.78", "78.56.34.12.in-addr.arpa.", ""},
{"::1", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", ""},
{"1::", "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.", ""},
{"1234:567::89a:bcde", "e.d.c.b.a.9.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
{"1234:567:fefe:bcbc:adad:9e4a:89a:bcde", "e.d.c.b.a.9.8.0.a.4.e.9.d.a.d.a.c.b.c.b.e.f.e.f.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
{"1.2.3", "", "unrecognized address"},
{"1.2.3.4.5", "", "unrecognized address"},
{"1234:567:bcbca::89a:bcde", "", "unrecognized address"},
{"1234:567::bcbc:adad::89a:bcde", "", "unrecognized address"},
}
func TestReverseAddress(t *testing.T) {
for i, tt := range revAddrTests {
a, err := reverseaddr(tt.Addr)
if len(tt.ErrPrefix) > 0 && err == nil {
t.Errorf("#%d: expected %q, got <nil> (error)", i, tt.ErrPrefix)
continue
}
if len(tt.ErrPrefix) == 0 && err != nil {
t.Errorf("#%d: expected <nil>, got %q (error)", i, err)
}
if err != nil && err.(*DNSError).Err != tt.ErrPrefix {
t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, err.(*DNSError).Err)
}
if a != tt.Reverse {
t.Errorf("#%d: expected %q, got %q (reverse address)", i, tt.Reverse, a)
}
}
}
func TestShutdown(t *testing.T) {
if runtime.GOOS == "plan9" {
return

View File

@ -140,7 +140,7 @@ func (client *Client) input() {
}
client.mutex.Unlock()
client.sending.Unlock()
if err != io.EOF || !closing {
if err != io.EOF && !closing {
log.Println("rpc: client protocol error:", err)
}
}

View File

@ -13,13 +13,19 @@
Only methods that satisfy these criteria will be made available for remote access;
other methods will be ignored:
- the method name is exported, that is, begins with an upper case letter.
- the method receiver is exported or local (defined in the package
registering the service).
- the method has two arguments, both exported or local types.
- the method is exported.
- the method has two arguments, both exported (or builtin) types.
- the method's second argument is a pointer.
- the method has return type error.
In effect, the method must look schematically like
func (t *T) MethodName(argType T1, replyType *T2) error
where T, T1 and T2 can be marshaled by encoding/gob.
These requirements apply even if a different codec is used.
(In future, these requirements may soften for custom codecs.)
The method's first argument represents the arguments provided by the caller; the
second argument represents the result parameters to be returned to the caller.
The method's return value, if non-nil, is passed back as a string that the client
@ -36,10 +42,12 @@
call, a pointer containing the arguments, and a pointer to receive the result
parameters.
Call waits for the remote call to complete; Go launches the call asynchronously
and returns a channel that will signal completion.
The Call method waits for the remote call to complete while the Go method
launches the call asynchronously and signals completion using the Call
structure's Done channel.
Package "gob" is used to transport the data.
Unless an explicit codec is set up, package encoding/gob is used to
transport the data.
Here is a simple example. A server wishes to export an object of type Arith:
@ -256,6 +264,7 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
method := s.typ.Method(m)
mtype := method.Type
mname := method.Name
// Method must be exported.
if method.PkgPath != "" {
continue
}
@ -267,7 +276,7 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
// First arg need not be a pointer.
argType := mtype.In(1)
if !isExportedOrBuiltinType(argType) {
log.Println(mname, "argument type not exported or local:", argType)
log.Println(mname, "argument type not exported:", argType)
continue
}
// Second arg must be a pointer.
@ -276,15 +285,17 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
log.Println("method", mname, "reply type not a pointer:", replyType)
continue
}
// Reply type must be exported.
if !isExportedOrBuiltinType(replyType) {
log.Println("method", mname, "reply type not exported or local:", replyType)
log.Println("method", mname, "reply type not exported:", replyType)
continue
}
// Method needs one out: error.
// Method needs one out.
if mtype.NumOut() != 1 {
log.Println("method", mname, "has wrong number of outs:", mtype.NumOut())
continue
}
// The return type of the method must be error.
if returnType := mtype.Out(0); returnType != typeOfError {
log.Println("method", mname, "returns", returnType.String(), "not error")
continue
@ -301,10 +312,10 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
return nil
}
// A value sent as a placeholder for the response when the server receives an invalid request.
type InvalidRequest struct{}
var invalidRequest = InvalidRequest{}
// A value sent as a placeholder for the server's response value when the server
// receives an invalid request. It is never decoded by the client since the Response
// contains an error when it is used.
var invalidRequest = struct{}{}
func (server *Server) sendResponse(sending *sync.Mutex, req *Request, reply interface{}, codec ServerCodec, errmsg string) {
resp := server.getResponse()
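
A minimal sketch of a method that satisfies the criteria listed above, loosely modeled on the Arith example the documentation goes on to give (the names here are illustrative):

package main

import (
	"fmt"
	"net/rpc"
)

type Args struct{ A, B int } // exported argument type

type Arith int

// Multiply has the schematic form func (t *T) MethodName(argType T1, replyType *T2) error.
func (t *Arith) Multiply(args *Args, reply *int) error {
	*reply = args.A * args.B
	return nil
}

func main() {
	if err := rpc.Register(new(Arith)); err != nil {
		fmt.Println("register:", err)
		return
	}
	fmt.Println("Arith registered and callable via rpc.ServeConn or rpc.HandleHTTP")
}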

View File

@ -0,0 +1,39 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// IP-level socket options for NetBSD
package net
import "syscall"
func ipv4MulticastInterface(fd *netFD) (*Interface, error) {
// TODO: Implement this
return nil, syscall.EAFNOSUPPORT
}
func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
// TODO: Implement this
return syscall.EAFNOSUPPORT
}
func ipv4MulticastLoopback(fd *netFD) (bool, error) {
// TODO: Implement this
return false, syscall.EAFNOSUPPORT
}
func setIPv4MulticastLoopback(fd *netFD, v bool) error {
// TODO: Implement this
return syscall.EAFNOSUPPORT
}
func ipv4ReceiveInterface(fd *netFD) (bool, error) {
// TODO: Implement this
return false, syscall.EAFNOSUPPORT
}
func setIPv4ReceiveInterface(fd *netFD, v bool) error {
// TODO: Implement this
return syscall.EAFNOSUPPORT
}

View File

@ -40,7 +40,7 @@ func testTimeout(t *testing.T, network, addr string, readFrom bool) {
errc <- fmt.Errorf("fd.%s on %s %s did not return 0, timeout: %v, %v", what, network, addr, n, err1)
return
}
if dt := t1.Sub(t0); dt < 50*time.Millisecond || dt > 250*time.Millisecond {
if dt := t1.Sub(t0); dt < 50*time.Millisecond || !testing.Short() && dt > 250*time.Millisecond {
errc <- fmt.Errorf("fd.%s on %s %s took %s, expected 0.1s", what, network, addr, dt)
return
}

View File

@ -38,18 +38,18 @@ func testWriteToConn(t *testing.T, raddr string) {
_, err = c.(*UDPConn).WriteToUDP([]byte("Connection-oriented mode socket"), ra)
if err == nil {
t.Fatal("WriteToUDP should be failed")
t.Fatal("WriteToUDP should fail")
}
if err != nil && err.(*OpError).Err != ErrWriteToConnected {
t.Fatalf("WriteToUDP should be failed as ErrWriteToConnected: %v", err)
t.Fatalf("WriteToUDP should fail as ErrWriteToConnected: %v", err)
}
_, err = c.(*UDPConn).WriteTo([]byte("Connection-oriented mode socket"), ra)
if err == nil {
t.Fatal("WriteTo should be failed")
t.Fatal("WriteTo should fail")
}
if err != nil && err.(*OpError).Err != ErrWriteToConnected {
t.Fatalf("WriteTo should be failed as ErrWriteToConnected: %v", err)
t.Fatalf("WriteTo should fail as ErrWriteToConnected: %v", err)
}
_, err = c.Write([]byte("Connection-oriented mode socket"))
@ -82,6 +82,6 @@ func testWriteToPacketConn(t *testing.T, raddr string) {
_, err = c.(*UDPConn).Write([]byte("Connection-less mode socket"))
if err == nil {
t.Fatal("Write should be failed")
t.Fatal("Write should fail")
}
}

Some files were not shown because too many files have changed in this diff.