// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package html

import (
	"bytes"
	"io"
	"strconv"
	"strings"
)

// A TokenType is the type of a Token.
type TokenType int

const (
	// ErrorToken means that an error occurred during tokenization.
	ErrorToken TokenType = iota
	// TextToken means a text node.
	TextToken
	// A StartTagToken looks like <a>.
	StartTagToken
	// An EndTagToken looks like </a>.
	EndTagToken
	// A SelfClosingTagToken looks like <br/>.
	SelfClosingTagToken
	// A CommentToken looks like <!--x-->.
	CommentToken
	// A DoctypeToken looks like <!DOCTYPE x>.
	DoctypeToken
)
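
// As an illustration (a note added here, not in the original source):
// tokenizing the input "<p>Links:</p>" yields a StartTagToken for "p", a
// TextToken for "Links:" and an EndTagToken for "p", in that order.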

// String returns a string representation of the TokenType.
func (t TokenType) String() string {
	switch t {
	case ErrorToken:
		return "Error"
	case TextToken:
		return "Text"
	case StartTagToken:
		return "StartTag"
	case EndTagToken:
		return "EndTag"
	case SelfClosingTagToken:
		return "SelfClosingTag"
	case CommentToken:
		return "Comment"
	case DoctypeToken:
		return "Doctype"
	}
	return "Invalid(" + strconv.Itoa(int(t)) + ")"
}

// An Attribute is an attribute namespace-key-value triple. Namespace is
// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
// does not contain escapable characters like '&', '<' or '>'), and Val is
// unescaped (it looks like "a<b" rather than "a&lt;b").
//
// Namespace is only used by the parser, not the tokenizer.
type Attribute struct {
	Namespace, Key, Val string
}

// A Token consists of a TokenType and some Data (tag name for start and end
// tags, content for text, comments and doctypes). A tag Token may also contain
// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
// rather than "a&lt;b").
type Token struct {
	Type TokenType
	Data string
	Attr []Attribute
}

// tagString returns a string representation of a tag Token's Data and Attr.
func (t Token) tagString() string {
	if len(t.Attr) == 0 {
		return t.Data
	}
	buf := bytes.NewBuffer(nil)
	buf.WriteString(t.Data)
	for _, a := range t.Attr {
		buf.WriteByte(' ')
		buf.WriteString(a.Key)
		buf.WriteString(`="`)
		escape(buf, a.Val)
		buf.WriteByte('"')
	}
	return buf.String()
}

// String returns a string representation of the Token.
func (t Token) String() string {
	switch t.Type {
	case ErrorToken:
		return ""
	case TextToken:
		return EscapeString(t.Data)
	case StartTagToken:
		return "<" + t.tagString() + ">"
	case EndTagToken:
		return "</" + t.tagString() + ">"
	case SelfClosingTagToken:
		return "<" + t.tagString() + "/>"
	case CommentToken:
		return "<!--" + t.Data + "-->"
	case DoctypeToken:
		return "<!DOCTYPE " + t.Data + ">"
	}
	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
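
// An illustrative round trip (a sketch, not from the original source; the
// fmt call is for exposition only):
//
//	t := Token{Type: StartTagToken, Data: "a", Attr: []Attribute{{"", "href", "x&y"}}}
//	fmt.Println(t.String()) // prints <a href="x&amp;y">, re-escaping the unescaped Val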

// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
// the end is exclusive.
type span struct {
	start, end int
}

// A Tokenizer returns a stream of HTML Tokens.
type Tokenizer struct {
	// r is the source of the HTML text.
	r io.Reader
	// tt is the TokenType of the current token.
	tt TokenType
	// err is the first error encountered during tokenization. It is possible
	// for tt != Error && err != nil to hold: this means that Next returned a
	// valid token but the subsequent Next call will return an error token.
	// For example, if the HTML text input was just "plain", then the first
	// Next call would set z.err to io.EOF but return a TextToken, and all
	// subsequent Next calls would return an ErrorToken.
	// err is never reset. Once it becomes non-nil, it stays non-nil.
	err error
	// buf[raw.start:raw.end] holds the raw bytes of the current token.
	// buf[raw.end:] is buffered input that will yield future tokens.
	raw span
	buf []byte
	// buf[data.start:data.end] holds the raw bytes of the current token's data:
	// a text token's text, a tag token's tag name, etc.
	data span
	// pendingAttr is the attribute key and value currently being tokenized.
	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
	// incremented on each call to TagAttr.
	pendingAttr   [2]span
	attr          [][2]span
	nAttrReturned int
	// rawTag is the "script" in "</script>" that closes the next token. If
	// non-empty, the subsequent call to Next will return a raw or RCDATA text
	// token: one that treats "<p>" as text instead of an element.
	// rawTag's contents are lower-cased.
	rawTag string
	// textIsRaw is whether the current text token's data is not escaped.
	textIsRaw bool
}

// Err returns the error associated with the most recent ErrorToken token.
// This is typically io.EOF, meaning the end of tokenization.
func (z *Tokenizer) Err() error {
	if z.tt != ErrorToken {
		return nil
	}
	return z.err
}
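
// A typical loop over a Tokenizer looks like the sketch below (illustrative
// only; per Err above, io.EOF signals a clean end of input):
//
//	z := NewTokenizer(r)
//	for {
//		if z.Next() == ErrorToken {
//			// io.EOF means the input was tokenized successfully.
//			return z.Err()
//		}
//		// Inspect the current token via z.Token(), z.Text(), z.TagName(), etc.
//	}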

// readByte returns the next byte from the input stream, doing a buffered read
// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a
// contiguous byte slice that holds all the bytes read so far for the current
// token.
// It sets z.err if the underlying reader returns an error.
// Pre-condition: z.err == nil.
func (z *Tokenizer) readByte() byte {
	if z.raw.end >= len(z.buf) {
		// Our buffer is exhausted and we have to read from z.r.
		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
		// allocate a new buffer before the copy.
		c := cap(z.buf)
		d := z.raw.end - z.raw.start
		var buf1 []byte
		if 2*d > c {
			buf1 = make([]byte, d, 2*c)
		} else {
			buf1 = z.buf[:d]
		}
		copy(buf1, z.buf[z.raw.start:z.raw.end])
		if x := z.raw.start; x != 0 {
			// Adjust the data/attr spans to refer to the same contents after the copy.
			z.data.start -= x
			z.data.end -= x
			z.pendingAttr[0].start -= x
			z.pendingAttr[0].end -= x
			z.pendingAttr[1].start -= x
			z.pendingAttr[1].end -= x
			for i := range z.attr {
				z.attr[i][0].start -= x
				z.attr[i][0].end -= x
				z.attr[i][1].start -= x
				z.attr[i][1].end -= x
			}
		}
		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
		// Now that we have copied the live bytes to the start of the buffer,
		// we read from z.r into the remainder.
		n, err := z.r.Read(buf1[d:cap(buf1)])
		if err != nil {
			z.err = err
			return 0
		}
		z.buf = buf1[:d+n]
	}
	x := z.buf[z.raw.end]
	z.raw.end++
	return x
}

// skipWhiteSpace skips past any white space.
func (z *Tokenizer) skipWhiteSpace() {
	if z.err != nil {
		return
	}
	for {
		c := z.readByte()
		if z.err != nil {
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			// No-op.
		default:
			z.raw.end--
			return
		}
	}
}

// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
// is typically something like "script" or "textarea".
func (z *Tokenizer) readRawOrRCDATA() {
loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '/' {
			continue loop
		}
		for i := 0; i < len(z.rawTag); i++ {
			c = z.readByte()
			if z.err != nil {
				break loop
			}
			// z.rawTag is lower-cased, so z.rawTag[i]-('a'-'A') is its
			// upper-case form; this matches the tag name case-insensitively.
			if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
				continue loop
			}
		}
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f', '/', '>':
			// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
			z.raw.end -= 3 + len(z.rawTag)
			break loop
		case '<':
			// Step back one, to catch "</foo</foo>".
			z.raw.end--
		}
	}
	z.data.end = z.raw.end
	// A textarea's or title's RCDATA can contain escaped entities.
	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
	z.rawTag = ""
}
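
// For example, given "<script>if (a < b) { f() }</script>", the "<script>"
// start tag sets z.rawTag, and readRawOrRCDATA then accumulates one raw text
// token "if (a < b) { f() }" in which '<' is not treated as markup.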

// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
func (z *Tokenizer) readComment() {
	z.data.start = z.raw.end
	defer func() {
		if z.data.end < z.data.start {
			// It's a comment with no data, like <!-->.
			z.data.end = z.data.start
		}
	}()
	for dashCount := 2; ; {
		c := z.readByte()
		if z.err != nil {
			// Ignore up to two dashes at EOF.
			if dashCount > 2 {
				dashCount = 2
			}
			z.data.end = z.raw.end - dashCount
			return
		}
		switch c {
		case '-':
			dashCount++
			continue
		case '>':
			if dashCount >= 2 {
				z.data.end = z.raw.end - len("-->")
				return
			}
		case '!':
			if dashCount >= 2 {
				c = z.readByte()
				if z.err != nil {
					z.data.end = z.raw.end
					return
				}
				if c == '>' {
					z.data.end = z.raw.end - len("--!>")
					return
				}
			}
		}
		dashCount = 0
	}
}
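
// As the '!' case above shows, the non-standard closer "--!>" also ends a
// comment: for example, "<!--x--!>" yields a comment whose data is "x".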

// readUntilCloseAngle reads until the next ">".
func (z *Tokenizer) readUntilCloseAngle() {
	z.data.start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		if c == '>' {
			z.data.end = z.raw.end - len(">")
			return
		}
	}
}

// readMarkupDeclaration reads the next token starting with "<!". It might be
// a "<!--comment-->", a "<!DOCTYPE foo>", or "<!a bogus comment". The opening
// "<!" has already been consumed.
func (z *Tokenizer) readMarkupDeclaration() TokenType {
	z.data.start = z.raw.end
	var c [2]byte
	for i := 0; i < 2; i++ {
		c[i] = z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return CommentToken
		}
	}
	if c[0] == '-' && c[1] == '-' {
		z.readComment()
		return CommentToken
	}
	z.raw.end -= 2
	const s = "DOCTYPE"
	for i := 0; i < len(s); i++ {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return CommentToken
		}
		// s is upper-case, so s[i]+('a'-'A') is its lower-case form.
		if c != s[i] && c != s[i]+('a'-'A') {
			// Back up to read the fragment of "DOCTYPE" again.
			z.raw.end = z.data.start
			z.readUntilCloseAngle()
			return CommentToken
		}
	}
	if z.skipWhiteSpace(); z.err != nil {
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		return DoctypeToken
	}
	z.readUntilCloseAngle()
	return DoctypeToken
}
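
// For example, "<!DOCTYPE html>" (in any letter case) produces a DoctypeToken
// whose Text is "html", while "<!foo>" fails the "DOCTYPE" match and falls
// through to the bogus-comment path, producing a CommentToken whose Text is
// "foo".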

// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
// case-insensitively matches any element of ss.
func (z *Tokenizer) startTagIn(ss ...string) bool {
loop:
	for _, s := range ss {
		if z.data.end-z.data.start != len(s) {
			continue loop
		}
		for i := 0; i < len(s); i++ {
			c := z.buf[z.data.start+i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			if c != s[i] {
				continue loop
			}
		}
		return true
	}
	return false
}

// readStartTag reads the next start tag token. The opening "<a" has already
// been consumed, where 'a' means anything in [A-Za-z].
func (z *Tokenizer) readStartTag() TokenType {
	z.attr = z.attr[:0]
	z.nAttrReturned = 0
	// Read the tag name and attribute key/value pairs.
	z.readTagName()
	if z.skipWhiteSpace(); z.err != nil {
		return ErrorToken
	}
	for {
		c := z.readByte()
		if z.err != nil || c == '>' {
			break
		}
		z.raw.end--
		z.readTagAttrKey()
		z.readTagAttrVal()
		// Save pendingAttr if it has a non-empty key.
		if z.pendingAttr[0].start != z.pendingAttr[0].end {
			z.attr = append(z.attr, z.pendingAttr)
		}
		if z.skipWhiteSpace(); z.err != nil {
			break
		}
	}
	// Several tags flag the tokenizer's next token as raw.
	c, raw := z.buf[z.data.start], false
	if 'A' <= c && c <= 'Z' {
		c += 'a' - 'A'
	}
	switch c {
	case 'i':
		raw = z.startTagIn("iframe")
	case 'n':
		raw = z.startTagIn("noembed", "noframes", "noscript")
	case 'p':
		raw = z.startTagIn("plaintext")
	case 's':
		raw = z.startTagIn("script", "style")
	case 't':
		raw = z.startTagIn("textarea", "title")
	case 'x':
		raw = z.startTagIn("xmp")
	}
	if raw {
		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
	}
	// Look for a self-closing token like "<br/>".
	if z.err == nil && z.buf[z.raw.end-2] == '/' {
		return SelfClosingTagToken
	}
	return StartTagToken
}

// readEndTag reads the next end tag token. The opening "</a" has already
// been consumed, where 'a' means anything in [A-Za-z].
func (z *Tokenizer) readEndTag() {
	z.attr = z.attr[:0]
	z.nAttrReturned = 0
	z.readTagName()
	for {
		c := z.readByte()
		if z.err != nil || c == '>' {
			return
		}
	}
}

// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
// is positioned such that the first byte of the tag name (the "d" in "<div")
// has already been consumed.
func (z *Tokenizer) readTagName() {
	z.data.start = z.raw.end - 1
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			z.data.end = z.raw.end - 1
			return
		case '/', '>':
			z.raw.end--
			z.data.end = z.raw.end
			return
		}
	}
}

// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
// Precondition: z.err == nil.
func (z *Tokenizer) readTagAttrKey() {
	z.pendingAttr[0].start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.pendingAttr[0].end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f', '/':
			z.pendingAttr[0].end = z.raw.end - 1
			return
		case '=', '>':
			z.raw.end--
			z.pendingAttr[0].end = z.raw.end
			return
		}
	}
}

// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
func (z *Tokenizer) readTagAttrVal() {
	z.pendingAttr[1].start = z.raw.end
	z.pendingAttr[1].end = z.raw.end
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	c := z.readByte()
	if z.err != nil {
		return
	}
	if c != '=' {
		z.raw.end--
		return
	}
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	quote := z.readByte()
	if z.err != nil {
		return
	}
	switch quote {
	case '>':
		z.raw.end--
		return

	case '\'', '"':
		z.pendingAttr[1].start = z.raw.end
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			if c == quote {
				z.pendingAttr[1].end = z.raw.end - 1
				return
			}
		}

	default:
		z.pendingAttr[1].start = z.raw.end - 1
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			switch c {
			case ' ', '\n', '\r', '\t', '\f':
				z.pendingAttr[1].end = z.raw.end - 1
				return
			case '>':
				z.raw.end--
				z.pendingAttr[1].end = z.raw.end
				return
			}
		}
	}
}

// Next scans the next token and returns its type.
func (z *Tokenizer) Next() TokenType {
	if z.err != nil {
		z.tt = ErrorToken
		return z.tt
	}
	z.raw.start = z.raw.end
	z.data.start = z.raw.end
	z.data.end = z.raw.end
	if z.rawTag != "" {
		if z.rawTag == "plaintext" {
			// Read everything up to EOF.
			for z.err == nil {
				z.readByte()
			}
			z.textIsRaw = true
		} else {
			z.readRawOrRCDATA()
		}
		if z.data.end > z.data.start {
			z.tt = TextToken
			return z.tt
		}
	}
	z.textIsRaw = false

loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}

		// Check if the '<' we have just read is part of a tag, comment
		// or doctype. If not, it's part of the accumulated text token.
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		var tokenType TokenType
		switch {
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			tokenType = StartTagToken
		case c == '/':
			tokenType = EndTagToken
		case c == '!' || c == '?':
			// We use CommentToken to mean any of "<!--actual comments-->",
			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
			tokenType = CommentToken
		default:
			continue
		}

		// We have a non-text token, but we might have accumulated some text
		// before that. If so, we return the text first, and return the non-
		// text token on the subsequent call to Next.
		if x := z.raw.end - len("<a"); z.raw.start < x {
			z.raw.end = x
			z.data.end = x
			z.tt = TextToken
			return z.tt
		}
		switch tokenType {
		case StartTagToken:
			z.tt = z.readStartTag()
			return z.tt
		case EndTagToken:
			c = z.readByte()
			if z.err != nil {
				break loop
			}
			if c == '>' {
				// "</>" does not generate a token at all.
				// Reset the tokenizer state and start again.
				z.raw.start = z.raw.end
				z.data.start = z.raw.end
				z.data.end = z.raw.end
				continue loop
			}
			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
				z.readEndTag()
				z.tt = EndTagToken
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		case CommentToken:
			if c == '!' {
				z.tt = z.readMarkupDeclaration()
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		}
	}
	if z.raw.start < z.raw.end {
		z.data.end = z.raw.end
		z.tt = TextToken
		return z.tt
	}
	z.tt = ErrorToken
	return z.tt
}
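
// For example, tokenizing "plain<a>" returns a TextToken for "plain" first:
// the "<a" bytes are left in the buffer (z.raw.end is rewound to x above) and
// the StartTagToken is produced by the subsequent call to Next.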

// Raw returns the unmodified text of the current token. Calling Next, Token,
// Text, TagName or TagAttr may change the contents of the returned slice.
func (z *Tokenizer) Raw() []byte {
	return z.buf[z.raw.start:z.raw.end]
}

// Text returns the unescaped text of a text, comment or doctype token. The
// contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) Text() []byte {
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		s := z.buf[z.data.start:z.data.end]
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		if !z.textIsRaw {
			s = unescape(s)
		}
		return s
	}
	return nil
}
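
// For example, for the text token in "a&lt;b" the returned bytes are "a<b",
// whereas inside a raw text element such as <script> the bytes are returned
// verbatim (z.textIsRaw is true).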

// TagName returns the lower-cased name of a tag token (the `img` out of
// `<IMG SRC="foo">`) and whether the tag has attributes.
// The contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
	if z.data.start < z.data.end {
		switch z.tt {
		case StartTagToken, EndTagToken, SelfClosingTagToken:
			s := z.buf[z.data.start:z.data.end]
			z.data.start = z.raw.end
			z.data.end = z.raw.end
			return lower(s), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, false
}

// TagAttr returns the lower-cased key and unescaped value of the next unparsed
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
	if z.nAttrReturned < len(z.attr) {
		switch z.tt {
		case StartTagToken, SelfClosingTagToken:
			x := z.attr[z.nAttrReturned]
			z.nAttrReturned++
			key = z.buf[x[0].start:x[0].end]
			val = z.buf[x[1].start:x[1].end]
			return lower(key), unescape(val), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, nil, false
}
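
// Draining a tag's attributes typically looks like the sketch below
// (illustrative only): TagName must be called first, and its hasAttr result
// primes the loop.
//
//	name, hasAttr := z.TagName()
//	for hasAttr {
//		var key, val []byte
//		key, val, hasAttr = z.TagAttr()
//		// Use name, key and val before the next call to Next.
//	}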

// Token returns the next Token. The result's Data and Attr values remain valid
// after subsequent Next calls.
func (z *Tokenizer) Token() Token {
	t := Token{Type: z.tt}
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		t.Data = string(z.Text())
	case StartTagToken, SelfClosingTagToken:
		var attr []Attribute
		name, moreAttr := z.TagName()
		for moreAttr {
			var key, val []byte
			key, val, moreAttr = z.TagAttr()
			attr = append(attr, Attribute{"", string(key), string(val)})
		}
		t.Data = string(name)
		t.Attr = attr
	case EndTagToken:
		name, _ := z.TagName()
		t.Data = string(name)
	}
	return t
}

// NewTokenizer returns a new HTML Tokenizer for the given Reader.
// The input is assumed to be UTF-8 encoded.
func NewTokenizer(r io.Reader) *Tokenizer {
	return &Tokenizer{
		r:   r,
		buf: make([]byte, 0, 4096),
	}
}
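
// walkTokens is an illustrative sketch, not part of the original API: it
// shows the canonical way to drive a Tokenizer end to end. The function name
// and the callback parameter are assumptions made for this example.
func walkTokens(r io.Reader, f func(Token)) error {
	z := NewTokenizer(r)
	for {
		if z.Next() == ErrorToken {
			// Err returns io.EOF when the input is tokenized successfully.
			if err := z.Err(); err != io.EOF {
				return err
			}
			return nil
		}
		f(z.Token())
	}
}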