syntax: Rename TokenAndSpan into Token

commit a3425edb46
parent 99b27d749c
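
In short: the `TokenAndSpan` pair returned by the lexer becomes a proper `Token` struct living in `parse::token`, and its fields are renamed from `tok`/`sp` to `kind`/`span`. Both definitions appear verbatim in the diff below; side by side:

    // Before (in the lexer module): a token kind plus its span, as a pair.
    pub struct TokenAndSpan {
        pub tok: TokenKind,
        pub sp: Span,
    }

    // After (in parse::token): the same data as a first-class Token.
    pub struct Token {
        pub kind: TokenKind,
        pub span: Span,
    }

Callers migrate mechanically: `.tok` becomes `.kind` and `.sp` becomes `.span`.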
@@ -60,11 +60,11 @@ impl<'a> SpanUtils<'a> {
         let mut toks = self.retokenise_span(span);
         loop {
             let next = toks.real_token();
-            if next.tok == token::Eof {
+            if next == token::Eof {
                 return None;
             }
-            if next.tok == tok {
-                return Some(next.sp);
+            if next == tok {
+                return Some(next.span);
             }
         }
     }
@@ -74,12 +74,12 @@ impl<'a> SpanUtils<'a> {
     //     let mut toks = self.retokenise_span(span);
     //     loop {
     //         let ts = toks.real_token();
-    //         if ts.tok == token::Eof {
+    //         if ts == token::Eof {
     //             return None;
     //         }
-    //         if ts.tok == token::Not {
+    //         if ts == token::Not {
     //             let ts = toks.real_token();
-    //             if ts.tok.is_ident() {
+    //             if ts.kind.is_ident() {
     //                 return Some(ts.sp);
     //             } else {
     //                 return None;
@@ -93,12 +93,12 @@ impl<'a> SpanUtils<'a> {
     //     let mut toks = self.retokenise_span(span);
     //     let mut prev = toks.real_token();
     //     loop {
-    //         if prev.tok == token::Eof {
+    //         if prev == token::Eof {
     //             return None;
     //         }
     //         let ts = toks.real_token();
-    //         if ts.tok == token::Not {
-    //             if prev.tok.is_ident() {
+    //         if ts == token::Not {
+    //             if prev.kind.is_ident() {
     //                 return Some(prev.sp);
     //             } else {
     //                 return None;
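
Note that comparisons like `next == token::Eof` above put a whole `Token` on the left and a bare `TokenKind` on the right; they compile because this commit also adds `impl PartialEq<TokenKind> for Token` (see the `parse::token` hunk near the end of the diff). A minimal sketch of the pattern, with `toks` standing in for any retokenised stream as in the code above:

    let next = toks.real_token();   // `next` is a whole Token now
    if next == token::Eof {         // sugar for `next.kind == token::Eof`
        return None;
    }
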
@@ -12,8 +12,8 @@ use std::io;
 use std::io::prelude::*;
 
 use syntax::source_map::{SourceMap, FilePathMapping};
-use syntax::parse::lexer::{self, TokenAndSpan};
-use syntax::parse::token;
+use syntax::parse::lexer;
+use syntax::parse::token::{self, Token};
 use syntax::parse;
 use syntax::symbol::{kw, sym};
 use syntax_pos::{Span, FileName};
@@ -186,9 +186,9 @@ impl<'a> Classifier<'a> {
     }
 
     /// Gets the next token out of the lexer.
-    fn try_next_token(&mut self) -> Result<TokenAndSpan, HighlightError> {
+    fn try_next_token(&mut self) -> Result<Token, HighlightError> {
         match self.lexer.try_next_token() {
-            Ok(tas) => Ok(tas),
+            Ok(token) => Ok(token),
             Err(_) => Err(HighlightError::LexError),
         }
     }
@@ -205,7 +205,7 @@ impl<'a> Classifier<'a> {
                                    -> Result<(), HighlightError> {
         loop {
             let next = self.try_next_token()?;
-            if next.tok == token::Eof {
+            if next == token::Eof {
                 break;
             }
 
@@ -218,9 +218,9 @@ impl<'a> Classifier<'a> {
     // Handles an individual token from the lexer.
     fn write_token<W: Writer>(&mut self,
                               out: &mut W,
-                              tas: TokenAndSpan)
+                              token: Token)
                               -> Result<(), HighlightError> {
-        let klass = match tas.tok {
+        let klass = match token.kind {
             token::Shebang(s) => {
                 out.string(Escape(&s.as_str()), Class::None)?;
                 return Ok(());
@@ -234,7 +234,7 @@ impl<'a> Classifier<'a> {
             // reference or dereference operator or a reference or pointer type, instead of the
             // bit-and or multiplication operator.
             token::BinOp(token::And) | token::BinOp(token::Star)
-                if self.lexer.peek().tok != token::Whitespace => Class::RefKeyWord,
+                if self.lexer.peek().kind != token::Whitespace => Class::RefKeyWord,
 
             // Consider this as part of a macro invocation if there was a
             // leading identifier.
@@ -257,7 +257,7 @@ impl<'a> Classifier<'a> {
             token::Question => Class::QuestionMark,
 
             token::Dollar => {
-                if self.lexer.peek().tok.is_ident() {
+                if self.lexer.peek().kind.is_ident() {
                     self.in_macro_nonterminal = true;
                     Class::MacroNonTerminal
                 } else {
@@ -280,9 +280,9 @@ impl<'a> Classifier<'a> {
                 // as an attribute.
 
                 // Case 1: #![inner_attribute]
-                if self.lexer.peek().tok == token::Not {
+                if self.lexer.peek() == token::Not {
                     self.try_next_token()?; // NOTE: consumes `!` token!
-                    if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+                    if self.lexer.peek() == token::OpenDelim(token::Bracket) {
                         self.in_attribute = true;
                         out.enter_span(Class::Attribute)?;
                     }
@@ -292,7 +292,7 @@ impl<'a> Classifier<'a> {
                 }
 
                 // Case 2: #[outer_attribute]
-                if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+                if self.lexer.peek() == token::OpenDelim(token::Bracket) {
                     self.in_attribute = true;
                     out.enter_span(Class::Attribute)?;
                 }
@@ -335,13 +335,13 @@ impl<'a> Classifier<'a> {
             sym::Option | sym::Result => Class::PreludeTy,
             sym::Some | sym::None | sym::Ok | sym::Err => Class::PreludeVal,
 
-            _ if tas.tok.is_reserved_ident() => Class::KeyWord,
+            _ if token.kind.is_reserved_ident() => Class::KeyWord,
 
             _ => {
                 if self.in_macro_nonterminal {
                     self.in_macro_nonterminal = false;
                     Class::MacroNonTerminal
-                } else if self.lexer.peek().tok == token::Not {
+                } else if self.lexer.peek() == token::Not {
                     self.in_macro = true;
                     Class::Macro
                 } else {
@@ -359,7 +359,7 @@ impl<'a> Classifier<'a> {
 
         // Anything that didn't return above is the simple case where we the
         // class just spans a single token, so we can use the `string` method.
-        out.string(Escape(&self.snip(tas.sp)), klass)?;
+        out.string(Escape(&self.snip(token.span)), klass)?;
 
         Ok(())
     }
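
Throughout the highlighter the rename splits cleanly along the two fields: classification decisions read `token.kind` (or `self.lexer.peek().kind`), while output reads `token.span`. A simplified sketch of that division of labor, with `render` as a hypothetical stand-in for the `out.string`/`snip` plumbing above:

    fn write_token_sketch(token: Token) {
        let klass = match token.kind {          // kind drives classification
            token::Question => Class::QuestionMark,
            _ => Class::None,
        };
        render(token.span, klass);              // span drives what gets printed
    }
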
|
@@ -1,5 +1,5 @@
 use errors::Applicability;
-use syntax::parse::lexer::{TokenAndSpan, StringReader as Lexer};
+use syntax::parse::lexer::{StringReader as Lexer};
 use syntax::parse::{ParseSess, token};
 use syntax::source_map::FilePathMapping;
 use syntax_pos::FileName;
@@ -33,8 +33,8 @@ impl<'a, 'tcx> SyntaxChecker<'a, 'tcx> {
         );
 
         let errors = Lexer::new_or_buffered_errs(&sess, source_file, None).and_then(|mut lexer| {
-            while let Ok(TokenAndSpan { tok, .. }) = lexer.try_next_token() {
-                if tok == token::Eof {
+            while let Ok(token::Token { kind, .. }) = lexer.try_next_token() {
+                if kind == token::Eof {
                     break;
                 }
             }
|
@@ -1,6 +1,6 @@
 use crate::ast::{self, Ident};
 use crate::parse::ParseSess;
-use crate::parse::token::{self, TokenKind};
+use crate::parse::token::{self, Token, TokenKind};
 use crate::symbol::{sym, Symbol};
 use crate::parse::unescape;
 use crate::parse::unescape_error_reporting::{emit_unescape_error, push_escaped_char};
@@ -20,21 +20,6 @@ pub mod comments;
 mod tokentrees;
 mod unicode_chars;
 
-#[derive(Clone, Debug)]
-pub struct TokenAndSpan {
-    pub tok: TokenKind,
-    pub sp: Span,
-}
-
-impl Default for TokenAndSpan {
-    fn default() -> Self {
-        TokenAndSpan {
-            tok: token::Whitespace,
-            sp: syntax_pos::DUMMY_SP,
-        }
-    }
-}
-
 #[derive(Clone, Debug)]
 pub struct UnmatchedBrace {
     pub expected_delim: token::DelimToken,
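
`TokenAndSpan` (and its `Default` impl) is deleted here rather than renamed in place; the replacement `Token` is defined in `parse::token` further down, and no `Default` impl is ported in the hunks shown. For code that held a `TokenAndSpan`, the migration is field renames only, sketched with `reader` standing in for any `StringReader`-like lexer:

    let t = reader.next_token();
    let kind = t.kind;              // was `t.tok`
    let span = t.span;              // was `t.sp`
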
@@ -87,7 +72,7 @@ impl<'a> StringReader<'a> {
         ident
     }
 
-    fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
+    fn unwrap_or_abort(&mut self, res: Result<Token, ()>) -> Token {
         match res {
             Ok(tok) => tok,
             Err(_) => {
@@ -97,17 +82,17 @@ impl<'a> StringReader<'a> {
         }
     }
 
-    fn next_token(&mut self) -> TokenAndSpan where Self: Sized {
+    fn next_token(&mut self) -> Token where Self: Sized {
         let res = self.try_next_token();
         self.unwrap_or_abort(res)
     }
 
     /// Returns the next token. EFFECT: advances the string_reader.
-    pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
+    pub fn try_next_token(&mut self) -> Result<Token, ()> {
         assert!(self.fatal_errs.is_empty());
-        let ret_val = TokenAndSpan {
-            tok: replace(&mut self.peek_tok, token::Whitespace),
-            sp: self.peek_span,
+        let ret_val = Token {
+            kind: replace(&mut self.peek_tok, token::Whitespace),
+            span: self.peek_span,
         };
         self.advance_token()?;
         Ok(ret_val)
@@ -135,10 +120,10 @@ impl<'a> StringReader<'a> {
         return None;
     }
 
-    fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
+    fn try_real_token(&mut self) -> Result<Token, ()> {
         let mut t = self.try_next_token()?;
         loop {
-            match t.tok {
+            match t.kind {
                 token::Whitespace | token::Comment | token::Shebang(_) => {
                     t = self.try_next_token()?;
                 }
@@ -149,7 +134,7 @@ impl<'a> StringReader<'a> {
         Ok(t)
     }
 
-    pub fn real_token(&mut self) -> TokenAndSpan {
+    pub fn real_token(&mut self) -> Token {
         let res = self.try_real_token();
         self.unwrap_or_abort(res)
     }
@@ -194,11 +179,11 @@ impl<'a> StringReader<'a> {
         buffer
     }
 
-    pub fn peek(&self) -> TokenAndSpan {
+    pub fn peek(&self) -> Token {
         // FIXME(pcwalton): Bad copy!
-        TokenAndSpan {
-            tok: self.peek_tok.clone(),
-            sp: self.peek_span,
+        Token {
+            kind: self.peek_tok.clone(),
+            span: self.peek_span,
         }
     }
 
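
`peek` keeps handing out an owned value after the rename, so the `FIXME(pcwalton): Bad copy!` still applies: every call clones the pending kind out of the reader (hence the `.clone()` in the body above). A sketch of where that cost shows up, assuming a `reader` with the API above:

    // Each iteration clones `peek_tok` inside `peek()` before comparing.
    while reader.peek() == token::Whitespace {
        reader.next_token();        // consume the token we just peeked at
    }
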
@@ -341,9 +326,9 @@ impl<'a> StringReader<'a> {
     fn advance_token(&mut self) -> Result<(), ()> {
         match self.scan_whitespace_or_comment() {
             Some(comment) => {
-                self.peek_span_src_raw = comment.sp;
-                self.peek_span = comment.sp;
-                self.peek_tok = comment.tok;
+                self.peek_span_src_raw = comment.span;
+                self.peek_span = comment.span;
+                self.peek_tok = comment.kind;
             }
             None => {
                 if self.is_eof() {
@@ -527,7 +512,7 @@ impl<'a> StringReader<'a> {
 
     /// PRECONDITION: self.ch is not whitespace
    /// Eats any kind of comment.
-    fn scan_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_comment(&mut self) -> Option<Token> {
         if let Some(c) = self.ch {
             if c.is_whitespace() {
                 let msg = "called consume_any_line_comment, but there was whitespace";
@@ -563,14 +548,14 @@ impl<'a> StringReader<'a> {
                     self.bump();
                 }
 
-                let tok = if doc_comment {
+                let kind = if doc_comment {
                     self.with_str_from(start_bpos, |string| {
                         token::DocComment(Symbol::intern(string))
                     })
                 } else {
                     token::Comment
                 };
-                Some(TokenAndSpan { tok, sp: self.mk_sp(start_bpos, self.pos) })
+                Some(Token { kind, span: self.mk_sp(start_bpos, self.pos) })
             }
             Some('*') => {
                 self.bump();
@@ -594,9 +579,9 @@ impl<'a> StringReader<'a> {
                 while !self.ch_is('\n') && !self.is_eof() {
                     self.bump();
                 }
-                return Some(TokenAndSpan {
-                    tok: token::Shebang(self.name_from(start)),
-                    sp: self.mk_sp(start, self.pos),
+                return Some(Token {
+                    kind: token::Shebang(self.name_from(start)),
+                    span: self.mk_sp(start, self.pos),
                 });
             }
         }
@@ -608,7 +593,7 @@ impl<'a> StringReader<'a> {
 
     /// If there is whitespace, shebang, or a comment, scan it. Otherwise,
     /// return `None`.
-    fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_whitespace_or_comment(&mut self) -> Option<Token> {
         match self.ch.unwrap_or('\0') {
             // # to handle shebang at start of file -- this is the entry point
             // for skipping over all "junk"
@@ -622,9 +607,9 @@ impl<'a> StringReader<'a> {
                 while is_pattern_whitespace(self.ch) {
                     self.bump();
                 }
-                let c = Some(TokenAndSpan {
-                    tok: token::Whitespace,
-                    sp: self.mk_sp(start_bpos, self.pos),
+                let c = Some(Token {
+                    kind: token::Whitespace,
+                    span: self.mk_sp(start_bpos, self.pos),
                 });
                 debug!("scanning whitespace: {:?}", c);
                 c
@@ -634,7 +619,7 @@ impl<'a> StringReader<'a> {
     }
 
     /// Might return a sugared-doc-attr
-    fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_block_comment(&mut self) -> Option<Token> {
         // block comments starting with "/**" or "/*!" are doc-comments
         let is_doc_comment = self.ch_is('*') || self.ch_is('!');
         let start_bpos = self.pos - BytePos(2);
@@ -671,7 +656,7 @@ impl<'a> StringReader<'a> {
 
         self.with_str_from(start_bpos, |string| {
             // but comments with only "*"s between two "/"s are not
-            let tok = if is_block_doc_comment(string) {
+            let kind = if is_block_doc_comment(string) {
                 let string = if has_cr {
                     self.translate_crlf(start_bpos,
                                         string,
@@ -684,9 +669,9 @@ impl<'a> StringReader<'a> {
                 token::Comment
             };
 
-            Some(TokenAndSpan {
-                tok,
-                sp: self.mk_sp(start_bpos, self.pos),
+            Some(Token {
+                kind,
+                span: self.mk_sp(start_bpos, self.pos),
             })
         })
     }
@@ -1611,26 +1596,26 @@ mod tests {
                 "/* my source file */ fn main() { println!(\"zebra\"); }\n"
                 .to_string());
             let id = Ident::from_str("fn");
-            assert_eq!(string_reader.next_token().tok, token::Comment);
-            assert_eq!(string_reader.next_token().tok, token::Whitespace);
+            assert_eq!(string_reader.next_token().kind, token::Comment);
+            assert_eq!(string_reader.next_token().kind, token::Whitespace);
             let tok1 = string_reader.next_token();
-            let tok2 = TokenAndSpan {
-                tok: token::Ident(id, false),
-                sp: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
+            let tok2 = Token {
+                kind: token::Ident(id, false),
+                span: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
             };
-            assert_eq!(tok1.tok, tok2.tok);
-            assert_eq!(tok1.sp, tok2.sp);
-            assert_eq!(string_reader.next_token().tok, token::Whitespace);
+            assert_eq!(tok1.kind, tok2.kind);
+            assert_eq!(tok1.span, tok2.span);
+            assert_eq!(string_reader.next_token().kind, token::Whitespace);
             // the 'main' id is already read:
             assert_eq!(string_reader.pos.clone(), BytePos(28));
             // read another token:
             let tok3 = string_reader.next_token();
-            let tok4 = TokenAndSpan {
-                tok: mk_ident("main"),
-                sp: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
+            let tok4 = Token {
+                kind: mk_ident("main"),
+                span: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
             };
-            assert_eq!(tok3.tok, tok4.tok);
-            assert_eq!(tok3.sp, tok4.sp);
+            assert_eq!(tok3.kind, tok4.kind);
+            assert_eq!(tok3.span, tok4.span);
             // the lparen is already read:
             assert_eq!(string_reader.pos.clone(), BytePos(29))
         })
@@ -1640,7 +1625,7 @@ mod tests {
     // of tokens (stop checking after exhausting the expected vec)
     fn check_tokenization(mut string_reader: StringReader<'_>, expected: Vec<TokenKind>) {
         for expected_tok in &expected {
-            assert_eq!(&string_reader.next_token().tok, expected_tok);
+            assert_eq!(&string_reader.next_token().kind, expected_tok);
         }
    }
 
@@ -1698,7 +1683,7 @@ mod tests {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().kind,
                        mk_lit(token::Char, "a", None));
         })
     }
@@ -1708,7 +1693,7 @@ mod tests {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().kind,
                        mk_lit(token::Char, " ", None));
         })
     }
@@ -1718,7 +1703,7 @@ mod tests {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().kind,
                        mk_lit(token::Char, "\\n", None));
         })
     }
@@ -1728,7 +1713,7 @@ mod tests {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().kind,
                        token::Lifetime(Ident::from_str("'abc")));
         })
     }
@@ -1738,7 +1723,7 @@ mod tests {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().kind,
                        mk_lit(token::StrRaw(3), "\"#a\\b\x00c\"", None));
         })
     }
@@ -1750,10 +1735,10 @@ mod tests {
             let sh = mk_sess(sm.clone());
             macro_rules! test {
                 ($input: expr, $tok_type: ident, $tok_contents: expr) => {{
-                    assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().tok,
+                    assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().kind,
                                mk_lit(token::$tok_type, $tok_contents, Some("suffix")));
                     // with a whitespace separator:
-                    assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().tok,
+                    assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().kind,
                                mk_lit(token::$tok_type, $tok_contents, None));
                 }}
             }
@@ -1768,11 +1753,11 @@ mod tests {
             test!("1.0", Float, "1.0");
             test!("1.0e10", Float, "1.0e10");
 
-            assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().kind,
                        mk_lit(token::Integer, "2", Some("us")));
-            assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().kind,
                        mk_lit(token::StrRaw(3), "raw", Some("suffix")));
-            assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().kind,
                        mk_lit(token::ByteStrRaw(3), "raw", Some("suffix")));
         })
     }
@@ -1790,11 +1775,11 @@ mod tests {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
             let mut lexer = setup(&sm, &sh, "/* /* */ */'a'".to_string());
-            match lexer.next_token().tok {
+            match lexer.next_token().kind {
                 token::Comment => {}
                 _ => panic!("expected a comment!"),
             }
-            assert_eq!(lexer.next_token().tok, mk_lit(token::Char, "a", None));
+            assert_eq!(lexer.next_token().kind, mk_lit(token::Char, "a", None));
         })
     }
 
@@ -1805,10 +1790,10 @@ mod tests {
             let sh = mk_sess(sm.clone());
             let mut lexer = setup(&sm, &sh, "// test\r\n/// test\r\n".to_string());
             let comment = lexer.next_token();
-            assert_eq!(comment.tok, token::Comment);
-            assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
-            assert_eq!(lexer.next_token().tok, token::Whitespace);
-            assert_eq!(lexer.next_token().tok,
+            assert_eq!(comment.kind, token::Comment);
+            assert_eq!((comment.span.lo(), comment.span.hi()), (BytePos(0), BytePos(7)));
+            assert_eq!(lexer.next_token().kind, token::Whitespace);
+            assert_eq!(lexer.next_token().kind,
                        token::DocComment(Symbol::intern("/// test")));
         })
    }
|
@@ -220,7 +220,7 @@ impl<'a> TokenTreesReader<'a> {
 
     fn real_token(&mut self) {
         let t = self.string_reader.real_token();
-        self.token = t.tok;
-        self.span = t.sp;
+        self.token = t.kind;
+        self.span = t.span;
     }
 }
|
@@ -36,9 +36,9 @@ use crate::{ast, attr};
 use crate::ext::base::DummyResult;
 use crate::source_map::{self, SourceMap, Spanned, respan};
 use crate::parse::{SeqSep, classify, literal, token};
-use crate::parse::lexer::{TokenAndSpan, UnmatchedBrace};
+use crate::parse::lexer::UnmatchedBrace;
 use crate::parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
-use crate::parse::token::DelimToken;
+use crate::parse::token::{Token, DelimToken};
 use crate::parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership};
 use crate::util::parser::{AssocOp, Fixity};
 use crate::print::pprust;
@@ -295,7 +295,7 @@ impl TokenCursorFrame {
 }
 
 impl TokenCursor {
-    fn next(&mut self) -> TokenAndSpan {
+    fn next(&mut self) -> Token {
         loop {
             let tree = if !self.frame.open_delim {
                 self.frame.open_delim = true;
@@ -309,7 +309,7 @@ impl TokenCursor {
                 self.frame = frame;
                 continue
             } else {
-                return TokenAndSpan { tok: token::Eof, sp: DUMMY_SP }
+                return Token { kind: token::Eof, span: DUMMY_SP }
             };
 
             match self.frame.last_token {
@@ -318,7 +318,7 @@ impl TokenCursor {
             }
 
             match tree {
-                TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
+                TokenTree::Token(span, kind) => return Token { kind, span },
                 TokenTree::Delimited(sp, delim, tts) => {
                     let frame = TokenCursorFrame::new(sp, delim, &tts);
                     self.stack.push(mem::replace(&mut self.frame, frame));
@@ -327,9 +327,9 @@ impl TokenCursor {
         }
     }
 
-    fn next_desugared(&mut self) -> TokenAndSpan {
+    fn next_desugared(&mut self) -> Token {
         let (sp, name) = match self.next() {
-            TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
+            Token { span, kind: token::DocComment(name) } => (span, name),
             tok => return tok,
         };
 
@@ -499,8 +499,8 @@ impl<'a> Parser<'a> {
         };
 
         let tok = parser.next_tok();
-        parser.token = tok.tok;
-        parser.span = tok.sp;
+        parser.token = tok.kind;
+        parser.span = tok.span;
 
         if let Some(directory) = directory {
             parser.directory = directory;
@@ -515,15 +515,15 @@ impl<'a> Parser<'a> {
         parser
     }
 
-    fn next_tok(&mut self) -> TokenAndSpan {
+    fn next_tok(&mut self) -> Token {
         let mut next = if self.desugar_doc_comments {
             self.token_cursor.next_desugared()
         } else {
             self.token_cursor.next()
        };
-        if next.sp.is_dummy() {
+        if next.span.is_dummy() {
             // Tweak the location for better diagnostics, but keep syntactic context intact.
-            next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
+            next.span = self.prev_span.with_ctxt(next.span.ctxt());
         }
         next
     }
@@ -1023,8 +1023,8 @@ impl<'a> Parser<'a> {
         };
 
         let next = self.next_tok();
-        self.span = next.sp;
-        self.token = next.tok;
+        self.token = next.kind;
+        self.span = next.span;
         self.expected_tokens.clear();
         // check after each token
         self.process_potential_macro_variable();
@@ -1038,8 +1038,8 @@ impl<'a> Parser<'a> {
         // fortunately for tokens currently using `bump_with`, the
         // prev_token_kind will be of no use anyway.
         self.prev_token_kind = PrevTokenKind::Other;
-        self.span = span;
         self.token = next;
+        self.span = span;
         self.expected_tokens.clear();
     }
 
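
Two idioms show up repeatedly in the parser changes: field-init shorthand (`Token { kind, span }` replacing `TokenAndSpan { tok: tok, sp: sp }`) and shorthand struct destructuring in patterns (`Token { span, kind: token::DocComment(name) }`). A self-contained sketch of both, assuming the usual `syntax_pos::DUMMY_SP` import:

    let (kind, span) = (token::Eof, DUMMY_SP);
    let tok = Token { kind, span };             // shorthand construction

    match tok {
        Token { kind: token::Eof, span } =>     // shorthand destructuring
            debug!("eof at {:?}", span),
        other => debug!("next up: {:?}", other.kind),
    }
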
@@ -235,6 +235,12 @@ pub enum TokenKind {
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(TokenKind, 16);
 
+#[derive(Clone, Debug)]
+pub struct Token {
+    pub kind: TokenKind,
+    pub span: Span,
+}
+
 impl TokenKind {
     /// Recovers a `TokenKind` from an `ast::Ident`. This creates a raw identifier if necessary.
     pub fn from_ast_ident(ident: ast::Ident) -> TokenKind {
@@ -602,6 +608,12 @@ impl TokenKind {
     }
 }
 
+impl PartialEq<TokenKind> for Token {
+    fn eq(&self, rhs: &TokenKind) -> bool {
+        self.kind == *rhs
+    }
+}
+
 #[derive(Clone, RustcEncodable, RustcDecodable)]
 /// For interpolation during macro expansion.
 pub enum Nonterminal {
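
This `PartialEq<TokenKind>` impl is what lets the rest of the diff write `token == token::Eof` without projecting `.kind` first. Note it is one-directional; a sketch, again assuming `syntax_pos::DUMMY_SP`:

    let tok = Token { kind: token::Eof, span: DUMMY_SP };
    assert!(tok == token::Eof);    // Token == TokenKind: uses the impl above
    // assert!(token::Eof == tok); // TokenKind == Token: would not compile;
                                   // no mirror impl appears in these hunks.
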
|
@@ -580,7 +580,6 @@ mod tests {
     use super::*;
     use crate::syntax::ast::Ident;
     use crate::with_default_globals;
-    use crate::parse::token::TokenKind;
     use crate::util::parser_testing::string_to_stream;
     use syntax_pos::{Span, BytePos, NO_EXPANSION};
 