Auto merge of #36969 - nnethercote:rename-Parser-fields, r=eddyb

Clarify how the lexer and parser name their positions

The lexer and parser use unclear names to indicate their positions in the
source code. I propose the following renamings.

Lexer:
```
pos      -> next_pos      # it's actually the next pos!
last_pos -> pos           # it's actually the current pos!
curr     -> ch            # the current char
curr_is  -> ch_is         # tests the current char
col (unchanged)           # the current column
```
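
For illustration only, here is a minimal sketch of how the renamed lexer fields read together. The struct and the `BytePos`/`CharPos` stand-ins below are simplified assumptions for this example, not the actual `StringReader` definition in libsyntax:

```rust
// Simplified stand-ins for rustc's position types, so the sketch is
// self-contained; the real types live in libsyntax.
#[derive(Clone, Copy, PartialEq)]
pub struct BytePos(pub u32);
#[derive(Clone, Copy, PartialEq)]
pub struct CharPos(pub usize);

// A sketch of the renamed fields, not the real StringReader.
pub struct StringReader {
    pub next_pos: BytePos,  // was `pos`: position of the *next* character
    pub pos: BytePos,       // was `last_pos`: position of the current character
    pub ch: Option<char>,   // was `curr`: the current character, None at EOF
    pub col: CharPos,       // unchanged: the current column
}

impl StringReader {
    // was `curr_is`: tests the current character
    pub fn ch_is(&self, c: char) -> bool {
        self.ch == Some(c)
    }
}
```
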
Parser:
```
last_span       -> prev_span          # the previous token's span
last_token_kind -> prev_token_kind    # the previous token's kind
LastTokenKind   -> PrevTokenKind      # ditto (but the type)
token (unchanged)                     # the current token
span (unchanged)                      # the current span
```
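
The parser renaming matters most at the common call-site pattern where a construct's span is closed off with the end of the token the parser has just moved past (see the diffs below). A minimal sketch, again with simplified stand-in types rather than the real `Parser`:

```rust
// Stand-in types so the sketch compiles on its own; not rustc's real types.
#[derive(Clone, Copy)]
pub struct BytePos(pub u32);
#[derive(Clone, Copy)]
pub struct Span { pub lo: BytePos, pub hi: BytePos }

fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
    Span { lo, hi }
}

// A sketch of the renamed fields, not the real Parser.
pub struct Parser {
    pub span: Span,      // unchanged: span of the current token
    pub prev_span: Span, // was `last_span`: span of the previous token
}

// After bumping past the final token of a construct, the construct ends
// where the *previous* token ends, so `prev_span.hi` says what it means,
// where `last_span.hi` was ambiguous.
fn close_span(p: &Parser, lo: BytePos) -> Span {
    mk_sp(lo, p.prev_span.hi)
}
```
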

Things to note:
- This proposal removes all uses of "last", which is an unclear word because it
  could mean (a) previous, (b) final, or (c) most recent, i.e. current.
- The "current" things (ch, col, token, span) consistently lack a prefix. The
  "previous" and "next" things consistently have a prefix.
bors 2016-10-17 22:27:57 -07:00 committed by GitHub
commit 3543a0f602
9 changed files with 414 additions and 415 deletions


@@ -582,7 +582,7 @@ impl<'a> CrateReader<'a> {
unreachable!();
}
};
-let local_span = mk_sp(lo, p.last_span.hi);
+let local_span = mk_sp(lo, p.prev_span.hi);
// Mark the attrs as used
for attr in &def.attrs {


@@ -139,9 +139,9 @@ impl<'a> SpanUtils<'a> {
let mut prev = toks.real_token();
let mut result = None;
let mut bracket_count = 0;
-let mut last_span = None;
+let mut prev_span = None;
while prev.tok != token::Eof {
-last_span = None;
+prev_span = None;
let mut next = toks.real_token();
if (next.tok == token::OpenDelim(token::Paren) || next.tok == token::Lt) &&
@@ -166,12 +166,12 @@ impl<'a> SpanUtils<'a> {
};
if prev.tok.is_ident() && bracket_count == 0 {
-last_span = Some(prev.sp);
+prev_span = Some(prev.sp);
}
prev = next;
}
-if result.is_none() && last_span.is_some() {
-return self.make_sub_span(span, last_span);
+if result.is_none() && prev_span.is_some() {
+return self.make_sub_span(span, prev_span);
}
return self.make_sub_span(span, result);
}


@@ -804,7 +804,7 @@ impl CodeMap {
}
pub fn macro_backtrace(&self, span: Span) -> Vec<MacroBacktrace> {
-let mut last_span = DUMMY_SP;
+let mut prev_span = DUMMY_SP;
let mut span = span;
let mut result = vec![];
loop {
@@ -827,14 +827,14 @@ impl CodeMap {
None => break,
Some((call_site, macro_decl_name, def_site_span)) => {
// Don't print recursive invocations
-if !call_site.source_equal(&last_span) {
+if !call_site.source_equal(&prev_span) {
result.push(MacroBacktrace {
call_site: call_site,
macro_decl_name: macro_decl_name,
def_site_span: def_site_span,
});
}
-last_span = span;
+prev_span = span;
span = call_site;
}
}


@@ -126,7 +126,7 @@ impl<'a> Parser<'a> {
self.expect(&token::OpenDelim(token::Bracket))?;
let meta_item = self.parse_meta_item()?;
self.expect(&token::CloseDelim(token::Bracket))?;
-let hi = self.last_span.hi;
+let hi = self.prev_span.hi;
(mk_sp(lo, hi), meta_item, style)
}
@@ -231,16 +231,16 @@ impl<'a> Parser<'a> {
token::Eq => {
self.bump();
let lit = self.parse_unsuffixed_lit()?;
-let hi = self.last_span.hi;
+let hi = self.prev_span.hi;
Ok(P(spanned(lo, hi, ast::MetaItemKind::NameValue(name, lit))))
}
token::OpenDelim(token::Paren) => {
let inner_items = self.parse_meta_seq()?;
-let hi = self.last_span.hi;
+let hi = self.prev_span.hi;
Ok(P(spanned(lo, hi, ast::MetaItemKind::List(name, inner_items))))
}
_ => {
-let hi = self.last_span.hi;
+let hi = self.prev_span.hi;
Ok(P(spanned(lo, hi, ast::MetaItemKind::Word(name))))
}
}
@@ -253,14 +253,14 @@ impl<'a> Parser<'a> {
match self.parse_unsuffixed_lit() {
Ok(lit) => {
-return Ok(spanned(lo, self.last_span.hi, ast::NestedMetaItemKind::Literal(lit)))
+return Ok(spanned(lo, self.prev_span.hi, ast::NestedMetaItemKind::Literal(lit)))
}
Err(ref mut err) => self.diagnostic().cancel(err)
}
match self.parse_meta_item() {
Ok(mi) => {
-return Ok(spanned(lo, self.last_span.hi, ast::NestedMetaItemKind::MetaItem(mi)))
+return Ok(spanned(lo, self.prev_span.hi, ast::NestedMetaItemKind::MetaItem(mi)))
}
Err(ref mut err) => self.diagnostic().cancel(err)
}


@@ -149,13 +149,13 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
comments.push(Comment {
style: BlankLine,
lines: Vec::new(),
-pos: rdr.last_pos,
+pos: rdr.pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader, comments: &mut Vec<Comment>) {
-while is_pattern_whitespace(rdr.curr) && !rdr.is_eof() {
-if rdr.col == CharPos(0) && rdr.curr_is('\n') {
+while is_pattern_whitespace(rdr.ch) && !rdr.is_eof() {
+if rdr.col == CharPos(0) && rdr.ch_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
@@ -167,7 +167,7 @@ fn read_shebang_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
-let p = rdr.last_pos;
+let p = rdr.pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
@@ -180,9 +180,9 @@ fn read_line_comments(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
-let p = rdr.last_pos;
+let p = rdr.pos;
let mut lines: Vec<String> = Vec::new();
-while rdr.curr_is('/') && rdr.nextch_is('/') {
+while rdr.ch_is('/') && rdr.nextch_is('/') {
let line = rdr.read_one_line_comment();
debug!("{}", line);
// Doc comments are not put in comments.
@@ -240,7 +240,7 @@ fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> block comment");
-let p = rdr.last_pos;
+let p = rdr.pos;
let mut lines: Vec<String> = Vec::new();
let col = rdr.col;
rdr.bump();
@@ -249,9 +249,9 @@ fn read_block_comment(rdr: &mut StringReader,
let mut curr_line = String::from("/*");
// doc-comments are not really comments, they are attributes
-if (rdr.curr_is('*') && !rdr.nextch_is('*')) || rdr.curr_is('!') {
-while !(rdr.curr_is('*') && rdr.nextch_is('/')) && !rdr.is_eof() {
-curr_line.push(rdr.curr.unwrap());
+if (rdr.ch_is('*') && !rdr.nextch_is('*')) || rdr.ch_is('!') {
+while !(rdr.ch_is('*') && rdr.nextch_is('/')) && !rdr.is_eof() {
+curr_line.push(rdr.ch.unwrap());
rdr.bump();
}
if !rdr.is_eof() {
@@ -271,19 +271,19 @@ fn read_block_comment(rdr: &mut StringReader,
if rdr.is_eof() {
panic!(rdr.fatal("unterminated block comment"));
}
-if rdr.curr_is('\n') {
+if rdr.ch_is('\n') {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line, col);
curr_line = String::new();
rdr.bump();
} else {
-curr_line.push(rdr.curr.unwrap());
-if rdr.curr_is('/') && rdr.nextch_is('*') {
+curr_line.push(rdr.ch.unwrap());
+if rdr.ch_is('/') && rdr.nextch_is('*') {
rdr.bump();
rdr.bump();
curr_line.push('*');
level += 1;
} else {
-if rdr.curr_is('*') && rdr.nextch_is('/') {
+if rdr.ch_is('*') && rdr.nextch_is('/') {
rdr.bump();
rdr.bump();
curr_line.push('/');
@@ -305,7 +305,7 @@ fn read_block_comment(rdr: &mut StringReader,
Isolated
};
rdr.consume_non_eol_whitespace();
-if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1 {
+if !rdr.is_eof() && !rdr.ch_is('\n') && lines.len() == 1 {
style = Mixed;
}
debug!("<<< block comment");
@@ -319,11 +319,11 @@ fn read_block_comment(rdr: &mut StringReader,
fn consume_comment(rdr: &mut StringReader, code_to_the_left: bool, comments: &mut Vec<Comment>) {
debug!(">>> consume comment");
-if rdr.curr_is('/') && rdr.nextch_is('/') {
+if rdr.ch_is('/') && rdr.nextch_is('/') {
read_line_comments(rdr, code_to_the_left, comments);
-} else if rdr.curr_is('/') && rdr.nextch_is('*') {
+} else if rdr.ch_is('/') && rdr.nextch_is('*') {
read_block_comment(rdr, code_to_the_left, comments);
-} else if rdr.curr_is('#') && rdr.nextch_is('!') {
+} else if rdr.ch_is('#') && rdr.nextch_is('!') {
read_shebang_comment(rdr, code_to_the_left, comments);
} else {
panic!();
@@ -357,7 +357,7 @@ pub fn gather_comments_and_literals(span_diagnostic: &errors::Handler,
loop {
let mut code_to_the_left = !first_read;
rdr.consume_non_eol_whitespace();
-if rdr.curr_is('\n') {
+if rdr.ch_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
@@ -369,7 +369,7 @@ pub fn gather_comments_and_literals(span_diagnostic: &errors::Handler,
}
-let bstart = rdr.last_pos;
+let bstart = rdr.pos;
rdr.next_token();
// discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();

File diff suppressed because it is too large.


@@ -234,7 +234,7 @@ pub fn check_for_substitution<'a>(reader: &StringReader<'a>,
.iter()
.find(|&&(c, _, _)| c == ch)
.map(|&(_, u_name, ascii_char)| {
-let span = make_span(reader.last_pos, reader.pos);
+let span = make_span(reader.pos, reader.next_pos);
match ASCII_ARRAY.iter().find(|&&(c, _)| c == ascii_char) {
Some(&(ascii_char, ascii_name)) => {
let msg =

File diff suppressed because it is too large.


@@ -122,7 +122,7 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt,
let (constraint, _str_style) = panictry!(p.parse_str());
-let span = p.last_span;
+let span = p.prev_span;
panictry!(p.expect(&token::OpenDelim(token::Paren)));
let out = panictry!(p.parse_expr());
@@ -167,9 +167,9 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt,
let (constraint, _str_style) = panictry!(p.parse_str());
if constraint.starts_with("=") {
-cx.span_err(p.last_span, "input operand constraint contains '='");
+cx.span_err(p.prev_span, "input operand constraint contains '='");
} else if constraint.starts_with("+") {
-cx.span_err(p.last_span, "input operand constraint contains '+'");
+cx.span_err(p.prev_span, "input operand constraint contains '+'");
}
panictry!(p.expect(&token::OpenDelim(token::Paren)));
@@ -189,9 +189,9 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt,
let (s, _str_style) = panictry!(p.parse_str());
if OPTIONS.iter().any(|&opt| s == opt) {
-cx.span_warn(p.last_span, "expected a clobber, found an option");
+cx.span_warn(p.prev_span, "expected a clobber, found an option");
} else if s.starts_with("{") || s.ends_with("}") {
-cx.span_err(p.last_span, "clobber should not be surrounded by braces");
+cx.span_err(p.prev_span, "clobber should not be surrounded by braces");
}
clobs.push(s);
@@ -209,7 +209,7 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt,
} else if option == "intel" {
dialect = AsmDialect::Intel;
} else {
-cx.span_warn(p.last_span, "unrecognized option");
+cx.span_warn(p.prev_span, "unrecognized option");
}
if p.token == token::Comma {