diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index 527ef553d99..111650f565c 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -128,13 +128,17 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
}
}
- // text literals
- token::Literal(token::Byte(..)) | token::Literal(token::Char(..)) |
- token::Literal(token::Binary(..)) | token::Literal(token::BinaryRaw(..)) |
- token::Literal(token::Str_(..)) | token::Literal(token::StrRaw(..)) => "string",
+ token::Literal(lit, _suf) => {
+ match lit {
+ // text literals
+ token::Byte(..) | token::Char(..) |
+ token::Binary(..) | token::BinaryRaw(..) |
+ token::Str_(..) | token::StrRaw(..) => "string",
- // number literals
- token::Literal(token::Integer(..)) | token::Literal(token::Float(..)) => "number",
+ // number literals
+ token::Integer(..) | token::Float(..) => "number",
+ }
+ }
// keywords are also included in the identifier set
token::Ident(ident, _is_mod_sep) => {
diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs
index 2158bdb416c..7b16c087859 100644
--- a/src/libsyntax/ast.rs
+++ b/src/libsyntax/ast.rs
@@ -838,7 +838,7 @@ impl TokenTree {
tts: vec![TtToken(sp, token::Ident(token::str_to_ident("doc"),
token::Plain)),
TtToken(sp, token::Eq),
- TtToken(sp, token::Literal(token::Str_(name)))],
+ TtToken(sp, token::Literal(token::Str_(name), None))],
close_span: sp,
}))
}
diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs
index b928fc778e8..281bde3129a 100644
--- a/src/libsyntax/diagnostics/plugin.rs
+++ b/src/libsyntax/diagnostics/plugin.rs
@@ -87,7 +87,7 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
},
[ast::TtToken(_, token::Ident(ref code, _)),
ast::TtToken(_, token::Comma),
- ast::TtToken(_, token::Literal(token::StrRaw(description, _)))] => {
+ ast::TtToken(_, token::Literal(token::StrRaw(description, _), None))] => {
(code, Some(description))
}
_ => unreachable!()
diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs
index ec51ce00605..eaa3632cf49 100644
--- a/src/libsyntax/ext/quote.rs
+++ b/src/libsyntax/ext/quote.rs
@@ -543,10 +543,13 @@ fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
#[allow(non_upper_case_globals)]
fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
macro_rules! mk_lit {
- ($name: expr, $($args: expr),*) => {{
+ ($name: expr, $suffix: expr, $($args: expr),*) => {{
let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
-
- cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner])
+ let suffix = match $suffix {
+ Some(name) => cx.expr_some(sp, mk_name(cx, sp, ast::Ident::new(name))),
+ None => cx.expr_none(sp)
+ };
+ cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner, suffix])
}}
}
match *tok {
@@ -567,32 +570,32 @@ fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
vec![mk_delim(cx, sp, delim)]);
}
- token::Literal(token::Byte(i)) => {
+ token::Literal(token::Byte(i), suf) => {
let e_byte = mk_name(cx, sp, i.ident());
- return mk_lit!("Byte", e_byte);
+ return mk_lit!("Byte", suf, e_byte);
}
- token::Literal(token::Char(i)) => {
+ token::Literal(token::Char(i), suf) => {
let e_char = mk_name(cx, sp, i.ident());
- return mk_lit!("Char", e_char);
+ return mk_lit!("Char", suf, e_char);
}
- token::Literal(token::Integer(i)) => {
+ token::Literal(token::Integer(i), suf) => {
let e_int = mk_name(cx, sp, i.ident());
- return mk_lit!("Integer", e_int);
+ return mk_lit!("Integer", suf, e_int);
}
- token::Literal(token::Float(fident)) => {
+ token::Literal(token::Float(fident), suf) => {
let e_fident = mk_name(cx, sp, fident.ident());
- return mk_lit!("Float", e_fident);
+ return mk_lit!("Float", suf, e_fident);
}
- token::Literal(token::Str_(ident)) => {
- return mk_lit!("Str_", mk_name(cx, sp, ident.ident()))
+ token::Literal(token::Str_(ident), suf) => {
+ return mk_lit!("Str_", suf, mk_name(cx, sp, ident.ident()))
}
- token::Literal(token::StrRaw(ident, n)) => {
- return mk_lit!("StrRaw", mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n))
+ token::Literal(token::StrRaw(ident, n), suf) => {
+ return mk_lit!("StrRaw", suf, mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n))
}
token::Ident(ident, style) => {
diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs
index b7598c7c428..55c4335941d 100644
--- a/src/libsyntax/parse/lexer/mod.rs
+++ b/src/libsyntax/parse/lexer/mod.rs
@@ -369,6 +369,25 @@ impl<'a> StringReader<'a> {
self.nextnextch() == Some(c)
}
+ /// Eats <XID_start><XID_continue>*, if possible.
+ fn scan_optional_raw_name(&mut self) -> Option<ast::Name> {
+ if !ident_start(self.curr) {
+ return None
+ }
+ let start = self.last_pos;
+ while ident_continue(self.curr) {
+ self.bump();
+ }
+
+ self.with_str_from(start, |string| {
+ if string == "_" {
+ None
+ } else {
+ Some(token::intern(string))
+ }
+ })
+ }
+
/// PRECONDITION: self.curr is not whitespace
/// Eats any kind of comment.
fn scan_comment(&mut self) -> Option<TokenAndSpan> {
@@ -638,7 +657,7 @@ impl<'a> StringReader<'a> {
}
/// Lex a LIT_INTEGER or a LIT_FLOAT
- fn scan_number(&mut self, c: char) -> token::Token {
+ fn scan_number(&mut self, c: char) -> token::Lit {
let mut num_digits;
let mut base = 10;
let start_bpos = self.last_pos;
@@ -655,17 +674,17 @@ impl<'a> StringReader<'a> {
}
'u' | 'i' => {
self.scan_int_suffix();
- return token::Literal(token::Integer(self.name_from(start_bpos)));
+ return token::Integer(self.name_from(start_bpos));
},
'f' => {
let last_pos = self.last_pos;
self.scan_float_suffix();
self.check_float_base(start_bpos, last_pos, base);
- return token::Literal(token::Float(self.name_from(start_bpos)));
+ return token::Float(self.name_from(start_bpos));
}
_ => {
// just a 0
- return token::Literal(token::Integer(self.name_from(start_bpos)));
+ return token::Integer(self.name_from(start_bpos));
}
}
} else if c.is_digit_radix(10) {
@@ -678,7 +697,7 @@ impl<'a> StringReader<'a> {
self.err_span_(start_bpos, self.last_pos, "no valid digits found for number");
// eat any suffix
self.scan_int_suffix();
- return token::Literal(token::Integer(token::intern("0")));
+ return token::Integer(token::intern("0"));
}
// might be a float, but don't be greedy if this is actually an
@@ -696,13 +715,13 @@ impl<'a> StringReader<'a> {
}
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
- return token::Literal(token::Float(self.name_from(start_bpos)));
+ return token::Float(self.name_from(start_bpos));
} else if self.curr_is('f') {
// or it might be an integer literal suffixed as a float
self.scan_float_suffix();
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
- return token::Literal(token::Float(self.name_from(start_bpos)));
+ return token::Float(self.name_from(start_bpos));
} else {
// it might be a float if it has an exponent
if self.curr_is('e') || self.curr_is('E') {
@@ -710,11 +729,11 @@ impl<'a> StringReader<'a> {
self.scan_float_suffix();
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
- return token::Literal(token::Float(self.name_from(start_bpos)));
+ return token::Float(self.name_from(start_bpos));
}
// but we certainly have an integer!
self.scan_int_suffix();
- return token::Literal(token::Integer(self.name_from(start_bpos)));
+ return token::Integer(self.name_from(start_bpos));
}
}
@@ -967,7 +986,9 @@ impl<'a> StringReader<'a> {
}
if is_dec_digit(c) {
- return self.scan_number(c.unwrap());
+ let num = self.scan_number(c.unwrap());
+ let suffix = self.scan_optional_raw_name();
+ return token::Literal(num, suffix)
}
if self.read_embedded_ident {
@@ -1126,17 +1147,19 @@ impl<'a> StringReader<'a> {
}
let id = if valid { self.name_from(start) } else { token::intern("0") };
self.bump(); // advance curr past token
- return token::Literal(token::Char(id));
+ let suffix = self.scan_optional_raw_name();
+ return token::Literal(token::Char(id), suffix);
}
'b' => {
self.bump();
- return match self.curr {
+ let lit = match self.curr {
Some('\'') => self.scan_byte(),
Some('"') => self.scan_byte_string(),
Some('r') => self.scan_raw_byte_string(),
_ => unreachable!() // Should have been a token::Ident above.
};
-
+ let suffix = self.scan_optional_raw_name();
+ return token::Literal(lit, suffix);
}
'"' => {
let start_bpos = self.last_pos;
@@ -1157,7 +1180,8 @@ impl<'a> StringReader<'a> {
let id = if valid { self.name_from(start_bpos + BytePos(1)) }
else { token::intern("??") };
self.bump();
- return token::Literal(token::Str_(id));
+ let suffix = self.scan_optional_raw_name();
+ return token::Literal(token::Str_(id), suffix);
}
'r' => {
let start_bpos = self.last_pos;
@@ -1224,7 +1248,8 @@ impl<'a> StringReader<'a> {
} else {
token::intern("??")
};
- return token::Literal(token::StrRaw(id, hash_count));
+ let suffix = self.scan_optional_raw_name();
+ return token::Literal(token::StrRaw(id, hash_count), suffix);
}
'-' => {
if self.nextch_is('>') {
@@ -1293,7 +1318,7 @@ impl<'a> StringReader<'a> {
|| (self.curr_is('#') && self.nextch_is('!') && !self.nextnextch_is('['))
}
- fn scan_byte(&mut self) -> token::Token {
+ fn scan_byte(&mut self) -> token::Lit {
self.bump();
let start = self.last_pos;
@@ -1314,10 +1339,10 @@ impl<'a> StringReader<'a> {
let id = if valid { self.name_from(start) } else { token::intern("??") };
self.bump(); // advance curr past token
- return token::Literal(token::Byte(id));
+ return token::Byte(id);
}
- fn scan_byte_string(&mut self) -> token::Token {
+ fn scan_byte_string(&mut self) -> token::Lit {
self.bump();
let start = self.last_pos;
let mut valid = true;
@@ -1336,10 +1361,10 @@ impl<'a> StringReader<'a> {
}
let id = if valid { self.name_from(start) } else { token::intern("??") };
self.bump();
- return token::Literal(token::Binary(id));
+ return token::Binary(id);
}
- fn scan_raw_byte_string(&mut self) -> token::Token {
+ fn scan_raw_byte_string(&mut self) -> token::Lit {
let start_bpos = self.last_pos;
self.bump();
let mut hash_count = 0u;
@@ -1387,9 +1412,9 @@ impl<'a> StringReader<'a> {
self.bump();
}
self.bump();
- return token::Literal(token::BinaryRaw(self.name_from_to(content_start_bpos,
- content_end_bpos),
- hash_count));
+ return token::BinaryRaw(self.name_from_to(content_start_bpos,
+ content_end_bpos),
+ hash_count);
}
}
@@ -1536,17 +1561,17 @@ mod test {
#[test] fn character_a() {
assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok,
- token::Literal(token::Char(token::intern("a"))));
+ token::Literal(token::Char(token::intern("a")), None));
}
#[test] fn character_space() {
assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok,
- token::Literal(token::Char(token::intern(" "))));
+ token::Literal(token::Char(token::intern(" ")), None));
}
#[test] fn character_escaped() {
assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok,
- token::Literal(token::Char(token::intern("\\n"))));
+ token::Literal(token::Char(token::intern("\\n")), None));
}
#[test] fn lifetime_name() {
@@ -1558,7 +1583,38 @@ mod test {
assert_eq!(setup(&mk_sh(),
"r###\"\"#a\\b\x00c\"\"###".to_string()).next_token()
.tok,
- token::Literal(token::StrRaw(token::intern("\"#a\\b\x00c\""), 3)));
+ token::Literal(token::StrRaw(token::intern("\"#a\\b\x00c\""), 3), None));
+ }
+
+ #[test] fn literal_suffixes() {
+ macro_rules! test {
+ ($input: expr, $tok_type: ident, $tok_contents: expr) => {{
+ assert_eq!(setup(&mk_sh(), format!("{}suffix", $input)).next_token().tok,
+ token::Literal(token::$tok_type(token::intern($tok_contents)),
+ Some(token::intern("suffix"))));
+ // with a whitespace separator:
+ assert_eq!(setup(&mk_sh(), format!("{} suffix", $input)).next_token().tok,
+ token::Literal(token::$tok_type(token::intern($tok_contents)),
+ None));
+ }}
+ }
+
+ test!("'a'", Char, "a");
+ test!("b'a'", Byte, "a");
+ test!("\"a\"", Str_, "a");
+ test!("b\"a\"", Binary, "a");
+ test!("1234", Integer, "1234");
+ test!("0b101", Integer, "0b101");
+ test!("0xABC", Integer, "0xABC");
+ test!("1.0", Float, "1.0");
+ test!("1.0e10", Float, "1.0e10");
+
+ assert_eq!(setup(&mk_sh(), "r###\"raw\"###suffix".to_string()).next_token().tok,
+ token::Literal(token::StrRaw(token::intern("raw"), 3),
+ Some(token::intern("suffix"))));
+ assert_eq!(setup(&mk_sh(), "br###\"raw\"###suffix".to_string()).next_token().tok,
+ token::Literal(token::BinaryRaw(token::intern("raw"), 3),
+ Some(token::intern("suffix"))));
}
#[test] fn line_doc_comments() {
@@ -1574,7 +1630,7 @@ mod test {
token::Comment => { },
_ => panic!("expected a comment!")
}
- assert_eq!(lexer.next_token().tok, token::Literal(token::Char(token::intern("a"))));
+ assert_eq!(lexer.next_token().tok, token::Literal(token::Char(token::intern("a")), None));
}
}
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index 4edcb182e53..b9e1fe07e71 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -646,6 +646,20 @@ impl<'a> Parser<'a> {
}
}
+ pub fn expect_no_suffix(&mut self, sp: Span, kind: &str, suffix: Option<ast::Name>) {
+ match suffix {
+ None => {/* everything ok */}
+ Some(suf) => {
+ let text = suf.as_str();
+ if text.is_empty() {
+ self.span_bug(sp, "found empty non-None literal suffix")
+ }
+ self.span_err(sp, &*format!("a {} with a suffix is illegal", kind));
+ }
+ }
+ }
+
+
/// Attempt to consume a `<`. If `<<` is seen, replace it with a single
/// `<` and continue. If a `<` is not seen, return false.
///
@@ -968,6 +982,9 @@ impl<'a> Parser<'a> {
pub fn span_err(&mut self, sp: Span, m: &str) {
self.sess.span_diagnostic.span_err(sp, m)
}
+ pub fn span_bug(&mut self, sp: Span, m: &str) -> ! {
+ self.sess.span_diagnostic.span_bug(sp, m)
+ }
pub fn abort_if_errors(&mut self) {
self.sess.span_diagnostic.handler().abort_if_errors();
}
@@ -1640,24 +1657,40 @@ impl<'a> Parser<'a> {
/// Matches token_lit = LIT_INTEGER | ...
pub fn lit_from_token(&mut self, tok: &token::Token) -> Lit_ {
match *tok {
- token::Literal(token::Byte(i)) => LitByte(parse::byte_lit(i.as_str()).val0()),
- token::Literal(token::Char(i)) => LitChar(parse::char_lit(i.as_str()).val0()),
- token::Literal(token::Integer(s)) => parse::integer_lit(s.as_str(),
- &self.sess.span_diagnostic,
- self.last_span),
- token::Literal(token::Float(s)) => parse::float_lit(s.as_str()),
- token::Literal(token::Str_(s)) => {
- LitStr(token::intern_and_get_ident(parse::str_lit(s.as_str()).as_slice()),
- ast::CookedStr)
+ token::Literal(lit, suf) => {
+ let (suffix_illegal, out) = match lit {
+ token::Byte(i) => (true, LitByte(parse::byte_lit(i.as_str()).val0())),
+ token::Char(i) => (true, LitChar(parse::char_lit(i.as_str()).val0())),
+ token::Integer(s) => (false, parse::integer_lit(s.as_str(),
+ &self.sess.span_diagnostic,
+ self.last_span)),
+ token::Float(s) => (false, parse::float_lit(s.as_str())),
+ token::Str_(s) => {
+ (true,
+ LitStr(token::intern_and_get_ident(parse::str_lit(s.as_str()).as_slice()),
+ ast::CookedStr))
+ }
+ token::StrRaw(s, n) => {
+ (true,
+ LitStr(
+ token::intern_and_get_ident(
+ parse::raw_str_lit(s.as_str()).as_slice()),
+ ast::RawStr(n)))
+ }
+ token::Binary(i) =>
+ (true, LitBinary(parse::binary_lit(i.as_str()))),
+ token::BinaryRaw(i, _) =>
+ (true,
+ LitBinary(Rc::new(i.as_str().as_bytes().iter().map(|&x| x).collect()))),
+ };
+
+ if suffix_illegal {
+ let sp = self.last_span;
+ self.expect_no_suffix(sp, &*format!("{} literal", lit.short_name()), suf)
+ }
+
+ out
}
- token::Literal(token::StrRaw(s, n)) => {
- LitStr(token::intern_and_get_ident(parse::raw_str_lit(s.as_str()).as_slice()),
- ast::RawStr(n))
- }
- token::Literal(token::Binary(i)) =>
- LitBinary(parse::binary_lit(i.as_str())),
- token::Literal(token::BinaryRaw(i, _)) =>
- LitBinary(Rc::new(i.as_str().as_bytes().iter().map(|&x| x).collect())),
_ => { self.unexpected_last(tok); }
}
}
@@ -2424,7 +2457,10 @@ impl<'a> Parser<'a> {
}
}
}
- token::Literal(token::Integer(n)) => {
+ token::Literal(token::Integer(n), suf) => {
+ let sp = self.span;
+ self.expect_no_suffix(sp, "tuple index", suf);
+
let index = n.as_str();
let dot = self.last_span.hi;
hi = self.span.hi;
@@ -2449,7 +2485,7 @@ impl<'a> Parser<'a> {
}
}
}
- token::Literal(token::Float(n)) => {
+ token::Literal(token::Float(n), _suf) => {
self.bump();
let last_span = self.last_span;
let fstr = n.as_str();
@@ -5085,12 +5121,17 @@ impl<'a> Parser<'a> {
self.expect(&token::Semi);
(path, the_ident)
},
- token::Literal(token::Str_(..)) | token::Literal(token::StrRaw(..)) => {
- let path = self.parse_str();
+ token::Literal(token::Str_(..), suf) | token::Literal(token::StrRaw(..), suf) => {
+ let sp = self.span;
+ self.expect_no_suffix(sp, "extern crate name", suf);
+ // forgo the internal suffix check of `parse_str` to
+ // avoid repeats (this unwrap will always succeed due
+ // to the restriction of the `match`)
+ let (s, style, _) = self.parse_optional_str().unwrap();
self.expect_keyword(keywords::As);
let the_ident = self.parse_ident();
self.expect(&token::Semi);
- (Some(path), the_ident)
+ (Some((s, style)), the_ident)
},
_ => {
let span = self.span;
@@ -5267,7 +5308,9 @@ impl<'a> Parser<'a> {
/// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> Option<abi::Abi> {
match self.token {
- token::Literal(token::Str_(s)) | token::Literal(token::StrRaw(s, _)) => {
+ token::Literal(token::Str_(s), suf) | token::Literal(token::StrRaw(s, _), suf) => {
+ let sp = self.span;
+ self.expect_no_suffix(sp, "ABI spec", suf);
self.bump();
let the_string = s.as_str();
match abi::lookup(the_string) {
@@ -5902,21 +5945,27 @@ impl<'a> Parser<'a> {
}
pub fn parse_optional_str(&mut self)
- -> Option<(InternedString, ast::StrStyle)> {
- let (s, style) = match self.token {
- token::Literal(token::Str_(s)) => (self.id_to_interned_str(s.ident()), ast::CookedStr),
- token::Literal(token::StrRaw(s, n)) => {
- (self.id_to_interned_str(s.ident()), ast::RawStr(n))
+ -> Option<(InternedString, ast::StrStyle, Option<ast::Name>)> {
+ let ret = match self.token {
+ token::Literal(token::Str_(s), suf) => {
+ (self.id_to_interned_str(s.ident()), ast::CookedStr, suf)
+ }
+ token::Literal(token::StrRaw(s, n), suf) => {
+ (self.id_to_interned_str(s.ident()), ast::RawStr(n), suf)
}
_ => return None
};
self.bump();
- Some((s, style))
+ Some(ret)
}
pub fn parse_str(&mut self) -> (InternedString, StrStyle) {
match self.parse_optional_str() {
- Some(s) => { s }
+ Some((s, style, suf)) => {
+ let sp = self.last_span;
+ self.expect_no_suffix(sp, "str literal", suf);
+ (s, style)
+ }
_ => self.fatal("expected string literal")
}
}
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index bfa6ca798b2..4272b57a4dc 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -72,6 +72,19 @@ pub enum Lit {
BinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */
}
+impl Lit {
+ pub fn short_name(&self) -> &'static str {
+ match *self {
+ Byte(_) => "byte",
+ Char(_) => "char",
+ Integer(_) => "integer",
+ Float(_) => "float",
+ Str_(_) | StrRaw(..) => "str",
+ Binary(_) | BinaryRaw(..) => "binary str"
+ }
+ }
+}
+
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum Token {
@@ -111,7 +124,7 @@ pub enum Token {
CloseDelim(DelimToken),
/* Literals */
- Literal(Lit),
+ Literal(Lit, Option<ast::Name>),
/* Name components */
Ident(ast::Ident, IdentStyle),
@@ -151,7 +164,7 @@ impl Token {
Ident(_, _) => true,
Underscore => true,
Tilde => true,
- Literal(_) => true,
+ Literal(_, _) => true,
Pound => true,
At => true,
Not => true,
@@ -172,7 +185,7 @@ impl Token {
/// Returns `true` if the token is any literal
pub fn is_lit(&self) -> bool {
match *self {
- Literal(_) => true,
+ Literal(_, _) => true,
_ => false,
}
}
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index 7997c1ba4ef..642ffa3745d 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -236,18 +236,28 @@ pub fn token_to_string(tok: &Token) -> String {
token::Question => "?".into_string(),
/* Literals */
- token::Literal(token::Byte(b)) => format!("b'{}'", b.as_str()),
- token::Literal(token::Char(c)) => format!("'{}'", c.as_str()),
- token::Literal(token::Float(c)) => c.as_str().into_string(),
- token::Literal(token::Integer(c)) => c.as_str().into_string(),
- token::Literal(token::Str_(s)) => format!("\"{}\"", s.as_str()),
- token::Literal(token::StrRaw(s, n)) => format!("r{delim}\"{string}\"{delim}",
- delim="#".repeat(n),
- string=s.as_str()),
- token::Literal(token::Binary(v)) => format!("b\"{}\"", v.as_str()),
- token::Literal(token::BinaryRaw(s, n)) => format!("br{delim}\"{string}\"{delim}",
- delim="#".repeat(n),
- string=s.as_str()),
+ token::Literal(lit, suf) => {
+ let mut out = match lit {
+ token::Byte(b) => format!("b'{}'", b.as_str()),
+ token::Char(c) => format!("'{}'", c.as_str()),
+ token::Float(c) => c.as_str().into_string(),
+ token::Integer(c) => c.as_str().into_string(),
+ token::Str_(s) => format!("\"{}\"", s.as_str()),
+ token::StrRaw(s, n) => format!("r{delim}\"{string}\"{delim}",
+ delim="#".repeat(n),
+ string=s.as_str()),
+ token::Binary(v) => format!("b\"{}\"", v.as_str()),
+ token::BinaryRaw(s, n) => format!("br{delim}\"{string}\"{delim}",
+ delim="#".repeat(n),
+ string=s.as_str()),
+ };
+
+ if let Some(s) = suf {
+ out.push_str(s.as_str())
+ }
+
+ out
+ }
/* Name components */
token::Ident(s, _) => token::get_ident(s).get().into_string(),
diff --git a/src/test/compile-fail/bad-lit-suffixes.rs b/src/test/compile-fail/bad-lit-suffixes.rs
new file mode 100644
index 00000000000..e48bb807488
--- /dev/null
+++ b/src/test/compile-fail/bad-lit-suffixes.rs
@@ -0,0 +1,36 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+extern crate
+ "foo"suffix //~ ERROR extern crate name with a suffix is illegal
+ as foo;
+
+extern
+ "C"suffix //~ ERROR ABI spec with a suffix is illegal
+ fn foo() {}
+
+extern
+ "C"suffix //~ ERROR ABI spec with a suffix is illegal
+{}
+
+fn main() {
+ ""suffix; //~ ERROR str literal with a suffix is illegal
+ b""suffix; //~ ERROR binary str literal with a suffix is illegal
+ r#""#suffix; //~ ERROR str literal with a suffix is illegal
+ br#""#suffix; //~ ERROR binary str literal with a suffix is illegal
+ 'a'suffix; //~ ERROR char literal with a suffix is illegal
+ b'a'suffix; //~ ERROR byte literal with a suffix is illegal
+
+ 1234suffix;
+ 0b101suffix;
+ 1.0suffix;
+ 1.0e10suffix;
+}