Skip to content

Commit 5b5638f

Browse files
committed
Switch to an independent enum for Lit* subtokens.
1 parent c8d6e3b commit 5b5638f

File tree

9 files changed

+115
-118
lines changed

9 files changed

+115
-118
lines changed

src/grammar/verify.rs

Lines changed: 26 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
6161
"SHL" => token::BinOp(token::Shl),
6262
"LBRACE" => token::OpenDelim(token::Brace),
6363
"RARROW" => token::Rarrow,
64-
"LIT_STR" => token::LitStr(Name(0)),
64+
"LIT_STR" => token::Literal(token::Str_(Name(0))),
6565
"DOTDOT" => token::DotDot,
6666
"MOD_SEP" => token::ModSep,
6767
"DOTDOTDOT" => token::DotDotDot,
@@ -71,7 +71,7 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
7171
"ANDAND" => token::AndAnd,
7272
"AT" => token::At,
7373
"LBRACKET" => token::OpenDelim(token::Bracket),
74-
"LIT_STR_RAW" => token::LitStrRaw(Name(0), 0),
74+
"LIT_STR_RAW" => token::Literal(token::StrRaw(Name(0), 0)),
7575
"RPAREN" => token::CloseDelim(token::Paren),
7676
"SLASH" => token::BinOp(token::Slash),
7777
"COMMA" => token::Comma,
@@ -80,8 +80,8 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
8080
"TILDE" => token::Tilde,
8181
"IDENT" => token::Id(),
8282
"PLUS" => token::BinOp(token::Plus),
83-
"LIT_CHAR" => token::LitChar(Name(0)),
84-
"LIT_BYTE" => token::LitByte(Name(0)),
83+
"LIT_CHAR" => token::Literal(token::Char(Name(0))),
84+
"LIT_BYTE" => token::Literal(token::Byte(Name(0))),
8585
"EQ" => token::Eq,
8686
"RBRACKET" => token::CloseDelim(token::Bracket),
8787
"COMMENT" => token::Comment,
@@ -95,9 +95,9 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
9595
"BINOP" => token::BinOp(token::Plus),
9696
"POUND" => token::Pound,
9797
"OROR" => token::OrOr,
98-
"LIT_INTEGER" => token::LitInteger(Name(0)),
98+
"LIT_INTEGER" => token::Literal(token::Integer(Name(0))),
9999
"BINOPEQ" => token::BinOpEq(token::Plus),
100-
"LIT_FLOAT" => token::LitFloat(Name(0)),
100+
"LIT_FLOAT" => token::Literal(token::Float(Name(0))),
101101
"WHITESPACE" => token::Whitespace,
102102
"UNDERSCORE" => token::Underscore,
103103
"MINUS" => token::BinOp(token::Minus),
@@ -107,8 +107,8 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
107107
"OR" => token::BinOp(token::Or),
108108
"GT" => token::Gt,
109109
"LE" => token::Le,
110-
"LIT_BINARY" => token::LitBinary(Name(0)),
111-
"LIT_BINARY_RAW" => token::LitBinaryRaw(Name(0), 0),
110+
"LIT_BINARY" => token::Literal(token::Binary(Name(0))),
111+
"LIT_BINARY_RAW" => token::Literal(token::BinaryRaw(Name(0), 0)),
112112
_ => continue,
113113
};
114114

@@ -189,15 +189,17 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
189189
token::BinOp(..) => token::BinOp(str_to_binop(content)),
190190
token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to(
191191
content.len() - 1))),
192-
token::LitStr(..) => token::LitStr(fix(content)),
193-
token::LitStrRaw(..) => token::LitStrRaw(fix(content), count(content)),
194-
token::LitChar(..) => token::LitChar(fixchar(content)),
195-
token::LitByte(..) => token::LitByte(fixchar(content)),
192+
token::Literal(token::Str_(..)) => token::Literal(token::Str_(fix(content))),
193+
token::Literal(token::StrRaw(..)) => token::Literal(token::StrRaw(fix(content),
194+
count(content))),
195+
token::Literal(token::Char(..)) => token::Literal(token::Char(fixchar(content))),
196+
token::Literal(token::Byte(..)) => token::Literal(token::Byte(fixchar(content))),
196197
token::DocComment(..) => token::DocComment(nm),
197-
token::LitInteger(..) => token::LitInteger(nm),
198-
token::LitFloat(..) => token::LitFloat(nm),
199-
token::LitBinary(..) => token::LitBinary(nm),
200-
token::LitBinaryRaw(..) => token::LitBinaryRaw(fix(content), count(content)),
198+
token::Literal(token::Integer(..)) => token::Literal(token::Integer(nm)),
199+
token::Literal(token::Float(..)) => token::Literal(token::Float(nm)),
200+
token::Literal(token::Binary(..)) => token::Literal(token::Binary(nm)),
201+
token::Literal(token::BinaryRaw(..)) => token::Literal(token::BinaryRaw(fix(content),
202+
count(content))),
201203
token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 },
202204
token::ModName),
203205
token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }),
@@ -284,14 +286,14 @@ fn main() {
284286
)
285287

286288
matches!(
287-
LitByte(..),
288-
LitChar(..),
289-
LitInteger(..),
290-
LitFloat(..),
291-
LitStr(..),
292-
LitStrRaw(..),
293-
LitBinary(..),
294-
LitBinaryRaw(..),
289+
token::Literal(token::Byte(..)),
290+
token::Literal(token::Char(..)),
291+
token::Literal(token::Integer(..)),
292+
token::Literal(token::Float(..)),
293+
token::Literal(token::Str_(..)),
294+
token::Literal(token::StrRaw(..)),
295+
token::Literal(token::Binary(..)),
296+
token::Literal(token::BinaryRaw(..)),
295297
Ident(..),
296298
Lifetime(..),
297299
Interpolated(..),

src/librustdoc/html/highlight.rs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,11 +129,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
129129
}
130130

131131
// text literals
132-
token::LitByte(..) | token::LitBinary(..) | token::LitBinaryRaw(..) |
133-
token::LitChar(..) | token::LitStr(..) | token::LitStrRaw(..) => "string",
132+
token::Literal(token::Byte(..)) | token::Literal(token::Char(..)) |
133+
token::Literal(token::Binary(..)) | token::Literal(token::BinaryRaw(..)) |
134+
token::Literal(token::Str_(..)) | token::Literal(token::StrRaw(..)) => "string",
134135

135136
// number literals
136-
token::LitInteger(..) | token::LitFloat(..) => "number",
137+
token::Literal(token::Integer(..)) | token::Literal(token::Float(..)) => "number",
137138

138139
// keywords are also included in the identifier set
139140
token::Ident(ident, _is_mod_sep) => {

src/libsyntax/ast.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -838,7 +838,7 @@ impl TokenTree {
838838
tts: vec![TtToken(sp, token::Ident(token::str_to_ident("doc"),
839839
token::Plain)),
840840
TtToken(sp, token::Eq),
841-
TtToken(sp, token::LitStr(name))],
841+
TtToken(sp, token::Literal(token::Str_(name)))],
842842
close_span: sp,
843843
}))
844844
}

src/libsyntax/diagnostics/plugin.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
8787
},
8888
[ast::TtToken(_, token::Ident(ref code, _)),
8989
ast::TtToken(_, token::Comma),
90-
ast::TtToken(_, token::LitStrRaw(description, _))] => {
90+
ast::TtToken(_, token::Literal(token::StrRaw(description, _)))] => {
9191
(code, Some(description))
9292
}
9393
_ => unreachable!()

src/libsyntax/ext/quote.rs

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -542,6 +542,13 @@ fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
542542

543543
#[allow(non_upper_case_globals)]
544544
fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
545+
macro_rules! mk_lit {
546+
($name: expr, $($args: expr),*) => {{
547+
let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
548+
549+
cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner])
550+
}}
551+
}
545552
match *tok {
546553
token::BinOp(binop) => {
547554
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
@@ -560,38 +567,32 @@ fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
560567
vec![mk_delim(cx, sp, delim)]);
561568
}
562569

563-
token::LitByte(i) => {
570+
token::Literal(token::Byte(i)) => {
564571
let e_byte = mk_name(cx, sp, i.ident());
565-
566-
return cx.expr_call(sp, mk_token_path(cx, sp, "LitByte"), vec!(e_byte));
572+
return mk_lit!("Byte", e_byte);
567573
}
568574

569-
token::LitChar(i) => {
575+
token::Literal(token::Char(i)) => {
570576
let e_char = mk_name(cx, sp, i.ident());
571-
572-
return cx.expr_call(sp, mk_token_path(cx, sp, "LitChar"), vec!(e_char));
577+
return mk_lit!("Char", e_char);
573578
}
574579

575-
token::LitInteger(i) => {
580+
token::Literal(token::Integer(i)) => {
576581
let e_int = mk_name(cx, sp, i.ident());
577-
return cx.expr_call(sp, mk_token_path(cx, sp, "LitInteger"), vec!(e_int));
582+
return mk_lit!("Integer", e_int);
578583
}
579584

580-
token::LitFloat(fident) => {
585+
token::Literal(token::Float(fident)) => {
581586
let e_fident = mk_name(cx, sp, fident.ident());
582-
return cx.expr_call(sp, mk_token_path(cx, sp, "LitFloat"), vec!(e_fident));
587+
return mk_lit!("Float", e_fident);
583588
}
584589

585-
token::LitStr(ident) => {
586-
return cx.expr_call(sp,
587-
mk_token_path(cx, sp, "LitStr"),
588-
vec!(mk_name(cx, sp, ident.ident())));
590+
token::Literal(token::Str_(ident)) => {
591+
return mk_lit!("Str_", mk_name(cx, sp, ident.ident()))
589592
}
590593

591-
token::LitStrRaw(ident, n) => {
592-
return cx.expr_call(sp,
593-
mk_token_path(cx, sp, "LitStrRaw"),
594-
vec!(mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n)));
594+
token::Literal(token::StrRaw(ident, n)) => {
595+
return mk_lit!("StrRaw", mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n))
595596
}
596597

597598
token::Ident(ident, style) => {

src/libsyntax/parse/lexer/mod.rs

Lines changed: 21 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -655,17 +655,17 @@ impl<'a> StringReader<'a> {
655655
}
656656
'u' | 'i' => {
657657
self.scan_int_suffix();
658-
return token::LitInteger(self.name_from(start_bpos));
658+
return token::Literal(token::Integer(self.name_from(start_bpos)));
659659
},
660660
'f' => {
661661
let last_pos = self.last_pos;
662662
self.scan_float_suffix();
663663
self.check_float_base(start_bpos, last_pos, base);
664-
return token::LitFloat(self.name_from(start_bpos));
664+
return token::Literal(token::Float(self.name_from(start_bpos)));
665665
}
666666
_ => {
667667
// just a 0
668-
return token::LitInteger(self.name_from(start_bpos));
668+
return token::Literal(token::Integer(self.name_from(start_bpos)));
669669
}
670670
}
671671
} else if c.is_digit_radix(10) {
@@ -678,7 +678,7 @@ impl<'a> StringReader<'a> {
678678
self.err_span_(start_bpos, self.last_pos, "no valid digits found for number");
679679
// eat any suffix
680680
self.scan_int_suffix();
681-
return token::LitInteger(token::intern("0"));
681+
return token::Literal(token::Integer(token::intern("0")));
682682
}
683683

684684
// might be a float, but don't be greedy if this is actually an
@@ -696,25 +696,25 @@ impl<'a> StringReader<'a> {
696696
}
697697
let last_pos = self.last_pos;
698698
self.check_float_base(start_bpos, last_pos, base);
699-
return token::LitFloat(self.name_from(start_bpos));
699+
return token::Literal(token::Float(self.name_from(start_bpos)));
700700
} else if self.curr_is('f') {
701701
// or it might be an integer literal suffixed as a float
702702
self.scan_float_suffix();
703703
let last_pos = self.last_pos;
704704
self.check_float_base(start_bpos, last_pos, base);
705-
return token::LitFloat(self.name_from(start_bpos));
705+
return token::Literal(token::Float(self.name_from(start_bpos)));
706706
} else {
707707
// it might be a float if it has an exponent
708708
if self.curr_is('e') || self.curr_is('E') {
709709
self.scan_float_exponent();
710710
self.scan_float_suffix();
711711
let last_pos = self.last_pos;
712712
self.check_float_base(start_bpos, last_pos, base);
713-
return token::LitFloat(self.name_from(start_bpos));
713+
return token::Literal(token::Float(self.name_from(start_bpos)));
714714
}
715715
// but we certainly have an integer!
716716
self.scan_int_suffix();
717-
return token::LitInteger(self.name_from(start_bpos));
717+
return token::Literal(token::Integer(self.name_from(start_bpos)));
718718
}
719719
}
720720

@@ -1126,7 +1126,7 @@ impl<'a> StringReader<'a> {
11261126
}
11271127
let id = if valid { self.name_from(start) } else { token::intern("0") };
11281128
self.bump(); // advance curr past token
1129-
return token::LitChar(id);
1129+
return token::Literal(token::Char(id));
11301130
}
11311131
'b' => {
11321132
self.bump();
@@ -1157,7 +1157,7 @@ impl<'a> StringReader<'a> {
11571157
let id = if valid { self.name_from(start_bpos + BytePos(1)) }
11581158
else { token::intern("??") };
11591159
self.bump();
1160-
return token::LitStr(id);
1160+
return token::Literal(token::Str_(id));
11611161
}
11621162
'r' => {
11631163
let start_bpos = self.last_pos;
@@ -1224,7 +1224,7 @@ impl<'a> StringReader<'a> {
12241224
} else {
12251225
token::intern("??")
12261226
};
1227-
return token::LitStrRaw(id, hash_count);
1227+
return token::Literal(token::StrRaw(id, hash_count));
12281228
}
12291229
'-' => {
12301230
if self.nextch_is('>') {
@@ -1314,7 +1314,7 @@ impl<'a> StringReader<'a> {
13141314

13151315
let id = if valid { self.name_from(start) } else { token::intern("??") };
13161316
self.bump(); // advance curr past token
1317-
return token::LitByte(id);
1317+
return token::Literal(token::Byte(id));
13181318
}
13191319

13201320
fn scan_byte_string(&mut self) -> token::Token {
@@ -1336,7 +1336,7 @@ impl<'a> StringReader<'a> {
13361336
}
13371337
let id = if valid { self.name_from(start) } else { token::intern("??") };
13381338
self.bump();
1339-
return token::LitBinary(id);
1339+
return token::Literal(token::Binary(id));
13401340
}
13411341

13421342
fn scan_raw_byte_string(&mut self) -> token::Token {
@@ -1387,8 +1387,9 @@ impl<'a> StringReader<'a> {
13871387
self.bump();
13881388
}
13891389
self.bump();
1390-
return token::LitBinaryRaw(self.name_from_to(content_start_bpos, content_end_bpos),
1391-
hash_count);
1390+
return token::Literal(token::BinaryRaw(self.name_from_to(content_start_bpos,
1391+
content_end_bpos),
1392+
hash_count));
13921393
}
13931394
}
13941395

@@ -1535,17 +1536,17 @@ mod test {
15351536

15361537
#[test] fn character_a() {
15371538
assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok,
1538-
token::LitChar(token::intern("a")));
1539+
token::Literal(token::Char(token::intern("a"))));
15391540
}
15401541

15411542
#[test] fn character_space() {
15421543
assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok,
1543-
token::LitChar(token::intern(" ")));
1544+
token::Literal(token::Char(token::intern(" "))));
15441545
}
15451546

15461547
#[test] fn character_escaped() {
15471548
assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok,
1548-
token::LitChar(token::intern("\\n")));
1549+
token::Literal(token::Char(token::intern("\\n"))));
15491550
}
15501551

15511552
#[test] fn lifetime_name() {
@@ -1557,7 +1558,7 @@ mod test {
15571558
assert_eq!(setup(&mk_sh(),
15581559
"r###\"\"#a\\b\x00c\"\"###".to_string()).next_token()
15591560
.tok,
1560-
token::LitStrRaw(token::intern("\"#a\\b\x00c\""), 3));
1561+
token::Literal(token::StrRaw(token::intern("\"#a\\b\x00c\""), 3)));
15611562
}
15621563

15631564
#[test] fn line_doc_comments() {
@@ -1573,7 +1574,7 @@ mod test {
15731574
token::Comment => { },
15741575
_ => panic!("expected a comment!")
15751576
}
1576-
assert_eq!(lexer.next_token().tok, token::LitChar(token::intern("a")));
1577+
assert_eq!(lexer.next_token().tok, token::Literal(token::Char(token::intern("a"))));
15771578
}
15781579

15791580
}

0 commit comments

Comments (0)