1
1
//! Low-level Rust lexer.
2
2
//!
3
+ //! The idea with `librustc_lexer` is to make a reusable library,
4
+ //! by separating out pure lexing and rustc-specific concerns, like spans,
5
+ //! error reporting, and interning. So, rustc_lexer operates directly on `&str`,
6
+ //! produces simple tokens which are a pair of type-tag and a bit of original text,
7
+ //! and does not report errors, instead storing them as flags on the token.
8
+ //!
3
9
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax,
4
10
//! for that see `librustc_parse::lexer`, which converts this basic token stream
5
11
//! into wide tokens used by the actual parser.
@@ -719,6 +725,9 @@ impl Cursor<'_> {
719
725
720
726
// Check that amount of closing '#' symbols
721
727
// is equal to the amount of opening ones.
728
+ // Note that this will not consume extra trailing `#` characters:
729
+ // `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }`
730
+ // followed by a `#` token.
722
731
let mut hashes_left = n_start_hashes;
723
732
let is_closing_hash = |c| {
724
733
if c == '#' && hashes_left != 0 {
@@ -739,8 +748,8 @@ impl Cursor<'_> {
739
748
possible_terminator_offset : None ,
740
749
} ;
741
750
} else if n_end_hashes > max_hashes {
742
- // Keep track of possible terminators to give a hint about where there might be
743
- // a missing terminator
751
+ // Keep track of possible terminators to give a hint about
752
+ // where there might be a missing terminator
744
753
possible_terminator_offset =
745
754
Some ( self . len_consumed ( ) - start_pos - n_end_hashes + prefix_len) ;
746
755
max_hashes = n_end_hashes;
0 commit comments