@@ -557,7 +557,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
             else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
                 // Any other valid case here already resulted in IDHash.
-                b'0'...b'9' | b'-' => true,
+                b'0'..=b'9' | b'-' => true,
                 _ => false,
             } { Hash(consume_name(tokenizer)) }
             else { Delim('#') }
@@ -576,11 +576,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'+' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -592,11 +592,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'-' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else if tokenizer.starts_with(b"-->") {
@@ -611,7 +611,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         },
         b'.' => {
             if tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9'
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9'
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -627,7 +627,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
                 Delim('/')
             }
         }
-        b'0'...b'9' => { consume_numeric(tokenizer) },
+        b'0'..=b'9' => { consume_numeric(tokenizer) },
         b':' => { tokenizer.advance(1); Colon },
         b';' => { tokenizer.advance(1); Semicolon },
         b'<' => {
@@ -644,7 +644,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { AtKeyword(consume_name(tokenizer)) }
             else { Delim('@') }
         },
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
+        b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
         b'[' => { tokenizer.advance(1); SquareBracketBlock },
         b'\\' => {
             if !tokenizer.has_newline_at(1) { consume_ident_like(tokenizer) }
@@ -745,8 +745,8 @@ fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
             b'\n' | b'\x0C' | b'\r' => {
                 tokenizer.consume_newline();
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -807,8 +807,8 @@ fn consume_quoted_string<'a>(
             b'\n' | b'\r' | b'\x0C' => {
                 return Err(tokenizer.slice_from(start_pos).into())
             },
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -859,8 +859,8 @@ fn consume_quoted_string<'a>(
                 string_bytes.extend("\u{FFFD}".as_bytes());
                 continue;
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -882,10 +882,10 @@ fn consume_quoted_string<'a>(
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
     !tokenizer.is_eof()
         && match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+            b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { true },
             b'-' => {
                 tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                    b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                    b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' | b'\0' => {
                         true
                     }
                     b'\\' => { !tokenizer.has_newline_at(1) }
@@ -921,7 +921,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
             return tokenizer.slice_from(start_pos).into();
         }
         match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) },
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1) },
             b'\\' | b'\0' => {
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
@@ -931,10 +931,10 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
-            b => {
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xC0'..=b'\xEF' => { tokenizer.advance(1); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _b => {
                 return tokenizer.slice_from(start_pos).into();
             }
         }
@@ -943,7 +943,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => {
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => {
                 tokenizer.advance(1);
                 value_bytes.push(b) // ASCII
             }
@@ -957,19 +957,19 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 tokenizer.advance(1);
                 value_bytes.extend("\u{FFFD}".as_bytes());
             },
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 value_bytes.push(b)
             }
-            b'\xC0'...b'\xEF' => {
+            b'\xC0'..=b'\xEF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.advance(1);
                 value_bytes.push(b)
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 tokenizer.consume_4byte_intro();
                 value_bytes.push(b)
             }
@@ -985,9 +985,9 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
 
 fn byte_to_hex_digit(b: u8) -> Option<u32> {
     Some(match_byte! { b,
-        b'0'...b'9' => { b - b'0' },
-        b'a'...b'f' => { b - b'a' + 10 },
-        b'A'...b'F' => { b - b'A' + 10 },
+        b'0'..=b'9' => { b - b'0' },
+        b'a'..=b'f' => { b - b'a' + 10 },
+        b'A'..=b'F' => { b - b'A' + 10 },
         _ => {
             return None
         }
@@ -1032,7 +1032,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut fractional_part: f64 = 0.;
     if tokenizer.has_at_least(1)
         && tokenizer.next_byte_unchecked() == b'.'
-        && matches!(tokenizer.byte_at(1), b'0'...b'9')
+        && matches!(tokenizer.byte_at(1), b'0'..=b'9')
     {
         is_integer = false;
         tokenizer.advance(1); // Consume '.'
@@ -1050,10 +1050,10 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut value = sign * (integral_part + fractional_part);
 
     if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
-        if matches!(tokenizer.byte_at(1), b'0'...b'9')
+        if matches!(tokenizer.byte_at(1), b'0'..=b'9')
             || (tokenizer.has_at_least(2)
                 && matches!(tokenizer.byte_at(1), b'+' | b'-')
-                && matches!(tokenizer.byte_at(2), b'0'...b'9'))
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9'))
         {
             is_integer = false;
             tokenizer.advance(1);
@@ -1202,7 +1202,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 return UnquotedUrl(value.into())
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
                 | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos)
@@ -1216,8 +1216,8 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -1236,7 +1236,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 break;
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
                 | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos);
@@ -1254,13 +1254,13 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 string_bytes.extend("\u{FFFD}".as_bytes());
             }
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 string_bytes.push(b);
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_4byte_intro();
@@ -1367,7 +1367,7 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
         return '\u{FFFD}';
     } // Escaped EOF
     match_byte! { tokenizer.next_byte_unchecked(),
-        b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
+        b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),
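
Background on the change: "..." in a pattern is Rust's deprecated spelling of an inclusive range, and "..=" matches exactly the same values, so this commit updates syntax only and does not change tokenizer behavior. A minimal standalone sketch of the equivalence (illustrative code, not from this crate; is_hex_digit is a hypothetical helper, and matches! is the standard library macro):

    // An inclusive byte-range pattern: b'0'..=b'9' matches bytes 0x30
    // through 0x39, exactly the set the deprecated b'0'...b'9' matched.
    fn is_hex_digit(b: u8) -> bool {
        matches!(b, b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F')
    }

The one edit above that goes beyond range syntax is in consume_name, where the catch-all binding b becomes _b; the binding is never read in that arm, and the leading underscore marks it as intentionally unused, presumably to silence the unused-variable lint without changing what the arm matches.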