Commit 5bc93b5

Use new inclusive range literal syntax
Fix some deprecation warnings
1 parent e413e20
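
For context: `..=` has been the stable inclusive-range syntax since Rust 1.26, and the older `...` form in patterns triggers the deprecation warnings this commit fixes (it later became a hard error in the 2021 edition). A minimal before/after sketch, separate from the diff below:

    // Before: warns that `...` range patterns are deprecated
    // (and no longer compiles on the 2021 edition).
    fn is_hex_digit_old(b: u8) -> bool {
        matches!(b, b'0'...b'9' | b'a'...b'f' | b'A'...b'F')
    }

    // After: identical semantics, warning-free.
    fn is_hex_digit(b: u8) -> bool {
        matches!(b, b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F')
    }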

6 files changed: +47 -48 lines

build.rs (-1)

@@ -10,7 +10,6 @@ extern crate proc_macro2;
 
 #[cfg(feature = "dummy_match_byte")]
 mod codegen {
-    use std::path::Path;
     pub fn main() {}
 }

src/color.rs (+3 -3)

@@ -530,9 +530,9 @@ pub fn parse_color_keyword(ident: &str) -> Result<Color, ()> {
 #[inline]
 fn from_hex(c: u8) -> Result<u8, ()> {
     match c {
-        b'0'...b'9' => Ok(c - b'0'),
-        b'a'...b'f' => Ok(c - b'a' + 10),
-        b'A'...b'F' => Ok(c - b'A' + 10),
+        b'0'..=b'9' => Ok(c - b'0'),
+        b'a'..=b'f' => Ok(c - b'a' + 10),
+        b'A'..=b'F' => Ok(c - b'A' + 10),
         _ => Err(()),
     }
 }
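
A quick illustration of how this helper composes, given the `from_hex` above (`hex_pair` is our name, not the crate's): two hex digits make one color byte, so b"4a" parses as 4 * 16 + 10 = 74, i.e. 0x4A.

    fn hex_pair(high: u8, low: u8) -> Result<u8, ()> {
        Ok(from_hex(high)? * 16 + from_hex(low)?)
    }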

src/macros.rs (+1 -1)

@@ -130,7 +130,7 @@ macro_rules! cssparser_internal__to_lowercase {
 #[allow(non_snake_case)]
 pub fn _internal__to_lowercase<'a>(buffer: &'a mut [u8], input: &'a str) -> Option<&'a str> {
     if let Some(buffer) = buffer.get_mut(..input.len()) {
-        if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'...b'Z')) {
+        if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) {
             buffer.copy_from_slice(input.as_bytes());
             buffer[first_uppercase..].make_ascii_lowercase();
             // `buffer` was initialized to a copy of `input` (which is &str so well-formed UTF-8)
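
The pattern here: scan for the first uppercase ASCII byte and only lowercase from that point on, since everything before it needs no change. A standalone sketch of the same idea (simplified and allocating; the crate version writes into a caller-provided buffer):

    fn ascii_lowercase(s: &str) -> String {
        let mut bytes = s.as_bytes().to_vec();
        if let Some(first_upper) = bytes.iter().position(|&b| matches!(b, b'A'..=b'Z')) {
            bytes[first_upper..].make_ascii_lowercase();
        }
        // ASCII lowercasing preserves UTF-8 well-formedness.
        String::from_utf8(bytes).unwrap()
    }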

src/nth.rs (+1 -1)

@@ -105,7 +105,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
     let bytes = string.as_bytes();
     if bytes.len() >= 3
         && bytes[..2].eq_ignore_ascii_case(b"n-")
-        && bytes[2..].iter().all(|&c| matches!(c, b'0'...b'9'))
+        && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
     {
         Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
     } else {
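
In other words, the guard accepts an "n-" prefix (case-insensitive) followed by one or more ASCII digits, as in the <an+b> fragments "n-5" or "N-42". Extracted as a hypothetical standalone predicate:

    fn is_n_dash_digits(s: &str) -> bool {
        let bytes = s.as_bytes();
        bytes.len() >= 3
            && bytes[..2].eq_ignore_ascii_case(b"n-")
            && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
    }

    // is_n_dash_digits("n-5") == true; is_n_dash_digits("n-") == false.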

src/serializer.rs (+4 -4)

@@ -207,7 +207,7 @@ where
         dest.write_str("-")?;
         value = &value[1..];
     }
-    if let digit @ b'0'...b'9' = value.as_bytes()[0] {
+    if let digit @ b'0'..=b'9' = value.as_bytes()[0] {
         hex_escape(digit, dest)?;
         value = &value[1..];
     }
@@ -226,7 +226,7 @@ where
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let escaped = match b {
-            b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' | b'_' | b'-' => continue,
+            b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'_' | b'-' => continue,
             _ if !b.is_ascii() => continue,
             b'\0' => Some("\u{FFFD}"),
             _ => None,
@@ -251,7 +251,7 @@ where
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let hex = match b {
-            b'\0'...b' ' | b'\x7F' => true,
+            b'\0'..=b' ' | b'\x7F' => true,
             b'(' | b')' | b'"' | b'\'' | b'\\' => false,
             _ => continue,
         };
@@ -318,7 +318,7 @@ where
             b'"' => Some("\\\""),
             b'\\' => Some("\\\\"),
             b'\0' => Some("\u{FFFD}"),
-            b'\x01'...b'\x1F' | b'\x7F' => None,
+            b'\x01'..=b'\x1F' | b'\x7F' => None,
             _ => continue,
         };
         self.inner.write_str(&s[chunk_start..i])?;
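
All four hunks are byte classifiers deciding what may pass through unescaped. The first classification, reduced to a hypothetical predicate (the real code streams whole unescaped chunks for efficiency rather than testing byte by byte):

    /// May `b` appear unescaped in a serialized CSS identifier?
    fn ident_byte_is_safe(b: u8) -> bool {
        matches!(b, b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'_' | b'-')
            || !b.is_ascii() // non-ASCII passes through as UTF-8
    }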

src/tokenizer.rs (+38 -38)

@@ -557,7 +557,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
             else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
                 // Any other valid case here already resulted in IDHash.
-                b'0'...b'9' | b'-' => true,
+                b'0'..=b'9' | b'-' => true,
                 _ => false,
             } { Hash(consume_name(tokenizer)) }
             else { Delim('#') }
@@ -576,11 +576,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'+' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -592,11 +592,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'-' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else if tokenizer.starts_with(b"-->") {
@@ -611,7 +611,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         },
         b'.' => {
             if tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9'
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9'
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -627,7 +627,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
                 Delim('/')
             }
         }
-        b'0'...b'9' => { consume_numeric(tokenizer) },
+        b'0'..=b'9' => { consume_numeric(tokenizer) },
         b':' => { tokenizer.advance(1); Colon },
         b';' => { tokenizer.advance(1); Semicolon },
         b'<' => {
@@ -644,7 +644,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { AtKeyword(consume_name(tokenizer)) }
             else { Delim('@') }
         },
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
+        b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
         b'[' => { tokenizer.advance(1); SquareBracketBlock },
         b'\\' => {
             if !tokenizer.has_newline_at(1) { consume_ident_like(tokenizer) }
@@ -745,8 +745,8 @@ fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
             b'\n' | b'\x0C' | b'\r' => {
                 tokenizer.consume_newline();
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -807,8 +807,8 @@ fn consume_quoted_string<'a>(
             b'\n' | b'\r' | b'\x0C' => {
                 return Err(tokenizer.slice_from(start_pos).into())
             },
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -859,8 +859,8 @@ fn consume_quoted_string<'a>(
                 string_bytes.extend("\u{FFFD}".as_bytes());
                 continue;
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -882,10 +882,10 @@ fn consume_quoted_string<'a>(
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
     !tokenizer.is_eof()
     && match_byte! { tokenizer.next_byte_unchecked(),
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+        b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { true },
         b'-' => {
             tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' | b'\0' => {
                     true
                 }
                 b'\\' => { !tokenizer.has_newline_at(1) }
@@ -921,7 +921,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
             return tokenizer.slice_from(start_pos).into();
         }
         match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) },
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1) },
             b'\\' | b'\0' => {
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
@@ -931,10 +931,10 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
-            b => {
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xC0'..=b'\xEF' => { tokenizer.advance(1); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _b => {
                 return tokenizer.slice_from(start_pos).into();
             }
         }
@@ -943,7 +943,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => {
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => {
                 tokenizer.advance(1);
                 value_bytes.push(b) // ASCII
             }
@@ -957,19 +957,19 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 tokenizer.advance(1);
                 value_bytes.extend("\u{FFFD}".as_bytes());
             },
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 value_bytes.push(b)
             }
-            b'\xC0'...b'\xEF' => {
+            b'\xC0'..=b'\xEF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.advance(1);
                 value_bytes.push(b)
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 tokenizer.consume_4byte_intro();
                 value_bytes.push(b)
             }
@@ -985,9 +985,9 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
 
 fn byte_to_hex_digit(b: u8) -> Option<u32> {
     Some(match_byte! { b,
-        b'0' ... b'9' => { b - b'0' },
-        b'a' ... b'f' => { b - b'a' + 10 },
-        b'A' ... b'F' => { b - b'A' + 10 },
+        b'0' ..= b'9' => { b - b'0' },
+        b'a' ..= b'f' => { b - b'a' + 10 },
+        b'A' ..= b'F' => { b - b'A' + 10 },
         _ => {
             return None
         }
@@ -1032,7 +1032,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut fractional_part: f64 = 0.;
     if tokenizer.has_at_least(1)
         && tokenizer.next_byte_unchecked() == b'.'
-        && matches!(tokenizer.byte_at(1), b'0'...b'9')
+        && matches!(tokenizer.byte_at(1), b'0'..=b'9')
     {
         is_integer = false;
         tokenizer.advance(1); // Consume '.'
@@ -1050,10 +1050,10 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut value = sign * (integral_part + fractional_part);
 
     if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
-        if matches!(tokenizer.byte_at(1), b'0'...b'9')
+        if matches!(tokenizer.byte_at(1), b'0'..=b'9')
             || (tokenizer.has_at_least(2)
                 && matches!(tokenizer.byte_at(1), b'+' | b'-')
-                && matches!(tokenizer.byte_at(2), b'0'...b'9'))
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9'))
         {
             is_integer = false;
             tokenizer.advance(1);
@@ -1202,7 +1202,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 return UnquotedUrl(value.into())
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
             | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos)
@@ -1216,8 +1216,8 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -1236,7 +1236,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 break;
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
            | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos);
@@ -1254,13 +1254,13 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 string_bytes.extend("\u{FFFD}".as_bytes());
             }
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 string_bytes.push(b);
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_4byte_intro();
@@ -1367,7 +1367,7 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
         return '\u{FFFD}';
     } // Escaped EOF
     match_byte! { tokenizer.next_byte_unchecked(),
-        b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
+        b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),