From 171ee21e45c9bacea74cbc7440b45246f729d6e4 Mon Sep 17 00:00:00 2001 From: Tiaan Louw Date: Fri, 22 Mar 2024 14:51:24 +0100 Subject: [PATCH 1/2] Run cargo clippy --fix on all files --- src/macros.rs | 4 +-- src/nth.rs | 14 ++++---- src/parser.rs | 12 +++---- src/rules_and_declarations.rs | 6 ++-- src/serializer.rs | 26 +++++++-------- src/tests.rs | 57 +++++++++++++++---------------- src/tokenizer.rs | 63 ++++++++++++++++------------------- src/unicode_range.rs | 20 +++++------ 8 files changed, 94 insertions(+), 108 deletions(-) diff --git a/src/macros.rs b/src/macros.rs index fc4b77a1..67d83658 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -182,7 +182,7 @@ pub fn _cssparser_internal_to_lowercase<'a>( let input_bytes = unsafe { &*(input.as_bytes() as *const [u8] as *const [MaybeUninit<u8>]) }; - buffer.copy_from_slice(&*input_bytes); + buffer.copy_from_slice(input_bytes); // Same as above re layout, plus these bytes have been initialized: let buffer = unsafe { &mut *(buffer as *mut [MaybeUninit<u8>] as *mut [u8]) }; @@ -195,7 +195,7 @@ pub fn _cssparser_internal_to_lowercase<'a>( } Some( - match input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) { + match input.bytes().position(|byte| byte.is_ascii_uppercase()) { Some(first_uppercase) => make_ascii_lowercase(buffer, input, first_uppercase), // common case: input is already lower-case None => input, diff --git a/src/nth.rs b/src/nth.rs index 518de4d9..4808deef 100644 --- a/src/nth.rs +++ b/src/nth.rs @@ -8,7 +8,7 @@ use super::{BasicParseError, Parser, ParserInput, Token}; /// The input is typically the arguments of a function, /// in which case the caller needs to check if the arguments’ parser is exhausted. /// Return `Ok((A, B))`, or `Err(())` for a syntax error. -pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> { +pub fn parse_nth<'i>(input: &mut Parser<'i, '_>) -> Result<(i32, i32), BasicParseError<'i>> { match *input.next()? { Token::Number { int_value: Some(b), ..
@@ -22,7 +22,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic unit, "n" => Ok(parse_b(input, a)?), "n-" => Ok(parse_signless_b(input, a, -1)?), - _ => match parse_n_dash_digits(&*unit) { + _ => match parse_n_dash_digits(unit) { Ok(b) => Ok((a, b)), Err(()) => { let unit = unit.clone(); @@ -40,7 +40,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic "n-" => Ok(parse_signless_b(input, 1, -1)?), "-n-" => Ok(parse_signless_b(input, -1, -1)?), _ => { - let (slice, a) = if value.starts_with("-") { + let (slice, a) = if value.starts_with('-') { (&value[1..], -1) } else { (&**value, 1) @@ -81,7 +81,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic } } -fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> { +fn parse_b<'i>(input: &mut Parser<'i, '_>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> { let start = input.state(); match input.next() { Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1), @@ -98,8 +98,8 @@ fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), Bas } } -fn parse_signless_b<'i, 't>( - input: &mut Parser<'i, 't>, +fn parse_signless_b<'i>( + input: &mut Parser<'i, '_>, a: i32, b_sign: i32, ) -> Result<(i32, i32), BasicParseError<'i>> { @@ -118,7 +118,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> { let bytes = string.as_bytes(); if bytes.len() >= 3 && bytes[..2].eq_ignore_ascii_case(b"n-") - && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9')) + && bytes[2..].iter().all(|&c| c.is_ascii_digit()) { Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign } else { diff --git a/src/parser.rs b/src/parser.rs index dd7777a2..cce9c8b8 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -116,7 +116,7 @@ impl<'i, T> From<BasicParseError<'i>> for ParseError<'i, T> { impl SourceLocation { /// Create a new BasicParseError at this location for an unexpected token #[inline] - pub fn new_basic_unexpected_token_error<'i>(self, token: Token<'i>) -> BasicParseError<'i> { + pub fn new_basic_unexpected_token_error(self, token: Token<'_>) -> BasicParseError<'_> { BasicParseError { kind: BasicParseErrorKind::UnexpectedToken(token), location: self, @@ -125,7 +125,7 @@ impl SourceLocation { /// Create a new ParseError at this location for an unexpected token #[inline] - pub fn new_unexpected_token_error<'i, E>(self, token: Token<'i>) -> ParseError<'i, E> { + pub fn new_unexpected_token_error<E>(self, token: Token<'_>) -> ParseError<'_, E> { ParseError { kind: ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(token)), location: self, @@ -835,7 +835,7 @@ impl<'i: 't, 't> Parser<'i, 't> { /// expect_ident, but clone the CowRcStr #[inline] pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> { - self.expect_ident().map(|s| s.clone()) + self.expect_ident().cloned() } /// Parse a <ident-token> whose unescaped value is an ASCII-insensitive match for the given value. @@ -860,7 +860,7 @@ impl<'i: 't, 't> Parser<'i, 't> { /// expect_string, but clone the CowRcStr #[inline] pub fn expect_string_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> { - self.expect_string().map(|s| s.clone()) + self.expect_string().cloned() } /// Parse either a <ident-token> or a <string-token>, and return the unescaped value.
@@ -879,7 +879,7 @@ impl<'i: 't, 't> Parser<'i, 't> { Token::UnquotedUrl(ref value) => Ok(value.clone()), Token::Function(ref name) if name.eq_ignore_ascii_case("url") => { self.parse_nested_block(|input| { - input.expect_string().map_err(Into::into).map(|s| s.clone()) + input.expect_string().map_err(Into::into).cloned() }) .map_err(ParseError::<()>::basic) } @@ -894,7 +894,7 @@ impl<'i: 't, 't> Parser<'i, 't> { Token::QuotedString(ref value) => Ok(value.clone()), Token::Function(ref name) if name.eq_ignore_ascii_case("url") => { self.parse_nested_block(|input| { - input.expect_string().map_err(Into::into).map(|s| s.clone()) + input.expect_string().map_err(Into::into).cloned() }) .map_err(ParseError::<()>::basic) } diff --git a/src/rules_and_declarations.rs b/src/rules_and_declarations.rs index fb33a7d0..0b180b4f 100644 --- a/src/rules_and_declarations.rs +++ b/src/rules_and_declarations.rs @@ -14,7 +14,7 @@ use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavi /// /// Typical usage is `input.try_parse(parse_important).is_ok()` /// at the end of a `DeclarationParser::parse_value` implementation. -pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> { +pub fn parse_important<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> { input.expect_delim('!')?; input.expect_ident_matching("important") } @@ -367,7 +367,7 @@ where let start = self.input.state(); let at_keyword = match self.input.next_byte()? { b'@' => match self.input.next_including_whitespace_and_comments() { - Ok(&Token::AtKeyword(ref name)) => Some(name.clone()), + Ok(Token::AtKeyword(name)) => Some(name.clone()), _ => { self.input.reset(&start); None @@ -503,5 +503,5 @@ where input.expect_curly_bracket_block()?; // Do this here so that we consume the `{` even if the prelude is `Err`. let prelude = prelude?; - parse_nested_block(input, |input| parser.parse_block(prelude, &start, input)) + parse_nested_block(input, |input| parser.parse_block(prelude, start, input)) } diff --git a/src/serializer.rs b/src/serializer.rs index 09c22402..a8f32104 100644 --- a/src/serializer.rs +++ b/src/serializer.rs @@ -4,7 +4,7 @@ use crate::match_byte; use dtoa_short::{self, Notation}; -use itoa; + use std::fmt::{self, Write}; use std::str; @@ -49,10 +49,8 @@ where dtoa_short::write(dest, value)? }; - if int_value.is_none() && value.fract() == 0. { - if !notation.decimal_point && !notation.scientific { - dest.write_str(".0")?; - } + if int_value.is_none() && value.fract() == 0. 
&& !notation.decimal_point && !notation.scientific { + dest.write_str(".0")?; } Ok(()) } @@ -63,10 +61,10 @@ impl<'a> ToCss for Token<'a> { W: fmt::Write, { match *self { - Token::Ident(ref value) => serialize_identifier(&**value, dest)?, + Token::Ident(ref value) => serialize_identifier(value, dest)?, Token::AtKeyword(ref value) => { dest.write_str("@")?; - serialize_identifier(&**value, dest)?; + serialize_identifier(value, dest)?; } Token::Hash(ref value) => { dest.write_str("#")?; @@ -74,12 +72,12 @@ impl<'a> ToCss for Token<'a> { } Token::IDHash(ref value) => { dest.write_str("#")?; - serialize_identifier(&**value, dest)?; + serialize_identifier(value, dest)?; } - Token::QuotedString(ref value) => serialize_string(&**value, dest)?, + Token::QuotedString(ref value) => serialize_string(value, dest)?, Token::UnquotedUrl(ref value) => { dest.write_str("url(")?; - serialize_unquoted_url(&**value, dest)?; + serialize_unquoted_url(value, dest)?; dest.write_str(")")?; } Token::Delim(value) => dest.write_char(value)?, @@ -134,7 +132,7 @@ impl<'a> ToCss for Token<'a> { Token::CDC => dest.write_str("-->")?, Token::Function(ref name) => { - serialize_identifier(&**name, dest)?; + serialize_identifier(name, dest)?; dest.write_str("(")?; } Token::ParenthesisBlock => dest.write_str("(")?, @@ -167,7 +165,7 @@ fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W: fmt::Write, { - static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef"; + static HEX_DIGITS: &[u8; 16] = b"0123456789abcdef"; let b3; let b4; let bytes = if ascii_byte > 0x0F { @@ -179,7 +177,7 @@ where b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' ']; &b3[..] }; - dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) }) + dest.write_str(unsafe { str::from_utf8_unchecked(bytes) }) } fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where @@ -240,7 +238,7 @@ where dest.write_str(&value[chunk_start..i])?; if let Some(escaped) = escaped { dest.write_str(escaped)?; - } else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' { + } else if (b'\x01'..=b'\x1F').contains(&b) || b == b'\x7F' { hex_escape(b, dest)?; } else { char_escape(b, dest)?; diff --git a/src/tests.rs b/src/tests.rs index f9dea193..96b2710d 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -5,7 +5,7 @@ #[cfg(feature = "bench")] extern crate test; -use encoding_rs; + use serde_json::{self, json, Map, Value}; #[cfg(feature = "bench")] @@ -26,19 +26,19 @@ macro_rules!
JArray { fn almost_equals(a: &Value, b: &Value) -> bool { match (a, b) { - (&Value::Number(ref a), &Value::Number(ref b)) => { + (Value::Number(a), Value::Number(b)) => { let a = a.as_f64().unwrap(); let b = b.as_f64().unwrap(); (a - b).abs() <= a.abs() * 1e-6 } (&Value::Bool(a), &Value::Bool(b)) => a == b, - (&Value::String(ref a), &Value::String(ref b)) => a == b, - (&Value::Array(ref a), &Value::Array(ref b)) => { + (Value::String(a), Value::String(b)) => a == b, + (Value::Array(a), Value::Array(b)) => { a.len() == b.len() && a.iter() .zip(b.iter()) - .all(|(ref a, ref b)| almost_equals(*a, *b)) + .all(|(a, b)| almost_equals(a, b)) } (&Value::Object(_), &Value::Object(_)) => panic!("Not implemented"), (&Value::Null, &Value::Null) => true, @@ -77,7 +77,7 @@ fn assert_json_eq(results: Value, mut expected: Value, message: &str) { } } -fn run_raw_json_tests<F: Fn(Value, Value) -> ()>(json_data: &str, run: F) { +fn run_raw_json_tests<F: Fn(Value, Value)>(json_data: &str, run: F) { let items = match serde_json::from_str(json_data) { Ok(Value::Array(items)) => items, other => panic!("Invalid JSON: {:?}", other), @@ -242,7 +242,7 @@ fn stylesheet_from_bytes() { fn get_string<'a>(map: &'a Map<String, Value>, key: &str) -> Option<&'a str> { match map.get(key) { - Some(&Value::String(ref s)) => Some(s), + Some(Value::String(s)) => Some(s), Some(&Value::Null) => None, None => None, _ => panic!("Unexpected JSON"), @@ -393,7 +393,7 @@ fn unicode_range() { if input.is_exhausted() { Ok(result) } else { - while let Ok(_) = input.next() {} + while input.next().is_ok() {} Ok(None) } }); @@ -434,10 +434,9 @@ fn serializer(preserve_comments: bool) { ) { while let Ok(token) = if preserve_comments { input - .next_including_whitespace_and_comments() - .map(|t| t.clone()) + .next_including_whitespace_and_comments().cloned() } else { - input.next_including_whitespace().map(|t| t.clone()) + input.next_including_whitespace().cloned() } { let token_type = token.serialization_type(); if !preserve_comments && previous_token.needs_separator_when_before(token_type) @@ -593,7 +592,7 @@ fn line_numbers() { #[test] fn overflow() { - use std::iter::repeat; + let css = r" 2147483646 @@ -619,7 +618,7 @@ fn overflow() { -3.402824e+38 " - .replace("{309 zeros}", &repeat('0').take(309).collect::<String>()); + .replace("{309 zeros}", &"0".repeat(309)); let mut input = ParserInput::new(&css); let mut input = Parser::new(&mut input); @@ -637,12 +636,12 @@ fn overflow() { assert_eq!(input.expect_integer(), Ok(-2147483648)); assert_eq!(input.expect_integer(), Ok(-2147483648)); - assert_eq!(input.expect_number(), Ok(3.30282347e+38)); + assert_eq!(input.expect_number(), Ok(3.302_823_5e38)); assert_eq!(input.expect_number(), Ok(f32::MAX)); assert_eq!(input.expect_number(), Ok(f32::INFINITY)); assert!(f32::MAX != f32::INFINITY); - assert_eq!(input.expect_number(), Ok(-3.30282347e+38)); + assert_eq!(input.expect_number(), Ok(-3.302_823_5e38)); assert_eq!(input.expect_number(), Ok(f32::MIN)); assert_eq!(input.expect_number(), Ok(f32::NEG_INFINITY)); assert!(f32::MIN != f32::NEG_INFINITY); @@ -784,7 +783,7 @@ where impl<'a> ToJson for CowRcStr<'a> { fn to_json(&self) -> Value { - let s: &str = &*self; + let s: &str = self; s.to_json() } } @@ -847,7 +846,7 @@ fn no_stack_overflow_multiple_nested_blocks() { } let mut input = ParserInput::new(&input); let mut input = Parser::new(&mut input); - while let Ok(..)
= input.next() {} + while input.next().is_ok() {} } impl<'i> DeclarationParser<'i> for JsonParser { @@ -863,18 +862,16 @@ impl<'i> DeclarationParser<'i> for JsonParser { let mut important = false; loop { let start = input.state(); - if let Ok(mut token) = input.next_including_whitespace().map(|t| t.clone()) { + if let Ok(mut token) = input.next_including_whitespace().cloned() { // Hack to deal with css-parsing-tests assuming that // `!important` in the middle of a declaration value is OK. // This can never happen per spec // (even CSS Variables forbid top-level `!`) if token == Token::Delim('!') { input.reset(&start); - if parse_important(input).is_ok() { - if input.is_exhausted() { - important = true; - break; - } + if parse_important(input).is_ok() && input.is_exhausted() { + important = true; + break; } input.reset(&start); token = input.next_including_whitespace().unwrap().clone(); @@ -905,7 +902,7 @@ impl<'i> AtRuleParser<'i> for JsonParser { ]; match_ignore_ascii_case! { &*name, "charset" => { - Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone()).into())) + Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone()))) }, _ => Ok(prelude), } @@ -968,7 +965,7 @@ impl<'i> RuleBodyItemParser<'i, Value, ()> for JsonParser { fn component_values_to_json(input: &mut Parser) -> Vec<Value> { let mut values = vec![]; - while let Ok(token) = input.next_including_whitespace().map(|t| t.clone()) { + while let Ok(token) = input.next_including_whitespace().cloned() { values.push(one_component_value_to_json(token, input)); } values @@ -978,9 +975,9 @@ fn one_component_value_to_json(token: Token, input: &mut Parser) -> Value { fn numeric(value: f32, int_value: Option<i32>, has_sign: bool) -> Vec<Value> { vec![ Token::Number { - value: value, - int_value: int_value, - has_sign: has_sign, + value, + int_value, + has_sign, } .to_css_string() .to_json(), @@ -1223,7 +1220,7 @@ fn parse_sourcemapping_comments() { for test in tests { let mut input = ParserInput::new(test.0); let mut parser = Parser::new(&mut input); - while let Ok(_) = parser.next_including_whitespace() {} + while parser.next_including_whitespace().is_ok() {} assert_eq!(parser.current_source_map_url(), test.1); } } @@ -1247,7 +1244,7 @@ fn parse_sourceurl_comments() { for test in tests { let mut input = ParserInput::new(test.0); let mut parser = Parser::new(&mut input); - while let Ok(_) = parser.next_including_whitespace() {} + while parser.next_including_whitespace().is_ok() {} assert_eq!(parser.current_source_url(), test.1); } } diff --git a/src/tokenizer.rs b/src/tokenizer.rs index a3b70063..29451002 100644 --- a/src/tokenizer.rs +++ b/src/tokenizer.rs @@ -255,10 +255,8 @@ impl<'a> Tokenizer<'a> { #[inline] pub fn see_function(&mut self, name: &str) { - if self.var_or_env_functions == SeenStatus::LookingForThem { - if name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env") { - self.var_or_env_functions = SeenStatus::SeenAtLeastOne; - } + if self.var_or_env_functions == SeenStatus::LookingForThem && (name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env")) { + self.var_or_env_functions = SeenStatus::SeenAtLeastOne; } } @@ -582,11 +580,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> { b'+' => { if ( tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'..=b'9') + && tokenizer.byte_at(1).is_ascii_digit() ) || ( tokenizer.has_at_least(2) && tokenizer.byte_at(1) == b'.'
- && matches!(tokenizer.byte_at(2), b'0'..=b'9') + && tokenizer.byte_at(2).is_ascii_digit() ) { consume_numeric(tokenizer) } else { @@ -598,11 +596,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> { b'-' => { if ( tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'..=b'9') + && tokenizer.byte_at(1).is_ascii_digit() ) || ( tokenizer.has_at_least(2) && tokenizer.byte_at(1) == b'.' - && matches!(tokenizer.byte_at(2), b'0'..=b'9') + && tokenizer.byte_at(2).is_ascii_digit() ) { consume_numeric(tokenizer) } else if tokenizer.starts_with(b"-->") { @@ -617,8 +615,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> { }, b'.' => { if tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'..=b'9' - ) { + && tokenizer.byte_at(1).is_ascii_digit() { consume_numeric(tokenizer) } else { tokenizer.advance(1); @@ -1001,7 +998,7 @@ fn byte_to_hex_digit(b: u8) -> Option<u32> { } fn byte_to_decimal_digit(b: u8) -> Option<u32> { - if b >= b'0' && b <= b'9' { + if b.is_ascii_digit() { Some((b - b'0') as u32) } else { None @@ -1038,7 +1035,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> { let mut fractional_part: f64 = 0.; if tokenizer.has_at_least(1) && tokenizer.next_byte_unchecked() == b'.' - && matches!(tokenizer.byte_at(1), b'0'..=b'9') + && tokenizer.byte_at(1).is_ascii_digit() { is_integer = false; tokenizer.advance(1); // Consume '.' @@ -1055,32 +1052,28 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> { let mut value = sign * (integral_part + fractional_part); - if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') { - if matches!(tokenizer.byte_at(1), b'0'..=b'9') - || (tokenizer.has_at_least(2) + if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') && (tokenizer.byte_at(1).is_ascii_digit() || (tokenizer.has_at_least(2) && matches!(tokenizer.byte_at(1), b'+' | b'-') - && matches!(tokenizer.byte_at(2), b'0'..=b'9')) - { - is_integer = false; + && tokenizer.byte_at(2).is_ascii_digit())) { + is_integer = false; + tokenizer.advance(1); + let (has_sign, sign) = match tokenizer.next_byte_unchecked() { + b'-' => (true, -1.), + b'+' => (true, 1.), + _ => (false, 1.), }; + if has_sign { tokenizer.advance(1); - let (has_sign, sign) = match tokenizer.next_byte_unchecked() { - b'-' => (true, -1.), - b'+' => (true, 1.), - _ => (false, 1.), - }; - if has_sign { - tokenizer.advance(1); - } - let mut exponent: f64 = 0.; - while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) { - exponent = exponent * 10. + digit as f64; - tokenizer.advance(1); - if tokenizer.is_eof() { - break; - } + } + let mut exponent: f64 = 0.; + while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) { + exponent = exponent * 10.
+ digit as f64; tokenizer.advance(1); if tokenizer.is_eof() { break; } - value *= f64::powf(10., sign * exponent); } + value *= f64::powf(10., sign * exponent); } let int_value = if is_integer { @@ -1339,7 +1332,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, } // (value, number of digits up to 6) -fn consume_hex_digits<'a>(tokenizer: &mut Tokenizer<'a>) -> (u32, u32) { +fn consume_hex_digits(tokenizer: &mut Tokenizer<'_>) -> (u32, u32) { let mut value = 0; let mut digits = 0; while digits < 6 && !tokenizer.is_eof() { diff --git a/src/unicode_range.rs b/src/unicode_range.rs index b0a2017c..ce6bb3b5 100644 --- a/src/unicode_range.rs +++ b/src/unicode_range.rs @@ -24,7 +24,7 @@ pub struct UnicodeRange { impl UnicodeRange { /// https://drafts.csswg.org/css-syntax/#urange-syntax - pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, BasicParseError<'i>> { + pub fn parse<'i>(input: &mut Parser<'i, '_>) -> Result<Self, BasicParseError<'i>> { // <urange> = // u '+' <ident-token> '?'* | // u <dimension-token> '?'* | @@ -57,7 +57,7 @@ impl UnicodeRange { } } -fn parse_tokens<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> { +fn parse_tokens<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> { match input.next_including_whitespace()?.clone() { Token::Delim('+') => { match *input.next_including_whitespace()? { @@ -123,15 +123,13 @@ fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> { start: first_hex_value, end: first_hex_value, }); - } else { - if let Some((&b'-', mut text)) = text.split_first() { - let (second_hex_value, hex_digit_count) = consume_hex(&mut text); - if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() { - return Ok(UnicodeRange { - start: first_hex_value, - end: second_hex_value, - }); - } + } else if let Some((&b'-', mut text)) = text.split_first() { + let (second_hex_value, hex_digit_count) = consume_hex(&mut text); + if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() { + return Ok(UnicodeRange { + start: first_hex_value, + end: second_hex_value, + }); } } Err(()) From c6fc7d4ff3d75446f6e8042a6d4b342f03183bc6 Mon Sep 17 00:00:00 2001 From: Tiaan Louw Date: Fri, 22 Mar 2024 14:51:49 +0100 Subject: [PATCH 2/2] Run cargo fmt on all files --- color/lib.rs | 6 ++---- color/tests.rs | 29 +++++++++++++++++------------ macros/lib.rs | 8 ++++---- src/cow_rc_str.rs | 2 +- src/parser.rs | 2 +- src/rules_and_declarations.rs | 10 +++++----- src/serializer.rs | 3 ++- src/tests.rs | 11 ++--------- src/tokenizer.rs | 23 +++++++++++++++++------ 9 files changed, 51 insertions(+), 43 deletions(-) diff --git a/color/lib.rs b/color/lib.rs index 1c52e38d..4787175d 100644 --- a/color/lib.rs +++ b/color/lib.rs @@ -55,10 +55,8 @@ where let token = input.next()?; match *token { Token::Hash(ref value) | Token::IDHash(ref value) => { - parse_hash_color(value.as_bytes()).map(|(r, g, b, a)| { - P::Output::from_rgba(r, g, b, a) - }) - }, + parse_hash_color(value.as_bytes()).map(|(r, g, b, a)| P::Output::from_rgba(r, g, b, a)) + } Token::Ident(ref value) => parse_color_keyword(value), Token::Function(ref name) => { let name = name.clone(); diff --git a/color/tests.rs b/color/tests.rs index babb076b..aa585a12 100644 --- a/color/tests.rs +++ b/color/tests.rs @@ -3,7 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ use super::*; -use crate::{ColorParser, PredefinedColorSpace, Color, RgbaLegacy}; +use crate::{Color, ColorParser, PredefinedColorSpace, RgbaLegacy}; use cssparser::{Parser, ParserInput}; use serde_json::{self, json, Value}; @@ -43,7 +43,6 @@ fn assert_json_eq(results: Value, expected: Value, message: &str) { } } - fn run_raw_json_tests<F: Fn(Value, Value) -> ()>(json_data: &str, run: F) { let items = match serde_json::from_str(json_data) { Ok(Value::Array(items)) => items, other => panic!("Invalid JSON: {:?}", other), @@ -92,11 +91,14 @@ fn color3() { #[cfg_attr(all(miri, feature = "skip_long_tests"), ignore)] #[test] fn color3_hsl() { - run_color_tests(include_str!("../src/css-parsing-tests/color3_hsl.json"), |c| { - c.ok() - .map(|v| v.to_css_string().to_json()) - .unwrap_or(Value::Null) - }) + run_color_tests( + include_str!("../src/css-parsing-tests/color3_hsl.json"), + |c| { + c.ok() + .map(|v| v.to_css_string().to_json()) + .unwrap_or(Value::Null) + }, + ) } /// color3_keywords.json is different: R, G and B are in 0..255 rather than 0..1 @@ -115,11 +117,14 @@ fn color3_keywords() { #[cfg_attr(all(miri, feature = "skip_long_tests"), ignore)] #[test] fn color4_hwb() { - run_color_tests(include_str!("../src/css-parsing-tests/color4_hwb.json"), |c| { - c.ok() - .map(|v| v.to_css_string().to_json()) - .unwrap_or(Value::Null) - }) + run_color_tests( + include_str!("../src/css-parsing-tests/color4_hwb.json"), + |c| { + c.ok() + .map(|v| v.to_css_string().to_json()) + .unwrap_or(Value::Null) + }, + ) } #[cfg_attr(all(miri, feature = "skip_long_tests"), ignore)] diff --git a/macros/lib.rs b/macros/lib.rs index 4354cc8e..dda7efda 100644 --- a/macros/lib.rs +++ b/macros/lib.rs @@ -48,9 +48,7 @@ fn get_byte_from_lit(lit: &syn::Lit) -> u8 { fn get_byte_from_expr_lit(expr: &syn::Expr) -> u8 { match *expr { - syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => { - get_byte_from_lit(lit) - } + syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => get_byte_from_lit(lit), _ => unreachable!(), } } @@ -69,7 +67,9 @@ fn parse_pat_to_table<'a>( table[value as usize] = case_id; } } - &syn::Pat::Range(syn::PatRange { ref start, ref end, .. }) => { + &syn::Pat::Range(syn::PatRange { + ref start, ref end, .. + }) => { let lo = get_byte_from_expr_lit(&start.as_ref().unwrap()); let hi = get_byte_from_expr_lit(&end.as_ref().unwrap()); for value in lo..hi { diff --git a/src/cow_rc_str.rs b/src/cow_rc_str.rs index ecf14a0a..29e59e20 100644 --- a/src/cow_rc_str.rs +++ b/src/cow_rc_str.rs @@ -4,7 +4,7 @@ use std::borrow::{Borrow, Cow}; use std::rc::Rc; -use std::{cmp, fmt, hash, marker, mem, ops, slice, str, ptr}; +use std::{cmp, fmt, hash, marker, mem, ops, ptr, slice, str}; /// A string that is either shared (heap-allocated and reference-counted) or borrowed. /// diff --git a/src/parser.rs b/src/parser.rs index cce9c8b8..c822a225 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -748,7 +748,7 @@ impl<'i: 't, 't> Parser<'i, 't> { match self.parse_until_before(Delimiter::Comma, &mut parse_one) { Ok(v) => values.push(v), Err(e) if !ignore_errors => return Err(e), - Err(_) => {}, + Err(_) => {} } match self.next() { Err(_) => return Ok(values), diff --git a/src/rules_and_declarations.rs b/src/rules_and_declarations.rs index 0b180b4f..b23e9dc0 100644 --- a/src/rules_and_declarations.rs +++ b/src/rules_and_declarations.rs @@ -253,10 +253,10 @@ where self.input.skip_whitespace(); let start = self.input.state(); match self.input.next_including_whitespace_and_comments().ok()? { - Token::CloseCurlyBracket | - Token::WhiteSpace(..) | - Token::Semicolon | - Token::Comment(..)
=> continue, + Token::CloseCurlyBracket + | Token::WhiteSpace(..) + | Token::Semicolon + | Token::Comment(..) => continue, Token::AtKeyword(ref name) => { let name = name.clone(); return Some(parse_at_rule(&start, name, self.input, &mut *self.parser)); @@ -294,7 +294,7 @@ where &mut *self.parser, Delimiter::Semicolon | Delimiter::CurlyBracketBlock, ) { - return Some(Ok(qual)) + return Some(Ok(qual)); } } diff --git a/src/serializer.rs b/src/serializer.rs index a8f32104..1dab41c0 100644 --- a/src/serializer.rs +++ b/src/serializer.rs @@ -49,7 +49,8 @@ where dtoa_short::write(dest, value)? }; - if int_value.is_none() && value.fract() == 0. && !notation.decimal_point && !notation.scientific { + if int_value.is_none() && value.fract() == 0. && !notation.decimal_point && !notation.scientific + { dest.write_str(".0")?; } Ok(()) diff --git a/src/tests.rs b/src/tests.rs index 96b2710d..d9feb5ed 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -5,7 +5,6 @@ #[cfg(feature = "bench")] extern crate test; - use serde_json::{self, json, Map, Value}; #[cfg(feature = "bench")] @@ -35,10 +34,7 @@ fn almost_equals(a: &Value, b: &Value) -> bool { (&Value::Bool(a), &Value::Bool(b)) => a == b, (Value::String(a), Value::String(b)) => a == b, (Value::Array(a), Value::Array(b)) => { - a.len() == b.len() - && a.iter() - .zip(b.iter()) - .all(|(a, b)| almost_equals(a, b)) + a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| almost_equals(a, b)) } (&Value::Object(_), &Value::Object(_)) => panic!("Not implemented"), (&Value::Null, &Value::Null) => true, @@ -433,8 +429,7 @@ fn serializer(preserve_comments: bool) { preserve_comments: bool, ) { while let Ok(token) = if preserve_comments { - input - .next_including_whitespace_and_comments().cloned() + input.next_including_whitespace_and_comments().cloned() } else { input.next_including_whitespace().cloned() } { @@ -592,8 +587,6 @@ fn line_numbers() { #[test] fn overflow() { - - let css = r" 2147483646 2147483647 diff --git a/src/tokenizer.rs b/src/tokenizer.rs index 29451002..f1716c71 100644 --- a/src/tokenizer.rs +++ b/src/tokenizer.rs @@ -255,7 +255,9 @@ impl<'a> Tokenizer<'a> { #[inline] pub fn see_function(&mut self, name: &str) { - if self.var_or_env_functions == SeenStatus::LookingForThem && (name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env")) { + if self.var_or_env_functions == SeenStatus::LookingForThem + && (name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env")) + { self.var_or_env_functions = SeenStatus::SeenAtLeastOne; } } @@ -320,10 +322,12 @@ impl<'a> Tokenizer<'a> { pub fn current_source_line(&self) -> &'a str { let current = self.position(); - let start = self.slice(SourcePosition(0)..current) + let start = self + .slice(SourcePosition(0)..current) .rfind(|c| matches!(c, '\r' | '\n' | '\x0C')) .map_or(0, |start| start + 1); - let end = self.slice(current..SourcePosition(self.input.len())) + let end = self + .slice(current..SourcePosition(self.input.len())) .find(|c| matches!(c, '\r' | '\n' | '\x0C')) .map_or(self.input.len(), |end| current.0 + end); self.slice(SourcePosition(start)..SourcePosition(end)) @@ -422,7 +426,10 @@ impl<'a> Tokenizer<'a> { #[inline] fn next_char(&self) -> char { - unsafe { self.input.get_unchecked(self.position().0..) }.chars().next().unwrap() + unsafe { self.input.get_unchecked(self.position().0..) 
} + .chars() + .next() + .unwrap() } // Given that a newline has been seen, advance over the newline @@ -1052,9 +1059,13 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> { let mut value = sign * (integral_part + fractional_part); - if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') && (tokenizer.byte_at(1).is_ascii_digit() || (tokenizer.has_at_least(2) + if tokenizer.has_at_least(1) + && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') + && (tokenizer.byte_at(1).is_ascii_digit() + || (tokenizer.has_at_least(2) && matches!(tokenizer.byte_at(1), b'+' | b'-') - && tokenizer.byte_at(2).is_ascii_digit())) { + && tokenizer.byte_at(2).is_ascii_digit())) + { is_integer = false; tokenizer.advance(1); let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
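Reviewer note, not part of either commit: the clippy pass in PATCH 1/2 is mechanical and behavior-preserving. The sketch below is a standalone illustration of its recurring rewrites, using only the standard library; the sample values are invented for illustration and are not taken from the crate.

// Standalone sketch of the clippy rewrites applied in PATCH 1/2.
// Sample values are illustrative only.
fn main() {
    let b = b'7';
    // Byte-range matches become dedicated ASCII predicates.
    assert_eq!(matches!(b, b'0'..=b'9'), b.is_ascii_digit());
    // Manual range comparisons become RangeInclusive::contains.
    assert_eq!(b >= b'\x01' && b <= b'\x1F', (b'\x01'..=b'\x1F').contains(&b));
    // .map(|s| s.clone()) on Result<&T, E> becomes .cloned().
    let s = String::from("url");
    let r: Result<&String, ()> = Ok(&s);
    assert_eq!(r.map(|v| v.clone()), r.cloned());
    // Single-character &str patterns become char patterns.
    assert!("-n-3".starts_with('-'));
    // repeat('0').take(309).collect::<String>() becomes str::repeat.
    assert_eq!("0".repeat(3), "000");
}

Each assertion pairs the old spelling with its replacement; PATCH 2/2 then only reflows and reorders the same code without changing behavior.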