
Run cargo fmt and cargo clippy --fix on all files #373

Closed · wants to merge 2 commits
Changes from 1 commit
Run cargo clippy --fix on all files
tiaanl committed Mar 22, 2024
commit 171ee21e45c9bacea74cbc7440b45246f729d6e4
4 changes: 2 additions & 2 deletions src/macros.rs
@@ -182,7 +182,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
let input_bytes =
unsafe { &*(input.as_bytes() as *const [u8] as *const [MaybeUninit<u8>]) };

-buffer.copy_from_slice(&*input_bytes);
+buffer.copy_from_slice(input_bytes);

// Same as above re layout, plus these bytes have been initialized:
let buffer = unsafe { &mut *(buffer as *mut [MaybeUninit<u8>] as *mut [u8]) };
@@ -195,7 +195,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
}

Some(
-match input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) {
+match input.bytes().position(|byte| byte.is_ascii_uppercase()) {
Some(first_uppercase) => make_ascii_lowercase(buffer, input, first_uppercase),
// common case: input is already lower-case
None => input,
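Note: the replacement above is clippy's `manual_is_ascii_check` fix: the explicit `matches!(byte, b'A'..=b'Z')` range check becomes `byte.is_ascii_uppercase()`, which says the same thing by name. A minimal standalone sketch of the pattern (the `find_first_uppercase` helper is made up for illustration, it is not code from this crate):

```rust
/// Hypothetical helper mirroring the check in `_cssparser_internal_to_lowercase`:
/// position of the first ASCII uppercase byte, if any.
fn find_first_uppercase(input: &str) -> Option<usize> {
    // Before the fix: input.bytes().position(|byte| matches!(byte, b'A'..=b'Z'))
    input.bytes().position(|byte| byte.is_ascii_uppercase())
}

fn main() {
    assert_eq!(find_first_uppercase("rgba"), None); // already lower-case
    assert_eq!(find_first_uppercase("rgBa"), Some(2));
}
```
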
14 changes: 7 additions & 7 deletions src/nth.rs
@@ -8,7 +8,7 @@ use super::{BasicParseError, Parser, ParserInput, Token};
/// The input is typically the arguments of a function,
/// in which case the caller needs to check if the arguments’ parser is exhausted.
/// Return `Ok((A, B))`, or `Err(())` for a syntax error.
-pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> {
+pub fn parse_nth<'i>(input: &mut Parser<'i, '_>) -> Result<(i32, i32), BasicParseError<'i>> {
match *input.next()? {
Token::Number {
int_value: Some(b), ..
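Note: the signature change to `parse_nth` above (and the same edit to several functions below) is clippy's `needless_lifetimes` fix: a lifetime parameter that is mentioned only once can be elided with `'_`. A self-contained sketch with a toy `Parser` type rather than the real cssparser one:

```rust
// Toy stand-in, not the real cssparser `Parser`.
struct Parser<'i, 't> {
    original: &'i str,
    remaining: &'t str,
}

// Before: `'t` is declared but used in exactly one place, so the name adds nothing.
fn next_char_explicit<'i, 't>(parser: &Parser<'i, 't>) -> Result<char, &'i str> {
    parser.remaining.chars().next().ok_or(parser.original)
}

// After the `needless_lifetimes` fix: the single-use lifetime is elided with `'_`,
// while `'i` stays named because the return type depends on it.
fn next_char_elided<'i>(parser: &Parser<'i, '_>) -> Result<char, &'i str> {
    parser.remaining.chars().next().ok_or(parser.original)
}

fn main() {
    let p = Parser { original: "2n+1", remaining: "n+1" };
    assert_eq!(next_char_explicit(&p), Ok('n'));
    assert_eq!(next_char_elided(&p), Ok('n'));
}
```
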
@@ -22,7 +22,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
unit,
"n" => Ok(parse_b(input, a)?),
"n-" => Ok(parse_signless_b(input, a, -1)?),
-_ => match parse_n_dash_digits(&*unit) {
+_ => match parse_n_dash_digits(unit) {
Ok(b) => Ok((a, b)),
Err(()) => {
let unit = unit.clone();
@@ -40,7 +40,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
"n-" => Ok(parse_signless_b(input, 1, -1)?),
"-n-" => Ok(parse_signless_b(input, -1, -1)?),
_ => {
-let (slice, a) = if value.starts_with("-") {
+let (slice, a) = if value.starts_with('-') {
(&value[1..], -1)
} else {
(&**value, 1)
@@ -81,7 +81,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
}
}

-fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
+fn parse_b<'i>(input: &mut Parser<'i, '_>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
let start = input.state();
match input.next() {
Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1),
@@ -98,8 +98,8 @@ fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), Bas
}
}

-fn parse_signless_b<'i, 't>(
-input: &mut Parser<'i, 't>,
+fn parse_signless_b<'i>(
+input: &mut Parser<'i, '_>,
a: i32,
b_sign: i32,
) -> Result<(i32, i32), BasicParseError<'i>> {
@@ -118,7 +118,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
let bytes = string.as_bytes();
if bytes.len() >= 3
&& bytes[..2].eq_ignore_ascii_case(b"n-")
-&& bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
+&& bytes[2..].iter().all(|&c| c.is_ascii_digit())
{
Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
} else {
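Note: two more lints show up later in this file: `single_char_pattern` (`starts_with("-")` becomes `starts_with('-')`, a `char` pattern instead of a one-byte substring search) and another `manual_is_ascii_check` (`matches!(c, b'0'..=b'9')` becomes `c.is_ascii_digit()`). A small sketch combining both checks; the `parse_sign_and_digits` helper is hypothetical, not part of the crate:

```rust
/// Hypothetical helper showing the two rewritten checks side by side.
fn parse_sign_and_digits(value: &str) -> Option<i32> {
    // `char` pattern instead of `starts_with("-")`
    let (digits, sign) = if value.starts_with('-') {
        (&value[1..], -1)
    } else {
        (value, 1)
    };
    // `is_ascii_digit()` instead of `matches!(c, b'0'..=b'9')`
    if !digits.is_empty() && digits.bytes().all(|c| c.is_ascii_digit()) {
        digits.parse::<i32>().ok().map(|n| sign * n)
    } else {
        None
    }
}

fn main() {
    assert_eq!(parse_sign_and_digits("42"), Some(42));
    assert_eq!(parse_sign_and_digits("-7"), Some(-7));
    assert_eq!(parse_sign_and_digits("n-3"), None);
}
```
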
12 changes: 6 additions & 6 deletions src/parser.rs
@@ -116,7 +116,7 @@ impl<'i, T> From<BasicParseError<'i>> for ParseError<'i, T> {
impl SourceLocation {
/// Create a new BasicParseError at this location for an unexpected token
#[inline]
-pub fn new_basic_unexpected_token_error<'i>(self, token: Token<'i>) -> BasicParseError<'i> {
+pub fn new_basic_unexpected_token_error(self, token: Token<'_>) -> BasicParseError<'_> {
BasicParseError {
kind: BasicParseErrorKind::UnexpectedToken(token),
location: self,
@@ -125,7 +125,7 @@ impl SourceLocation {

/// Create a new ParseError at this location for an unexpected token
#[inline]
-pub fn new_unexpected_token_error<'i, E>(self, token: Token<'i>) -> ParseError<'i, E> {
+pub fn new_unexpected_token_error<E>(self, token: Token<'_>) -> ParseError<'_, E> {
ParseError {
kind: ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(token)),
location: self,
@@ -835,7 +835,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_ident, but clone the CowRcStr
#[inline]
pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
-self.expect_ident().map(|s| s.clone())
+self.expect_ident().cloned()
}

/// Parse a <ident-token> whose unescaped value is an ASCII-insensitive match for the given value.
@@ -860,7 +860,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_string, but clone the CowRcStr
#[inline]
pub fn expect_string_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
-self.expect_string().map(|s| s.clone())
+self.expect_string().cloned()
}

/// Parse either a <ident-token> or a <string-token>, and return the unescaped value.
@@ -879,7 +879,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::UnquotedUrl(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
-input.expect_string().map_err(Into::into).map(|s| s.clone())
+input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}
@@ -894,7 +894,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::QuotedString(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
-input.expect_string().map_err(Into::into).map(|s| s.clone())
+input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}
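Note: the repeated edit in this file is the `map_clone` style fix: when a `Result` holds a reference, `.map(|s| s.clone())` can be written as `.cloned()`. A minimal sketch using plain `String`s instead of `CowRcStr` (the `expect_ident` stand-in below is invented for the example):

```rust
// Stand-in for `expect_ident()`: returns a reference into borrowed data on success.
fn expect_ident(tokens: &[String]) -> Result<&String, String> {
    tokens.first().ok_or_else(|| "unexpected end of input".to_owned())
}

fn main() {
    let tokens = vec!["color".to_owned(), "red".to_owned()];

    // Before the fix: a closure whose only job is to clone.
    let a: Result<String, String> = expect_ident(&tokens).map(|s| s.clone());

    // After: `Result::cloned` states the intent directly.
    let b: Result<String, String> = expect_ident(&tokens).cloned();

    assert_eq!(a, b);
}
```
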
6 changes: 3 additions & 3 deletions src/rules_and_declarations.rs
@@ -14,7 +14,7 @@ use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavi
///
/// Typical usage is `input.try_parse(parse_important).is_ok()`
/// at the end of a `DeclarationParser::parse_value` implementation.
-pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
+pub fn parse_important<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> {
input.expect_delim('!')?;
input.expect_ident_matching("important")
}
@@ -367,7 +367,7 @@ where
let start = self.input.state();
let at_keyword = match self.input.next_byte()? {
b'@' => match self.input.next_including_whitespace_and_comments() {
-Ok(&Token::AtKeyword(ref name)) => Some(name.clone()),
+Ok(Token::AtKeyword(name)) => Some(name.clone()),
_ => {
self.input.reset(&start);
None
@@ -503,5 +503,5 @@ where
input.expect_curly_bracket_block()?;
// Do this here so that we consume the `{` even if the prelude is `Err`.
let prelude = prelude?;
-parse_nested_block(input, |input| parser.parse_block(prelude, &start, input))
+parse_nested_block(input, |input| parser.parse_block(prelude, start, input))
}
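Note: the two edits in this file drop redundant borrows. In the `match` arm, the default binding modes from match ergonomics let `Ok(Token::AtKeyword(name))` bind `name` by reference without spelling out `&... ref name`; and `&start` becomes `start` (clippy's `needless_borrow`) because `start` is already a reference. A standalone sketch of the pattern change with a toy token enum, not the real `Token`:

```rust
// Toy token type for illustration only.
enum Token {
    AtKeyword(String),
    Ident(String),
}

fn at_keyword_name(result: Result<&Token, ()>) -> Option<String> {
    match result {
        // Before the fix this arm read:
        //     Ok(&Token::AtKeyword(ref name)) => Some(name.clone()),
        // Matching a non-reference pattern against `&Token` makes `name` a
        // `&String` automatically, so the shorter form below is equivalent.
        Ok(Token::AtKeyword(name)) => Some(name.clone()),
        _ => None,
    }
}

fn main() {
    let at = Token::AtKeyword("media".to_owned());
    let ident = Token::Ident("div".to_owned());
    assert_eq!(at_keyword_name(Ok(&at)), Some("media".to_owned()));
    assert_eq!(at_keyword_name(Ok(&ident)), None);
    assert_eq!(at_keyword_name(Err(())), None);
}
```
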
26 changes: 12 additions & 14 deletions src/serializer.rs
@@ -4,7 +4,7 @@

use crate::match_byte;
use dtoa_short::{self, Notation};
-use itoa;

use std::fmt::{self, Write};
use std::str;

@@ -49,10 +49,8 @@ where
dtoa_short::write(dest, value)?
};

-if int_value.is_none() && value.fract() == 0. {
-if !notation.decimal_point && !notation.scientific {
-dest.write_str(".0")?;
-}
+if int_value.is_none() && value.fract() == 0. && !notation.decimal_point && !notation.scientific {
+dest.write_str(".0")?;
}
Ok(())
}
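Note: the change above is clippy's `collapsible_if`: the nested `if` guarding the `".0"` suffix is folded into a single condition. A freestanding sketch of the same shape, with plain `bool` flags standing in for the real `Notation` type (names here are made up):

```rust
use std::fmt::{self, Write};

// Simplified stand-in for the post-serialization step: append ".0" when a float
// with no fractional part was written without a decimal point or exponent.
fn write_trailing_zero<W: Write>(
    dest: &mut W,
    int_value: Option<i32>,
    value: f32,
    decimal_point: bool,
    scientific: bool,
) -> fmt::Result {
    // Previously a nested `if` inside `if int_value.is_none() && value.fract() == 0.`;
    // collapsing it into one condition is what `collapsible_if` suggests.
    if int_value.is_none() && value.fract() == 0. && !decimal_point && !scientific {
        dest.write_str(".0")?;
    }
    Ok(())
}

fn main() {
    let mut out = String::from("17");
    write_trailing_zero(&mut out, None, 17.0, false, false).unwrap();
    assert_eq!(out, "17.0");
}
```
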
@@ -63,23 +61,23 @@ impl<'a> ToCss for Token<'a> {
W: fmt::Write,
{
match *self {
-Token::Ident(ref value) => serialize_identifier(&**value, dest)?,
+Token::Ident(ref value) => serialize_identifier(value, dest)?,
Token::AtKeyword(ref value) => {
dest.write_str("@")?;
-serialize_identifier(&**value, dest)?;
+serialize_identifier(value, dest)?;
}
Token::Hash(ref value) => {
dest.write_str("#")?;
serialize_name(value, dest)?;
}
Token::IDHash(ref value) => {
dest.write_str("#")?;
-serialize_identifier(&**value, dest)?;
+serialize_identifier(value, dest)?;
}
-Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
+Token::QuotedString(ref value) => serialize_string(value, dest)?,
Token::UnquotedUrl(ref value) => {
dest.write_str("url(")?;
-serialize_unquoted_url(&**value, dest)?;
+serialize_unquoted_url(value, dest)?;
dest.write_str(")")?;
}
Token::Delim(value) => dest.write_char(value)?,
@@ -134,7 +132,7 @@ impl<'a> ToCss for Token<'a> {
Token::CDC => dest.write_str("-->")?,

Token::Function(ref name) => {
-serialize_identifier(&**name, dest)?;
+serialize_identifier(name, dest)?;
dest.write_str("(")?;
}
Token::ParenthesisBlock => dest.write_str("(")?,
@@ -167,7 +165,7 @@ fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
{
-static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
+static HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";
let b3;
let b4;
let bytes = if ascii_byte > 0x0F {
@@ -179,7 +177,7 @@ where
b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' '];
&b3[..]
};
-dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
+dest.write_str(unsafe { str::from_utf8_unchecked(bytes) })
}

fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
@@ -240,7 +238,7 @@ where
dest.write_str(&value[chunk_start..i])?;
if let Some(escaped) = escaped {
dest.write_str(escaped)?;
-} else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
+} else if (b'\x01'..=b'\x1F').contains(&b) || b == b'\x7F' {
hex_escape(b, dest)?;
} else {
char_escape(b, dest)?;
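Note: the last hunk is `manual_range_contains`: a pair of comparisons is replaced by `RangeInclusive::contains`, which names the range once. A minimal sketch of the same check (the `needs_hex_escape` name is invented for the example):

```rust
/// Hypothetical predicate matching the check in the serializer: C0 control
/// bytes (other than those handled earlier) and DEL get a hex escape.
fn needs_hex_escape(b: u8) -> bool {
    // Before: (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F'
    (b'\x01'..=b'\x1F').contains(&b) || b == b'\x7F'
}

fn main() {
    assert!(needs_hex_escape(0x02));
    assert!(needs_hex_escape(0x7F));
    assert!(!needs_hex_escape(b'a'));
}
```
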