Use MaybeUninit when available #252

Merged · 2 commits · Jul 6, 2019
3 changes: 2 additions & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "cssparser"
-version = "0.25.7"
+version = "0.25.8"
 authors = [ "Simon Sapin <simon.sapin@exyr.org>" ]

 description = "Rust implementation of CSS Syntax Level 3"
@@ -30,6 +30,7 @@ serde = {version = "1.0", optional = true}
 smallvec = "0.6"

 [build-dependencies]
+autocfg = "0.1.4"
 syn = { version = "0.15.12", features = ["extra-traits", "fold", "full"] }
 quote = "0.6"
 proc-macro2 = "0.4"
4 changes: 3 additions & 1 deletion build.rs
@@ -2,6 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

+extern crate autocfg;
 #[macro_use]
 extern crate quote;
 #[macro_use]
@@ -10,7 +11,6 @@ extern crate proc_macro2;

 #[cfg(feature = "dummy_match_byte")]
 mod codegen {
-    use std::path::Path;
     pub fn main() {}
 }

@@ -50,5 +50,7 @@ fn main() {
         println!("cargo:rustc-cfg=rustc_has_pr45225")
     }

+    autocfg::new().emit_has_path("std::mem::MaybeUninit");
+
     codegen::main();
 }
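Note: autocfg detects compiler support by test-compiling a tiny probe crate; emit_has_path prints a cargo:rustc-cfg line when the given path resolves, which Cargo turns into a cfg flag for the rest of the build. A rough self-contained sketch of the mechanism (assumed mechanics, not autocfg's actual implementation):

    use std::io::Write;
    use std::process::{Command, Stdio};

    // Feed a one-line probe crate to rustc on stdin; if it compiles, the path exists.
    fn probe_compiles(source: &str) -> bool {
        let child = Command::new("rustc")
            .args(&["--crate-type=lib", "--emit=metadata", "-o", "/dev/null", "-"]) // Unix-only sink
            .stdin(Stdio::piped())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn();
        match child {
            Ok(mut child) => {
                child.stdin.as_mut().unwrap().write_all(source.as_bytes()).unwrap();
                child.wait().map(|status| status.success()).unwrap_or(false)
            }
            Err(_) => false,
        }
    }

    fn main() {
        // Roughly what `autocfg::new().emit_has_path("std::mem::MaybeUninit")` does:
        let path = "std::mem::MaybeUninit";
        if probe_compiles(&format!("pub use {};", path)) {
            println!("cargo:rustc-cfg=has_{}", path.replace("::", "__"));
        }
    }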
6 changes: 3 additions & 3 deletions src/color.rs
@@ -530,9 +530,9 @@ pub fn parse_color_keyword(ident: &str) -> Result<Color, ()> {
 #[inline]
 fn from_hex(c: u8) -> Result<u8, ()> {
     match c {
-        b'0'...b'9' => Ok(c - b'0'),
-        b'a'...b'f' => Ok(c - b'a' + 10),
-        b'A'...b'F' => Ok(c - b'A' + 10),
+        b'0'..=b'9' => Ok(c - b'0'),
+        b'a'..=b'f' => Ok(c - b'a' + 10),
+        b'A'..=b'F' => Ok(c - b'A' + 10),
         _ => Err(()),
     }
 }
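Note: the bulk of this PR's churn, here and in the files below, is one mechanical substitution: `...` is the old inclusive range-pattern syntax, deprecated in favor of `..=` (stabilized in Rust 1.26). A self-contained illustration (not from the diff):

    fn hex_value(c: u8) -> Option<u8> {
        match c {
            b'0'..=b'9' => Some(c - b'0'),     // `..=` is inclusive on both ends
            b'a'..=b'f' => Some(c - b'a' + 10),
            b'A'..=b'F' => Some(c - b'A' + 10),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(hex_value(b'f'), Some(15)); // the upper bound is included
        assert_eq!(hex_value(b'g'), None);
    }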
21 changes: 17 additions & 4 deletions src/macros.rs
@@ -110,13 +110,26 @@ macro_rules! ascii_case_insensitive_phf_map {
 #[doc(hidden)]
 macro_rules! cssparser_internal__to_lowercase {
     ($input: expr, $BUFFER_SIZE: expr => $output: ident) => {
-        // mem::uninitialized() is ok because `buffer` is only used in `_internal__to_lowercase`,
+        let mut buffer;
+        // Safety: `buffer` is only used in `_internal__to_lowercase`,
         // which initializes with `copy_from_slice` the part of the buffer it uses,
         // before it uses it.
         #[allow(unsafe_code)]
-        let mut buffer: [u8; $BUFFER_SIZE] = unsafe { ::std::mem::uninitialized() };
+        let buffer = unsafe {
+            // FIXME: remove this when we require Rust 1.36
+            #[cfg(not(has_std__mem__MaybeUninit))]
+            {
+                buffer = ::std::mem::uninitialized::<[u8; $BUFFER_SIZE]>();
+                &mut buffer
+            }
+            #[cfg(has_std__mem__MaybeUninit)]
+            {
+                buffer = ::std::mem::MaybeUninit::<[u8; $BUFFER_SIZE]>::uninit();
+                &mut *(buffer.as_mut_ptr())
+            }
+        };
         let input: &str = $input;
-        let $output = $crate::_internal__to_lowercase(&mut buffer, input);
+        let $output = $crate::_internal__to_lowercase(buffer, input);
     };
 }

@@ -130,7 +143,7 @@ macro_rules! cssparser_internal__to_lowercase {
 #[allow(non_snake_case)]
 pub fn _internal__to_lowercase<'a>(buffer: &'a mut [u8], input: &'a str) -> Option<&'a str> {
     if let Some(buffer) = buffer.get_mut(..input.len()) {
-        if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'...b'Z')) {
+        if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) {
             buffer.copy_from_slice(input.as_bytes());
             buffer[first_uppercase..].make_ascii_lowercase();
             // `buffer` was initialized to a copy of `input` (which is &str so well-formed UTF-8)
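Note: this is the core change of the PR. `mem::uninitialized` is deprecated because conjuring an initialized-looking value out of nothing is undefined behavior for most types; `MaybeUninit` reserves the stack space without making that claim. Both cfg branches hand `_internal__to_lowercase` a `&mut [u8]` whose used prefix is written (`copy_from_slice`) before it is ever read. A standalone sketch of the same pattern, assuming a fixed 64-byte buffer (illustrative, not the crate's API):

    use std::mem::MaybeUninit;
    use std::str;

    // Lowercase `input` via an uninitialized stack buffer, mirroring the macro above.
    fn to_ascii_lowercase(input: &str) -> Option<String> {
        if input.len() > 64 {
            return None; // a fixed size stands in for $BUFFER_SIZE
        }
        let mut buffer = MaybeUninit::<[u8; 64]>::uninit();
        // Safety: only the first `input.len()` bytes are used, and they are
        // written by `copy_from_slice` before any read (same argument as the diff).
        let buffer: &mut [u8] = unsafe { &mut *buffer.as_mut_ptr() };
        let used = &mut buffer[..input.len()];
        used.copy_from_slice(input.as_bytes());
        used.make_ascii_lowercase();
        // ASCII-only edits keep the copied bytes well-formed UTF-8.
        Some(str::from_utf8(used).unwrap().to_owned())
    }

    fn main() {
        assert_eq!(to_ascii_lowercase("Hello, World").unwrap(), "hello, world");
    }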
2 changes: 1 addition & 1 deletion src/nth.rs
@@ -105,7 +105,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
     let bytes = string.as_bytes();
     if bytes.len() >= 3
         && bytes[..2].eq_ignore_ascii_case(b"n-")
-        && bytes[2..].iter().all(|&c| matches!(c, b'0'...b'9'))
+        && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
     {
         Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
     } else {
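Note: this helper recognizes the `n-<digits>` tail of an An+B expression (as in `:nth-child(2n-5)`); slicing from index 1 keeps the minus sign, so the digits parse as a negative B. A self-contained approximation, using a plain `parse` where the real code saturates:

    fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
        let bytes = string.as_bytes();
        if bytes.len() >= 3
            && bytes[..2].eq_ignore_ascii_case(b"n-")
            && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
        {
            string[1..].parse().map_err(|_| ()) // saturating in the real code
        } else {
            Err(())
        }
    }

    fn main() {
        assert_eq!(parse_n_dash_digits("n-5"), Ok(-5));
        assert_eq!(parse_n_dash_digits("N-12"), Ok(-12)); // `n` is case-insensitive
        assert_eq!(parse_n_dash_digits("n+5"), Err(()));
    }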
8 changes: 4 additions & 4 deletions src/serializer.rs
@@ -207,7 +207,7 @@ where
         dest.write_str("-")?;
         value = &value[1..];
     }
-    if let digit @ b'0'...b'9' = value.as_bytes()[0] {
+    if let digit @ b'0'..=b'9' = value.as_bytes()[0] {
         hex_escape(digit, dest)?;
         value = &value[1..];
     }
@@ -226,7 +226,7 @@ where
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let escaped = match b {
-            b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' | b'_' | b'-' => continue,
+            b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'_' | b'-' => continue,
             _ if !b.is_ascii() => continue,
             b'\0' => Some("\u{FFFD}"),
             _ => None,
@@ -251,7 +251,7 @@ where
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let hex = match b {
-            b'\0'...b' ' | b'\x7F' => true,
+            b'\0'..=b' ' | b'\x7F' => true,
             b'(' | b')' | b'"' | b'\'' | b'\\' => false,
             _ => continue,
         };
@@ -318,7 +318,7 @@ where
             b'"' => Some("\\\""),
             b'\\' => Some("\\\\"),
             b'\0' => Some("\u{FFFD}"),
-            b'\x01'...b'\x1F' | b'\x7F' => None,
+            b'\x01'..=b'\x1F' | b'\x7F' => None,
             _ => continue,
         };
         self.inner.write_str(&s[chunk_start..i])?;
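Note: these ranges drive CSS serialization: an identifier starting with a digit gets that digit hex-escaped, and control characters in strings are escaped or replaced. A hedged usage sketch against the crate's public serializer API (the expected outputs are my reading of the code above, not captured from a test run):

    extern crate cssparser; // cssparser = "0.25"

    use cssparser::{serialize_identifier, serialize_string};

    fn main() {
        let mut out = String::new();
        serialize_identifier("1px", &mut out).unwrap();
        // A leading digit is not a valid identifier start, so it is hex-escaped:
        assert_eq!(out, "\\31 px");

        let mut out = String::new();
        serialize_string("a\"b", &mut out).unwrap();
        // Double quotes inside a quoted string are backslash-escaped:
        assert_eq!(out, "\"a\\\"b\"");
    }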
76 changes: 38 additions & 38 deletions src/tokenizer.rs
@@ -557,7 +557,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
             else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
                 // Any other valid case here already resulted in IDHash.
-                b'0'...b'9' | b'-' => true,
+                b'0'..=b'9' | b'-' => true,
                 _ => false,
             } { Hash(consume_name(tokenizer)) }
             else { Delim('#') }
@@ -576,11 +576,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'+' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -592,11 +592,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'-' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else if tokenizer.starts_with(b"-->") {
@@ -611,7 +611,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         },
         b'.' => {
             if tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9'
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9'
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -627,7 +627,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
                 Delim('/')
             }
         }
-        b'0'...b'9' => { consume_numeric(tokenizer) },
+        b'0'..=b'9' => { consume_numeric(tokenizer) },
         b':' => { tokenizer.advance(1); Colon },
         b';' => { tokenizer.advance(1); Semicolon },
         b'<' => {
@@ -644,7 +644,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { AtKeyword(consume_name(tokenizer)) }
             else { Delim('@') }
         },
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
+        b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
         b'[' => { tokenizer.advance(1); SquareBracketBlock },
         b'\\' => {
             if !tokenizer.has_newline_at(1) { consume_ident_like(tokenizer) }
@@ -745,8 +745,8 @@ fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
             b'\n' | b'\x0C' | b'\r' => {
                 tokenizer.consume_newline();
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -807,8 +807,8 @@ fn consume_quoted_string<'a>(
             b'\n' | b'\r' | b'\x0C' => {
                 return Err(tokenizer.slice_from(start_pos).into())
             },
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -859,8 +859,8 @@ fn consume_quoted_string<'a>(
                 string_bytes.extend("\u{FFFD}".as_bytes());
                 continue;
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -882,10 +882,10 @@ fn consume_quoted_string<'a>(
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
     !tokenizer.is_eof()
         && match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+            b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { true },
             b'-' => {
                 tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                    b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                    b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' | b'\0' => {
                         true
                     }
                     b'\\' => { !tokenizer.has_newline_at(1) }
@@ -921,7 +921,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
             return tokenizer.slice_from(start_pos).into();
         }
         match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) },
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1) },
            b'\\' | b'\0' => {
                // * The tokenizer’s input is UTF-8 since it’s `&str`.
                // * start_pos is at a code point boundary
@@ -931,10 +931,10 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
-            b => {
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xC0'..=b'\xEF' => { tokenizer.advance(1); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _b => {
                 return tokenizer.slice_from(start_pos).into();
             }
         }
@@ -943,7 +943,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => {
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => {
                 tokenizer.advance(1);
                 value_bytes.push(b) // ASCII
             }
@@ -957,19 +957,19 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 tokenizer.advance(1);
                 value_bytes.extend("\u{FFFD}".as_bytes());
             },
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 value_bytes.push(b)
             }
-            b'\xC0'...b'\xEF' => {
+            b'\xC0'..=b'\xEF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.advance(1);
                 value_bytes.push(b)
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 tokenizer.consume_4byte_intro();
                 value_bytes.push(b)
             }
@@ -985,9 +985,9 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {

 fn byte_to_hex_digit(b: u8) -> Option<u32> {
     Some(match_byte! { b,
-        b'0' ... b'9' => { b - b'0' },
-        b'a' ... b'f' => { b - b'a' + 10 },
-        b'A' ... b'F' => { b - b'A' + 10 },
+        b'0' ..= b'9' => { b - b'0' },
+        b'a' ..= b'f' => { b - b'a' + 10 },
+        b'A' ..= b'F' => { b - b'A' + 10 },
         _ => {
             return None
         }
@@ -1032,7 +1032,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut fractional_part: f64 = 0.;
     if tokenizer.has_at_least(1)
         && tokenizer.next_byte_unchecked() == b'.'
-        && matches!(tokenizer.byte_at(1), b'0'...b'9')
+        && matches!(tokenizer.byte_at(1), b'0'..=b'9')
     {
         is_integer = false;
         tokenizer.advance(1); // Consume '.'
@@ -1050,10 +1050,10 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut value = sign * (integral_part + fractional_part);

     if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
-        if matches!(tokenizer.byte_at(1), b'0'...b'9')
+        if matches!(tokenizer.byte_at(1), b'0'..=b'9')
             || (tokenizer.has_at_least(2)
                 && matches!(tokenizer.byte_at(1), b'+' | b'-')
-                && matches!(tokenizer.byte_at(2), b'0'...b'9'))
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9'))
         {
             is_integer = false;
             tokenizer.advance(1);
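Note: the two hunks above cover the fractional part and the scientific-notation exponent of a numeric token. A worked example of the arithmetic they set up (values assumed from the visible context; the real code folds the exponent in with further tokenizer state):

    // For "1.5e2": sign = 1, integral_part = 1, fractional_part = 0.5, exponent = 2.
    fn main() {
        let sign = 1.0_f64;
        let integral_part = 1.0_f64;
        let fractional_part = 0.5_f64;
        let value = sign * (integral_part + fractional_part);
        let exponent = 2;
        assert_eq!(value * 10f64.powi(exponent), 150.0);
    }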
@@ -1202,7 +1202,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
             tokenizer.advance(1);
             return UnquotedUrl(value.into())
         }
-        b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+        b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
         | b'"' | b'\'' | b'(' => {
             tokenizer.advance(1);
             return consume_bad_url(tokenizer, start_pos)
@@ -1216,8 +1216,8 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
            string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
            break
        }
-        b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-        b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+        b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+        b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
        _ => {
            // ASCII or other leading byte.
            tokenizer.advance(1);
@@ -1236,7 +1236,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
            tokenizer.advance(1);
            break;
        }
-        b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+        b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
        | b'"' | b'\'' | b'(' => {
            tokenizer.advance(1);
            return consume_bad_url(tokenizer, start_pos);
@@ -1254,13 +1254,13 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
            tokenizer.advance(1);
            string_bytes.extend("\u{FFFD}".as_bytes());
        }
-        b'\x80'...b'\xBF' => {
+        b'\x80'..=b'\xBF' => {
            // We’ll end up copying the whole code point
            // before this loop does something else.
            tokenizer.consume_continuation_byte();
            string_bytes.push(b);
        }
-        b'\xF0'...b'\xFF' => {
+        b'\xF0'..=b'\xFF' => {
            // We’ll end up copying the whole code point
            // before this loop does something else.
            tokenizer.consume_4byte_intro();
@@ -1367,7 +1367,7 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
         return '\u{FFFD}';
     } // Escaped EOF
     match_byte! { tokenizer.next_byte_unchecked(),
-        b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
+        b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),
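Note: the byte ranges recurring in the tokenizer arms above are UTF-8 classes: 0x80..=0xBF are continuation bytes that never start a code point, while 0xF0..=0xFF introduce 4-byte sequences, which is why the tokenizer advances differently for each. A self-contained sketch of that classification (illustrative, not the crate's internals):

    #[derive(Debug, PartialEq)]
    enum ByteClass {
        Ascii,        // 0x00..=0x7F: a whole code point by itself
        Continuation, // 0x80..=0xBF: never starts a code point
        Intro2or3,    // 0xC0..=0xEF: leads a 2- or 3-byte sequence
        Intro4,       // 0xF0..=0xFF: leads a 4-byte sequence
    }

    fn classify(b: u8) -> ByteClass {
        match b {
            0x00..=0x7F => ByteClass::Ascii,
            0x80..=0xBF => ByteClass::Continuation,
            0xC0..=0xEF => ByteClass::Intro2or3,
            0xF0..=0xFF => ByteClass::Intro4,
        }
    }

    fn main() {
        let snowman = "☃"; // U+2603, encoded as the three bytes E2 98 83
        let bytes = snowman.as_bytes();
        assert_eq!(classify(bytes[0]), ByteClass::Intro2or3);
        assert_eq!(classify(bytes[1]), ByteClass::Continuation);
        assert_eq!(classify(bytes[2]), ByteClass::Continuation);
    }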