Store bad string and url values.
jdm committed Jul 5, 2017
commit 5a2d392ee3121c687e3d1d235d5750e6927e2a04
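In short: `Token::BadUrl` and `Token::BadString` change from unit variants to tuple variants carrying the raw, unconsumed text as a `CompactCowStr<'a>`, so consumers can report what a malformed token actually contained. A minimal sketch of the kind of handler this enables (the `report_parse_error` function is hypothetical, and `CompactCowStr` is assumed to deref to `str`, consistent with the `&str` slices converted via `.into()` in the tokenizer diff below):

```rust
use cssparser::Token;

// Hypothetical error reporter, sketching what the new payloads allow.
// Before this commit, only "a bad url/string occurred" was observable.
fn report_parse_error(token: &Token) {
    match *token {
        // `raw` is the stored CompactCowStr; assumed to deref to str.
        Token::BadUrl(ref raw) => eprintln!("bad URL token: {:?}", &**raw),
        Token::BadString(ref raw) => eprintln!("bad string token: {:?}", &**raw),
        _ => {}
    }
}
```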
src/serializer.rs (4 additions, 4 deletions)
@@ -129,8 +129,8 @@ impl<'a> ToCss for Token<'a> {
Token::SquareBracketBlock => dest.write_str("[")?,
Token::CurlyBracketBlock => dest.write_str("{")?,

-Token::BadUrl => dest.write_str("url(<bad url>)")?,
-Token::BadString => dest.write_str("\"<bad string>\n")?,
+Token::BadUrl(_) => dest.write_str("url(<bad url>)")?,
+Token::BadString(_) => dest.write_str("\"<bad string>\n")?,
Token::CloseParenthesis => dest.write_str(")")?,
Token::CloseSquareBracket => dest.write_str("]")?,
Token::CloseCurlyBracket => dest.write_str("}")?,
@@ -376,7 +376,7 @@ impl<'a> Token<'a> {
TokenSerializationType(match *self {
Token::Ident(_) => Ident,
Token::AtKeyword(_) | Token::Hash(_) | Token::IDHash(_) => AtKeywordOrHash,
-Token::UnquotedUrl(_) | Token::BadUrl => UrlOrBadUrl,
+Token::UnquotedUrl(_) | Token::BadUrl(_) => UrlOrBadUrl,
Token::Delim('#') => DelimHash,
Token::Delim('@') => DelimAt,
Token::Delim('.') | Token::Delim('+') => DelimDotOrPlus,
@@ -400,7 +400,7 @@ impl<'a> Token<'a> {
Token::ParenthesisBlock => OpenParen,
Token::SquareBracketBlock | Token::CurlyBracketBlock |
Token::CloseParenthesis | Token::CloseSquareBracket | Token::CloseCurlyBracket |
-Token::QuotedString(_) | Token::BadString |
+Token::QuotedString(_) | Token::BadString(_) |
Token::Delim(_) | Token::Colon | Token::Semicolon | Token::Comma | Token::CDO |
Token::IncludeMatch | Token::PrefixMatch | Token::SuffixMatch
=> Other,
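Note the design choice visible above: serialization ignores the stored value (`_`) and keeps emitting the same fixed placeholders as before, so the new payload extends the API without changing serialized output. A hedged sketch of that behaviour (the raw value is made up; `ToCss::to_css` writing into a `String` via `fmt::Write` is assumed from the surrounding impl, and the `.into()` from `&str` mirrors the conversions in the tokenizer diff):

```rust
use cssparser::{ToCss, Token};

fn main() {
    // Sketch only: whatever raw text a BadUrl carries, serialization
    // emits the fixed placeholder shown in the diff above.
    let bad = Token::BadUrl("oops(unbalanced".into()); // hypothetical contents
    let mut css = String::new();
    bad.to_css(&mut css).unwrap(); // String implements fmt::Write
    assert_eq!(css, "url(<bad url>)");
}
```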
src/tests.rs (2 additions, 2 deletions)
@@ -848,8 +848,8 @@ fn one_component_value_to_json(token: Token, input: &mut Parser) -> Json {
v.extend(nested(input));
v
}),
-Token::BadUrl => JArray!["error", "bad-url"],
-Token::BadString => JArray!["error", "bad-string"],
+Token::BadUrl(_) => JArray!["error", "bad-url"],
+Token::BadString(_) => JArray!["error", "bad-string"],
Token::CloseParenthesis => JArray!["error", ")"],
Token::CloseSquareBracket => JArray!["error", "]"],
Token::CloseCurlyBracket => JArray!["error", "}"],
src/tokenizer.rs (16 additions, 8 deletions)
@@ -157,12 +157,12 @@ pub enum Token<'a> {
/// A `<bad-url-token>`
///
/// This token always indicates a parse error.
-BadUrl,
+BadUrl(CompactCowStr<'a>),

/// A `<bad-string-token>`
///
/// This token always indicates a parse error.
-BadString,
+BadString(CompactCowStr<'a>),

/// A `<)-token>`
///
@@ -194,7 +194,7 @@ impl<'a> Token<'a> {
pub fn is_parse_error(&self) -> bool {
matches!(
*self,
-BadUrl | BadString | CloseParenthesis | CloseSquareBracket | CloseCurlyBracket
+BadUrl(_) | BadString(_) | CloseParenthesis | CloseSquareBracket | CloseCurlyBracket
)
}
}
@@ -567,14 +567,14 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
fn consume_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool) -> Token<'a> {
match consume_quoted_string(tokenizer, single_quote) {
Ok(value) => QuotedString(value),
-Err(()) => BadString
+Err(value) => BadString(value)
}
}


/// Return `Err(())` on syntax error (ie. unescaped newline)
fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
--> Result<CompactCowStr<'a>, ()> {
+-> Result<CompactCowStr<'a>, CompactCowStr<'a>> {
tokenizer.advance(1); // Skip the initial quote
// start_pos is at code point boundary, after " or '
let start_pos = tokenizer.position();
@@ -607,15 +607,22 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
break
}
-b'\n' | b'\r' | b'\x0C' => { return Err(()) },
+b'\n' | b'\r' | b'\x0C' => {
+return Err(tokenizer.slice_from(start_pos).into())
+},
_ => {}
}
tokenizer.consume_byte();
}

while !tokenizer.is_eof() {
if matches!(tokenizer.next_byte_unchecked(), b'\n' | b'\r' | b'\x0C') {
-return Err(());
+return Err(
+// string_bytes is well-formed UTF-8, see other comments.
+unsafe {
+from_utf8_release_unchecked(string_bytes)
+}.into()
+);
}
let b = tokenizer.consume_byte();
match_byte! { b,
@@ -1024,6 +1031,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
}

fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
+let start_pos = tokenizer.position();
// Consume up to the closing )
while !tokenizer.is_eof() {
match_byte! { tokenizer.consume_byte(),
@@ -1034,7 +1042,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
_ => {},
}
}
-BadUrl
+BadUrl(tokenizer.slice_from(start_pos).into())
}
}

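A note on the `unsafe` block added in `consume_quoted_string`: `string_bytes` starts as a copy of an input `&str` slice and is only ever extended with complete UTF-8 sequences, so converting it back to a `String` without revalidation is sound. `from_utf8_release_unchecked` is a crate-internal helper not shown in this diff; a plausible shape for it, validating in debug builds and skipping the check in release builds (an assumption, not a quote from the crate):

```rust
// Assumed shape of the crate-internal helper used above: enforce the
// UTF-8 invariant in debug builds, trust it in release builds.
unsafe fn from_utf8_release_unchecked(string_bytes: Vec<u8>) -> String {
    if cfg!(debug_assertions) {
        String::from_utf8(string_bytes).unwrap()
    } else {
        String::from_utf8_unchecked(string_bytes)
    }
}
```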