@@ -964,7 +964,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {
                     let value = tokenizer.slice_from(start_pos);
                     tokenizer.advance(1);
-                    return consume_url_end(tokenizer, value.into())
+                    return consume_url_end(tokenizer, start_pos, value.into())
                 }
                 b')' => {
                     let value = tokenizer.slice_from(start_pos);
@@ -974,7 +974,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F'  // non-printable
                 | b'"' | b'\'' | b'(' => {
                     tokenizer.advance(1);
-                    return consume_bad_url(tokenizer)
+                    return consume_bad_url(tokenizer, start_pos)
                 },
                 b'\\' | b'\0' => {
                     // * The tokenizer’s input is UTF-8 since it’s `&str`.
@@ -993,22 +993,20 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
         while !tokenizer.is_eof() {
             match_byte! { tokenizer.consume_byte(),
                 b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {
-                    return consume_url_end(
-                        tokenizer,
-                        // string_bytes is well-formed UTF-8, see other comments.
-                        unsafe { from_utf8_release_unchecked(string_bytes) }.into()
-                    )
+                    // string_bytes is well-formed UTF-8, see other comments.
+                    let string = unsafe { from_utf8_release_unchecked(string_bytes) }.into();
+                    return consume_url_end(tokenizer, start_pos, string)
                 }
                 b')' => {
                     break;
                 }
                 b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F'  // non-printable
                 | b'"' | b'\'' | b'(' => {
-                    return consume_bad_url(tokenizer);
+                    return consume_bad_url(tokenizer, start_pos);
                 }
                 b'\\' => {
                     if tokenizer.has_newline_at(0) {
-                        return consume_bad_url(tokenizer)
+                        return consume_bad_url(tokenizer, start_pos)
                     }

                     // This pushes one well-formed code point to string_bytes
@@ -1028,21 +1026,23 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
         )
     }

-    fn consume_url_end<'a>(tokenizer: &mut Tokenizer<'a>, string: CowRcStr<'a>) -> Token<'a> {
+    fn consume_url_end<'a>(tokenizer: &mut Tokenizer<'a>,
+                           start_pos: SourcePosition,
+                           string: CowRcStr<'a>)
+                           -> Token<'a> {
         while !tokenizer.is_eof() {
             match_byte! { tokenizer.consume_byte(),
                 b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {},
                 b')' => { break },
                 _ => {
-                    return consume_bad_url(tokenizer);
+                    return consume_bad_url(tokenizer, start_pos);
                 }
             }
         }
         UnquotedUrl(string)
     }

-    fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
-        let start_pos = tokenizer.position();
+    fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>, start_pos: SourcePosition) -> Token<'a> {
         // Consume up to the closing )
         while !tokenizer.is_eof() {
             match_byte! { tokenizer.consume_byte(),
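Taken together, these hunks thread the position recorded at the start of the URL token through both error-path helpers, instead of letting `consume_bad_url` read `tokenizer.position()` after the cursor has already advanced past the bytes that belong to the token. The diff is cut off before the rest of `consume_bad_url`'s body, so the following is only a minimal sketch of the pattern; `Tokenizer`, `SourcePosition`, and `Token` here are simplified stand-ins for the real types in tokenizer.rs, and the use of `slice_from(start_pos)` in the error path is an assumption modeled on the non-error arm shown above:

```rust
#[derive(Clone, Copy, Debug)]
struct SourcePosition(usize);

struct Tokenizer<'a> {
    input: &'a str,
    position: usize,
}

#[derive(Debug)]
#[allow(dead_code)]
enum Token<'a> {
    UnquotedUrl(&'a str),
    BadUrl(&'a str),
}

impl<'a> Tokenizer<'a> {
    fn position(&self) -> SourcePosition {
        SourcePosition(self.position)
    }
    fn is_eof(&self) -> bool {
        self.position >= self.input.len()
    }
    fn consume_byte(&mut self) -> u8 {
        let b = self.input.as_bytes()[self.position];
        self.position += 1;
        b
    }
    fn slice_from(&self, start: SourcePosition) -> &'a str {
        &self.input[start.0..self.position]
    }
}

// After the patch, the error path receives `start_pos` from its caller
// rather than calling `tokenizer.position()` itself, which by this
// point would already sit somewhere inside the URL.
fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>, start_pos: SourcePosition) -> Token<'a> {
    // Consume up to the closing ), as in the real helper.
    while !tokenizer.is_eof() {
        if tokenizer.consume_byte() == b')' {
            break;
        }
    }
    // Assumed use of `start_pos`: slice everything consumed since the
    // token began, mirroring `slice_from(start_pos)` in the happy path.
    Token::BadUrl(tokenizer.slice_from(start_pos))
}

fn main() {
    let mut tokenizer = Tokenizer { input: "foo\"bar) tail", position: 0 };
    let start_pos = tokenizer.position(); // recorded before any bytes are consumed
    tokenizer.consume_byte();
    tokenizer.consume_byte();
    tokenizer.consume_byte();
    tokenizer.consume_byte(); // the disallowed `"` is hit mid-URL
    println!("{:?}", consume_bad_url(&mut tokenizer, start_pos)); // BadUrl("foo\"bar)")
}
```

Passing `start_pos` explicitly also lets the patch delete the old `let start_pos = tokenizer.position();` line inside `consume_bad_url`, which recorded a position only after the offending bytes had already been consumed.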