
Commit a720448

Author: bors-servo
Auto merge of #193 - servo:…, r=SimonSapin
Use an array instead of `match` for hexadecimal digits.

This change is Reviewable: https://reviewable.io/reviews/servo/rust-cssparser/193
2 parents 7a1266c + f72149c commit a720448

2 files changed: +10 -16 lines

src/parser.rs (+4 -4)

@@ -788,10 +788,10 @@ pub fn parse_until_before<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
     }
     // FIXME: have a special-purpose tokenizer method for this that does less work.
     loop {
-        if delimiters.contains(Delimiters::from_byte((parser.input.tokenizer).next_byte())) {
+        if delimiters.contains(Delimiters::from_byte(parser.input.tokenizer.next_byte())) {
             break
         }
-        if let Ok(token) = (parser.input.tokenizer).next() {
+        if let Ok(token) = parser.input.tokenizer.next() {
             if let Some(block_type) = BlockType::opening(&token) {
                 consume_until_end_of_block(block_type, &mut parser.input.tokenizer);
             }
@@ -808,11 +808,11 @@ pub fn parse_until_after<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
                                               -> Result <T, ParseError<'i, E>>
     where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
     let result = parser.parse_until_before(delimiters, parse);
-    let next_byte = (parser.input.tokenizer).next_byte();
+    let next_byte = parser.input.tokenizer.next_byte();
     if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) {
         debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
         // We know this byte is ASCII.
-        (parser.input.tokenizer).advance(1);
+        parser.input.tokenizer.advance(1);
         if next_byte == Some(b'{') {
             consume_until_end_of_block(BlockType::CurlyBracket, &mut parser.input.tokenizer);
         }
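The parser.rs hunks only drop redundant grouping parentheses; `(parser.input.tokenizer).next_byte()` and `parser.input.tokenizer.next_byte()` are the same expression to the compiler, so there is no behavior change. A tiny sketch with stand-in types (`Tokenizer` and `Input` here are made up for illustration, not the crate's real structs) to show the equivalence:

```rust
struct Tokenizer;

impl Tokenizer {
    // Stub standing in for the real tokenizer method.
    fn next_byte(&self) -> Option<u8> {
        None
    }
}

struct Input {
    tokenizer: Tokenizer,
}

fn main() {
    let input = Input { tokenizer: Tokenizer };
    // With or without the parentheses, the same field access and method
    // call are performed.
    assert_eq!((input.tokenizer).next_byte(), input.tokenizer.next_byte());
}
```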

src/serializer.rs (+6 -12)

@@ -132,23 +132,17 @@ impl<'a> ToCss for Token<'a> {
     }
 }
 
-fn to_hex_byte(value: u8) -> u8 {
-    match value {
-        0...9 => value + b'0',
-        _ => value - 10 + b'a',
-    }
-}
-
 fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
-    let high = ascii_byte >> 4;
+    static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
     let b3;
     let b4;
-    let bytes = if high > 0 {
-        let low = ascii_byte & 0x0F;
-        b4 = [b'\\', to_hex_byte(high), to_hex_byte(low), b' '];
+    let bytes = if ascii_byte > 0x0F {
+        let high = (ascii_byte >> 4) as usize;
+        let low = (ascii_byte & 0x0F) as usize;
+        b4 = [b'\\', HEX_DIGITS[high], HEX_DIGITS[low], b' '];
         &b4[..]
     } else {
-        b3 = [b'\\', to_hex_byte(ascii_byte), b' '];
+        b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' '];
         &b3[..]
     };
     dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
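For context, here is a minimal standalone sketch of the lookup-table technique the new `hex_escape` relies on: each nibble indexes into a static digit table instead of being converted with a `match`. This mirrors the diff's logic but is a rough sketch, not the crate's actual implementation; the `main` driver and sample bytes are invented for illustration.

```rust
use std::fmt::{self, Write};

// Escape one ASCII byte as a CSS-style hex escape ("\<hex> "), using a
// digit table instead of per-nibble arithmetic.
fn hex_escape<W: Write>(ascii_byte: u8, dest: &mut W) -> fmt::Result {
    static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
    if ascii_byte > 0x0F {
        // Two hex digits: high nibble then low nibble.
        let high = (ascii_byte >> 4) as usize;
        let low = (ascii_byte & 0x0F) as usize;
        write!(dest, "\\{}{} ", HEX_DIGITS[high] as char, HEX_DIGITS[low] as char)
    } else {
        // A single digit is enough for values 0x00..=0x0F.
        write!(dest, "\\{} ", HEX_DIGITS[ascii_byte as usize] as char)
    }
}

fn main() -> fmt::Result {
    let mut out = String::new();
    hex_escape(0x01, &mut out)?; // control character U+0001
    hex_escape(0x7F, &mut out)?; // DEL
    assert_eq!(out, "\\1 \\7f ");
    println!("{}", out);
    Ok(())
}
```

Because the shifted or masked nibble is always in the range 0..16, the table index can never go out of bounds; the only branch left is the choice between one and two digits, which is what the diff trades the old `match` for.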
