Commit be0acdf

Run more recent rustfmt

1 parent: 831f795

5 files changed: +72 -67 lines changed

build/match_byte.rs (+4 -1)

@@ -168,7 +168,10 @@ impl Fold for MatchByteParser {
         match stmt {
             syn::Stmt::Item(syn::Item::Macro(syn::ItemMacro { ref mac, .. })) => {
                 if mac.path == parse_quote!(match_byte) {
-                    return syn::fold::fold_stmt(self, syn::Stmt::Expr(expand_match_byte(&mac.tts)));
+                    return syn::fold::fold_stmt(
+                        self,
+                        syn::Stmt::Expr(expand_match_byte(&mac.tts)),
+                    );
                 }
             }
             _ => {}
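Note: `match_byte!` is not an ordinary macro. This file lives under build/, and the `Fold` pass above replaces each `match_byte!` statement with generated Rust before the crate proper is compiled. As an illustrative sketch of the kind of table-driven dispatch such an expansion can produce (assumed names and shape, not the generator's literal output):

    // Hypothetical sketch: a 256-entry table maps every byte to the index
    // of the arm that handles it, so dispatch is one indexed load instead
    // of a chain of range comparisons.
    const TABLE: [u8; 256] = {
        let mut t = [0u8; 256];
        let mut b = 0usize;
        while b < 256 {
            if b >= b'a' as usize && b <= b'z' as usize {
                t[b] = 1; // arm for b'a'...b'z'
            }
            b += 1;
        }
        t
    };

    fn classify(byte: u8) -> u32 {
        match TABLE[byte as usize] {
            1 => 1, // body of the b'a'...b'z' arm would go here
            _ => 0, // fallthrough arm
        }
    }

    fn main() {
        assert_eq!(classify(b'q'), 1);
        assert_eq!(classify(b'!'), 0);
    }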

procedural-masquerade/lib.rs (+27 -27)

@@ -244,33 +244,33 @@ macro_rules! define_invoke_proc_macro {
         #[doc(hidden)]
         #[macro_export]
         macro_rules! $macro_name {
-        ($proc_macro_name: ident ! $paren: tt) => {
-            #[derive($proc_macro_name)]
-            #[allow(unused)]
-            enum ProceduralMasqueradeDummyType {
-                // The magic happens here.
-                //
-                // We use an `enum` with an explicit discriminant
-                // because that is the only case where a type definition
-                // can contain a (const) expression.
-                //
-                // `(0, "foo").0` evaluates to 0, with the `"foo"` part ignored.
-                //
-                // By the time the `#[proc_macro_derive]` function
-                // implementing `#[derive($proc_macro_name)]` is called,
-                // `$paren` has already been replaced with the input of this inner macro,
-                // but `stringify!` has not been expanded yet.
-                //
-                // This is how arbitrary tokens can be inserted
-                // in the input to the `#[proc_macro_derive]` function.
-                //
-                // Later, `stringify!(...)` is expanded into a string literal
-                // which is then ignored.
-                // Using `stringify!` enables passing arbitrary tokens
-                // rather than only what can be parsed as a const expression.
-                Input = (0, stringify! $paren ).0
-            }
-        }
+            ($proc_macro_name: ident ! $paren: tt) => {
+                #[derive($proc_macro_name)]
+                #[allow(unused)]
+                enum ProceduralMasqueradeDummyType {
+                    // The magic happens here.
+                    //
+                    // We use an `enum` with an explicit discriminant
+                    // because that is the only case where a type definition
+                    // can contain a (const) expression.
+                    //
+                    // `(0, "foo").0` evaluates to 0, with the `"foo"` part ignored.
+                    //
+                    // By the time the `#[proc_macro_derive]` function
+                    // implementing `#[derive($proc_macro_name)]` is called,
+                    // `$paren` has already been replaced with the input of this inner macro,
+                    // but `stringify!` has not been expanded yet.
+                    //
+                    // This is how arbitrary tokens can be inserted
+                    // in the input to the `#[proc_macro_derive]` function.
+                    //
+                    // Later, `stringify!(...)` is expanded into a string literal
+                    // which is then ignored.
+                    // Using `stringify!` enables passing arbitrary tokens
+                    // rather than only what can be parsed as a const expression.
+                    Input = (0, stringify! $paren ).0
+                }
+            }
         }
     };
 }
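To see why the comments in this hunk hold, here is a self-contained illustration of the dummy-type trick (a minimal sketch; the real expansion also carries the `#[derive($proc_macro_name)]` attribute, which is what hands these tokens to the proc-macro implementation):

    // Once `stringify!` expands, the discriminant is `(0, "...").0`: the
    // string literal is ignored and the value is just 0, so the arbitrary
    // payload tokens ride along without affecting the type definition.
    enum ProceduralMasqueradeDummyType {
        Input = (0, stringify!(fn foo() {})).0,
    }

    fn main() {
        assert_eq!(ProceduralMasqueradeDummyType::Input as i32, 0);
    }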

src/parser.rs (+11 -6)

@@ -354,7 +354,9 @@ impl<'i: 't, 't> Parser<'i, 't> {
                 ..
             }) => Ok(()),
             Err(e) => unreachable!("Unexpected error encountered: {:?}", e),
-            Ok(t) => Err(start.source_location().new_basic_unexpected_token_error(t.clone())),
+            Ok(t) => Err(start
+                .source_location()
+                .new_basic_unexpected_token_error(t.clone())),
         };
         self.reset(&start);
         result
@@ -433,7 +435,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
     pub fn new_error_for_next_token<E>(&mut self) -> ParseError<'i, E> {
         let token = match self.next() {
             Ok(token) => token.clone(),
-            Err(e) => return e.into()
+            Err(e) => return e.into(),
         };
         self.new_error(BasicParseErrorKind::UnexpectedToken(token))
     }
@@ -505,7 +507,9 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// The old name of `try_parse`, which requires raw identifiers in the Rust 2018 edition.
     #[inline]
     pub fn try<F, T, E>(&mut self, thing: F) -> Result<T, E>
-        where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E> {
+    where
+        F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E>,
+    {
         self.try_parse(thing)
     }

@@ -997,9 +1001,10 @@ where
 {
     let result = parser.parse_until_before(delimiters, parse);
     let next_byte = parser.input.tokenizer.next_byte();
-    if next_byte.is_some() && !parser
-        .stop_before
-        .contains(Delimiters::from_byte(next_byte))
+    if next_byte.is_some()
+        && !parser
+            .stop_before
+            .contains(Delimiters::from_byte(next_byte))
     {
         debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
         // We know this byte is ASCII.
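Context for the `try` hunk: `try` is a reserved keyword in the 2018 edition, so callers there must write `r#try`, which is why the method was renamed to `try_parse`. A minimal usage sketch of `try_parse`'s rewind-on-failure behavior (assuming the crate's public `Parser` API as of this commit):

    use cssparser::{Parser, ParserInput};

    fn main() {
        let mut input = ParserInput::new("foo, bar");
        let mut parser = Parser::new(&mut input);
        // A failed speculative parse rewinds the parser, so the
        // `expect_comma` attempt consumes no input.
        assert!(parser.try_parse(|p| p.expect_comma()).is_err());
        // The identifier is still the next token.
        assert!(parser.expect_ident_matching("foo").is_ok());
    }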

src/tests.rs (+15 -19)

@@ -36,10 +36,10 @@ fn almost_equals(a: &Json, b: &Json) -> bool {
         (&Json::Boolean(a), &Json::Boolean(b)) => a == b,
         (&Json::String(ref a), &Json::String(ref b)) => a == b,
         (&Json::Array(ref a), &Json::Array(ref b)) => {
-            a.len() == b.len() && a
-                .iter()
-                .zip(b.iter())
-                .all(|(ref a, ref b)| almost_equals(*a, *b))
+            a.len() == b.len()
+                && a.iter()
+                    .zip(b.iter())
+                    .all(|(ref a, ref b)| almost_equals(*a, *b))
         }
         (&Json::Object(_), &Json::Object(_)) => panic!("Not implemented"),
         (&Json::Null, &Json::Null) => true,
@@ -277,13 +277,11 @@ fn outer_block_end_consumed() {
     let mut input = ParserInput::new("(calc(true))");
     let mut input = Parser::new(&mut input);
     assert!(input.expect_parenthesis_block().is_ok());
-    assert!(
-        input
-            .parse_nested_block(|input| input
-                .expect_function_matching("calc")
-                .map_err(Into::<ParseError<()>>::into))
-            .is_ok()
-    );
+    assert!(input
+        .parse_nested_block(|input| input
+            .expect_function_matching("calc")
+            .map_err(Into::<ParseError<()>>::into))
+        .is_ok());
     println!("{:?}", input.position());
     assert!(input.next().is_err());
 }
@@ -682,14 +680,12 @@ fn line_delimited() {
     let mut input = ParserInput::new(" { foo ; bar } baz;,");
     let mut input = Parser::new(&mut input);
     assert_eq!(input.next(), Ok(&Token::CurlyBracketBlock));
-    assert!(
-        {
-            let result: Result<_, ParseError<()>> =
-                input.parse_until_after(Delimiter::Semicolon, |_| Ok(42));
-            result
-        }
-        .is_err()
-    );
+    assert!({
+        let result: Result<_, ParseError<()>> =
+            input.parse_until_after(Delimiter::Semicolon, |_| Ok(42));
+        result
+    }
+    .is_err());
     assert_eq!(input.next(), Ok(&Token::Comma));
     assert!(input.next().is_err());
 }

src/tokenizer.rs (+15 -14)

@@ -765,7 +765,7 @@ fn consume_quoted_string<'a>(
     single_quote: bool,
 ) -> Result<CowRcStr<'a>, CowRcStr<'a>> {
     tokenizer.advance(1); // Skip the initial quote
-    // start_pos is at code point boundary, after " or '
+                          // start_pos is at code point boundary, after " or '
     let start_pos = tokenizer.position();
     let mut string_bytes;
     loop {
@@ -874,20 +874,21 @@ fn consume_quoted_string<'a>(
 
 #[inline]
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
-    !tokenizer.is_eof() && match_byte! { tokenizer.next_byte_unchecked(),
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
-        b'-' => {
-            tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
-                    true
-                }
-                b'\\' => { !tokenizer.has_newline_at(1) }
-                b => { !b.is_ascii() },
-            }
-        },
-        b'\\' => { !tokenizer.has_newline_at(1) },
-        b => { !b.is_ascii() },
-    }
+    !tokenizer.is_eof()
+        && match_byte! { tokenizer.next_byte_unchecked(),
+            b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+            b'-' => {
+                tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
+                    b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                        true
+                    }
+                    b'\\' => { !tokenizer.has_newline_at(1) }
+                    b => { !b.is_ascii() },
+                }
+            },
+            b'\\' => { !tokenizer.has_newline_at(1) },
+            b => { !b.is_ascii() },
+        }
 }
 
 fn consume_ident_like<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
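For readers who have not seen `match_byte!` before (see the note under build/match_byte.rs), the predicate in this hunk amounts to roughly the following plain-Rust decision tree over a byte slice (an illustrative sketch, not the crate's code; the real version reads through tokenizer cursors and the build-time dispatch table):

    // Sketch of the same logic as `is_ident_start`, on a byte slice.
    fn is_ident_start_sketch(bytes: &[u8]) -> bool {
        // Can `b` (with `rest` after it) begin an identifier?
        fn begins(b: u8, rest: &[u8]) -> bool {
            match b {
                b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => true,
                // A backslash counts only if it starts a valid escape,
                // i.e. it is not followed by a newline.
                b'\\' => !matches!(
                    rest.first(),
                    Some(&b'\n') | Some(&b'\r') | Some(&b'\x0C')
                ),
                b => !b.is_ascii(),
            }
        }
        match bytes {
            // `-` may start an identifier if what follows could continue
            // one (including a second `-`).
            [b'-', second, rest @ ..] => *second == b'-' || begins(*second, rest),
            [first, rest @ ..] => begins(*first, rest),
            [] => false,
        }
    }

    fn main() {
        assert!(is_ident_start_sketch(b"foo"));
        assert!(is_ident_start_sketch(b"--var"));
        assert!(!is_ident_start_sketch(b"4ever"));
    }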
