/src/rust-cssparser/fuzz/fuzz_targets/cssparser.rs
Line | Count | Source |
1 | | #![no_main] |
2 | | |
3 | | use cssparser::*; |
4 | | |
/// When true, dump each parsed token (indented by nesting depth) and the
/// before/after serializations to stdout while fuzzing.
const DEBUG: bool = false;
6 | | |
7 | 2.50k | fn parse_and_serialize(input: &str, preserving_comments: bool) -> String { |
8 | 2.50k | let mut input = ParserInput::new(input); |
9 | 2.50k | let mut parser = Parser::new(&mut input); |
10 | 2.50k | let mut serialization = String::new(); |
11 | 2.50k | let result = do_parse_and_serialize( |
12 | 2.50k | &mut parser, |
13 | 2.50k | preserving_comments, |
14 | 2.50k | TokenSerializationType::nothing(), |
15 | 2.50k | &mut serialization, |
16 | | 0, |
17 | | ); |
18 | 2.50k | if result.is_err() { |
19 | 341 | return String::new(); |
20 | 2.16k | } |
21 | 2.16k | serialization |
22 | 2.50k | } |
23 | | |
/// Consumes every token available from `input`, appending each token's CSS
/// serialization to `serialization`, recursing into nested blocks
/// (functions, parentheses, brackets, braces) and re-emitting their closing
/// tokens.
///
/// `previous_token_type` is the serialization type of the token emitted just
/// before this call (used to decide whether a separator is needed);
/// `indent_level` is only used for DEBUG printing. Returns `Err` if a
/// parse-error token is encountered; otherwise `Ok(())` when the input (or
/// the current nested block) is exhausted.
fn do_parse_and_serialize<'i>(
    input: &mut Parser<'i, '_>,
    preserving_comments: bool,
    mut previous_token_type: TokenSerializationType,
    serialization: &mut String,
    indent_level: usize,
) -> Result<(), ParseError<'i, ()>> {
    loop {
        // Comments only appear as tokens when explicitly preserved;
        // whitespace is always significant for round-tripping.
        let token = if preserving_comments {
            input.next_including_whitespace_and_comments()
        } else {
            input.next_including_whitespace()
        };
        let token = match token {
            Ok(token) => token,
            // End of input (or end of the current nested block): done.
            Err(..) => break,
        };
        if DEBUG {
            // Indent proportionally to nesting depth for readability.
            for _ in 0..indent_level {
                print!(" ");
            }
            println!("{:?}", token);
        }
        if token.is_parse_error() {
            // Clone so the error can own the token past this borrow of input.
            let token = token.clone();
            return Err(input.new_unexpected_token_error(token))
        }
        let token_type = token.serialization_type();
        // Insert an empty comment wherever two adjacent serialized tokens
        // would otherwise lex back as a single different token.
        if previous_token_type.needs_separator_when_before(token_type) {
            serialization.push_str("/**/");
        }
        previous_token_type = token_type;
        token.to_css(serialization).unwrap();
        // Block-opening tokens need a recursive descent plus a matching
        // closing token; everything else is fully handled above.
        let closing_token = match token {
            Token::Function(_) | Token::ParenthesisBlock => Token::CloseParenthesis,
            Token::SquareBracketBlock => Token::CloseSquareBracket,
            Token::CurlyBracketBlock => Token::CloseCurlyBracket,
            _ => continue,
        };

        input.parse_nested_block(|input| -> Result<_, ParseError<()>> {
            do_parse_and_serialize(input, preserving_comments, previous_token_type, serialization, indent_level + 1)
        })?;

        closing_token.to_css(serialization).unwrap();
    }
    Ok(())
}
72 | | |
73 | 1.25k | fn fuzz(data: &str, preserving_comments: bool) { |
74 | 1.25k | let serialization = parse_and_serialize(data, preserving_comments); |
75 | 1.25k | let reserialization = parse_and_serialize(&serialization, preserving_comments); |
76 | 1.25k | if DEBUG { |
77 | 0 | println!("IN: {:?}", serialization); |
78 | 0 | println!("OUT: {:?}", reserialization); |
79 | 1.25k | } |
80 | | // TODO: This should ideally pass, but it doesn't for numbers near our |
81 | | // precision limits, so parsing e.g., 9999995e-45 generates a serialization |
82 | | // of 10e-39 because of dtoa rounding, and parsing _that_ generates a |
83 | | // serialization of 1e-38. |
84 | | // |
85 | | // assert_eq!( |
86 | | // serialization, reserialization, |
87 | | // "Serialization should be idempotent" |
88 | | // ); |
89 | 1.25k | } |
90 | | |
// libFuzzer entry point: each fuzz iteration hands us an arbitrary &str.
libfuzzer_sys::fuzz_target!(|data: &str| {
    fuzz(data, false);
    // TODO(emilio): Serialization when preserving comments is not idempotent.
    // But in browsers we never preserve comments so that's ok...
    // fuzz(data, true);
});