/src/rust-cssparser/fuzz/fuzz_targets/cssparser.rs
Line | Count | Source (jump to first uncovered line) |
1 | | #![no_main] |
2 | | |
3 | | use cssparser::*; |
4 | | |
5 | | const DEBUG: bool = false; |
6 | | |
7 | 14.9k | fn parse_and_serialize(input: &str, preserving_comments: bool) -> String { |
8 | 14.9k | let mut input = ParserInput::new(input); |
9 | 14.9k | let mut parser = Parser::new(&mut input); |
10 | 14.9k | let mut serialization = String::new(); |
11 | 14.9k | let result = do_parse_and_serialize( |
12 | 14.9k | &mut parser, |
13 | 14.9k | preserving_comments, |
14 | 14.9k | TokenSerializationType::nothing(), |
15 | 14.9k | &mut serialization, |
16 | 14.9k | 0, |
17 | 14.9k | ); |
18 | 14.9k | if result.is_err() { |
19 | 1.27k | return String::new(); |
20 | 13.7k | } |
21 | 13.7k | serialization |
22 | 14.9k | } |
23 | | |
/// Consumes tokens from `input`, appending each token's serialized form to
/// `serialization`, and recurses into nested blocks (functions, parens,
/// square brackets, curly brackets).
///
/// * `preserving_comments` — when true, comment tokens are read from the
///   stream as well (via `next_including_whitespace_and_comments`).
/// * `previous_token_type` — serialization type of the last token written,
///   used to decide when an empty-comment (`/**/`) separator is required.
/// * `indent_level` — used only by the `DEBUG` pretty-printer.
///
/// Returns `Err` with an unexpected-token error when a parse-error token
/// (e.g. a bad string or bad URL) is encountered; `Ok(())` when the stream
/// (or nested block) is exhausted.
fn do_parse_and_serialize<'i>(
    input: &mut Parser<'i, '_>,
    preserving_comments: bool,
    mut previous_token_type: TokenSerializationType,
    serialization: &mut String,
    indent_level: usize,
) -> Result<(), ParseError<'i, ()>> {
    loop {
        // Comments are only surfaced when explicitly requested.
        let token = if preserving_comments {
            input.next_including_whitespace_and_comments()
        } else {
            input.next_including_whitespace()
        };
        // End of the stream (or of the current nested block) is not an
        // error for this function — just stop consuming.
        let token = match token {
            Ok(token) => token,
            Err(..) => break,
        };
        if DEBUG {
            for _ in 0..indent_level {
                print!(" ");
            }
            println!("{:?}", token);
        }
        // Abort the whole round-trip on tokenizer-level parse errors.
        if token.is_parse_error() {
            let token = token.clone();
            return Err(input.new_unexpected_token_error(token))
        }
        // Insert an empty comment where re-tokenization would otherwise
        // merge two adjacent tokens into one.
        let token_type = token.serialization_type();
        if previous_token_type.needs_separator_when_before(token_type) {
            serialization.push_str("/**/");
        }
        previous_token_type = token_type;
        token.to_css(serialization).unwrap();
        // Block-opening tokens require descending into the nested block and
        // emitting the matching closer afterwards; everything else is done.
        let closing_token = match token {
            Token::Function(_) | Token::ParenthesisBlock => Token::CloseParenthesis,
            Token::SquareBracketBlock => Token::CloseSquareBracket,
            Token::CurlyBracketBlock => Token::CloseCurlyBracket,
            _ => continue,
        };

        // NOTE(review): after the nested block, `previous_token_type` still
        // refers to the opening token rather than the closing bracket —
        // presumably harmless for separator insertion; confirm against
        // cssparser's serialization rules.
        input.parse_nested_block(|input| -> Result<_, ParseError<()>> {
            do_parse_and_serialize(input, preserving_comments, previous_token_type, serialization, indent_level + 1)
        })?;

        closing_token.to_css(serialization).unwrap();
    }
    Ok(())
}
72 | | |
73 | 7.49k | fn fuzz(data: &str, preserving_comments: bool) { |
74 | 7.49k | let serialization = parse_and_serialize(data, preserving_comments); |
75 | 7.49k | let reserialization = parse_and_serialize(&serialization, preserving_comments); |
76 | 7.49k | if DEBUG { |
77 | 0 | println!("IN: {:?}", serialization); |
78 | 0 | println!("OUT: {:?}", reserialization); |
79 | 7.49k | } |
80 | | // TODO: This should ideally pass, but it doesn't for numbers near our |
81 | | // precision limits, so parsing e.g., 9999995e-45 generates a serialization |
82 | | // of 10e-39 because of dtoa rounding, and parsing _that_ generates a |
83 | | // serialization of 1e-38. |
84 | | // |
85 | | // assert_eq!( |
86 | | // serialization, reserialization, |
87 | | // "Serialization should be idempotent" |
88 | | // ); |
89 | 7.49k | } |
90 | | |
// libFuzzer entry point: each generated UTF-8 input is run through the
// comment-stripping round-trip check.
libfuzzer_sys::fuzz_target!(|data: &str| {
    fuzz(data, false);
    // TODO(emilio): Serialization when preserving comments is not idempotent.
    // But in browsers we never preserve comments so that's ok...
    // fuzz(data, true);
});