Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2025 Google LLC |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | //////////////////////////////////////////////////////////////////////////////// |
16 | | #include <stddef.h> |
17 | | #include <stdint.h> |
18 | | #include <string.h> |
19 | | #include <vector> |
20 | | #include <algorithm> |
21 | | |
22 | | #include <fuzzer/FuzzedDataProvider.h> |
23 | | extern "C" { |
24 | | #include "md5_ext.h" |
25 | | #include "sha256_ext.h" |
26 | | } |
27 | | |
28 | | // Function pointer types for the external hash APIs being fuzzed
29 | | template <typename HashType> using InitOnceFn = void (*)(HashType*); |
30 | | template <typename HashType> using UpdateFn = void (*)(HashType*, size_t, const uint8_t*); |
31 | | template <typename HashType> using FinishFn = void (*)(HashType*, uint8_t*); |
32 | | template <typename HashType> using DeinitFn = void (*)(HashType*); |
33 | | |
34 | | // Generic hashing flow that fuzzes the same procedure for different algorithms
35 | | template <typename HashType> |
36 | | static void fuzz_hash_ext_multi(FuzzedDataProvider &fdp, |
37 | | size_t block_size, |
38 | | InitOnceFn<HashType> init_once, |
39 | | UpdateFn<HashType> update_fn, |
40 | | FinishFn<HashType> finish_fn, |
41 | | DeinitFn<HashType> deinit_fn, |
42 | 2.05k | size_t digest_size) { |
43 | 2.05k | if (!fdp.remaining_bytes()) { |
44 | 22 | return; |
45 | 22 | } |
46 | | |
47 | | // Pull a random slice of data for fuzzing |
48 | 2.03k | size_t take_len = fdp.ConsumeIntegralInRange<size_t>(0, fdp.remaining_bytes()); |
49 | 2.03k | std::vector<uint8_t> input_bytes = fdp.ConsumeBytes<uint8_t>(take_len); |
50 | | |
51 | | // Create 1 to 4 independent hashing contexts, each with its own digest buffer
52 | 2.03k | const unsigned num_contexts = fdp.ConsumeIntegralInRange<unsigned>(1, 4); |
53 | 2.03k | std::vector<HashType> contexts(num_contexts); |
54 | 2.03k | std::vector<std::vector<uint8_t>> digests(num_contexts, std::vector<uint8_t>(digest_size)); |
55 | 6.56k | for (unsigned i = 0; i < num_contexts; i++) { |
56 | 4.52k | init_once(&contexts[i]); |
57 | 4.52k | } |
58 | | |
59 | | // Intentionally misalign the data pointer to stress alignment-sensitive paths
60 | 2.03k | const size_t misalign_pad = fdp.ConsumeIntegralInRange<size_t>(0, 64); |
61 | 2.03k | std::vector<uint8_t> scratch_buf(misalign_pad + input_bytes.size()); |
62 | 2.03k | if (!input_bytes.empty()) { |
63 | 1.38k | memcpy(scratch_buf.data() + misalign_pad, input_bytes.data(), input_bytes.size()); |
64 | 1.38k | } |
65 | | |
66 | | // Cursor and remaining-bytes counter to track progress across the hash update iterations
67 | 2.03k | const uint8_t *cursor = scratch_buf.data() + misalign_pad; |
68 | 2.03k | size_t remaining = input_bytes.size(); |
69 | | |
70 | | // Perform multiple hash update iterations on the raw data |
71 | 2.03k | unsigned num_iterations = fdp.ConsumeIntegralInRange<unsigned>(1, 4); |
72 | 5.06k | while (num_iterations-- && remaining > 0) { |
73 | | // Pick which context to feed this iteration |
74 | 3.03k | const unsigned ctx_index = (num_contexts == 1) ? 0 : fdp.ConsumeIntegralInRange<unsigned>(0, num_contexts - 1); |
75 | | |
76 | | // Choose a chunking pattern relative to block size. |
77 | 3.03k | enum Pattern { LESS1, EQ, PLUS1, SMALL, RANDOM, TAIL, HALT }; |
78 | 3.03k | Pattern pattern = fdp.PickValueInArray<Pattern>({LESS1, EQ, PLUS1, SMALL, RANDOM, TAIL, HALT}); |
79 | | |
80 | 3.03k | size_t chunk_len = 0; |
81 | 3.03k | switch (pattern) { |
82 | 637 | case LESS1: { |
83 | | // Consume one byte less than the block size from the raw data for this iteration
84 | 637 | if (block_size > 1) { |
85 | 637 | chunk_len = std::min(remaining, block_size - 1); |
86 | 637 | } |
87 | 637 | break; |
88 | 0 | } |
89 | 213 | case EQ: { |
90 | | // Consume exactly block_size bytes from the raw data for this iteration
91 | 213 | chunk_len = std::min(remaining, block_size); |
92 | 213 | break; |
93 | 0 | } |
94 | 184 | case PLUS1: { |
95 | | // Consume one byte more than the block size from the raw data for this iteration
96 | 184 | chunk_len = std::min(remaining, block_size + 1); |
97 | 184 | break; |
98 | 0 | } |
99 | 512 | case SMALL: { |
100 | | // Consume 1~32 bytes from the raw data for this iteration |
101 | 512 | size_t small_len = (size_t)fdp.ConsumeIntegralInRange<int>(1, 32); |
102 | 512 | chunk_len = std::min(remaining, small_len); |
103 | 512 | break; |
104 | 0 | } |
105 | 484 | case RANDOM: { |
106 | | // Consume random bytes from the raw data for this iteration |
107 | 484 | chunk_len = (remaining >= 1) ? (size_t)fdp.ConsumeIntegralInRange<size_t>(1, remaining) : 0; |
108 | 484 | break; |
109 | 0 | } |
110 | 80 | case TAIL: { |
111 | | // Consume all remaining bytes from the raw data for this iteration |
112 | 80 | chunk_len = remaining; |
113 | 80 | break; |
114 | 0 | } |
115 | 923 | case HALT: { |
116 | | // Consume small chunks and optionally finish (reset) the context or halt this iteration early
117 | 923 | size_t step = std::max<size_t>(1, fdp.ConsumeIntegralInRange<size_t>(1, block_size)); |
118 | 923 | size_t loops = fdp.ConsumeIntegralInRange<size_t>(1, 4); |
119 | 2.61k | for (size_t j = 0; j < loops && remaining > 0; j++) { |
120 | 1.69k | size_t w = std::min(remaining, step); |
121 | 1.69k | update_fn(&contexts[ctx_index], w, cursor); |
122 | 1.69k | cursor += w; |
123 | 1.69k | remaining -= w; |
124 | 1.69k | } |
125 | | |
126 | | // Randomly finish the stream (finish_reset also reinitialises the context)
127 | 923 | if (fdp.ConsumeBool()) { |
128 | 370 | finish_fn(&contexts[ctx_index], digests[ctx_index].data()); |
129 | 370 | } |
130 | 923 | continue; |
131 | 0 | } |
132 | 3.03k | } |
133 | | |
134 | 2.11k | if (chunk_len == 0 || chunk_len > remaining) { |
135 | 0 | continue; |
136 | 0 | } |
137 | | |
138 | | // Fuzz the update function |
139 | 2.11k | update_fn(&contexts[ctx_index], chunk_len, cursor); |
140 | 2.11k | cursor += chunk_len; |
141 | 2.11k | remaining -= chunk_len; |
142 | 2.11k | } |
143 | | |
144 | | // Finalize all active contexts (finish_reset). |
145 | 6.56k | for (unsigned i = 0; i < num_contexts; i++) { |
146 | 4.52k | finish_fn(&contexts[i], digests[i].data()); |
147 | 4.52k | } |
148 | | |
149 | | // Additional fuzzing of a context-chaining approach: feed one context's digest into another.
150 | 2.03k | if (num_contexts >= 2 && digest_size && fdp.ConsumeBool()) { |
151 | 647 | unsigned src_idx = fdp.ConsumeIntegralInRange<unsigned>(0, num_contexts - 1); |
152 | 647 | unsigned dst_idx = fdp.ConsumeIntegralInRange<unsigned>(0, num_contexts - 1); |
153 | 647 | if (src_idx != dst_idx) { |
154 | 400 | size_t offset = fdp.ConsumeIntegralInRange<size_t>(0, digest_size - 1); |
155 | 400 | size_t max_avail = digest_size - offset; // >= 1 |
156 | 400 | size_t feed_len = fdp.ConsumeIntegralInRange<size_t>(1, max_avail); |
157 | 400 | update_fn(&contexts[dst_idx], feed_len, digests[src_idx].data() + offset); |
158 | 400 | finish_fn(&contexts[dst_idx], digests[dst_idx].data()); |
159 | 400 | } |
160 | 647 | } |
161 | | |
162 | | // Deinitialise all contexts before returning
163 | 6.56k | for (unsigned i = 0; i < num_contexts; i++) { |
164 | 4.52k | deinit_fn(&contexts[i]); |
165 | 4.52k | } |
166 | 2.03k | }
fuzz_crypto_ext.cpp:void fuzz_hash_ext_multi<mhd_Md5CtxExt>(FuzzedDataProvider&, unsigned long, void (*)(mhd_Md5CtxExt*), void (*)(mhd_Md5CtxExt*, unsigned long, unsigned char const*), void (*)(mhd_Md5CtxExt*, unsigned char*), void (*)(mhd_Md5CtxExt*), unsigned long): 1.07k calls
fuzz_crypto_ext.cpp:void fuzz_hash_ext_multi<mhd_Sha256CtxExt>(FuzzedDataProvider&, unsigned long, void (*)(mhd_Sha256CtxExt*), void (*)(mhd_Sha256CtxExt*, unsigned long, unsigned char const*), void (*)(mhd_Sha256CtxExt*, unsigned char*), void (*)(mhd_Sha256CtxExt*), unsigned long): 983 calls
167 | | |
168 | 1.11k | extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { |
169 | 1.11k | FuzzedDataProvider fdp(data, size); |
170 | | |
171 | 3.17k | for (unsigned i = 0; i < fdp.ConsumeIntegralInRange<unsigned>(1, 4); i++) { |
172 | 2.05k | if (fdp.ConsumeBool()) { |
173 | 1.07k | fuzz_hash_ext_multi<struct mhd_Md5CtxExt>( |
174 | 1.07k | fdp, 64, |
175 | 1.07k | mhd_MD5_init_one_time, mhd_MD5_update, mhd_MD5_finish_reset, mhd_MD5_deinit, |
176 | 1.07k | mhd_MD5_DIGEST_SIZE); |
177 | 1.07k | } else { |
178 | 983 | fuzz_hash_ext_multi<struct mhd_Sha256CtxExt>( |
179 | 983 | fdp, 64, |
180 | 983 | mhd_SHA256_init_one_time, mhd_SHA256_update, mhd_SHA256_finish_reset, mhd_SHA256_deinit, |
181 | 983 | mhd_SHA256_DIGEST_SIZE); |
182 | 983 | } |
183 | 2.05k | } |
184 | 1.11k | return 0; |
185 | 1.11k | } |
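
For reference, the streaming call sequence this harness drives (init_one_time, update with a length and data pointer, finish_reset into a digest buffer, then deinit) can also be exercised in a plain standalone program. The sketch below is minimal and illustrative: it assumes the same md5_ext.h header and the function signatures visible in the listing above, that mhd_MD5_DIGEST_SIZE is a compile-time constant, and an arbitrary example input; it is not drawn from the report itself.

// Minimal standalone sketch of the MD5 streaming sequence exercised above.
// Assumes md5_ext.h from the same libmicrohttpd build and that
// mhd_MD5_DIGEST_SIZE is a compile-time constant; the input is illustrative.
#include <cstddef>
#include <cstdint>
#include <cstdio>

extern "C" {
#include "md5_ext.h"
}

int main(void) {
  struct mhd_Md5CtxExt ctx;
  uint8_t digest[mhd_MD5_DIGEST_SIZE];
  const uint8_t msg[] = "example input";   // illustrative payload
  const size_t msg_len = sizeof(msg) - 1;  // exclude the trailing NUL

  mhd_MD5_init_one_time(&ctx);                 // initialise the context once
  mhd_MD5_update(&ctx, 7, msg);                // feed the data in two chunks,
  mhd_MD5_update(&ctx, msg_len - 7, msg + 7);  // mirroring the harness's chunked updates
  mhd_MD5_finish_reset(&ctx, digest);          // write the digest and reset the context
  mhd_MD5_deinit(&ctx);                        // release any external-library state

  for (size_t i = 0; i < mhd_MD5_DIGEST_SIZE; i++) {
    printf("%02x", digest[i]);
  }
  printf("\n");
  return 0;
}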