/src/libultrahdr/fuzzer/ultrahdr_legacy_fuzzer.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2023 The Android Open Source Project |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <fuzzer/FuzzedDataProvider.h> |
18 | | #include <algorithm> |
19 | | #include <iostream> |
20 | | #include <memory> |
21 | | #include <random> |
22 | | |
23 | | #include "ultrahdr/ultrahdrcommon.h" |
24 | | #include "ultrahdr/gainmapmath.h" |
25 | | #include "ultrahdr/jpegr.h" |
26 | | |
27 | | using namespace ultrahdr; |
28 | | |
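 | | // Fuzzes the legacy JPEG/R (jpegr) encode and decode APIs: image dimensions, |
 | | // strides, color gamuts, transfer functions, output formats and gain-map |
 | | // parameters are all derived from the fuzzer input. |
 | |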
29 | | // Color gamuts for image data, sync with ultrahdr.h |
30 | | const int kCgMin = ULTRAHDR_COLORGAMUT_UNSPECIFIED; |
31 | | const int kCgMax = ULTRAHDR_COLORGAMUT_BT2100; |
32 | | |
33 | | // Transfer functions for image data, sync with ultrahdr.h |
34 | | const int kTfMin = ULTRAHDR_TF_UNSPECIFIED; |
35 | | const int kTfMax = ULTRAHDR_TF_SRGB; |
36 | | |
37 | | // Output formats for decoded image data, sync with ultrahdr.h |
38 | | const int kOfMin = ULTRAHDR_OUTPUT_UNSPECIFIED; |
39 | | const int kOfMax = ULTRAHDR_OUTPUT_HDR_HLG; |
40 | | |
41 | | // quality factor (range deliberately extends past the valid 0-100 interval) |
42 | | const int kQfMin = -10; |
43 | | const int kQfMax = 110; |
44 | | |
45 | | class UltraHdrEncFuzzer { |
46 | | public: |
47 | 0 | UltraHdrEncFuzzer(const uint8_t* data, size_t size) : mFdp(data, size) {}; |
48 | | void process(); |
49 | | template <typename T> |
50 | | void fillBuffer(T* data, int width, int height, int stride); |
51 | | |
52 | | private: |
53 | | FuzzedDataProvider mFdp; |
54 | | }; |
55 | | |
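 | | // Fills a plane with fuzzer data: one row's worth of values is consumed from |
 | | // the fuzzer, then every row is tiled with shuffled copies of that row so |
 | | // large planes do not exhaust the remaining input. |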
56 | | template <typename T> |
57 | 0 | void UltraHdrEncFuzzer::fillBuffer(T* data, int width, int height, int stride) { |
58 | 0 | if (!mFdp.remaining_bytes()) return; |
59 | | |
60 | 0 | T* tmp = data; |
61 | 0 | std::vector<T> buffer(width); |
62 | 0 | for (size_t i = 0; i < buffer.size(); i++) { |
63 | 0 | buffer[i] = mFdp.ConsumeIntegral<T>(); |
64 | 0 | } |
65 | 0 | for (int j = 0; j < height; j++) { |
66 | 0 | for (int i = 0; i < width; i += buffer.size()) { |
67 | 0 | memcpy(tmp + i, buffer.data(), std::min((int)buffer.size(), (width - i)) * sizeof(*data)); |
68 | 0 | std::shuffle(buffer.begin(), buffer.end(), |
69 | 0 | std::default_random_engine(std::random_device{}())); |
70 | 0 | } |
71 | 0 | tmp += stride; |
72 | 0 | } |
73 | 0 | } |
 | | Unexecuted instantiation: void UltraHdrEncFuzzer::fillBuffer<unsigned short>(unsigned short*, int, int, int) |
 | | Unexecuted instantiation: void UltraHdrEncFuzzer::fillBuffer<unsigned char>(unsigned char*, int, int, int) |
74 | | |
75 | 0 | void UltraHdrEncFuzzer::process() { |
76 | 0 | if (mFdp.remaining_bytes()) { |
77 | 0 | struct jpegr_uncompressed_struct p010Img{}; |
78 | 0 | struct jpegr_uncompressed_struct yuv420Img{}; |
79 | 0 | struct jpegr_uncompressed_struct grayImg{}; |
80 | 0 | struct jpegr_compressed_struct jpegImgR{}; |
81 | 0 | struct jpegr_compressed_struct jpegImg{}; |
82 | 0 | struct jpegr_compressed_struct jpegGainMap{}; |
83 | | |
84 | | // which encode api to select |
85 | 0 | int muxSwitch = mFdp.ConsumeIntegralInRange<int>(0, 4); |
86 | | |
87 | | // quality factor |
88 | 0 | int quality = mFdp.ConsumeIntegralInRange<int>(kQfMin, kQfMax); |
89 | | |
90 | | // hdr_tf |
91 | 0 | auto tf = |
92 | 0 | static_cast<ultrahdr_transfer_function>(mFdp.ConsumeIntegralInRange<int>(kTfMin, kTfMax)); |
93 | | |
94 | | // p010 Cg |
95 | 0 | auto p010Cg = |
96 | 0 | static_cast<ultrahdr_color_gamut>(mFdp.ConsumeIntegralInRange<int>(kCgMin, kCgMax)); |
97 | | |
98 | | // 420 Cg |
99 | 0 | auto yuv420Cg = |
100 | 0 | static_cast<ultrahdr_color_gamut>(mFdp.ConsumeIntegralInRange<int>(kCgMin, kCgMax)); |
101 | | |
102 | | // hdr_of |
103 | 0 | auto of = static_cast<ultrahdr_output_format>(mFdp.ConsumeIntegralInRange<int>(kOfMin, kOfMax)); |
104 | |
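 | | // image dimensions, rounded down to even values (required for the 4:2:0 |
 | | // subsampled planes) |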
105 | 0 | int width = mFdp.ConsumeIntegralInRange<int>(kMinWidth, kMaxWidth); |
106 | 0 | width = (width >> 1) << 1; |
107 | |
108 | 0 | int height = mFdp.ConsumeIntegralInRange<int>(kMinHeight, kMaxHeight); |
109 | 0 | height = (height >> 1) << 1; |
110 | | |
111 | | // gain_map quality factor |
112 | 0 | auto gainmap_quality = mFdp.ConsumeIntegral<int8_t>(); |
113 | | |
114 | | // multi channel gainmap |
115 | 0 | auto multi_channel_gainmap = mFdp.ConsumeIntegral<int8_t>(); |
116 | | |
117 | | // gainmap scale factor |
118 | 0 | auto gm_scale_factor = mFdp.ConsumeIntegralInRange<int16_t>(-32, 192); |
119 | | |
120 | | // encoding speed preset |
121 | 0 | auto enc_preset = mFdp.ConsumeBool() ? UHDR_USAGE_REALTIME : UHDR_USAGE_BEST_QUALITY; |
122 | | |
123 | | // gainmap metadata |
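 | | // (ranges deliberately extend past the valid limits so that invalid |
 | | // metadata is exercised as well) |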
124 | 0 | auto minBoost = mFdp.ConsumeFloatingPointInRange<float>(-4.0f, 64.0f); |
125 | 0 | auto maxBoost = mFdp.ConsumeFloatingPointInRange<float>(-4.0f, 64.0f); |
126 | 0 | auto gamma = mFdp.ConsumeFloatingPointInRange<float>(-1.0f, 5); |
127 | 0 | auto offsetSdr = mFdp.ConsumeFloatingPointInRange<float>(-1.0f, 1.0f); |
128 | 0 | auto offsetHdr = mFdp.ConsumeFloatingPointInRange<float>(-1.0f, 1.0f); |
129 | 0 | auto minCapacity = mFdp.ConsumeFloatingPointInRange<float>(-4.0f, 48.0f); |
130 | 0 | auto maxCapacity = mFdp.ConsumeFloatingPointInRange<float>(-4.0f, 48.0f); |
131 | | |
132 | | // target display peak brightness |
133 | 0 | auto targetDispPeakBrightness = mFdp.ConsumeFloatingPointInRange<float>(100.0f, 10500.0f); |
134 | | |
135 | | // raw buffer config: optionally padded strides, contiguous vs. split chroma layouts |
136 | 0 | bool hasP010Stride = mFdp.ConsumeBool(); |
137 | 0 | size_t yP010Stride = mFdp.ConsumeIntegralInRange<uint16_t>(width, width + 128); |
138 | 0 | if (!hasP010Stride) yP010Stride = width; |
139 | 0 | bool isP010UVContiguous = mFdp.ConsumeBool(); |
140 | 0 | bool hasP010UVStride = mFdp.ConsumeBool(); |
141 | 0 | size_t uvP010Stride = mFdp.ConsumeIntegralInRange<uint16_t>(width, width + 128); |
142 | 0 | if (!hasP010UVStride) uvP010Stride = width; |
143 | |
144 | 0 | bool hasYuv420Stride = mFdp.ConsumeBool(); |
145 | 0 | size_t yYuv420Stride = mFdp.ConsumeIntegralInRange<uint16_t>(width, width + 128); |
146 | 0 | if (!hasYuv420Stride) yYuv420Stride = width; |
147 | 0 | bool isYuv420UVContiguous = mFdp.ConsumeBool(); |
148 | 0 | bool hasYuv420UVStride = mFdp.ConsumeBool(); |
149 | 0 | size_t uvYuv420Stride = mFdp.ConsumeIntegralInRange<uint16_t>(width / 2, width / 2 + 128); |
150 | 0 | if (!hasYuv420UVStride) uvYuv420Stride = width / 2; |
151 | | |
152 | | // display boost |
153 | 0 | float displayBoost = mFdp.ConsumeFloatingPointInRange<float>(1.0, FLT_MAX); |
154 | |
155 | 0 | std::unique_ptr<uint16_t[]> bufferYHdr = nullptr; |
156 | 0 | std::unique_ptr<uint16_t[]> bufferUVHdr = nullptr; |
157 | 0 | std::unique_ptr<uint8_t[]> bufferYSdr = nullptr; |
158 | 0 | std::unique_ptr<uint8_t[]> bufferUVSdr = nullptr; |
159 | 0 | std::unique_ptr<uint8_t[]> grayImgRaw = nullptr; |
160 | 0 | if (muxSwitch != 4) { |
161 | | // init p010 image |
162 | 0 | p010Img.width = width; |
163 | 0 | p010Img.height = height; |
164 | 0 | p010Img.colorGamut = p010Cg; |
165 | 0 | p010Img.luma_stride = yP010Stride; |
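 | | // contiguous: one semi-planar allocation (Y plane followed by the interleaved |
 | | // UV plane); otherwise Y and UV are placed in separate buffers with their own strides |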
166 | 0 | if (isP010UVContiguous) { |
167 | 0 | size_t p010Size = yP010Stride * height * 3 / 2; |
168 | 0 | bufferYHdr = std::make_unique<uint16_t[]>(p010Size); |
169 | 0 | p010Img.data = bufferYHdr.get(); |
170 | 0 | p010Img.chroma_data = nullptr; |
171 | 0 | p010Img.chroma_stride = 0; |
172 | 0 | fillBuffer<uint16_t>(bufferYHdr.get(), width, height, yP010Stride); |
173 | 0 | fillBuffer<uint16_t>(bufferYHdr.get() + yP010Stride * height, width, height / 2, |
174 | 0 | yP010Stride); |
175 | 0 | } else { |
176 | 0 | size_t p010YSize = yP010Stride * height; |
177 | 0 | bufferYHdr = std::make_unique<uint16_t[]>(p010YSize); |
178 | 0 | p010Img.data = bufferYHdr.get(); |
179 | 0 | fillBuffer<uint16_t>(bufferYHdr.get(), width, height, yP010Stride); |
180 | 0 | size_t p010UVSize = uvP010Stride * p010Img.height / 2; |
181 | 0 | bufferUVHdr = std::make_unique<uint16_t[]>(p010UVSize); |
182 | 0 | p010Img.chroma_data = bufferUVHdr.get(); |
183 | 0 | p010Img.chroma_stride = uvP010Stride; |
184 | 0 | fillBuffer<uint16_t>(bufferUVHdr.get(), width, height / 2, uvP010Stride); |
185 | 0 | } |
186 | 0 | } else { |
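 | | // api-4 supplies its own gain map: build a grayscale (4:0:0) image at the |
 | | // default gain-map downscale factor |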
187 | 0 | size_t map_width = width / kMapDimensionScaleFactorDefault; |
188 | 0 | size_t map_height = height / kMapDimensionScaleFactorDefault; |
189 | | // init 400 image |
190 | 0 | grayImg.width = map_width; |
191 | 0 | grayImg.height = map_height; |
192 | 0 | grayImg.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; |
193 | 0 | const size_t graySize = map_width * map_height; |
194 | 0 | grayImgRaw = std::make_unique<uint8_t[]>(graySize); |
195 | 0 | grayImg.data = grayImgRaw.get(); |
196 | 0 | fillBuffer<uint8_t>(grayImgRaw.get(), map_width, map_height, map_width); |
197 | 0 | grayImg.chroma_data = nullptr; |
198 | 0 | grayImg.luma_stride = 0; |
199 | 0 | grayImg.chroma_stride = 0; |
200 | 0 | } |
201 | |
202 | 0 | if (muxSwitch > 0) { |
203 | | // init 420 image |
204 | 0 | yuv420Img.width = width; |
205 | 0 | yuv420Img.height = height; |
206 | 0 | yuv420Img.colorGamut = yuv420Cg; |
207 | 0 | yuv420Img.luma_stride = yYuv420Stride; |
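 | | // contiguous: a single I420 allocation (Y, then U, then V); otherwise the Y |
 | | // plane and the U/V planes are placed in separate buffers with their own strides |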
208 | 0 | if (isYuv420UVContiguous) { |
209 | 0 | size_t yuv420Size = yYuv420Stride * height * 3 / 2; |
210 | 0 | bufferYSdr = std::make_unique<uint8_t[]>(yuv420Size); |
211 | 0 | yuv420Img.data = bufferYSdr.get(); |
212 | 0 | yuv420Img.chroma_data = nullptr; |
213 | 0 | yuv420Img.chroma_stride = 0; |
214 | 0 | fillBuffer<uint8_t>(bufferYSdr.get(), width, height, yYuv420Stride); |
215 | 0 | fillBuffer<uint8_t>(bufferYSdr.get() + yYuv420Stride * height, width / 2, height / 2, |
216 | 0 | yYuv420Stride / 2); |
217 | 0 | fillBuffer<uint8_t>(bufferYSdr.get() + yYuv420Stride * height * 5 / 4, width / 2, |
218 | 0 | height / 2, yYuv420Stride / 2); |
219 | 0 | } else { |
220 | 0 | size_t yuv420YSize = yYuv420Stride * height; |
221 | 0 | bufferYSdr = std::make_unique<uint8_t[]>(yuv420YSize); |
222 | 0 | yuv420Img.data = bufferYSdr.get(); |
223 | 0 | fillBuffer<uint8_t>(bufferYSdr.get(), width, height, yYuv420Stride); |
224 | 0 | size_t yuv420UVSize = uvYuv420Stride * yuv420Img.height / 2 * 2; |
225 | 0 | bufferUVSdr = std::make_unique<uint8_t[]>(yuv420UVSize); |
226 | 0 | yuv420Img.chroma_data = bufferUVSdr.get(); |
227 | 0 | yuv420Img.chroma_stride = uvYuv420Stride; |
228 | 0 | fillBuffer<uint8_t>(bufferUVSdr.get(), width / 2, height / 2, uvYuv420Stride); |
229 | 0 | fillBuffer<uint8_t>(bufferUVSdr.get() + uvYuv420Stride * height / 2, width / 2, height / 2, |
230 | 0 | uvYuv420Stride); |
231 | 0 | } |
232 | 0 | } |
233 | | |
234 | | // dest |
235 | | // allocate 2x the p010 size: the input data is random, so DCT compression may not shrink it |
236 | 0 | jpegImgR.maxLength = std::max(64 * 1024 /* min size 64kb */, width * height * 3 * 2); |
237 | 0 | auto jpegImgRaw = std::make_unique<uint8_t[]>(jpegImgR.maxLength); |
238 | 0 | jpegImgR.data = jpegImgRaw.get(); |
239 | | // #define DUMP_PARAM |
240 | | #ifdef DUMP_PARAM |
241 | | std::cout << "Api Select " << muxSwitch << std::endl; |
242 | | std::cout << "image dimensions " << width << " x " << height << std::endl; |
243 | | std::cout << "p010 color gamut " << p010Img.colorGamut << std::endl; |
244 | | std::cout << "p010 luma stride " << p010Img.luma_stride << std::endl; |
245 | | std::cout << "p010 chroma stride " << p010Img.chroma_stride << std::endl; |
246 | | std::cout << "420 color gamut " << yuv420Img.colorGamut << std::endl; |
247 | | std::cout << "420 luma stride " << yuv420Img.luma_stride << std::endl; |
248 | | std::cout << "420 chroma stride " << yuv420Img.chroma_stride << std::endl; |
249 | | std::cout << "quality factor " << quality << std::endl; |
250 | | #endif |
251 | 0 | JpegR jpegHdr(nullptr, gm_scale_factor, gainmap_quality, multi_channel_gainmap, gamma, |
252 | 0 | enc_preset, minBoost, maxBoost, targetDispPeakBrightness); |
253 | 0 | status_t status = JPEGR_UNKNOWN_ERROR; |
254 | 0 | if (muxSwitch == 0) { // api 0 |
255 | 0 | jpegImgR.length = 0; |
256 | 0 | status = jpegHdr.encodeJPEGR(&p010Img, tf, &jpegImgR, quality, nullptr); |
257 | 0 | } else if (muxSwitch == 1) { // api 1 |
258 | 0 | jpegImgR.length = 0; |
259 | 0 | status = jpegHdr.encodeJPEGR(&p010Img, &yuv420Img, tf, &jpegImgR, quality, nullptr); |
260 | 0 | } else { |
261 | | // compressed img |
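 | | // apis 2-4 need a pre-compressed sdr jpeg: derive plane pointers and strides |
 | | // from the 420 image (assuming a contiguous I420 layout when no separate |
 | | // chroma pointer was configured) and compress it with JpegEncoderHelper |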
262 | 0 | JpegEncoderHelper encoder; |
263 | 0 | struct jpegr_uncompressed_struct yuv420ImgCopy = yuv420Img; |
264 | 0 | if (yuv420ImgCopy.luma_stride == 0) yuv420ImgCopy.luma_stride = yuv420Img.width; |
265 | 0 | if (!yuv420ImgCopy.chroma_data) { |
266 | 0 | uint8_t* data = reinterpret_cast<uint8_t*>(yuv420Img.data); |
267 | 0 | yuv420ImgCopy.chroma_data = data + yuv420Img.luma_stride * yuv420Img.height; |
268 | 0 | yuv420ImgCopy.chroma_stride = yuv420Img.luma_stride >> 1; |
269 | 0 | } |
270 | 0 | const uint8_t* planes[3]{reinterpret_cast<uint8_t*>(yuv420ImgCopy.data), |
271 | 0 | reinterpret_cast<uint8_t*>(yuv420ImgCopy.chroma_data), |
272 | 0 | reinterpret_cast<uint8_t*>(yuv420ImgCopy.chroma_data) + |
273 | 0 | yuv420ImgCopy.chroma_stride * yuv420ImgCopy.height / 2}; |
274 | 0 | const unsigned int strides[3]{yuv420ImgCopy.luma_stride, yuv420ImgCopy.chroma_stride, |
275 | 0 | yuv420ImgCopy.chroma_stride}; |
276 | 0 | if (encoder |
277 | 0 | .compressImage(planes, strides, yuv420ImgCopy.width, yuv420ImgCopy.height, |
278 | 0 | UHDR_IMG_FMT_12bppYCbCr420, quality, nullptr, 0) |
279 | 0 | .error_code == UHDR_CODEC_OK) { |
280 | 0 | jpegImg.length = encoder.getCompressedImageSize(); |
281 | 0 | jpegImg.maxLength = jpegImg.length; |
282 | 0 | jpegImg.data = encoder.getCompressedImagePtr(); |
283 | 0 | jpegImg.colorGamut = yuv420Cg; |
284 | 0 | if (muxSwitch == 2) { // api 2 |
285 | 0 | jpegImgR.length = 0; |
286 | 0 | status = jpegHdr.encodeJPEGR(&p010Img, &yuv420Img, &jpegImg, tf, &jpegImgR); |
287 | 0 | } else if (muxSwitch == 3) { // api 3 |
288 | 0 | jpegImgR.length = 0; |
289 | 0 | status = jpegHdr.encodeJPEGR(&p010Img, &jpegImg, tf, &jpegImgR); |
290 | 0 | } else if (muxSwitch == 4) { // api 4 |
291 | 0 | jpegImgR.length = 0; |
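 | | // api-4 pairs the sdr jpeg with a separately compressed gain map and |
 | | // fuzzer-chosen gain-map metadata |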
292 | 0 | JpegEncoderHelper gainMapEncoder; |
293 | 0 | const uint8_t* planeGm[1]{reinterpret_cast<uint8_t*>(grayImg.data)}; |
294 | 0 | const unsigned int strideGm[1]{grayImg.width}; |
295 | 0 | if (gainMapEncoder |
296 | 0 | .compressImage(planeGm, strideGm, grayImg.width, grayImg.height, |
297 | 0 | UHDR_IMG_FMT_8bppYCbCr400, quality, nullptr, 0) |
298 | 0 | .error_code == UHDR_CODEC_OK) { |
299 | 0 | jpegGainMap.length = gainMapEncoder.getCompressedImageSize(); |
300 | 0 | jpegGainMap.maxLength = jpegGainMap.length; |
301 | 0 | jpegGainMap.data = gainMapEncoder.getCompressedImagePtr(); |
302 | 0 | jpegGainMap.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; |
303 | 0 | ultrahdr_metadata_struct metadata; |
304 | 0 | metadata.version = kJpegrVersion; |
305 | 0 | metadata.maxContentBoost = maxBoost; |
306 | 0 | metadata.minContentBoost = minBoost; |
307 | 0 | metadata.gamma = gamma; |
308 | 0 | metadata.offsetSdr = offsetSdr; |
309 | 0 | metadata.offsetHdr = offsetHdr; |
310 | 0 | metadata.hdrCapacityMin = minCapacity; |
311 | 0 | metadata.hdrCapacityMax = maxCapacity; |
312 | 0 | status = jpegHdr.encodeJPEGR(&jpegImg, &jpegGainMap, &metadata, &jpegImgR); |
313 | 0 | } |
314 | 0 | } |
315 | 0 | } |
316 | 0 | } |
317 | 0 | if (status == JPEGR_NO_ERROR) { |
318 | 0 | jpegr_info_struct info{}; |
319 | 0 | status = jpegHdr.getJPEGRInfo(&jpegImgR, &info); |
320 | 0 | if (status == JPEGR_NO_ERROR) { |
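 | | // linear hdr output is rgba16f (8 bytes per pixel); all other output |
 | | // formats are 32 bits per pixel |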
321 | 0 | size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_HDR_LINEAR) ? 8 : 4); |
322 | 0 | jpegr_uncompressed_struct decodedJpegR; |
323 | 0 | auto decodedRaw = std::make_unique<uint8_t[]>(outSize); |
324 | 0 | decodedJpegR.data = decodedRaw.get(); |
325 | 0 | ultrahdr_metadata_struct metadata; |
326 | 0 | status = jpegHdr.decodeJPEGR(&jpegImgR, &decodedJpegR, displayBoost, nullptr, of, nullptr, |
327 | 0 | &metadata); |
328 | 0 | if (status != JPEGR_NO_ERROR) { |
329 | 0 | ALOGE("encountered error during decoding %d", status); |
330 | 0 | } |
331 | 0 | } else { |
332 | 0 | ALOGE("encountered error during get jpeg info %d", status); |
333 | 0 | } |
334 | 0 | } else { |
335 | 0 | ALOGE("encountered error during encoding %d", status); |
336 | 0 | } |
337 | 0 | } |
338 | 0 | } |
339 | | |
340 | 0 | extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { |
341 | 0 | UltraHdrEncFuzzer fuzzHandle(data, size); |
342 | 0 | fuzzHandle.process(); |
343 | 0 | return 0; |
344 | 0 | } |