/src/exiv2/src/quicktimevideo.cpp
Line | Count | Source |
1 | | // ***************************************************************** -*- C++ -*- |
2 | | /* |
3 | | * Copyright (C) 2004-2021 Exiv2 authors |
4 | | * This program is part of the Exiv2 distribution. |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or |
7 | | * modify it under the terms of the GNU General Public License |
8 | | * as published by the Free Software Foundation; either version 2 |
9 | | * of the License, or (at your option) any later version. |
10 | | * |
11 | | * This program is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License |
17 | | * along with this program; if not, write to the Free Software |
18 | | * Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA. |
19 | | */ |
20 | | // ***************************************************************************** |
21 | | // included header files |
#include "quicktimevideo.hpp"
#include "basicio.hpp"
#include "config.h"
#include "enforce.hpp"
#include "error.hpp"
#include "futils.hpp"
#include "helper_functions.hpp"
#include "image_int.hpp"
#include "properties.hpp"
#include "safe_op.hpp"
#include "tags.hpp"
#include "tags_int.hpp"
// + standard includes
#include <algorithm>
#include <array>
#include <cctype>
#include <cmath>
#include <string>
38 | | // ***************************************************************************** |
39 | | // class member definitions |
40 | | namespace Exiv2::Internal { |
41 | | |
42 | | static constexpr TagVocabulary qTimeFileType[] = { |
43 | | {"3g2a", "3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-0 V1.0"}, |
44 | | {"3g2b", "3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-A V1.0.0"}, |
45 | | {"3g2c", "3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-B v1.0"}, |
46 | | {"3ge6", "3GPP (.3GP) Release 6 MBMS Extended Presentations"}, |
47 | | {"3ge7", "3GPP (.3GP) Release 7 MBMS Extended Presentations"}, |
48 | | {"3gg6", "3GPP Release 6 General Profile"}, |
49 | | {"3gp1", "3GPP Media (.3GP) Release 1 (probably non-existent)"}, |
50 | | {"3gp2", "3GPP Media (.3GP) Release 2 (probably non-existent)"}, |
51 | | {"3gp3", "3GPP Media (.3GP) Release 3 (probably non-existent)"}, |
52 | | {"3gp4", "3GPP Media (.3GP) Release 4"}, |
53 | | {"3gp5", "3GPP Media (.3GP) Release 5"}, |
54 | | {"3gp6", "3GPP Media (.3GP) Release 6 Streaming Servers"}, |
55 | | {"3gs7", "3GPP Media (.3GP) Release 7 Streaming Servers"}, |
56 | | {"CAEP", "Canon Digital Camera"}, |
57 | | {"CDes", "Convergent Design"}, |
58 | | {"F4A ", "Audio for Adobe Flash Player 9+ (.F4A)"}, |
59 | | {"F4B ", "Audio Book for Adobe Flash Player 9+ (.F4B)"}, |
60 | | {"F4P ", "Protected Video for Adobe Flash Player 9+ (.F4P)"}, |
61 | | {"F4V ", "Video for Adobe Flash Player 9+ (.F4V)"}, |
62 | | {"JP2 ", "JPEG 2000 Image (.JP2) [ISO 15444-1 ?]"}, |
63 | | {"JP20", "Unknown, from GPAC samples (prob non-existent)"}, |
64 | | {"KDDI", "3GPP2 EZmovie for KDDI 3G cellphones"}, |
65 | | {"M4A ", "Apple iTunes AAC-LC (.M4A) Audio"}, |
66 | | {"M4B ", "Apple iTunes AAC-LC (.M4B) Audio Book"}, |
67 | | {"M4P ", "Apple iTunes AAC-LC (.M4P) AES Protected Audio"}, |
68 | | {"M4V ", "Apple iTunes Video (.M4V) Video"}, |
69 | | {"M4VH", "Apple TV (.M4V)"}, |
70 | | {"M4VP", "Apple iPhone (.M4V)"}, |
71 | | {"MPPI", "Photo Player, MAF [ISO/IEC 23000-3]"}, |
72 | | {"MSNV", "MPEG-4 (.MP4) for SonyPSP"}, |
73 | | {"NDAS", "MP4 v2 [ISO 14496-14] Nero Digital AAC Audio"}, |
74 | | {"NDSC", "MPEG-4 (.MP4) Nero Cinema Profile"}, |
75 | | {"NDSH", "MPEG-4 (.MP4) Nero HDTV Profile"}, |
76 | | {"NDSM", "MPEG-4 (.MP4) Nero Mobile Profile"}, |
77 | | {"NDSP", "MPEG-4 (.MP4) Nero Portable Profile"}, |
78 | | {"NDSS", "MPEG-4 (.MP4) Nero Standard Profile"}, |
79 | | {"NDXC", "H.264/MPEG-4 AVC (.MP4) Nero Cinema Profile"}, |
80 | | {"NDXH", "H.264/MPEG-4 AVC (.MP4) Nero HDTV Profile"}, |
81 | | {"NDXM", "H.264/MPEG-4 AVC (.MP4) Nero Mobile Profile"}, |
82 | | {"NDXP", "H.264/MPEG-4 AVC (.MP4) Nero Portable Profile"}, |
83 | | {"NDXS", "H.264/MPEG-4 AVC (.MP4) Nero Standard Profile"}, |
84 | | {"NIKO", "Nikon"}, |
85 | | {"ROSS", "Ross Video"}, |
86 | | {"avc1", "MP4 Base w/ AVC ext [ISO 14496-12:2005]"}, |
87 | | {"caqv", "Casio Digital Camera"}, |
88 | | {"da0a", "DMB MAF w/ MPEG Layer II aud, MOT slides, DLS, JPG/PNG/MNG images"}, |
89 | | {"da0b", "DMB MAF, extending DA0A, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
90 | | {"da1a", "DMB MAF audio with ER-BSAC audio, JPG/PNG/MNG images"}, |
91 | | {"da1b", "DMB MAF, extending da1a, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
92 | | {"da2a", "DMB MAF aud w/ HE-AAC v2 aud, MOT slides, DLS, JPG/PNG/MNG images"}, |
93 | | {"da2b", "DMB MAF, extending da2a, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
94 | | {"da3a", "DMB MAF aud with HE-AAC aud, JPG/PNG/MNG images"}, |
95 | | {"da3b", "DMB MAF, extending da3a w/ BIFS, 3GPP timed text, DID, TVA, REL, IPMP"}, |
96 | | {"dmb1", "DMB MAF supporting all the components defined in the specification"}, |
97 | | {"dmpf", "Digital Media Project"}, |
98 | | {"drc1", "Dirac (wavelet compression), encapsulated in ISO base media (MP4)"}, |
99 | | {"dv1a", "DMB MAF vid w/ AVC vid, ER-BSAC aud, BIFS, JPG/PNG/MNG images, TS"}, |
100 | | {"dv1b", "DMB MAF, extending dv1a, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
101 | | {"dv2a", "DMB MAF vid w/ AVC vid, HE-AAC v2 aud, BIFS, JPG/PNG/MNG images, TS"}, |
102 | | {"dv2b", "DMB MAF, extending dv2a, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
103 | | {"dv3a", "DMB MAF vid w/ AVC vid, HE-AAC aud, BIFS, JPG/PNG/MNG images, TS"}, |
104 | | {"dv3b", "DMB MAF, extending dv3a, with 3GPP timed text, DID, TVA, REL, IPMP"}, |
105 | | {"dvr1", "DVB (.DVB) over RTP"}, |
106 | | {"dvt1", "DVB (.DVB) over MPEG-2 Transport Stream"}, |
107 | | {"isc2", "ISMACryp 2.0 Encrypted File"}, |
108 | | {"iso2", "MP4 Base Media v2 [ISO 14496-12:2005]"}, |
109 | | {"isom", "MP4 Base Media v1 [IS0 14496-12:2003]"}, |
110 | | {"jpm ", "JPEG 2000 Compound Image (.JPM) [ISO 15444-6]"}, |
111 | | {"jpx ", "JPEG 2000 with extensions (.JPX) [ISO 15444-2]"}, |
112 | | {"mj2s", "Motion JPEG 2000 [ISO 15444-3] Simple Profile"}, |
113 | | {"mjp2", "Motion JPEG 2000 [ISO 15444-3] General Profile"}, |
114 | | {"mmp4", "MPEG-4/3GPP Mobile Profile (.MP4/3GP) (for NTT)"}, |
115 | | {"mp21", "MPEG-21 [ISO/IEC 21000-9]"}, |
116 | | {"mp41", "MP4 v1 [ISO 14496-1:ch13]"}, |
117 | | {"mp42", "MP4 v2 [ISO 14496-14]"}, |
118 | | {"mp71", "MP4 w/ MPEG-7 Metadata [per ISO 14496-12]"}, |
119 | | {"mqt ", "Sony / Mobile QuickTime (.MQV) US Patent 7,477,830 (Sony Corp)"}, |
120 | | {"niko", "Nikon"}, |
121 | | {"odcf", "OMA DCF DRM Format 2.0 (OMA-TS-DRM-DCF-V2_0-20060303-A)"}, |
122 | | {"opf2", "OMA PDCF DRM Format 2.1 (OMA-TS-DRM-DCF-V2_1-20070724-C)"}, |
123 | | {"opx2", "OMA PDCF DRM + XBS extensions (OMA-TS-DRM_XBS-V1_0-20070529-C)"}, |
124 | | {"pana", "Panasonic Digital Camera"}, |
125 | | {"qt ", "Apple QuickTime (.MOV/QT)"}, |
126 | | {"sdv ", "SD Memory Card Video"}, |
127 | | {"ssc1", "Samsung stereoscopic, single stream"}, |
128 | | {"ssc2", "Samsung stereoscopic, dual stream"}, |
129 | | }; |
130 | | |
//! Vocabulary for the 'hdlr' atom component class field.
static constexpr TagVocabulary handlerClassTags[] = {
    {"dhlr", "Data Handler"},
    {"mhlr", "Media Handler"},
};

//! Vocabulary for the 'hdlr' atom component subtype (track/stream kind).
static constexpr TagVocabulary handlerTypeTags[] = {
    {"alis", "Alias Data"},
    {"crsm", "Clock Reference"},
    {"hint", "Hint Track"},
    {"ipsm", "IPMP"},
    {"m7sm", "MPEG-7 Stream"},
    {"mdir", "Metadata"},
    {"mdta", "Metadata Tags"},
    {"mjsm", "MPEG-J"},
    {"ocsm", "Object Content"},
    {"odsm", "Object Descriptor"},
    {"sdsm", "Scene Description"},
    {"soun", "Audio Track"},
    {"text", "Text"},
    {"tmcd", "Time Code"},
    {"url ", "URL"},
    {"vide", "Video Track"},
};
154 | | |
//! Vocabulary mapping 4-char vendor ID codes to manufacturer names.
static constexpr TagVocabulary vendorIDTags[] = {
    {"FFMP", "FFmpeg"},
    {"appl", "Apple"},
    {"olym", "Olympus"},
    {"GIC ", "General Imaging Co."},
    {"fe20", "Olympus (fe20)"},
    {"pana", "Panasonic"},
    {"KMPI", "Konica-Minolta"},
    {"kdak", "Kodak"},
    {"pent", "Pentax"},
    {"NIKO", "Nikon"},
    {"leic", "Leica"},
    {"pr01", "Olympus (pr01)"},
    {"SMI ", "Sorenson Media Inc."},
    {"mino", "Minolta"},
    {"sany", "Sanyo"},
    {"ZORA", "Zoran Corporation"},
    {"niko", "Nikon"},
};

//! Vocabulary for the camera byte-order marker ("II" = little endian, "MM" = big endian).
static constexpr TagVocabulary cameraByteOrderTags[] = {
    {"II", "Little-endian (Intel, II)"},
    {"MM", "Big-endian (Motorola, MM)"},
};
179 | | |
//! Vocabulary for the graphics (transfer) mode value found in video media headers.
static constexpr TagDetails graphicsModetags[] = {
    {0x0, "srcCopy"},
    {0x1, "srcOr"},
    {0x2, "srcXor"},
    {0x3, "srcBic"},
    {0x4, "notSrcCopy"},
    {0x5, "notSrcOr"},
    {0x6, "notSrcXor"},
    {0x7, "notSrcBic"},
    {0x8, "patCopy"},
    {0x9, "patOr"},
    {0xa, "patXor"},
    {0xb, "patBic"},
    {0xc, "notPatCopy"},
    {0xd, "notPatOr"},
    {0xe, "notPatXor"},
    {0xf, "notPatBic"},
    {0x20, "blend"},
    {0x21, "addPin"},
    {0x22, "addOver"},
    {0x23, "subPin"},
    {0x24, "transparent"},
    {0x25, "addMax"},
    {0x26, "subOver"},
    {0x27, "addMin"},
    {0x31, "grayishTextOr"},
    {0x32, "hilite"},
    {0x40, "ditherCopy"},
    {0x100, "Alpha"},
    {0x101, "White Alpha"},
    {0x102, "Pre-multiplied Black Alpha"},
    {0x110, "Component Alpha"},
};
213 | | |
//! Vocabulary of known 'udta' (user data) child atom tags and their meanings.
static constexpr TagVocabulary userDatatags[] = {
    {"AllF", "PlayAllFrames"},
    {"CNCV", "CompressorVersion"},
    {"CNFV", "FirmwareVersion"},
    {"CNMN", "Model"},
    {"CNTH", "CanonCNTH"},
    {"DcMD", "DcMD"},
    {"FFMV", "FujiFilmFFMV"},
    {"INFO", "SamsungINFO"},
    {"LOOP", "LoopStyle"},
    {"MMA0", "MinoltaMMA0"},
    {"MMA1", "MinoltaMMA1"},
    {"MVTG", "FujiFilmMVTG"},
    {"NCDT", "NikonNCDT"},
    {"PANA", "PanasonicPANA"},
    {"PENT", "PentaxPENT"},
    {"PXMN", "MakerNotePentax5b"},
    {"PXTH", "PentaxPreview"},
    {"QVMI", "CasioQVMI"},
    {"SDLN", "PlayMode"},
    {"SelO", "PlaySelection"},
    {"TAGS", "KodakTags/KonicaMinoltaTags/MinoltaTags/NikonTags/OlympusTags/PentaxTags/SamsungTags/SanyoMOV/SanyoMP4"},
    {"WLOC", "WindowLocation"},
    {"XMP_", "XMP"},
    {"Xtra", "Xtra"},
    {"hinf", "HintTrackInfo"},
    {"hinv", "HintVersion"},
    {"hnti", "Hint"},
    {"meta", "Meta"},
    {"name", "Name"},
    {"ptv ", "PrintToVideo"},
    {"scrn", "OlympusPreview"},
    {"thmb", "MakerNotePentax5a/OlympusThumbnail"},
};
248 | | |
//! Mapping of user-data atom tags to the XMP properties they populate.
//  Tags starting with a space are the Apple/iTunes (c)-style annotation atoms.
static constexpr TagVocabulary userDataReferencetags[] = {
    {"CNCV", "Xmp.video.CompressorVersion"},
    {"CNFV", "Xmp.video.FirmwareVersion"},
    {"CNMN", "Xmp.video.Model"},
    {"NCHD", "Xmp.video.MakerNoteType"},
    {"WLOC", "Xmp.video.WindowLocation"},
    {"SDLN", "Xmp.video.PlayMode"},
    {"FFMV", "Xmp.video.StreamName"},
    {"SelO", "Xmp.video.PlaySelection"},
    {"name", "Xmp.video.Name"},
    {"vndr", "Xmp.video.Vendor"},
    {" ART", "Xmp.video.Artist"},
    {" alb", "Xmp.video.Album"},
    {" arg", "Xmp.video.Arranger"},
    {" ark", "Xmp.video.ArrangerKeywords"},
    {" cmt", "Xmp.video.Comment"},
    {" cok", "Xmp.video.ComposerKeywords"},
    {" com", "Xmp.video.Composer"},
    {" cpy", "Xmp.video.Copyright"},
    {" day", "Xmp.video.CreateDate"},
    {" dir", "Xmp.video.Director"},
    {" ed1", "Xmp.video.Edit1"},
    {" ed2", "Xmp.video.Edit2"},
    {" ed3", "Xmp.video.Edit3"},
    {" ed4", "Xmp.video.Edit4"},
    {" ed5", "Xmp.video.Edit5"},
    {" ed6", "Xmp.video.Edit6"},
    {" ed7", "Xmp.video.Edit7"},
    {" ed8", "Xmp.video.Edit8"},
    {" ed9", "Xmp.video.Edit9"},
    {" enc", "Xmp.video.Encoder"},
    {" fmt", "Xmp.video.Format"},
    {" gen", "Xmp.video.Genre"},
    {" grp", "Xmp.video.Grouping"},
    {" inf", "Xmp.video.Information"},
    {" isr", "Xmp.video.ISRCCode"},
    {" lab", "Xmp.video.RecordLabelName"},
    {" lal", "Xmp.video.RecordLabelURL"},
    {" lyr", "Xmp.video.Lyrics"},
    {" mak", "Xmp.video.Make"},
    {" mal", "Xmp.video.MakerURL"},
    {" mod", "Xmp.video.Model"},
    {" nam", "Xmp.video.Title"},
    {" pdk", "Xmp.video.ProducerKeywords"},
    {" phg", "Xmp.video.RecordingCopyright"},
    {" prd", "Xmp.video.Producer"},
    {" prf", "Xmp.video.Performers"},
    {" prk", "Xmp.video.PerformerKeywords"},
    {" prl", "Xmp.video.PerformerURL"},
    {" req", "Xmp.video.Requirements"},
    {" snk", "Xmp.video.SubtitleKeywords"},
    {" snm", "Xmp.video.Subtitle"},
    {" src", "Xmp.video.SourceCredits"},
    {" swf", "Xmp.video.SongWriter"},
    {" swk", "Xmp.video.SongWriterKeywords"},
    {" swr", "Xmp.video.SoftwareVersion"},
    {" too", "Xmp.video.Encoder"},
    {" trk", "Xmp.video.Track"},
    {" wrt", "Xmp.video.Composer"},
    {" xyz", "Xmp.video.GPSCoordinates"},
    {"CMbo", "Xmp.video.CameraByteOrder"},
    {"Cmbo", "Xmp.video.CameraByteOrder"},
};
312 | | |
//! Mapping of Nikon NCTG maker-note tag IDs to XMP properties.
//  IDs 0x01xxxxx appear to mirror Exif tags, 0x12xxxxx GPS tags and
//  0x20xxxxx Nikon maker-note tags -- grouping inferred from the names.
static constexpr TagDetails NikonNCTGTags[] = {
    {0x0001, "Xmp.video.Make"},
    {0x0002, "Xmp.video.Model"},
    {0x0003, "Xmp.video.Software"},
    {0x0011, "Xmp.video.CreationDate"},
    {0x0012, "Xmp.video.DateTimeOriginal"},
    {0x0013, "Xmp.video.FrameCount"},
    {0x0016, "Xmp.video.FrameRate"},
    {0x0022, "Xmp.video.FrameWidth"},
    {0x0023, "Xmp.video.FrameHeight"},
    {0x0032, "Xmp.audio.channelType"},
    {0x0033, "Xmp.audio.BitsPerSample"},
    {0x0034, "Xmp.audio.sampleRate"},
    {0x1108822, "Xmp.video.ExposureProgram"},
    {0x1109204, "Xmp.video.ExposureCompensation"},
    {0x1109207, "Xmp.video.MeteringMode"},
    {0x110a434, "Xmp.video.LensModel"},
    {0x1200000, "Xmp.video.GPSVersionID"},
    {0x1200001, "Xmp.video.GPSLatitudeRef"},
    {0x1200002, "Xmp.video.GPSLatitude"},
    {0x1200003, "Xmp.video.GPSLongitudeRef"},
    {0x1200004, "Xmp.video.GPSLongitude"},
    {0x1200005, "Xmp.video.GPSAltitudeRef"},
    {0x1200006, "Xmp.video.GPSAltitude"},
    {0x1200007, "Xmp.video.GPSTimeStamp"},
    {0x1200008, "Xmp.video.GPSSatellites"},
    {0x1200010, "Xmp.video.GPSImgDirectionRef"},
    {0x1200011, "Xmp.video.GPSImgDirection"},
    {0x1200012, "Xmp.video.GPSMapDatum"},
    {0x120001d, "Xmp.video.GPSDateStamp"},
    {0x2000001, "Xmp.video.MakerNoteVersion"},
    {0x2000005, "Xmp.video.WhiteBalance"},
    {0x200000b, "Xmp.video.WhiteBalanceFineTune"},
    {0x200001e, "Xmp.video.ColorSpace"},
    {0x2000023, "Xmp.video.PictureControlData"},
    {0x2000024, "Xmp.video.WorldTime"},
    {0x200002c, "Xmp.video.UnknownInfo"},
    {0x2000032, "Xmp.video.UnknownInfo2"},
    {0x2000039, "Xmp.video.LocationInfo"},
    {0x2000083, "Xmp.video.LensType"},
    {0x2000084, "Xmp.video.LensModel"},
    {0x20000ab, "Xmp.video.VariProgram"},
};
356 | | |
//! Nikon color space values. Currently unreferenced in this translation unit.
[[maybe_unused]] static constexpr TagDetails NikonColorSpace[] = {
    {1, "sRGB"},
    {2, "Adobe RGB"},
};

//! Reference letters used by Nikon GPS latitude/longitude/direction tags.
[[maybe_unused]] static constexpr TagVocabulary NikonGPS_Latitude_Longitude_ImgDirection_Reference[] = {
    {"N", "North"}, {"S", "South"}, {"E", "East"}, {"W", "West"}, {"M", "Magnetic North"}, {"T", "True North"},
};

//! Nikon GPS altitude reference (relative to sea level).
[[maybe_unused]] static constexpr TagDetails NikonGPSAltitudeRef[] = {
    {0, "Above Sea Level"},
    {1, "Below Sea Level"},
};

//! Nikon exposure program values.
[[maybe_unused]] static constexpr TagDetails NikonExposureProgram[] = {
    {0, "Not Defined"},
    {1, "Manual"},
    {2, "Program AE"},
    {3, "Aperture-priority AE"},
    {4, "Shutter speed priority AE"},
    {5, "Creative (Slow speed)"},
    {6, "Action (High speed)"},
    {7, "Portrait"},
    {8, "Landscape"},
};

//! Nikon metering mode values.
[[maybe_unused]] static constexpr TagDetails NikonMeteringMode[] = {
    {0, "Unknown"}, {1, "Average"}, {2, "Center-weighted average"},
    {3, "Spot"},    {4, "Multi-spot"}, {5, "Multi-segment"},
    {6, "Partial"}, {255, "Other"},
};
388 | | |
//! Nikon Picture Control adjustment mode.
static constexpr TagDetails PictureControlAdjust[] = {
    {0, "Default Settings"},
    {1, "Quick Adjust"},
    {2, "Full Control"},
};

//! Contrast and Sharpness
static constexpr TagDetails NormalSoftHard[] = {
    {0, "Normal"},
    {1, "Soft"},
    {2, "Hard"},
};

//! Saturation
static constexpr TagDetails Saturation[] = {
    {0, "Normal"},
    {1, "Low"},
    {2, "High"},
};

//! YesNo, used for DaylightSavings
static constexpr TagDetails YesNo[] = {
    {0, "No"},
    {1, "Yes"},
};

//! DateDisplayFormat
static constexpr TagDetails DateDisplayFormat[] = {
    {0, "Y/M/D"},
    {1, "M/D/Y"},
    {2, "D/M/Y"},
};

//! Monochrome filter effect (0x80-based encoding, 0xff = not applicable).
static constexpr TagDetails FilterEffect[] = {
    {0x80, "Off"}, {0x81, "Yellow"}, {0x82, "Orange"}, {0x83, "Red"}, {0x84, "Green"}, {0xff, "n/a"},
};

//! Monochrome toning effect (0x80-based encoding, 0xff = not applicable).
static constexpr TagDetails ToningEffect[] = {
    {0x80, "B&W"},         {0x81, "Sepia"},      {0x82, "Cyanotype"},  {0x83, "Red"},
    {0x84, "Yellow"},      {0x85, "Green"},      {0x86, "Blue-green"}, {0x87, "Blue"},
    {0x88, "Purple-blue"}, {0x89, "Red-purple"}, {0xff, "n/a"},
};

//! White balance settings.
static constexpr TagDetails whiteBalance[] = {
    {0, "Auto"}, {1, "Daylight"}, {2, "Shade"}, {3, "Fluorescent"}, {4, "Tungsten"}, {5, "Manual"},
};
435 | | |
//! Field indexes into the decoded 'mvhd' (movie header) atom.
//  Explicit enumerator values skip fields that are not exported as metadata.
enum movieHeaderTags {
  MovieHeaderVersion,
  CreateDate,
  ModifyDate,
  TimeScale,
  Duration,
  PreferredRate,
  PreferredVolume,
  PreviewTime = 18,
  PreviewDuration,
  PosterTime,
  SelectionTime,
  SelectionDuration,
  CurrentTime,
  NextTrackID
};
//! Field indexes into the decoded 'tkhd' (track header) atom.
enum trackHeaderTags {
  TrackHeaderVersion,
  TrackCreateDate,
  TrackModifyDate,
  TrackID,
  TrackDuration = 5,
  TrackLayer = 8,
  TrackVolume,
  ImageWidth = 19,
  ImageHeight
};
//! Field indexes into the decoded 'mdhd' (media header) atom.
enum mediaHeaderTags {
  MediaHeaderVersion,
  MediaCreateDate,
  MediaModifyDate,
  MediaTimeScale,
  MediaDuration,
  MediaLanguageCode
};
//! Field indexes into the decoded 'hdlr' (handler) atom.
enum handlerTags { HandlerClass = 1, HandlerType, HandlerVendorID };
//! Field indexes into the decoded 'vmhd' (video media header) atom.
enum videoHeaderTags { GraphicsMode = 2, OpColor };
//! Kind of stream currently being traversed (stored in currentStream_).
enum stream { Video, Audio, Hint, Null, GenMediaHeader };
//! Field indexes into the decoded image (sample) description.
enum imageDescTags {
  codec,
  VendorID = 4,
  SourceImageWidth_Height = 7,
  XResolution,
  YResolution,
  CompressorName = 10,
  BitDepth
};
//! Field indexes into the decoded audio (sample) description.
enum audioDescTags { AudioFormat, AudioVendorID = 4, AudioChannels, AudioSampleRate = 7, MOV_AudioFormat = 13 };
484 | | |
485 | | /*! |
486 | | @brief Function used to check equality of a Tags with a |
487 | | particular string (ignores case while comparing). |
488 | | @param buf Data buffer that will contain Tag to compare |
489 | | @param str char* Pointer to string |
490 | | @return Returns true if the buffer value is equal to string. |
491 | | */ |
492 | 58.0k | static bool equalsQTimeTag(Exiv2::DataBuf& buf, const char str[5]) { |
493 | 58.0k | return std::equal(buf.begin(), buf.begin() + 4, str, |
494 | 62.6k | [](auto b, auto s) { return std::tolower(b) == std::tolower(s); }); |
495 | 58.0k | } |
496 | | |
497 | | /*! |
498 | | @brief Function used to ignore Tags and values stored in them, |
499 | | since they are not necessary as metadata information |
500 | | @param buf Data buffer that will contain Tag to compare |
501 | | @return Returns true, if Tag is found in the ignoreList[] |
502 | | */ |
503 | 2.69k | static bool ignoreList(Exiv2::DataBuf& buf) { |
504 | 2.69k | const char ignoreList[13][5] = { |
505 | 2.69k | "mdat", "edts", "junk", "iods", "alis", "stsc", "stsz", "stco", "ctts", "stss", "skip", "wide", "cmvd", |
506 | 2.69k | }; |
507 | | |
508 | 2.69k | for (auto i : ignoreList) |
509 | 34.9k | if (equalsQTimeTag(buf, i)) |
510 | 19 | return true; |
511 | | |
512 | 2.67k | return false; |
513 | 2.69k | } |
514 | | |
515 | | /*! |
516 | | @brief Function used to ignore Tags, basically Tags which |
517 | | contain other tags inside them, since they are not necessary |
518 | | as metadata information |
519 | | @param buf Data buffer that will contain Tag to compare |
520 | | @return Returns true, if Tag is found in the ignoreList[] |
521 | | */ |
522 | 1.33k | static bool dataIgnoreList(Exiv2::DataBuf& buf) { |
523 | 1.33k | const char ignoreList[8][5] = { |
524 | 1.33k | "moov", "mdia", "minf", "dinf", "alis", "stbl", "cmov", "meta", |
525 | 1.33k | }; |
526 | | |
527 | 1.33k | for (auto i : ignoreList) |
528 | 10.6k | if (equalsQTimeTag(buf, i)) |
529 | 28 | return true; |
530 | | |
531 | 1.31k | return false; |
532 | 1.33k | } |
533 | | } // namespace Exiv2::Internal |
534 | | |
535 | | namespace Exiv2 { |
536 | | |
537 | | using namespace Exiv2::Internal; |
538 | | |
// Constructs a QuickTime video reader over the given I/O source.
// Time scales default to 1 so duration math is safe even if the
// mvhd/mdhd atoms are never seen; no stream is selected initially.
QuickTimeVideo::QuickTimeVideo(BasicIo::UniquePtr io, size_t max_recursion_depth) :
    Image(ImageType::qtime, mdNone, std::move(io)),
    mvhdTimeScale_(1),
    mdhdTimeScale_(1),
    currentStream_(Null),
    max_recursion_depth_(max_recursion_depth) {
}  // QuickTimeVideo::QuickTimeVideo
546 | | |
547 | 704 | std::string QuickTimeVideo::mimeType() const { |
548 | 704 | return "video/quicktime"; |
549 | 704 | } |
550 | | |
// Writing metadata back into QuickTime containers is not implemented;
// this override is intentionally a no-op.
void QuickTimeVideo::writeMetadata() {
}
553 | | |
// Reads all metadata from the file: verifies the container type, then walks
// the top-level atoms via decodeBlock() until EOF, populating xmpData_.
// Throws on open failure or if the file is not a QuickTime container.
void QuickTimeVideo::readMetadata() {
  if (io_->open() != 0)
    throw Error(ErrorCode::kerDataSourceOpenFailed, io_->path(), strError());

  // Ensure that this is the correct image type
  if (!isQTimeType(*io_, false)) {
    if (io_->error() || io_->eof())
      throw Error(ErrorCode::kerFailedToReadImageData);
    throw Error(ErrorCode::kerNotAnImage, "QuickTime");
  }

  IoCloser closer(*io_);
  clearMetadata();
  continueTraversing_ = true;
  // Defaults of 1 keep getAspectRatio() well-defined even when no track
  // header supplies real dimensions.
  height_ = width_ = 1;

  xmpData_["Xmp.video.FileSize"] = static_cast<double>(io_->size()) / 1048576.0;  // bytes -> MiB
  xmpData_["Xmp.video.MimeType"] = mimeType();

  // decodeBlock() clears continueTraversing_ when it reaches end of file.
  while (continueTraversing_)
    decodeBlock(0);

  xmpData_["Xmp.video.AspectRatio"] = getAspectRatio(width_, height_);
}  // QuickTimeVideo::readMetadata
578 | | |
// Reads one atom header (32-bit size followed by a 4-byte tag) at the current
// position and dispatches its payload to tagDecoder(). Handles the extended
// 64-bit size encoding (size field == 1). Sets continueTraversing_ to false
// at end of file. Throws kerCorruptedMetadata on malformed sizes or when the
// recursion limit is exceeded.
void QuickTimeVideo::decodeBlock(size_t recursion_depth, std::string const& entered_from) {
  enforce(recursion_depth < max_recursion_depth_, Exiv2::ErrorCode::kerCorruptedMetadata);

  const long bufMinSize = 4;
  DataBuf buf(bufMinSize + 1);
  uint64_t size = 0;
  buf.data()[4] = '\0';  // NUL-terminate so buf can be treated as a 4-char tag string

  io_->read(buf.data(), 4);
  if (io_->eof()) {
    continueTraversing_ = false;
    return;
  }

  size = buf.read_uint32(0, bigEndian);

  io_->readOrThrow(buf.data(), 4);  // the 4-byte atom tag

  // we have read 2x 4 bytes
  size_t hdrsize = 8;

  if (size == 1) {
    // The box size is encoded as a uint64_t, so we need to read another 8 bytes.
    DataBuf data(8);
    hdrsize += 8;
    io_->readOrThrow(data.data(), data.size());
    size = data.read_uint64(0, bigEndian);
  } else if (size == 0 && entered_from == "meta") {
    // NOTE(review): a zero size directly inside a 'meta' atom is re-read as a
    // fresh size + tag pair -- presumably to step over the meta atom's
    // version/flags field; confirm against the QuickTime/ISO BMFF spec.
    size = buf.read_uint32(0, bigEndian);
    io_->readOrThrow(buf.data(), 4, Exiv2::ErrorCode::kerCorruptedMetadata);
  }

  // Reject sizes smaller than the header itself or larger than the remaining file.
  enforce(size >= hdrsize, Exiv2::ErrorCode::kerCorruptedMetadata);
  enforce(size - hdrsize <= io_->size() - io_->tell(), Exiv2::ErrorCode::kerCorruptedMetadata);
  enforce(size - hdrsize <= std::numeric_limits<size_t>::max(), Exiv2::ErrorCode::kerCorruptedMetadata);

  // std::cerr<<"Tag=>"<<buf.data()<<" size=>"<<size-hdrsize << '\n';
  const auto newsize = static_cast<size_t>(size - hdrsize);
  if (ignoreList(buf)) {
    discard(newsize);
    return;
  }
  if (newsize > buf.size()) {
    buf.resize(newsize);
  }
  tagDecoder(buf, newsize, recursion_depth + 1);
}  // QuickTimeVideo::decodeBlock
626 | | |
627 | 0 | static std::string readString(BasicIo& io, size_t size) { |
628 | 0 | enforce(size <= io.size() - io.tell(), Exiv2::ErrorCode::kerCorruptedMetadata); |
629 | 0 | Exiv2::DataBuf str(size + 1); |
630 | 0 | io.readOrThrow(str.data(), size); |
631 | 0 | str.write_uint8(size, 0); // nul-terminate string |
632 | 0 | return Exiv2::toString(str.data()); |
633 | 0 | } |
634 | | |
// Dispatches a single atom to the appropriate decoder based on its 4-byte tag
// in `buf`. `size` is the payload size (header already consumed); unknown or
// ignored atoms are skipped with discard(). Container atoms recurse through
// decodeBlock().
void QuickTimeVideo::tagDecoder(Exiv2::DataBuf& buf, size_t size, size_t recursion_depth) {
  enforce(recursion_depth < max_recursion_depth_, Exiv2::ErrorCode::kerCorruptedMetadata);
  assert(buf.size() > 4);

  if (ignoreList(buf))
    discard(size);

  // Pure container atoms: descend into children rather than decoding a payload.
  else if (dataIgnoreList(buf)) {
    decodeBlock(recursion_depth + 1, Exiv2::toString(buf.data()));
  } else if (equalsQTimeTag(buf, "ftyp"))
    fileTypeDecoder(size);

  else if (equalsQTimeTag(buf, "trak"))
    setMediaStream();

  else if (equalsQTimeTag(buf, "mvhd"))
    movieHeaderDecoder(size);

  else if (equalsQTimeTag(buf, "tkhd"))
    trackHeaderDecoder(size);

  else if (equalsQTimeTag(buf, "mdhd"))
    mediaHeaderDecoder(size);

  else if (equalsQTimeTag(buf, "hdlr"))
    handlerDecoder(size);

  else if (equalsQTimeTag(buf, "vmhd"))
    videoHeaderDecoder(size);

  else if (equalsQTimeTag(buf, "udta"))
    userDataDecoder(size, recursion_depth + 1);

  else if (equalsQTimeTag(buf, "dref"))
    multipleEntriesDecoder(recursion_depth + 1);

  else if (equalsQTimeTag(buf, "stsd"))
    sampleDesc(size);

  else if (equalsQTimeTag(buf, "stts"))
    timeToSampleDecoder();

  else if (equalsQTimeTag(buf, "pnot"))
    previewTagDecoder(size);

  else if (equalsQTimeTag(buf, "tapt"))
    trackApertureTagDecoder(size);

  else if (equalsQTimeTag(buf, "keys"))
    keysTagDecoder(size);

  // url /urn : data reference location; attributed to the current stream kind.
  else if (equalsQTimeTag(buf, "url ")) {
    if (currentStream_ == Video)
      xmpData_["Xmp.video.URL"] = readString(*io_, size);
    else if (currentStream_ == Audio)
      xmpData_["Xmp.audio.URL"] = readString(*io_, size);
    else
      discard(size);
  }

  else if (equalsQTimeTag(buf, "urn ")) {
    if (currentStream_ == Video)
      xmpData_["Xmp.video.URN"] = readString(*io_, size);
    else if (currentStream_ == Audio)
      xmpData_["Xmp.audio.URN"] = readString(*io_, size);
    else
      discard(size);
  }

  else if (equalsQTimeTag(buf, "dcom")) {
    xmpData_["Xmp.video.Compressor"] = readString(*io_, size);
  }

  else if (equalsQTimeTag(buf, "smhd")) {
    // NOTE(review): two 4-byte reads, then only the first 2 bytes of the
    // second read are used as the balance value -- the first read presumably
    // skips the atom's version/flags; confirm against the sound media header layout.
    io_->readOrThrow(buf.data(), 4);
    io_->readOrThrow(buf.data(), 4);
    xmpData_["Xmp.audio.Balance"] = buf.read_uint16(0, bigEndian);
  }

  // Anything unrecognized is skipped wholesale.
  else {
    discard(size);
  }
}  // QuickTimeVideo::tagDecoder
718 | | |
719 | 632 | void QuickTimeVideo::discard(size_t size) { |
720 | 632 | size_t cur_pos = io_->tell(); |
721 | 632 | io_->seek(cur_pos + size, BasicIo::beg); |
722 | 632 | } // QuickTimeVideo::discard |
723 | | |
// Decodes a 'pnot' (preview) atom: modification date, version number and the
// type of the preview atom it references. Always leaves the stream positioned
// at the end of the atom, regardless of how much was read.
void QuickTimeVideo::previewTagDecoder(size_t size) {
  DataBuf buf(4);
  size_t cur_pos = io_->tell();
  io_->readOrThrow(buf.data(), 4);
  xmpData_["Xmp.video.PreviewDate"] = buf.read_uint32(0, bigEndian);
  io_->readOrThrow(buf.data(), 2);
  xmpData_["Xmp.video.PreviewVersion"] = getShort(buf.data(), bigEndian);

  io_->readOrThrow(buf.data(), 4);
  if (equalsQTimeTag(buf, "PICT"))
    xmpData_["Xmp.video.PreviewAtomType"] = "QuickDraw Picture";
  else
    // Unrecognized type: report the raw 4-character tag.
    xmpData_["Xmp.video.PreviewAtomType"] = std::string{buf.c_str(), 4};

  io_->seek(cur_pos + size, BasicIo::beg);  // skip whatever remains of the atom
}  // QuickTimeVideo::previewTagDecoder
740 | | |
// Decoder invoked for 'keys' atoms.
// NOTE(review): this body is byte-for-byte identical to previewTagDecoder()
// and writes Xmp.video.Preview* properties; the actual 'keys' atom layout
// (metadata item keys table) does not appear to be parsed here. Looks like a
// copy/paste placeholder -- confirm intended behavior upstream before relying
// on the values it produces.
void QuickTimeVideo::keysTagDecoder(size_t size) {
  DataBuf buf(4);
  size_t cur_pos = io_->tell();
  io_->readOrThrow(buf.data(), 4);
  xmpData_["Xmp.video.PreviewDate"] = buf.read_uint32(0, bigEndian);
  io_->readOrThrow(buf.data(), 2);
  xmpData_["Xmp.video.PreviewVersion"] = getShort(buf.data(), bigEndian);

  io_->readOrThrow(buf.data(), 4);
  if (equalsQTimeTag(buf, "PICT"))
    xmpData_["Xmp.video.PreviewAtomType"] = "QuickDraw Picture";
  else
    xmpData_["Xmp.video.PreviewAtomType"] = std::string{buf.c_str(), 4};

  io_->seek(cur_pos + size, BasicIo::beg);  // skip whatever remains of the atom
}  // QuickTimeVideo::keysTagDecoder
757 | | |
758 | 0 | void QuickTimeVideo::trackApertureTagDecoder(size_t size) { |
759 | 0 | DataBuf buf(4); |
760 | 0 | DataBuf buf2(2); |
761 | 0 | size_t cur_pos = io_->tell(); |
762 | 0 | byte n = 3; |
763 | |
|
764 | 0 | while (n--) { |
765 | 0 | io_->seek(4L, BasicIo::cur); |
766 | 0 | io_->readOrThrow(buf.data(), 4); |
767 | |
|
768 | 0 | if (equalsQTimeTag(buf, "clef")) { |
769 | 0 | io_->seek(4L, BasicIo::cur); |
770 | 0 | io_->readOrThrow(buf.data(), 2); |
771 | 0 | io_->readOrThrow(buf2.data(), 2); |
772 | 0 | xmpData_["Xmp.video.CleanApertureWidth"] = |
773 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
774 | 0 | io_->readOrThrow(buf.data(), 2); |
775 | 0 | io_->readOrThrow(buf2.data(), 2); |
776 | 0 | xmpData_["Xmp.video.CleanApertureHeight"] = |
777 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
778 | 0 | } |
779 | | |
780 | 0 | else if (equalsQTimeTag(buf, "prof")) { |
781 | 0 | io_->seek(4L, BasicIo::cur); |
782 | 0 | io_->readOrThrow(buf.data(), 2); |
783 | 0 | io_->readOrThrow(buf2.data(), 2); |
784 | 0 | xmpData_["Xmp.video.ProductionApertureWidth"] = |
785 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
786 | 0 | io_->readOrThrow(buf.data(), 2); |
787 | 0 | io_->readOrThrow(buf2.data(), 2); |
788 | 0 | xmpData_["Xmp.video.ProductionApertureHeight"] = |
789 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
790 | 0 | } |
791 | | |
792 | 0 | else if (equalsQTimeTag(buf, "enof")) { |
793 | 0 | io_->seek(4L, BasicIo::cur); |
794 | 0 | io_->readOrThrow(buf.data(), 2); |
795 | 0 | io_->readOrThrow(buf2.data(), 2); |
796 | 0 | xmpData_["Xmp.video.EncodedPixelsWidth"] = |
797 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
798 | 0 | io_->readOrThrow(buf.data(), 2); |
799 | 0 | io_->readOrThrow(buf2.data(), 2); |
800 | 0 | xmpData_["Xmp.video.EncodedPixelsHeight"] = |
801 | 0 | stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian)); |
802 | 0 | } |
803 | 0 | } |
804 | 0 | io_->seek(cur_pos + size, BasicIo::beg); |
805 | 0 | } // QuickTimeVideo::trackApertureTagDecoder |
806 | | |
// Decode a vendor camera-tags block. Only the Nikon variant (signature
// "NIKO") is handled; its fields sit at fixed offsets, so the reads below
// must stay in exactly this order.
void QuickTimeVideo::CameraTagsDecoder(size_t size) {
  size_t cur_pos = io_->tell();
  DataBuf buf(50);
  DataBuf buf2(4);

  io_->readOrThrow(buf.data(), 4);
  if (equalsQTimeTag(buf, "NIKO")) {
    // Rewind so the fixed-offset reads start at the top of the block.
    io_->seek(cur_pos, BasicIo::beg);

    // NOTE(review): buf is not zeroed and these reads are shorter than the
    // buffer, so the char* assignments rely on the payload containing its own
    // NUL terminators — confirm against real NIKO samples.
    io_->readOrThrow(buf.data(), 24);
    xmpData_["Xmp.video.Make"] = buf.data();
    io_->readOrThrow(buf.data(), 14);
    xmpData_["Xmp.video.Model"] = buf.data();
    // Exposure time is stored as 10x the denominator of 1/x seconds.
    io_->readOrThrow(buf.data(), 4);
    xmpData_["Xmp.video.ExposureTime"] = stringFormat("1/{}", std::ceil(buf.read_uint32(0, littleEndian) / 10.0));
    // The next values are little-endian numerator/denominator pairs.
    io_->readOrThrow(buf.data(), 4);
    io_->readOrThrow(buf2.data(), 4);
    xmpData_["Xmp.video.FNumber"] =
        buf.read_uint32(0, littleEndian) / static_cast<double>(buf2.read_uint32(0, littleEndian));
    io_->readOrThrow(buf.data(), 4);
    io_->readOrThrow(buf2.data(), 4);
    xmpData_["Xmp.video.ExposureCompensation"] =
        buf.read_uint32(0, littleEndian) / static_cast<double>(buf2.read_uint32(0, littleEndian));
    io_->readOrThrow(buf.data(), 10);  // 10 bytes skipped (unknown field)
    io_->readOrThrow(buf.data(), 4);
    if (auto td = Exiv2::find(whiteBalance, buf.read_uint32(0, littleEndian)))
      xmpData_["Xmp.video.WhiteBalance"] = _(td->label_);
    io_->readOrThrow(buf.data(), 4);
    io_->readOrThrow(buf2.data(), 4);
    xmpData_["Xmp.video.FocalLength"] =
        buf.read_uint32(0, littleEndian) / static_cast<double>(buf2.read_uint32(0, littleEndian));
    io_->seek(95L, BasicIo::cur);  // skip to the software-name field
    io_->readOrThrow(buf.data(), 48);
    buf.write_uint8(48, 0);  // force NUL termination of the 48-byte string
    xmpData_["Xmp.video.Software"] = buf.data();
    io_->readOrThrow(buf.data(), 4);
    xmpData_["Xmp.video.ISO"] = buf.read_uint32(0, littleEndian);
  }

  // Resynchronise to the end of the block whether or not it was recognised.
  io_->seek(cur_pos + size, BasicIo::beg);
}  // QuickTimeVideo::CameraTagsDecoder
848 | | |
// Walk the sub-atoms of a user-data ("udta") atom. Vendor containers (DcMD,
// NCDT) recurse back into this function; other known tags dispatch to their
// specific decoders or are stored directly as XMP values.
// recursion_depth guards against maliciously nested files.
void QuickTimeVideo::userDataDecoder(size_t outer_size, size_t recursion_depth) {
  enforce(recursion_depth < max_recursion_depth_, Exiv2::ErrorCode::kerCorruptedMetadata);
  size_t cur_pos = io_->tell();
  const TagVocabulary* td;
  const TagVocabulary* tv;
  const TagVocabulary* tv_internal;

  const long bufMinSize = 100;
  DataBuf buf(bufMinSize);
  size_t size_internal = outer_size;
  std::memset(buf.data(), 0x0, buf.size());

  // Each iteration consumes one sub-atom: 4-byte size, 4-byte tag, payload.
  while ((size_internal / 4 != 0) && (size_internal > 0)) {
    buf.data()[4] = '\0';  // terminate so toString() below sees a 4-char string
    io_->readOrThrow(buf.data(), 4);
    const size_t size = buf.read_uint32(0, bigEndian);
    // A sub-atom claiming to be larger than what remains means corruption.
    if (size > size_internal)
      break;
    size_internal -= size;
    io_->readOrThrow(buf.data(), 4);

    // 0xA9 ('©') prefixes Apple metadata tags; normalise it for the lookup.
    if (buf.data()[0] == 169)
      buf.data()[0] = ' ';
    td = Exiv2::find(userDatatags, Exiv2::toString(buf.data()));

    tv = Exiv2::find(userDataReferencetags, Exiv2::toString(buf.data()));

    // Too small to contain any payload worth decoding.
    if (size <= 12)
      break;

    if (equalsQTimeTag(buf, "DcMD") || equalsQTimeTag(buf, "NCDT"))
      userDataDecoder(size - 8, recursion_depth + 1);  // vendor container: recurse

    else if (equalsQTimeTag(buf, "NCTG"))
      NikonTagsDecoder(size - 8);

    else if (equalsQTimeTag(buf, "TAGS"))
      CameraTagsDecoder(size - 8);

    else if (equalsQTimeTag(buf, "CNCV") || equalsQTimeTag(buf, "CNFV") || equalsQTimeTag(buf, "CNMN") ||
             equalsQTimeTag(buf, "NCHD") || equalsQTimeTag(buf, "FFMV")) {
      // String-valued vendor tags: whole payload after the 8-byte header.
      enforce(tv, Exiv2::ErrorCode::kerCorruptedMetadata);
      xmpData_[_(tv->label_)] = readString(*io_, size - 8);
    }

    else if (equalsQTimeTag(buf, "CMbo") || equalsQTimeTag(buf, "Cmbo")) {
      // Canon byte-order marker: a 2-character code mapped through a table.
      enforce(tv, Exiv2::ErrorCode::kerCorruptedMetadata);
      io_->readOrThrow(buf.data(), 2);
      buf.data()[2] = '\0';
      tv_internal = Exiv2::find(cameraByteOrderTags, Exiv2::toString(buf.data()));

      if (tv_internal)
        xmpData_[_(tv->label_)] = _(tv_internal->label_);
      else
        xmpData_[_(tv->label_)] = buf.data();  // unknown code: store raw
    }

    else if (tv) {
      // Generic reference tag: skip 4 more header bytes, store the rest.
      io_->readOrThrow(buf.data(), 4);
      xmpData_[_(tv->label_)] = readString(*io_, size - 12);
    }

    else if (td)
      tagDecoder(buf, size - 8, recursion_depth + 1);
  }

  // Resynchronise to the end of the enclosing atom.
  io_->seek(cur_pos + outer_size, BasicIo::beg);
}  // QuickTimeVideo::userDataDecoder
917 | | |
// Decode Nikon's "NCTG" tag list. Entries are laid out as: 4-byte tag ID,
// 2-byte data type, 2-byte element count, then the payload. Two IDs
// (0x2000023 picture-control, 0x2000024 time-zone) have fixed binary
// layouts; all others are decoded by their TIFF-style data type. At most
// 100 entries are processed as a corruption guard.
void QuickTimeVideo::NikonTagsDecoder(size_t size) {
  size_t cur_pos = io_->tell();
  DataBuf buf(201);     // payload buffer; one spare byte for NUL termination
  DataBuf buf2(4 + 1);
  uint32_t TagID = 0;
  uint16_t dataLength = 0;
  uint16_t dataType = 2;
  const TagDetails* td;
  const TagDetails* td2;

  for (int i = 0; i < 100; i++) {
    io_->readOrThrow(buf.data(), 4);
    TagID = buf.read_uint32(0, bigEndian);
    td = Exiv2::find(NikonNCTGTags, TagID);

    io_->readOrThrow(buf.data(), 2);
    dataType = buf.read_uint16(0, bigEndian);

    std::memset(buf.data(), 0x0, buf.size());
    io_->readOrThrow(buf.data(), 2);  // element count (interpreted per-type below)

    if (TagID == 0x2000023) {  // PictureControlData: fixed-layout sub-record
      size_t local_pos = io_->tell();
      dataLength = buf.read_uint16(0, bigEndian);
      std::memset(buf.data(), 0x0, buf.size());

      // Three NUL-padded strings at fixed offsets.
      io_->readOrThrow(buf.data(), 4);
      xmpData_["Xmp.video.PictureControlVersion"] = buf.data();
      io_->readOrThrow(buf.data(), 20);
      xmpData_["Xmp.video.PictureControlName"] = buf.data();
      io_->readOrThrow(buf.data(), 20);
      xmpData_["Xmp.video.PictureControlBase"] = buf.data();
      io_->readOrThrow(buf.data(), 4);  // 4 bytes skipped (unknown field)
      std::memset(buf.data(), 0x0, buf.size());

      // The remaining fields are single bytes; the low 3 bits select a value
      // which is mapped through a lookup table where one exists, otherwise
      // the raw masked value is stored.
      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(PictureControlAdjust, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.PictureControlAdjust"] = _(td2->label_);
      else
        xmpData_["Xmp.video.PictureControlAdjust"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(NormalSoftHard, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.PictureControlQuickAdjust"] = _(td2->label_);

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(NormalSoftHard, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.Sharpness"] = _(td2->label_);
      else
        xmpData_["Xmp.video.Sharpness"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(NormalSoftHard, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.Contrast"] = _(td2->label_);
      else
        xmpData_["Xmp.video.Contrast"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(NormalSoftHard, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.Brightness"] = _(td2->label_);
      else
        xmpData_["Xmp.video.Brightness"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(Saturation, static_cast<int>(buf.data()[0]) & 7);
      if (td2)
        xmpData_["Xmp.video.Saturation"] = _(td2->label_);
      else
        xmpData_["Xmp.video.Saturation"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      xmpData_["Xmp.video.HueAdjustment"] = static_cast<int>(buf.data()[0]) & 7;

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(FilterEffect, static_cast<int>(buf.data()[0]));
      if (td2)
        xmpData_["Xmp.video.FilterEffect"] = _(td2->label_);
      else
        xmpData_["Xmp.video.FilterEffect"] = static_cast<int>(buf.data()[0]);

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(ToningEffect, static_cast<int>(buf.data()[0]));
      if (td2)
        xmpData_["Xmp.video.ToningEffect"] = _(td2->label_);
      else
        xmpData_["Xmp.video.ToningEffect"] = static_cast<int>(buf.data()[0]);

      io_->readOrThrow(buf.data(), 1);
      xmpData_["Xmp.video.ToningSaturation"] = static_cast<int>(buf.data()[0]);

      // Jump past any unread remainder of the sub-record.
      io_->seek(local_pos + dataLength, BasicIo::beg);
    }

    else if (TagID == 0x2000024) {  // time-zone sub-record, fixed layout
      size_t local_pos = io_->tell();
      dataLength = buf.read_uint16(0, bigEndian);
      std::memset(buf.data(), 0x0, buf.size());

      io_->readOrThrow(buf.data(), 2);
      xmpData_["Xmp.video.TimeZone"] = Exiv2::getShort(buf.data(), bigEndian);
      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(YesNo, static_cast<int>(buf.data()[0]));
      if (td2)
        xmpData_["Xmp.video.DayLightSavings"] = _(td2->label_);

      io_->readOrThrow(buf.data(), 1);
      td2 = Exiv2::find(DateDisplayFormat, static_cast<int>(buf.data()[0]));
      if (td2)
        xmpData_["Xmp.video.DateDisplayFormat"] = _(td2->label_);

      io_->seek(local_pos + dataLength, BasicIo::beg);
    }

    else if (dataType == 2 || dataType == 7) {  // ASCII string / undefined bytes
      dataLength = buf.read_uint16(0, bigEndian);
      std::memset(buf.data(), 0x0, buf.size());

      // Sanity check with an "unreasonably" large number
      if (dataLength >= buf.size()) {
#ifndef SUPPRESS_WARNINGS
        EXV_ERROR << "Xmp.video Nikon Tags, dataLength was found to be larger than 200."
                  << " Entries considered invalid. Not Processed.\n";
#endif
        io_->seek(io_->tell() + dataLength, BasicIo::beg);
        buf.data()[0] = '\0';
      } else {
        io_->readOrThrow(buf.data(), dataLength);
        buf.data()[dataLength] = '\0';  // guarantee NUL termination
      }

      if (td) {
        xmpData_[_(td->label_)] = buf.data();
      }
    } else if (dataType == 4) {  // uint32 array: only the first element is stored
      dataLength = buf.read_uint16(0, bigEndian) * 4;
      std::memset(buf.data(), 0x0, buf.size());
      io_->readOrThrow(buf.data(), 4);
      if (td)
        xmpData_[_(td->label_)] = buf.read_uint32(0, bigEndian);

      // Sanity check with an "unreasonably" large number
      if (dataLength > 200 || dataLength < 4) {
#ifndef SUPPRESS_WARNINGS
        EXV_ERROR << "Xmp.video Nikon Tags, dataLength was found to be of inappropriate size."
                  << " Entries considered invalid. Not Processed.\n";
#endif
        io_->seek(io_->tell() + dataLength - 4, BasicIo::beg);
      } else
        io_->readOrThrow(buf.data(), dataLength - 4);  // consume remaining elements
    } else if (dataType == 3) {  // uint16 array: only the first element is stored
      dataLength = buf.read_uint16(0, bigEndian) * 2;
      std::memset(buf.data(), 0x0, buf.size());
      io_->readOrThrow(buf.data(), 2);
      if (td)
        xmpData_[_(td->label_)] = buf.read_uint16(0, bigEndian);

      // Sanity check with an "unreasonably" large number
      if (dataLength > 200 || dataLength < 2) {
#ifndef SUPPRESS_WARNINGS
        EXV_ERROR << "Xmp.video Nikon Tags, dataLength was found to be of inappropriate size."
                  << " Entries considered invalid. Not Processed.\n";
#endif
        io_->seek(io_->tell() + dataLength - 2, BasicIo::beg);
      } else
        io_->readOrThrow(buf.data(), dataLength - 2);
    } else if (dataType == 5) {  // rational (num/denom uint32 pair)
      dataLength = buf.read_uint16(0, bigEndian) * 8;
      std::memset(buf.data(), 0x0, buf.size());
      io_->readOrThrow(buf.data(), 4);
      io_->readOrThrow(buf2.data(), 4);
      if (td)
        xmpData_[_(td->label_)] =
            static_cast<double>(buf.read_uint32(0, bigEndian)) / static_cast<double>(buf2.read_uint32(0, bigEndian));

      // Sanity check with an "unreasonably" large number
      if (dataLength > 200 || dataLength < 8) {
#ifndef SUPPRESS_WARNINGS
        EXV_ERROR << "Xmp.video Nikon Tags, dataLength was found to be of inappropriate size."
                  << " Entries considered invalid. Not Processed.\n";
#endif
        io_->seek(io_->tell() + dataLength - 8, BasicIo::beg);
      } else
        io_->readOrThrow(buf.data(), dataLength - 8);
    } else if (dataType == 8) {  // pair of uint16, stored as "a.b"
      dataLength = buf.read_uint16(0, bigEndian) * 2;
      std::memset(buf.data(), 0x0, buf.size());
      io_->readOrThrow(buf.data(), 2);
      io_->readOrThrow(buf2.data(), 2);
      if (td)
        xmpData_[_(td->label_)] = stringFormat("{}.{}", buf.read_uint16(0, bigEndian), buf2.read_uint16(0, bigEndian));

      // Sanity check with an "unreasonably" large number
      if (dataLength > 200 || dataLength < 4) {
#ifndef SUPPRESS_WARNINGS
        EXV_ERROR << "Xmp.video Nikon Tags, dataLength was found to be of inappropriate size."
                  << " Entries considered invalid. Not Processed.\n";
#endif
        io_->seek(io_->tell() + dataLength - 4, BasicIo::beg);
      } else
        io_->readOrThrow(buf.data(), dataLength - 4);
    }
  }

  // Resynchronise to the end of the NCTG block.
  io_->seek(cur_pos + size, BasicIo::beg);
}  // QuickTimeVideo::NikonTagsDecoder
1128 | | |
1129 | 0 | void QuickTimeVideo::setMediaStream() { |
1130 | 0 | size_t current_position = io_->tell(); |
1131 | 0 | DataBuf buf(4 + 1); |
1132 | |
|
1133 | 0 | while (!io_->eof()) { |
1134 | 0 | io_->readOrThrow(buf.data(), 4); |
1135 | 0 | if (equalsQTimeTag(buf, "hdlr")) { |
1136 | 0 | io_->readOrThrow(buf.data(), 4); |
1137 | 0 | io_->readOrThrow(buf.data(), 4); |
1138 | 0 | io_->readOrThrow(buf.data(), 4); |
1139 | |
|
1140 | 0 | if (equalsQTimeTag(buf, "vide")) |
1141 | 0 | currentStream_ = Video; |
1142 | 0 | else if (equalsQTimeTag(buf, "soun")) |
1143 | 0 | currentStream_ = Audio; |
1144 | 0 | else if (equalsQTimeTag(buf, "hint")) |
1145 | 0 | currentStream_ = Hint; |
1146 | 0 | else |
1147 | 0 | currentStream_ = GenMediaHeader; |
1148 | 0 | break; |
1149 | 0 | } |
1150 | 0 | } |
1151 | |
|
1152 | 0 | io_->seek(current_position, BasicIo::beg); |
1153 | 0 | } // QuickTimeVideo::setMediaStream |
1154 | | |
1155 | 35 | void QuickTimeVideo::timeToSampleDecoder() { |
1156 | 35 | DataBuf buf(4 + 1); |
1157 | 35 | io_->readOrThrow(buf.data(), 4); |
1158 | 35 | io_->readOrThrow(buf.data(), 4); |
1159 | 35 | uint64_t totalframes = 0; |
1160 | 35 | uint64_t timeOfFrames = 0; |
1161 | 35 | const uint32_t noOfEntries = buf.read_uint32(0, bigEndian); |
1162 | | |
1163 | 618 | for (uint32_t i = 0; i < noOfEntries; i++) { |
1164 | 583 | io_->readOrThrow(buf.data(), 4); |
1165 | 583 | const uint64_t temp = buf.read_uint32(0, bigEndian); |
1166 | 583 | totalframes = Safe::add(totalframes, temp); |
1167 | 583 | io_->readOrThrow(buf.data(), 4); |
1168 | 583 | timeOfFrames = Safe::add(timeOfFrames, temp * buf.read_uint32(0, bigEndian)); |
1169 | 583 | } |
1170 | 35 | if (currentStream_ == Video) { |
1171 | 0 | if (timeOfFrames == 0) |
1172 | 0 | timeOfFrames = 1; |
1173 | 0 | xmpData_["Xmp.video.FrameRate"] = |
1174 | 0 | static_cast<double>(totalframes) * static_cast<double>(mdhdTimeScale_) / static_cast<double>(timeOfFrames); |
1175 | 0 | } |
1176 | 35 | } // QuickTimeVideo::timeToSampleDecoder |
1177 | | |
1178 | 0 | void QuickTimeVideo::sampleDesc(size_t size) { |
1179 | 0 | DataBuf buf(100); |
1180 | 0 | size_t cur_pos = io_->tell(); |
1181 | 0 | io_->readOrThrow(buf.data(), 4); |
1182 | 0 | io_->readOrThrow(buf.data(), 4); |
1183 | 0 | const uint32_t noOfEntries = buf.read_uint32(0, bigEndian); |
1184 | |
|
1185 | 0 | for (uint32_t i = 0; i < noOfEntries; i++) { |
1186 | 0 | if (currentStream_ == Video) |
1187 | 0 | imageDescDecoder(); |
1188 | 0 | else if (currentStream_ == Audio) |
1189 | 0 | audioDescDecoder(); |
1190 | 0 | else |
1191 | 0 | break; |
1192 | 0 | } |
1193 | 0 | io_->seek(Safe::add(cur_pos, size), BasicIo::beg); |
1194 | 0 | } // QuickTimeVideo::sampleDesc |
1195 | | |
// Decode one audio sample-description entry. The 82-byte record is walked in
// 4-byte words; `i` indexes the field being read (AudioFormat, AudioVendorID,
// AudioChannels, AudioSampleRate are enum constants naming word positions).
void QuickTimeVideo::audioDescDecoder() {
  DataBuf buf(40);
  std::memset(buf.data(), 0x0, buf.size());
  buf.data()[4] = '\0';  // terminate so toString() sees a 4-char string
  io_->readOrThrow(buf.data(), 4);  // leading 4 bytes discarded
  size_t size = 82;

  const TagVocabulary* td;

  // Loop runs while at least 4 bytes remain (size / 4 != 0 <=> size >= 4).
  for (int i = 0; size / 4 != 0; size -= 4, i++) {
    io_->readOrThrow(buf.data(), 4);
    switch (i) {
      case AudioFormat:
        td = Exiv2::find(qTimeFileType, Exiv2::toString(buf.data()));
        if (td)
          xmpData_["Xmp.audio.Compressor"] = _(td->label_);
        else
          xmpData_["Xmp.audio.Compressor"] = buf.data();  // unknown: store raw 4CC
        break;
      case AudioVendorID:
        td = Exiv2::find(vendorIDTags, Exiv2::toString(buf.data()));
        if (td)
          xmpData_["Xmp.audio.VendorID"] = _(td->label_);
        break;
      case AudioChannels:
        // Word holds two uint16s: channel count then bits-per-sample.
        xmpData_["Xmp.audio.ChannelType"] = buf.read_uint16(0, bigEndian);
        xmpData_["Xmp.audio.BitsPerSample"] = ((buf.data()[2] * 256) + buf.data()[3]);
        break;
      case AudioSampleRate:
        // 16.16 fixed point; fraction approximated as hundredths.
        xmpData_["Xmp.audio.SampleRate"] =
            buf.read_uint16(0, bigEndian) + ((buf.data()[2] * 256 + buf.data()[3]) * 0.01);
        break;
      default:
        break;
    }
  }
  io_->readOrThrow(buf.data(), static_cast<long>(size % 4));  // cause size is so small, this cast should be right.
}  // QuickTimeVideo::audioDescDecoder
1234 | | |
// Decode one video sample-description entry. The 82-byte record is walked in
// 4-byte words indexed by `i`; two fields (YResolution, CompressorName) read
// extra bytes inside their case and shrink `size` accordingly.
void QuickTimeVideo::imageDescDecoder() {
  DataBuf buf(40);
  std::memset(buf.data(), 0x0, buf.size());
  buf.data()[4] = '\0';  // terminate so toString() sees a 4-char string
  io_->readOrThrow(buf.data(), 4);  // leading 4 bytes discarded
  size_t size = 82;

  const TagVocabulary* td;

  // Loop runs while at least 4 bytes remain (size / 4 != 0 <=> size >= 4).
  for (int i = 0; size / 4 != 0; size -= 4, i++) {
    io_->readOrThrow(buf.data(), 4);

    switch (i) {
      case codec:
        td = Exiv2::find(qTimeFileType, Exiv2::toString(buf.data()));
        if (td)
          xmpData_["Xmp.video.Codec"] = _(td->label_);
        else
          xmpData_["Xmp.video.Codec"] = buf.data();  // unknown: store raw 4CC
        break;
      case VendorID:
        td = Exiv2::find(vendorIDTags, Exiv2::toString(buf.data()));
        if (td)
          xmpData_["Xmp.video.VendorID"] = _(td->label_);
        break;
      case SourceImageWidth_Height:
        // Word holds two uint16s: width then height.
        xmpData_["Xmp.video.SourceImageWidth"] = buf.read_uint16(0, bigEndian);
        xmpData_["Xmp.video.SourceImageHeight"] = ((buf.data()[2] * 256) + buf.data()[3]);
        break;
      case XResolution:
        // 16.16 fixed point; fraction approximated as hundredths.
        xmpData_["Xmp.video.XResolution"] =
            buf.read_uint16(0, bigEndian) + ((buf.data()[2] * 256 + buf.data()[3]) * 0.01);
        break;
      case YResolution:
        xmpData_["Xmp.video.YResolution"] =
            buf.read_uint16(0, bigEndian) + ((buf.data()[2] * 256 + buf.data()[3]) * 0.01);
        io_->readOrThrow(buf.data(), 3);  // 3 extra bytes belong to this field
        size -= 3;
        break;
      case CompressorName:
        io_->readOrThrow(buf.data(), 32);  // 32-byte Pascal-style name field
        size -= 32;
        xmpData_["Xmp.video.Compressor"] = buf.data();
        break;
      default:
        break;
    }
  }
  io_->readOrThrow(buf.data(), static_cast<long>(size % 4));
  // The first of the trailing bytes carries the bit depth.
  xmpData_["Xmp.video.BitDepth"] = static_cast<int>(buf.read_uint8(0));
}  // QuickTimeVideo::imageDescDecoder
1286 | | |
1287 | 0 | void QuickTimeVideo::multipleEntriesDecoder(size_t recursion_depth) { |
1288 | 0 | enforce(recursion_depth < max_recursion_depth_, Exiv2::ErrorCode::kerCorruptedMetadata); |
1289 | 0 | DataBuf buf(4 + 1); |
1290 | 0 | io_->readOrThrow(buf.data(), 4); |
1291 | 0 | io_->readOrThrow(buf.data(), 4); |
1292 | 0 | uint32_t noOfEntries; |
1293 | |
|
1294 | 0 | noOfEntries = buf.read_uint32(0, bigEndian); |
1295 | |
|
1296 | 0 | for (uint32_t i = 0; i < noOfEntries && continueTraversing_; i++) { |
1297 | 0 | decodeBlock(recursion_depth + 1); |
1298 | 0 | } |
1299 | 0 | } // QuickTimeVideo::multipleEntriesDecoder |
1300 | | |
1301 | 0 | void QuickTimeVideo::videoHeaderDecoder(size_t size) { |
1302 | 0 | DataBuf buf(3); |
1303 | 0 | std::memset(buf.data(), 0x0, buf.size()); |
1304 | 0 | buf.data()[2] = '\0'; |
1305 | 0 | currentStream_ = Video; |
1306 | |
|
1307 | 0 | const TagDetails* td; |
1308 | |
|
1309 | 0 | for (int i = 0; size / 2 != 0; size -= 2, i++) { |
1310 | 0 | io_->readOrThrow(buf.data(), 2); |
1311 | |
|
1312 | 0 | switch (i) { |
1313 | 0 | case GraphicsMode: |
1314 | 0 | td = Exiv2::find(graphicsModetags, buf.read_uint16(0, bigEndian)); |
1315 | 0 | if (td) |
1316 | 0 | xmpData_["Xmp.video.GraphicsMode"] = _(td->label_); |
1317 | 0 | break; |
1318 | 0 | case OpColor: |
1319 | 0 | xmpData_["Xmp.video.OpColor"] = buf.read_uint16(0, bigEndian); |
1320 | 0 | break; |
1321 | 0 | default: |
1322 | 0 | break; |
1323 | 0 | } |
1324 | 0 | } |
1325 | 0 | io_->readOrThrow(buf.data(), size % 2); |
1326 | 0 | } // QuickTimeVideo::videoHeaderDecoder |
1327 | | |
// Decode the handler-reference ("hdlr") atom. The first five 4-byte words
// are read; the positions named by the HandlerClass/HandlerType/
// HandlerVendorID enum constants are looked up in vocabulary tables and
// stored under the video or audio XMP namespace depending on the stream.
void QuickTimeVideo::handlerDecoder(size_t size) {
  size_t cur_pos = io_->tell();
  DataBuf buf(100);
  std::memset(buf.data(), 0x0, buf.size());
  buf.data()[4] = '\0';  // terminate so toString() sees a 4-char string

  const TagVocabulary* tv;

  for (int i = 0; i < 5; i++) {
    io_->readOrThrow(buf.data(), 4);

    switch (i) {
      case HandlerClass:
        tv = Exiv2::find(handlerClassTags, Exiv2::toString(buf.data()));
        if (tv) {
          if (currentStream_ == Video)
            xmpData_["Xmp.video.HandlerClass"] = _(tv->label_);
          else if (currentStream_ == Audio)
            xmpData_["Xmp.audio.HandlerClass"] = _(tv->label_);
        }
        break;
      case HandlerType:
        tv = Exiv2::find(handlerTypeTags, Exiv2::toString(buf.data()));
        if (tv) {
          if (currentStream_ == Video)
            xmpData_["Xmp.video.HandlerType"] = _(tv->label_);
          else if (currentStream_ == Audio)
            xmpData_["Xmp.audio.HandlerType"] = _(tv->label_);
        }
        break;
      case HandlerVendorID:
        tv = Exiv2::find(vendorIDTags, Exiv2::toString(buf.data()));
        if (tv) {
          if (currentStream_ == Video)
            xmpData_["Xmp.video.HandlerVendorID"] = _(tv->label_);
          else if (currentStream_ == Audio)
            xmpData_["Xmp.audio.HandlerVendorID"] = _(tv->label_);
        }
        break;
    }
  }
  // Resynchronise to the end of the atom.
  io_->seek(cur_pos + size, BasicIo::beg);
}  // QuickTimeVideo::handlerDecoder
1371 | | |
1372 | 625 | void QuickTimeVideo::fileTypeDecoder(size_t size) { |
1373 | 625 | DataBuf buf(5); |
1374 | 625 | std::memset(buf.data(), 0x0, buf.size()); |
1375 | 625 | buf.data()[4] = '\0'; |
1376 | 625 | Exiv2::Value::UniquePtr v = Exiv2::Value::create(Exiv2::xmpSeq); |
1377 | 625 | const TagVocabulary* td; |
1378 | | |
1379 | 337k | for (int i = 0; size / 4 != 0; size -= 4, i++) { |
1380 | 336k | io_->readOrThrow(buf.data(), 4); |
1381 | 336k | td = Exiv2::find(qTimeFileType, Exiv2::toString(buf.data())); |
1382 | | |
1383 | 336k | switch (i) { |
1384 | 619 | case 0: |
1385 | 619 | if (td) |
1386 | 563 | xmpData_["Xmp.video.MajorBrand"] = _(td->label_); |
1387 | 619 | break; |
1388 | 527 | case 1: |
1389 | 527 | xmpData_["Xmp.video.MinorVersion"] = buf.read_uint32(0, bigEndian); |
1390 | 527 | break; |
1391 | 335k | default: |
1392 | 335k | if (td) |
1393 | 98 | v->read(_(td->label_)); |
1394 | 335k | else |
1395 | 335k | v->read(Exiv2::toString(buf.data())); |
1396 | 335k | break; |
1397 | 336k | } |
1398 | 336k | } |
1399 | 625 | xmpData_.add(Exiv2::XmpKey("Xmp.video.CompatibleBrands"), v.get()); |
1400 | 625 | io_->readOrThrow(buf.data(), size % 4); |
1401 | 625 | } // QuickTimeVideo::fileTypeDecoder |
1402 | | |
// Decode the media header ("mdhd") atom, walked in 4-byte words indexed by
// `i` against the MediaHeader* enum constants. Values are stored under the
// video or audio XMP namespace depending on the current stream. The
// timescale read here is also cached in mdhdTimeScale_ for later frame-rate
// computation, and is used to convert the raw duration to seconds.
void QuickTimeVideo::mediaHeaderDecoder(size_t size) {
  DataBuf buf(5);
  std::memset(buf.data(), 0x0, buf.size());
  buf.data()[4] = '\0';
  int64_t time_scale = 1;  // clamped to >= 1 below, so division is safe

  for (int i = 0; size / 4 != 0; size -= 4, i++) {
    io_->readOrThrow(buf.data(), 4);

    switch (i) {
      case MediaHeaderVersion:
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaHeaderVersion"] = static_cast<int>(buf.read_uint8(0));
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaHeaderVersion"] = static_cast<int>(buf.read_uint8(0));
        break;
      case MediaCreateDate:
        // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created.
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaCreateDate"] = buf.read_uint32(0, bigEndian);
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaCreateDate"] = buf.read_uint32(0, bigEndian);
        break;
      case MediaModifyDate:
        // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created.
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaModifyDate"] = buf.read_uint32(0, bigEndian);
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaModifyDate"] = buf.read_uint32(0, bigEndian);
        break;
      case MediaTimeScale:
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaTimeScale"] = buf.read_uint32(0, bigEndian);
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaTimeScale"] = buf.read_uint32(0, bigEndian);
        // Clamp to 1 so the duration division below cannot divide by zero.
        time_scale = std::max(1U, buf.read_uint32(0, bigEndian));
        mdhdTimeScale_ = time_scale;
        break;
      case MediaDuration:
        // Duration is expressed in timescale units; convert to seconds.
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaDuration"] = time_scale ? buf.read_uint32(0, bigEndian) / time_scale : 0;
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaDuration"] = time_scale ? buf.read_uint32(0, bigEndian) / time_scale : 0;
        break;
      case MediaLanguageCode:
        if (currentStream_ == Video)
          xmpData_["Xmp.video.MediaLangCode"] = buf.read_uint16(0, bigEndian);
        else if (currentStream_ == Audio)
          xmpData_["Xmp.audio.MediaLangCode"] = buf.read_uint16(0, bigEndian);
        break;

      default:
        break;
    }
  }
  io_->readOrThrow(buf.data(), size % 4);  // consume any trailing partial word
}  // QuickTimeVideo::mediaHeaderDecoder
1460 | | |
1461 | 0 | void QuickTimeVideo::trackHeaderDecoder(size_t size) { |
1462 | 0 | DataBuf buf(5); |
1463 | 0 | std::memset(buf.data(), 0x0, buf.size()); |
1464 | 0 | buf.data()[4] = '\0'; |
1465 | 0 | int64_t temp = 0; |
1466 | |
|
1467 | 0 | for (int i = 0; size / 4 != 0; size -= 4, i++) { |
1468 | 0 | io_->readOrThrow(buf.data(), 4); |
1469 | |
|
1470 | 0 | switch (i) { |
1471 | 0 | case TrackHeaderVersion: |
1472 | 0 | if (currentStream_ == Video) |
1473 | 0 | xmpData_["Xmp.video.TrackHeaderVersion"] = static_cast<int>(buf.read_uint8(0)); |
1474 | 0 | else if (currentStream_ == Audio) |
1475 | 0 | xmpData_["Xmp.audio.TrackHeaderVersion"] = static_cast<int>(buf.read_uint8(0)); |
1476 | 0 | break; |
1477 | 0 | case TrackCreateDate: |
1478 | | // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created. |
1479 | 0 | if (currentStream_ == Video) |
1480 | 0 | xmpData_["Xmp.video.TrackCreateDate"] = buf.read_uint32(0, bigEndian); |
1481 | 0 | else if (currentStream_ == Audio) |
1482 | 0 | xmpData_["Xmp.audio.TrackCreateDate"] = buf.read_uint32(0, bigEndian); |
1483 | 0 | break; |
1484 | 0 | case TrackModifyDate: |
1485 | | // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created. |
1486 | 0 | if (currentStream_ == Video) |
1487 | 0 | xmpData_["Xmp.video.TrackModifyDate"] = buf.read_uint32(0, bigEndian); |
1488 | 0 | else if (currentStream_ == Audio) |
1489 | 0 | xmpData_["Xmp.audio.TrackModifyDate"] = buf.read_uint32(0, bigEndian); |
1490 | 0 | break; |
1491 | 0 | case TrackID: |
1492 | 0 | if (currentStream_ == Video) |
1493 | 0 | xmpData_["Xmp.video.TrackID"] = buf.read_uint32(0, bigEndian); |
1494 | 0 | else if (currentStream_ == Audio) |
1495 | 0 | xmpData_["Xmp.audio.TrackID"] = buf.read_uint32(0, bigEndian); |
1496 | 0 | break; |
1497 | 0 | case TrackDuration: |
1498 | 0 | if (currentStream_ == Video) |
1499 | 0 | xmpData_["Xmp.video.TrackDuration"] = mvhdTimeScale_ ? buf.read_uint32(0, bigEndian) / mvhdTimeScale_ : 0; |
1500 | 0 | else if (currentStream_ == Audio) |
1501 | 0 | xmpData_["Xmp.audio.TrackDuration"] = mvhdTimeScale_ ? buf.read_uint32(0, bigEndian) / mvhdTimeScale_ : 0; |
1502 | 0 | break; |
1503 | 0 | case TrackLayer: |
1504 | 0 | if (currentStream_ == Video) |
1505 | 0 | xmpData_["Xmp.video.TrackLayer"] = buf.read_uint16(0, bigEndian); |
1506 | 0 | else if (currentStream_ == Audio) |
1507 | 0 | xmpData_["Xmp.audio.TrackLayer"] = buf.read_uint16(0, bigEndian); |
1508 | 0 | break; |
1509 | 0 | case TrackVolume: |
1510 | 0 | if (currentStream_ == Video) |
1511 | 0 | xmpData_["Xmp.video.TrackVolume"] = (static_cast<int>(buf.read_uint8(0)) + (buf.data()[2] * 0.1)) * 100; |
1512 | 0 | else if (currentStream_ == Audio) |
1513 | 0 | xmpData_["Xmp.video.TrackVolume"] = (static_cast<int>(buf.read_uint8(0)) + (buf.data()[2] * 0.1)) * 100; |
1514 | 0 | break; |
1515 | 0 | case ImageWidth: |
1516 | 0 | if (currentStream_ == Video) { |
1517 | 0 | temp = buf.read_uint16(0, bigEndian) + static_cast<int64_t>((buf.data()[2] * 256 + buf.data()[3]) * 0.01); |
1518 | 0 | xmpData_["Xmp.video.Width"] = temp; |
1519 | 0 | width_ = temp; |
1520 | 0 | } |
1521 | 0 | break; |
1522 | 0 | case ImageHeight: |
1523 | 0 | if (currentStream_ == Video) { |
1524 | 0 | temp = buf.read_uint16(0, bigEndian) + static_cast<int64_t>((buf.data()[2] * 256 + buf.data()[3]) * 0.01); |
1525 | 0 | xmpData_["Xmp.video.Height"] = temp; |
1526 | 0 | height_ = temp; |
1527 | 0 | } |
1528 | 0 | break; |
1529 | 0 | default: |
1530 | 0 | break; |
1531 | 0 | } |
1532 | 0 | } |
1533 | 0 | io_->readOrThrow(buf.data(), size % 4); |
1534 | 0 | } // QuickTimeVideo::trackHeaderDecoder |
1535 | | |
1536 | 0 | void QuickTimeVideo::movieHeaderDecoder(size_t size) { |
1537 | 0 | DataBuf buf(5); |
1538 | 0 | std::memset(buf.data(), 0x0, buf.size()); |
1539 | 0 | buf.data()[4] = '\0'; |
1540 | |
|
1541 | 0 | for (int i = 0; size / 4 != 0; size -= 4, i++) { |
1542 | 0 | io_->readOrThrow(buf.data(), 4); |
1543 | |
|
1544 | 0 | switch (i) { |
1545 | 0 | case MovieHeaderVersion: |
1546 | 0 | xmpData_["Xmp.video.MovieHeaderVersion"] = static_cast<int>(buf.read_uint8(0)); |
1547 | 0 | break; |
1548 | 0 | case CreateDate: |
1549 | | // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created. |
1550 | 0 | xmpData_["Xmp.video.DateUTC"] = buf.read_uint32(0, bigEndian); |
1551 | 0 | break; |
1552 | 0 | case ModifyDate: |
1553 | | // A 32-bit integer that specifies (in seconds since midnight, January 1, 1904) when the movie atom was created. |
1554 | 0 | xmpData_["Xmp.video.ModificationDate"] = buf.read_uint32(0, bigEndian); |
1555 | 0 | break; |
1556 | 0 | case TimeScale: |
1557 | 0 | xmpData_["Xmp.video.TimeScale"] = buf.read_uint32(0, bigEndian); |
1558 | 0 | mvhdTimeScale_ = std::max(1U, buf.read_uint32(0, bigEndian)); |
1559 | 0 | break; |
1560 | 0 | case Duration: |
1561 | 0 | if (mvhdTimeScale_ != 0) { // To prevent division by zero |
1562 | 0 | xmpData_["Xmp.video.Duration"] = buf.read_uint32(0, bigEndian) * 1000 / mvhdTimeScale_; |
1563 | 0 | } |
1564 | 0 | break; |
1565 | 0 | case PreferredRate: |
1566 | 0 | xmpData_["Xmp.video.PreferredRate"] = |
1567 | 0 | buf.read_uint16(0, bigEndian) + ((buf.data()[2] * 256 + buf.data()[3]) * 0.01); |
1568 | 0 | break; |
1569 | 0 | case PreferredVolume: |
1570 | 0 | xmpData_["Xmp.video.PreferredVolume"] = (static_cast<int>(buf.read_uint8(0)) + (buf.data()[2] * 0.1)) * 100; |
1571 | 0 | break; |
1572 | 0 | case PreviewTime: |
1573 | 0 | xmpData_["Xmp.video.PreviewTime"] = buf.read_uint32(0, bigEndian); |
1574 | 0 | break; |
1575 | 0 | case PreviewDuration: |
1576 | 0 | xmpData_["Xmp.video.PreviewDuration"] = buf.read_uint32(0, bigEndian); |
1577 | 0 | break; |
1578 | 0 | case PosterTime: |
1579 | 0 | xmpData_["Xmp.video.PosterTime"] = buf.read_uint32(0, bigEndian); |
1580 | 0 | break; |
1581 | 0 | case SelectionTime: |
1582 | 0 | xmpData_["Xmp.video.SelectionTime"] = buf.read_uint32(0, bigEndian); |
1583 | 0 | break; |
1584 | 0 | case SelectionDuration: |
1585 | 0 | xmpData_["Xmp.video.SelectionDuration"] = buf.read_uint32(0, bigEndian); |
1586 | 0 | break; |
1587 | 0 | case CurrentTime: |
1588 | 0 | xmpData_["Xmp.video.CurrentTime"] = buf.read_uint32(0, bigEndian); |
1589 | 0 | break; |
1590 | 0 | case NextTrackID: |
1591 | 0 | xmpData_["Xmp.video.NextTrackID"] = buf.read_uint32(0, bigEndian); |
1592 | 0 | break; |
1593 | 0 | default: |
1594 | 0 | break; |
1595 | 0 | } |
1596 | 0 | } |
1597 | 0 | io_->readOrThrow(buf.data(), size % 4); |
1598 | 0 | } // QuickTimeVideo::movieHeaderDecoder |
1599 | | |
1600 | 647 | Image::UniquePtr newQTimeInstance(BasicIo::UniquePtr io, bool /*create*/) { |
1601 | 647 | auto image = std::make_unique<QuickTimeVideo>(std::move(io)); |
1602 | 647 | if (!image->good()) { |
1603 | 6 | return nullptr; |
1604 | 6 | } |
1605 | 641 | return image; |
1606 | 647 | } |
1607 | | |
1608 | 5.55k | bool isQTimeType(BasicIo& iIo, bool advance) { |
1609 | 5.55k | auto buf = DataBuf(12); |
1610 | 5.55k | iIo.read(buf.data(), 12); |
1611 | | |
1612 | 5.55k | if (iIo.error() || iIo.eof()) { |
1613 | 235 | return false; |
1614 | 235 | } |
1615 | 5.32k | auto qTimeTags = std::array{"PICT", "free", "ftyp", "junk", "mdat", "moov", "pict", "pnot", "skip", "uuid", "wide"}; |
1616 | | |
1617 | 5.32k | bool matched = false; |
1618 | | |
1619 | 38.2k | for (auto const& tag : qTimeTags) { |
1620 | 38.2k | auto tmp = buf.cmpBytes(4, tag, 4); |
1621 | 38.2k | if (tmp == 0) { |
1622 | | // we only match if we actually know the video type. This is done |
1623 | | // to avoid matching just on ftyp because bmffimage also has that |
1624 | | // header. |
1625 | 2.70k | if (Exiv2::find(qTimeFileType, std::string{buf.c_str(8), 4})) { |
1626 | 1.92k | matched = true; |
1627 | 1.92k | } |
1628 | 2.70k | break; |
1629 | 2.70k | } |
1630 | 38.2k | } |
1631 | | |
1632 | 5.32k | if (!advance || !matched) { |
1633 | 5.32k | iIo.seek(0L, BasicIo::beg); |
1634 | 5.32k | } |
1635 | | |
1636 | 5.32k | return matched; |
1637 | 5.55k | } |
1638 | | |
1639 | | } // namespace Exiv2 |