/src/serenity/Userland/Libraries/LibPDF/DocumentParser.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2021-2022, Matthew Olsson <mattco@serenityos.org> |
3 | | * Copyright (c) 2022, Julian Offenhäuser <offenhaeuser@protonmail.com> |
4 | | * |
5 | | * SPDX-License-Identifier: BSD-2-Clause |
6 | | */ |
7 | | |
8 | | #include <AK/BitStream.h> |
9 | | #include <AK/Endian.h> |
10 | | #include <AK/MemoryStream.h> |
11 | | #include <LibPDF/CommonNames.h> |
12 | | #include <LibPDF/Document.h> |
13 | | #include <LibPDF/DocumentParser.h> |
14 | | #include <LibPDF/ObjectDerivatives.h> |
15 | | |
16 | | namespace PDF { |
17 | | |
18 | | DocumentParser::DocumentParser(Document* document, ReadonlyBytes bytes) |
19 | 11 | : Parser(document, bytes) |
20 | 11 | { |
21 | 11 | } |
22 | | |
23 | | PDFErrorOr<Version> DocumentParser::initialize() |
24 | 11 | { |
25 | 11 | m_reader.set_reading_forwards(); |
26 | 11 | if (m_reader.remaining() == 0) |
27 | 0 | return error("Empty PDF document"); |
28 | | |
29 | 11 | auto maybe_version = parse_header(); |
30 | 11 | if (maybe_version.is_error()) { |
31 | 0 | warnln("{}", maybe_version.error().message()); |
32 | 0 | warnln("No valid PDF header detected, continuing anyway."); |
33 | 0 | maybe_version = Version { 1, 6 }; // ¯\_(ツ)_/¯ |
34 | 0 | } |
35 | | |
36 | 11 | auto const linearization_result = TRY(initialize_linearization_dict()); |
37 | | |
38 | 11 | if (linearization_result == LinearizationResult::NotLinearized) { |
39 | 5 | TRY(initialize_non_linearized_xref_table()); |
40 | 0 | return maybe_version.value(); |
41 | 5 | } |
42 | | |
43 | 6 | bool is_linearized = m_linearization_dictionary.has_value(); |
44 | 6 | if (is_linearized) { |
45 | | // If the length given in the linearization dictionary is not equal to the length |
46 | | // of the document, then this file has most likely been incrementally updated, and |
47 | | // should no longer be treated as linearized. |
48 | | // FIXME: This check requires knowing the full size of the file, while linearization |
49 | | // is all about being able to render some of it without having to download all of it. |
50 | | // PDF 2.0 Annex G.7 "Accessing an updated file" talks about this some, |
51 |       |     //        but mostly just throws its hands in the air. |
52 | 6 | is_linearized = m_linearization_dictionary.value().length_of_file == m_reader.bytes().size(); |
53 | 6 | } |
54 | | |
55 | 6 | if (is_linearized) |
56 | 0 | TRY(initialize_linearized_xref_table()); |
57 | 6 | else |
58 | 6 | TRY(initialize_non_linearized_xref_table()); |
59 | | |
60 | 6 | return maybe_version.value(); |
61 | 6 | } |
62 | | |
63 | | PDFErrorOr<Value> DocumentParser::parse_object_with_index(u32 index) |
64 | 0 | { |
65 | 0 | VERIFY(m_xref_table->has_object(index)); |
66 | | |
67 | | // PDF spec 1.7, Indirect Objects: |
68 | | // "An indirect reference to an undefined object is not an error; it is simply treated as a reference to the null object." |
69 | | // FIXME: Should this apply to the !has_object() case right above too? |
70 | 0 | if (!m_xref_table->is_object_in_use(index)) |
71 | 0 | return nullptr; |
72 | | |
73 | | // If this is called to resolve an indirect object reference while parsing another object, |
74 | | // make sure to restore the current position after parsing the indirect object, so that the |
75 | | // parser can keep parsing the original object stream afterwards. |
76 | | // parse_compressed_object_with_index() also moves the reader's position, so this needs |
77 | | // to be before the potential call to parse_compressed_object_with_index(). |
78 | 0 | class SavePoint { |
79 | 0 | public: |
80 | 0 | SavePoint(Reader& reader) |
81 | 0 | : m_reader(reader) |
82 | 0 | { |
83 | 0 | m_reader.save(); |
84 | 0 | } |
85 | 0 | ~SavePoint() { m_reader.load(); } |
86 |       | |
87 | 0 | private: |
88 | 0 | Reader& m_reader; |
89 | 0 | }; |
90 | 0 | SavePoint restore_current_position { m_reader }; |
91 |       | |
92 | 0 | if (m_xref_table->is_object_compressed(index)) |
93 |       |         // The object can be found in an object stream |
94 | 0 | return parse_compressed_object_with_index(index); |
95 | | |
96 | 0 | auto byte_offset = m_xref_table->byte_offset_for_object(index); |
97 |       | |
98 | 0 | m_reader.move_to(byte_offset); |
99 | 0 | auto indirect_value = TRY(parse_indirect_value()); |
100 | 0 | VERIFY(indirect_value->index() == index); |
101 | 0 | return indirect_value->value(); |
102 | 0 | } |
103 | | |
104 | | PDFErrorOr<size_t> DocumentParser::scan_for_header_start(ReadonlyBytes bytes) |
105 | 14 | { |
106 | | // PDF 1.7 spec, APPENDIX H, 3.4.1 "File Header": |
107 | | // "13. Acrobat viewers require only that the header appear somewhere within the first 1024 bytes of the file." |
108 | | // ...which of course means files depend on it. |
109 | | // All offsets in the file are relative to the header start, not to the start of the file. |
110 | 14 | StringView first_bytes { bytes.data(), min(bytes.size(), 1024 - "1.4"sv.length()) }; |
111 | 14 | Optional<size_t> start_offset = first_bytes.find("%PDF-"sv); |
112 | 14 | if (!start_offset.has_value()) |
113 | 3 | return Error { Error::Type::Parse, "Failed to find PDF start" }; |
114 | 11 | return start_offset.value(); |
115 | 14 | } |
116 | | |
117 | | PDFErrorOr<Version> DocumentParser::parse_header() |
118 | 11 | { |
119 | 11 | m_reader.move_to(0); |
120 | 11 | if (m_reader.remaining() < 8 || !m_reader.matches("%PDF-")) |
121 | 0 | return error("Not a PDF document"); |
122 | | |
123 | 11 | m_reader.move_by(5); |
124 | | |
125 | 11 | char major_ver = m_reader.read(); |
126 | 11 | if (major_ver != '1' && major_ver != '2') { |
127 | 0 | dbgln_if(PDF_DEBUG, "Unknown major version \"{}\"", major_ver); |
128 | 0 | return error("Unknown major version"); |
129 | 0 | } |
130 | | |
131 | 11 | if (m_reader.read() != '.') |
132 | 0 | return error("Malformed PDF version"); |
133 | | |
134 | 11 | char minor_ver = m_reader.read(); |
135 | 11 | if (minor_ver < '0' || minor_ver > '7') { |
136 | 0 | dbgln_if(PDF_DEBUG, "Unknown minor version \"{}\"", minor_ver); |
137 | 0 | return error("Unknown minor version"); |
138 | 0 | } |
139 | | |
140 | 11 | m_reader.consume_eol(); |
141 | 11 | m_reader.consume_whitespace(); |
142 | | |
143 | | // Parse optional high-byte comment, which signifies a binary file |
144 | | // FIXME: Do something with this? |
145 | 11 | auto comment = parse_comment(); |
146 | 11 | if (!comment.is_empty()) { |
147 | 9 | auto binary = comment.length() >= 4; |
148 | 9 | if (binary) { |
149 | 45 | for (size_t i = 0; i < comment.length() && binary; i++) |
150 | 36 | binary = static_cast<u8>(comment[i]) > 128; |
151 | 9 | } |
152 | 9 | } |
153 | | |
154 | 11 | return Version { major_ver - '0', minor_ver - '0' }; |
155 | 11 | } |
156 | | |
157 | | PDFErrorOr<DocumentParser::LinearizationResult> DocumentParser::initialize_linearization_dict() |
158 | 11 | { |
159 | | // parse_header() is called immediately before this, so we are at the right location. |
160 | | // There may not actually be a linearization dict, or even a valid PDF object here. |
161 | | // If that is the case, this file may be completely valid but not linearized. |
162 | | |
163 | | // If there is indeed a linearization dict, there should be an object number here. |
164 | 11 | if (!m_reader.matches_number()) |
165 | 0 | return LinearizationResult::NotLinearized; |
166 | | |
167 | | // At this point, we still don't know for sure if we are dealing with a valid object. |
168 | | |
169 | | // The linearization dict is read before decryption state is initialized. |
170 |       |     // A linearization dict only contains numbers, so the decryption dictionary is not needed (only strings and streams get decrypted, and only streams get unfiltered). |
171 | | // But we don't know if the first object is a linearization dictionary until after parsing it, so the object might be a stream. |
172 | | // If that stream is encrypted and filtered, we'd try to unfilter it while it's still encrypted, handing encrypted data to the unfiltering algorithms. |
173 | | // This makes them assert, since they can't make sense of the encrypted data. |
174 | | // So read the first object without unfiltering. |
175 | | // If it is a linearization dict, there's no stream data and this has no effect. |
176 | | // If it is a stream, this isn't a linearized file and the object will be read on demand (and unfiltered) later, when the object is lazily read via an xref entry. |
177 | 11 | set_filters_enabled(false); |
178 | 11 | auto indirect_value_or_error = parse_indirect_value(); |
179 | 11 | set_filters_enabled(true); |
180 | | |
181 | 11 | if (indirect_value_or_error.is_error()) |
182 | 1 | return LinearizationResult::NotLinearized; |
183 | | |
184 | 10 | auto dict_value = indirect_value_or_error.value()->value(); |
185 | 10 | if (!dict_value.has<NonnullRefPtr<Object>>()) |
186 | 0 | return error("Expected linearization object to be a dictionary"); |
187 | | |
188 | 10 | auto dict_object = dict_value.get<NonnullRefPtr<Object>>(); |
189 | 10 | if (!dict_object->is<DictObject>()) |
190 | 0 | return LinearizationResult::NotLinearized; |
191 | | |
192 | 10 | auto dict = dict_object->cast<DictObject>(); |
193 | | |
194 | 10 | if (!dict->contains(CommonNames::Linearized)) |
195 | 4 | return LinearizationResult::NotLinearized; |
196 | | |
197 | 6 | if (!dict->contains(CommonNames::L, CommonNames::H, CommonNames::O, CommonNames::E, CommonNames::N, CommonNames::T)) |
198 | 0 | return error("Malformed linearization dictionary"); |
199 | | |
200 | 6 | auto length_of_file = dict->get_value(CommonNames::L); |
201 | 6 | auto hint_table = dict->get_value(CommonNames::H); |
202 | 6 | auto first_page_object_number = dict->get_value(CommonNames::O); |
203 | 6 | auto offset_of_first_page_end = dict->get_value(CommonNames::E); |
204 | 6 | auto number_of_pages = dict->get_value(CommonNames::N); |
205 | 6 | auto offset_of_main_xref_table = dict->get_value(CommonNames::T); |
206 | 6 | auto first_page = dict->get(CommonNames::P).value_or({}); |
207 | | |
208 | | // Validation |
209 | 6 | if (!length_of_file.has_u32() |
210 | 6 | || !hint_table.has<NonnullRefPtr<Object>>() |
211 | 6 | || !first_page_object_number.has_u32() |
212 | 6 | || !number_of_pages.has_u16() |
213 | 6 | || !offset_of_main_xref_table.has_u32() |
214 | 6 | || (!first_page.has<Empty>() && !first_page.has_u32())) { |
215 | 0 | return error("Malformed linearization dictionary parameters"); |
216 | 0 | } |
217 | | |
218 | 6 | auto hint_table_array = hint_table.get<NonnullRefPtr<Object>>()->cast<ArrayObject>(); |
219 | 6 | auto hint_table_size = hint_table_array->size(); |
220 | 6 | if (hint_table_size != 2 && hint_table_size != 4) |
221 | 0 | return error("Expected hint table to be of length 2 or 4"); |
222 | | |
223 | 6 | auto primary_hint_stream_offset = hint_table_array->at(0); |
224 | 6 | auto primary_hint_stream_length = hint_table_array->at(1); |
225 | 6 | Value overflow_hint_stream_offset; |
226 | 6 | Value overflow_hint_stream_length; |
227 | | |
228 | 6 | if (hint_table_size == 4) { |
229 | 0 | overflow_hint_stream_offset = hint_table_array->at(2); |
230 | 0 | overflow_hint_stream_length = hint_table_array->at(3); |
231 | 0 | } |
232 | | |
233 | 6 | if (!primary_hint_stream_offset.has_u32() |
234 | 6 | || !primary_hint_stream_length.has_u32() |
235 | 6 | || (!overflow_hint_stream_offset.has<Empty>() && !overflow_hint_stream_offset.has_u32()) |
236 | 6 | || (!overflow_hint_stream_length.has<Empty>() && !overflow_hint_stream_length.has_u32())) { |
237 | 0 | return error("Malformed hint stream"); |
238 | 0 | } |
239 | | |
240 | 6 | m_linearization_dictionary = LinearizationDictionary { |
241 | 6 | length_of_file.get_u32(), |
242 | 6 | primary_hint_stream_offset.get_u32(), |
243 | 6 | primary_hint_stream_length.get_u32(), |
244 | 6 | overflow_hint_stream_offset.has<Empty>() ? NumericLimits<u32>::max() : overflow_hint_stream_offset.get_u32(), |
245 | 6 | overflow_hint_stream_length.has<Empty>() ? NumericLimits<u32>::max() : overflow_hint_stream_length.get_u32(), |
246 | 6 | first_page_object_number.get_u32(), |
247 | 6 | offset_of_first_page_end.get_u32(), |
248 | 6 | number_of_pages.get_u16(), |
249 | 6 | offset_of_main_xref_table.get_u32(), |
250 | 6 | first_page.has<Empty>() ? NumericLimits<u32>::max() : first_page.get_u32(), |
251 | 6 | }; |
252 | | |
253 | 6 | return LinearizationResult::Linearized; |
254 | 6 | } |
255 | | |
256 | | PDFErrorOr<void> DocumentParser::initialize_linearized_xref_table() |
257 | 0 | { |
258 | | // The linearization parameter dictionary has just been parsed, and the xref table |
259 | | // comes immediately after it. We are in the correct spot. |
260 | 0 | m_xref_table = TRY(parse_xref_table()); |
261 | | |
262 | | // Also parse the main xref table and merge into the first-page xref table. Note |
263 | | // that we don't use the main xref table offset from the linearization dict because |
264 | | // for some reason, it specified the offset of the whitespace after the object |
265 | | // index start and length? So it's much easier to do it this way. |
266 | 0 | auto main_xref_table_offset = m_xref_table->trailer()->get_value(CommonNames::Prev).to_int(); |
267 | 0 | m_reader.move_to(main_xref_table_offset); |
268 | 0 | auto main_xref_table = TRY(parse_xref_table()); |
269 | 0 | TRY(m_xref_table->merge(move(*main_xref_table))); |
270 |       | |
271 | 0 | return validate_xref_table_and_fix_if_necessary(); |
272 | 0 | } |
273 | | |
274 | | PDFErrorOr<void> DocumentParser::initialize_hint_tables() |
275 | 0 | { |
276 | 0 | auto linearization_dict = m_linearization_dictionary.value(); |
277 | 0 | auto primary_offset = linearization_dict.primary_hint_stream_offset; |
278 | 0 | auto overflow_offset = linearization_dict.overflow_hint_stream_offset; |
279 |       | |
280 | 0 | auto parse_hint_table = [&](size_t offset) -> RefPtr<StreamObject> { |
281 | 0 | m_reader.move_to(offset); |
282 | 0 | auto stream_indirect_value = parse_indirect_value(); |
283 | 0 | if (stream_indirect_value.is_error()) |
284 | 0 | return {}; |
285 | | |
286 | 0 | auto stream_value = stream_indirect_value.value()->value(); |
287 | 0 | if (!stream_value.has<NonnullRefPtr<Object>>()) |
288 | 0 | return {}; |
289 | | |
290 | 0 | auto stream_object = stream_value.get<NonnullRefPtr<Object>>(); |
291 | 0 | if (!stream_object->is<StreamObject>()) |
292 | 0 | return {}; |
293 | | |
294 | 0 | return stream_object->cast<StreamObject>(); |
295 | 0 | }; |
296 |       | |
297 | 0 | auto primary_hint_stream = parse_hint_table(primary_offset); |
298 | 0 | if (!primary_hint_stream) |
299 | 0 | return error("Invalid primary hint stream"); |
300 | | |
301 | 0 | RefPtr<StreamObject> overflow_hint_stream; |
302 | 0 | if (overflow_offset != NumericLimits<u32>::max()) |
303 | 0 | overflow_hint_stream = parse_hint_table(overflow_offset); |
304 |       | |
305 | 0 | ByteBuffer possible_merged_stream_buffer; |
306 | 0 | ReadonlyBytes hint_stream_bytes; |
307 |       | |
308 | 0 | if (overflow_hint_stream) { |
309 | 0 | auto primary_size = primary_hint_stream->bytes().size(); |
310 | 0 | auto overflow_size = overflow_hint_stream->bytes().size(); |
311 | 0 | auto total_size = primary_size + overflow_size; |
312 |       | |
313 | 0 | auto buffer_result = ByteBuffer::create_uninitialized(total_size); |
314 | 0 | if (buffer_result.is_error()) |
315 | 0 | return Error { Error::Type::Internal, "Failed to allocate hint stream buffer" }; |
316 | 0 | possible_merged_stream_buffer = buffer_result.release_value(); |
317 | 0 | MUST(possible_merged_stream_buffer.try_append(primary_hint_stream->bytes())); |
318 | 0 | MUST(possible_merged_stream_buffer.try_append(overflow_hint_stream->bytes())); |
319 | 0 | hint_stream_bytes = possible_merged_stream_buffer.bytes(); |
320 | 0 | } else { |
321 | 0 | hint_stream_bytes = primary_hint_stream->bytes(); |
322 | 0 | } |
323 | | |
324 | 0 | auto hint_table = TRY(parse_page_offset_hint_table(hint_stream_bytes)); |
325 | 0 | auto hint_table_entries = TRY(parse_all_page_offset_hint_table_entries(hint_table, hint_stream_bytes)); |
326 | | |
327 | | // FIXME: Do something with the hint tables |
328 | 0 | return {}; |
329 | 0 | } |
330 | | |
331 | | PDFErrorOr<void> DocumentParser::initialize_non_linearized_xref_table() |
332 | 11 | { |
333 | 11 | m_reader.move_to(m_reader.bytes().size() - 1); |
334 | 11 | if (!navigate_to_before_eof_marker()) |
335 | 5 | return error("No EOF marker"); |
336 | 6 | if (!navigate_to_after_startxref()) |
337 | 0 | return error("No xref"); |
338 | | |
339 | 6 | m_reader.set_reading_forwards(); |
340 | 6 | auto xref_offset_value = TRY(parse_number()); |
341 | 6 | auto xref_offset = TRY(m_document->resolve_to<int>(xref_offset_value)); |
342 | 6 | m_reader.move_to(xref_offset); |
343 | | |
344 | | // As per 7.5.6 Incremental Updates: |
345 | | // When a conforming reader reads the file, it shall build its cross-reference |
346 | | // information in such a way that the most recent copy of each object shall be |
347 | | // the one accessed from the file. |
348 | | // NOTE: This means that we have to follow back the chain of XRef table sections |
349 | | // and only add objects that were not already specified in a previous |
350 | | // (and thus newer) XRef section. |
351 | 6 | while (1) { |
352 | 6 | auto xref_table = TRY(parse_xref_table()); |
353 | 0 | if (!m_xref_table) |
354 | 0 | m_xref_table = xref_table; |
355 | 0 | else |
356 | 0 | TRY(m_xref_table->merge(move(*xref_table))); |
357 |       | |
358 | 0 | if (!xref_table->trailer() || !xref_table->trailer()->contains(CommonNames::Prev)) |
359 | 0 | break; |
360 | | |
361 | 0 | auto offset = TRY(m_document->resolve_to<int>(xref_table->trailer()->get_value(CommonNames::Prev))); |
362 | 0 | m_reader.move_to(offset); |
363 | 0 | } |
364 | | |
365 | 0 | return validate_xref_table_and_fix_if_necessary(); |
366 | 6 | } |
367 | | |
368 | | PDFErrorOr<void> DocumentParser::validate_xref_table_and_fix_if_necessary() |
369 | 0 | { |
370 | | /* While an xref table may start with an object number other than zero, this is |
371 | | very uncommon and likely a sign of a document with broken indices. |
372 | | Like most other PDF parsers seem to do, we still try to salvage the situation. |
373 | | NOTE: This is probably not spec-compliant behavior.*/ |
374 | 0 | size_t first_valid_index = 0; |
375 | 0 | while (!m_xref_table->has_object(first_valid_index)) |
376 | 0 | first_valid_index++; |
377 |       | |
378 | 0 | if (first_valid_index) { |
379 | 0 | auto& entries = m_xref_table->entries(); |
380 |       | |
381 | 0 | bool need_to_rebuild_table = true; |
382 | 0 | for (size_t i = first_valid_index; i < entries.size(); ++i) { |
383 | 0 | if (!entries[i].in_use) |
384 | 0 | continue; |
385 | | |
386 | 0 | size_t actual_object_number = 0; |
387 | 0 | if (entries[i].compressed) { |
388 | 0 | auto object_stream_index = m_xref_table->object_stream_for_object(i); |
389 | 0 | auto stream_offset = m_xref_table->byte_offset_for_object(object_stream_index); |
390 | 0 | m_reader.move_to(stream_offset); |
391 | 0 | auto first_number = TRY(parse_number()); |
392 | 0 | actual_object_number = first_number.get_u32(); |
393 | 0 | } else { |
394 | 0 | auto byte_offset = m_xref_table->byte_offset_for_object(i); |
395 | 0 | m_reader.move_to(byte_offset); |
396 | 0 | auto indirect_value = TRY(parse_indirect_value()); |
397 | 0 | actual_object_number = indirect_value->index(); |
398 | 0 | } |
399 |       | |
400 | 0 | if (actual_object_number != i - first_valid_index) { |
401 |       |             /* Our suspicion was wrong: not all object numbers are shifted equally. |
402 | | This could mean that the document is hopelessly broken, or it just |
403 | | starts at a non-zero object index for some reason. */ |
404 | 0 | need_to_rebuild_table = false; |
405 | 0 | break; |
406 | 0 | } |
407 | 0 | } |
408 | | |
409 | 0 | if (need_to_rebuild_table) { |
410 | 0 | warnln("Broken xref table detected, trying to fix it."); |
411 | 0 | entries.remove(0, first_valid_index); |
412 | 0 | } |
413 | 0 | } |
414 | | |
415 | 0 | return {}; |
416 | 0 | } |
417 | | |
418 | | static PDFErrorOr<NonnullRefPtr<StreamObject>> indirect_value_as_stream(NonnullRefPtr<IndirectValue> indirect_value) |
419 | 0 | { |
420 | 0 | auto value = indirect_value->value(); |
421 | 0 | if (!value.has<NonnullRefPtr<Object>>()) |
422 | 0 | return Error { Error::Type::Parse, "Expected indirect value to be a stream" }; |
423 | 0 | auto value_object = value.get<NonnullRefPtr<Object>>(); |
424 | 0 | if (!value_object->is<StreamObject>()) |
425 | 0 | return Error { Error::Type::Parse, "Expected indirect value to be a stream" }; |
426 | 0 | return value_object->cast<StreamObject>(); |
427 | 0 | } |
428 | | |
429 | | PDFErrorOr<NonnullRefPtr<XRefTable>> DocumentParser::parse_xref_stream() |
430 | 6 | { |
431 | | // PDF 1.7 spec, 3.4.7 "Cross-Reference Streams" |
432 | 6 | auto xref_stream = TRY(parse_indirect_value()); |
433 | 0 | auto stream = TRY(indirect_value_as_stream(xref_stream)); |
434 |       | |
435 | 0 | auto dict = stream->dict(); |
436 | 0 | auto type = TRY(dict->get_name(m_document, CommonNames::Type))->name(); |
437 | 0 | if (type != "XRef") |
438 | 0 | return error("Malformed xref dictionary"); |
439 | | |
440 | 0 | auto field_sizes = TRY(dict->get_array(m_document, CommonNames::W)); |
441 | 0 | if (field_sizes->size() != 3) |
442 | 0 | return error("Malformed xref dictionary"); |
443 | 0 | if (field_sizes->at(1).get_u32() == 0) |
444 | 0 | return error("Malformed xref dictionary"); |
445 | | |
446 | 0 | auto number_of_object_entries = dict->get_value("Size").get<int>(); |
447 |       | |
448 | 0 | struct Subsection { |
449 | 0 | int start; |
450 | 0 | int count; |
451 | 0 | }; |
452 |       | |
453 | 0 | Vector<Subsection> subsections; |
454 | 0 | if (dict->contains(CommonNames::Index)) { |
455 | 0 | auto index_array = TRY(dict->get_array(m_document, CommonNames::Index)); |
456 | 0 | if (index_array->size() % 2 != 0) |
457 | 0 | return error("Malformed xref dictionary"); |
458 | | |
459 | 0 | for (size_t i = 0; i < index_array->size(); i += 2) |
460 | 0 | subsections.append({ index_array->at(i).get<int>(), index_array->at(i + 1).get<int>() }); |
461 | 0 | } else { |
462 | 0 | subsections.append({ 0, number_of_object_entries }); |
463 | 0 | } |
464 | 0 | auto table = adopt_ref(*new XRefTable()); |
465 |       | |
466 | 0 | auto field_to_long = [](ReadonlyBytes field) -> long { |
467 | 0 | long value = 0; |
468 | 0 | u8 const max = (field.size() - 1) * 8; |
469 | 0 | for (size_t i = 0; i < field.size(); ++i) { |
470 | 0 | value |= static_cast<long>(field[i]) << (max - (i * 8)); |
471 | 0 | } |
472 | 0 | return value; |
473 | 0 | }; |
474 |       | |
475 | 0 | size_t byte_index = 0; |
476 |       | |
477 | 0 | for (auto [start, count] : subsections) { |
478 | 0 | Vector<XRefEntry> entries; |
479 |       | |
480 | 0 | for (int i = 0; i < count; i++) { |
481 | 0 | Array<u64, 3> fields; |
482 | 0 | for (size_t field_index = 0; field_index < 3; ++field_index) { |
483 | 0 | if (!field_sizes->at(field_index).has_u32()) |
484 | 0 | return error("Malformed xref stream"); |
485 | | |
486 | 0 | auto field_size = field_sizes->at(field_index).get_u32(); |
487 | 0 | if (field_size > 8) |
488 | 0 | return error("Malformed xref stream"); |
489 | | |
490 | 0 | if (byte_index + field_size > stream->bytes().size()) |
491 | 0 | return error("The xref stream data cut off early"); |
492 | | |
493 | 0 | auto field = stream->bytes().slice(byte_index, field_size); |
494 | 0 | fields[field_index] = field_to_long(field); |
495 | 0 | byte_index += field_size; |
496 | 0 | } |
497 | | |
498 | 0 | u8 type = 1; |
499 | 0 | if (field_sizes->at(0).get_u32() != 0) |
500 | 0 | type = fields[0]; |
501 |       | |
502 | 0 | entries.append({ fields[1], static_cast<u16>(fields[2]), type != 0, type == 2 }); |
503 | 0 | } |
504 | | |
505 | 0 | table->add_section({ start, count, move(entries) }); |
506 | 0 | } |
507 | | |
508 | 0 | table->set_trailer(dict); |
509 |       | |
510 | 0 | return table; |
511 | 0 | } |
512 | | |
513 | | PDFErrorOr<NonnullRefPtr<XRefTable>> DocumentParser::parse_xref_table() |
514 | 6 | { |
515 | 6 | if (!m_reader.matches("xref")) { |
516 | | // Since version 1.5, there may be a cross-reference stream instead |
517 | 6 | return parse_xref_stream(); |
518 | 6 | } |
519 | | |
520 | 0 | m_reader.move_by(4); |
521 | 0 | m_reader.consume_non_eol_whitespace(); |
522 | 0 | if (!m_reader.consume_eol()) |
523 | 0 | return error("Expected newline after \"xref\""); |
524 | | |
525 | 0 | auto table = adopt_ref(*new XRefTable()); |
526 |       | |
527 | 0 | while (m_reader.matches_number()) { |
528 | 0 | Vector<XRefEntry> entries; |
529 |       | |
530 | 0 | auto starting_index_value = TRY(parse_number()); |
531 | 0 | auto object_count_value = TRY(parse_number()); |
532 | 0 | if (!(starting_index_value.has_u32() && object_count_value.has_u32())) |
533 | 0 | return error("Malformed xref entry"); |
534 | | |
535 | 0 | auto object_count = object_count_value.get<int>(); |
536 | 0 | auto starting_index = starting_index_value.get<int>(); |
537 |       | |
538 | 0 | for (int i = 0; i < object_count; i++) { |
539 | 0 | auto offset_string = ByteString(m_reader.bytes().slice(m_reader.offset(), 10)); |
540 | 0 | m_reader.move_by(10); |
541 | 0 | if (!m_reader.consume(' ')) |
542 | 0 | return error("Malformed xref entry"); |
543 | | |
544 | 0 | auto generation_string = ByteString(m_reader.bytes().slice(m_reader.offset(), 5)); |
545 | 0 | m_reader.move_by(5); |
546 | 0 | if (!m_reader.consume(' ')) |
547 | 0 | return error("Malformed xref entry"); |
548 | | |
549 | 0 | auto letter = m_reader.read(); |
550 | 0 | if (letter != 'n' && letter != 'f') |
551 | 0 | return error("Malformed xref entry"); |
552 | | |
553 | | // The line ending sequence can be one of the following: |
554 | | // SP CR, SP LF, or CR LF |
555 | 0 | if (m_reader.matches(' ')) { |
556 | 0 | m_reader.consume(); |
557 | 0 | auto ch = m_reader.consume(); |
558 | 0 | if (ch != '\r' && ch != '\n') |
559 | 0 | return error("Malformed xref entry"); |
560 | 0 | } else { |
561 | 0 | if (!m_reader.matches("\r\n")) |
562 | 0 | return error("Malformed xref entry"); |
563 | 0 | m_reader.move_by(2); |
564 | 0 | } |
565 | | |
566 | 0 | u64 offset = strtoll(offset_string.characters(), nullptr, 10); |
567 | 0 | auto generation = strtol(generation_string.characters(), nullptr, 10); |
568 |       | |
569 | 0 | entries.append({ offset, static_cast<u16>(generation), letter == 'n' }); |
570 | 0 | } |
571 | | |
572 | 0 | table->add_section({ starting_index, object_count, entries }); |
573 | 0 | } |
574 | | |
575 | 0 | m_reader.consume_whitespace(); |
576 | 0 | if (m_reader.matches("trailer")) |
577 | 0 | table->set_trailer(TRY(parse_file_trailer())); |
578 |       | |
579 | 0 | return table; |
580 | 0 | } |
581 | | |
582 | | PDFErrorOr<NonnullRefPtr<DictObject>> DocumentParser::parse_file_trailer() |
583 | 0 | { |
584 | 0 | while (m_reader.matches_eol()) |
585 | 0 | m_reader.consume_eol(); |
586 | |
|
587 | 0 | if (!m_reader.matches("trailer")) |
588 | 0 | return error("Expected \"trailer\" keyword"); |
589 | 0 | m_reader.move_by(7); |
590 | 0 | m_reader.consume_whitespace(); |
591 | 0 | return parse_dict(); |
592 | 0 | } |
593 | | |
594 | | PDFErrorOr<Value> DocumentParser::parse_compressed_object_with_index(u32 index) |
595 | 0 | { |
596 | 0 | auto object_stream_index = m_xref_table->object_stream_for_object(index); |
597 | 0 | auto stream_offset = m_xref_table->byte_offset_for_object(object_stream_index); |
598 |       | |
599 | 0 | m_reader.move_to(stream_offset); |
600 |       | |
601 | 0 | auto obj_stream = TRY(parse_indirect_value()); |
602 | 0 | auto stream = TRY(indirect_value_as_stream(obj_stream)); |
603 |       | |
604 | 0 | if (obj_stream->index() != object_stream_index) |
605 | 0 | return error("Mismatching object stream index"); |
606 | | |
607 | 0 | auto dict = stream->dict(); |
608 |       | |
609 | 0 | auto type = TRY(dict->get_name(m_document, CommonNames::Type))->name(); |
610 | 0 | if (type != "ObjStm") |
611 | 0 | return error("Invalid object stream type"); |
612 | | |
613 | 0 | auto object_count = dict->get_value("N").get_u32(); |
614 | 0 | auto first_object_offset = dict->get_value("First").get_u32(); |
615 |       | |
616 | 0 | Parser stream_parser(m_document, stream->bytes()); |
617 | | |
618 | | // The data was already decrypted when reading the outer compressed ObjStm. |
619 | 0 | stream_parser.set_encryption_enabled(false); |
620 |       | |
621 | 0 | for (u32 i = 0; i < object_count; ++i) { |
622 | 0 | auto object_number = TRY(stream_parser.parse_number()); |
623 | 0 | auto object_offset = TRY(stream_parser.parse_number()); |
624 |       | |
625 | 0 | if (object_number.get_u32() == index) { |
626 | 0 | stream_parser.move_to(first_object_offset + object_offset.get_u32()); |
627 | 0 | break; |
628 | 0 | } |
629 | 0 | } |
630 | | |
631 | 0 | stream_parser.push_reference({ index, 0 }); |
632 | 0 | stream_parser.consume_whitespace(); |
633 | 0 | auto value = TRY(stream_parser.parse_value()); |
634 | 0 | stream_parser.pop_reference(); |
635 |       | |
636 | 0 | return value; |
637 | 0 | } |
638 | | |
639 | | PDFErrorOr<DocumentParser::PageOffsetHintTable> DocumentParser::parse_page_offset_hint_table(ReadonlyBytes hint_stream_bytes) |
640 | 0 | { |
641 | 0 | if (hint_stream_bytes.size() < sizeof(PageOffsetHintTable)) |
642 | 0 | return error("Hint stream is too small"); |
643 | | |
644 | 0 | size_t offset = 0; |
645 |       | |
646 | 0 | auto read_u32 = [&] { |
647 | 0 | u32 data = reinterpret_cast<u32 const*>(hint_stream_bytes.data() + offset)[0]; |
648 | 0 | offset += 4; |
649 | 0 | return AK::convert_between_host_and_big_endian(data); |
650 | 0 | }; |
651 |       | |
652 | 0 | auto read_u16 = [&] { |
653 | 0 | u16 data = reinterpret_cast<u16 const*>(hint_stream_bytes.data() + offset)[0]; |
654 | 0 | offset += 2; |
655 | 0 | return AK::convert_between_host_and_big_endian(data); |
656 | 0 | }; |
657 |       | |
658 | 0 | PageOffsetHintTable hint_table { |
659 | 0 | read_u32(), |
660 | 0 | read_u32(), |
661 | 0 | read_u16(), |
662 | 0 | read_u32(), |
663 | 0 | read_u16(), |
664 | 0 | read_u32(), |
665 | 0 | read_u16(), |
666 | 0 | read_u32(), |
667 | 0 | read_u16(), |
668 | 0 | read_u16(), |
669 | 0 | read_u16(), |
670 | 0 | read_u16(), |
671 | 0 | read_u16(), |
672 | 0 | }; |
673 | | |
674 | | // Verify that all of the bits_required_for_xyz fields are <= 32, since all of the numeric |
675 | | // fields in PageOffsetHintTableEntry are u32 |
676 | 0 | VERIFY(hint_table.bits_required_for_object_number <= 32); |
677 | 0 | VERIFY(hint_table.bits_required_for_page_length <= 32); |
678 | 0 | VERIFY(hint_table.bits_required_for_content_stream_offsets <= 32); |
679 | 0 | VERIFY(hint_table.bits_required_for_content_stream_length <= 32); |
680 | 0 | VERIFY(hint_table.bits_required_for_number_of_shared_obj_refs <= 32); |
681 | 0 | VERIFY(hint_table.bits_required_for_greatest_shared_obj_identifier <= 32); |
682 | 0 | VERIFY(hint_table.bits_required_for_fraction_numerator <= 32); |
683 | | |
684 | 0 | return hint_table; |
685 | 0 | } |
686 | | |
687 | | PDFErrorOr<Vector<DocumentParser::PageOffsetHintTableEntry>> DocumentParser::parse_all_page_offset_hint_table_entries(PageOffsetHintTable const& hint_table, ReadonlyBytes hint_stream_bytes) |
688 | 0 | { |
689 | 0 | auto input_stream = TRY(try_make<FixedMemoryStream>(hint_stream_bytes)); |
690 | 0 | TRY(input_stream->seek(sizeof(PageOffsetHintTable))); |
691 |       | |
692 | 0 | LittleEndianInputBitStream bit_stream { move(input_stream) }; |
693 |       | |
694 | 0 | auto number_of_pages = m_linearization_dictionary.value().number_of_pages; |
695 | 0 | Vector<PageOffsetHintTableEntry> entries; |
696 | 0 | for (size_t i = 0; i < number_of_pages; i++) |
697 | 0 | entries.append(PageOffsetHintTableEntry {}); |
698 |       | |
699 | 0 | auto bits_required_for_object_number = hint_table.bits_required_for_object_number; |
700 | 0 | auto bits_required_for_page_length = hint_table.bits_required_for_page_length; |
701 | 0 | auto bits_required_for_content_stream_offsets = hint_table.bits_required_for_content_stream_offsets; |
702 | 0 | auto bits_required_for_content_stream_length = hint_table.bits_required_for_content_stream_length; |
703 | 0 | auto bits_required_for_number_of_shared_obj_refs = hint_table.bits_required_for_number_of_shared_obj_refs; |
704 | 0 | auto bits_required_for_greatest_shared_obj_identifier = hint_table.bits_required_for_greatest_shared_obj_identifier; |
705 | 0 | auto bits_required_for_fraction_numerator = hint_table.bits_required_for_fraction_numerator; |
706 |       | |
707 | 0 | auto parse_int_entry = [&](u32 PageOffsetHintTableEntry::* field, u32 bit_size) -> ErrorOr<void> { |
708 | 0 | if (bit_size <= 0) |
709 | 0 | return {}; |
710 | | |
711 | 0 | for (int i = 0; i < number_of_pages; i++) { |
712 | 0 | auto& entry = entries[i]; |
713 | 0 | entry.*field = TRY(bit_stream.read_bits(bit_size)); |
714 | 0 | } |
715 |       | |
716 | 0 | return {}; |
717 | 0 | }; |
718 |       | |
719 | 0 | auto parse_vector_entry = [&](Vector<u32> PageOffsetHintTableEntry::* field, u32 bit_size) -> ErrorOr<void> { |
720 | 0 | if (bit_size <= 0) |
721 | 0 | return {}; |
722 | | |
723 | 0 | for (int page = 1; page < number_of_pages; page++) { |
724 | 0 | auto number_of_shared_objects = entries[page].number_of_shared_objects; |
725 | 0 | Vector<u32> items; |
726 | 0 | items.ensure_capacity(number_of_shared_objects); |
727 |       | |
728 | 0 | for (size_t i = 0; i < number_of_shared_objects; i++) |
729 | 0 | items.unchecked_append(TRY(bit_stream.read_bits(bit_size))); |
730 |       | |
731 | 0 | entries[page].*field = move(items); |
732 | 0 | } |
733 |       | |
734 | 0 | return {}; |
735 | 0 | }; |
736 |       | |
737 | 0 | TRY(parse_int_entry(&PageOffsetHintTableEntry::objects_in_page_number, bits_required_for_object_number)); |
738 | 0 | TRY(parse_int_entry(&PageOffsetHintTableEntry::page_length_number, bits_required_for_page_length)); |
739 | 0 | TRY(parse_int_entry(&PageOffsetHintTableEntry::number_of_shared_objects, bits_required_for_number_of_shared_obj_refs)); |
740 | 0 | TRY(parse_vector_entry(&PageOffsetHintTableEntry::shared_object_identifiers, bits_required_for_greatest_shared_obj_identifier)); |
741 | 0 | TRY(parse_vector_entry(&PageOffsetHintTableEntry::shared_object_location_numerators, bits_required_for_fraction_numerator)); |
742 | 0 | TRY(parse_int_entry(&PageOffsetHintTableEntry::page_content_stream_offset_number, bits_required_for_content_stream_offsets)); |
743 | 0 | TRY(parse_int_entry(&PageOffsetHintTableEntry::page_content_stream_length_number, bits_required_for_content_stream_length)); |
744 |       | |
745 | 0 | return entries; |
746 | 0 | } |
747 | | |
748 | | bool DocumentParser::navigate_to_before_eof_marker() |
749 | 11 | { |
750 | 11 | m_reader.set_reading_backwards(); |
751 | | |
752 | 137k | while (!m_reader.done()) { |
753 | 137k | m_reader.consume_eol(); |
754 | 137k | m_reader.consume_whitespace(); |
755 | 137k | if (m_reader.matches("%%EOF")) { |
756 | 6 | m_reader.move_by(5); |
757 | 6 | return true; |
758 | 6 | } |
759 | | |
760 | 11.5M | m_reader.move_until([&](auto) { return m_reader.matches_eol(); }); |
761 | 137k | } |
762 | | |
763 | 5 | return false; |
764 | 11 | } |
765 | | |
766 | | bool DocumentParser::navigate_to_after_startxref() |
767 | 6 | { |
768 | 6 | m_reader.set_reading_backwards(); |
769 | | |
770 | 12 | while (!m_reader.done()) { |
771 | 18 | m_reader.move_until([&](auto) { return m_reader.matches_eol(); }); |
772 | 12 | auto offset = m_reader.offset() + 1; |
773 | | |
774 | 12 | m_reader.consume_eol(); |
775 | 12 | m_reader.consume_whitespace(); |
776 | | |
777 | 12 | if (!m_reader.matches("startxref")) |
778 | 6 | continue; |
779 | | |
780 | 6 | m_reader.move_by(9); |
781 | 6 | if (!m_reader.matches_eol()) |
782 | 0 | continue; |
783 | | |
784 | 6 | m_reader.move_to(offset); |
785 | 6 | return true; |
786 | 6 | } |
787 | | |
788 | 0 | return false; |
789 | 6 | } |
790 | | |
791 | | PDFErrorOr<RefPtr<DictObject>> DocumentParser::conditionally_parse_page_tree_node(u32 object_index) |
792 | 0 | { |
793 | 0 | auto dict_value = TRY(parse_object_with_index(object_index)); |
794 | 0 | auto dict_object = dict_value.get<NonnullRefPtr<Object>>(); |
795 | 0 | if (!dict_object->is<DictObject>()) |
796 | 0 | return error(ByteString::formatted("Invalid page tree with xref index {}", object_index)); |
797 | | |
798 | 0 | auto dict = dict_object->cast<DictObject>(); |
799 | 0 | if (!dict->contains_any_of(CommonNames::Type, CommonNames::Parent, CommonNames::Kids, CommonNames::Count)) |
800 | | // This is a page, not a page tree node |
801 | 0 | return RefPtr<DictObject> {}; |
802 | | |
803 | 0 | if (!dict->contains(CommonNames::Type)) |
804 | 0 | return RefPtr<DictObject> {}; |
805 | 0 | auto type_object = TRY(dict->get_object(m_document, CommonNames::Type)); |
806 | 0 | if (!type_object->is<NameObject>()) |
807 | 0 | return RefPtr<DictObject> {}; |
808 | 0 | auto type_name = type_object->cast<NameObject>(); |
809 | 0 | if (type_name->name() != CommonNames::Pages) |
810 | 0 | return RefPtr<DictObject> {}; |
811 | | |
812 | 0 | return dict; |
813 | 0 | } |
814 | | |
815 | | } |
816 | | |
817 | | namespace AK { |
818 | | |
819 | | template<> |
820 | | struct Formatter<PDF::DocumentParser::LinearizationDictionary> : Formatter<StringView> { |
821 | | ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::LinearizationDictionary const& dict) |
822 | 0 | { |
823 | 0 | StringBuilder builder; |
824 | 0 | builder.append("{\n"sv); |
825 | 0 | builder.appendff(" length_of_file={}\n", dict.length_of_file); |
826 | 0 | builder.appendff(" primary_hint_stream_offset={}\n", dict.primary_hint_stream_offset); |
827 | 0 | builder.appendff(" primary_hint_stream_length={}\n", dict.primary_hint_stream_length); |
828 | 0 | builder.appendff(" overflow_hint_stream_offset={}\n", dict.overflow_hint_stream_offset); |
829 | 0 | builder.appendff(" overflow_hint_stream_length={}\n", dict.overflow_hint_stream_length); |
830 | 0 | builder.appendff(" first_page_object_number={}\n", dict.first_page_object_number); |
831 | 0 | builder.appendff(" offset_of_first_page_end={}\n", dict.offset_of_first_page_end); |
832 | 0 | builder.appendff(" number_of_pages={}\n", dict.number_of_pages); |
833 | 0 | builder.appendff(" offset_of_main_xref_table={}\n", dict.offset_of_main_xref_table); |
834 | 0 | builder.appendff(" first_page={}\n", dict.first_page); |
835 | 0 | builder.append('}'); |
836 | 0 | return Formatter<StringView>::format(format_builder, builder.to_byte_string()); |
837 | 0 | } |
838 | | }; |
839 | | |
840 | | template<> |
841 | | struct Formatter<PDF::DocumentParser::PageOffsetHintTable> : Formatter<StringView> { |
842 | | ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::PageOffsetHintTable const& table) |
843 | 0 | { |
844 | 0 | StringBuilder builder; |
845 | 0 | builder.append("{\n"sv); |
846 | 0 | builder.appendff(" least_number_of_objects_in_a_page={}\n", table.least_number_of_objects_in_a_page); |
847 | 0 | builder.appendff(" location_of_first_page_object={}\n", table.location_of_first_page_object); |
848 | 0 | builder.appendff(" bits_required_for_object_number={}\n", table.bits_required_for_object_number); |
849 | 0 | builder.appendff(" least_length_of_a_page={}\n", table.least_length_of_a_page); |
850 | 0 | builder.appendff(" bits_required_for_page_length={}\n", table.bits_required_for_page_length); |
851 | 0 | builder.appendff(" least_offset_of_any_content_stream={}\n", table.least_offset_of_any_content_stream); |
852 | 0 | builder.appendff(" bits_required_for_content_stream_offsets={}\n", table.bits_required_for_content_stream_offsets); |
853 | 0 | builder.appendff(" least_content_stream_length={}\n", table.least_content_stream_length); |
854 | 0 | builder.appendff(" bits_required_for_content_stream_length={}\n", table.bits_required_for_content_stream_length); |
855 | 0 | builder.appendff(" bits_required_for_number_of_shared_obj_refs={}\n", table.bits_required_for_number_of_shared_obj_refs); |
856 | 0 | builder.appendff(" bits_required_for_greatest_shared_obj_identifier={}\n", table.bits_required_for_greatest_shared_obj_identifier); |
857 | 0 | builder.appendff(" bits_required_for_fraction_numerator={}\n", table.bits_required_for_fraction_numerator); |
858 | 0 | builder.appendff(" shared_object_reference_fraction_denominator={}\n", table.shared_object_reference_fraction_denominator); |
859 | 0 | builder.append('}'); |
860 | 0 | return Formatter<StringView>::format(format_builder, builder.to_byte_string()); |
861 | 0 | } |
862 | | }; |
863 | | |
864 | | template<> |
865 | | struct Formatter<PDF::DocumentParser::PageOffsetHintTableEntry> : Formatter<StringView> { |
866 | | ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::PageOffsetHintTableEntry const& entry) |
867 | 0 | { |
868 | 0 | StringBuilder builder; |
869 | 0 | builder.append("{\n"sv); |
870 | 0 | builder.appendff(" objects_in_page_number={}\n", entry.objects_in_page_number); |
871 | 0 | builder.appendff(" page_length_number={}\n", entry.page_length_number); |
872 | 0 | builder.appendff(" number_of_shared_objects={}\n", entry.number_of_shared_objects); |
873 | 0 | builder.append(" shared_object_identifiers=["sv); |
874 | 0 | for (auto& identifier : entry.shared_object_identifiers) |
875 | 0 | builder.appendff(" {}", identifier); |
876 | 0 | builder.append(" ]\n"sv); |
877 | 0 | builder.append(" shared_object_location_numerators=["sv); |
878 | 0 | for (auto& numerator : entry.shared_object_location_numerators) |
879 | 0 | builder.appendff(" {}", numerator); |
880 | 0 | builder.append(" ]\n"sv); |
881 | 0 | builder.appendff(" page_content_stream_offset_number={}\n", entry.page_content_stream_offset_number); |
882 | 0 | builder.appendff(" page_content_stream_length_number={}\n", entry.page_content_stream_length_number); |
883 | 0 | builder.append('}'); |
884 | 0 | return Formatter<StringView>::format(format_builder, builder.to_byte_string()); |
885 | 0 | } |
886 | | }; |
887 | | |
888 | | } |