/work/workdir/UnpackedTarball/harfbuzz/src/hb-repacker.hh
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2020 Google, Inc. |
3 | | * |
4 | | * This is part of HarfBuzz, a text shaping library. |
5 | | * |
6 | | * Permission is hereby granted, without written agreement and without |
7 | | * license or royalty fees, to use, copy, modify, and distribute this |
8 | | * software and its documentation for any purpose, provided that the |
9 | | * above copyright notice and the following two paragraphs appear in |
10 | | * all copies of this software. |
11 | | * |
12 | | * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR |
13 | | * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES |
14 | | * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN |
15 | | * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH |
16 | | * DAMAGE. |
17 | | * |
18 | | * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, |
19 | | * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND |
20 | | * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS |
21 | | * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO |
22 | | * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. |
23 | | * |
24 | | * Google Author(s): Garret Rieger |
25 | | */ |
26 | | |
27 | | #ifndef HB_REPACKER_HH |
28 | | #define HB_REPACKER_HH |
29 | | |
30 | | #include "hb-open-type.hh" |
31 | | #include "hb-map.hh" |
32 | | #include "hb-vector.hh" |
33 | | #include "graph/graph.hh" |
34 | | #include "graph/gsubgpos-graph.hh" |
35 | | #include "graph/serialize.hh" |
36 | | |
37 | | using graph::graph_t; |
38 | | |
39 | | /* |
40 | | * For a detailed writeup on the overflow resolution algorithm see: |
41 | | * docs/repacker.md |
42 | | */ |
43 | | |
struct lookup_size_t
{
  unsigned lookup_index;
  size_t size;
  unsigned num_subtables;

  /* qsort()-compatible comparator; forwards to the typed overload below. */
  static int cmp (const void* a, const void* b)
  {
    return cmp (static_cast<const lookup_size_t*> (a),
                static_cast<const lookup_size_t*> (b));
  }

  /* Orders lookups by subtables-per-byte density, densest first.
   * Ties are broken by lookup index, highest index first. */
  static int cmp (const lookup_size_t* a, const lookup_size_t* b)
  {
    double density_a = (double) a->num_subtables / (double) a->size;
    double density_b = (double) b->num_subtables / (double) b->size;

    if (density_a != density_b)
    {
      double delta = density_b - density_a;
      if (delta < 0) return -1;
      if (delta > 0) return 1;
      return 0;
    }

    return b->lookup_index - a->lookup_index;
  }
};
70 | | |
71 | | static inline |
72 | | bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context) |
73 | 0 | { |
74 | | // For each lookup this will check the size of subtables and split them as needed |
75 | | // so that no subtable is at risk of overflowing. (where we support splitting for |
76 | | // that subtable type). |
77 | | // |
78 | | // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup |
79 | | // pass after this processing is done. Not super necessary as splits are |
80 | | // only done where overflow is likely, so de-dup probably will get undone |
81 | | // later anyways. |
82 | | |
83 | | // The loop below can modify the contents of ext_context.lookups if new subtables are added |
84 | | // to a lookup during a split. So save the initial set of lookup indices so the iteration doesn't |
85 | | // risk access free'd memory if ext_context.lookups gets resized. |
86 | 0 | hb_set_t lookup_indices(ext_context.lookups.keys ()); |
87 | 0 | for (unsigned lookup_index : lookup_indices) |
88 | 0 | { |
89 | 0 | graph::Lookup* lookup = ext_context.lookups.get(lookup_index); |
90 | 0 | if (!lookup->split_subtables_if_needed (ext_context, lookup_index)) |
91 | 0 | return false; |
92 | 0 | } |
93 | | |
94 | 0 | return true; |
95 | 0 | } Unexecuted instantiation: hb-subset.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) Unexecuted instantiation: hb-subset-table-layout.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) Unexecuted instantiation: hb-subset-table-var.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) Unexecuted instantiation: hb-subset-table-cff.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) Unexecuted instantiation: hb-subset-table-color.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) Unexecuted instantiation: hb-subset-table-other.cc:_presplit_subtables_if_needed(graph::gsubgpos_graph_context_t&) |
96 | | |
97 | | /* |
98 | | * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted |
99 | | * to extension lookups. |
100 | | */ |
/*
 * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
 * to extension lookups.
 *
 * Returns false if a promotion was attempted and failed; true otherwise
 * (including the case where nothing needed promoting).
 */
static inline
bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context)
{
  // Simple Algorithm (v1, current):
  // 1. Calculate how many bytes each non-extension lookup consumes.
  // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first)
  // 3. Promote the rest.
  //
  // Advanced Algorithm (v2, not implemented):
  // 1. Perform connected component analysis using lookups as roots.
  // 2. Compute size of each connected component.
  // 3. Select up to 64k worth of connected components to remain as non-extensions.
  //    (greedy, highest subtables per byte first)
  // 4. Promote the rest.

  // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
  // TODO(garretrieger): also support extension promotion during iterative resolution phase, then
  //                     we can use a less conservative threshold here.
  // TODO(grieger): skip this for the 24 bit case.

  // No lookups at all: nothing to promote.
  if (!ext_context.lookups) return true;

  // Gather per-lookup statistics (subgraph size and subtable count) so the
  // greedy selection below can rank lookups by subtables-per-byte density.
  unsigned total_lookup_table_sizes = 0;
  hb_vector_t<lookup_size_t> lookup_sizes;
  lookup_sizes.alloc (ext_context.lookups.get_population (), true);

  for (unsigned lookup_index : ext_context.lookups.keys ())
  {
    const auto& lookup_v = ext_context.graph.vertices_[lookup_index];
    total_lookup_table_sizes += lookup_v.table_size ();

    const graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
    hb_set_t visited;
    lookup_sizes.push (lookup_size_t {
      lookup_index,
      ext_context.graph.find_subgraph_size (lookup_index, visited),
      lookup->number_of_subtables (),
    });
  }

  // Sorts by lookup_size_t::cmp: densest (most subtables per byte) first.
  lookup_sizes.qsort ();

  // Running byte budgets for adjacent layer pairs of the serialized table.
  // Offsets between adjacent layers must stay under 64k (16 bit offsets), so
  // each pair's combined size is tracked against that limit below.
  size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
  size_t l2_l3_size = lookup_list_size + total_lookup_table_sizes; // Lookup List + Lookups
  size_t l3_l4_size = total_lookup_table_sizes; // Lookups + SubTables
  size_t l4_plus_size = 0; // SubTables + their descendants

  // Start by assuming all lookups are using extension subtables, this size will be removed later
  // if it's decided to not make a lookup extension.
  for (auto p : lookup_sizes)
  {
    // TODO(garretrieger): this overestimates the extension subtables size because some extension subtables may be
    //                     reused. However, we can't correct this until we have connected component analysis in place.
    // 8 bytes per subtable: presumably the serialized size of one extension
    // subtable wrapper — TODO confirm against the Extension subtable format.
    unsigned subtables_size = p.num_subtables * 8;
    l3_l4_size += subtables_size;
    l4_plus_size += subtables_size;
  }

  // Greedily keep the densest lookups as non-extension until one of the layer
  // budgets would exceed 64k; everything after that point gets promoted.
  bool layers_full = false;
  for (auto p : lookup_sizes)
  {
    const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index);
    if (lookup->is_extension (ext_context.table_tag))
      // already an extension so size is counted by the loop above.
      continue;

    if (!layers_full)
    {
      // Tentatively move this lookup's real subtables back into the l3/l4
      // accounting (replacing the extension-wrapper estimate added above) and
      // check whether all three layer budgets still fit in 16 bits.
      size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
      hb_set_t visited;
      size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
      size_t remaining_size = p.size - subtables_size - lookup_size;

      l3_l4_size += subtables_size;
      l3_l4_size -= p.num_subtables * 8;
      l4_plus_size += subtables_size + remaining_size;

      if (l2_l3_size < (1 << 16)
          && l3_l4_size < (1 << 16)
          && l4_plus_size < (1 << 16)) continue; // this lookup fits within all layers groups

      // Budget exceeded: this and all remaining lookups get promoted.
      layers_full = true;
    }

    if (!ext_context.lookups.get(p.lookup_index)->make_extension (ext_context, p.lookup_index))
      return false;
  }

  return true;
}
190 | | |
/*
 * Attempt to resolve overflows by isolating the subgraphs rooted at
 * overflowing parents and moving them into a fresh 32-bit space.
 * Only operates on one space per invocation. Returns true if any roots
 * were moved, false if isolation was not applicable.
 */
static inline
bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
                               graph_t& sorted_graph)
{
  unsigned space = 0;
  hb_set_t roots_to_isolate;

  // Scan overflows from furthest to nearest, collecting the roots of
  // overflowing subgraphs that belong to the first eligible space found.
  for (int i = overflows.length - 1; i >= 0; i--)
  {
    const graph::overflow_record_t& r = overflows[i];

    unsigned root;
    unsigned overflow_space = sorted_graph.space_for (r.parent, &root);
    // Space 0 is not eligible for isolation.
    if (!overflow_space) continue;
    // A space with a single root can't be split any further.
    if (sorted_graph.num_roots_for_space (overflow_space) <= 1) continue;

    // Lock onto the first eligible space encountered; only roots from that
    // one space are isolated this round.
    if (!space) {
      space = overflow_space;
    }

    if (space == overflow_space)
      roots_to_isolate.add(root);
  }

  if (!roots_to_isolate) return false;

  unsigned maximum_to_move = hb_max ((sorted_graph.num_roots_for_space (space) / 2u), 1u);
  if (roots_to_isolate.get_population () > maximum_to_move) {
    // Only move at most half of the roots in a space at a time.
    //
    // Note: this was ported from non-stable ids to stable ids. So to retain the same behaviour
    // with regards to which roots are removed from the set we need to remove them in the topological
    // order, not the object id order.
    int extra = roots_to_isolate.get_population () - maximum_to_move;
    for (unsigned id : sorted_graph.ordering_) {
      if (!extra) break;
      if (roots_to_isolate.has(id)) {
        roots_to_isolate.del(id);
        extra--;
      }
    }
  }

  DEBUG_MSG (SUBSET_REPACK, nullptr,
             "Overflow in space %u (%u roots). Moving %u roots to space %u.",
             space,
             sorted_graph.num_roots_for_space (space),
             roots_to_isolate.get_population (),
             sorted_graph.next_space ());

  // Detach the selected subgraphs from shared nodes, then assign them to a
  // brand new space.
  sorted_graph.isolate_subgraph (roots_to_isolate);
  sorted_graph.move_to_new_space (roots_to_isolate);

  return true;
}
246 | | |
247 | | static inline |
248 | | bool _resolve_shared_overflow(const hb_vector_t<graph::overflow_record_t>& overflows, |
249 | | int overflow_index, |
250 | | graph_t& sorted_graph) |
251 | 0 | { |
252 | 0 | const graph::overflow_record_t& r = overflows[overflow_index]; |
253 | | |
254 | | // Find all of the parents in overflowing links that link to this |
255 | | // same child node. We will then try duplicating the child node and |
256 | | // re-assigning all of these parents to the duplicate. |
257 | 0 | hb_set_t parents; |
258 | 0 | parents.add(r.parent); |
259 | 0 | for (int i = overflow_index - 1; i >= 0; i--) { |
260 | 0 | const graph::overflow_record_t& r2 = overflows[i]; |
261 | 0 | if (r2.child == r.child) { |
262 | 0 | parents.add(r2.parent); |
263 | 0 | } |
264 | 0 | } |
265 | |
|
266 | 0 | unsigned result = sorted_graph.duplicate(&parents, r.child); |
267 | 0 | if (result == (unsigned) -1 && parents.get_population() > 2) { |
268 | | // All links to the child are overflowing, so we can't include all |
269 | | // in the duplication. Remove one parent from the duplication. |
270 | | // Remove the lowest index parent, which will be the closest to the child. |
271 | 0 | parents.del(parents.get_min()); |
272 | 0 | result = sorted_graph.duplicate(&parents, r.child); |
273 | 0 | } |
274 | |
|
275 | 0 | if (result == (unsigned) -1) return false; |
276 | | |
277 | 0 | if (parents.get_population() > 1) { |
278 | | // If the duplicated node has more than one parent pre-emptively raise it's priority to the maximum. |
279 | | // This will place it close to the parents. Node's with only one parent, don't need this as normal overflow |
280 | | // resolution will raise priority if needed. |
281 | | // |
282 | | // Reasoning: most of the parents to this child are likely at the same layer in the graph. Duplicating |
283 | | // the child will theoretically allow it to be placed closer to it's parents. However, due to the shortest |
284 | | // distance sort by default it's placement will remain in the same layer, thus it will remain in roughly the |
285 | | // same position (and distance from parents) as the original child node. The overflow resolution will attempt |
286 | | // to move nodes closer, but only for non-shared nodes. Since this node is shared, it will simply be given |
287 | | // further duplication which defeats the attempt to duplicate with multiple parents. To fix this we |
288 | | // pre-emptively raise priority now which allows the duplicated node to pack into the same layer as it's parents. |
289 | 0 | sorted_graph.vertices_[result].give_max_priority(); |
290 | 0 | } |
291 | |
|
292 | 0 | return true; |
293 | 0 | } Unexecuted instantiation: hb-subset.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) Unexecuted instantiation: hb-subset-table-layout.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) Unexecuted instantiation: hb-subset-table-var.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) Unexecuted instantiation: hb-subset-table-cff.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) Unexecuted instantiation: hb-subset-table-color.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) Unexecuted instantiation: hb-subset-table-other.cc:_resolve_shared_overflow(hb_vector_t<graph::overflow_record_t, false> const&, int, graph::graph_t&) |
294 | | |
295 | | static inline |
296 | | bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows, |
297 | | hb_set_t& priority_bumped_parents, |
298 | | graph_t& sorted_graph) |
299 | 0 | { |
300 | 0 | bool resolution_attempted = false; |
301 | | |
302 | | // Try resolving the furthest overflows first. |
303 | 0 | for (int i = overflows.length - 1; i >= 0; i--) |
304 | 0 | { |
305 | 0 | const graph::overflow_record_t& r = overflows[i]; |
306 | 0 | const auto& child = sorted_graph.vertices_[r.child]; |
307 | 0 | if (child.is_shared ()) |
308 | 0 | { |
309 | | // The child object is shared, we may be able to eliminate the overflow |
310 | | // by duplicating it. |
311 | 0 | if (_resolve_shared_overflow(overflows, i, sorted_graph)) |
312 | 0 | return true; |
313 | | |
314 | | // Sometimes we can't duplicate a node which looks shared because it's not actually shared |
315 | | // (eg. all links from the same parent) in this case continue on to other resolution options. |
316 | 0 | } |
317 | | |
318 | 0 | if (child.is_leaf () && !priority_bumped_parents.has (r.parent)) |
319 | 0 | { |
320 | | // This object is too far from it's parent, attempt to move it closer. |
321 | | // |
322 | | // TODO(garretrieger): initially limiting this to leaf's since they can be |
323 | | // moved closer with fewer consequences. However, this can |
324 | | // likely can be used for non-leafs as well. |
325 | | // TODO(garretrieger): also try lowering priority of the parent. Make it |
326 | | // get placed further up in the ordering, closer to it's children. |
327 | | // this is probably preferable if the total size of the parent object |
328 | | // is < then the total size of the children (and the parent can be moved). |
329 | | // Since in that case moving the parent will cause a smaller increase in |
330 | | // the length of other offsets. |
331 | 0 | if (sorted_graph.raise_childrens_priority (r.parent)) { |
332 | 0 | priority_bumped_parents.add (r.parent); |
333 | 0 | resolution_attempted = true; |
334 | 0 | } |
335 | 0 | continue; |
336 | 0 | } |
337 | | |
338 | | // TODO(garretrieger): add additional offset resolution strategies |
339 | | // - Promotion to extension lookups. |
340 | | // - Table splitting. |
341 | 0 | } |
342 | | |
343 | 0 | return resolution_attempted; |
344 | 0 | } Unexecuted instantiation: hb-subset.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) Unexecuted instantiation: hb-subset-table-layout.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) Unexecuted instantiation: hb-subset-table-var.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) Unexecuted instantiation: hb-subset-table-cff.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) Unexecuted instantiation: hb-subset-table-color.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) Unexecuted instantiation: hb-subset-table-other.cc:_process_overflows(hb_vector_t<graph::overflow_record_t, false> const&, hb_set_t&, graph::graph_t&) |
345 | | |
/*
 * Modifies sorted_graph in place attempting to eliminate offset overflows.
 * For GSUB/GPOS tables, table-specific specializations (subtable splitting,
 * extension promotion, 32-bit space assignment) are applied first; then
 * iterative rounds of generic overflow resolution run until the graph is
 * overflow free, errors out, or max_rounds is exhausted. Returns true iff
 * an overflow-free ordering was found.
 */
inline bool
hb_resolve_graph_overflows (hb_tag_t table_tag,
                            unsigned max_rounds ,
                            bool always_recalculate_extensions,
                            graph_t& sorted_graph /* IN/OUT */)
{
  DEBUG_MSG (SUBSET_REPACK, nullptr, "Repacking %c%c%c%c.", HB_UNTAG(table_tag));
  sorted_graph.sort_shortest_distance ();
  if (sorted_graph.in_error ())
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state after initial sort.");
    return false;
  }

  // If the initial shortest-distance sort already packs without overflows
  // there is nothing more to do.
  bool will_overflow = graph::will_overflow (sorted_graph);
  if (!will_overflow)
    return true;

  bool is_gsub_or_gpos = (table_tag == HB_OT_TAG_GPOS || table_tag == HB_OT_TAG_GSUB);
  graph::gsubgpos_graph_context_t ext_context (table_tag, sorted_graph);
  if (is_gsub_or_gpos && will_overflow)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Applying GSUB/GPOS repacking specializations.");
    // Splitting and promotion are relatively expensive, so they are only done
    // when the caller opts in (or on the last-ditch retry below).
    if (always_recalculate_extensions)
    {
      DEBUG_MSG (SUBSET_REPACK, nullptr, "Splitting subtables if needed.");
      if (!_presplit_subtables_if_needed (ext_context)) {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "Subtable splitting failed.");
        return false;
      }

      DEBUG_MSG (SUBSET_REPACK, nullptr, "Promoting lookups to extensions if needed.");
      if (!_promote_extensions_if_needed (ext_context)) {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed.");
        return false;
      }
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
    // Re-sort after the graph was restructured; if nothing changed only
    // re-sort when actually needed.
    if (sorted_graph.assign_spaces ())
      sorted_graph.sort_shortest_distance ();
    else
      sorted_graph.sort_shortest_distance_if_needed ();
  }

  // Iterative resolution: each round either isolates subgraphs into new
  // spaces or applies the generic overflow strategies, then re-sorts.
  unsigned round = 0;
  hb_vector_t<graph::overflow_record_t> overflows;
  // TODO(garretrieger): select a good limit for max rounds.
  while (!sorted_graph.in_error ()
         && graph::will_overflow (sorted_graph, &overflows)
         && round < max_rounds) {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %u ===", round);
    print_overflows (sorted_graph, overflows);

    hb_set_t priority_bumped_parents;

    if (!_try_isolating_subgraphs (overflows, sorted_graph))
    {
      // Don't count space isolation towards round limit. Only increment
      // round counter if space isolation made no changes.
      round++;
      if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph))
      {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :(");
        break;
      }
    }

    sorted_graph.sort_shortest_distance ();
  }

  if (sorted_graph.in_error ())
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state.");
    return false;
  }

  if (graph::will_overflow (sorted_graph))
  {
    if (is_gsub_or_gpos && !always_recalculate_extensions) {
      // If this a GSUB/GPOS table and we didn't try to extension promotion and table splitting then
      // as a last ditch effort, re-run the repacker with it enabled.
      DEBUG_MSG (SUBSET_REPACK, nullptr, "Failed to find a resolution. Re-running with extension promotion and table splitting enabled.");
      return hb_resolve_graph_overflows (table_tag, max_rounds, true, sorted_graph);
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr, "Offset overflow resolution failed.");
    return false;
  }

  return true;
}
438 | | |
439 | | /* |
440 | | * Attempts to modify the topological sorting of the provided object graph to |
441 | | * eliminate offset overflows in the links between objects of the graph. If a |
442 | | * non-overflowing ordering is found the updated graph is serialized it into the |
443 | | * provided serialization context. |
444 | | * |
445 | | * If necessary the structure of the graph may be modified in ways that do not |
446 | | * affect the functionality of the graph. For example shared objects may be |
447 | | * duplicated. |
448 | | * |
449 | | * For a detailed writeup describing how the algorithm operates see: |
450 | | * docs/repacker.md |
451 | | */ |
452 | | template<typename T> |
453 | | inline hb_blob_t* |
454 | | hb_resolve_overflows (const T& packed, |
455 | | hb_tag_t table_tag, |
456 | | unsigned max_rounds = 32, |
457 | 0 | bool recalculate_extensions = false) { |
458 | 0 | graph_t sorted_graph (packed); |
459 | 0 | if (sorted_graph.in_error ()) |
460 | 0 | { |
461 | | // Invalid graph definition. |
462 | 0 | return nullptr; |
463 | 0 | } |
464 | | |
465 | 0 | if (!sorted_graph.is_fully_connected ()) |
466 | 0 | { |
467 | 0 | sorted_graph.print_orphaned_nodes (); |
468 | 0 | return nullptr; |
469 | 0 | } |
470 | | |
471 | 0 | if (sorted_graph.in_error ()) |
472 | 0 | { |
473 | | // Allocations failed somewhere |
474 | 0 | DEBUG_MSG (SUBSET_REPACK, nullptr, |
475 | 0 | "Graph is in error, likely due to a memory allocation error."); |
476 | 0 | return nullptr; |
477 | 0 | } |
478 | | |
479 | 0 | if (!hb_resolve_graph_overflows (table_tag, max_rounds, recalculate_extensions, sorted_graph)) |
480 | 0 | return nullptr; |
481 | | |
482 | 0 | return graph::serialize (sorted_graph); |
483 | 0 | } |
484 | | |
485 | | #endif /* HB_REPACKER_HH */ |