/work/workdir/UnpackedTarball/harfbuzz/src/hb-ot-var-gvar-table.hh
/*
 * Copyright © 2019 Adobe Inc.
 * Copyright © 2019 Ebrahim Byagowi
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Adobe Author(s): Michiharu Ariza
 */

#ifndef HB_OT_VAR_GVAR_TABLE_HH
#define HB_OT_VAR_GVAR_TABLE_HH

#include "hb-decycler.hh"
#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"

/*
 * gvar -- Glyph Variation Table
 * https://docs.microsoft.com/en-us/typography/opentype/spec/gvar
 */
#define HB_OT_TAG_gvar HB_TAG('g','v','a','r')
#define HB_OT_TAG_GVAR HB_TAG('G','V','A','R')
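
/* A hedged sketch (not part of this header) of how a client reaches this
 * table through the public API; hb_face_reference_table() returns the raw
 * blob, while the accelerator below provides the sanitized, parsed view:
 *
 *   hb_blob_t *gvar_blob = hb_face_reference_table (face, HB_OT_TAG_gvar);
 *   unsigned int length;
 *   const char *data = hb_blob_get_data (gvar_blob, &length);
 *   // ... parse via the sanitizer, as accelerator_t below does ...
 *   hb_blob_destroy (gvar_blob);
 */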

struct hb_glyf_scratch_t
{
  // glyf
  contour_point_vector_t all_points;
  contour_point_vector_t comp_points;
  hb_decycler_t decycler;

  // gvar
  contour_point_vector_t orig_points;
  hb_vector_t<int> x_deltas;
  hb_vector_t<int> y_deltas;
  contour_point_vector_t deltas;
  hb_vector_t<unsigned int> shared_indices;
  hb_vector_t<unsigned int> private_indices;
};
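
/* A minimal sketch of the intended reuse pattern for the scratch object
 * (caller and variable names are illustrative, not API): keep one
 * hb_glyf_scratch_t alive across glyphs so its vectors retain capacity.
 *
 *   hb_glyf_scratch_t scratch;
 *   for (hb_codepoint_t gid : gids)
 *     accel.apply_deltas_to_points (gid, coords, points.as_array (), scratch);
 *
 * apply_deltas_to_points() below clear()s or resize()s each vector itself,
 * so no per-glyph reallocation is needed once capacity has grown. */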

namespace OT {

template <typename OffsetType>
struct glyph_variations_t
{
  // TODO: Move tuple_variations_t to outside of TupleVariationData
  using tuple_variations_t = typename TupleVariationData<OffsetType>::tuple_variations_t;
  using GlyphVariationData = TupleVariationData<OffsetType>;

  hb_vector_t<tuple_variations_t> glyph_variations;

  hb_vector_t<char> compiled_shared_tuples;
  private:
  unsigned shared_tuples_count = 0;

  /* shared coords -> index map after instantiation */
  hb_hashmap_t<const hb_vector_t<char>*, unsigned> shared_tuples_idx_map;

  public:
  unsigned compiled_shared_tuples_count () const
  { return shared_tuples_count; }

  unsigned compiled_byte_size () const
  {
    unsigned byte_size = 0;
    for (const auto& _ : glyph_variations)
      byte_size += _.get_compiled_byte_size ();

    return byte_size;
  }

  bool create_from_glyphs_var_data (unsigned axis_count,
                                    const hb_array_t<const F2DOT14> shared_tuples,
                                    const hb_subset_plan_t *plan,
                                    const hb_hashmap_t<hb_codepoint_t, hb_bytes_t>& new_gid_var_data_map)
  {
    if (unlikely (!glyph_variations.alloc_exact (plan->new_to_old_gid_list.length)))
      return false;

    auto it = hb_iter (plan->new_to_old_gid_list);
    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      contour_point_vector_t *all_contour_points;
      if (!new_gid_var_data_map.has (new_gid) ||
          !plan->new_gid_contour_points_map.has (new_gid, &all_contour_points))
        return false;
      hb_bytes_t var_data = new_gid_var_data_map.get (new_gid);

      const GlyphVariationData* p = reinterpret_cast<const GlyphVariationData*> (var_data.arrayZ);
      typename GlyphVariationData::tuple_iterator_t iterator;
      tuple_variations_t tuple_vars;

      hb_vector_t<unsigned> shared_indices;

      /* If the variation data is empty, push an empty struct into the vector
       * to keep it in sync with new_to_old_gid_list. */
      if (!var_data || !p->has_data () || !all_contour_points->length ||
          !GlyphVariationData::get_tuple_iterator (var_data, axis_count,
                                                   var_data.arrayZ,
                                                   shared_indices, &iterator))
      {
        glyph_variations.push (std::move (tuple_vars));
        continue;
      }

      bool is_composite_glyph = plan->composite_new_gids.has (new_gid);

      if (!p->decompile_tuple_variations (all_contour_points->length, true /* is_gvar */,
                                          iterator, &(plan->axes_old_index_tag_map),
                                          shared_indices, shared_tuples,
                                          tuple_vars, /* OUT */
                                          is_composite_glyph))
        return false;
      glyph_variations.push (std::move (tuple_vars));
    }
    return !glyph_variations.in_error () && glyph_variations.length == plan->new_to_old_gid_list.length;
  }

  bool instantiate (const hb_subset_plan_t *plan)
  {
    unsigned count = plan->new_to_old_gid_list.length;
    bool iup_optimize = plan->flags & HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS;
    for (unsigned i = 0; i < count; i++)
    {
      hb_codepoint_t new_gid = plan->new_to_old_gid_list[i].first;
      contour_point_vector_t *all_points;
      if (!plan->new_gid_contour_points_map.has (new_gid, &all_points))
        return false;
      if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points, iup_optimize))
        return false;
    }
    return true;
  }

  bool compile_bytes (const hb_map_t& axes_index_map,
                      const hb_map_t& axes_old_index_tag_map)
  {
    if (!compile_shared_tuples (axes_index_map, axes_old_index_tag_map))
      return false;
    for (tuple_variations_t& vars: glyph_variations)
      if (!vars.compile_bytes (axes_index_map, axes_old_index_tag_map,
                               true, /* use shared points */
                               true,
                               &shared_tuples_idx_map))
        return false;

    return true;
  }

  bool compile_shared_tuples (const hb_map_t& axes_index_map,
                              const hb_map_t& axes_old_index_tag_map)
  {
    /* The key is a pointer to compiled_peak_coords inside each tuple; the
     * hashing function always dereferences pointers first. */
    hb_hashmap_t<const hb_vector_t<char>*, unsigned> coords_count_map;

    /* Count the occurrences of each set of peak coords. */
    for (tuple_variations_t& vars: glyph_variations)
    {
      for (tuple_delta_t& var : vars.tuple_vars)
      {
        if (!var.compile_peak_coords (axes_index_map, axes_old_index_tag_map))
          return false;
        unsigned* count;
        if (coords_count_map.has (&(var.compiled_peak_coords), &count))
          coords_count_map.set (&(var.compiled_peak_coords), *count + 1);
        else
          coords_count_map.set (&(var.compiled_peak_coords), 1);
      }
    }

    if (!coords_count_map || coords_count_map.in_error ())
      return false;

    /* Add only those coords that are used more than once into the vector and sort. */
    hb_vector_t<const hb_vector_t<char>*> shared_coords;
    if (unlikely (!shared_coords.alloc (coords_count_map.get_population ())))
      return false;

    for (const auto _ : coords_count_map.iter ())
    {
      if (_.second == 1) continue;
      shared_coords.push (_.first);
    }

    /* No shared tuples: no coords are used more than once. */
    if (!shared_coords) return true;
    /* Sort by coord frequency first (high to low), then compare the coord
     * bytes themselves. */
    hb_qsort (shared_coords.arrayZ, shared_coords.length, sizeof (hb_vector_t<char>*), _cmp_coords, (void *) (&coords_count_map));

    /* Build the shared_coords -> idx map and the shared tuples byte array. */

    shared_tuples_count = hb_min (0xFFFu + 1, shared_coords.length);
    unsigned len = shared_tuples_count * (shared_coords[0]->length);
    if (unlikely (!compiled_shared_tuples.alloc (len)))
      return false;

    for (unsigned i = 0; i < shared_tuples_count; i++)
    {
      shared_tuples_idx_map.set (shared_coords[i], i);
      /* TODO: add a concat() to hb_vector_t? */
      for (char c : shared_coords[i]->iter ())
        compiled_shared_tuples.push (c);
    }

    return true;
  }
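
  /* Worked example of the selection above: if the compiled peak coords occur
   * with frequencies { A: 3, B: 1, C: 2 }, only A and C are shared (B occurs
   * once, so sharing it saves nothing) and they sort to [A, C], giving the
   * index map { A -> 0, C -> 1 }.  At most 0xFFF + 1 = 4096 tuples can be
   * shared, since TupleVariationHeader stores a shared tuple index in 12 bits. */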

  static int _cmp_coords (const void *pa, const void *pb, void *arg)
  {
    const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* coords_count_map =
        reinterpret_cast<const hb_hashmap_t<const hb_vector_t<char>*, unsigned>*> (arg);

    /* shared_coords is an hb_vector_t<const hb_vector_t<char>*>, so cast
     * pa/pb to pointers-to-pointers. */
    const hb_vector_t<char>** a = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*> (pa));
    const hb_vector_t<char>** b = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*> (pb));

    bool has_a = coords_count_map->has (*a);
    bool has_b = coords_count_map->has (*b);

    if (has_a && has_b)
    {
      unsigned a_num = coords_count_map->get (*a);
      unsigned b_num = coords_count_map->get (*b);

      if (a_num != b_num)
        return b_num - a_num;

      return (*b)->as_array ().cmp ((*a)->as_array ());
    }
    else if (has_a) return -1;
    else if (has_b) return 1;
    else return 0;
  }

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize_glyph_var_data (hb_serialize_context_t *c,
                                 Iterator it,
                                 bool long_offset,
                                 unsigned num_glyphs,
                                 char* glyph_var_data_offsets /* OUT: glyph var data offsets array */) const
  {
    TRACE_SERIALIZE (this);

    if (long_offset)
    {
      ((HBUINT32 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 2;
    }
    unsigned glyph_offset = 0;
    hb_codepoint_t last_gid = 0;
    unsigned idx = 0;

    GlyphVariationData* cur_glyph = c->start_embed<GlyphVariationData> ();
    if (!cur_glyph) return_trace (false);
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      if (long_offset)
        for (; last_gid < gid; last_gid++)
          ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
      else
        for (; last_gid < gid; last_gid++)
          ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;

      if (idx >= glyph_variations.length) return_trace (false);
      if (!cur_glyph->serialize (c, true, glyph_variations[idx])) return_trace (false);
      GlyphVariationData* next_glyph = c->start_embed<GlyphVariationData> ();
      glyph_offset += (char *) next_glyph - (char *) cur_glyph;

      if (long_offset)
        ((HBUINT32 *) glyph_var_data_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) glyph_var_data_offsets)[gid] = glyph_offset / 2;

      last_gid++;
      idx++;
      cur_glyph = next_glyph;
    }

    if (long_offset)
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
    else
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;
    return_trace (true);
  }
};
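
/* Worked example of the offset array written by serialize_glyph_var_data():
 * three glyphs with compiled variation data of 10, 0 and 6 bytes produce the
 * running offsets [0, 10, 10, 16] (num_glyphs + 1 entries).  In the short
 * format each stored value is offset / 2, i.e. [0, 5, 5, 8]; that is why
 * short offsets can only address 2 * 0xFFFF = 0x1FFFE bytes of data, the
 * long-offset threshold used by serialize() and subset() below. */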

template <typename GidOffsetType, unsigned TableTag>
struct gvar_GVAR
{
  static constexpr hb_tag_t tableTag = TableTag;

  using GlyphVariationData = TupleVariationData<GidOffsetType>;

  bool has_data () const { return version.to_int () != 0; }

  bool sanitize_shallow (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) &&
                  hb_barrier () &&
                  (version.major == 1) &&
                  sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
                  (is_long_offset () ?
                     c->check_array (get_long_offset_array (), c->get_num_glyphs () + 1) :
                     c->check_array (get_short_offset_array (), c->get_num_glyphs () + 1)));
  }

  /* GlyphVariationData not sanitized here; it must be checked while accessing
   * each glyph's variation data. */
  bool sanitize (hb_sanitize_context_t *c) const
  { return sanitize_shallow (c); }

  bool decompile_glyph_variations (hb_subset_context_t *c,
                                   glyph_variations_t<GidOffsetType>& glyph_vars /* OUT */) const
  {
    hb_hashmap_t<hb_codepoint_t, hb_bytes_t> new_gid_var_data_map;
    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
    {
      new_gid_var_data_map.set (0, hb_bytes_t ());
      it++;
    }

    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      hb_codepoint_t old_gid = _.second;
      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob, glyphCountX, old_gid);
      new_gid_var_data_map.set (new_gid, var_data_bytes);
    }

    if (new_gid_var_data_map.in_error ()) return false;

    hb_array_t<const F2DOT14> shared_tuples = (this+sharedTuples).as_array ((unsigned) sharedTupleCount * (unsigned) axisCount);
    return glyph_vars.create_from_glyphs_var_data (axisCount, shared_tuples, c->plan, new_gid_var_data_map);
  }

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize (hb_serialize_context_t *c,
                  const glyph_variations_t<GidOffsetType>& glyph_vars,
                  Iterator it,
                  unsigned axis_count,
                  unsigned num_glyphs,
                  bool force_long_offsets) const
  {
    TRACE_SERIALIZE (this);
    gvar_GVAR *out = c->allocate_min<gvar_GVAR> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axis_count;
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    unsigned glyph_var_data_size = glyph_vars.compiled_byte_size ();
    /* According to the spec: if the short format (Offset16) is used for
     * offsets, the value stored is the offset divided by 2, so the maximum
     * data size is 2 * 0xFFFFu = 0x1FFFEu. */
    bool long_offset = glyph_var_data_size > 0x1FFFEu || force_long_offsets;
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *glyph_var_data_offsets = c->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!glyph_var_data_offsets) return_trace (false);

    /* shared tuples */
    unsigned shared_tuple_count = glyph_vars.compiled_shared_tuples_count ();
    out->sharedTupleCount = shared_tuple_count;

    if (!shared_tuple_count)
      out->sharedTuples = 0;
    else
    {
      hb_array_t<const char> shared_tuples = glyph_vars.compiled_shared_tuples.as_array ().copy (c);
      if (!shared_tuples.arrayZ) return_trace (false);
      out->sharedTuples = shared_tuples.arrayZ - (char *) out;
    }

    char *glyph_var_data = c->start_embed<char> ();
    if (!glyph_var_data) return_trace (false);
    out->dataZ = glyph_var_data - (char *) out;

    return_trace (glyph_vars.serialize_glyph_var_data (c, it, long_offset, num_glyphs,
                                                       (char *) glyph_var_data_offsets));
  }

  bool instantiate (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    glyph_variations_t<GidOffsetType> glyph_vars;
    if (!decompile_glyph_variations (c, glyph_vars))
      return_trace (false);

    if (!glyph_vars.instantiate (c->plan)) return_trace (false);
    if (!glyph_vars.compile_bytes (c->plan->axes_index_map, c->plan->axes_old_index_tag_map))
      return_trace (false);

    unsigned axis_count = c->plan->axes_index_map.get_population ();
    unsigned num_glyphs = c->plan->num_output_glyphs ();
    auto it = hb_iter (c->plan->new_to_old_gid_list);

    bool force_long_offsets = false;
#ifdef HB_EXPERIMENTAL_API
    force_long_offsets = c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS;
#endif
    return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs, force_long_offsets));
  }

  bool subset (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    if (c->plan->all_axes_pinned)
      return_trace (false);

    if (c->plan->normalized_coords)
      return_trace (instantiate (c));

    unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;

    gvar_GVAR *out = c->serializer->allocate_min<gvar_GVAR> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axisCount;
    out->sharedTupleCount = sharedTupleCount;

    unsigned int num_glyphs = c->plan->num_output_glyphs ();
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    unsigned int subset_data_size = 0;
    for (auto &_ : it)
    {
      hb_codepoint_t old_gid = _.second;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, glyph_count, old_gid).length;
    }

    /* According to the spec: if the short format (Offset16) is used for
     * offsets, the value stored is the offset divided by 2, so the maximum
     * data size is 2 * 0xFFFFu = 0x1FFFEu. */
    bool long_offset = subset_data_size > 0x1FFFEu;
#ifdef HB_EXPERIMENTAL_API
    long_offset = long_offset || (c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS);
#endif
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!subset_offsets) return_trace (false);

    /* shared tuples */
    if (!sharedTupleCount || !sharedTuples)
      out->sharedTuples = 0;
    else
    {
      unsigned int shared_tuple_size = F2DOT14::static_size * axisCount * sharedTupleCount;
      F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size);
      if (!tuples) return_trace (false);
      out->sharedTuples = (char *) tuples - (char *) out;
      hb_memcpy (tuples, this+sharedTuples, shared_tuple_size);
    }

    /* This ordering relative to the shared tuples array, which puts the
     * glyphVariationData last in the table, is required when
     * HB_SUBSET_FLAGS_IFTB_REQUIREMENTS is set. */
    char *subset_data = c->serializer->allocate_size<char> (subset_data_size, false);
    if (!subset_data) return_trace (false);
    out->dataZ = subset_data - (char *) out;

    if (long_offset)
    {
      ((HBUINT32 *) subset_offsets)[0] = 0;
      subset_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) subset_offsets)[0] = 0;
      subset_offsets += 2;
    }
    unsigned int glyph_offset = 0;

    hb_codepoint_t last = 0;
    it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      hb_codepoint_t old_gid = _.second;

      if (long_offset)
        for (; last < gid; last++)
          ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
      else
        for (; last < gid; last++)
          ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob,
                                                            glyph_count,
                                                            old_gid);

      hb_memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
      subset_data += var_data_bytes.length;
      glyph_offset += var_data_bytes.length;

      if (long_offset)
        ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;

      last++; // Skip over gid
    }

    if (long_offset)
      for (; last < num_glyphs; last++)
        ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
    else
      for (; last < num_glyphs; last++)
        ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

    return_trace (true);
  }

  protected:
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob,
                                             unsigned glyph_count,
                                             hb_codepoint_t glyph) const
  {
    unsigned start_offset = get_offset (glyph_count, glyph);
    unsigned end_offset = get_offset (glyph_count, glyph+1);
    if (unlikely (end_offset < start_offset)) return hb_bytes_t ();
    unsigned length = end_offset - start_offset;
    hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
    return likely (var_data.length >= GlyphVariationData::min_size) ? var_data : hb_bytes_t ();
  }

  bool is_long_offset () const { return flags & 1; }

  unsigned get_offset (unsigned glyph_count, unsigned i) const
  {
    if (unlikely (i > glyph_count)) return 0;
    hb_barrier ();
    return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2;
  }

  const HBUINT32 *get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; }
  const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; }

  public:
  struct accelerator_t
  {
    bool has_data () const { return table->has_data (); }

    accelerator_t (hb_face_t *face)
    {
      table = hb_sanitize_context_t ().reference_table<gvar_GVAR> (face);
      /* If sanitize failed, set glyphCount to 0. */
      glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;

      /* For shared tuples that have only one or two axes active, cache the
       * indices of those axes.  This speeds up calculate_scalar() a lot for
       * fonts with many axes and many "monovar" or "duovar" tuples. */
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
      unsigned count = table->sharedTupleCount;
      if (unlikely (!shared_tuple_active_idx.resize (count, false))) return;
      unsigned axis_count = table->axisCount;
      for (unsigned i = 0; i < count; i++)
      {
        hb_array_t<const F2DOT14> tuple = shared_tuples.sub_array (axis_count * i, axis_count);
        int idx1 = -1, idx2 = -1;
        for (unsigned j = 0; j < axis_count; j++)
        {
          const F2DOT14 &peak = tuple.arrayZ[j];
          if (peak.to_int () != 0)
          {
            if (idx1 == -1)
              idx1 = j;
            else if (idx2 == -1)
              idx2 = j;
            else
            {
              idx1 = idx2 = -1;
              break;
            }
          }
        }
        shared_tuple_active_idx.arrayZ[i] = {idx1, idx2};
      }
    }
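
    /* Worked example of the cache built above: in a 10-axis font, a shared
     * tuple whose peaks are (0, 0, 0.5, 0, ..., 0) is "monovar" on axis 2 and
     * is cached as {2, -1}, so calculate_scalar() inspects one coordinate
     * instead of all 10; a tuple with three or more nonzero peaks is cached
     * as {-1, -1} and falls back to the full scan. */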
    ~accelerator_t () { table.destroy (); }

    private:

    static float infer_delta (const hb_array_t<contour_point_t> points,
                              const hb_array_t<contour_point_t> deltas,
                              unsigned int target, unsigned int prev, unsigned int next,
                              float contour_point_t::*m)
    {
      float target_val = points.arrayZ[target].*m;
      float prev_val = points.arrayZ[prev].*m;
      float next_val = points.arrayZ[next].*m;
      float prev_delta = deltas.arrayZ[prev].*m;
      float next_delta = deltas.arrayZ[next].*m;

      if (prev_val == next_val)
        return (prev_delta == next_delta) ? prev_delta : 0.f;
      else if (target_val <= hb_min (prev_val, next_val))
        return (prev_val < next_val) ? prev_delta : next_delta;
      else if (target_val >= hb_max (prev_val, next_val))
        return (prev_val > next_val) ? prev_delta : next_delta;

      /* linear interpolation */
      float r = (target_val - prev_val) / (next_val - prev_val);
      return prev_delta + r * (next_delta - prev_delta);
    }
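
    /* Worked example of the interpolation case: with prev_val = 10,
     * next_val = 20 and target_val = 15, r = (15 - 10) / (20 - 10) = 0.5, so
     * the inferred delta lands halfway between prev_delta and next_delta.
     * Targets outside [prev_val, next_val] instead snap to the delta of the
     * nearer reference point, per the branches above. */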

    static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
    { return (i >= end) ? start : (i + 1); }

    public:
    bool apply_deltas_to_points (hb_codepoint_t glyph,
                                 hb_array_t<const int> coords,
                                 const hb_array_t<contour_point_t> points,
                                 hb_glyf_scratch_t &scratch,
                                 bool phantom_only = false) const
    {
      if (unlikely (glyph >= glyphCount)) return true;

      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
      if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;

      auto &shared_indices = scratch.shared_indices;
      shared_indices.clear ();

      typename GlyphVariationData::tuple_iterator_t iterator;
      if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
                                                   var_data_bytes.arrayZ,
                                                   shared_indices, &iterator))
        return true; /* so isn't applied at all */

      /* Save original points for inferred delta calculation */
      auto &orig_points_vec = scratch.orig_points;
      orig_points_vec.clear (); // Populated lazily
      auto orig_points = orig_points_vec.as_array ();

      /* The flag member is used to mark referenced points. */
      auto &deltas_vec = scratch.deltas;
      deltas_vec.clear (); // Populated lazily
      auto deltas = deltas_vec.as_array ();

      unsigned num_coords = table->axisCount;
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * num_coords);

      auto &private_indices = scratch.private_indices;
      auto &x_deltas = scratch.x_deltas;
      auto &y_deltas = scratch.y_deltas;

      unsigned count = points.length;
      bool flush = false;
      do
      {
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples,
                                                                 &shared_tuple_active_idx);
        if (scalar == 0.f) continue;
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        if (!deltas)
        {
          if (unlikely (!deltas_vec.resize (count, false))) return false;
          deltas = deltas_vec.as_array ();
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }

        const HBUINT8 *end = p + length;

        bool has_private_points = iterator.current_tuple->has_private_points ();
        if (has_private_points &&
            !GlyphVariationData::decompile_points (p, private_indices, end))
          return false;
        const hb_array_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;

        bool apply_to_all = (indices.length == 0);
        unsigned int num_deltas = apply_to_all ? points.length : indices.length;
        if (unlikely (!x_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::decompile_deltas (p, x_deltas, end))) return false;
        if (unlikely (!y_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::decompile_deltas (p, y_deltas, end))) return false;

        if (!apply_to_all)
        {
          if (!orig_points && !phantom_only)
          {
            orig_points_vec.extend (points);
            if (unlikely (orig_points_vec.in_error ())) return false;
            orig_points = orig_points_vec.as_array ();
          }

          if (flush)
          {
            for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              points.arrayZ[i].translate (deltas.arrayZ[i]);
            flush = false;
          }
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }

        if (HB_OPTIMIZE_SIZE_VAL)
        {
          for (unsigned int i = 0; i < num_deltas; i++)
          {
            unsigned int pt_index;
            if (apply_to_all)
              pt_index = i;
            else
            {
              pt_index = indices[i];
              if (unlikely (pt_index >= deltas.length)) continue;
            }
            if (phantom_only && pt_index < count - 4) continue;
            auto &delta = deltas.arrayZ[pt_index];
            delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
            delta.add_delta (x_deltas.arrayZ[i] * scalar,
                             y_deltas.arrayZ[i] * scalar);
          }
        }
        else
        {
          /* Ouch. Four cases... for optimization. */
          if (scalar != 1.0f)
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                auto &delta = deltas.arrayZ[i];
                delta.add_delta (x_deltas.arrayZ[i] * scalar,
                                 y_deltas.arrayZ[i] * scalar);
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.add_delta (x_deltas.arrayZ[i] * scalar,
                                 y_deltas.arrayZ[i] * scalar);
              }
          }
          else
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                auto &delta = deltas.arrayZ[i];
                delta.add_delta (x_deltas.arrayZ[i],
                                 y_deltas.arrayZ[i]);
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.add_delta (x_deltas.arrayZ[i],
                                 y_deltas.arrayZ[i]);
              }
          }
        }

        /* infer deltas for unreferenced points */
        if (!apply_to_all && !phantom_only)
        {
          unsigned start_point = 0;
          unsigned end_point = 0;
          while (true)
          {
            while (end_point < count && !points.arrayZ[end_point].is_end_point)
              end_point++;
            if (unlikely (end_point == count)) break;

            /* Count the unreferenced points in this contour.  If there are no
             * unreferenced points, or no referenced points, there is nothing to do. */
            unsigned unref_count = 0;
            for (unsigned i = start_point; i < end_point + 1; i++)
              unref_count += deltas.arrayZ[i].flag;
            unref_count = (end_point - start_point + 1) - unref_count;

            unsigned j = start_point;
            if (unref_count == 0 || unref_count > end_point - start_point)
              goto no_more_gaps;

            for (;;)
            {
              /* Locate the next gap of unreferenced points between two referenced points prev and next.
               * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
               */
              unsigned int prev, next, i;
              for (;;)
              {
                i = j;
                j = next_index (i, start_point, end_point);
                if (deltas.arrayZ[i].flag && !deltas.arrayZ[j].flag) break;
              }
              prev = j = i;
              for (;;)
              {
                i = j;
                j = next_index (i, start_point, end_point);
                if (!deltas.arrayZ[i].flag && deltas.arrayZ[j].flag) break;
              }
              next = j;
              /* Infer deltas for all unref points in the gap between prev and next */
              i = prev;
              for (;;)
              {
                i = next_index (i, start_point, end_point);
                if (i == next) break;
                deltas.arrayZ[i].x = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::x);
                deltas.arrayZ[i].y = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::y);
                if (--unref_count == 0) goto no_more_gaps;
              }
            }
            no_more_gaps:
            start_point = end_point = end_point + 1;
          }
        }

        flush = true;

      } while (iterator.move_to_next ());

      if (flush)
      {
        for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
          points.arrayZ[i].translate (deltas.arrayZ[i]);
      }

      return true;
    }
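
    /* A hedged usage sketch (variable names are illustrative): points must
     * hold the unmodified glyf outline, including the 4 trailing phantom
     * points; the deltas of every applicable tuple are accumulated and then
     * flushed into it in place.
     *
     *   hb_glyf_scratch_t scratch;
     *   accel.apply_deltas_to_points (gid, coords, points.as_array (), scratch);
     *
     * Passing phantom_only = true touches only the last 4 (phantom) points,
     * which is enough when just the variated advance is needed. */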

    unsigned int get_axis_count () const { return table->axisCount; }

    private:
    hb_blob_ptr_t<gvar_GVAR> table;
    unsigned glyphCount;
    hb_vector_t<hb_pair_t<int, int>> shared_tuple_active_idx;
  };

  protected:
  FixedVersion<> version;       /* Version number of the glyph variations table.
                                 * Set to 0x00010000u. */
  HBUINT16 axisCount;           /* The number of variation axes for this font. This must be
                                 * the same number as axisCount in the 'fvar' table. */
  HBUINT16 sharedTupleCount;    /* The number of shared tuple records. Shared tuple records
                                 * can be referenced within glyph variation data tables for
                                 * multiple glyphs, as opposed to other tuple records stored
                                 * directly within a glyph variation data table. */
  NNOffset32To<UnsizedArrayOf<F2DOT14>>
           sharedTuples;        /* Offset from the start of this table to the shared tuple records.
                                 * Array of tuple records shared across all glyph variation data tables. */
  GidOffsetType glyphCountX;    /* The number of glyphs in this font. This must match the number of
                                 * glyphs stored elsewhere in the font. */
  HBUINT16 flags;               /* Bit-field that gives the format of the offset array that follows.
                                 * If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
                                 * offsets are uint32. */
  Offset32To<GlyphVariationData>
           dataZ;               /* Offset from the start of this table to the array of
                                 * GlyphVariationData tables. */
  UnsizedArrayOf<HBUINT8>
           offsetZ;             /* Offsets from the start of the GlyphVariationData array
                                 * to each GlyphVariationData table. */
  public:
  DEFINE_SIZE_ARRAY (20, offsetZ);
};

using gvar = gvar_GVAR<HBUINT16, HB_OT_TAG_gvar>;
using GVAR = gvar_GVAR<HBUINT24, HB_OT_TAG_GVAR>;
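
/* The two instantiations differ in the table tag and in GidOffsetType:
 * HBUINT16 caps glyphCountX at 0xFFFF glyphs for the standard 'gvar' table,
 * while 'GVAR' widens it to HBUINT24 (up to 0xFFFFFF glyphs), presumably for
 * fonts that exceed the 16-bit glyph-ID limit. */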

struct gvar_accelerator_t : gvar::accelerator_t {
  gvar_accelerator_t (hb_face_t *face) : gvar::accelerator_t (face) {}
};
struct GVAR_accelerator_t : GVAR::accelerator_t {
  GVAR_accelerator_t (hb_face_t *face) : GVAR::accelerator_t (face) {}
};

} /* namespace OT */

#endif /* HB_OT_VAR_GVAR_TABLE_HH */