/src/harfbuzz/src/OT/glyf/CompositeGlyph.hh
#ifndef OT_GLYF_COMPOSITEGLYPH_HH
#define OT_GLYF_COMPOSITEGLYPH_HH


#include "../../hb-open-type.hh"
#include "composite-iter.hh"


namespace OT {
namespace glyf_impl {

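/* A single component record within a composite glyph: a glyph index plus
 * either an x/y offset or a pair of anchor-point indices, optionally
 * followed by a scale or 2x2 transform. */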
struct CompositeGlyphRecord
{
  protected:
  enum composite_glyph_flag_t
  {
    ARG_1_AND_2_ARE_WORDS      = 0x0001,
    ARGS_ARE_XY_VALUES         = 0x0002,
    ROUND_XY_TO_GRID           = 0x0004,
    WE_HAVE_A_SCALE            = 0x0008,
    MORE_COMPONENTS            = 0x0020,
    WE_HAVE_AN_X_AND_Y_SCALE   = 0x0040,
    WE_HAVE_A_TWO_BY_TWO       = 0x0080,
    WE_HAVE_INSTRUCTIONS       = 0x0100,
    USE_MY_METRICS             = 0x0200,
    OVERLAP_COMPOUND           = 0x0400,
    SCALED_COMPONENT_OFFSET    = 0x0800,
    UNSCALED_COMPONENT_OFFSET  = 0x1000,
#ifndef HB_NO_BEYOND_64K
    GID_IS_24BIT               = 0x2000
#endif
  };

  public:
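  /* Size in bytes of this component record; varies with the flags
   * (24-bit vs 16-bit glyph index, byte vs word arguments, and which
   * scale/transform variant is present). */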
  unsigned int get_size () const
  {
    unsigned int size = min_size;
    /* glyphIndex is 24bit instead of 16bit */
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT) size += HBGlyphID24::static_size - HBGlyphID16::static_size;
#endif
    /* arg1 and 2 are int16 */
    if (flags & ARG_1_AND_2_ARE_WORDS) size += 4;
    /* arg1 and 2 are int8 */
    else size += 2;

    /* One x 16 bit (scale) */
    if (flags & WE_HAVE_A_SCALE) size += 2;
    /* Two x 16 bit (xscale, yscale) */
    else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) size += 4;
    /* Four x 16 bit (xscale, scale01, scale10, yscale) */
    else if (flags & WE_HAVE_A_TWO_BY_TWO) size += 8;

    return size;
  }

  void drop_instructions_flag () { flags = (uint16_t) flags & ~WE_HAVE_INSTRUCTIONS; }
  void set_overlaps_flag ()
  {
    flags = (uint16_t) flags | OVERLAP_COMPOUND;
  }

  bool has_instructions () const { return flags & WE_HAVE_INSTRUCTIONS; }

  bool has_more () const { return flags & MORE_COMPONENTS; }
  bool is_use_my_metrics () const { return flags & USE_MY_METRICS; }
  bool is_anchored () const { return !(flags & ARGS_ARE_XY_VALUES); }
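  /* For an anchored component (ARGS_ARE_XY_VALUES clear), arg1/arg2 are
   * point indices: point1 in the glyph composited so far, point2 in this
   * component. */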
  void get_anchor_points (unsigned int &point1, unsigned int &point2) const
  {
    const auto *p = &StructAfter<const HBUINT8> (flags);
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT)
      p += HBGlyphID24::static_size;
    else
#endif
      p += HBGlyphID16::static_size;
    if (flags & ARG_1_AND_2_ARE_WORDS)
    {
      point1 = ((const HBUINT16 *) p)[0];
      point2 = ((const HBUINT16 *) p)[1];
    }
    else
    {
      point1 = p[0];
      point2 = p[1];
    }
  }

  static void transform (const float (&matrix)[4],
                         hb_array_t<contour_point_t> points)
  {
    if (matrix[0] != 1.f || matrix[1] != 0.f ||
        matrix[2] != 0.f || matrix[3] != 1.f)
      for (auto &point : points)
        point.transform (matrix);
  }

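  /* Translate the points by trans.  When not optimizing for size, touch
   * only the coordinate whose translation is non-zero. */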
  static void translate (const contour_point_t &trans,
                         hb_array_t<contour_point_t> points)
  {
    if (HB_OPTIMIZE_SIZE_VAL)
    {
      if (trans.x != 0.f || trans.y != 0.f)
        for (auto &point : points)
          point.translate (trans);
    }
    else
    {
      if (trans.x != 0.f && trans.y != 0.f)
        for (auto &point : points)
          point.translate (trans);
      else
      {
        if (trans.x != 0.f)
          for (auto &point : points)
            point.x += trans.x;
        else if (trans.y != 0.f)
          for (auto &point : points)
            point.y += trans.y;
      }
    }
  }

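  /* Apply this component's transform.  When the offset is scaled
   * (SCALED_COMPONENT_OFFSET), translate first so the matrix also scales
   * the offset; otherwise translate after the matrix is applied. */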
  void transform_points (hb_array_t<contour_point_t> points,
                         const float (&matrix)[4],
                         const contour_point_t &trans) const
  {
    if (scaled_offsets ())
    {
      translate (trans, points);
      transform (matrix, points);
    }
    else
    {
      transform (matrix, points);
      translate (trans, points);
    }
  }

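  /* Each component contributes a single pseudo-point (its offset); room is
   * also reserved for the four phantom points appended later. */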
  bool get_points (contour_point_vector_t &points) const
  {
    float matrix[4];
    contour_point_t trans;
    get_transformation (matrix, trans);
    if (unlikely (!points.alloc (points.length + 1 + 4))) return false; // For phantom points
    points.push (trans);
    return true;
  }

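  /* Serialize this record into out, replacing arg1/arg2 with the rounded
   * point coordinates.  If byte-sized args would overflow, the copy is
   * widened to word args (ARG_1_AND_2_ARE_WORDS).  Returns the number of
   * bytes written. */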
  unsigned compile_with_point (const contour_point_t &point,
                               char *out) const
  {
    const HBINT8 *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT)
      p += HBGlyphID24::static_size;
    else
#endif
      p += HBGlyphID16::static_size;

    unsigned len = get_size ();
    unsigned len_before_val = (const char *) p - (const char *) this;
    if (flags & ARG_1_AND_2_ARE_WORDS)
    {
      // no overflow, copy value
      hb_memcpy (out, this, len);

      HBINT16 *o = reinterpret_cast<HBINT16 *> (out + len_before_val);
      o[0] = roundf (point.x);
      o[1] = roundf (point.y);
    }
    else
    {
      int new_x = roundf (point.x);
      int new_y = roundf (point.y);
      if (new_x <= 127 && new_x >= -128 &&
          new_y <= 127 && new_y >= -128)
      {
        hb_memcpy (out, this, len);
        HBINT8 *o = reinterpret_cast<HBINT8 *> (out + len_before_val);
        o[0] = new_x;
        o[1] = new_y;
      }
      else
      {
        // new point value has an int8 overflow
        hb_memcpy (out, this, len_before_val);

        // update flags
        CompositeGlyphRecord *o = reinterpret_cast<CompositeGlyphRecord *> (out);
        o->flags = flags | ARG_1_AND_2_ARE_WORDS;
        out += len_before_val;

        HBINT16 new_value;
        new_value = new_x;
        hb_memcpy (out, &new_value, HBINT16::static_size);
        out += HBINT16::static_size;

        new_value = new_y;
        hb_memcpy (out, &new_value, HBINT16::static_size);
        out += HBINT16::static_size;

        hb_memcpy (out, p + 2, len - len_before_val - 2);
        len += 2;
      }
    }
    return len;
  }

  protected:
  bool scaled_offsets () const
  { return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; }

  public:
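  /* Decode the component offset (zeroed for anchored components) into
   * trans, and any scale / x-y scale / 2x2 entries into matrix.  Returns
   * true if the record carries a scale/2x2 entry or a non-zero offset. */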
  bool get_transformation (float (&matrix)[4], contour_point_t &trans) const
  {
    matrix[0] = matrix[3] = 1.f;
    matrix[1] = matrix[2] = 0.f;

    const auto *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT)
      p += HBGlyphID24::static_size;
    else
#endif
      p += HBGlyphID16::static_size;
    int tx, ty;
    if (flags & ARG_1_AND_2_ARE_WORDS)
    {
      tx = *(const HBINT16 *) p;
      p += HBINT16::static_size;
      ty = *(const HBINT16 *) p;
      p += HBINT16::static_size;
    }
    else
    {
      tx = *p++;
      ty = *p++;
    }
    if (is_anchored ()) tx = ty = 0;

    /* set is_end_point flag to true, used by IUP delta optimization */
    trans.init ((float) tx, (float) ty, true);

    {
      const F2DOT14 *points = (const F2DOT14 *) p;
      if (flags & WE_HAVE_A_SCALE)
      {
        matrix[0] = matrix[3] = points[0].to_float ();
        return true;
      }
      else if (flags & WE_HAVE_AN_X_AND_Y_SCALE)
      {
        matrix[0] = points[0].to_float ();
        matrix[3] = points[1].to_float ();
        return true;
      }
      else if (flags & WE_HAVE_A_TWO_BY_TWO)
      {
        matrix[0] = points[0].to_float ();
        matrix[1] = points[1].to_float ();
        matrix[2] = points[2].to_float ();
        matrix[3] = points[3].to_float ();
        return true;
      }
    }
    return tx || ty;
  }

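  /* The component glyph index lives right after the flags, as either a
   * 16-bit or (with GID_IS_24BIT) a 24-bit value. */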
  hb_codepoint_t get_gid () const
  {
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT)
      return StructAfter<const HBGlyphID24> (flags);
    else
#endif
      return StructAfter<const HBGlyphID16> (flags);
  }
  void set_gid (hb_codepoint_t gid)
  {
#ifndef HB_NO_BEYOND_64K
    if (flags & GID_IS_24BIT)
      StructAfter<HBGlyphID24> (flags) = gid;
    else
#endif
      /* TODO assert? */
      StructAfter<HBGlyphID16> (flags) = gid;
  }

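  /* If a 24-bit glyph index actually fits in 16 bits, rewrite the record
   * in place with a 16-bit index and shift the remaining bytes down. */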
#ifndef HB_NO_BEYOND_64K
  void lower_gid_24_to_16 ()
  {
    hb_codepoint_t gid = get_gid ();
    if (!(flags & GID_IS_24BIT) || gid > 0xFFFFu)
      return;

    /* Lower the flag and move the rest of the struct down. */

    unsigned size = get_size ();
    char *end = (char *) this + size;
    char *p = &StructAfter<char> (flags);
    p += HBGlyphID24::static_size;

    flags = flags & ~GID_IS_24BIT;
    set_gid (gid);

    memmove (p - HBGlyphID24::static_size + HBGlyphID16::static_size, p, end - p);
  }
#endif

  protected:
  HBUINT16 flags;
  HBUINT24 pad;
  public:
  DEFINE_SIZE_MIN (4);
};

using composite_iter_t = composite_iter_tmpl<CompositeGlyphRecord>;

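/* A composite glyph body: the GlyphHeader plus the chain of component
 * records (and optional trailing instructions) that follows it. */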
struct CompositeGlyph
{
  const GlyphHeader &header;
  hb_bytes_t bytes;
  CompositeGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) :
    header (header_), bytes (bytes_) {}

  composite_iter_t iter () const
  { return composite_iter_t (bytes, &StructAfter<CompositeGlyphRecord, GlyphHeader> (header)); }

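  /* Length of the trailing instruction bytes, i.e. whatever follows the
   * last component record when WE_HAVE_INSTRUCTIONS is set. */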
  unsigned int instructions_length (hb_bytes_t bytes) const
  {
    unsigned int start = bytes.length;
    unsigned int end = bytes.length;
    const CompositeGlyphRecord *last = nullptr;
    for (auto &item : iter ())
      last = &item;
    if (unlikely (!last)) return 0;

    if (last->has_instructions ())
      start = (char *) last - &bytes + last->get_size ();
    if (unlikely (start > end)) return 0;
    return end - start;
  }

  /* Trimming for composites not implemented.
   * If removing hints it falls out of that. */
  const hb_bytes_t trim_padding () const { return bytes; }

  void drop_hints ()
  {
    for (const auto &_ : iter ())
      const_cast<CompositeGlyphRecord &> (_).drop_instructions_flag ();
  }

  /* Chop instructions off the end */
  void drop_hints_bytes (hb_bytes_t &dest_start) const
  { dest_start = bytes.sub_array (0, bytes.length - instructions_length (bytes)); }

  void set_overlaps_flag ()
  {
    CompositeGlyphRecord &glyph_chain = const_cast<CompositeGlyphRecord &> (
        StructAfter<CompositeGlyphRecord, GlyphHeader> (header));
    if (!bytes.check_range (&glyph_chain, CompositeGlyphRecord::min_size))
      return;
    glyph_chain.set_overlaps_flag ();
  }

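  /* Rebuild the component records with points_with_deltas applied as the
   * new component offsets, copying any trailing instructions.  dest_bytes
   * points at freshly allocated memory, sized at twice the source body
   * since byte args may widen to words. */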
  bool compile_bytes_with_deltas (const hb_bytes_t &source_bytes,
                                  const contour_point_vector_t &points_with_deltas,
                                  hb_bytes_t &dest_bytes /* OUT */)
  {
    if (source_bytes.length <= GlyphHeader::static_size ||
        header.numberOfContours != -1)
    {
      dest_bytes = hb_bytes_t ();
      return true;
    }

    unsigned source_len = source_bytes.length - GlyphHeader::static_size;

    /* Allocate more memory than the source glyph bytes, in case an int8
     * value overflows and we need to use int16 instead. */
    char *o = (char *) hb_calloc (source_len * 2, sizeof (char));
    if (unlikely (!o)) return false;

    const CompositeGlyphRecord *c = reinterpret_cast<const CompositeGlyphRecord *> (source_bytes.arrayZ + GlyphHeader::static_size);
    auto it = composite_iter_t (hb_bytes_t ((const char *) c, source_len), c);

    char *p = o;
    unsigned i = 0, source_comp_len = 0;
    for (const auto &component : it)
    {
      /* last 4 points in points_with_deltas are phantom points and should not be included */
      if (i >= points_with_deltas.length - 4) {
        hb_free (o);
        return false;
      }

      unsigned comp_len = component.get_size ();
      if (component.is_anchored ())
      {
        hb_memcpy (p, &component, comp_len);
        p += comp_len;
      }
      else
      {
        unsigned new_len = component.compile_with_point (points_with_deltas[i], p);
        p += new_len;
      }
      i++;
      source_comp_len += comp_len;
    }

    /* copy instructions if any */
    if (source_len > source_comp_len)
    {
      unsigned instr_len = source_len - source_comp_len;
      hb_memcpy (p, (const char *) c + source_comp_len, instr_len);
      p += instr_len;
    }

    unsigned len = p - o;
    dest_bytes = hb_bytes_t (o, len);
    return true;
  }
};


} /* namespace glyf_impl */
} /* namespace OT */


#endif /* OT_GLYF_COMPOSITEGLYPH_HH */