/src/ghostpdl/base/gxhintn.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2023 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Type 1 hinter, a new algorithm */ |
18 | | |
19 | | #include "memory_.h" |
20 | | #include "math_.h" |
21 | | #include "gx.h" |
22 | | #include "gxfixed.h" |
23 | | #include "gxarith.h" |
24 | | #include "gstypes.h" |
25 | | #include "gxmatrix.h" |
26 | | #include "gxpath.h" |
27 | | #include "gxfont.h" |
28 | | #include "gxfont1.h" |
29 | | #include "gxtype1.h" |
30 | | #include "gxhintn.h" |
31 | | #include "gzpath.h" |
32 | | #include "gserrors.h" |
33 | | |
34 | | /* todo : |
35 | | - Diagonal stems are not hinted; |
36 | | - Some fonts have no StdHW, StdWW. Adobe appears to autohint them. |
37 | | - Measure Adobe's flatness parameter. |
38 | | - Test Adobe compatibility for rotated/skewed glyphs. |
39 | | */ |
40 | | |
41 | | /* Stem processing basics : |
42 | | (See the glyph AE in Times-Roman by Adobe.) |
43 | | |
44 | | 0. This supposes that glyph is transformed to device space |
45 | | with a random matrix. |
46 | | |
47 | | All outline poles and all hint commands are stored in arrays |
48 | | before starting the exact processing. |
49 | | |
50 | | HR pole is pole before which stem replacement happens. |
51 | | |
52 | | 1. Stem hints may be primary ones (defined in the beginning of charstring), |
53 | | and secondary ones (defined at HR poles). Consider that |
54 | | secondary stem hints may be redundant (see AE in Times-Roman). |
55 | | Secondary stems are HIGHER priority than basic ones. |
56 | | |
57 | | 2. The range of secondary stem command is from its HR pole to next HR pole. |
58 | | The range of primary stem command is entire glyph. |
59 | | |
60 | | 3. The TT interpreter aligned stem3 with centering the middle stem. |
61 | | |
62 | | 4. If a stem boundary corresponds to a pole aligned with an alignment zone, |
63 | | pass aligned coordinate to the stem command. |
64 | | Use the stem boundary longitude middle point for alignment with |
65 | | skewed or rotated matrix. Use standard stem width for computing |
66 | | opposite coordinates. |
67 | | |
68 | | 4a. If a stem width rounds to several StemSnap* element, |
69 | | choose the element, to which more stems can round. |
70 | | See Adobe Technical Note #5049. |
71 | | |
72 | | 5. If several stems have a same boundary coordinate, |
73 | | this boundary gets more priority when aligned. |
74 | | |
75 | | 6. Considering each set of repeating stem commands as a stem complex, pass |
76 | | aligned coordinates to opposite boundaries of stem commands. |
77 | | |
78 | | 7. Pass aligned boundary coordinate to poles within stem command range. |
79 | | Note that this will pass aligned coordinates back to poles, |
80 | | from which stem alignment was taken. |
81 | | |
82 | | 8. Interpolate unaligned poles. |
83 | | |
84 | | 9. After the alignment is done, it is desirable to check for |
85 | | anomalous negative contours and fix them, but we have no |
86 | | good algorithm for this. The rasterizer must be tolerant |
87 | | to such contours (which may have self-crosses, self-contacts, |
88 | | or may change to opposite direction). |
89 | | |
90 | | */ |
91 | | |
92 | | /* Dotsection processing basics : |
93 | | |
94 | | If stem replacement occurs, the dotsection is to be ignored. |
95 | | To check this properly, we test whether extremal poles of contour |
96 | | were actually aligned with stem hints. |
97 | | |
98 | | If a contour was aligned with stem hints by both X and Y, |
99 | | no special processing required. |
100 | | |
101 | | Otherwise if dotsection center falls near vstem axis, |
102 | | we align it by X with the axis. Otherwise we align |
103 | | it by X to half-pixel. Then we align the center by Y to |
104 | | half-pixel, and shift entire contour to satisfy |
105 | | the alignment of the center. |
106 | | */ |
107 | | |
108 | | /* vstem3/hstem3 processing basics : |
109 | | They are handled by the type 1,2 interpreters (gstype1.c, gstype2.c). |
110 | | */ |
111 | | |
112 | | /* flex processing basics : |
113 | | With type 1 it is handled with t1_hinter__flex_* functions. |
114 | | With type 2 it is handled by gstype2.c . |
115 | | */ |
116 | | |
117 | | #define ADOBE_OVERSHOOT_COMPATIBILIY 0 |
118 | | #define ADOBE_SHIFT_CHARPATH 0 |
119 | | |
120 | | /* The CONTRAST_STEMS option aligns one of two stem boundaries |
121 | | to integral pixel boundary when AlignToPixels = 0. |
122 | | It gives more contrast stems, because a bigger part |
123 | | of boldness is concentrated in smaller number of pixels. |
124 | | */ |
125 | 0 | #define CONTRAST_STEMS 1 |
126 | | |
127 | | static const char *s_pole_array = "t1_hinter pole array"; |
128 | | static const char *s_zone_array = "t1_hinter zone array"; |
129 | | static const char *s_hint_array = "t1_hinter hint array"; |
130 | | static const char *s_contour_array = "t1_hinter contour array"; |
131 | | static const char *s_subglyph_array = "t1_hinter subglyph array"; |
132 | | static const char *s_hint_range_array = "t1_hinter hint_range array"; |
133 | | static const char *s_hint_applying_array = "t1_hinter hint_applying array"; |
134 | | static const char *s_stem_snap_array = "t1_hinter stem_snap array"; |
135 | | static const char *s_stem_snap_vote_array = "t1_hinter stem_snap_vote array"; |
136 | | |
137 | 0 | #define member_prt(type, ptr, offset) (type *)((char *)(ptr) + (offset)) |
138 | | |
139 | | typedef int32_t int24; |
140 | | #define HAVE_INT64_T |
141 | | |
142 | | static const unsigned int split_bits = 12; |
143 | | static const unsigned int max_coord_bits = 24; /* = split_bits * 2 */ |
144 | | static const unsigned int matrix_bits = 19; /* <= sizeof(int) * 8 - 1 - split_bits */ |
145 | | static const unsigned int g2o_bitshift = 12; /* <= matrix_bits + max_coord_bits - (sizeof(int) * 8 + 1) */ |
146 | | #ifndef HAVE_INT64_T |
147 | | static const int32_t FFFFF000 = ~(int32_t)0xFFF; /* = ~(((int32_t)1 << split_bits) - 1) */ |
148 | | #endif |
149 | | /* Constants above must satisfy expressions given in comments. */ |
150 | | |
/* Computes (a*b)>>s, s <= 12.
 * a is at most 24 bits and b at most 19 bits wide, so the full product
 * needs up to 43 bits; with a 64-bit type this is a single multiply.
 * The 32-bit fallback splits 'a' into its low split_bits (a0) and the
 * remainder (aa) so that each partial product fits in 32 bits.
 * The result is truncated (unrounded); see mul_shift_round for rounding. */
static inline int32_t mul_shift(int24 a, int19 b, unsigned int s)
{
#ifdef HAVE_INT64_T
    return ( (int64_t)a * (int64_t)b ) >> s; /* unrounded result */
#else
    {   /* 32 bit fallback */
        /* FFFFF000 masks off the low split_bits (12) of 'a'. */
        int32_t aa = a & FFFFF000, a0 = a - aa, a1 = aa >> s;
        return ((a0 * b) >> s) + a1 * b; /* unrounded result */
    }
#endif
}
163 | | |
/* Computes (a*b)>>s, s <= 12, with rounding to nearest.
 * Same operand-width contract as mul_shift (a: 24 bits, b: 19 bits).
 * NOTE(review): the (s - 1) shift means this assumes s >= 1 — confirm
 * all call sites pass a positive shift. */
static inline int32_t mul_shift_round(int24 a, int19 b, unsigned int s)
{
#ifdef HAVE_INT64_T
    /* Shift by s-1, add 1, shift once more: rounds the truncated result. */
    return (( ( (int64_t)a * (int64_t)b ) >> (s - 1)) + 1) >> 1;
#else
    {   /* 32 bit version */
        int32_t aa = a & FFFFF000, a0 = a - aa, a1 = aa >> s;
        return ((((a0 * b) >> (s - 1)) + 1) >> 1) + a1 * b; /* rounded result */
    }
#endif
}
176 | | |
/* Divide v by 2^s, rounding to nearest (halves round up); requires s >= 1. */
static inline int32_t shift_rounded(int32_t v, unsigned int s)
{
    int32_t halved = v >> (s - 1); /* keep one extra bit for the rounding step */

    return (halved + 1) >> 1;
}
180 | | |
/* Return the larger of two 32-bit signed values. */
static inline int32_t Max(int32_t a, int32_t b)
{
    if (a > b)
        return a;
    return b;
}
184 | | |
/* Signed shift by 'b' bit positions: positive b shifts LEFT, negative b
 * shifts right. (The name is historical; despite it, b > 0 is a left shift.) */
static inline long rshift(long a, int b)
{
    if (b > 0)
        return a << b;
    return a >> -b;
}
188 | | /*---------------------- members of matrix classes -------------------------*/ |
189 | | |
190 | | static inline void double_matrix__set(double_matrix * self, const gs_matrix_fixed * m) |
191 | 1.67M | { self->xx = m->xx; |
192 | 1.67M | self->xy = m->xy; |
193 | 1.67M | self->yx = m->yx; |
194 | 1.67M | self->yy = m->yy; |
195 | 1.67M | } |
196 | | |
197 | | static inline int double_matrix__invert_to(const double_matrix * self, double_matrix * m) |
198 | 1.67M | { double det = self->xx * self->yy - self->xy * self->yx; |
199 | | |
200 | 1.67M | if (fabs(det) * 1000000 <= fabs(self->xx) + fabs(self->xy) + fabs(self->yx) + fabs(self->yy)) |
201 | 0 | return_error(gs_error_rangecheck); |
202 | 1.67M | m->xx = self->yy / det; |
203 | 1.67M | m->xy = -self->xy / det; |
204 | 1.67M | m->yx = -self->yx / det; |
205 | 1.67M | m->yy = self->xx / det; |
206 | 1.67M | return 0; |
207 | 1.67M | } |
208 | | |
/* Reduce the precision of a fraction matrix by 'bits' bits:
 * all four coefficients are rounded down in precision, and the common
 * denominator / bitshift are reduced to match, so the represented
 * matrix value is (approximately) unchanged. */
static void fraction_matrix__drop_bits(fraction_matrix * self, unsigned int bits)
{   self->xx = shift_rounded(self->xx, bits);
    self->xy = shift_rounded(self->xy, bits);
    self->yx = shift_rounded(self->yx, bits);
    self->yy = shift_rounded(self->yy, bits);
    self->denominator >>= bits;
    self->bitshift -= bits;
}
217 | | |
/* Convert a double matrix to fixed-point fractions with a power-of-two
 * denominator chosen so the largest coefficient uses about matrix_bits bits.
 * Degenerate case (bitshift too large for the denominator's width) zeroes
 * the matrix and sets denominator = 0, which callers treat as "degenerate". */
static void fraction_matrix__set(fraction_matrix * self, const double_matrix * pmat)
{   double axx = fabs(pmat->xx), axy = fabs(pmat->xy);
    double ayx = fabs(pmat->yx), ayy = fabs(pmat->yy);
    double scale = max(axx + axy, ayx + ayy);
    int matrix_exp, m;

    /* frexp yields the binary exponent of the largest row sum. */
    (void)frexp(scale, &matrix_exp);
    self->bitshift = matrix_bits - matrix_exp;
    if (self->bitshift >= sizeof( self->denominator) * 8) {
        self->denominator = 0;
        self->xx = self->xy = self->yx = self->yy = 0;
    } else {
        self->denominator = 1 << self->bitshift;
        /* Round towards zero for a better view of mirrored characters : */
        /* NOTE(review): "+ 0.5" followed by truncation is round-half-up for
         * positive entries but not a symmetric round-towards-zero for negative
         * ones — confirm this matches the comment's intent. */
        self->xx = (int32_t)(pmat->xx * self->denominator + 0.5);
        self->xy = (int32_t)(pmat->xy * self->denominator + 0.5);
        self->yx = (int32_t)(pmat->yx * self->denominator + 0.5);
        self->yy = (int32_t)(pmat->yy * self->denominator + 0.5);
        /* Rounding can carry into an extra bit; drop bits if we overflowed
         * the matrix_bits budget. */
        m = Max(Max(any_abs(self->xx), any_abs(self->xy)), Max(any_abs(self->yx), any_abs(self->yy)));
        (void)frexp(m, &matrix_exp);
        if (matrix_exp > matrix_bits)
            fraction_matrix__drop_bits(self, matrix_exp - matrix_bits);
    }
}
242 | | |
243 | | static inline int fraction_matrix__to_double(const fraction_matrix * self, double_matrix * pmat) |
244 | 3.35M | { |
245 | 3.35M | if (self->denominator == 0) |
246 | 0 | return_error(gs_error_rangecheck); |
247 | 3.35M | pmat->xx = (double)self->xx / self->denominator; |
248 | 3.35M | pmat->xy = (double)self->xy / self->denominator; |
249 | 3.35M | pmat->yx = (double)self->yx / self->denominator; |
250 | 3.35M | pmat->yy = (double)self->yy / self->denominator; |
251 | 3.35M | return 0; |
252 | 3.35M | } |
253 | | |
254 | | static int fraction_matrix__invert_to(const fraction_matrix * self, fraction_matrix * pmat) |
255 | 1.67M | { double_matrix m, M; |
256 | 1.67M | int code; |
257 | | |
258 | 1.67M | code = fraction_matrix__to_double(self, &M); |
259 | 1.67M | if (code < 0) |
260 | 0 | return code; |
261 | 1.67M | code = double_matrix__invert_to(&M, &m); |
262 | 1.67M | if (code < 0) |
263 | 0 | return code; |
264 | 1.67M | fraction_matrix__set(pmat, &m); |
265 | 1.67M | return 0; |
266 | 1.67M | } |
267 | | |
268 | | static inline int32_t fraction_matrix__transform_x(fraction_matrix *self, int24 x, int24 y, unsigned int s) |
269 | 49.2M | { return mul_shift_round(x, self->xx, s) + mul_shift_round(y, self->yx, s); |
270 | 49.2M | } |
271 | | static inline int32_t fraction_matrix__transform_y(fraction_matrix *self, int24 x, int24 y, unsigned int s) |
272 | 49.2M | { return mul_shift_round(x, self->xy, s) + mul_shift_round(y, self->yy, s); |
273 | 49.2M | } |
274 | | |
275 | | /*--------------------------- friends ------------------------------*/ |
276 | | |
/* Step forward through a cyclic range [beg, end], wrapping end -> beg. */
static inline int ranger_step_f(int i, int beg, int end)
{
    if (i == end)
        return beg;
    return i + 1;
}
280 | | |
/* Step backward through a cyclic range [beg, end], wrapping beg -> end. */
static inline int ranger_step_b(int i, int beg, int end)
{
    if (i == beg)
        return end;
    return i - 1;
}
284 | | |
285 | | static inline fixed o2d(const t1_hinter *h, t1_hinter_space_coord v) |
286 | 98.4M | { |
287 | 98.4M | int s = h->g2o_fraction_bits - _fixed_shift; |
288 | | |
289 | 98.4M | if (s >= 1) |
290 | 98.3M | return ((v >> (h->g2o_fraction_bits - _fixed_shift - 1)) + 1) >> 1; |
291 | 108k | else if (s == 0) |
292 | 0 | return v; |
293 | 108k | else |
294 | 108k | return v << -s; |
295 | 98.4M | } |
296 | | |
297 | | static inline fixed d2o(const t1_hinter *h, t1_hinter_space_coord v) |
298 | 3.35M | { int s = h->g2o_fraction_bits - _fixed_shift; |
299 | | |
300 | 3.35M | if (s >= 0) |
301 | 3.35M | return v << s; |
302 | 0 | else |
303 | 0 | return v >> -s; |
304 | 3.35M | } |
305 | | |
306 | | static inline void g2o(t1_hinter * h, t1_glyph_space_coord gx, t1_glyph_space_coord gy, t1_hinter_space_coord *ox, t1_hinter_space_coord *oy) |
307 | 567 | { *ox = fraction_matrix__transform_x(&h->ctmf, gx, gy, g2o_bitshift); |
308 | 567 | *oy = fraction_matrix__transform_y(&h->ctmf, gx, gy, g2o_bitshift); |
309 | 567 | } |
310 | | |
/* Scale a glyph-space distance into outliner space: (gd * coef) >> g2o_bitshift.
 * 'coef' is one of the precomputed *_transform_coef_rat values. */
static inline t1_hinter_space_coord g2o_dist(t1_glyph_space_coord gd, int19 coef)
{   return mul_shift(gd, coef, g2o_bitshift);
}
314 | | |
315 | | static inline void g2d(t1_hinter * h, t1_glyph_space_coord gx, t1_glyph_space_coord gy, fixed *dx, fixed *dy) |
316 | 49.2M | { |
317 | 49.2M | *dx = fraction_matrix__transform_x(&h->ctmf, gx, gy, g2o_bitshift); |
318 | 49.2M | *dy = fraction_matrix__transform_y(&h->ctmf, gx, gy, g2o_bitshift); |
319 | 49.2M | *dx = o2d(h, *dx); |
320 | 49.2M | *dy = o2d(h, *dy); |
321 | 49.2M | *dx += h->orig_dx; |
322 | 49.2M | *dy += h->orig_dy; |
323 | 49.2M | } |
324 | | |
325 | | static inline void o2g(t1_hinter * h, t1_hinter_space_coord ox, t1_hinter_space_coord oy, t1_glyph_space_coord *gx, t1_glyph_space_coord *gy) |
326 | 0 | { *gx = fraction_matrix__transform_x(&h->ctmi, ox, oy, split_bits); |
327 | 0 | *gy = fraction_matrix__transform_y(&h->ctmi, ox, oy, split_bits); |
328 | 0 | *gx = shift_rounded(*gx, h->g2o_fraction_bits + h->ctmi.bitshift - _fixed_shift - split_bits); |
329 | 0 | *gy = shift_rounded(*gy, h->g2o_fraction_bits + h->ctmi.bitshift - _fixed_shift - split_bits); |
330 | 0 | } |
331 | | |
332 | | static inline t1_glyph_space_coord o2g_dist(t1_hinter * h, t1_hinter_space_coord od, int19 coef) |
333 | 6.71M | { return shift_rounded(mul_shift(od, coef, split_bits), h->g2o_fraction_bits + h->ctmi.bitshift - _fixed_shift - split_bits); |
334 | 6.71M | } |
335 | | |
336 | | /* --------------------- t1_hint class members ---------------------*/ |
337 | | |
/* Record an aligned coordinate 'gc' on whichever hint boundary (g0 or g1)
 * lies closer to the given pole. The boundary is updated only when the new
 * alignment rank is at least as strong as the stored one AND the new quality
 * value is smaller.
 * NOTE(review): the q0/q1 comparison treats a SMALLER 'quality' value as
 * better — confirm against the callers that compute 'quality'. */
static void t1_hint__set_aligned_coord(t1_hint * self, t1_glyph_space_coord gc, t1_pole * pole, enum t1_align_type align, int quality)
{   /* For hstems compare along Y, for vstems along X. */
    t1_glyph_space_coord g = (self->type == hstem ? pole->gy : pole->gx);

    if (any_abs(self->g0 - g) < any_abs(self->g1 - g)) {
        if (self->aligned0 <= align && self->q0 > quality)
            self->ag0 = gc, self->aligned0 = align, self->q0 = quality;
    } else {
        if (self->aligned1 <= align && self->q1 > quality)
            self->ag1 = gc, self->aligned1 = align, self->q1 = quality;
    }
}
349 | | |
350 | | /* --------------------- t1_hinter class members - import --------------------*/ |
351 | | |
/* Initialize a t1_hinter for a new glyph/output path.
 * All dynamic arrays start out pointing at the embedded inline buffers
 * (pole0, hint0, ...); they are grown on demand by t1_hinter__realloc_array
 * and released by t1_hinter__free_arrays. When output_path is NULL (or has
 * no memory), hinting is disabled and the hinter acts as a pass-through. */
void t1_hinter__init(t1_hinter * self, gx_path *output_path)
{
    /* Coordinate-magnitude budget; see t1_hinter__adjust_matrix_precision. */
    self->max_import_coord = (1 << max_coord_bits);
    /* Reset all element counters. */
    self->stem_snap_count[0] = self->stem_snap_count[1] = 0;
    self->stem_snap_vote_count = 0;
    self->zone_count = 0;
    self->pole_count = 0;
    self->hint_count = 0;
    self->contour_count = 0;
    self->subglyph_count = 0;
    self->hint_range_count = 0;
    self->flex_count = 0;
    self->have_flex = false;

    /* Capacities of the inline (stack-embedded) buffers. */
    self->max_subglyph_count = count_of(self->subglyph0);
    self->max_contour_count = count_of(self->contour0);
    self->max_zone_count = count_of(self->zone0);
    self->max_pole_count = count_of(self->pole0);
    self->max_hint_count = count_of(self->hint0);
    self->max_hint_range_count = count_of(self->hint_range0);
    self->max_hint_applying_count = count_of(self->hint_applying0);
    self->max_stem_snap_count[0] = count_of(self->stem_snap0[0]);
    self->max_stem_snap_count[1] = count_of(self->stem_snap0[1]);
    self->max_stem_snap_vote_count = count_of(self->stem_snap_vote0);

    /* Point the working arrays at the inline buffers. */
    self->pole = self->pole0;
    self->hint = self->hint0;
    self->zone = self->zone0;
    self->contour = self->contour0;
    self->subglyph = self->subglyph0;
    self->hint_range = self->hint_range0;
    self->hint_applying = self->hint_applying0;
    self->stem_snap[0] = self->stem_snap0[0];
    self->stem_snap[1] = self->stem_snap0[1];
    self->stem_snap_vote = self->stem_snap_vote0;

    /* Font/transform parameters; filled in by t1_hinter__set_mapping etc. */
    self->FontType = 1;
    self->ForceBold = false;
    self->base_font_scale = 0;
    self->resolution = 0;
    self->heigt_transform_coef = self->width_transform_coef = 0;
    self->heigt_transform_coef_rat = self->width_transform_coef_rat = 0;
    self->heigt_transform_coef_inv = self->width_transform_coef_inv = 0;
    self->cx = self->cy = 0;
    self->contour[0] = 0;
    self->subglyph[0] = 0;
    self->keep_stem_width = false;
    self->charpath_flag = false;
    self->grid_fit_x = self->grid_fit_y = true;
    self->output_path = output_path;
    /* No memory implies we cannot allocate, so hinting must be off. */
    self->memory = (output_path == 0 ? 0 : output_path->memory);
    self->disable_hinting = (self->memory == NULL);
    self->pass_through = self->disable_hinting;
    self->autohinting = false;
    self->fix_contour_sign = false;
    self->path_opened = false;
    self->orig_dx = 0;
    self->orig_dy = 0;
    self->g2o_fraction_bits = 0;

    self->stem_snap[0][0] = self->stem_snap[1][0] = 100; /* default */

    memset(&self->ctmf, 0x00, sizeof(self->ctmf));
    memset(&self->ctmi, 0x00, sizeof(self->ctmi));
}
417 | | |
/* Free any working arrays that were grown onto the heap (i.e. that no
 * longer point at their inline buffers), then null the pointers.
 * NOTE(review): self->subglyph is freed but, unlike every sibling pointer,
 * is never reset to 0 below — confirm whether that is intentional. */
static inline void t1_hinter__free_arrays(t1_hinter * self)
{   if (self->pole != self->pole0)
        gs_free_object(self->memory, self->pole, s_pole_array);
    if (self->hint != self->hint0)
        gs_free_object(self->memory, self->hint, s_hint_array);
    if (self->zone != self->zone0)
        gs_free_object(self->memory, self->zone, s_zone_array);
    if (self->contour != self->contour0)
        gs_free_object(self->memory, self->contour, s_contour_array);
    if (self->subglyph != self->subglyph0)
        gs_free_object(self->memory, self->subglyph, s_subglyph_array);
    if (self->hint_range != self->hint_range0)
        gs_free_object(self->memory, self->hint_range, s_hint_range_array);
    if (self->hint_applying != self->hint_applying0)
        gs_free_object(self->memory, self->hint_applying, s_hint_applying_array);
    if (self->stem_snap[0] != self->stem_snap0[0])
        gs_free_object(self->memory, self->stem_snap[0], s_stem_snap_array);
    if (self->stem_snap[1] != self->stem_snap0[1])
        gs_free_object(self->memory, self->stem_snap[1], s_stem_snap_array);
    if (self->stem_snap_vote != self->stem_snap_vote0)
        gs_free_object(self->memory, self->stem_snap_vote, s_stem_snap_vote_array);
    self->pole = 0;
    self->hint = 0;
    self->zone = 0;
    self->contour = 0;
    self->hint_range = 0;
    self->hint_applying = 0;
    self->stem_snap[0] = self->stem_snap[1] = 0;
    self->stem_snap_vote = 0;
}
448 | | |
449 | | static inline void t1_hinter__init_outline(t1_hinter * self) |
450 | 1.67M | { |
451 | 1.67M | self->contour_count = 0; |
452 | 1.67M | self->pole_count = 0; |
453 | 1.67M | self->contour[0] = 0; |
454 | 1.67M | self->hint_count = 0; |
455 | 1.67M | self->primary_hint_count = -1; |
456 | 1.67M | self->suppress_overshoots = false; |
457 | 1.67M | self->path_opened = false; |
458 | 1.67M | } |
459 | | |
/* Quantize the floating-point height/width transform coefficients into
 * fixed-point rationals over the ctmf/ctmi denominators, for use by
 * g2o_dist / o2g_dist.
 * NOTE(review): divides by heigt/width_transform_coef, which are 0 right
 * after t1_hinter__init and are only assigned in set_mapping when the
 * matrix is non-degenerate — confirm callers cannot reach this with a
 * zero coefficient. */
static void t1_hinter__compute_rat_transform_coef(t1_hinter * self)
{
    /* Round towards zero for a better view of mirrored characters : */
    self->heigt_transform_coef_rat = (int19)(self->heigt_transform_coef * self->ctmf.denominator + 0.5);
    self->width_transform_coef_rat = (int19)(self->width_transform_coef * self->ctmf.denominator + 0.5);
    self->heigt_transform_coef_inv = (int19)(self->ctmi.denominator / self->heigt_transform_coef + 0.5);
    self->width_transform_coef_inv = (int19)(self->ctmi.denominator / self->width_transform_coef + 0.5);
}
468 | | |
/* Ensure the incoming coordinates (xx, yy) fit the 24-bit budget assumed by
 * mul_shift: while either magnitude exceeds max_import_coord, halve the
 * matrix precision (and the g2o fraction) and double the budget, so later
 * products still fit in 32 bits. Called for every imported coordinate. */
static inline void t1_hinter__adjust_matrix_precision(t1_hinter * self, fixed xx, fixed yy)
{
    ufixed x = any_abs(xx), y = any_abs(yy);
    ufixed c = (x > y ? x : y);

    while (c >= self->max_import_coord) {
        /* Reduce the precision of ctmf to allow products to fit into 32 bits : */
        self->max_import_coord <<= 1;
        fraction_matrix__drop_bits(&self->ctmf, 1);
        fraction_matrix__drop_bits(&self->ctmi, 1);
        self->g2o_fraction_bits -= 1;
        self->g2o_fraction >>= 1;
        t1_hinter__compute_rat_transform_coef(self);
    }
    if (self->ctmf.denominator == 0) {
        /* ctmf should be degenerate. */
        self->ctmf.denominator = 1;
    }
}
488 | | |
/* Record the glyph origin in device space, snapped to the pixel or subpixel
 * grid (depending on align_to_pixels), and precompute its outliner-space
 * image. The snapped origin is added back to every point in g2d. */
static inline void t1_hinter__set_origin(t1_hinter * self, fixed dx, fixed dy)
{
    /* Grid cell size in 'fixed' units for each axis. */
    fixed align_x = rshift(fixed_1, (self->align_to_pixels ? (int)self->log2_pixels_x : self->log2_subpixels_x));
    fixed align_y = rshift(fixed_1, (self->align_to_pixels ? (int)self->log2_pixels_y : self->log2_subpixels_y));

    /* Round to the nearest grid cell (align_* is a power of two). */
    self->orig_dx = (dx + align_x / 2) & ~(align_x - 1);
    self->orig_dy = (dy + align_y / 2) & ~(align_y - 1);
    self->orig_ox = d2o(self, self->orig_dx);
    self->orig_oy = d2o(self, self->orig_dy);
#   if ADOBE_SHIFT_CHARPATH
        /* Adobe CPSI rounds coordinates for 'charpath' :
           X to trunc(x+0.5)
           Y to trunc(y)+0.5
        */
        if (self->charpath_flag) {
            self->orig_dx += fixed_half;
            self->orig_dx &= ~(fixed_1 - 1);
            self->orig_dy &= ~(fixed_1 - 1);
            self->orig_dy += fixed_half;
        } else {
            self->orig_dy += fixed_1;
            /* Adobe CPSI does this, not sure why. */
            /* fixme : check bbox of cached bitmap. */
        }
#   endif
}
516 | | |
517 | | int t1_hinter__set_mapping(t1_hinter * self, gs_matrix_fixed * ctm, |
518 | | gs_matrix * FontMatrix, gs_matrix * baseFontMatrix, |
519 | | int log2_pixels_x, int log2_pixels_y, |
520 | | int log2_subpixels_x, int log2_subpixels_y, |
521 | | fixed origin_x, fixed origin_y, bool align_to_pixels) |
522 | 1.67M | { float axx = fabs(ctm->xx), axy = fabs(ctm->xy); |
523 | 1.67M | float ayx = fabs(ctm->xx), ayy = fabs(ctm->xy); |
524 | 1.67M | float scale = max(axx + axy, ayx + ayy); |
525 | 1.67M | double_matrix CTM; |
526 | 1.67M | int code; |
527 | | |
528 | 1.67M | self->disable_hinting |= (scale < 1/1024. || scale > 4); |
529 | 1.67M | self->pass_through |= self->disable_hinting; |
530 | 1.67M | self->log2_pixels_x = log2_pixels_x; |
531 | 1.67M | self->log2_pixels_y = log2_pixels_y; |
532 | 1.67M | self->log2_subpixels_x = log2_subpixels_x; |
533 | 1.67M | self->log2_subpixels_y = log2_subpixels_y; |
534 | 1.67M | double_matrix__set(&CTM, ctm); |
535 | 1.67M | fraction_matrix__set(&self->ctmf, &CTM); |
536 | 1.67M | self->g2o_fraction_bits = self->ctmf.bitshift - g2o_bitshift + _fixed_shift; |
537 | 1.67M | if (self->g2o_fraction_bits > max_coord_bits) { |
538 | 0 | fraction_matrix__drop_bits(&self->ctmf, self->g2o_fraction_bits - max_coord_bits); |
539 | 0 | self->g2o_fraction_bits = max_coord_bits; |
540 | 0 | } |
541 | 1.67M | if (self->ctmf.denominator != 0) { |
542 | 1.67M | code = fraction_matrix__invert_to(&self->ctmf, &self->ctmi); /* Note: ctmi is inversion of ctmf, not ctm. */ |
543 | 1.67M | if (code == gs_error_rangecheck) |
544 | 0 | self->ctmf.denominator = 0; |
545 | 1.67M | else if (code < 0) |
546 | 0 | return code; |
547 | 1.67M | } |
548 | 1.67M | if (self->ctmf.denominator != 0) { |
549 | 1.67M | self->g2o_fraction = 1 << self->g2o_fraction_bits; |
550 | | /* Note : possibly we'll adjust the matrix precision dynamically |
551 | | with adjust_matrix_precision while importing the glyph. */ |
552 | 1.67M | if (self->g2o_fraction == 0) |
553 | 0 | return_error(gs_error_limitcheck); |
554 | 1.67M | } |
555 | 1.67M | if (self->ctmf.denominator == 0 || self->ctmi.denominator == 0) { |
556 | | /* ctmf should be degenerate. */ |
557 | 0 | self->disable_hinting = true; |
558 | 0 | self->pass_through = true; |
559 | 0 | self->ctmf.denominator = 1; |
560 | 0 | } |
561 | 1.67M | self->transposed = (any_abs(self->ctmf.xy) * 10 > any_abs(self->ctmf.xx)); |
562 | 1.67M | { /* height_transform_coef is scaling factor for the |
563 | | distance between horizontal lines while transformation. |
564 | | width_transform_coef defines similarly. |
565 | | */ |
566 | 1.67M | double_matrix m; |
567 | 1.67M | double vp, sp, div_x, div_y; |
568 | | |
569 | 1.67M | code = fraction_matrix__to_double(&self->ctmf, &m); |
570 | 1.67M | if (code < 0) |
571 | 0 | return code; |
572 | 1.67M | vp = any_abs(m.xx * m.yy - m.yx * m.xy); |
573 | 1.67M | sp = any_abs(m.xx * m.yx + m.xy * m.yy); |
574 | 1.67M | div_x = hypot(m.xx, m.yx); |
575 | 1.67M | div_y = hypot(m.xy, m.yy); |
576 | 1.67M | if (vp != 0 && div_x != 0 && div_y != 0) { |
577 | 1.67M | if (!self->transposed) { |
578 | 1.67M | self->heigt_transform_coef = vp / div_x; |
579 | 1.67M | self->width_transform_coef = vp / div_y; |
580 | 1.67M | } else { |
581 | 4 | self->heigt_transform_coef = vp / div_y; |
582 | 4 | self->width_transform_coef = vp / div_x; |
583 | 4 | } |
584 | 1.67M | t1_hinter__compute_rat_transform_coef(self); |
585 | 1.67M | self->keep_stem_width = (sp <= vp / 3); /* small skew */ |
586 | 1.67M | } |
587 | 1.67M | } |
588 | 0 | { /* Compute font size and resolution : */ |
589 | 1.67M | gs_point p0, p1, p2; |
590 | 1.67M | double d0, d1, d2; |
591 | | |
592 | 1.67M | gs_distance_transform(0, 1, baseFontMatrix, &p0); |
593 | 1.67M | gs_distance_transform(0, 1, FontMatrix, &p1); |
594 | 1.67M | gs_distance_transform(0, 1, (gs_matrix *)ctm, &p2); |
595 | 1.67M | d0 = hypot(p0.x, p0.y); |
596 | 1.67M | d1 = hypot(p1.x, p1.y); |
597 | 1.67M | d2 = hypot(p2.x, p2.y); |
598 | 1.67M | self->base_font_scale = d0; |
599 | 1.67M | self->font_size = floor(d1 / d0 * 10000 + 0.5) / 10000; |
600 | 1.67M | self->resolution = floor(d2 / d1 * 10000000 + 0.5) / 10000000; |
601 | | /* |
602 | | * fixme: base_font_scale, font_size and resolution are computed wrongly |
603 | | * for any of the following cases : |
604 | | * |
605 | | * 1. CIDFontType0C with FontMatrix=[0.001 0 0 0.001 0 0] gives 1/1000 size. |
606 | | * A known example : CIDembedded.pdf . We could obtain the Type 9 FontMatrix |
607 | | * in type1_exec_init from penum->fstack. |
608 | | * |
609 | | * 2. See comment in pdf_font_orig_matrix. |
610 | | * |
611 | | * Currently we don't use these values with a regular build. |
612 | | * The ADOBE_OVERSHOOT_COMPATIBILIY build needs to fix them. |
613 | | */ |
614 | 1.67M | } |
615 | 1.67M | if (1 || /* Doesn't work - see comment above. */ |
616 | 1.67M | self->resolution * self->font_size >= 2) { |
617 | | /* Enable the grid fitting separately for axes : */ |
618 | 1.67M | self->grid_fit_y = (any_abs(self->ctmf.xy) * 10 < any_abs(self->ctmf.xx) || |
619 | 1.67M | any_abs(self->ctmf.xx) * 10 < any_abs(self->ctmf.xy)); |
620 | 1.67M | self->grid_fit_x = (any_abs(self->ctmf.yx) * 10 < any_abs(self->ctmf.yy) || |
621 | 1.67M | any_abs(self->ctmf.yy) * 10 < any_abs(self->ctmf.yx)); |
622 | 1.67M | } else { |
623 | | /* Disable the grid fitting for very small fonts. */ |
624 | 0 | self->grid_fit_x = self->grid_fit_y = false; |
625 | 0 | } |
626 | 1.67M | self->align_to_pixels = align_to_pixels; |
627 | 1.67M | t1_hinter__set_origin(self, origin_x, origin_y); |
628 | 1.67M | self->pixel_o_x = rshift(self->g2o_fraction, (self->align_to_pixels ? (int)self->log2_pixels_x : self->log2_subpixels_x)); |
629 | 1.67M | self->pixel_o_y = rshift(self->g2o_fraction, (self->align_to_pixels ? (int)self->log2_pixels_y : self->log2_subpixels_y)); |
630 | 1.67M | self->pixel_gh = any_abs(o2g_dist(self, self->pixel_o_x, self->heigt_transform_coef_inv)); |
631 | 1.67M | self->pixel_gw = any_abs(o2g_dist(self, self->pixel_o_y, self->width_transform_coef_inv)); |
632 | 1.67M | return 0; |
633 | 1.67M | } |
634 | | |
/* Build one alignment zone from a (flat, overshoot) pair of blue values.
 * y_min/y_max are widened by blue_fuzz; the pair is normalized so that a
 * bottom zone has overshoot_y <= y and a top zone has overshoot_y >= y.
 * 'd' is a zero offset here (an ADOBE_OVERSHOOT_COMPATIBILIY hook —
 * NOTE(review): presumably nonzero in that build; confirm). */
static void t1_hinter__make_zone(t1_hinter * self, t1_zone *zone, float * blues, enum t1_zone_type type, t1_glyph_space_coord blue_fuzz)
{   t1_glyph_space_coord d = 0;

    zone->type = type;
    zone->y = float2fixed(blues[0] + d);
    zone->overshoot_y = float2fixed(blues[1] + d);
    zone->y_min = min(zone->y, zone->overshoot_y) - blue_fuzz;
    zone->y_max = max(zone->y, zone->overshoot_y) + blue_fuzz;
    if (type == botzone ? zone->overshoot_y > zone->y : zone->overshoot_y < zone->y) {
        /* Swap so the overshoot lies on the correct side. */
        int v = zone->overshoot_y; zone->overshoot_y = zone->y; zone->y = v;
    }
    t1_hinter__adjust_matrix_precision(self, zone->y_min, zone->y_max);
}
648 | | |
649 | | static bool t1_hinter__realloc_array(gs_memory_t *mem, void **a, void *a0, int *max_count, int elem_size, int enhancement, const char *cname) |
650 | 0 | { |
651 | 0 | void *aa = gs_alloc_bytes(mem, (size_t)(*max_count + enhancement * 2) * elem_size, cname); |
652 | |
|
653 | 0 | if (aa == NULL) |
654 | 0 | return true; |
655 | 0 | memcpy(aa, *a, (size_t)*max_count * elem_size); |
656 | 0 | if (*a != a0) |
657 | 0 | gs_free_object(mem, *a, cname); |
658 | 0 | *a = aa; |
659 | 0 | *max_count += enhancement * 2; |
660 | 0 | return false; |
661 | 0 | } |
662 | | |
/* Import BlueValues/OtherBlues (count floats = count/2 zones) into the
 * hinter. Non-family values are appended as new zones (growing the array
 * if needed); family values do not add zones but overwrite any existing
 * zone that matches within one device pixel (FamilyBlues semantics).
 * Returns 0 or gs_error_VMerror.
 * NOTE(review): the non-family branch indexes pairs with 'blues + i + i'
 * but the family branch passes 'blues + i' — confirm the family stride is
 * intentional and not a missing '+ i'. */
static int t1_hinter__set_alignment_zones(gs_memory_t *mem, t1_hinter * self, float * blues, int count, enum t1_zone_type type, bool family)
{   int count2 = count / 2, i, j;

    if (!family) {
        /* Store zones : */
        if (count2 + self->zone_count >= self->max_zone_count)
            if(t1_hinter__realloc_array(mem, (void **)&self->zone, self->zone0, &self->max_zone_count,
                                        sizeof(self->zone0) / count_of(self->zone0),
                                        max(T1_MAX_ALIGNMENT_ZONES, count), s_zone_array))
                return_error(gs_error_VMerror);
        for (i = 0; i < count2; i++)
            t1_hinter__make_zone(self, &self->zone[self->zone_count + i], blues + i + i, type, self->blue_fuzz);
        self->zone_count += count2;
    } else {
        /* Replace with family zones if allowed : */
        t1_zone zone;
        for (i = 0; i < count2; i++) {
            t1_hinter__make_zone(self, &zone, blues + i, type, self->blue_fuzz);
            for (j = 0; j<self->zone_count; j++) {
                t1_zone *zone1 = &self->zone[j];
                /* Overwrite only when both edges agree within ~1 pixel. */
                if (any_abs(zone.y -  zone1->y ) * self->heigt_transform_coef <= 1 &&
                    any_abs(zone.overshoot_y - zone1->overshoot_y) * self->heigt_transform_coef <= 1)
                    *zone1 = zone;
            }
        }
    }
    return 0;
}
691 | | |
/* Import a stem snap table (StdHW/StdVW or StemSnapH/StemSnapV) for one
   direction : hv == 0 selects horizontal stems (heights), hv == 1 vertical
   stems (widths). Values are stored in glyph space, sorted and deduplicated.
   Returns 0 or gs_error_VMerror. */
static int t1_hinter__set_stem_snap(gs_memory_t *mem, t1_hinter * self, float * value, int count, unsigned short hv)
{   int count0 = self->stem_snap_count[hv], i, j;
    t1_glyph_space_coord pixel_g = (!hv ? self->pixel_gh : self->pixel_gw);

    /* A zero pixel size means stem snapping can have no effect here. */
    if (pixel_g == 0)
        return 0;
    if (count + count0 >= self->max_stem_snap_count[hv])
        if(t1_hinter__realloc_array(mem, (void **)&self->stem_snap[hv], self->stem_snap0[hv], &self->max_stem_snap_count[hv],
                sizeof(self->stem_snap0[0]) / count_of(self->stem_snap0[0]),
                max(T1_MAX_STEM_SNAPS, count), s_stem_snap_array))
            return_error(gs_error_VMerror);
    if (count + count0 >= self->max_stem_snap_vote_count)
        if(t1_hinter__realloc_array(mem, (void **)&self->stem_snap_vote, self->stem_snap_vote0, &self->max_stem_snap_vote_count,
                sizeof(self->stem_snap_vote0) / count_of(self->stem_snap_vote0),
                max(T1_MAX_STEM_SNAPS, count), s_stem_snap_vote_array))
            return_error(gs_error_VMerror);
    /* Accept the table when it is a single Std*W value, or when its spread
       exceeds one pixel (see the threshold note at the end of this function).
       Otherwise the table is ignored and the previous contents stand. */
    if (count == 1 || (count > 0 && float2fixed(value[count - 1] - value[0]) > pixel_g)) {
        for (i = 0; i < count; i++)
            self->stem_snap[hv][i] = float2fixed(value[i]);
        self->stem_snap_count[hv] = count;
        /* Simple O(n^2) selection-style sort; tables are tiny. */
        for (i = 0; i < count; i++) {
            for (j = i + 1; j < count; j++)
                if (self->stem_snap[hv][i] > self->stem_snap[hv][j]) {
                    t1_glyph_space_coord v = self->stem_snap[hv][i];

                    self->stem_snap[hv][i] = self->stem_snap[hv][j];
                    self->stem_snap[hv][j] = v;
                }
        }
        /* Compact out duplicates in place; j is the index of the last kept
           element, so the final count is j + 1. */
        for (i = 1, j = 0; i < count; i++) {
            if (self->stem_snap[hv][j] != self->stem_snap[hv][i]) {
                j++;
                self->stem_snap[hv][j] = self->stem_snap[hv][i];
            }
        }
        self->stem_snap_count[hv] = j + 1;
    }
    return 0;
    /* We store unrounded stem snap elements, align stem width
       to an unrounded element, and then round the width to pixels.
       As an alternative we tried to round stem snap elements when storing them,
       and align stem width to the closest rounded value. The first alternative gives
       results closer to Adobe, and therefore we believe that Adobe does the same.
       With the second alternative many glyphs render somewhat wider,
       for example in aaon97_p7.pdf, adesso1.pdf at 300 dpi.

       Another arbitrary solution is ignoring stem snap when
       its variation is lesser than 1 pixel. We believe that a threshold
       must exist because Adobe says that stem snaps work for high resolutions only.
       However we took the 1 pixel value for the threshold from scratch,
       and experiments give good results.

       At last, we ignore Std*V when stem snap is used.
       Doing so because we don't know cases when Std*V
       isn't equal to any stem snap element.
    */
}
749 | | |
750 | | int t1_hinter__set_font_data(gs_memory_t *mem, t1_hinter * self, int FontType, gs_type1_data *pdata, bool no_grid_fitting, bool is_resource) |
751 | 1.67M | { int code; |
752 | | |
753 | 1.67M | t1_hinter__init_outline(self); |
754 | 1.67M | self->FontType = FontType; |
755 | 1.67M | self->BlueScale = pdata->BlueScale; |
756 | 1.67M | self->blue_shift = float2fixed(pdata->BlueShift); |
757 | 1.67M | self->blue_fuzz = float2fixed(pdata->BlueFuzz); |
758 | 1.67M | self->suppress_overshoots = (self->BlueScale > self->heigt_transform_coef / (1 << self->log2_pixels_y) - 0.00020417); |
759 | 1.67M | self->overshoot_threshold = (self->heigt_transform_coef != 0 ? (t1_glyph_space_coord)(fixed_half * (1 << self->log2_pixels_y) / self->heigt_transform_coef) : 0); |
760 | 1.67M | self->ForceBold = pdata->ForceBold; |
761 | 1.67M | self->disable_hinting |= no_grid_fitting; |
762 | 1.67M | self->pass_through |= no_grid_fitting; |
763 | 1.67M | self->charpath_flag = no_grid_fitting; |
764 | 1.67M | self->fix_contour_sign = (!is_resource && self->memory != NULL); |
765 | 1.67M | if (self->fix_contour_sign) |
766 | 0 | self->pass_through = false; |
767 | 1.67M | if (self->pass_through) |
768 | 1.67M | return 0; |
769 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->OtherBlues.values, pdata->OtherBlues.count, botzone, false); |
770 | 0 | if (code >= 0) |
771 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->BlueValues.values, min(2, pdata->BlueValues.count), botzone, false); |
772 | 0 | if (code >= 0) |
773 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->BlueValues.values + 2, pdata->BlueValues.count - 2, topzone, false); |
774 | 0 | if (code >= 0) |
775 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->FamilyOtherBlues.values, pdata->FamilyOtherBlues.count, botzone, true); |
776 | 0 | if (code >= 0) |
777 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->FamilyBlues.values, min(2, pdata->FamilyBlues.count), botzone, true); |
778 | 0 | if (code >= 0) |
779 | 0 | code = t1_hinter__set_alignment_zones(mem, self, pdata->FamilyBlues.values + 2, pdata->FamilyBlues.count - 2, topzone, true); |
780 | 0 | if (code >= 0) |
781 | 0 | code = t1_hinter__set_stem_snap(mem, self, pdata->StdHW.values, pdata->StdHW.count, 0); |
782 | 0 | if (code >= 0) |
783 | 0 | code = t1_hinter__set_stem_snap(mem, self, pdata->StdVW.values, pdata->StdVW.count, 1); |
784 | 0 | if (code >= 0) |
785 | 0 | code = t1_hinter__set_stem_snap(mem, self, pdata->StemSnapH.values, pdata->StemSnapH.count, 0); |
786 | 0 | if (code >= 0) |
787 | 0 | code = t1_hinter__set_stem_snap(mem, self, pdata->StemSnapV.values, pdata->StemSnapV.count, 1); |
788 | 0 | return code; |
789 | 1.67M | } |
790 | | |
/* Configure the hinter for a Type 42 (TrueType-outline) font using
   Type 1 spec defaults, and enable autohinting. Returns 0. */
int t1_hinter__set_font42_data(t1_hinter * self, int FontType, gs_type42_data *pdata, bool no_grid_fitting)
{
    t1_hinter__init_outline(self);
    self->FontType = FontType;
    self->BlueScale = 0.039625; /* A Type 1 spec default. */
    self->blue_shift = 7; /* A Type 1 spec default. */
    self->blue_fuzz = 1; /* A Type 1 spec default. */
    /* NOTE(review): blue_shift/blue_fuzz are stored raw here, while
       t1_hinter__set_font_data stores float2fixed() values - confirm the
       intended units for the autohinting path. */
    self->suppress_overshoots = (self->BlueScale > self->heigt_transform_coef / (1 << self->log2_pixels_y) - 0.00020417);
    self->overshoot_threshold = (self->heigt_transform_coef != 0 ? (t1_glyph_space_coord)(fixed_half * (1 << self->log2_pixels_y) / self->heigt_transform_coef) : 0);
    self->ForceBold = false;
    self->pass_through |= no_grid_fitting;
    self->charpath_flag = no_grid_fitting;
    self->autohinting = true;
    if (self->pass_through)
        return 0;
    /* Currently we don't provide alignment zones or stem snap. */
    return 0;
}
809 | | |
810 | | static inline int t1_hinter__can_add_pole(t1_hinter * self, t1_pole **pole) |
811 | 4.53k | { if (self->pole_count >= self->max_pole_count) |
812 | 0 | if(t1_hinter__realloc_array(self->memory, (void **)&self->pole, self->pole0, &self->max_pole_count, |
813 | 0 | sizeof(self->pole0) / count_of(self->pole0), T1_MAX_POLES, s_pole_array)) |
814 | 0 | return_error(gs_error_VMerror); |
815 | 4.53k | *pole = &self->pole[self->pole_count]; |
816 | 4.53k | return 0; |
817 | 4.53k | } |
818 | | |
819 | | static inline int t1_hinter__add_pole(t1_hinter * self, t1_glyph_space_coord xx, t1_glyph_space_coord yy, enum t1_pole_type type) |
820 | 4.53k | { t1_pole *pole; |
821 | 4.53k | int code = t1_hinter__can_add_pole(self, &pole); |
822 | | |
823 | 4.53k | if (code < 0) |
824 | 0 | return code; |
825 | 4.53k | pole->gx = pole->ax = self->cx += xx; |
826 | 4.53k | pole->gy = pole->ay = self->cy += yy; |
827 | 4.53k | pole->ox = pole->oy = 0; |
828 | 4.53k | pole->type = type; |
829 | 4.53k | pole->contour_index = self->contour_count; |
830 | 4.53k | pole->aligned_x = pole->aligned_y = unaligned; |
831 | 4.53k | pole->boundary_length_x = pole->boundary_length_y = 0; |
832 | 4.53k | self->pole_count++; |
833 | 4.53k | return 0; |
834 | 4.53k | } |
835 | | |
836 | | int t1_hinter__sbw(t1_hinter * self, fixed sbx, fixed sby, fixed wx, fixed wy) |
837 | 25.0M | { self->cx = self->orig_gx = self->subglyph_orig_gx = sbx; |
838 | 25.0M | self->cy = self->orig_gy = self->subglyph_orig_gy = sby; |
839 | 25.0M | self->width_gx = wx; |
840 | 25.0M | self->width_gy = wy; |
841 | 25.0M | return 0; |
842 | 25.0M | } |
843 | | |
844 | | int t1_hinter__sbw_seac(t1_hinter * self, fixed sbx, fixed sby) |
845 | 0 | { t1_hinter__adjust_matrix_precision(self, sbx, sby); |
846 | 0 | self->cx = self->subglyph_orig_gx = self->orig_gx + sbx; |
847 | 0 | self->cy = self->subglyph_orig_gy = self->orig_gy + sby; |
848 | 0 | return 0; |
849 | 0 | } |
850 | | |
/* Search the contour piece starting at pole i0 (ending near i1, N poles)
   for a "missed flex" : a long, almost flat curve cluster whose end poles
   share the same coordinate along axis k (k == 0 : x, k == 1 : y), whose
   transverse deviation stays within 'threshold', and whose length along the
   other axis exceeds 4 pixels. On success stores the cluster bounds in
   *j0 / *j1, the extreme transverse coordinate in *gm, and returns true. */
static bool t1_hinter__find_flex(t1_hinter * self, int k, int contour_beg, int contour_end,
                t1_glyph_space_coord pixel_g, t1_glyph_space_coord threshold,
                int i0, int i1, int N, int *j0, int *j1,
                t1_glyph_space_coord *gm)
{
    int i, j, n = N - 5, m, l;
    /* Axis selection is done through byte offsets into t1_pole so the same
       code serves both x and y (member_prt applies the offset). */
    t1_glyph_space_coord *p_gc = (!k ? &self->pole[0].gx : &self->pole[0].gy);
    t1_glyph_space_coord *p_gd = (!k ? &self->pole[0].gy : &self->pole[0].gx);
    int offset_gc = (char *)p_gc - (char *)&self->pole[0];
    int offset_gd = (char *)p_gd - (char *)&self->pole[0];
    t1_glyph_space_coord gc0, gc1, gd0, gd1, gcl, gdl, gcp = 0, gdp = 0, gcd, gcm = 0;

    for (i = i0; n; n--, i = i + 1) {
        if (i == contour_end)
            i = contour_beg;        /* wrap around the contour */
        if (self->pole[i].type == offcurve)
            continue;               /* cluster ends must be on-curve */
        gc0 = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gc);
        gd0 = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gd);
        for (j = i1, m = n; m; m--, j--) {
            if (j < contour_beg)
                j = contour_end - 1;
            if (self->pole[j].type == offcurve)
                continue;
            gc1 = *member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc);
            gd1 = *member_prt(t1_glyph_space_coord, &self->pole[j], offset_gd);
            if (any_abs(gd1 - gd0) < pixel_g * 4) /* Arbitrary check for 4 pixels length. */
                continue;
            if (gc0 == gc1) { /* Arbitrary check for strong equality. */
                /* Compute the curvity direction relative to the middle coord. */
                bool gt = false, lt = false;
                double area = 0, area0;
                int dir = 0, prev_dir = 0, dir_change = 0;

                *gm = gc0; /* Safety. */
                /* fixme: optimize: the computation of gt, lt may be replaced with
                   a longer loop, so that dir_change accounts outer segments.
                   optimize : move the 1st iteration outside the loop. */
                for (l = i; ; gcp = gcl, gdp = gdl, prev_dir = dir, l++) {
                    if (l == contour_end)
                        l = contour_beg;
                    gcl = *member_prt(t1_glyph_space_coord, &self->pole[l], offset_gc);
                    gdl = *member_prt(t1_glyph_space_coord, &self->pole[l], offset_gd);
                    if (l != i) {
                        /* Accumulate twice the signed area (shoelace term)
                           and track the extreme transverse coordinate. */
                        area += (double)(gcp - gc0) * (gdl - gdp) - (double)(gdp - gd0) * (gcl - gcp);
                        gcd = gcl - gc0;
                        gcd = any_abs(gcd);
                        if (gcm < gcd) {
                            *gm = gcl;
                            gcm = gcd;
                        }
                        dir = (gcp < gcl ? 1 : gcp > gcl ? -1 : prev_dir);
                        if (dir * prev_dir < 0)
                            dir_change++;
                    }
                    if (l == j)
                        break;
                    if (gcl < gc0)
                        lt = true;
                    if (gcl > gc0)
                        gt = true;
                }
                if (dir_change > 1)
                    continue;       /* wiggles more than a single flex bump */
                if (gcm > threshold)
                    continue;       /* deviates too far to be a flex */
                area = any_abs(area) / 2; /* Flex area. */
                area0 = (double)(gd1 - gd0) * gcm; /* Surrounding rectangle. */
                area0 = any_abs(area0);
                if (area > area0 * 0.75)
                    continue; /* looks as a rounded rectangle. */
                if (!lt || !gt) {
                    /* The bump stays on one side; require the neighbouring
                       poles outside the cluster to lie on the same side. */
                    int ii = i - 1, jj = j + 1;
                    t1_glyph_space_coord gii, gjj;

                    if (ii < contour_beg)
                        ii = contour_end - 1;
                    if (jj == contour_end)
                        jj = contour_beg;
                    gii = *member_prt(t1_glyph_space_coord, &self->pole[ii], offset_gc);
                    gjj = *member_prt(t1_glyph_space_coord, &self->pole[jj], offset_gc);
                    if ((lt && gii <= gc0 && gjj <= gc0) ||
                        (gt && gii >= gc0 && gjj >= gc0)) {
                        *j0 = i;
                        *j1 = j;
                        return true;
                    }
                }
            }
        }
        /* Leave the loop here because t1_hinter__fix_missed_flex
           will try the interval starting with the next pole.
           We reserve the 'i' cycle for finding a "best" flex
           within the interval. */
        break;
    }
    return false;
}
949 | | |
/* Remove the poles strictly between i0 and i1 (a detected flex interior),
   updating stored hint ranges and the caller's pole index *pi to match.
   A wrapped interval (i0 > i1) is handled by splitting it at contour_end. */
static void t1_hinter__compact_flex(t1_hinter * self, int contour_beg, int contour_end, int i0, int i1, int *pi)
{
    if (i0 > i1) {
        /* Interval wraps around the contour : compact each half separately. */
        t1_hinter__compact_flex(self, contour_beg, contour_end, i0, contour_end, pi);
        t1_hinter__compact_flex(self, contour_beg, contour_end, contour_beg, i1, pi);
    } else if (i0 < i1) {
        int j;
        int s = i1 - i0 - 1;    /* number of poles being removed */

        /* Shift hint ranges past the removed span; clamp ranges that
           started inside it to i0. */
        for (j = 0; j < self->hint_range_count; j++) {
            if (self->hint_range[j].beg_pole >= i1)
                self->hint_range[j].beg_pole -= s;
            else if (self->hint_range[j].beg_pole > i0)
                self->hint_range[j].beg_pole = i0;
            if (self->hint_range[j].end_pole >= i1)
                self->hint_range[j].end_pole -= s;
            else if (self->hint_range[j].end_pole > i0)
                self->hint_range[j].end_pole = i0;
        }
        /* Close the gap in the pole array (regions overlap : memmove). */
        memmove(&self->pole[i0 + 1], &self->pole[i1], sizeof(*self->pole) * (self->pole_count - i1));
        self->contour[self->contour_count] -= s;
        self->pole_count -= s;
        if (*pi >= i1)
            *pi -= s;
        else if (i0 <= *pi)
            *pi = i0;
    }
}
978 | | |
979 | | static void t1_hinter__adjust_stem_hints_by_missed_flex(t1_hinter * self, t1_glyph_space_coord g0, |
980 | | t1_glyph_space_coord gm, int k) |
981 | 0 | { |
982 | | /* While fixing a missed flex, a part of outline is shifted. |
983 | | If there are stem hints pointing to that outline part, we need to move |
984 | | their coordinates as well. Here we do so in some hackish way : |
985 | | shift any stem that falls into the related coordinate gap. |
986 | | It would be nice to have a thinner choice, |
987 | | but it appears some complicated, because it could |
988 | | multiply stem hints when a hint points to several stems, |
989 | | and only some of them are shifted. |
990 | | For a simplification we assume that a well designed hint |
991 | | must shift all such stems when unbending a flex. |
992 | | */ |
993 | 0 | t1_glyph_space_coord gg = g0; |
994 | 0 | int i; |
995 | |
|
996 | 0 | k = !k; |
997 | 0 | if (gm < g0) { |
998 | 0 | g0 ^= gm; gm ^= g0; g0 ^= gm; |
999 | 0 | } |
1000 | 0 | for (i = 0; i < self->hint_count; i++) |
1001 | 0 | if (k == (self->hint[i].type != hstem)) { |
1002 | 0 | t1_hint *hint = &self->hint[i]; |
1003 | |
|
1004 | 0 | if (g0 <= hint->g0 && hint->g0 <= gm) |
1005 | 0 | hint->g0 = hint->ag0 = gg; |
1006 | 0 | if (g0 <= hint->g1 && hint->g1 <= gm) |
1007 | 0 | hint->g1 = hint->ag1 = gg; |
1008 | 0 | } |
1009 | 0 | } |
1010 | | |
/* Scan the last contour for flex-like bumps that were not marked with the
   flex operator (e.g. fonts emitting raw curves), and flatten them :
   find a plateau of nearly equal coordinates, locate a flex inside it,
   compact the interior poles and shift affected stem hints. */
static void t1_hinter__fix_missed_flex(t1_hinter * self)
{
    int contour_beg, contour_end;
    int i, j, k, pj, n, j0, j1;

    if (self->contour_count == 0)
        return;
    contour_beg = self->contour[self->contour_count -1];
    contour_end = self->pole_count - 1; /* the last contour's 'closepath'. */
    if (contour_beg + 8 >= contour_end)
        return;     /* too few poles to contain a flex (needs 8). */
    /* k == 0 : horizontal flexes (x plateau), k == 1 : vertical ones. */
    for (k = 0; k < 2; k++) {
        t1_glyph_space_coord *p_gc = (!k ? &self->pole[0].gx : &self->pole[0].gy);
        t1_glyph_space_coord *p_gd = (!k ? &self->pole[0].gy : &self->pole[0].gx);
        int offset_gc = (char *)p_gc - (char *)&self->pole[0];
        int offset_gd = (char *)p_gd - (char *)&self->pole[0];
        t1_glyph_space_coord pixel_g = (!k ? self->pixel_gw : self->pixel_gh);
        t1_glyph_space_coord threshold = pixel_g * 5 / 10;  /* half a pixel */
        t1_glyph_space_coord gc0, gc1, gc, gcj, gd = 0, ge, gm;
        int dir = 0, prev_dir;
        bool wrapped = false;

        gc = *member_prt(t1_glyph_space_coord, &self->pole[contour_beg], offset_gc);
        gc0 = gc - threshold;
        gc1 = gc + threshold;
        /* Backward search for a plateau start. */
        for (i = contour_end; i > contour_beg; i--) {
            gcj = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gc);
            if (self->pole[i].type == offcurve)
                continue;
            if (gcj < gc0 || gcj > gc1)
                break;
        }
        if (i == contour_end) {
            i = contour_beg;
            wrapped = true;
        } else
            i++;
        /* Forward search for all plateaus. */
        for (;;i++) {
            prev_dir = 0;
            if (i == contour_end) {
                if (wrapped)
                    break;
                wrapped = true;
                i = contour_beg;
            }
            gc = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gc);
            ge = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gd);
            gc0 = gc - threshold;
            gc1 = gc + threshold;
            /* Extend the plateau while on-curve poles stay within the band. */
            for (pj = i, j = i + 1, n = 0; ; pj = j, j++, n++) {
                if (j == contour_end)
                    j = contour_beg;
                if (j == i)
                    break; /* against bad glyphs. */
                if (self->pole[j].type == offcurve)
                    continue;
                gcj = *member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc);
                if (gcj < gc0 || gcj > gc1)
                    break;
                /* NOTE(review): this reads pole[i], which is loop-invariant,
                   so gd == ge on every pass and the direction-change break
                   below never fires; pole[j] may have been intended -
                   confirm against upstream before changing. */
                gd = *member_prt(t1_glyph_space_coord, &self->pole[i], offset_gd);
                dir = (gd > ge ? 1 : -1);
                if (dir * prev_dir < 0)
                    break;
                ge = gd;
                prev_dir = dir;
            }
            if (n < 6)
                continue;   /* a flex needs at least 6 interior poles. */
            if (t1_hinter__find_flex(self, k, contour_beg, contour_end, pixel_g, threshold, i, pj, n, &j0, &j1, &gm)) {
                t1_hinter__compact_flex(self, contour_beg, contour_end, j0, j1, &i);
                t1_hinter__adjust_stem_hints_by_missed_flex(self, gc, gm, k);
                contour_end = self->pole_count - 1;
            }
        }
    }
}
1089 | | |
/* Relative moveto : close the current subpath (when not inside a flex),
   then start a new one at the current point advanced by (xx, yy).
   In pass-through mode the point goes straight to the output path;
   otherwise a moveto pole is accumulated. Returns 0 or an error code. */
int t1_hinter__rmoveto(t1_hinter * self, fixed xx, fixed yy)
{   int code;

    t1_hinter__adjust_matrix_precision(self, xx, yy);
    if (self->flex_count == 0) {
        if (self->pass_through) {
            t1_glyph_space_coord gx = self->cx += xx;
            t1_glyph_space_coord gy = self->cy += yy;
            fixed fx, fy;

            /* Close the previous subpath before starting a new one. */
            if (self->path_opened) {
                code = gx_path_close_subpath(self->output_path);
                if (code < 0)
                    return code;
                self->path_opened = false;
            }
            g2d(self, gx, gy, &fx, &fy);
            code = gx_path_add_point(self->output_path, fx, fy);
            if (self->flex_count == 0) {
                /* Remember the subpath start for closepath. */
                self->bx = self->cx;
                self->by = self->cy;
            }
            return code;
        }
        /* Drop a redundant moveto so contours hold at most one. */
        if (self->pole_count > 0 && self->pole[self->pole_count - 1].type == moveto)
            self->pole_count--;
        if (self->pole_count > 0 && self->pole[self->pole_count - 1].type != closepath) {
            code = t1_hinter__closepath(self);
            if (code < 0)
                return code;
        }
        /* Fonts that never use the flex operator may still contain flexes. */
        if (!self->have_flex)
            t1_hinter__fix_missed_flex(self);
    }
    code = t1_hinter__add_pole(self, xx, yy, moveto);
    if (self->flex_count == 0) {
        self->bx = self->cx;
        self->by = self->cy;
    }
    return code;
}
1131 | | |
/* Drop the last 'npoles' accumulated poles when the segment they form is
   degenerate, i.e. every pole (previous endpoint and any control points)
   coincides with the new current point. */
static inline void t1_hinter__skip_degenerate_segnment(t1_hinter * self, int npoles)
{   /* Degenerate segments may appear due to import shift with bbox > 4096 */
    int contour_beg = self->contour[self->contour_count], i;

    /* Never remove the contour's first segment. */
    if (contour_beg >= self->pole_count - npoles)
        return;
    for (i = self->pole_count - npoles - 1; i < self->pole_count - 1; i++)
        if (self->pole[i].ax != self->cx || self->pole[i].ay != self->cy)
            return;     /* some pole differs : the segment is real. */
    self->pole_count -= npoles;
}
1143 | | |
1144 | | int t1_hinter__rlineto(t1_hinter * self, fixed xx, fixed yy) |
1145 | 8.51M | { |
1146 | 8.51M | t1_hinter__adjust_matrix_precision(self, xx, yy); |
1147 | 8.51M | if (self->pass_through) { |
1148 | 8.51M | t1_glyph_space_coord gx = self->cx += xx; |
1149 | 8.51M | t1_glyph_space_coord gy = self->cy += yy; |
1150 | 8.51M | fixed fx, fy; |
1151 | | |
1152 | 8.51M | self->path_opened = true; |
1153 | 8.51M | g2d(self, gx, gy, &fx, &fy); |
1154 | 8.51M | return gx_path_add_line(self->output_path, fx, fy); |
1155 | 8.51M | } else { |
1156 | 0 | int code = t1_hinter__add_pole(self, xx, yy, oncurve); |
1157 | |
|
1158 | 0 | if (code < 0) |
1159 | 0 | return code; |
1160 | 0 | t1_hinter__skip_degenerate_segnment(self, 1); |
1161 | 0 | return 0; |
1162 | 0 | } |
1163 | 8.51M | } |
1164 | | |
1165 | | int t1_hinter__rcurveto(t1_hinter * self, fixed xx0, fixed yy0, fixed xx1, fixed yy1, fixed xx2, fixed yy2) |
1166 | 12.3M | { |
1167 | 12.3M | t1_hinter__adjust_matrix_precision(self, xx0, yy0); |
1168 | 12.3M | t1_hinter__adjust_matrix_precision(self, xx1, yy1); |
1169 | 12.3M | t1_hinter__adjust_matrix_precision(self, xx2, yy2); |
1170 | 12.3M | if (self->pass_through) { |
1171 | 12.3M | t1_glyph_space_coord gx0 = self->cx += xx0; |
1172 | 12.3M | t1_glyph_space_coord gy0 = self->cy += yy0; |
1173 | 12.3M | t1_glyph_space_coord gx1 = self->cx += xx1; |
1174 | 12.3M | t1_glyph_space_coord gy1 = self->cy += yy1; |
1175 | 12.3M | t1_glyph_space_coord gx2 = self->cx += xx2; |
1176 | 12.3M | t1_glyph_space_coord gy2 = self->cy += yy2; |
1177 | 12.3M | fixed fx0, fy0, fx1, fy1, fx2, fy2; |
1178 | | |
1179 | 12.3M | self->path_opened = true; |
1180 | 12.3M | g2d(self, gx0, gy0, &fx0, &fy0); |
1181 | 12.3M | g2d(self, gx1, gy1, &fx1, &fy1); |
1182 | 12.3M | g2d(self, gx2, gy2, &fx2, &fy2); |
1183 | 12.3M | return gx_path_add_curve(self->output_path, fx0, fy0, fx1, fy1, fx2, fy2); |
1184 | 12.3M | } else { |
1185 | 0 | int code; |
1186 | |
|
1187 | 0 | code = t1_hinter__add_pole(self, xx0, yy0, offcurve); |
1188 | 0 | if (code < 0) |
1189 | 0 | return code; |
1190 | 0 | code = t1_hinter__add_pole(self, xx1, yy1, offcurve); |
1191 | 0 | if (code < 0) |
1192 | 0 | return code; |
1193 | 0 | code = t1_hinter__add_pole(self, xx2, yy2, oncurve); |
1194 | 0 | if (code < 0) |
1195 | 0 | return code; |
1196 | 0 | t1_hinter__skip_degenerate_segnment(self, 3); |
1197 | 0 | return 0; |
1198 | 0 | } |
1199 | 12.3M | } |
1200 | | |
1201 | | void t1_hinter__setcurrentpoint(t1_hinter * self, fixed xx, fixed yy) |
1202 | 567 | { |
1203 | 567 | t1_hinter__adjust_matrix_precision(self, xx, yy); |
1204 | 567 | if (self->FontType != 2) { |
1205 | | /* We use this function to set a subglyph origin |
1206 | | for composite glyphs in Type 2 fonts. |
1207 | | */ |
1208 | 567 | self->cx = xx; |
1209 | 567 | self->cy = yy; |
1210 | 567 | } else if (self->cx != xx || self->cy != yy) { |
1211 | | /* Type 1 spec reads : "The setcurrentpoint command is used only |
1212 | | in conjunction with results from OtherSubrs procedures." |
1213 | | We guess that such cases don't cause a real coordinate change |
1214 | | (our testbase shows that). But we met a font |
1215 | | (see comparefiles/type1-ce1_setcurrentpoint.ps) which use |
1216 | | setcurrentpoint immediately before moveto, with no conjunction |
1217 | | with OtherSubrs. (The check above is debug purpose only.) |
1218 | | */ |
1219 | 0 | self->cx = xx; |
1220 | 0 | self->cy = yy; |
1221 | 0 | } |
1222 | 567 | } |
1223 | | |
/* Close the current contour. In pass-through mode the subpath is closed on
   the output path directly. Otherwise a closepath pole is recorded back at
   the contour start (unless the current point is already there) and the
   contour table is advanced. Returns 0 or an error code. */
int t1_hinter__closepath(t1_hinter * self)
{   if (self->pass_through) {
        self->path_opened = false;
        return gx_path_close_subpath(self->output_path);
    } else {
        int contour_beg = self->contour[self->contour_count], code;

        if (contour_beg == self->pole_count)
            return 0; /* maybe a single trailing moveto */
        if (self->bx == self->cx && self->by == self->cy) {
            /* Don't create degenerate segment */
            self->pole[self->pole_count - 1].type = closepath;
        } else {
            /* Append a zero-length pole at the subpath start (bx, by),
               preserving the real current point around the call. */
            t1_glyph_space_coord cx = self->cx, cy = self->cy;

            self->cx = self->bx;
            self->cy = self->by;
            code = t1_hinter__add_pole(self, 0, 0, closepath);
            if (code < 0)
                return code;
            self->cx = cx;
            self->cy = cy;
        }
        self->contour_count++;
        if (self->contour_count >= self->max_contour_count)
            if(t1_hinter__realloc_array(self->memory, (void **)&self->contour, self->contour0, &self->max_contour_count,
                                        sizeof(self->contour0) / count_of(self->contour0), T1_MAX_CONTOURS, s_contour_array))
                return_error(gs_error_VMerror);
        self->contour[self->contour_count] = self->pole_count;
        return 0;
    }
}
1256 | | |
1257 | | int t1_hinter__end_subglyph(t1_hinter * self) |
1258 | 1.67M | { |
1259 | 1.67M | if (self->pass_through) |
1260 | 1.67M | return 0; |
1261 | 0 | self->subglyph_count++; |
1262 | 0 | if (self->subglyph_count >= self->max_subglyph_count) |
1263 | 0 | if(t1_hinter__realloc_array(self->memory, (void **)&self->subglyph, self->subglyph0, &self->max_subglyph_count, |
1264 | 0 | sizeof(self->subglyph0) / count_of(self->subglyph0), T1_MAX_SUBGLYPHS, s_subglyph_array)) |
1265 | 0 | return_error(gs_error_VMerror); |
1266 | 0 | self->subglyph[self->subglyph_count] = self->contour_count; |
1267 | 0 | return 0; |
1268 | 0 | } |
1269 | | |
1270 | | static inline int t1_hinter__can_add_hint(t1_hinter * self, t1_hint **hint) |
1271 | 0 | { if (self->hint_count >= self->max_hint_count) |
1272 | 0 | if(t1_hinter__realloc_array(self->memory, (void **)&self->hint, self->hint0, &self->max_hint_count, |
1273 | 0 | sizeof(self->hint0) / count_of(self->hint0), T1_MAX_HINTS, s_hint_array)) |
1274 | 0 | return_error(gs_error_VMerror); |
1275 | 0 | *hint = &self->hint[self->hint_count]; |
1276 | 0 | return 0; |
1277 | 0 | } |
1278 | | |
1279 | | int t1_hinter__flex_beg(t1_hinter * self) |
1280 | 567 | { if (self->flex_count != 0) |
1281 | 0 | return_error(gs_error_invalidfont); |
1282 | 567 | self->flex_count++; |
1283 | 567 | self->have_flex = true; |
1284 | 567 | if (self->pass_through) |
1285 | 567 | return t1_hinter__rmoveto(self, 0, 0); |
1286 | 0 | return 0; |
1287 | 567 | } |
1288 | | |
1289 | | int t1_hinter__flex_point(t1_hinter * self) |
1290 | 3.96k | { if (self->flex_count == 0) |
1291 | 0 | return_error(gs_error_invalidfont); |
1292 | 3.96k | self->flex_count++; |
1293 | 3.96k | return 0; |
1294 | 3.96k | } |
1295 | | |
/* Finish a flex sequence of exactly 8 accumulated poles. If the flex
   deviation exceeds flex_height (hundredths of a pixel in outliner space),
   the flex is rendered as two curves; otherwise it collapses to a single
   line. Returns 0 or an error code. */
int t1_hinter__flex_end(t1_hinter * self, fixed flex_height)
{   t1_pole *pole0, *pole1, *pole4;
    t1_hinter_space_coord ox, oy;
    const int32_t div_x = self->g2o_fraction << self->log2_pixels_x;
    const int32_t div_y = self->g2o_fraction << self->log2_pixels_y;

    if (self->flex_count != 8)
        return_error(gs_error_invalidfont);
    /* We've got 8 poles accumulated in pole array. */
    pole0 = &self->pole[self->pole_count - 8];
    pole1 = &self->pole[self->pole_count - 7];
    pole4 = &self->pole[self->pole_count - 4];
    /* Measure the mid-point deviation in outliner space. */
    g2o(self, pole4->gx - pole1->gx, pole4->gy - pole1->gy, &ox, &oy);
    if (any_abs(ox) > div_x * fixed2float(flex_height) / 100 ||
        any_abs(oy) > div_y * fixed2float(flex_height) / 100) {
        /* do with curves */
        if (self->pass_through) {
            fixed fx0, fy0, fx1, fy1, fx2, fy2;
            int code;

            g2d(self, pole0[2].gx, pole0[2].gy, &fx0, &fy0);
            g2d(self, pole0[3].gx, pole0[3].gy, &fx1, &fy1);
            g2d(self, pole0[4].gx, pole0[4].gy, &fx2, &fy2);
            code = gx_path_add_curve(self->output_path, fx0, fy0, fx1, fy1, fx2, fy2);
            if (code < 0)
                return code;
            g2d(self, pole0[5].gx, pole0[5].gy, &fx0, &fy0);
            g2d(self, pole0[6].gx, pole0[6].gy, &fx1, &fy1);
            g2d(self, pole0[7].gx, pole0[7].gy, &fx2, &fy2);
            self->flex_count = 0;
            self->pole_count = 0;
            return gx_path_add_curve(self->output_path, fx0, fy0, fx1, fy1, fx2, fy2);
        } else {
            /* Drop the flex reference point (pole1) and retype the
               remaining 7 poles as two curve segments. */
            memmove(pole1, pole1 + 1, (sizeof(self->pole0) / count_of(self->pole0)) * 7);
            pole0[1].type = pole0[2].type = offcurve;
            pole0[3].type = oncurve;
            pole0[4].type = pole0[5].type = offcurve;
            pole0[6].type = oncurve;
            self->pole_count--;
        }
    } else {
        /* do with line */
        if (self->pass_through) {
            fixed fx, fy;

            g2d(self, pole0[7].gx, pole0[7].gy, &fx, &fy);
            self->flex_count = 0;
            self->pole_count = 0;
            return gx_path_add_line(self->output_path, fx, fy);
        } else {
            /* Collapse the whole flex to a single line to its endpoint. */
            pole0[1] = pole0[7];
            pole0[1].type = oncurve;
            self->pole_count -= 6;
        }
    }
    self->flex_count = 0;
    return 0;
}
1354 | | |
1355 | | static inline int t1_hinter__can_add_hint_range(t1_hinter * self, t1_hint_range **hint_range) |
1356 | 0 | { if (self->hint_range_count >= self->max_hint_range_count) |
1357 | 0 | if(t1_hinter__realloc_array(self->memory, (void **)&self->hint_range, self->hint_range0, &self->max_hint_range_count, |
1358 | 0 | sizeof(self->hint_range0) / count_of(self->hint_range0), T1_MAX_HINTS, s_hint_range_array)) |
1359 | 0 | return_error(gs_error_VMerror); |
1360 | 0 | *hint_range = &self->hint_range[self->hint_range_count]; |
1361 | 0 | return 0; |
1362 | 0 | } |
1363 | | |
1364 | | static inline int t1_hinter__can_add_hint_applying(t1_hinter * self, t1_hint_applying **hint_applying) |
1365 | 0 | { if (self->hint_applying_count >= self->max_hint_applying_count) |
1366 | 0 | if(t1_hinter__realloc_array(self->memory, (void **)&self->hint_applying, self->hint_applying0, &self->max_hint_applying_count, |
1367 | 0 | sizeof(self->hint_applying0) / count_of(self->hint_applying0), T1_MAX_HINTS, s_hint_applying_array)) |
1368 | 0 | return_error(gs_error_VMerror); |
1369 | 0 | *hint_applying = &self->hint_applying[self->hint_applying_count]; |
1370 | 0 | return 0; |
1371 | 0 | } |
1372 | | |
/* Apply a hint mask (Type 2 'hintmask'): activate the hints whose bit is
   set and deactivate the rest, opening/closing hint ranges at the current
   pole.  mask == NULL deactivates all hints.  Returns 0 or a negative
   error code. */
int t1_hinter__hint_mask(t1_hinter * self, byte *mask)
{   int hint_count, i;

    if (self->disable_hinting)
        return 0;
    hint_count = self->hint_count;

    for(i = 0; i < hint_count; i++) {
        /* Bit i of the mask, MSB first within each byte. */
        bool activate = (mask != NULL && (mask[i >> 3] & (0x80 >> (i & 7))) != 0);
        t1_hint *hint = &self->hint[i];

        if (activate) {
            if (hint->range_index != -1 &&
                (self->hint_range[hint->range_index].end_pole == -1 ||
                 self->hint_range[hint->range_index].end_pole == self->pole_count)) {
                /* continue the range (still open, or closed exactly at
                   the current pole, so it can be reopened seamlessly) */
                self->hint_range[hint->range_index].end_pole = -1;
            } else {
                /* add new range starting at the current pole */
                t1_hint_range *hint_range;
                int code = t1_hinter__can_add_hint_range(self, &hint_range);

                if (code < 0)
                    return code;
                hint_range->beg_pole = self->pole_count;
                hint_range->end_pole = -1; /* -1 == range still open */
                hint_range->next = hint->range_index;
                hint->range_index = self->hint_range_count;
                self->hint_range_count++;
            }
        } else {
            if (hint->range_index != -1 &&
                self->hint_range[hint->range_index].end_pole == -1) {
                /* deactivate : close the open range at the current pole */
                self->hint_range[hint->range_index].end_pole = self->pole_count;
            } else
                DO_NOTHING;
        }
    }
    return 0;
}
1414 | | |
1415 | | int t1_hinter__drop_hints(t1_hinter * self) |
1416 | 631k | { if (self->disable_hinting) |
1417 | 631k | return 0; |
1418 | 0 | if (self->primary_hint_count == -1) |
1419 | 0 | self->primary_hint_count = self->hint_range_count; |
1420 | 0 | return t1_hinter__hint_mask(self, NULL); |
1421 | 631k | } |
1422 | | |
/* Record a stem hint.  v0 is the stem start relative to the current
   subglyph origin, v1 the (possibly negative) stem width; side_mask marks
   which boundaries are real (bit 0 = g0, bit 1 = g1).  An existing hint
   with identical geometry is reused; in either case a new pole range is
   opened for it starting at the current pole.
   Returns 0 or a negative error code. */
static inline int t1_hinter__stem(t1_hinter * self, enum t1_hint_type type, unsigned short stem3_index
                                                , fixed v0, fixed v1, int side_mask)
{   t1_hint *hint;
    /* Stem coordinates are relative to the subglyph origin. */
    t1_glyph_space_coord s = (type == hstem ? self->subglyph_orig_gy : self->subglyph_orig_gx);
    t1_glyph_space_coord g0 = s + v0;
    t1_glyph_space_coord g1 = s + v0 + v1;
    t1_hint_range *range;
    int i, code;

    t1_hinter__adjust_matrix_precision(self, (side_mask & 1 ? g0 : g1), (side_mask & 2 ? g1 : g0));
    /* Look for an already recorded hint with identical geometry : */
    for (i = 0; i < self->hint_count; i++)
        if (self->hint[i].type == type &&
            self->hint[i].g0 == g0 && self->hint[i].g1 == g1 &&
            self->hint[i].side_mask == side_mask)
            break;
    if (i < self->hint_count)
        hint = &self->hint[i];
    else {
        /* A new hint : initialize all of its fields. */
        code = t1_hinter__can_add_hint(self, &hint);
        if (code < 0)
            return code;
        hint->type = type;
        hint->g0 = hint->ag0 = g0;
        hint->g1 = hint->ag1 = g1;
        hint->aligned0 = hint->aligned1 = unaligned;
        hint->q0 = hint->q1 = max_int;
        hint->b0 = hint->b1 = false;
        hint->stem3_index = stem3_index;
        hint->range_index = -1;
        hint->side_mask = side_mask;
        hint->stem_snap_index0 = hint->stem_snap_index1 = 0;
        hint->boundary_length0 = hint->boundary_length1 = 0;
    }
    /* Open a new range for this hint, beginning at the current pole : */
    code = t1_hinter__can_add_hint_range(self, &range);
    if (code < 0)
        return code;
    range->beg_pole = self->pole_count;
    range->end_pole = -1;
    range->next = hint->range_index;
    hint->range_index = range - self->hint_range;
    if (i >= self->hint_count)
        self->hint_count++;
    self->hint_range_count++;
    return 0;
}
1468 | | |
1469 | | int t1_hinter__dotsection(t1_hinter * self) |
1470 | 86 | { if (self->pole_count == 0 || self->pole[self->pole_count - 1].type != moveto) |
1471 | 86 | return 0; /* We store beginning dotsection hints only. */ |
1472 | 0 | if (self->disable_hinting) |
1473 | 0 | return 0; |
1474 | 0 | return t1_hinter__stem(self, dot, 0, 0, 0, 0); |
1475 | 0 | } |
1476 | | |
1477 | | int t1_hinter__hstem(t1_hinter * self, fixed x0, fixed x1) |
1478 | 3.87M | { if (self->disable_hinting) |
1479 | 3.87M | return 0; |
1480 | 0 | return t1_hinter__stem(self, hstem, 0, x0, x1, 3); |
1481 | 3.87M | } |
1482 | | |
1483 | | int t1_hinter__overall_hstem(t1_hinter * self, fixed x0, fixed x1, int side_mask) |
1484 | 0 | { /* True Type autohinting only. */ |
1485 | 0 | if (self->disable_hinting) |
1486 | 0 | return 0; |
1487 | 0 | return t1_hinter__stem(self, hstem, 0, x0, x1, side_mask); |
1488 | 0 | } |
1489 | | |
1490 | | int t1_hinter__vstem(t1_hinter * self, fixed y0, fixed y1) |
1491 | 2.65M | { if (self->disable_hinting) |
1492 | 2.65M | return 0; |
1493 | 0 | return t1_hinter__stem(self, vstem, 0, y0, y1, 3); |
1494 | 2.65M | } |
1495 | | |
1496 | | int t1_hinter__hstem3(t1_hinter * self, fixed x0, fixed x1, fixed x2, fixed x3, fixed x4, fixed x5) |
1497 | 247 | { int code; |
1498 | | |
1499 | 247 | if (self->disable_hinting) |
1500 | 247 | return 0; |
1501 | 0 | code = t1_hinter__stem(self, hstem, 1, x0, x1, 3); |
1502 | 0 | if (code < 0) |
1503 | 0 | return code; |
1504 | 0 | code = t1_hinter__stem(self, hstem, 2, x2, x3, 3); |
1505 | 0 | if (code < 0) |
1506 | 0 | return code; |
1507 | 0 | return t1_hinter__stem(self, hstem, 3, x4, x5, 3); |
1508 | 0 | } |
1509 | | |
1510 | | int t1_hinter__vstem3(t1_hinter * self, fixed y0, fixed y1, fixed y2, fixed y3, fixed y4, fixed y5) |
1511 | 2.44k | { int code; |
1512 | | |
1513 | 2.44k | if (self->disable_hinting) |
1514 | 2.44k | return 0; |
1515 | 0 | code = t1_hinter__stem(self, vstem, 1, y0, y1, 3); |
1516 | 0 | if (code < 0) |
1517 | 0 | return code; |
1518 | 0 | code = t1_hinter__stem(self, vstem, 2, y2, y3, 3); |
1519 | 0 | if (code < 0) |
1520 | 0 | return code; |
1521 | 0 | return t1_hinter__stem(self, vstem, 3, y4, y5, 3); |
1522 | 0 | } |
1523 | | |
1524 | | /* --------------------- t1_hinter class members - accessories --------------------*/ |
1525 | | |
1526 | | int t1_hinter__is_x_fitting(t1_hinter * self) |
1527 | 0 | { return self->grid_fit_x; |
1528 | 0 | } |
1529 | | |
1530 | | /* --------------------- t1_hinter class members - the hinting --------------------*/ |
1531 | | |
1532 | | static inline int t1_hinter__segment_beg(t1_hinter * self, int pole_index) |
1533 | 0 | { int contour_index = self->pole[pole_index].contour_index; |
1534 | 0 | int beg_contour_pole = self->contour[contour_index]; |
1535 | 0 | int end_contour_pole = self->contour[contour_index + 1] - 2; |
1536 | 0 | int prev = ranger_step_b(pole_index, beg_contour_pole, end_contour_pole); |
1537 | |
|
1538 | 0 | while (self->pole[prev].type == offcurve) |
1539 | 0 | prev = ranger_step_b(prev, beg_contour_pole, end_contour_pole); |
1540 | 0 | return prev; |
1541 | 0 | } |
1542 | | |
1543 | | static inline int t1_hinter__segment_end(t1_hinter * self, int pole_index) |
1544 | 0 | { int contour_index = self->pole[pole_index].contour_index; |
1545 | 0 | int beg_contour_pole = self->contour[contour_index]; |
1546 | 0 | int end_contour_pole = self->contour[contour_index + 1] - 2; |
1547 | 0 | int next = ranger_step_f(pole_index, beg_contour_pole, end_contour_pole); |
1548 | |
|
1549 | 0 | while (self->pole[next].type == offcurve) |
1550 | 0 | next = ranger_step_f(next, beg_contour_pole, end_contour_pole); |
1551 | 0 | return next; |
1552 | 0 | } |
1553 | | |
1554 | | static void t1_hinter__compute_y_span(t1_hinter * self) |
1555 | 1.67M | { |
1556 | 1.67M | int n = self->pole_count - 1; |
1557 | 1.67M | int i; |
1558 | | |
1559 | 1.67M | if (n > 1) { |
1560 | | /* For non-space characters ignore the trailing moveto. |
1561 | | Rather it could give a baseline, |
1562 | | it is not guaranteedly good, |
1563 | | and doesn't allow a stable recognition |
1564 | | of the upper side of a dot, comma, etc.. */ |
1565 | 0 | n--; |
1566 | 1.67M | } else if (n < 0) { |
1567 | 1.67M | return; /* empty glyph */ |
1568 | 1.67M | } |
1569 | 0 | self->ymin = self->ymax = self->pole[0].gy; |
1570 | 0 | for (i = 1; i < n; i++) { |
1571 | 0 | if (self->ymin > self->pole[i].gy) |
1572 | 0 | self->ymin = self->pole[i].gy; |
1573 | 0 | if (self->ymax < self->pole[i].gy) |
1574 | 0 | self->ymax = self->pole[i].gy; |
1575 | 0 | } |
1576 | 0 | self->ymid = (self->ymax + self->ymin) / 2; |
1577 | 0 | } |
1578 | | |
/* Post-import cleanup of the outline/hint representation :
   - drop hints disabled by !grid_fit_x / !grid_fit_y,
   - close still-open hint ranges and move range boundaries off
     offcurve/closepath poles,
   - turn moveto poles into oncurve poles,
   - move each range start back to the beginning of its segment. */
static void t1_hinter__simplify_representation(t1_hinter * self)
{   int i, j;
    int last_pole = self->pole_count - 1;
    int primary_hint_count = self->primary_hint_count;

    if (last_pole > 1 && self->pole[last_pole -1].type == closepath)
        last_pole -= 2; /* Skip the trailing moveto. */
    if (self->pole_count <= 1)
        return; /* An empty glyph (only a trailing moveto). */
    /* Remove hints which are disabled with !grid_fit_x, !grid_fit_y.
     * We can't do before import is completed due to hint mask commands.
     */
    if (!self->grid_fit_x || !self->grid_fit_y) {
        /* Compact the hint array in place, keeping only enabled hints. */
        for (i = j = 0; i < self->hint_count; i++)
            if ((self->hint[i].type == vstem && !self->grid_fit_x) ||
                (self->hint[i].type == hstem && !self->grid_fit_y)) {
                if (i < primary_hint_count)
                    self->primary_hint_count--;
                continue; /* skip it. */
            } else {
                if (i != j) /* for Valgrind */
                    self->hint[j] = self->hint[i];
                j++;
            }
        self->hint_count = j;
    }
    for (i = 0; i < self->hint_range_count; i++) {
        t1_hint_range *hint_range = &self->hint_range[i];

        /* Shift range boundaries so they rest on on-curve/path poles :
           a range may begin at most two offcurve poles after a segment
           start, and end at most two offcurve poles before a segment end. */
        j = hint_range->beg_pole;
        if (self->pole[j].type == closepath)
            hint_range->beg_pole = ++j;
        else {
            if (self->pole[j].type == offcurve)
                hint_range->beg_pole = --j;
            if (self->pole[j].type == offcurve)
                hint_range->beg_pole = --j;
        }
        j = hint_range->end_pole;
        if (j == -1)
            hint_range->end_pole = j = last_pole; /* close still-open ranges */
        if (self->pole[j].type == offcurve)
            hint_range->end_pole = ++j;
        if (self->pole[j].type == offcurve)
            hint_range->end_pole = ++j;
    }
    /* moveto's were needed to decode path correctly.
       We don't need them so far.
       Replace 'moveto' with 'oncurve' :
     */
    for (i = 0; i <= self->contour_count; i++)
        if (self->pole[self->contour[i]].type == moveto)
            self->pole[self->contour[i]].type = oncurve;
    /* After the decoding, hint commands refer to the last pole before HR occurs.
       Move pointers to the beginning segment pole.
     */
    for (j = 0; j < self->hint_range_count; j++) {
        int beg_pole = self->hint_range[j].beg_pole;
        int contour_index = self->pole[beg_pole].contour_index;
        int contour_beg_pole = self->contour[contour_index];

        if (beg_pole > contour_beg_pole && beg_pole < last_pole)
            self->hint_range[j].beg_pole = t1_hinter__segment_beg(self, beg_pole);
    }
}
1644 | | |
1645 | | static inline bool t1_hinter__is_small_angle(t1_hinter * self, int pole_index0, int pole_index1, |
1646 | | long tan_x, long tan_y, int alpha, int alpha_div, int *quality) |
1647 | 0 | { long gx = self->pole[pole_index1].gx - self->pole[pole_index0].gx; |
1648 | 0 | long gy = self->pole[pole_index1].gy - self->pole[pole_index0].gy; |
1649 | 0 | long vp = mul_shift(gx, tan_y, _fixed_shift) - mul_shift(gy, tan_x, _fixed_shift); |
1650 | 0 | long sp = mul_shift(gx, tan_x, _fixed_shift) + mul_shift(gy, tan_y, _fixed_shift); |
1651 | 0 | long vp1 = any_abs(vp), sp1 = any_abs(sp); |
1652 | |
|
1653 | 0 | if (gx == 0 && gy == 0) { |
1654 | 0 | *quality = max_int; |
1655 | 0 | return false; |
1656 | 0 | } |
1657 | 0 | if (vp1 >= sp1) { |
1658 | 0 | *quality = max_int; |
1659 | 0 | return false; |
1660 | 0 | } |
1661 | 0 | if (vp1 / alpha_div > sp1 / alpha) { |
1662 | 0 | *quality = max_int; |
1663 | 0 | return false; |
1664 | 0 | } |
1665 | 0 | *quality = vp1 * 100 / sp1; /* The best quality is 0. */ |
1666 | 0 | return true; |
1667 | 0 | } |
1668 | | |
/* Step forward to the next pole of the same contour, wrapping cyclically.
   NOTE(review): the value returned is a pole index from ranger_step_f,
   but the declared return type is bool - this looks like it should be
   int; confirm against callers before changing. */
static inline bool t1_hinter__next_contour_pole(t1_hinter * self, int pole_index)
{   int contour_index = self->pole[pole_index].contour_index;
    int beg_contour_pole = self->contour[contour_index];
    int end_contour_pole = self->contour[contour_index + 1] - 2;

    return ranger_step_f(pole_index, beg_contour_pole, end_contour_pole);
}
1676 | | |
1677 | | static inline bool t1_hinter__is_good_tangent(t1_hinter * self, int pole_index, long tan_x, long tan_y, int *quality) |
1678 | 0 | { int contour_index = self->pole[pole_index].contour_index; |
1679 | 0 | int beg_contour_pole = self->contour[contour_index]; |
1680 | 0 | int end_contour_pole = self->contour[contour_index + 1] - 2, prev, next; |
1681 | 0 | int const alpha = 9, alpha_div = 10; |
1682 | 0 | int quality0, quality1; |
1683 | 0 | bool good0, good1; |
1684 | |
|
1685 | 0 | prev = ranger_step_b(pole_index, beg_contour_pole, end_contour_pole); |
1686 | 0 | good0 = t1_hinter__is_small_angle(self, prev, pole_index, tan_x, tan_y, alpha, alpha_div, &quality0); |
1687 | 0 | if (quality0 == 0) { |
1688 | 0 | *quality = 0; |
1689 | 0 | return true; |
1690 | 0 | } |
1691 | 0 | next = ranger_step_f(pole_index, beg_contour_pole, end_contour_pole); |
1692 | 0 | good1 = t1_hinter__is_small_angle(self, next, pole_index, tan_x, tan_y, alpha, alpha_div, &quality1); |
1693 | 0 | *quality = min(quality0, quality1); |
1694 | 0 | return good0 || good1; |
1695 | 0 | } |
1696 | | |
1697 | | static void t1_hinter__compute_type1_stem_ranges(t1_hinter * self) |
1698 | 0 | { int j; |
1699 | 0 | int end_range_pole = self->pole_count - 3; |
1700 | 0 | int primary_hint_count = self->primary_hint_count; |
1701 | |
|
1702 | 0 | if (self->hint_count == 0) |
1703 | 0 | return; |
1704 | 0 | if (primary_hint_count == -1) |
1705 | 0 | primary_hint_count = self->hint_range_count; |
1706 | | /* Process primary hints - ranges are entire glyph : */ |
1707 | 0 | for(j = 0; j < primary_hint_count; j++) { |
1708 | 0 | self->hint_range[j].beg_pole = 0; |
1709 | 0 | self->hint_range[j].end_pole = end_range_pole; |
1710 | 0 | } |
1711 | | /* Note that ranges of primary hints may include a tail of the hint array |
1712 | | due to multiple contours. Primary hints have a lesser priority, |
1713 | | so apply them first, and possibly recover later. |
1714 | | */ |
1715 | 0 | } |
1716 | | |
1717 | | static void t1_hinter__compute_type2_stem_ranges(t1_hinter * self) |
1718 | 0 | { int i; |
1719 | |
|
1720 | 0 | for (i = 0; i < self->hint_range_count; i++) |
1721 | 0 | if (self->hint_range[i].end_pole == -1) |
1722 | 0 | self->hint_range[i].end_pole = self->pole_count - 2; |
1723 | 0 | } |
1724 | | |
1725 | | static bool t1_hinter__is_stem_boundary_near(t1_hinter * self, const t1_hint *hint, |
1726 | | t1_glyph_space_coord g, int boundary) |
1727 | 0 | { |
1728 | 0 | t1_glyph_space_coord const fuzz = self->blue_fuzz; /* comparefiles/tpc2.ps */ |
1729 | |
|
1730 | 0 | return any_abs(g - (boundary ? hint->g1 : hint->g0)) <= fuzz; |
1731 | 0 | } |
1732 | | |
/* Test whether the stem hint touches the pole : returns 1 if the pole is
   near the hint's g0 boundary, 2 if near g1, 0 otherwise.  The pole must
   also have a tangent roughly parallel to the stem direction; *quality
   receives the tangent quality (0 is best). */
static int t1_hinter__is_stem_hint_applicable(t1_hinter * self, t1_hint *hint, int pole_index, int *quality)
{   /* We don't check hint->side_mask because the unused coord should be outside the design bbox. */
    int k;

    /* Note : the comma operators below record which boundary matched in k
       as a side effect of the short-circuit evaluation. */
    if (hint->type == hstem
            && ((k = 1, t1_hinter__is_stem_boundary_near(self, hint, self->pole[pole_index].gy, 0)) ||
                (k = 2, t1_hinter__is_stem_boundary_near(self, hint, self->pole[pole_index].gy, 1)))
            && t1_hinter__is_good_tangent(self, pole_index, 1, 0, quality))
        return k;
    if (hint->type == vstem
            && ((k = 1, t1_hinter__is_stem_boundary_near(self, hint, self->pole[pole_index].gx, 0)) ||
                (k = 2, t1_hinter__is_stem_boundary_near(self, hint, self->pole[pole_index].gx, 1)))
            && t1_hinter__is_good_tangent(self, pole_index, 0, 1, quality))
        return k;
    return 0;
}
1749 | | |
1750 | | static t1_zone * t1_hinter__find_zone(t1_hinter * self, t1_glyph_space_coord pole_y, bool curve, bool convex, bool concave) |
1751 | 0 | { bool maybe_top = !curve || convex; |
1752 | 0 | bool maybe_bot = !curve || concave; |
1753 | 0 | int i; |
1754 | |
|
1755 | 0 | for (i = 0; i < self->zone_count; i++) { |
1756 | 0 | t1_zone *zone = &self->zone[i]; |
1757 | 0 | if ((maybe_top && zone->type == topzone) || (maybe_bot && zone->type == botzone)) |
1758 | 0 | if (zone->y_min <= pole_y && pole_y <= zone->y_max) |
1759 | 0 | return zone; |
1760 | 0 | } |
1761 | 0 | return NULL; |
1762 | | /*todo: optimize narrowing the search range */ |
1763 | 0 | } |
1764 | | |
1765 | | static void t1_hinter__align_to_grid__general(t1_hinter * self, int32_t unit, |
1766 | | t1_glyph_space_coord gx, t1_glyph_space_coord gy, |
1767 | | t1_hinter_space_coord *pdx, t1_hinter_space_coord *pdy, |
1768 | | bool align_to_pixels, bool absolute) |
1769 | 0 | { |
1770 | 0 | long div_x = rshift(unit, (align_to_pixels ? (int)self->log2_pixels_x : self->log2_subpixels_x)); |
1771 | 0 | long div_y = rshift(unit, (align_to_pixels ? (int)self->log2_pixels_y : self->log2_subpixels_y)); |
1772 | 0 | t1_hinter_space_coord ox, oy, dx, dy; |
1773 | |
|
1774 | 0 | g2o(self, gx, gy, &ox, &oy); |
1775 | 0 | if (absolute) { |
1776 | 0 | ox += self->orig_ox; |
1777 | 0 | oy += self->orig_oy; |
1778 | 0 | } |
1779 | 0 | dx = ox % div_x; |
1780 | 0 | dy = oy % div_y; /* So far dx and dy are 19 bits */ |
1781 | 0 | if (dx > div_x / 2 ) |
1782 | 0 | dx = - div_x + dx; |
1783 | 0 | else if (dx < - div_x / 2) |
1784 | 0 | dx = div_x + dx; |
1785 | 0 | if (dy > div_y / 2) |
1786 | 0 | dy = - div_y + dy; |
1787 | 0 | else if (dy < - div_y / 2) |
1788 | 0 | dy = div_y + dy; |
1789 | 0 | *pdx = dx; |
1790 | 0 | *pdy = dy; |
1791 | 0 | } |
1792 | | |
1793 | | static void t1_hinter__align_to_grid__final(t1_hinter * self, |
1794 | | t1_glyph_space_coord *x, t1_glyph_space_coord *y, |
1795 | | t1_hinter_space_coord dx, t1_hinter_space_coord dy) |
1796 | 0 | { |
1797 | 0 | t1_glyph_space_coord gxd, gyd; |
1798 | |
|
1799 | 0 | o2g(self, dx, dy, &gxd, &gyd); |
1800 | 0 | if (self->grid_fit_x) { |
1801 | 0 | *x -= gxd; |
1802 | 0 | *x = (*x + 7) & ~15; /* Round to suppress small noise : */ |
1803 | 0 | } |
1804 | 0 | if (self->grid_fit_y) { |
1805 | 0 | *y -= gyd; |
1806 | 0 | *y = (*y + 7) & ~15; /* Round to suppress small noise : */ |
1807 | 0 | } |
1808 | 0 | } |
1809 | | |
1810 | | static void t1_hinter__hint_stem_snap_range(t1_hinter * self, |
1811 | | t1_glyph_space_coord w0, t1_glyph_space_coord w1, bool horiz, |
1812 | | short *index0, short *index1) |
1813 | 0 | { int k = (horiz ? 0 : 1), i; |
1814 | 0 | bool index0_set = false; |
1815 | |
|
1816 | 0 | *index0 = 0; |
1817 | 0 | *index1 = -1; |
1818 | 0 | for (i = 0; i < self->stem_snap_count[k]; i++) { |
1819 | 0 | if (w0 > self->stem_snap[k][i]) |
1820 | 0 | continue; |
1821 | 0 | if (!index0_set) { |
1822 | 0 | index0_set = true; |
1823 | 0 | *index0 = i; |
1824 | 0 | } |
1825 | 0 | if (w1 < self->stem_snap[k][i]) |
1826 | 0 | break; |
1827 | 0 | *index1 = i; |
1828 | 0 | } |
1829 | 0 | } |
1830 | | |
1831 | | static void t1_hinter__align_to_grid(t1_hinter * self, int32_t unit, |
1832 | | t1_glyph_space_coord *x, t1_glyph_space_coord *y, bool align_to_pixels) |
1833 | 0 | { if (unit > 0) { |
1834 | 0 | t1_hinter_space_coord dx, dy; |
1835 | |
|
1836 | 0 | t1_hinter__align_to_grid__general(self, unit, *x, *y, &dx, &dy, align_to_pixels, align_to_pixels); |
1837 | 0 | t1_hinter__align_to_grid__final(self, x, y, dx, dy); |
1838 | 0 | } |
1839 | 0 | } |
1840 | | |
/* Choose a stem snap width for every hint of one orientation
   (hv == 0 : hstem, hv == 1 : vstem).  First pass : for each hint find
   the snap widths within one pixel of its width and vote for them.
   Second pass : for each hint pick the most-voted candidate, breaking
   ties by distance to the hint's (slightly biased) width.  The result is
   stored in stem_snap_index0 (-1 when no candidate fits). */
static void t1_hinter_compute_stem_snap_range_hv(t1_hinter * self, int hv)
{
    const enum t1_hint_type T[] = {hstem, vstem};
    int i, j;
    enum t1_hint_type t = T[hv];
    bool horiz = (t == hstem);
    t1_glyph_space_coord pixel_g = (horiz ? self->pixel_gh : self->pixel_gw);
    int stem_snap_count = self->stem_snap_count[hv];

    memset(self->stem_snap_vote, 0, stem_snap_count * sizeof(self->stem_snap_vote[0]));
    /* Pass 1 : vote for every snap width within one pixel of a hint width. */
    for (i = 0; i < self->hint_count; i++) {
        if (self->hint[i].type == t) {
            t1_glyph_space_coord gw = any_abs(self->hint[i].g1 - self->hint[i].g0);

            t1_hinter__hint_stem_snap_range(self, gw - pixel_g + 1, gw + pixel_g - 1, horiz,
                    &self->hint[i].stem_snap_index0, &self->hint[i].stem_snap_index1);
            for (j = self->hint[i].stem_snap_index0; j <= self->hint[i].stem_snap_index1; j++)
                self->stem_snap_vote[j]++;
        }
    }
    /* Pass 2 : pick the winning candidate per hint. */
    for (i = 0; i < self->hint_count; i++) {
        if (self->hint[i].type == t) {
            int m = 0, mj = -1, d, md = pixel_g * 2;
            t1_glyph_space_coord gw = any_abs(self->hint[i].g1 - self->hint[i].g0);

            for (j = self->hint[i].stem_snap_index0; j <= self->hint[i].stem_snap_index1; j++) {
                if (m < self->stem_snap_vote[j]) {
                    /* More votes - take this candidate. */
                    m = self->stem_snap_vote[j];
                    mj = j;
                    md = any_abs(gw - pixel_g / 5 - self->stem_snap[hv][mj]);
                } else {
                    /* Same or fewer votes - prefer the closer width. */
                    d = any_abs(gw - pixel_g / 5 - self->stem_snap[hv][j]);
                    if (md > d) {
                        md = d;
                        mj = j;
                    }
                }
            }
            self->hint[i].stem_snap_index0 = mj;
        }
    }
}
1883 | | |
1884 | | static void t1_hinter_compute_stem_snap_range(t1_hinter * self) |
1885 | 0 | { |
1886 | 0 | if (self->stem_snap_count[0] > 1) |
1887 | 0 | t1_hinter_compute_stem_snap_range_hv(self, 0); |
1888 | 0 | if (self->stem_snap_count[1] > 1) |
1889 | 0 | t1_hinter_compute_stem_snap_range_hv(self, 1); |
1890 | 0 | } |
1891 | | |
/* Snap a stem width *pgw to the chosen StemSnap value (when close enough)
   and then round it to a whole number of pixels, never below one pixel.
   No-op unless keep_stem_width is set and the pixel size is known. */
static void t1_hinter__align_stem_width(t1_hinter * self, t1_glyph_space_coord *pgw, const t1_hint *hint)
{
    bool horiz = (hint->type == hstem);
    t1_glyph_space_coord gw = *pgw;
    t1_glyph_space_coord pixel_g = (horiz ? self->pixel_gh : self->pixel_gw);
    t1_glyph_space_coord gwe;

    if (!self->keep_stem_width || pixel_g == 0)
        return;
    if (hint->stem_snap_index0 >= 0 && self->stem_snap_count[horiz ? 0 : 1] > 0) {
        t1_glyph_space_coord w0 = self->stem_snap[horiz ? 0 : 1][hint->stem_snap_index0];
        /* Asymmetric acceptance window : up to 0.70 pixel below the snap
           width, up to 0.35 pixel above it. */
        t1_glyph_space_coord thr0 = pixel_g * 70 / 100, thr1 = pixel_g * 35 / 100;

        if (gw - thr0 <= w0 && w0 <= gw + thr1)
            gw = w0;
    }
    /* Round to an integral pixel count; widths below one pixel round up. */
    gwe = gw % pixel_g;
    if (gw >= pixel_g && gwe < pixel_g / 2)
        gw -= gwe;
    else
        gw += pixel_g - gwe;
    *pgw = gw;
}
1915 | | |
/* Align the stem boundary (*x0, *y0) to the grid while preserving the
   (snapped) stem width relative to the opposite boundary (x1, y1).
   Implemented for Bug 687578 "T1 hinter disturbs stem width". */
static void t1_hinter__align_stem_to_grid(t1_hinter * self, int32_t unit,
                t1_glyph_space_coord *x0, t1_glyph_space_coord *y0,
                t1_glyph_space_coord x1, t1_glyph_space_coord y1,
                bool align_to_pixels, const t1_hint *hint)
{   /* fixme: optimize. */
    if (unit > 0) {
        bool horiz = (hint->type == hstem);
        t1_glyph_space_coord gw = (horiz ? y1 - *y0 : x1 - *x0);
        t1_glyph_space_coord GW = any_abs(gw), GW0 = GW;
        bool positive = (gw >= 0);
        int19 cf = (horiz ? self->heigt_transform_coef_rat : self->width_transform_coef_rat);
        t1_hinter_space_coord dx0, dy0, dx1, dy1, dgw;

        /* Grid offsets of both stem boundaries : */
        t1_hinter__align_to_grid__general(self, unit, *x0, *y0, &dx0, &dy0, align_to_pixels, align_to_pixels);
        t1_hinter__align_to_grid__general(self, unit, x1, y1, &dx1, &dy1, align_to_pixels, align_to_pixels);
        /* Snap the width, and convert the width change to hinter space : */
        t1_hinter__align_stem_width(self, &GW, hint);
        dgw = g2o_dist(GW - GW0, cf);
        if ((horiz ? (!self->transposed ? self->ctmf.yy : self->ctmf.xy)
                   : (!self->transposed ? self->ctmf.xx : self->ctmf.yx)) < 0)
            dgw = - dgw;
        /* Choose which boundary to derive the alignment from - the one
           giving the smaller combined offset : */
        if (horiz) {
            t1_hinter_space_coord ddy1 = (positive ? dy0 - dgw : dy0 + dgw);
            t1_hinter_space_coord ddy0 = (positive ? dy1 + dgw : dy1 - dgw);

            if (any_abs(dy0 + ddy1) > any_abs(dy1 + ddy0))
                dy0 = ddy0;
        } else {
            t1_hinter_space_coord ddx1 = (positive ? dx0 - dgw : dx0 + dgw);
            t1_hinter_space_coord ddx0 = (positive ? dx1 + dgw : dx1 - dgw);

            if (any_abs(dx0 + ddx1) > any_abs(dx1 + ddx0))
                dx0 = ddx0;
        }
        t1_hinter__align_to_grid__final(self, x0, y0, dx0, dy0);
    }
}
1953 | | |
#if ADOBE_OVERSHOOT_COMPATIBILIY
/* Convert a glyph-space height to a hinter-space distance using the
   BlueScale-dependent overshoot coefficient. */
static inline t1_hinter_space_coord g2o_dist_blue(t1_hinter * h, t1_glyph_space_coord gw)
{   double W = fixed2float(gw);
    double w = W * (h->resolution * h->font_size * h->base_font_scale - h->BlueScale) + 1;

    return (t1_hinter_space_coord)(w * h->g2o_fraction);
    /* todo : exclude floating point */
}

/* Move *y away from the zone's flat edge by an overshoot amount quantized
   to whole g2o_fraction units (Adobe overshoot compatibility). */
static void t1_hinter__add_overshoot(t1_hinter * self, t1_zone * zone, t1_glyph_space_coord * x, t1_glyph_space_coord * y)
{   t1_glyph_space_coord gy = *y;
    /* t1_glyph_space_coord gw = any_abs(zone->overshoot_y - zone->y); */
    t1_glyph_space_coord gw = any_abs(gy - zone->y);
    t1_hinter_space_coord ow = g2o_dist_blue(self, gw);
    /* Quantize the overshoot to a multiple of g2o_fraction : */
    t1_hinter_space_coord ow1 = ow / self->g2o_fraction * self->g2o_fraction;
    t1_glyph_space_coord gw1 = o2g_dist(self, ow1, self->heigt_transform_coef_inv);

    *y = zone->y + (zone->type == topzone ? gw1 : -gw1);
}
#endif
1974 | | |
/* Compute the grid-aligned coordinate *gc for the stem boundary at the
   given segment pole : apply an alignment (blue) zone when one matches,
   otherwise align either by the stem (preserving its width) or by the
   plain grid.  Returns the resulting alignment classification. */
static enum t1_align_type t1_hinter__compute_aligned_coord(t1_hinter * self,
            t1_glyph_space_coord * gc, int segment_index, fixed t, const t1_hint *hint,
            enum t1_align_type align0)
{   /* Returns true, if alignment zone is applied. */
    /* t is 0 or 0.5, and it is always 0 for curves. */
    bool horiz = (hint->type == hstem);
    enum t1_align_type align = align0;
    t1_glyph_space_coord gx = self->pole[segment_index].gx, gx0;
    t1_glyph_space_coord gy = self->pole[segment_index].gy, gy0;
    t1_glyph_space_coord gc0 = (horiz ? gy : gx);
    bool align_by_stem =
        align0 == unaligned /* Force aligning outer boundaries
                               from the TT spot analyzer. */
        && hint->b0 && hint->b1; /* It's a real stem. Contrary
                                    033-52-5873.pdf uses single hint boundaries
                                    to mark top|bottom sides of a glyph,
                                    but their opposite boundaries are dummy coordinates,
                                    which don't correspond to poles. */

    /* Compute point of specified segment by parameter t : */
    if (t) {
        int next = t1_hinter__segment_end(self, segment_index);
        t1_glyph_space_coord gx1 = self->pole[next].gx;
        t1_glyph_space_coord gy1 = self->pole[next].gy;

        gx = (gx + gx1) / 2;
        gy = (gy + gy1) / 2;
    }
    gx0 = gx;
    gy0 = gy;
    if (horiz) {
        /* Classify the local shape around the pole to decide whether an
           alignment zone applies. */
        t1_pole * pole = &self->pole[segment_index];
        int contour_index = pole->contour_index;
        int beg_contour_pole = self->contour[contour_index];
        int end_contour_pole = self->contour[contour_index + 1] - 2;
        int prev1 = ranger_step_b(segment_index, beg_contour_pole, end_contour_pole);
        int prev2 = ranger_step_b(prev1, beg_contour_pole, end_contour_pole);
        int next1 = ranger_step_f(segment_index, beg_contour_pole, end_contour_pole);
        int next2 = ranger_step_f(next1, beg_contour_pole, end_contour_pole);
        bool forwd_horiz = (any_abs(self->pole[next1].gy - pole->gy) <=
                max(self->blue_fuzz, any_abs(self->pole[next1].gx - pole->gx) / 10));
        bool bckwd_horiz = (any_abs(self->pole[prev1].gy - pole->gy) <=
                max(self->blue_fuzz, any_abs(self->pole[prev1].gx - pole->gx) / 10));
        bool maximum = (self->pole[next1].gy - pole->gy < 0 &&
                        self->pole[prev1].gy - pole->gy < 0);
        bool minimum = (self->pole[next1].gy - pole->gy > 0 &&
                        self->pole[prev1].gy - pole->gy > 0);

        if (forwd_horiz || bckwd_horiz || maximum || minimum) {
            bool forwd_curve = (self->pole[next1].type == offcurve);
            bool bckwd_curve = (self->pole[prev1].type == offcurve);
            bool curve = (bckwd_curve && forwd_curve);
            bool convex = (curve && self->pole[prev2].gy <= pole->gy &&
                                    self->pole[next2].gy <= pole->gy);
            bool concave = (curve && self->pole[prev2].gy >= pole->gy &&
                                     self->pole[next2].gy >= pole->gy);
            t1_zone *zone = t1_hinter__find_zone(self, pole->gy, curve || maximum || minimum,
                                                 convex || maximum, concave || minimum);

            if (zone != NULL &&
                (forwd_horiz || bckwd_horiz ||
                 (maximum && zone->type == topzone) ||
                 (minimum && zone->type == botzone))) {
                if (self->suppress_overshoots)
#                   if ADOBE_OVERSHOOT_COMPATIBILIY
                        gy = (zone->type == topzone ? zone->overshoot_y : zone->y);
#                   else
                        gy = zone->y;
#                   endif
                else {
                    t1_glyph_space_coord s = zone->y - pole->gy;
                    if (zone->type == topzone)
                        s = -s;
                    /* Small overshoots snap flat; larger ones are kept or
                       enforced to at least twice the threshold : */
                    if (!curve && s < self->overshoot_threshold)
                        gy = zone->y;
                    else if (s > self->overshoot_threshold) {
                        t1_glyph_space_coord ss = self->overshoot_threshold * 2;

                        if (s < ss) /* Enforce overshoot : */
                            gy = (zone->type == topzone ? zone->y + ss : zone->y - ss);
                        else {
#                           if ADOBE_OVERSHOOT_COMPATIBILIY
                                t1_hinter__add_overshoot(self, zone, &gx, &gy);
#                           endif
                        }
                    }
                }
                align = (zone->type == topzone ? topzn : botzn);
                align_by_stem = false; /* The zone takes precedence. */
            }
        }
    }
    if (align_by_stem) {
        /* Identify which stem boundary the point sits on; the opposite
           boundary supplies the second point for width-preserving alignment. */
        t1_glyph_space_coord gx1, gy1;

        if (horiz) {
            bool b0 = t1_hinter__is_stem_boundary_near(self, hint, gy, 0);
            bool b1 = t1_hinter__is_stem_boundary_near(self, hint, gy, 1);

            gx1 = gx;
            if (b0 && !b1)
                gy1 = hint->g1, align_by_stem = true;
            else if (!b0 && b1)
                gy1 = hint->g0, align_by_stem = true;
            else
                gy1 = 0; /* Quiet the compiler. */
        } else {
            bool b0 = t1_hinter__is_stem_boundary_near(self, hint, gx, 0);
            bool b1 = t1_hinter__is_stem_boundary_near(self, hint, gx, 1);

            gy1 = gy;
            if (b0 && !b1)
                gx1 = hint->g1, align_by_stem = true;
            else if (!b0 && b1)
                gx1 = hint->g0, align_by_stem = true;
            else
                gx1 = 0; /* Quiet the compiler. */
        }
        if (align_by_stem)
            t1_hinter__align_stem_to_grid(self, self->g2o_fraction, &gx, &gy, gx1, gy1,
                    CONTRAST_STEMS || self->align_to_pixels, hint);
    }
    if (!align_by_stem)
        t1_hinter__align_to_grid(self, self->g2o_fraction, &gx, &gy,
                    CONTRAST_STEMS || self->align_to_pixels);
    *gc = gc0 + (horiz ? gy - gy0 : gx - gx0);
    return (align == unaligned ? aligned : align);
}
2103 | | |
2104 | 0 | #define PRESERVE_STEM_SLANT 1 /* 0 - always diminish |
2105 | | 1 - preserve iff slanted in design space |
2106 | | 2 - always preserve */ |
2107 | | |
2108 | | static int t1_hinter__find_stem_middle(t1_hinter * self, fixed *t, int pole_index, bool horiz) |
2109 | 0 | { |
2110 | | /* *t = 0 preserves slant; *t = fixed_half deminishes slant (don't apply to curves). */ |
2111 | 0 | if (PRESERVE_STEM_SLANT == 2) { |
2112 | 0 | *t = 0; |
2113 | 0 | return pole_index; |
2114 | 0 | } else { |
2115 | | /* For a better representation of arms with a small slope, |
2116 | | we align their poles. It appears useful for CJK fonts, |
2117 | | see comparefiles/japan.ps, Bug687603.ps . |
2118 | | Otherwise (a slightly rotated glyph, see Openhuis_pdf_zw.pdf) |
2119 | | we align the arm middle, causing the slope to look smaller |
2120 | | */ |
2121 | | /* We assume proper glyphs, see Type 1 spec, chapter 4. */ |
2122 | 0 | int next = t1_hinter__next_contour_pole(self, pole_index); |
2123 | 0 | const int alpha = 10; |
2124 | 0 | int design_slant; |
2125 | 0 | bool curve = self->pole[next].type == offcurve; |
2126 | 0 | bool continuing = (horiz ? t1_hinter__is_small_angle(self, next, pole_index, 1, 0, alpha, 1, &design_slant) |
2127 | 0 | : t1_hinter__is_small_angle(self, next, pole_index, 0, 1, alpha, 1, &design_slant)); |
2128 | |
|
2129 | 0 | if (!PRESERVE_STEM_SLANT || design_slant == 0) |
2130 | 0 | *t = (!curve && continuing ? fixed_half : 0); |
2131 | 0 | else |
2132 | 0 | *t = 0; |
2133 | 0 | return pole_index; |
2134 | 0 | } |
2135 | 0 | } |
2136 | | |
2137 | | static int t1_hinter__skip_stem(t1_hinter * self, int pole_index, bool horiz) |
2138 | 0 | { /* We assume proper glyphs, see Type 1 spec, chapter 4. */ |
2139 | 0 | int i = pole_index; |
2140 | 0 | int next_pole = t1_hinter__next_contour_pole(self, i); |
2141 | 0 | int next_segm = t1_hinter__segment_end(self, i); |
2142 | 0 | long tan_x = (horiz ? 1 : 0); |
2143 | 0 | long tan_y = (horiz ? 0 : 1); |
2144 | 0 | int quality; |
2145 | |
|
2146 | 0 | while (t1_hinter__is_small_angle(self, i, next_pole, tan_x, tan_y, 1000, 1, &quality) && /* The threshold is taken from scratch. */ |
2147 | 0 | t1_hinter__is_small_angle(self, i, next_segm, tan_x, tan_y, 1000, 1, &quality)) { |
2148 | 0 | i = t1_hinter__segment_end(self, i); |
2149 | 0 | if (i == pole_index) { |
2150 | | /* An invalid glyph with <=2 segments in the contour with no angles. */ |
2151 | 0 | break; |
2152 | 0 | } |
2153 | 0 | next_pole = t1_hinter__next_contour_pole(self, i); |
2154 | 0 | next_segm = t1_hinter__segment_end(self, i); |
2155 | 0 | } |
2156 | 0 | return i; |
2157 | 0 | } |
2158 | | |
2159 | | static void t1_hinter__mark_existing_stems(t1_hinter * self) |
2160 | 0 | { /* fixme: Duplicated code with t1_hinter__align_stem_commands. */ |
2161 | 0 | int i, j, jj, k; |
2162 | |
|
2163 | 0 | for(i = 0; i < self->hint_count; i++) |
2164 | 0 | if (self->hint[i].type == vstem || self->hint[i].type == hstem) |
2165 | 0 | for (k = self->hint[i].range_index; k != -1; k = self->hint_range[k].next) { |
2166 | 0 | int beg_range_pole = self->hint_range[k].beg_pole; |
2167 | 0 | int end_range_pole = self->hint_range[k].end_pole; |
2168 | 0 | int quality; |
2169 | |
|
2170 | 0 | if (self->pole[beg_range_pole].type == closepath) { |
2171 | | /* A workaround for a buggy font from the Bug 687393, |
2172 | | which defines a range with 'closepath' only. */ |
2173 | 0 | beg_range_pole++; |
2174 | 0 | if (beg_range_pole > end_range_pole) |
2175 | 0 | continue; |
2176 | 0 | } |
2177 | 0 | for (j = beg_range_pole; j <= end_range_pole;) { |
2178 | 0 | int k = t1_hinter__is_stem_hint_applicable(self, &self->hint[i], j, &quality); |
2179 | 0 | if (k == 1) |
2180 | 0 | self->hint[i].b0 = true; |
2181 | 0 | else if (k == 2) |
2182 | 0 | self->hint[i].b1 = true; |
2183 | 0 | { /* Step to the next pole in the range : */ |
2184 | 0 | jj = j; |
2185 | 0 | j = t1_hinter__segment_end(self, j); |
2186 | 0 | if (j <= jj) /* Rolled over contour end ? */ |
2187 | 0 | j = self->contour[self->pole[j].contour_index + 1]; /* Go to the next contour. */ |
2188 | 0 | } |
2189 | 0 | } |
2190 | 0 | } |
2191 | 0 | } |
2192 | | |
2193 | | static void t1_hinter__add_boundary_length(t1_hinter * self, t1_hint *hint, |
2194 | | int pole_index0, int pole_index1) |
2195 | 0 | { const t1_pole *pole = &self->pole[pole_index0]; |
2196 | 0 | int contour_index = pole->contour_index; |
2197 | 0 | int beg_contour_pole = self->contour[contour_index]; |
2198 | 0 | int end_contour_pole = self->contour[contour_index + 1] - 2; |
2199 | 0 | int i0 = ranger_step_b(pole_index0, beg_contour_pole, end_contour_pole); |
2200 | 0 | int i1 = ranger_step_f(pole_index1, beg_contour_pole, end_contour_pole); |
2201 | 0 | t1_glyph_space_coord g = (hint->type == hstem ? pole->gy : pole->gx); |
2202 | |
|
2203 | 0 | if (self->pole[i0].type == oncurve) |
2204 | 0 | i0 = pole_index0; |
2205 | 0 | if (self->pole[i1].type == oncurve) |
2206 | 0 | i1 = pole_index1; |
2207 | 0 | *(any_abs(hint->g0 - g) < any_abs(hint->g1 - g) ? &hint->boundary_length0 : &hint->boundary_length1) |
2208 | 0 | += (hint->type == hstem ? any_abs(self->pole[i0].gx - self->pole[i1].gx) |
2209 | 0 | : any_abs(self->pole[i0].gy - self->pole[i1].gy)); |
2210 | 0 | } |
2211 | | |
/* Walk all stem hints and, for every pole range where a hint applies,
   compute and store the grid-aligned coordinate for that stem side.
   Also accumulates boundary lengths used later to resolve alignment
   conflicts (see t1_hinter__interpolate_other_poles). */
static void t1_hinter__align_stem_commands(t1_hinter * self)
{   int i, j, jj, k;

    for(i = 0; i < self->hint_count; i++) {
        self->hint[i].boundary_length0 = self->hint[i].boundary_length1 = 0;
        if (self->hint[i].type == vstem || self->hint[i].type == hstem)
            /* Iterate the linked list of pole ranges attached to this hint : */
            for (k = self->hint[i].range_index; k != -1; k = self->hint_range[k].next) {
                int beg_range_pole = self->hint_range[k].beg_pole;
                int end_range_pole = self->hint_range[k].end_pole;
                bool horiz = (self->hint[i].type == hstem);
                int quality = max_int;

                if (self->pole[beg_range_pole].type == closepath) {
                    /* A workaround for a buggy font from the Bug 687393,
                       which defines a range with 'closepath' only. */
                    beg_range_pole++;
                    if (beg_range_pole > end_range_pole)
                        continue;
                }
                for (j = beg_range_pole; j <= end_range_pole;) {
                    if (self->pole[j].type == closepath) {
                        j++;
                        continue;
                    }
                    if (t1_hinter__is_stem_hint_applicable(self, &self->hint[i], j, &quality)) {
                        fixed t; /* Type 1 spec implies that it is 0 for curves, 0.5 for bars */
                        int segment_index = t1_hinter__find_stem_middle(self, &t, j, horiz);
                        t1_glyph_space_coord gc;
                        enum t1_align_type align = unaligned;

                        if (self->hint[i].side_mask != 3) {
                            /* An overal hint from the True Type autohinter. */
                            align = (self->hint[i].side_mask & 2 ? topzn : botzn);
                        } else if (self->autohinting && horiz) {
                            if (self->pole[segment_index].gy == self->hint[i].g0)
                                align = (self->hint[i].g0 > self->hint[i].g1 ? topzn : botzn);
                        }
                        align = t1_hinter__compute_aligned_coord(self, &gc,
                                    segment_index, t, &self->hint[i], align);
                        /* todo: optimize: primary commands don't need to align, if suppressed by secondary ones. */
                        t1_hint__set_aligned_coord(&self->hint[i], gc, &self->pole[j], align, quality);
                        jj = j;
                        /* Skip over the straight run that forms the stem edge : */
                        j = t1_hinter__skip_stem(self, j, horiz);
                        t1_hinter__add_boundary_length(self, &self->hint[i], jj, j);
                        if (j < jj) { /* Rolled over contour end ? */
                            j = self->contour[self->pole[j].contour_index + 1]; /* Go to the next contour. */
                            continue;
                        }
                    }
                    { /* Step to the next pole in the range : */
                        jj = j;
                        j = t1_hinter__segment_end(self, j);
                        if (j <= jj) /* Rolled over contour end ? */
                            j = self->contour[self->pole[j].contour_index + 1]; /* Go to the next contour. */
                    }
                }
            }
    }
}
2271 | | |
/* When several stems share a common side coordinate, propagate the strongest
   alignment among them to all members and demote the opposite sides, so the
   opposite side is recomputed from the aligned stem width.
   Implemented for Bug 687578 "T1 hinter disturbs stem width". */
static void t1_hinter__unfix_opposite_to_common(t1_hinter * self)
{   /* Implemented for Bug 687578 "T1 hinter disturbs stem width". */
    int i, j, k, m, n;
    t1_glyph_space_coord d, md;
    t1_glyph_space_coord *p_ci, *p_cj, *p_agj, agm;
    enum t1_align_type *p_aj, *p_ai, *p_oi, *p_oj, am;

    for (k = 0; k < 2; k++) { /* g0, g1 */
        /* Since the number of stems in a complex is usually small,
           we don't care about redundant computations. */
        for(i = 0; i < self->hint_count; i++) {
            if (self->hint[i].type == vstem || self->hint[i].type == hstem) {
                /* p_ai: alignment of the side selected by k; p_oi: the opposite side. */
                p_ai = (!k ? &self->hint[i].aligned0 : &self->hint[i].aligned1);
                p_oi = (!k ? &self->hint[i].aligned1 : &self->hint[i].aligned0);
                if (*p_ai > weak && *p_ai == *p_oi) {
                    p_ci = (!k ? &self->hint[i].g0 : &self->hint[i].g1);
                    md = any_abs(self->hint[i].g1 - self->hint[i].g0);
                    m = i;
                    am = *p_ai;
                    agm = (!k ? self->hint[m].ag0 : self->hint[m].ag1);
                    n = 0;
                    /* Find the strongest-aligned hint sharing this side coordinate : */
                    for(j = 0; j < self->hint_count; j++) {
                        if (j != i && self->hint[i].type == self->hint[j].type) {
                            p_cj = (!k ? &self->hint[j].g0 : &self->hint[j].g1);
                            if (*p_ci == *p_cj) {
                                n++;
                                p_aj = (!k ? &self->hint[j].aligned0 : &self->hint[j].aligned1);
                                d = any_abs(self->hint[j].g1 - self->hint[j].g0);
                                if (am < *p_aj) {
                                    md = d;
                                    m = j;
                                    am = *p_aj;
                                    agm = (!k ? self->hint[m].ag0 : self->hint[m].ag1);
                                } if (md < d) { /* NOTE(review): keeps md as the max width seen;
                                                   looks intentionally separate from the branch
                                                   above, but verify it shouldn't be 'else if'. */
                                    md = d;
                                }
                            }
                        }
                    }
                    if (n) {
                        /* Propagate the winner's alignment to all hints in the group : */
                        for(j = 0; j < self->hint_count; j++) {
                            p_cj = (!k ? &self->hint[j].g0 : &self->hint[j].g1);
                            if (*p_ci == *p_cj) {
                                p_aj = (!k ? &self->hint[j].aligned0 : &self->hint[j].aligned1);
                                p_oj = (!k ? &self->hint[j].aligned1 : &self->hint[j].aligned0);
                                p_agj = (!k ? &self->hint[j].ag0 : &self->hint[j].ag1);
                                *p_aj = am;
                                if (*p_oj == aligned)
                                    *p_oj = weak; /* Let the width computation re-place it. */
                                *p_agj = agm;
                            }
                        }
                    }
                }
            }
        }
    }
}
2330 | | |
2331 | | static void t1_hinter__compute_opposite_stem_coords(t1_hinter * self) |
2332 | 0 | { int i; |
2333 | |
|
2334 | 0 | for (i = 0; i < self->hint_count; i++) |
2335 | 0 | if ((self->hint[i].type == vstem || self->hint[i].type == hstem)) { |
2336 | 0 | t1_glyph_space_coord ag0 = self->hint[i].ag0; |
2337 | 0 | t1_glyph_space_coord ag1 = self->hint[i].ag1; |
2338 | 0 | enum t1_align_type aligned0 = self->hint[i].aligned0; |
2339 | 0 | enum t1_align_type aligned1 = self->hint[i].aligned1; |
2340 | 0 | t1_glyph_space_coord gw; |
2341 | |
|
2342 | 0 | gw = any_abs(self->hint[i].g1 - self->hint[i].g0); |
2343 | 0 | t1_hinter__align_stem_width(self, &gw, &self->hint[i]); |
2344 | 0 | if (self->hint[i].g1 - self->hint[i].g0 < 0) |
2345 | 0 | gw = -gw; |
2346 | 0 | if (aligned0 > aligned1) |
2347 | 0 | ag1 = ag0 + gw; |
2348 | 0 | else if (aligned0 < aligned1) |
2349 | 0 | ag0 = ag1 - gw; |
2350 | 0 | else { |
2351 | 0 | t1_glyph_space_coord d0 = any_abs(ag0 - self->hint[i].g0); |
2352 | 0 | t1_glyph_space_coord d1 = any_abs(ag1 - self->hint[i].g1); |
2353 | |
|
2354 | 0 | if (aligned0 == topzn || aligned1 == topzn) |
2355 | 0 | if (gw > 0) |
2356 | 0 | ag0 = ag1 - gw; |
2357 | 0 | else |
2358 | 0 | ag1 = ag0 + gw; |
2359 | 0 | else if (aligned0 == botzn || aligned1 == botzn) |
2360 | 0 | if (gw < 0) |
2361 | 0 | ag0 = ag1 - gw; |
2362 | 0 | else |
2363 | 0 | ag1 = ag0 + gw; |
2364 | 0 | else if (self->hint[i].type == hstem && |
2365 | 0 | min(any_abs(self->hint[i].g0 - self->ymid), any_abs(self->hint[i].g1 - self->ymid)) > |
2366 | 0 | (self->ymax - self->ymin) / 5) { |
2367 | 0 | if ((self->hint[i].g1 + self->hint[i].g0) / 2 > self->ymid) |
2368 | 0 | ag0 = ag1 - gw; |
2369 | 0 | else |
2370 | 0 | ag1 = ag0 + gw; |
2371 | 0 | } else { |
2372 | 0 | if (d0 < d1) |
2373 | 0 | ag1 = ag0 + gw; |
2374 | 0 | else |
2375 | 0 | ag0 = ag1 - gw; |
2376 | 0 | } |
2377 | 0 | } |
2378 | 0 | self->hint[i].ag0 = ag0; |
2379 | 0 | self->hint[i].ag1 = ag1; |
2380 | 0 | } |
2381 | 0 | } |
2382 | | |
2383 | | static int t1_hinter__store_hint_applying(t1_hinter * self, t1_hint *hint, int pole_index) |
2384 | 0 | { |
2385 | 0 | t1_hint_applying *ha; |
2386 | 0 | int code = t1_hinter__can_add_hint_applying(self, &ha); |
2387 | |
|
2388 | 0 | if (code < 0) |
2389 | 0 | return code; |
2390 | 0 | ha->pole = pole_index; |
2391 | 0 | ha->opposite = -1; |
2392 | 0 | self->hint_applying_count++; |
2393 | 0 | return 0; |
2394 | 0 | } |
2395 | | |
/* Transfer stem-hint alignment onto the individual oncurve poles that lie on
   either side of each stem, pairing opposite-side poles where possible.
   Returns 0 on success or a negative error code. */
static int t1_hinter__align_stem_poles(t1_hinter * self)
{   int i, j, k;
    t1_glyph_space_coord const fuzz = self->blue_fuzz; /* comparefiles/tpc2.ps */
    int code = 0;

    for (i = 0; i < self->hint_count; i++)
        if (self->hint[i].type == vstem || self->hint[i].type == hstem) {
            t1_hint * hint = &self->hint[i];
            t1_glyph_space_coord ag0 = hint->ag0, ag1 = hint->ag1;
            bool horiz = (hint->type == hstem);

            /* fixme: optimize: Reduce hint_applying with storing only one side of the hint. */
            /* Pass 1 : collect all oncurve poles lying on either stem side (within fuzz). */
            self->hint_applying_count = 0;
            for (k = self->hint[i].range_index; k != -1; k = self->hint_range[k].next) {
                int beg_range_pole = self->hint_range[k].beg_pole;
                int end_range_pole = self->hint_range[k].end_pole;

                for (j = beg_range_pole; j <= end_range_pole; j++) {
                    t1_pole * pole = &self->pole[j];

                    if (pole->type != oncurve)
                        continue;
                    if (!horiz && any_abs(pole->gx - hint->g0) <= fuzz)
                        code = t1_hinter__store_hint_applying(self, hint, j);
                    else if (!horiz && any_abs(pole->gx - hint->g1) <= fuzz)
                        code = t1_hinter__store_hint_applying(self, hint, j);
                    else if ( horiz && any_abs(pole->gy - hint->g0) <= fuzz)
                        code = t1_hinter__store_hint_applying(self, hint, j);
                    else if ( horiz && any_abs(pole->gy - hint->g1) <= fuzz)
                        code = t1_hinter__store_hint_applying(self, hint, j);
                    if (code < 0)
                        return code;
                }
            }
            /* Pass 2 : pair each collected pole with a nearby pole on the
               opposite stem side (within 5/4 of the stem width along the stem). */
            for (k = 0; k < self->hint_applying_count; k++) {
                t1_hint_applying *ha0 = &self->hint_applying[k];
                int pole_index0 = ha0->pole;
                t1_pole *pole0 = &self->pole[pole_index0];
                t1_glyph_space_coord g0 = (horiz ? pole0->gy : pole0->gx);
                t1_glyph_space_coord t0 = (horiz ? pole0->gx : pole0->gy);
                bool gt0 = any_abs(hint->g0 - g0) > any_abs(hint->g1 - g0); /* true : nearer to g1. */
                t1_glyph_space_coord d, md = any_abs(hint->g1 - hint->g0) * 5 / 4;
                int mj = -1;

                for (j = 0; j < self->hint_applying_count; j++) {
                    t1_hint_applying *ha1 = &self->hint_applying[j];
                    int pole_index1 = ha1->pole;
                    t1_pole *pole1 = &self->pole[pole_index1];
                    t1_glyph_space_coord g1 = (horiz ? pole1->gy : pole1->gx);
                    t1_glyph_space_coord t1 = (horiz ? pole1->gx : pole1->gy);
                    bool gt1 = any_abs(hint->g0 - g1) > any_abs(hint->g1 - g1);

                    if (gt0 != gt1) { /* Candidate is on the opposite side. */
                        d = any_abs(t1 - t0);
                        if (md > d) { /* NOTE(review): md is not updated to d here,
                                         so mj ends up the LAST candidate under the
                                         threshold, not the nearest — verify intent. */
                            mj = j;
                        }
                    }
                }
                if (mj != -1) {
                    ha0->opposite = mj;
                    self->hint_applying[mj].opposite = j; /* NOTE(review): j equals
                        hint_applying_count here; downstream only tests opposite == -1,
                        so any non-negative value works, but 'k' looks intended. */
                }
            }
            /* Pass 3 : write the aligned coordinate onto each pole, demoting
               unpaired poles to 'weak' alignment. */
            for (k = 0; k < self->hint_applying_count; k++) {
                t1_hint_applying *ha = &self->hint_applying[k];
                int pole_index = ha->pole;
                t1_pole *pole = &self->pole[pole_index];
                t1_glyph_space_coord g0 = (horiz ? pole->gy : pole->gx);
                bool gt0 = any_abs(hint->g0 - g0) > any_abs(hint->g1 - g0);
                enum t1_align_type align = (!gt0 ? hint->aligned0 : hint->aligned1);
                t1_glyph_space_coord ag = (!gt0 ? ag0 : ag1);
                t1_glyph_space_coord bl = (!gt0 ? hint->boundary_length1 : hint->boundary_length0); /* opposite */

                if (ha->opposite == -1)
                    align = weak;
                if (!horiz) {
                    if (pole->aligned_x < align)
                        pole->ax = ag, pole->aligned_x = align, pole->boundary_length_x = bl;
                } else {
                    if (pole->aligned_y < align)
                        pole->ay = ag, pole->aligned_y = align, pole->boundary_length_y = bl;
                }
            }
        }
    return 0;
}
2483 | | |
2484 | | static t1_hint * t1_hinter__find_vstem_by_center(t1_hinter * self, t1_glyph_space_coord gx) |
2485 | 0 | { /* Find vstem with axis near gx : */ |
2486 | 0 | int i; |
2487 | 0 | t1_hint * hint = NULL; |
2488 | 0 | t1_glyph_space_coord dx = fixed_1; |
2489 | |
|
2490 | 0 | for (i = 0; i < self->hint_count; i++) |
2491 | 0 | if (self->hint[i].type == vstem) { |
2492 | 0 | t1_glyph_space_coord d = any_abs(gx - (self->hint[i].ag0 + self->hint[i].ag1) / 2); |
2493 | |
|
2494 | 0 | if (dx > d) { |
2495 | 0 | dx = d; |
2496 | 0 | hint = &self->hint[i]; |
2497 | 0 | } |
2498 | 0 | } |
2499 | 0 | return hint; |
2500 | 0 | } |
2501 | | |
2502 | | static void t1_hinter__process_dotsection(t1_hinter * self, int beg_pole, int end_pole) |
2503 | 0 | { /* Since source outline must have oncurve poles at XY extremes, |
2504 | | we compute bounding box from poles. |
2505 | | */ |
2506 | 0 | int i; |
2507 | 0 | t1_glyph_space_coord min_gx = self->pole[beg_pole].gx, min_gy = self->pole[beg_pole].gy; |
2508 | 0 | t1_glyph_space_coord max_gx = min_gx, max_gy = min_gy; |
2509 | 0 | t1_glyph_space_coord center_gx, center_gy, center_agx, center_agy; |
2510 | 0 | t1_glyph_space_coord sx, sy; |
2511 | 0 | bool aligned_min_x = false, aligned_min_y = false, aligned_max_x = false, aligned_max_y = false; |
2512 | 0 | bool aligned_x, aligned_y; |
2513 | |
|
2514 | 0 | for (i = beg_pole + 1; i <= end_pole; i++) { |
2515 | 0 | t1_glyph_space_coord gx = self->pole[i].gx, gy = self->pole[i].gy; |
2516 | |
|
2517 | 0 | min_gx = min(min_gx, gx); |
2518 | 0 | min_gy = min(min_gy, gy); |
2519 | 0 | max_gx = max(max_gx, gx); |
2520 | 0 | max_gy = max(max_gy, gy); |
2521 | 0 | } |
2522 | 0 | for (i = beg_pole; i <= end_pole; i++) { |
2523 | 0 | if (self->pole[i].gx == min_gx) |
2524 | 0 | aligned_min_x |= self->pole[i].aligned_x; |
2525 | 0 | if (self->pole[i].gy == min_gy) |
2526 | 0 | aligned_min_y |= self->pole[i].aligned_y; |
2527 | 0 | if (self->pole[i].gx == max_gx) |
2528 | 0 | aligned_max_x |= self->pole[i].aligned_x; |
2529 | 0 | if (self->pole[i].gy == max_gy) |
2530 | 0 | aligned_max_y |= self->pole[i].aligned_y; |
2531 | 0 | } |
2532 | 0 | aligned_x = aligned_min_x && aligned_max_x; |
2533 | 0 | aligned_y = aligned_min_y && aligned_max_y; |
2534 | 0 | if (aligned_x && aligned_y) |
2535 | 0 | return; /* The contour was aligned with stem commands - nothing to do. */ |
2536 | 0 | center_gx = center_agx = (min_gx + max_gx) / 2; |
2537 | 0 | center_gy = center_agy = (min_gy + max_gy) / 2; |
2538 | 0 | if (!aligned_x) { |
2539 | | /* Heuristic : apply vstem if it is close to the center : */ |
2540 | 0 | t1_hint * hint = t1_hinter__find_vstem_by_center(self, center_gx); |
2541 | 0 | if (hint != NULL) { |
2542 | 0 | center_agx = (hint->ag0 + hint->ag1) / 2; /* Align with vstem */ |
2543 | 0 | aligned_x = true; |
2544 | 0 | } |
2545 | 0 | } |
2546 | 0 | t1_hinter__align_to_grid(self, self->g2o_fraction / 2, ¢er_agx, ¢er_agy, |
2547 | 0 | CONTRAST_STEMS || self->align_to_pixels); |
2548 | 0 | sx = center_agx - center_gx; |
2549 | 0 | sy = center_agy - center_gy; |
2550 | 0 | if (aligned_x) |
2551 | 0 | sx = 0; |
2552 | 0 | if (aligned_y) |
2553 | 0 | sy = 0; |
2554 | | /* Shift the contour (sets alignment flags to prevent interpolation) : */ |
2555 | 0 | for (i = beg_pole; i <= end_pole; i++) { |
2556 | 0 | self->pole[i].ax = self->pole[i].gx + sx; |
2557 | 0 | self->pole[i].ay = self->pole[i].gy + sy; |
2558 | 0 | self->pole[i].aligned_x |= !aligned_x; /* Prevent interpolation if we aligned it here. */ |
2559 | 0 | self->pole[i].aligned_y |= !aligned_y; |
2560 | 0 | } |
2561 | 0 | } |
2562 | | |
2563 | | static void t1_hinter__process_dotsections(t1_hinter * self) |
2564 | 0 | { int i; |
2565 | |
|
2566 | 0 | for(i = 0; i < self->hint_count; i++) |
2567 | 0 | if (self->hint[i].type == dot) { |
2568 | 0 | int pole_index = self->hint_range[self->hint[i].range_index].beg_pole; |
2569 | 0 | int contour_index = self->pole[pole_index].contour_index; |
2570 | 0 | int beg_pole = self->contour[contour_index]; |
2571 | 0 | int end_pole = self->contour[contour_index + 1] - 2; |
2572 | |
|
2573 | 0 | t1_hinter__process_dotsection(self, beg_pole, end_pole); |
2574 | 0 | } |
2575 | 0 | } |
2576 | | |
/* Interpolate aligned coordinates for all poles that were not directly
   aligned by stems or zones. For each axis, the contour is cut into runs
   between aligned poles, and the unaligned poles inside each run get their
   aligned coordinate by monotone linear interpolation between the run's
   endpoints (with extremal poles used as additional cut points to keep the
   interpolation monotone). Uses member offsets so the same code serves X
   and Y. */
static void t1_hinter__interpolate_other_poles(t1_hinter * self)
{   int i, j, k;

    for (k = 0; k<2; k++) { /* X, Y */
        /* gc: coordinate along this axis; wc: the transverse coordinate;
           ac: aligned coordinate; bl: boundary length; f: alignment flag. */
        t1_glyph_space_coord *p_gc = (!k ? &self->pole[0].gx : &self->pole[0].gy);
        t1_glyph_space_coord *p_wc = (!k ? &self->pole[0].gy : &self->pole[0].gx);
        t1_glyph_space_coord *p_ac = (!k ? &self->pole[0].ax : &self->pole[0].ay);
        t1_glyph_space_coord *p_bl = (!k ? &self->pole[0].boundary_length_x : &self->pole[0].boundary_length_y);
        enum t1_align_type *p_f = (!k ? &self->pole[0].aligned_x : &self->pole[0].aligned_y);
        int offset_gc = (char *)p_gc - (char *)&self->pole[0];
        int offset_wc = (char *)p_wc - (char *)&self->pole[0];
        int offset_ac = (char *)p_ac - (char *)&self->pole[0];
        int offset_bl = (char *)p_bl - (char *)&self->pole[0];
        int offset_f = (char *)p_f - (char *)&self->pole[0];

        for (i = 0; i < self->contour_count; i++) {
            int beg_contour_pole = self->contour[i];
            int end_contour_pole = self->contour[i + 1] - 2;
            int range_beg;

            /* Find the first aligned pole of the contour; skip the contour
               if it has none. */
            for (j = beg_contour_pole; j <= end_contour_pole; j++)
                if (*member_prt(enum t1_align_type, &self->pole[j], offset_f))
                    break;
            if (j > end_contour_pole)
                continue;
            range_beg = j;
            do {
                int start_pole = j, stop_pole = -1;
                t1_glyph_space_coord min_a, max_a;
                t1_glyph_space_coord min_g, max_g, g0, g1, a0, a1;
                int min_i = start_pole, max_i = start_pole, cut_l, l;
                bool moved = false;

                /* Choose stop_pole: walk forward to the next aligned pole,
                   but cut the run earlier at an extremal pole when the run
                   is not suitable for direct interpolation. */
                do {
                    int min_l = 0, max_l = 0;
                    int min_w, max_w, w0;

                    g0 = *member_prt(t1_glyph_space_coord, &self->pole[start_pole], offset_gc);
                    w0 = *member_prt(t1_glyph_space_coord, &self->pole[start_pole], offset_wc);
                    a0 = *member_prt(t1_glyph_space_coord, &self->pole[start_pole], offset_ac);
                    min_g = g0;
                    max_g = g0;
                    min_w = max_w = w0;
                    for (j = ranger_step_f(start_pole, beg_contour_pole, end_contour_pole), l = 1;
                         j != start_pole;
                         j = ranger_step_f(j, beg_contour_pole, end_contour_pole), l++) {
                        t1_glyph_space_coord g = * member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc);
                        t1_glyph_space_coord w = * member_prt(t1_glyph_space_coord, &self->pole[j], offset_wc);

                        if (min_g > g)
                            min_g = g, min_i = j, min_l = l;
                        if (max_g < g)
                            max_g = g, max_i = j, max_l = l;
                        if (min_w > w)
                            min_w = w;
                        if (max_w < w)
                            max_w = w;
                        if (*member_prt(enum t1_align_type, &self->pole[j], offset_f))
                            break;
                        if (j == stop_pole)
                            break;
                    }
                    stop_pole = j;
                    cut_l = l;
                    g1 = * member_prt(t1_glyph_space_coord, &self->pole[stop_pole], offset_gc);
                    a1 = * member_prt(t1_glyph_space_coord, &self->pole[stop_pole], offset_ac);

                    if (start_pole != stop_pole)
                        if (any_abs(g0 - g1) >= any_abs(a0 - a1) / 10)
                            if (any_abs(max_g - min_g) <= any_abs(max_w - min_w) / 4)
                                break; /* OK to interpolate. */
                    /* else break at an extremal pole : */
                    if (min_i != start_pole && min_l < cut_l && min_g != g0 && min_g != g1)
                        stop_pole = min_i, cut_l = min_l;
                    if (max_i != start_pole && max_l < cut_l && max_g != g0 && max_g != g1)
                        stop_pole = max_i, cut_l = max_l;
                } while (cut_l < l);
                /* Now start_pole and end_pole point to the contour interval to interpolate. */
                if (g0 < g1) {
                    min_g = g0;
                    max_g = g1;
                    min_a = a0;
                    max_a = a1;
                } else {
                    min_g = g1;
                    max_g = g0;
                    min_a = a1;
                    max_a = a0;
                }
                if (min_g == max_g && min_a != max_a) {
                    /* Alignment conflict, choose by boundary_length. */
                    if (* member_prt(t1_glyph_space_coord, &self->pole[start_pole], offset_bl) <
                        * member_prt(t1_glyph_space_coord, &self->pole[stop_pole], offset_bl))
                        min_a = max_a = a1;
                    else
                        min_a = max_a = a0;
                }
                /* Poles outside [min_g, max_g] get a plain shift : */
                for (j = start_pole; ;
                     j = ranger_step_f(j, beg_contour_pole, end_contour_pole)) {
                    t1_glyph_space_coord g = * member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc);

                    if (g <= min_g)
                        * member_prt(t1_glyph_space_coord, &self->pole[j], offset_ac) =
                            * member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc) + (min_a - min_g);
                    else if (g >= max_g)
                        * member_prt(t1_glyph_space_coord, &self->pole[j], offset_ac) =
                            * member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc) + (max_a - max_g);
                    if(moved && j == stop_pole)
                        break;
                    moved = true;
                }
                /* Poles strictly inside get linear interpolation in fixed point : */
                if (min_g < max_g) {
                    int24 div = max_g - min_g;
                    int24 mul = max_a - min_a;
                    /* Due to glyph coordinate definition, div is not smaller than 2^12.

                       In the following cycle we need to compute x*mul/div for 24-bit integers,
                       We replace this expression with x*u/2^12 where u = mul*2^12/div
                       (note that it's an approximation with relative precision 2^-12).

                       If mul or div are big, we drop 5 bits to fit them into int19.
                       Note that it's another approximation with relative precision 2^-14.
                       Let now they are m0 and d.

                       Then we compute :

                       q1 = m0 / d, r1 = m0 % d, m1 = r1 << 12; // r1 < 2^19, m0 < 2^12
                       q2 = m1 / d, r2 = m1 % d, m2 = r2 << 12; // r2 < 2^19, m1 < 2^12
                       q3 = m2 / d, r3 = m2 % d, m3 = r3 << 12; // r3 < 2^19, m2 < 2^12
                       and so on.

                       We have :

                       u = ((q1 + (q2 >> 12) + (q3 >> 24) + ...) << 12
                         = (q1 << 12) + q2 + (q3 >> 12) + ...
                         = (q1 << 12) + q2 .

                       Thus we got pretty nice formula without iterations. Implementing it below.
                     */
                    int24 m0 = mul, d = div, q1, q2, r1, m1, u;

                    if (m0 >= (1 << 19) || d >= (1 << 19))
                        m0 >>= 5, d >>= 5;
                    q1 = m0 / d, r1 = m0 % d, m1 = r1 << 12;
                    q2 = m1 / d;
                    u = (q1 << 12) + q2;
                    for (j = ranger_step_f(start_pole, beg_contour_pole, end_contour_pole); j != stop_pole;
                         j = ranger_step_f(j, beg_contour_pole, end_contour_pole)) {
                        t1_glyph_space_coord g = *member_prt(t1_glyph_space_coord, &self->pole[j], offset_gc);

                        if (min_g < g && g < max_g) {
                            t1_glyph_space_coord *a = member_prt(t1_glyph_space_coord, &self->pole[j], offset_ac);
                            t1_glyph_space_coord x = g - min_g;
                            t1_glyph_space_coord h = mul_shift(x, u, 12); /* It is x*u/2^12 */

                            /* h = (int24)(x * (double)mul / div + 0.5); Uncomment this to disable our tricks. */
                            *a = min_a + h;
                        }
                    }
                }
                j = stop_pole;
            } while (j != range_beg);
        }
    }
}
2742 | | |
/* Emit the hinted outline (aligned pole coordinates) into the output path,
   converting from glyph space to device space with g2d.
   Note the loop runs one extra iteration (i == contour_count) to emit the
   trailing moveto pole stored after the last contour.
   Returns 0 on success or a negative gx_path error code. */
static int t1_hinter__export(t1_hinter * self)
{   int i, j, code;
    fixed fx, fy;

    for(i = 0; ; i++) {
        int end_pole, beg_pole = self->contour[i];
        t1_pole *pole = & self->pole[beg_pole];

        g2d(self, pole->ax, pole->ay, &fx, &fy);
        code = gx_path_add_point(self->output_path, fx, fy);
        if (code < 0)
            return code;
        if (i >= self->contour_count)
            break; /* The point just added was the trailing moveto. */
        end_pole = self->contour[i + 1] - 2;
        for(j = beg_pole + 1; j <= end_pole; j++) {
            pole = & self->pole[j];
            g2d(self, pole->ax, pole->ay, &fx, &fy);
            if (pole->type == oncurve) {
                code = gx_path_add_line(self->output_path, fx, fy);
                if (code < 0)
                    return code;
            } else {
                /* An offcurve pole starts a curve: take the two following
                   control points, wrapping to the contour start if needed. */
                int j1 = j + 1, j2 = (j + 2 > end_pole ? beg_pole : j + 2);
                fixed fx1, fy1, fx2, fy2;

                g2d(self, self->pole[j1].ax, self->pole[j1].ay, &fx1, &fy1);
                g2d(self, self->pole[j2].ax, self->pole[j2].ay, &fx2, &fy2);
                code = gx_path_add_curve(self->output_path, fx, fy, fx1, fy1, fx2, fy2);
                if (code < 0)
                    return code;
                j+=2;
            }
        }
        code = gx_path_close_subpath(self->output_path);
        if (code < 0)
            return code;
    }
    return 0;
}
2783 | | |
2784 | | static int t1_hinter__add_trailing_moveto(t1_hinter * self) |
2785 | 1.67M | { t1_glyph_space_coord gx = self->width_gx, gy = self->width_gy; |
2786 | | |
2787 | | #if 0 /* self appears wrong due to several reasons : |
2788 | | 1. With TextAlphaBits=1, AlignToPixels must have no effect. |
2789 | | 2. ashow, awidthshow must add the width before alignment. |
2790 | | 4. In the PDF interpreter, Tc must add before alignment. |
2791 | | 5. Since a character origin is aligned, |
2792 | | rounding its width doesn't affect subsequent characters. |
2793 | | 6. When the character size is smaller than half pixel width, |
2794 | | glyph widths rounds to zero, causing overlapped glyphs. |
2795 | | (Bug 687719 "PDFWRITE corrupts letter spacing/placement"). |
2796 | | */ |
2797 | | if (self->align_to_pixels) |
2798 | | t1_hinter__align_to_grid(self, self->g2o_fraction, &gx, &gy, self->align_to_pixels); |
2799 | | #endif |
2800 | 1.67M | return t1_hinter__rmoveto(self, gx - self->cx, gy - self->cy); |
2801 | 1.67M | } |
2802 | | |
2803 | | int t1_hinter__endglyph(t1_hinter * self) |
2804 | 1.67M | { int code = 0; |
2805 | | |
2806 | 1.67M | code = t1_hinter__add_trailing_moveto(self); |
2807 | 1.67M | if (code < 0) |
2808 | 0 | goto exit; |
2809 | 1.67M | code = t1_hinter__end_subglyph(self); |
2810 | 1.67M | if (code < 0) |
2811 | 0 | goto exit; |
2812 | 1.67M | t1_hinter__adjust_matrix_precision(self, self->orig_gx, self->orig_gy); |
2813 | 1.67M | t1_hinter__compute_y_span(self); |
2814 | 1.67M | t1_hinter__simplify_representation(self); |
2815 | 1.67M | if (!self->disable_hinting && (self->grid_fit_x || self->grid_fit_y)) { |
2816 | 0 | if (self->FontType == 1) |
2817 | 0 | t1_hinter__compute_type1_stem_ranges(self); |
2818 | 0 | else |
2819 | 0 | t1_hinter__compute_type2_stem_ranges(self); |
2820 | 0 | t1_hinter__mark_existing_stems(self); |
2821 | 0 | t1_hinter_compute_stem_snap_range(self); |
2822 | 0 | t1_hinter__align_stem_commands(self); |
2823 | 0 | t1_hinter__unfix_opposite_to_common(self); |
2824 | 0 | t1_hinter__compute_opposite_stem_coords(self); |
2825 | | /* stem3 was processed in the Type 1 interpreter. */ |
2826 | 0 | code = t1_hinter__align_stem_poles(self); |
2827 | 0 | if (code < 0) |
2828 | 0 | goto exit; |
2829 | 0 | t1_hinter__process_dotsections(self); |
2830 | 0 | t1_hinter__interpolate_other_poles(self); |
2831 | 0 | } |
2832 | 1.67M | if (self->pole_count) { |
2833 | 0 | if (self->fix_contour_sign) { |
2834 | 0 | t1_hinter__fix_contour_signs(self); |
2835 | 0 | } |
2836 | 0 | code = t1_hinter__export(self); |
2837 | 0 | } |
2838 | 1.67M | exit: |
2839 | 1.67M | t1_hinter__free_arrays(self); |
2840 | 1.67M | return code; |
2841 | 1.67M | } |