Coverage Report

Created: 2025-07-07 10:01

/work/workdir/UnpackedTarball/harfbuzz/src/hb-ot-var-common.hh
Note: every instrumented line in this file has an execution count of 0; the
file is entirely unexercised by the measured run. The report's per-line
"Line" and "Count" columns are collapsed below, and the source is reproduced
as-is.
/*
 * Copyright © 2021  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"
#include "hb-priority-queue.hh"
#include "hb-subset-instancer-iup.hh"


namespace OT {


/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count).arrayZ;
      end_tuple = get_end_tuple (axis_count).arrayZ;
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple ((double) start, (double) peak, (double) end));
    }

    return true;
  }

  double calculate_scalar (hb_array_t<const int> coords, unsigned int coord_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_vector_t<hb_pair_t<int,int>> *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;
    unsigned step = 1;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.0;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        if (unlikely (index >= shared_tuple_active_idx->length))
          return 0.0;
        auto _ = (*shared_tuple_active_idx).arrayZ[index];
        if (_.second != -1)
        {
          start_idx = _.first;
          end_idx = _.second + 1;
          step = _.second - _.first;
        }
        else if (_.first != -1)
        {
          start_idx = _.first;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    double scalar = 1.0;
    for (unsigned int i = start_idx; i < end_idx; i += step)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.0;
        if (v < peak)
        { if (peak != start) scalar *= (double) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (double) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.0;
      else
        scalar *= (double) v / peak;
    }
    return scalar;
  }
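
  /* Worked example (illustrative) of the scalar computation above, for a
   * single axis with peak = 0.5 (F2DOT14 8192) and no intermediate region,
   * so the implied region is start = 0, end = 0.5:
   *   coord v = 0.25 (4096): scalar *= v / peak = 0.5
   *   coord v = 0.5  (8192): v == peak, so the axis contributes 1.0
   *   coord v = -0.1: outside [min(0,peak), max(0,peak)], returns 0.0
   * The per-axis factors multiply together across all active axes. */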

  bool           has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool   has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned      get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                   The low 12 bits are an index into a shared tuple
                                   records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                                /* Peak tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value.
                                 *
                                 * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                                /* Intermediate start tuple record for this tuple variation table — optional,
                                   determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                                /* Intermediate end tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

struct tuple_delta_t
{
  static constexpr bool realloc_move = true;  // Watch out when adding new members!

  public:
  hb_hashmap_t<hb_tag_t, Triple> axis_tuples;

  /* indices.length == point_count; indices[i] == true means point i is referenced */
  hb_vector_t<bool> indices;

  hb_vector_t<float> deltas_x;
  /* empty for cvar tuples */
  hb_vector_t<float> deltas_y;

  /* Compiled data: header and deltas.
   * Compiled point data is saved in a hashmap within tuple_variations_t,
   * because some point sets might be reused by different tuple variations. */
  hb_vector_t<unsigned char> compiled_tuple_header;
  hb_vector_t<unsigned char> compiled_deltas;

  /* compiled peak coords, empty for non-gvar tuples */
  hb_vector_t<char> compiled_peak_coords;

  tuple_delta_t () = default;
  tuple_delta_t (const tuple_delta_t& o) = default;

  friend void swap (tuple_delta_t& a, tuple_delta_t& b) noexcept
  {
    hb_swap (a.axis_tuples, b.axis_tuples);
    hb_swap (a.indices, b.indices);
    hb_swap (a.deltas_x, b.deltas_x);
    hb_swap (a.deltas_y, b.deltas_y);
    hb_swap (a.compiled_tuple_header, b.compiled_tuple_header);
    hb_swap (a.compiled_deltas, b.compiled_deltas);
    hb_swap (a.compiled_peak_coords, b.compiled_peak_coords);
  }

  tuple_delta_t (tuple_delta_t&& o) noexcept : tuple_delta_t ()
  { hb_swap (*this, o); }

  tuple_delta_t& operator = (tuple_delta_t&& o) noexcept
  {
    hb_swap (*this, o);
    return *this;
  }

  void remove_axis (hb_tag_t axis_tag)
  { axis_tuples.del (axis_tag); }

  bool set_tent (hb_tag_t axis_tag, Triple tent)
  { return axis_tuples.set (axis_tag, tent); }

  tuple_delta_t& operator += (const tuple_delta_t& o)
  {
    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (indices.arrayZ[i])
      {
        if (o.indices.arrayZ[i])
        {
          deltas_x[i] += o.deltas_x[i];
          if (deltas_y && o.deltas_y)
            deltas_y[i] += o.deltas_y[i];
        }
      }
      else
      {
        if (!o.indices.arrayZ[i]) continue;
        indices.arrayZ[i] = true;
        deltas_x[i] = o.deltas_x[i];
        if (deltas_y && o.deltas_y)
          deltas_y[i] = o.deltas_y[i];
      }
    }
    return *this;
  }

  tuple_delta_t& operator *= (float scalar)
  {
    if (scalar == 1.0f)
      return *this;

    unsigned num = indices.length;
    if (deltas_y)
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
        deltas_y[i] *= scalar;
      }
    else
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
      }
    return *this;
  }

  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
                                                          TripleDistances axis_triple_distances) const
  {
    hb_vector_t<tuple_delta_t> out;
    Triple *tent;
    if (!axis_tuples.has (axis_tag, &tent))
    {
      out.push (*this);
      return out;
    }

    if ((tent->minimum < 0.0 && tent->maximum > 0.0) ||
        !(tent->minimum <= tent->middle && tent->middle <= tent->maximum))
      return out;

    if (tent->middle == 0.0)
    {
      out.push (*this);
      return out;
    }

    rebase_tent_result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
    for (auto &t : solutions)
    {
      tuple_delta_t new_var = *this;
      if (t.second == Triple ())
        new_var.remove_axis (axis_tag);
      else
        new_var.set_tent (axis_tag, t.second);

      new_var *= t.first;
      out.push (std::move (new_var));
    }

    return out;
  }
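
  /* Illustrative note: rebase_tent () remaps this tuple's tent onto the new
   * axis range and returns a list of (scalar, tent) solutions.  For example,
   * pinning an axis exactly at the tent's peak is expected to yield a single
   * solution with scalar 1.0 and an empty Triple (): the loop above then
   * drops the axis and keeps the deltas unchanged.  Pinning away from the
   * peak instead scales the deltas by the tent's value at the pinned
   * location. */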

  bool compile_peak_coords (const hb_map_t& axes_index_map,
                            const hb_map_t& axes_old_index_tag_map)
  {
    unsigned axis_count = axes_index_map.get_population ();
    if (unlikely (!compiled_peak_coords.alloc (axis_count * F2DOT14::static_size)))
      return false;

    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i))
        continue;

      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      F2DOT14 peak_coord;
      if (axis_tuples.has (axis_tag, &coords))
        peak_coord.set_float (coords->middle);
      else
        peak_coord.set_int (0);

      /* push the F2DOT14 value into the char vector, big-endian */
      int16_t val = peak_coord.to_int ();
      compiled_peak_coords.push (static_cast<char> (val >> 8));
      compiled_peak_coords.push (static_cast<char> (val & 0xFF));
    }

    return !compiled_peak_coords.in_error ();
  }

  /* Deltas must be compiled before we compile the tuple variation header,
   * because we need to fill in the size of the serialized data for this
   * tuple variation. */
  bool compile_tuple_var_header (const hb_map_t& axes_index_map,
                                 unsigned points_data_length,
                                 const hb_map_t& axes_old_index_tag_map,
                                 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map)
  {
    /* compiled_deltas could be empty after IUP delta optimization; we can
     * skip compiling this tuple and return true */
    if (!compiled_deltas) return true;

    unsigned cur_axis_count = axes_index_map.get_population ();
    /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */
    unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4;
    if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false;

    unsigned flag = 0;
    /* skip the first 4 header bytes: varDataSize + tupleIndex */
    F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4);
    F2DOT14* end = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ());
    hb_array_t<F2DOT14> coords (p, end - p);

    /* encode peak coords */
    unsigned peak_count = 0;
    unsigned *shared_tuple_idx;
    if (shared_tuples_idx_map &&
        shared_tuples_idx_map->has (&compiled_peak_coords, &shared_tuple_idx))
    {
      flag = *shared_tuple_idx;
    }
    else
    {
      peak_count = encode_peak_coords (coords, flag, axes_index_map, axes_old_index_tag_map);
      if (!peak_count) return false;
    }

    /* encode intermediate coords; they are optional, so the returned count may be 0 */
    unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map);

    /* point data length == 0 implies "use shared points" */
    if (points_data_length)
      flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers;

    unsigned serialized_data_size = points_data_length + compiled_deltas.length;
    TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ());
    o->varDataSize = serialized_data_size;
    o->tupleIndex = flag;

    unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size);
    return compiled_tuple_header.resize (total_header_len);
  }
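
  /* Resulting header layout (matching the TupleVariationHeader format above):
   *   uint16  varDataSize       (byte size of the serialized point/delta data)
   *   uint16  tupleIndex        (flags | shared-tuple index)
   *   F2DOT14 peak coords       (present only if EmbeddedPeakTuple is set)
   *   F2DOT14 start+end coords  (present only if IntermediateRegion is set)
   * For example, one retained axis with an embedded peak and no intermediate
   * region compiles to 4 + 2 = 6 header bytes. */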

  unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords,
                               unsigned& flag,
                               const hb_map_t& axes_index_map,
                               const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    auto it = peak_coords.iter ();
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      if (!axis_tuples.has (axis_tag, &coords))
        (*it).set_int (0);
      else
        (*it).set_float (coords->middle);
      it++;
      count++;
    }
    flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple;
    return count;
  }

  /* Returns the number of encoded coords; returns 0 if no intermediate
   * coords need to be encoded. */
  unsigned encode_interm_coords (hb_array_t<F2DOT14> coords,
                                 unsigned& flag,
                                 const hb_map_t& axes_index_map,
                                 const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    unsigned cur_axis_count = axes_index_map.get_population ();

    auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter ();
    auto end_coords_iter = coords.sub_array (cur_axis_count).iter ();
    bool encode_needed = false;
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      float min_val = 0.f, val = 0.f, max_val = 0.f;
      if (axis_tuples.has (axis_tag, &coords))
      {
        min_val = coords->minimum;
        val = coords->middle;
        max_val = coords->maximum;
      }

      (*start_coords_iter).set_float (min_val);
      (*end_coords_iter).set_float (max_val);

      start_coords_iter++;
      end_coords_iter++;
      count += 2;
      if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f))
        encode_needed = true;
    }

    if (encode_needed)
    {
      flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion;
      return count;
    }
    return 0;
  }

  bool compile_deltas ()
  { return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); }

  static bool compile_deltas (hb_array_t<const bool> point_indices,
                              hb_array_t<const float> x_deltas,
                              hb_array_t<const float> y_deltas,
                              hb_vector_t<unsigned char> &compiled_deltas /* OUT */)
  {
    hb_vector_t<int> rounded_deltas;
    if (unlikely (!rounded_deltas.alloc (point_indices.length)))
      return false;

    for (unsigned i = 0; i < point_indices.length; i++)
    {
      if (!point_indices[i]) continue;
      int rounded_delta = (int) roundf (x_deltas.arrayZ[i]);
      rounded_deltas.push (rounded_delta);
    }

    if (!rounded_deltas) return true;
    /* allocate enough memory: worst case 5 bytes per delta */
    unsigned alloc_len = 5 * rounded_deltas.length;
    if (y_deltas)
      alloc_len *= 2;

    if (unlikely (!compiled_deltas.resize (alloc_len))) return false;

    unsigned encoded_len = compile_deltas (compiled_deltas, rounded_deltas);

    if (y_deltas)
    {
      /* reuse the rounded_deltas vector; check that y_deltas has the same
       * number of deltas as x_deltas */
      unsigned j = 0;
      for (unsigned idx = 0; idx < point_indices.length; idx++)
      {
        if (!point_indices[idx]) continue;
        int rounded_delta = (int) roundf (y_deltas.arrayZ[idx]);

        if (j >= rounded_deltas.length) return false;

        rounded_deltas[j++] = rounded_delta;
      }

      if (j != rounded_deltas.length) return false;
      encoded_len += compile_deltas (compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
    }
    return compiled_deltas.resize (encoded_len);
  }

  static unsigned compile_deltas (hb_array_t<unsigned char> encoded_bytes,
                                  hb_array_t<const int> deltas)
  {
    return TupleValues::compile (deltas, encoded_bytes);
  }
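
  /* Illustrative example of the packed-delta encoding produced by
   * TupleValues::compile (), following the OpenType "packed deltas" format:
   * each run starts with a control byte, where 0x80 marks a run of zeroes,
   * 0x40 a run of 16-bit words, and the low 6 bits hold (run length - 1).
   * The deltas {1, 2, 0, 0, 0, 300} compile to seven bytes:
   *   0x01 0x01 0x02   (two byte-sized deltas: 1, 2)
   *   0x82             (three zeroes)
   *   0x40 0x01 0x2C   (one word-sized delta: 300) */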

  bool calc_inferred_deltas (const contour_point_vector_t& orig_points)
  {
    unsigned point_count = orig_points.length;
    if (point_count != indices.length)
      return false;

    unsigned ref_count = 0;
    hb_vector_t<unsigned> end_points;

    for (unsigned i = 0; i < point_count; i++)
    {
      if (indices.arrayZ[i])
        ref_count++;
      if (orig_points.arrayZ[i].is_end_point)
        end_points.push (i);
    }
    /* all points are referenced, nothing to do */
    if (ref_count == point_count)
      return true;
    if (unlikely (end_points.in_error ())) return false;

    hb_set_t inferred_idxes;
    unsigned start_point = 0;
    for (unsigned end_point : end_points)
    {
      /* Check the number of unreferenced points in a contour. If there are no
       * unreferenced points or no referenced points, there is nothing to do. */
      unsigned unref_count = 0;
      for (unsigned i = start_point; i < end_point + 1; i++)
        unref_count += indices.arrayZ[i];
      unref_count = (end_point - start_point + 1) - unref_count;

      unsigned j = start_point;
      if (unref_count == 0 || unref_count > end_point - start_point)
        goto no_more_gaps;
      for (;;)
      {
        /* Locate the next gap of unreferenced points between two referenced
         * points prev and next.
         * Note that a gap may wrap around at left (start_point) and/or at
         * right (end_point).
         */
        unsigned int prev, next, i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (indices.arrayZ[i] && !indices.arrayZ[j]) break;
        }
        prev = j = i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (!indices.arrayZ[i] && indices.arrayZ[j]) break;
        }
        next = j;
        /* Infer deltas for all unreferenced points in the gap between prev and next */
        i = prev;
        for (;;)
        {
          i = next_index (i, start_point, end_point);
          if (i == next) break;
          deltas_x.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].x,
                                            (double) orig_points.arrayZ[prev].x,
                                            (double) orig_points.arrayZ[next].x,
                                            (double) deltas_x.arrayZ[prev], (double) deltas_x.arrayZ[next]);
          deltas_y.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].y,
                                            (double) orig_points.arrayZ[prev].y,
                                            (double) orig_points.arrayZ[next].y,
                                            (double) deltas_y.arrayZ[prev], (double) deltas_y.arrayZ[next]);
          inferred_idxes.add (i);
          if (--unref_count == 0) goto no_more_gaps;
        }
      }
    no_more_gaps:
      start_point = end_point + 1;
    }

    for (unsigned i = 0; i < point_count; i++)
    {
      /* If a point is not referenced and its deltas were not inferred, set
       * them to 0; then reference all points, as gvar requires. */
      if (!indices[i])
      {
        if (!inferred_idxes.has (i))
        {
          deltas_x.arrayZ[i] = 0.0;
          deltas_y.arrayZ[i] = 0.0;
        }
        indices[i] = true;
      }
    }
    return true;
  }

  bool optimize (const contour_point_vector_t& contour_points,
                 bool is_composite,
                 double tolerance = 0.5 + 1e-10)
  {
    unsigned count = contour_points.length;
    if (deltas_x.length != count ||
        deltas_y.length != count)
      return false;

    hb_vector_t<bool> opt_indices;
    hb_vector_t<int> rounded_x_deltas, rounded_y_deltas;

    if (unlikely (!rounded_x_deltas.alloc (count) ||
                  !rounded_y_deltas.alloc (count)))
      return false;

    for (unsigned i = 0; i < count; i++)
    {
      int rounded_x_delta = (int) roundf (deltas_x.arrayZ[i]);
      int rounded_y_delta = (int) roundf (deltas_y.arrayZ[i]);
      rounded_x_deltas.push (rounded_x_delta);
      rounded_y_deltas.push (rounded_y_delta);
    }

    if (!iup_delta_optimize (contour_points, rounded_x_deltas, rounded_y_deltas, opt_indices, tolerance))
      return false;

    unsigned ref_count = 0;
    for (bool ref_flag : opt_indices)
      ref_count += ref_flag;

    if (ref_count == count) return true;

    hb_vector_t<float> opt_deltas_x, opt_deltas_y;
    bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0);
    if (is_comp_glyph_wo_deltas)
    {
      if (unlikely (!opt_deltas_x.resize (count) ||
                    !opt_deltas_y.resize (count)))
        return false;

      opt_indices.arrayZ[0] = true;
      for (unsigned i = 1; i < count; i++)
        opt_indices.arrayZ[i] = false;
    }

    hb_vector_t<unsigned char> opt_point_data;
    if (!compile_point_set (opt_indices, opt_point_data))
      return false;
    hb_vector_t<unsigned char> opt_deltas_data;
    if (!compile_deltas (opt_indices,
                         is_comp_glyph_wo_deltas ? opt_deltas_x : deltas_x,
                         is_comp_glyph_wo_deltas ? opt_deltas_y : deltas_y,
                         opt_deltas_data))
      return false;

    hb_vector_t<unsigned char> point_data;
    if (!compile_point_set (indices, point_data))
      return false;
    hb_vector_t<unsigned char> deltas_data;
    if (!compile_deltas (indices, deltas_x, deltas_y, deltas_data))
      return false;

    if (opt_point_data.length + opt_deltas_data.length < point_data.length + deltas_data.length)
    {
      indices.fini ();
      indices = std::move (opt_indices);

      if (is_comp_glyph_wo_deltas)
      {
        deltas_x.fini ();
        deltas_x = std::move (opt_deltas_x);

        deltas_y.fini ();
        deltas_y = std::move (opt_deltas_y);
      }
    }
    return !indices.in_error () && !deltas_x.in_error () && !deltas_y.in_error ();
  }

  static bool compile_point_set (const hb_vector_t<bool> &point_indices,
                                 hb_vector_t<unsigned char>& compiled_points /* OUT */)
  {
    unsigned num_points = 0;
    for (bool i : point_indices)
      if (i) num_points++;

    /* when IUP optimization is enabled, the number of referenced points could be 0 */
    if (!num_points) return true;

    unsigned indices_length = point_indices.length;
    /* If the point set consists of all points in the glyph, it is encoded as
     * a single zero byte */
    if (num_points == indices_length)
      return compiled_points.resize (1);

    /* allocate enough memory: 2 bytes for the count + 3 bytes for each point */
    unsigned num_bytes = 2 + 3 * num_points;
    if (unlikely (!compiled_points.resize (num_bytes, false)))
      return false;

    unsigned pos = 0;
    /* binary data starts with the total number of referenced points */
    if (num_points < 0x80)
      compiled_points.arrayZ[pos++] = num_points;
    else
    {
      compiled_points.arrayZ[pos++] = ((num_points >> 8) | 0x80);
      compiled_points.arrayZ[pos++] = num_points & 0xFF;
    }

    const unsigned max_run_length = 0x7F;
    unsigned i = 0;
    unsigned last_value = 0;
    unsigned num_encoded = 0;
    while (i < indices_length && num_encoded < num_points)
    {
      unsigned run_length = 0;
      unsigned header_pos = pos;
      compiled_points.arrayZ[pos++] = 0;

      bool use_byte_encoding = false;
      bool new_run = true;
      while (i < indices_length && num_encoded < num_points &&
             run_length <= max_run_length)
      {
        // find the next referenced point index
        while (i < indices_length && !point_indices[i])
          i++;

        if (i >= indices_length) break;

        unsigned cur_value = i;
        unsigned delta = cur_value - last_value;

        if (new_run)
        {
          use_byte_encoding = (delta <= 0xFF);
          new_run = false;
        }

        if (use_byte_encoding && delta > 0xFF)
          break;

        if (use_byte_encoding)
          compiled_points.arrayZ[pos++] = delta;
        else
        {
          compiled_points.arrayZ[pos++] = delta >> 8;
          compiled_points.arrayZ[pos++] = delta & 0xFF;
        }
        i++;
        last_value = cur_value;
        run_length++;
        num_encoded++;
      }

      if (use_byte_encoding)
        compiled_points.arrayZ[header_pos] = run_length - 1;
      else
        compiled_points.arrayZ[header_pos] = (run_length - 1) | 0x80;
    }
    return compiled_points.resize (pos, false);
  }
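
  /* Illustrative example of the packed point-number encoding above: for a
   * 10-point glyph where only points {2, 5, 6} are referenced, the output is
   *   0x03             (three points follow)
   *   0x02             (run header: 3 byte-sized deltas)
   *   0x02 0x03 0x01   (point deltas: 2, 5-2, 6-5)
   * i.e. point indices are stored as deltas from the previous index, and a
   * run switches to 16-bit encoding only when a delta exceeds 255. */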

  static double infer_delta (double target_val, double prev_val, double next_val, double prev_delta, double next_delta)
  {
    if (prev_val == next_val)
      return (prev_delta == next_delta) ? prev_delta : 0.0;
    else if (target_val <= hb_min (prev_val, next_val))
      return (prev_val < next_val) ? prev_delta : next_delta;
    else if (target_val >= hb_max (prev_val, next_val))
      return (prev_val > next_val) ? prev_delta : next_delta;

    double r = (target_val - prev_val) / (next_val - prev_val);
    return prev_delta + r * (next_delta - prev_delta);
  }
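
  /* Worked example of the interpolation above: with prev_val = 100,
   * next_val = 200, target_val = 150, prev_delta = 10 and next_delta = 20,
   * r = (150 - 100) / (200 - 100) = 0.5, so the inferred delta is
   * 10 + 0.5 * (20 - 10) = 15.  Targets outside [prev_val, next_val] take
   * the delta of the reference point on that side. */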

  static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
  { return (i >= end) ? start : (i + 1); }
};

template <typename OffType = HBUINT16>
struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // Check min_size only; TupleVariationHeader and the variation data are
    // checked while accessing them through the iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount.get_count ();
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t;
  struct tuple_variations_t
  {
    hb_vector_t<tuple_delta_t> tuple_vars;

    private:
    /* map from a referenced point set to its compiled point data */
    hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<unsigned char>> point_data_map;
    /* map from a referenced point set to its use count, used in finding shared points */
    hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;

    /* empty for non-gvar tuples.
     * shared_points_bytes is a pointer to some value in the point_data_map,
     * which will be freed during map destruction. Save it for serialization,
     * so there is no need to run find_shared_points () again. */
    hb_vector_t<unsigned char> *shared_points_bytes = nullptr;

    /* total compiled byte size as TupleVariationData format, initialized to 0 */
    unsigned compiled_byte_size = 0;
    bool needs_padding = false;

    /* for gvar IUP delta optimization: whether this is a composite glyph */
    bool is_composite = false;

    public:
    tuple_variations_t () = default;
    tuple_variations_t (const tuple_variations_t&) = delete;
    tuple_variations_t& operator=(const tuple_variations_t&) = delete;
    tuple_variations_t (tuple_variations_t&&) = default;
    tuple_variations_t& operator=(tuple_variations_t&&) = default;
    ~tuple_variations_t () = default;

    explicit operator bool () const { return bool (tuple_vars); }
    unsigned get_var_count () const
    {
      unsigned count = 0;
      /* when IUP delta optimization is enabled, compiled_deltas could be
       * empty and such tuples should be skipped */
      for (auto& tuple: tuple_vars)
        if (tuple.compiled_deltas) count++;

      if (shared_points_bytes && shared_points_bytes->length)
        count |= TupleVarCount::SharedPointNumbers;
      return count;
    }

    unsigned get_compiled_byte_size () const
    { return compiled_byte_size; }

    bool create_from_tuple_var_data (tuple_iterator_t iterator,
                                     unsigned tuple_var_count,
                                     unsigned point_count,
                                     bool is_gvar,
                                     const hb_map_t *axes_old_index_tag_map,
                                     const hb_vector_t<unsigned> &shared_indices,
                                     const hb_array_t<const F2DOT14> shared_tuples,
                                     bool is_composite_glyph)
    {
      do
      {
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        hb_hashmap_t<hb_tag_t, Triple> axis_tuples;
        if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples)
            || axis_tuples.is_empty ())
          return false;

        hb_vector_t<unsigned> private_indices;
        bool has_private_points = iterator.current_tuple->has_private_points ();
        const HBUINT8 *end = p + length;
        if (has_private_points &&
            !TupleVariationData::decompile_points (p, private_indices, end))
          return false;

        const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices;
        bool apply_to_all = (indices.length == 0);
        unsigned num_deltas = apply_to_all ? point_count : indices.length;

        hb_vector_t<int> deltas_x;

        if (unlikely (!deltas_x.resize (num_deltas, false) ||
                      !TupleVariationData::decompile_deltas (p, deltas_x, end)))
          return false;

        hb_vector_t<int> deltas_y;
        if (is_gvar)
        {
          if (unlikely (!deltas_y.resize (num_deltas, false) ||
                        !TupleVariationData::decompile_deltas (p, deltas_y, end)))
            return false;
        }

        tuple_delta_t var;
        var.axis_tuples = std::move (axis_tuples);
        if (unlikely (!var.indices.resize (point_count) ||
                      !var.deltas_x.resize (point_count, false)))
          return false;

        if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false)))
          return false;

        for (unsigned i = 0; i < num_deltas; i++)
        {
          unsigned idx = apply_to_all ? i : indices[i];
          if (idx >= point_count) continue;
          var.indices[idx] = true;
          var.deltas_x[idx] = deltas_x[i];
          if (is_gvar)
            var.deltas_y[idx] = deltas_y[i];
        }
        tuple_vars.push (std::move (var));
      } while (iterator.move_to_next ());

      is_composite = is_composite_glyph;
      return true;
    }

    bool create_from_item_var_data (const VarData &var_data,
                                    const hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>>& regions,
                                    const hb_map_t& axes_old_index_tag_map,
                                    unsigned& item_count,
                                    const hb_inc_bimap_t* inner_map = nullptr)
    {
      /* NULL offset; return immediately to keep the original varidx valid */
      if (&var_data == &Null (VarData))
        return true;

      unsigned num_regions = var_data.get_region_index_count ();
      if (!tuple_vars.alloc (num_regions)) return false;

      item_count = inner_map ? inner_map->get_population () : var_data.get_item_count ();
      if (!item_count) return true;
      unsigned row_size = var_data.get_row_size ();
      const HBUINT8 *delta_bytes = var_data.get_delta_bytes ();

      for (unsigned r = 0; r < num_regions; r++)
      {
        /* In VarData, deltas are organized in rows; convert them into
         * column (region) based tuples. Resize deltas_x first. */
        tuple_delta_t tuple;
        if (!tuple.deltas_x.resize (item_count, false) ||
            !tuple.indices.resize (item_count, false))
          return false;

        for (unsigned i = 0; i < item_count; i++)
        {
          tuple.indices.arrayZ[i] = true;
          tuple.deltas_x.arrayZ[i] = var_data.get_item_delta_fast (inner_map ? inner_map->backward (i) : i,
                                                                   r, delta_bytes, row_size);
        }

        unsigned region_index = var_data.get_region_index (r);
        if (region_index >= regions.length) return false;
        tuple.axis_tuples = regions.arrayZ[region_index];

        tuple_vars.push (std::move (tuple));
      }
      return !tuple_vars.in_error ();
    }

    private:
    static int _cmp_axis_tag (const void *pa, const void *pb)
    {
      const hb_tag_t *a = (const hb_tag_t*) pa;
      const hb_tag_t *b = (const hb_tag_t*) pb;
      return (int)(*a) - (int)(*b);
    }

    bool change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      /* sort the axis tags so the result is deterministic */
      hb_vector_t<hb_tag_t> axis_tags;
      if (!axis_tags.alloc (normalized_axes_location.get_population ()))
        return false;
      for (auto t : normalized_axes_location.keys ())
        axis_tags.push (t);

      axis_tags.qsort (_cmp_axis_tag);
      for (auto axis_tag : axis_tags)
      {
        Triple *axis_limit;
        if (!normalized_axes_location.has (axis_tag, &axis_limit))
          return false;
        TripleDistances axis_triple_distances{1.0, 1.0};
        if (axes_triple_distances.has (axis_tag))
          axis_triple_distances = axes_triple_distances.get (axis_tag);

        hb_vector_t<tuple_delta_t> new_vars;
        for (const tuple_delta_t& var : tuple_vars)
        {
          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, *axis_limit, axis_triple_distances);
          if (!out) continue;

          unsigned new_len = new_vars.length + out.length;

          if (unlikely (!new_vars.alloc (new_len, false)))
            return false;

          for (unsigned i = 0; i < out.length; i++)
            new_vars.push (std::move (out[i]));
        }
        tuple_vars.fini ();
        tuple_vars = std::move (new_vars);
      }
      return true;
    }

    /* merge tuple variations with overlapping tents; if IUP delta
     * optimization is enabled, add default deltas to contour_points */
    bool merge_tuple_variations (contour_point_vector_t* contour_points = nullptr)
    {
      hb_vector_t<tuple_delta_t> new_vars;
      hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m;
      unsigned i = 0;
      for (const tuple_delta_t& var : tuple_vars)
      {
        /* if all axes are pinned, drop the tuple variation */
        if (var.axis_tuples.is_empty ())
        {
          /* if iup_delta_optimize is enabled, add deltas to contour coords */
          if (contour_points && !contour_points->add_deltas (var.deltas_x,
                                                             var.deltas_y,
                                                             var.indices))
            return false;
          continue;
        }

        unsigned *idx;
        if (m.has (&(var.axis_tuples), &idx))
        {
          new_vars[*idx] += var;
        }
        else
        {
          new_vars.push (var);
          if (!m.set (&(var.axis_tuples), i))
            return false;
          i++;
        }
      }
      tuple_vars.fini ();
      tuple_vars = std::move (new_vars);
      return true;
    }

    /* Compile all point sets and store the byte data in a point_set ->
     * compiled-bytes hashmap; also update the point_set -> count map, which
     * will be used in finding the shared point set. */
    bool compile_all_point_sets ()
    {
      for (const auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        if (point_data_map.has (points_set))
        {
          unsigned *count;
          if (unlikely (!point_set_count_map.has (points_set, &count) ||
                        !point_set_count_map.set (points_set, (*count) + 1)))
            return false;
          continue;
        }

        hb_vector_t<unsigned char> compiled_point_data;
        if (!tuple_delta_t::compile_point_set (*points_set, compiled_point_data))
          return false;

        if (!point_data_map.set (points_set, std::move (compiled_point_data)) ||
            !point_set_count_map.set (points_set, 1))
          return false;
      }
      return true;
    }

    /* find the shared point set that saves the most bytes */
    void find_shared_points ()
    {
      unsigned max_saved_bytes = 0;

      for (const auto& _ : point_data_map.iter_ref ())
      {
        const hb_vector_t<bool>* points_set = _.first;
        unsigned data_length = _.second.length;
        if (!data_length) continue;
        unsigned *count;
        if (unlikely (!point_set_count_map.has (points_set, &count) ||
                      *count <= 1))
        {
          shared_points_bytes = nullptr;
          return;
        }

        unsigned saved_bytes = data_length * ((*count) - 1);
        if (saved_bytes > max_saved_bytes)
        {
          max_saved_bytes = saved_bytes;
          shared_points_bytes = &(_.second);
        }
      }
    }

    bool calc_inferred_deltas (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
        if (!var.calc_inferred_deltas (contour_points))
          return false;

      return true;
    }

    bool iup_optimize (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
      {
        if (!var.optimize (contour_points, is_composite))
          return false;
      }
      return true;
    }

    public:
    bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                      const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances,
                      contour_point_vector_t* contour_points = nullptr,
                      bool optimize = false)
    {
      if (!tuple_vars) return true;
      if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances))
        return false;
      /* compute inferred deltas only for gvar */
      if (contour_points)
        if (!calc_inferred_deltas (*contour_points))
          return false;

      /* if IUP delta optimization is on, contour_points can't be null */
      if (optimize && !contour_points)
        return false;

      if (!merge_tuple_variations (optimize ? contour_points : nullptr))
        return false;

      if (optimize && !iup_optimize (*contour_points)) return false;
      return !tuple_vars.in_error ();
    }
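
    /* Illustrative summary of the instancing pipeline above: limiting the
     * axes first rewrites every tuple's tents for the new ranges (possibly
     * splitting or dropping tuples); inferred deltas are then materialized
     * so every point carries an explicit delta; tuples whose tents became
     * identical are merged; and finally the optional IUP pass re-drops
     * deltas that interpolation can reproduce within tolerance. */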
1219
1220
    bool compile_bytes (const hb_map_t& axes_index_map,
1221
                        const hb_map_t& axes_old_index_tag_map,
1222
                        bool use_shared_points,
1223
                        bool is_gvar = false,
1224
                        const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map = nullptr)
1225
    {
1226
      // return true for empty glyph
1227
      if (!tuple_vars)
1228
        return true;
1229
1230
      // compile points set and store data in hashmap
1231
      if (!compile_all_point_sets ())
1232
        return false;
1233
1234
      /* total compiled byte size as TupleVariationData format, initialized to its
1235
       * min_size: 4 */
1236
      compiled_byte_size += 4;
1237
1238
      if (use_shared_points)
1239
      {
1240
        find_shared_points ();
1241
        if (shared_points_bytes)
1242
          compiled_byte_size += shared_points_bytes->length;
1243
      }
1244
      // compile delta and tuple var header for each tuple variation
1245
      for (auto& tuple: tuple_vars)
1246
      {
1247
        const hb_vector_t<bool>* points_set = &(tuple.indices);
1248
        hb_vector_t<unsigned char> *points_data;
1249
        if (unlikely (!point_data_map.has (points_set, &points_data)))
1250
          return false;
1251
1252
        /* when iup optimization is enabled, num of referenced points could be 0
1253
         * and thus the compiled points bytes is empty, we should skip compiling
1254
         * this tuple */
1255
        if (!points_data->length)
1256
          continue;
1257
        if (!tuple.compile_deltas ())
1258
          return false;
1259
1260
        unsigned points_data_length = (points_data != shared_points_bytes) ? points_data->length : 0;
1261
        if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map,
1262
                                             shared_tuples_idx_map))
1263
          return false;
1264
        compiled_byte_size += tuple.compiled_tuple_header.length + points_data_length + tuple.compiled_deltas.length;
1265
      }
1266
1267
      if (is_gvar && (compiled_byte_size % 2))
1268
      {
1269
        needs_padding = true;
1270
        compiled_byte_size += 1;
1271
      }
1272
1273
      return true;
1274
    }
1275
1276
    bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const
1277
    {
1278
      TRACE_SERIALIZE (this);
1279
      for (const auto& tuple: tuple_vars)
1280
      {
1281
        tuple.compiled_tuple_header.as_array ().copy (c);
1282
        if (c->in_error ()) return_trace (false);
1283
        total_header_len += tuple.compiled_tuple_header.length;
1284
      }
1285
      return_trace (true);
1286
    }
1287
1288
    bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const
1289
    {
1290
      TRACE_SERIALIZE (this);
1291
      if (is_gvar && shared_points_bytes)
1292
      {
1293
        hb_ubytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length);
1294
        s.copy (c);
1295
      }
1296
1297
      for (const auto& tuple: tuple_vars)
1298
      {
1299
        const hb_vector_t<bool>* points_set = &(tuple.indices);
1300
        hb_vector_t<unsigned char> *point_data;
1301
        if (!point_data_map.has (points_set, &point_data))
1302
          return_trace (false);
1303
1304
        if (!is_gvar || point_data != shared_points_bytes)
1305
        {
1306
          hb_ubytes_t s (point_data->arrayZ, point_data->length);
1307
          s.copy (c);
1308
        }
1309
1310
        tuple.compiled_deltas.as_array ().copy (c);
1311
        if (c->in_error ()) return_trace (false);
1312
      }
1313
1314
      /* padding for gvar */
1315
      if (is_gvar && needs_padding)
1316
      {
1317
        HBUINT8 pad;
1318
        pad = 0;
1319
        if (!c->embed (pad)) return_trace (false);
1320
      }
1321
      return_trace (true);
1322
    }
1323
  };
1324
1325
  struct tuple_iterator_t
1326
  {
1327
    unsigned get_axis_count () const { return axis_count; }
1328
1329
    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
1330
0
    {
1331
0
      var_data_bytes = var_data_bytes_;
1332
0
      var_data = var_data_bytes_.as<TupleVariationData> ();
1333
0
      index = 0;
1334
0
      axis_count = axis_count_;
1335
0
      current_tuple = &var_data->get_tuple_var_header ();
1336
0
      data_offset = 0;
1337
0
      table_base = table_base_;
1338
0
    }
1339
1340
    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
1341
0
    {
1342
0
      if (var_data->has_shared_point_numbers ())
1343
0
      {
1344
0
        const HBUINT8 *base = &(table_base+var_data->data);
1345
0
        const HBUINT8 *p = base;
1346
0
        if (!decompile_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
1347
0
        data_offset = p - base;
1348
0
      }
1349
0
      return true;
1350
0
    }
1351
1352
    bool is_valid () const
1353
0
    {
1354
0
      return (index < var_data->tupleVarCount.get_count ()) &&
1355
0
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
1356
0
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
1357
0
                                                                current_tuple->get_size (axis_count)));
1358
0
    }
1359
1360
    bool move_to_next ()
1361
0
    {
1362
0
      data_offset += current_tuple->get_data_size ();
1363
0
      current_tuple = &current_tuple->get_next (axis_count);
1364
0
      index++;
1365
0
      return is_valid ();
1366
0
    }
1367
1368
    const HBUINT8 *get_serialized_data () const
1369
0
    { return &(table_base+var_data->data) + data_offset; }
1370
1371
    private:
1372
    const TupleVariationData *var_data;
1373
    unsigned int index;
1374
    unsigned int axis_count;
1375
    unsigned int data_offset;
1376
    const void *table_base;
1377
1378
    public:
1379
    hb_bytes_t var_data_bytes;
1380
    const TupleVariationHeader *current_tuple;
1381
  };
1382
1383
  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
1384
                                  const void *table_base,
1385
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
1386
                                  tuple_iterator_t *iterator /* OUT */)
1387
0
  {
1388
0
    iterator->init (var_data_bytes, axis_count, table_base);
1389
0
    if (!iterator->get_shared_indices (shared_indices))
1390
0
      return false;
1391
0
    return iterator->is_valid ();
1392
0
  }
1393
1394
0
  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
1395
1396
  static bool decompile_points (const HBUINT8 *&p /* IN/OUT */,
1397
        hb_vector_t<unsigned int> &points /* OUT */,
1398
        const HBUINT8 *end)
1399
0
  {
1400
0
    enum packed_point_flag_t
1401
0
    {
1402
0
      POINTS_ARE_WORDS     = 0x80,
1403
0
      POINT_RUN_COUNT_MASK = 0x7F
1404
0
    };
1405
1406
0
    if (unlikely (p + 1 > end)) return false;
1407
1408
0
    unsigned count = *p++;
1409
0
    if (count & POINTS_ARE_WORDS)
1410
0
    {
1411
0
      if (unlikely (p + 1 > end)) return false;
1412
0
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
1413
0
    }
1414
0
    if (unlikely (!points.resize (count, false))) return false;
1415
1416
0
    unsigned n = 0;
1417
0
    unsigned i = 0;
1418
0
    while (i < count)
1419
0
    {
1420
0
      if (unlikely (p + 1 > end)) return false;
1421
0
      unsigned control = *p++;
1422
0
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
1423
0
      unsigned stop = i + run_count;
1424
0
      if (unlikely (stop > count)) return false;
1425
0
      if (control & POINTS_ARE_WORDS)
1426
0
      {
1427
0
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
1428
0
        for (; i < stop; i++)
1429
0
        {
1430
0
          n += *(const HBUINT16 *)p;
1431
0
          points.arrayZ[i] = n;
1432
0
          p += HBUINT16::static_size;
1433
0
        }
1434
0
      }
1435
0
      else
1436
0
      {
1437
0
        if (unlikely (p + run_count > end)) return false;
1438
0
        for (; i < stop; i++)
1439
0
        {
1440
0
          n += *p++;
1441
0
          points.arrayZ[i] = n;
1442
0
        }
1443
0
      }
1444
0
    }
1445
0
    return true;
1446
0
  }
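For reference, a standalone sketch of the packed point-number decoding above, written against plain std types rather than HarfBuzz's HBUINT8/HBUINT16; the flag and run layout follows the OpenType gvar specification, and the function name is illustrative only:

  #include <cstdint>
  #include <vector>

  static bool decode_packed_points (const uint8_t *&p, const uint8_t *end,
                                    std::vector<unsigned> &points)
  {
    constexpr uint8_t POINTS_ARE_WORDS     = 0x80;
    constexpr uint8_t POINT_RUN_COUNT_MASK = 0x7F;

    if (p >= end) return false;
    unsigned count = *p++;
    if (count & POINTS_ARE_WORDS)               /* high bit set: 15-bit count */
    {
      if (p >= end) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    points.resize (count);

    unsigned n = 0, i = 0;                      /* point numbers are delta-coded */
    while (i < count)
    {
      if (p >= end) return false;
      uint8_t control = *p++;
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      bool words = control & POINTS_ARE_WORDS;  /* 16-bit vs 8-bit deltas */
      if (i + run_count > count) return false;
      if ((unsigned) (end - p) < run_count * (words ? 2u : 1u)) return false;
      for (unsigned j = 0; j < run_count; j++, i++)
      {
        unsigned delta = words ? (unsigned) ((p[0] << 8) | p[1]) : p[0];
        p += words ? 2 : 1;
        n += delta;                             /* accumulate to absolute index */
        points[i] = n;
      }
    }
    return true;
  }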
1447
1448
  template <typename T>
1449
  static bool decompile_deltas (const HBUINT8 *&p /* IN/OUT */,
1450
        hb_vector_t<T> &deltas /* IN/OUT */,
1451
        const HBUINT8 *end,
1452
        bool consume_all = false)
1453
0
  {
1454
0
    return TupleValues::decompile (p, deltas, end, consume_all);
1455
0
  }
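decompile_deltas above defers to TupleValues::decompile. The classic gvar packed-delta encoding it consumes looks like the following standalone sketch (HarfBuzz's TupleValues additionally handles 32-bit runs from the newer spec; names here are illustrative):

  #include <cstdint>
  #include <vector>

  static bool decode_packed_deltas (const uint8_t *&p, const uint8_t *end,
                                    unsigned count, std::vector<int> &deltas)
  {
    constexpr uint8_t DELTAS_ARE_ZERO      = 0x80;
    constexpr uint8_t DELTAS_ARE_WORDS     = 0x40;
    constexpr uint8_t DELTA_RUN_COUNT_MASK = 0x3F;

    deltas.reserve (count);
    while (deltas.size () < count)
    {
      if (p >= end) return false;
      uint8_t control = *p++;
      unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      if (deltas.size () + run_count > count) return false;
      if (control & DELTAS_ARE_ZERO)            /* run of implicit zero deltas */
        deltas.insert (deltas.end (), run_count, 0);
      else if (control & DELTAS_ARE_WORDS)      /* run of signed 16-bit deltas */
      {
        if ((unsigned) (end - p) < run_count * 2u) return false;
        for (unsigned j = 0; j < run_count; j++, p += 2)
          deltas.push_back ((int16_t) ((p[0] << 8) | p[1]));
      }
      else                                      /* run of signed 8-bit deltas */
      {
        if ((unsigned) (end - p) < run_count) return false;
        for (unsigned j = 0; j < run_count; j++)
          deltas.push_back ((int8_t) *p++);
      }
    }
    return true;
  }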
1456
1457
0
  bool has_data () const { return tupleVarCount; }
1458
1459
  bool decompile_tuple_variations (unsigned point_count,
1460
                                   bool is_gvar,
1461
                                   tuple_iterator_t iterator,
1462
                                   const hb_map_t *axes_old_index_tag_map,
1463
                                   const hb_vector_t<unsigned> &shared_indices,
1464
                                   const hb_array_t<const F2DOT14> shared_tuples,
1465
                                   tuple_variations_t& tuple_variations, /* OUT */
1466
                                   bool is_composite_glyph = false) const
1467
  {
1468
    return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount,
1469
                                                        point_count, is_gvar,
1470
                                                        axes_old_index_tag_map,
1471
                                                        shared_indices,
1472
                                                        shared_tuples,
1473
                                                        is_composite_glyph);
1474
  }
1475
1476
  bool serialize (hb_serialize_context_t *c,
1477
                  bool is_gvar,
1478
                  const tuple_variations_t& tuple_variations) const
1479
  {
1480
    TRACE_SERIALIZE (this);
1481
    /* empty tuple variations, just return and skip serialization. */
1482
    if (!tuple_variations) return_trace (true);
1483
1484
    auto *out = c->start_embed (this);
1485
    if (unlikely (!c->extend_min (out))) return_trace (false);
1486
1487
    if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (),
1488
                          HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1489
1490
    unsigned total_header_len = 0;
1491
1492
    if (!tuple_variations.serialize_var_headers (c, total_header_len))
1493
      return_trace (false);
1494
1495
    unsigned data_offset = min_size + total_header_len;
1496
    if (!is_gvar) data_offset += 4;  /* account for cvar's 4-byte version field preceding this struct */
1497
    if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1498
1499
    return tuple_variations.serialize_var_data (c, is_gvar);
1500
  }
1501
1502
  protected:
1503
  struct TupleVarCount : HBUINT16
1504
  {
1505
    friend struct tuple_variations_t;
1506
0
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
1507
0
    unsigned int get_count () const { return (*this) & CountMask; }
1508
    TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
1509
    explicit operator bool () const { return get_count (); }
1510
1511
    protected:
1512
    enum Flags
1513
    {
1514
      SharedPointNumbers= 0x8000u,
1515
      CountMask         = 0x0FFFu
1516
    };
1517
    public:
1518
    DEFINE_SIZE_STATIC (2);
1519
  };
1520
1521
  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
1522
                                 * low 12 bits are the number of tuple variation tables
1523
                                 * for this glyph. The number of tuple variation tables
1524
                                 * can be any number between 1 and 4095. */
1525
  OffsetTo<HBUINT8, OffType>
1526
                data;           /* Offset from the start of the base table
1527
                                 * to the serialized data. */
1528
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
1529
  public:
1530
  DEFINE_SIZE_MIN (2 + OffType::static_size);
1531
};
1532
1533
// TODO: Move tuple_variations_t out of TupleVariationData
1534
using tuple_variations_t = TupleVariationData<HBUINT16>::tuple_variations_t;
1535
struct item_variations_t
1536
{
1537
  using region_t = const hb_hashmap_t<hb_tag_t, Triple>*;
1538
  private:
1539
  /* each subtable is decompiled into a tuple_variations_t, in which all tuples
1540
   * have the same number of deltas (rows) */
1541
  hb_vector_t<tuple_variations_t> vars;
1542
1543
  /* number of retained rows for each subtable; there are 2 cases when var_data is empty:
1544
   * 1. the retained item_count is zero, or
1545
   * 2. regions is empty while item_count is non-zero.
1546
   * When converting to tuples, both are dropped because the tuple is empty;
1547
   * however, case 2 must be kept as all-zero rows so the original varidx stays
1548
   * valid, hence we remember the number of rows for each subtable */
1549
  hb_vector_t<unsigned> var_data_num_rows;
1550
1551
  /* original region list, decompiled from item varstore, used when rebuilding
1552
   * region list after instantiation */
1553
  hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>> orig_region_list;
1554
1555
  /* region list: a vector of regions that keeps the original order of regions
1556
   * existing before instantiate () and appends new regions at the end.
1557
   * Regions are already stored in each tuple, so only pointers are saved here.
1558
   * When converting back to an item varstore, unused regions are pruned */
1559
  hb_vector_t<region_t> region_list;
1560
1561
  /* region -> idx map after instantiation and pruning unused regions */
1562
  hb_hashmap_t<region_t, unsigned> region_map;
1563
1564
  /* all delta rows after instantiation */
1565
  hb_vector_t<hb_vector_t<int>> delta_rows;
1566
  /* final optimized vector of encoding objects used to assemble the varstore */
1567
  hb_vector_t<delta_row_encoding_t> encodings;
1568
1569
  /* old varidx -> new varidx map */
1570
  hb_map_t varidx_map;
1571
1572
  /* whether any delta requires long (32-bit) words */
1573
  bool has_long = false;
1574
1575
  public:
1576
  bool has_long_word () const
1577
0
  { return has_long; }
1578
1579
  const hb_vector_t<region_t>& get_region_list () const
1580
0
  { return region_list; }
1581
1582
  const hb_vector_t<delta_row_encoding_t>& get_vardata_encodings () const
1583
0
  { return encodings; }
1584
1585
  const hb_map_t& get_varidx_map () const
1586
0
  { return varidx_map; }
1587
1588
  bool instantiate (const ItemVariationStore& varStore,
1589
                    const hb_subset_plan_t *plan,
1590
                    bool optimize=true,
1591
                    bool use_no_variation_idx=true,
1592
                    const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1593
0
  {
1594
0
    if (!create_from_item_varstore (varStore, plan->axes_old_index_tag_map, inner_maps))
1595
0
      return false;
1596
0
    if (!instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances))
1597
0
      return false;
1598
0
    return as_item_varstore (optimize, use_no_variation_idx);
1599
0
  }
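A hedged usage sketch of the three-step pipeline wrapped by instantiate (); `varStore` and `plan` stand in for the caller's objects:

  item_variations_t item_vars;
  if (item_vars.instantiate (varStore, plan))   /* optimize and no-variation-idx on by default */
  {
    const hb_map_t &varidx_map = item_vars.get_varidx_map ();
    /* remap every old VarIdx through varidx_map when re-serializing users of the store */
  }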
1600
1601
  /* the APIs below are kept public only for the unit test: test-item-varstore */
1602
  bool create_from_item_varstore (const ItemVariationStore& varStore,
1603
                                  const hb_map_t& axes_old_index_tag_map,
1604
                                  const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1605
0
  {
1606
0
    const VarRegionList& regionList = varStore.get_region_list ();
1607
0
    if (!regionList.get_var_regions (axes_old_index_tag_map, orig_region_list))
1608
0
      return false;
1609
0
1610
0
    unsigned num_var_data = varStore.get_sub_table_count ();
1611
0
    if (inner_maps && inner_maps.length != num_var_data) return false;
1612
0
    if (!vars.alloc (num_var_data) ||
1613
0
        !var_data_num_rows.alloc (num_var_data)) return false;
1614
0
1615
0
    for (unsigned i = 0; i < num_var_data; i++)
1616
0
    {
1617
0
      if (inner_maps && !inner_maps.arrayZ[i].get_population ())
1618
0
          continue;
1619
0
      tuple_variations_t var_data_tuples;
1620
0
      unsigned item_count = 0;
1621
0
      if (!var_data_tuples.create_from_item_var_data (varStore.get_sub_table (i),
1622
0
                                                      orig_region_list,
1623
0
                                                      axes_old_index_tag_map,
1624
0
                                                      item_count,
1625
0
                                                      inner_maps ? &(inner_maps.arrayZ[i]) : nullptr))
1626
0
        return false;
1627
0
1628
0
      var_data_num_rows.push (item_count);
1629
0
      vars.push (std::move (var_data_tuples));
1630
0
    }
1631
0
    return !vars.in_error () && !var_data_num_rows.in_error () && vars.length == var_data_num_rows.length;
1632
0
  }
1633
1634
  bool instantiate_tuple_vars (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
1635
                               const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
1636
0
  {
1637
0
    for (tuple_variations_t& tuple_vars : vars)
1638
0
      if (!tuple_vars.instantiate (normalized_axes_location, axes_triple_distances))
1639
0
        return false;
1640
0
1641
0
    if (!build_region_list ()) return false;
1642
0
    return true;
1643
0
  }
1644
1645
  bool build_region_list ()
1646
0
  {
1647
0
    /* scan all tuples and collect all unique regions, prune unused regions */
1648
0
    hb_hashmap_t<region_t, unsigned> all_regions;
1649
0
    hb_hashmap_t<region_t, unsigned> used_regions;
1650
0
1651
0
    /* record newly seen regions in a vector so the result is deterministic */
1652
0
    hb_vector_t<region_t> all_unique_regions;
1653
0
    for (const tuple_variations_t& sub_table : vars)
1654
0
    {
1655
0
      for (const tuple_delta_t& tuple : sub_table.tuple_vars)
1656
0
      {
1657
0
        region_t r = &(tuple.axis_tuples);
1658
0
        if (!used_regions.has (r))
1659
0
        {
1660
0
          bool all_zeros = true;
1661
0
          for (float d : tuple.deltas_x)
1662
0
          {
1663
0
            int delta = (int) roundf (d);
1664
0
            if (delta != 0)
1665
0
            {
1666
0
              all_zeros = false;
1667
0
              break;
1668
0
            }
1669
0
          }
1670
0
          if (!all_zeros)
1671
0
          {
1672
0
            if (!used_regions.set (r, 1))
1673
0
              return false;
1674
0
          }
1675
0
        }
1676
0
        if (all_regions.has (r))
1677
0
          continue;
1678
0
        if (!all_regions.set (r, 1))
1679
0
          return false;
1680
0
        all_unique_regions.push (r);
1681
0
      }
1682
0
    }
1683
0
1684
0
    /* empty regions means there is no variation data; return true */
1685
0
    if (!all_regions || !all_unique_regions) return true;
1686
0
1687
0
    if (!region_list.alloc (all_regions.get_population ()))
1688
0
      return false;
1689
0
1690
0
    unsigned idx = 0;
1691
0
    /* append the pre-existing regions first, preserving their original order */
1692
0
    for (const auto& r : orig_region_list)
1693
0
    {
1694
0
      if (!all_regions.has (&r) || !used_regions.has (&r))
1695
0
        continue;
1696
0
1697
0
      region_list.push (&r);
1698
0
      if (!region_map.set (&r, idx))
1699
0
        return false;
1700
0
      all_regions.del (&r);
1701
0
      idx++;
1702
0
    }
1703
0
1704
0
    /* append the new regions at the end */
1705
0
    for (const auto& r: all_unique_regions)
1706
0
    {
1707
0
      if (!all_regions.has (r) || !used_regions.has (r))
1708
0
        continue;
1709
0
      region_list.push (r);
1710
0
      if (!region_map.set (r, idx))
1711
0
        return false;
1712
0
      all_regions.del (r);
1713
0
      idx++;
1714
0
    }
1715
0
    return (!region_list.in_error ()) && (!region_map.in_error ());
1716
0
  }
1717
1718
  /* main algorithm, ported from the fonttools VarStore_optimize() method;
1719
   * the varstore is optimized by default */
1720
1721
  struct combined_gain_idx_tuple_t
1722
  {
1723
    int gain;
1724
    unsigned idx_1;
1725
    unsigned idx_2;
1726
1727
    combined_gain_idx_tuple_t () = default;
1728
    combined_gain_idx_tuple_t (int gain_, unsigned i, unsigned j)
1729
0
        :gain (gain_), idx_1 (i), idx_2 (j) {}
1730
1731
    bool operator < (const combined_gain_idx_tuple_t& o)
1732
0
    {
1733
0
      if (gain != o.gain)
1734
0
        return gain < o.gain;
1735
0
1736
0
      if (idx_1 != o.idx_1)
1737
0
        return idx_1 < o.idx_1;
1738
0
1739
0
      return idx_2 < o.idx_2;
1740
0
    }
1741
1742
    bool operator <= (const combined_gain_idx_tuple_t& o)
1743
0
    {
1744
0
      if (*this < o) return true;
1745
0
      return gain == o.gain && idx_1 == o.idx_1 && idx_2 == o.idx_2;
1746
0
    }
1747
  };
1748
1749
  bool as_item_varstore (bool optimize=true, bool use_no_variation_idx=true)
1750
0
  {
1751
0
    /* return true if no variation data */
1752
0
    if (!region_list) return true;
1753
0
    unsigned num_cols = region_list.length;
1754
0
    /* pre-allocate a 2D vector for all subtables' VarData rows */
1755
0
    unsigned total_rows = 0;
1756
0
    for (unsigned major = 0; major < var_data_num_rows.length; major++)
1757
0
      total_rows += var_data_num_rows[major];
1758
0
1759
0
    if (!delta_rows.resize (total_rows)) return false;
1760
0
    /* init all rows to [0]*num_cols */
1761
0
    for (unsigned i = 0; i < total_rows; i++)
1762
0
      if (!(delta_rows[i].resize (num_cols))) return false;
1763
0
1764
0
    /* old VarIdxes -> full encoding_row mapping */
1765
0
    hb_hashmap_t<unsigned, const hb_vector_t<int>*> front_mapping;
1766
0
    unsigned start_row = 0;
1767
0
    hb_vector_t<delta_row_encoding_t> encoding_objs;
1768
0
    hb_hashmap_t<hb_vector_t<uint8_t>, unsigned> chars_idx_map;
1769
0
1770
0
    /* delta_rows map, used for filtering out duplicate rows */
1771
0
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> delta_rows_map;
1772
0
    for (unsigned major = 0; major < vars.length; major++)
1773
0
    {
1774
0
      /* deltas are stored per tuple (column-based); convert them back into
1775
0
       * per-item (row-based) deltas */
1776
0
      const tuple_variations_t& tuples = vars[major];
1777
0
      unsigned num_rows = var_data_num_rows[major];
1778
0
      for (const tuple_delta_t& tuple: tuples.tuple_vars)
1779
0
      {
1780
0
        if (tuple.deltas_x.length != num_rows)
1781
0
          return false;
1782
0
1783
0
        /* skip unused regions */
1784
0
        unsigned *col_idx;
1785
0
        if (!region_map.has (&(tuple.axis_tuples), &col_idx))
1786
0
          continue;
1787
0
1788
0
        for (unsigned i = 0; i < num_rows; i++)
1789
0
        {
1790
0
          int rounded_delta = roundf (tuple.deltas_x[i]);
1791
0
          delta_rows[start_row + i][*col_idx] += rounded_delta;
1792
0
          if ((!has_long) && (rounded_delta < -65536 || rounded_delta > 65535))
1793
0
            has_long = true;
1794
0
        }
1795
0
      }
1796
0
1797
0
      if (!optimize)
1798
0
      {
1799
0
        /* assemble a delta_row_encoding_t for this subtable; optimization is skipped,
1800
0
         * so chars stays uninitialized: only delta rows are needed for serialization */
1801
0
        delta_row_encoding_t obj;
1802
0
        for (unsigned r = start_row; r < start_row + num_rows; r++)
1803
0
          obj.add_row (&(delta_rows.arrayZ[r]));
1804
0
1805
0
        encodings.push (std::move (obj));
1806
0
        start_row += num_rows;
1807
0
        continue;
1808
0
      }
1809
0
1810
0
      for (unsigned minor = 0; minor < num_rows; minor++)
1811
0
      {
1812
0
        const hb_vector_t<int>& row = delta_rows[start_row + minor];
1813
0
        if (use_no_variation_idx)
1814
0
        {
1815
0
          bool all_zeros = true;
1816
0
          for (int delta : row)
1817
0
          {
1818
0
            if (delta != 0)
1819
0
            {
1820
0
              all_zeros = false;
1821
0
              break;
1822
0
            }
1823
0
          }
1824
0
          if (all_zeros)
1825
0
            continue;
1826
0
        }
1827
0
1828
0
        if (!front_mapping.set ((major<<16) + minor, &row))
1829
0
          return false;
1830
0
1831
0
        hb_vector_t<uint8_t> chars = delta_row_encoding_t::get_row_chars (row);
1832
0
        if (!chars) return false;
1833
0
1834
0
        if (delta_rows_map.has (&row))
1835
0
          continue;
1836
0
1837
0
        delta_rows_map.set (&row, 1);
1838
0
        unsigned *obj_idx;
1839
0
        if (chars_idx_map.has (chars, &obj_idx))
1840
0
        {
1841
0
          delta_row_encoding_t& obj = encoding_objs[*obj_idx];
1842
0
          if (!obj.add_row (&row))
1843
0
            return false;
1844
0
        }
1845
0
        else
1846
0
        {
1847
0
          if (!chars_idx_map.set (chars, encoding_objs.length))
1848
0
            return false;
1849
0
          delta_row_encoding_t obj (std::move (chars), &row);
1850
0
          encoding_objs.push (std::move (obj));
1851
0
        }
1852
0
      }
1853
0
1854
0
      start_row += num_rows;
1855
0
    }
1856
0
1857
0
    /* if not optimizing, return directly; the original VariationIndexes are
1858
0
     * kept, so varidx_map stays empty */
1859
0
    if (!optimize) return !encodings.in_error ();
1860
0
1861
0
    /* sort encoding_objs */
1862
0
    encoding_objs.qsort ();
1863
0
1864
0
    /* main algorithm: repeatedly pick the pair of encodings whose merge yields
1864
0
     * the best gain, and combine them */
1866
0
    hb_priority_queue_t<combined_gain_idx_tuple_t> queue;
1867
0
    unsigned num_todos = encoding_objs.length;
1868
0
    for (unsigned i = 0; i < num_todos; i++)
1869
0
    {
1870
0
      for (unsigned j = i + 1; j < num_todos; j++)
1871
0
      {
1872
0
        int combining_gain = encoding_objs.arrayZ[i].gain_from_merging (encoding_objs.arrayZ[j]);
1873
0
        if (combining_gain > 0)
1874
0
          queue.insert (combined_gain_idx_tuple_t (-combining_gain, i, j), 0);
1875
0
      }
1876
0
    }
1877
0
1878
0
    hb_set_t removed_todo_idxes;
1879
0
    while (queue)
1880
0
    {
1881
0
      auto t = queue.pop_minimum ().first;
1882
0
      unsigned i = t.idx_1;
1883
0
      unsigned j = t.idx_2;
1884
0
1885
0
      if (removed_todo_idxes.has (i) || removed_todo_idxes.has (j))
1886
0
        continue;
1887
0
1888
0
      delta_row_encoding_t& encoding = encoding_objs.arrayZ[i];
1889
0
      delta_row_encoding_t& other_encoding = encoding_objs.arrayZ[j];
1890
0
1891
0
      removed_todo_idxes.add (i);
1892
0
      removed_todo_idxes.add (j);
1893
0
1894
0
      hb_vector_t<uint8_t> combined_chars;
1895
0
      if (!combined_chars.alloc (encoding.chars.length))
1896
0
        return false;
1897
0
1898
0
      for (unsigned idx = 0; idx < encoding.chars.length; idx++)
1899
0
      {
1900
0
        uint8_t v = hb_max (encoding.chars.arrayZ[idx], other_encoding.chars.arrayZ[idx]);
1901
0
        combined_chars.push (v);
1902
0
      }
1903
0
1904
0
      delta_row_encoding_t combined_encoding_obj (std::move (combined_chars));
1905
0
      for (const auto& row : hb_concat (encoding.items, other_encoding.items))
1906
0
        combined_encoding_obj.add_row (row);
1907
0
1908
0
      for (unsigned idx = 0; idx < encoding_objs.length; idx++)
1909
0
      {
1910
0
        if (removed_todo_idxes.has (idx)) continue;
1911
0
1912
0
        const delta_row_encoding_t& obj = encoding_objs.arrayZ[idx];
1913
0
        if (obj.chars == combined_chars)
1914
0
        {
1915
0
          for (const auto& row : obj.items)
1916
0
            combined_encoding_obj.add_row (row);
1917
0
1918
0
          removed_todo_idxes.add (idx);
1919
0
          continue;
1920
0
        }
1921
0
1922
0
        int combined_gain = combined_encoding_obj.gain_from_merging (obj);
1923
0
        if (combined_gain > 0)
1924
0
          queue.insert (combined_gain_idx_tuple_t (-combined_gain, idx, encoding_objs.length), 0);
1925
0
      }
1926
0
1927
0
      encoding_objs.push (std::move (combined_encoding_obj));
1928
0
    }
1929
0
1930
0
    int num_final_encodings = (int) encoding_objs.length - (int) removed_todo_idxes.get_population ();
1931
0
    if (num_final_encodings <= 0) return false;
1932
0
1933
0
    if (!encodings.alloc (num_final_encodings)) return false;
1934
0
    for (unsigned i = 0; i < encoding_objs.length; i++)
1935
0
    {
1936
0
      if (removed_todo_idxes.has (i)) continue;
1937
0
      encodings.push (std::move (encoding_objs.arrayZ[i]));
1938
0
    }
1939
0
1940
0
    /* sort again by width so the result is deterministic */
1941
0
    encodings.qsort (delta_row_encoding_t::cmp_width);
1942
0
1943
0
    return compile_varidx_map (front_mapping);
1944
0
  }
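A toy standalone re-implementation of the merge loop above (the same control flow fonttools' VarStore_optimize() uses): seed a min-heap keyed on negated gain, pop the best pair, skip stale entries, merge, then re-seed against the survivors. The gain function is left abstract here (the real gain_from_merging () weighs row widths and counts), and all names are illustrative:

  #include <functional>
  #include <queue>
  #include <set>
  #include <tuple>
  #include <vector>

  struct Enc { std::vector<int> rows; };        /* stand-in for delta_row_encoding_t */

  static void merge_encodings (std::vector<Enc> &objs,
                               const std::function<int (const Enc &, const Enc &)> &gain)
  {
    using Item = std::tuple<int, unsigned, unsigned>;   /* (-gain, idx_1, idx_2) */
    std::priority_queue<Item, std::vector<Item>, std::greater<Item>> queue;

    for (unsigned i = 0; i < objs.size (); i++)
      for (unsigned j = i + 1; j < objs.size (); j++)
        if (int g = gain (objs[i], objs[j]); g > 0)
          queue.push ({-g, i, j});              /* most gain pops first */

    std::set<unsigned> removed;
    while (!queue.empty ())
    {
      auto [neg_gain, i, j] = queue.top (); queue.pop ();
      (void) neg_gain;                          /* only the heap ordering uses it */
      if (removed.count (i) || removed.count (j)) continue;   /* stale entry */
      removed.insert (i); removed.insert (j);

      Enc combined = objs[i];                   /* union of both encodings' rows */
      combined.rows.insert (combined.rows.end (),
                            objs[j].rows.begin (), objs[j].rows.end ());

      /* re-seed the heap with gains against every surviving encoding */
      for (unsigned k = 0; k < objs.size (); k++)
        if (!removed.count (k))
          if (int g = gain (combined, objs[k]); g > 0)
            queue.push ({-g, k, (unsigned) objs.size ()});

      objs.push_back (std::move (combined));
    }
    /* survivors: objs[k] for every k not in `removed` */
  }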
1945
1946
  private:
1947
  /* compile the old->new varidx mapping across all VarData subtables */
1948
  bool compile_varidx_map (const hb_hashmap_t<unsigned, const hb_vector_t<int>*>& front_mapping)
1949
0
  {
1950
0
    /* full encoding_row -> new VarIdxes mapping */
1951
0
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> back_mapping;
1952
0
1953
0
    for (unsigned major = 0; major < encodings.length; major++)
1954
0
    {
1955
0
      delta_row_encoding_t& encoding = encodings[major];
1956
0
      /* just a sanity check; this shouldn't happen */
1957
0
      if (encoding.is_empty ())
1958
0
        return false;
1959
0
1960
0
      unsigned num_rows = encoding.items.length;
1961
0
1962
0
      /* sort rows so the result is deterministic */
1963
0
      encoding.items.qsort (_cmp_row);
1964
0
1965
0
      /* record the new varidx assigned to each row */
1966
0
      for (unsigned minor = 0; minor < num_rows; minor++)
1967
0
      {
1968
0
        unsigned new_varidx = (major << 16) + minor;
1969
0
        back_mapping.set (encoding.items.arrayZ[minor], new_varidx);
1970
0
      }
1971
0
    }
1972
0
1973
0
    for (auto _ : front_mapping.iter ())
1974
0
    {
1975
0
      unsigned old_varidx = _.first;
1976
0
      unsigned *new_varidx;
1977
0
      if (back_mapping.has (_.second, &new_varidx))
1978
0
        varidx_map.set (old_varidx, *new_varidx);
1979
0
      else
1980
0
        varidx_map.set (old_varidx, HB_OT_LAYOUT_NO_VARIATIONS_INDEX);
1981
0
    }
1982
0
    return !varidx_map.in_error ();
1983
0
  }
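The (major << 16) + minor packing used above is the standard 32-bit VarIdx layout: outer (VarData subtable) index in the high 16 bits, inner (row) index in the low 16 bits. A trivial standalone sketch:

  #include <cstdint>

  static constexpr uint32_t make_varidx (unsigned major, unsigned minor)
  { return ((uint32_t) (major & 0xFFFFu) << 16) | (minor & 0xFFFFu); }
  static constexpr unsigned varidx_major (uint32_t varidx) { return varidx >> 16; }
  static constexpr unsigned varidx_minor (uint32_t varidx) { return varidx & 0xFFFFu; }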
1984
1985
  static int _cmp_row (const void *pa, const void *pb)
1986
0
  {
1987
0
    /* compare rows lexicographically via their vector pointers (const hb_vector_t<int> *) */
1988
0
    const hb_vector_t<int>** a = (const hb_vector_t<int>**) pa;
1989
0
    const hb_vector_t<int>** b = (const hb_vector_t<int>**) pb;
1990
0
1991
0
    for (unsigned i = 0; i < (*b)->length; i++)
1992
0
    {
1993
0
      int va = (*a)->arrayZ[i];
1994
0
      int vb = (*b)->arrayZ[i];
1995
0
      if (va != vb)
1996
0
        return va < vb ? -1 : 1;
1997
0
    }
1998
0
    return 0;
1999
0
  }
2000
};
2001
2002
2003
} /* namespace OT */
2004
2005
2006
#endif /* HB_OT_VAR_COMMON_HH */