Coverage Report

Created: 2025-07-12 07:03

/src/harfbuzz/src/hb-ot-var-common.hh

Summary: every instrumented line in this file reports a hit count of 0 (the file is entirely uncovered by this run). Source listing follows.
/*
 * Copyright © 2021  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"
#include "hb-priority-queue.hh"
#include "hb-subset-instancer-iup.hh"


namespace OT {

/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count);
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count);
      end_tuple = get_end_tuple (axis_count);
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple ((double) start, (double) peak, (double) end));
    }

    return true;
  }
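  /* With no intermediate start/end records, the region implied by a peak p is
   * [min(p, 0), p, max(p, 0)]; an axis whose peak is 0 places no constraint on
   * the region and is skipped above. */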
  HB_ALWAYS_INLINE
  double calculate_scalar (hb_array_t<const int> coords, unsigned int coord_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           hb_scalar_cache_t *shared_tuple_scalar_cache = nullptr) const
  {
    unsigned tuple_index = tupleIndex;

    const F2DOT14 *peak_tuple;

    bool has_interm = tuple_index & TuppleIndex::IntermediateRegion; // Inlined for performance
    if (unlikely (has_interm))
      shared_tuple_scalar_cache = nullptr;

    if (unlikely (tuple_index & TuppleIndex::EmbeddedPeakTuple)) // Inlined for performance
    {
      peak_tuple = get_peak_tuple (coord_count);
      shared_tuple_scalar_cache = nullptr;
    }
    else
    {
      unsigned int index = tuple_index & TuppleIndex::TupleIndexMask; // Inlined for performance

      float scalar;
      if (shared_tuple_scalar_cache &&
          shared_tuple_scalar_cache->get (index, &scalar))
        return (double) scalar;

      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.0;
      peak_tuple = shared_tuples.arrayZ + (coord_count * index);
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;

    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count);
      end_tuple = get_end_tuple (coord_count);
    }

    double scalar = 1.0;
    for (unsigned int i = 0; i < coord_count; i++)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (!v) { scalar = 0.0; break; }
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) { scalar = 0.0; break; }
        if (v < peak)
        { if (peak != start) scalar *= (double) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (double) (end - v) / (end - peak); }
      }
      else if (v < hb_min (0, peak) || v > hb_max (0, peak)) { scalar = 0.0; break; }
      else
        scalar *= (double) v / peak;
    }
    if (shared_tuple_scalar_cache)
      shared_tuple_scalar_cache->set (get_index (), scalar);
    return scalar;
  }
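  /* Per-axis contribution to the scalar above, for instance coordinate v and
   * region (start, peak, end) with peak != 0:
   *
   *   v == peak               -> 1
   *   v outside [start, end]  -> 0   (whole scalar becomes 0)
   *   start <= v < peak       -> (v - start) / (peak - start)
   *   peak < v <= end         -> (end - v) / (end - peak)
   *
   * e.g. peak = 0.5 with implicit range [0, 0.5] and v = 0.25 contributes 0.5.
   * The final scalar is the product of the per-axis contributions. */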
  bool           has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool   has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned      get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  const F2DOT14* get_all_tuples_base (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).arrayZ; }
  const F2DOT14* get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples_base (axis_count); }
  const F2DOT14* get_start_tuple (unsigned axis_count) const
  { return get_all_tuples_base (axis_count) + has_peak () * axis_count; }
  const F2DOT14* get_end_tuple (unsigned axis_count) const
  { return get_all_tuples_base (axis_count) + has_peak () * axis_count + axis_count; }
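  /* The optional records following tupleIndex are laid out, in order: an
   * embedded peak tuple (axis_count F2DOT14 values, present iff
   * EmbeddedPeakTuple is set), then an intermediate start tuple and an
   * intermediate end tuple (axis_count values each, present iff
   * IntermediateRegion is set); the accessors above index into that layout. */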
  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                   The low 12 bits are an index into a shared tuple
                                   records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                                /* Peak tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value.
                                 *
                                 * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                                /* Intermediate start tuple record for this tuple variation table — optional,
                                   determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                                /* Intermediate end tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

struct tuple_delta_t
{
  static constexpr bool realloc_move = true;  // Watch out when adding new members!

  public:
  hb_hashmap_t<hb_tag_t, Triple> axis_tuples;

  /* indices_length = point_count, indices[i] = 1 means point i is referenced */
  hb_vector_t<bool> indices;

  hb_vector_t<float> deltas_x;
  /* empty for cvar tuples */
  hb_vector_t<float> deltas_y;

  /* compiled data: header and deltas
   * compiled point data is saved in a hashmap within tuple_variations_t because
   * some point sets might be reused by different tuple variations */
  hb_vector_t<unsigned char> compiled_tuple_header;
  hb_vector_t<unsigned char> compiled_deltas;

  /* compiled peak coords, empty for non-gvar tuples */
  hb_vector_t<char> compiled_peak_coords;

  tuple_delta_t () = default;
  tuple_delta_t (const tuple_delta_t& o) = default;

  friend void swap (tuple_delta_t& a, tuple_delta_t& b) noexcept
  {
    hb_swap (a.axis_tuples, b.axis_tuples);
    hb_swap (a.indices, b.indices);
    hb_swap (a.deltas_x, b.deltas_x);
    hb_swap (a.deltas_y, b.deltas_y);
    hb_swap (a.compiled_tuple_header, b.compiled_tuple_header);
    hb_swap (a.compiled_deltas, b.compiled_deltas);
    hb_swap (a.compiled_peak_coords, b.compiled_peak_coords);
  }

  tuple_delta_t (tuple_delta_t&& o) noexcept : tuple_delta_t ()
  { hb_swap (*this, o); }

  tuple_delta_t& operator = (tuple_delta_t&& o) noexcept
  {
    hb_swap (*this, o);
    return *this;
  }

  void remove_axis (hb_tag_t axis_tag)
  { axis_tuples.del (axis_tag); }

  bool set_tent (hb_tag_t axis_tag, Triple tent)
  { return axis_tuples.set (axis_tag, tent); }

  tuple_delta_t& operator += (const tuple_delta_t& o)
  {
    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (indices.arrayZ[i])
      {
        if (o.indices.arrayZ[i])
        {
          deltas_x[i] += o.deltas_x[i];
          if (deltas_y && o.deltas_y)
            deltas_y[i] += o.deltas_y[i];
        }
      }
      else
      {
        if (!o.indices.arrayZ[i]) continue;
        indices.arrayZ[i] = true;
        deltas_x[i] = o.deltas_x[i];
        if (deltas_y && o.deltas_y)
          deltas_y[i] = o.deltas_y[i];
      }
    }
    return *this;
  }

  tuple_delta_t& operator *= (float scalar)
  {
    if (scalar == 1.0f)
      return *this;

    unsigned num = indices.length;
    if (deltas_y)
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
        deltas_y[i] *= scalar;
      }
    else
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
      }
    return *this;
  }

  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
                                                          TripleDistances axis_triple_distances) const
  {
    hb_vector_t<tuple_delta_t> out;
    Triple *tent;
    if (!axis_tuples.has (axis_tag, &tent))
    {
      out.push (*this);
      return out;
    }

    if ((tent->minimum < 0.0 && tent->maximum > 0.0) ||
        !(tent->minimum <= tent->middle && tent->middle <= tent->maximum))
      return out;

    if (tent->middle == 0.0)
    {
      out.push (*this);
      return out;
    }

    rebase_tent_result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
    for (auto &t : solutions)
    {
      tuple_delta_t new_var = *this;
      if (t.second == Triple ())
        new_var.remove_axis (axis_tag);
      else
        new_var.set_tent (axis_tag, t.second);

      new_var *= t.first;
      out.push (std::move (new_var));
    }

    return out;
  }
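  /* rebase_tent () remaps this tuple's tent on one axis to the new axis limits.
   * Each solution is a (scale, tent) pair: the copy's deltas are multiplied by
   * the scale, and an empty tent means the axis no longer constrains the copy
   * (it is dropped from axis_tuples). One input tuple can therefore fan out
   * into several output tuples. */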
  bool compile_peak_coords (const hb_map_t& axes_index_map,
                            const hb_map_t& axes_old_index_tag_map)
  {
    unsigned axis_count = axes_index_map.get_population ();
    if (unlikely (!compiled_peak_coords.alloc (axis_count * F2DOT14::static_size)))
      return false;

    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i))
        continue;

      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      F2DOT14 peak_coord;
      if (axis_tuples.has (axis_tag, &coords))
        peak_coord.set_float (coords->middle);
      else
        peak_coord.set_int (0);

      /* push F2DOT14 value into char vector */
      int16_t val = peak_coord.to_int ();
      compiled_peak_coords.push (static_cast<char> (val >> 8));
      compiled_peak_coords.push (static_cast<char> (val & 0xFF));
    }

    return !compiled_peak_coords.in_error ();
  }

  /* deltas should be compiled already before we compile the tuple
   * variation header, because we need to fill in the size of the
   * serialized data for this tuple variation */
  bool compile_tuple_var_header (const hb_map_t& axes_index_map,
                                 unsigned points_data_length,
                                 const hb_map_t& axes_old_index_tag_map,
                                 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map)
  {
    /* compiled_deltas could be empty after iup delta optimization, we can skip
     * compiling this tuple and return true */
    if (!compiled_deltas) return true;

    unsigned cur_axis_count = axes_index_map.get_population ();
    /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */
    unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4;
    if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false;

    unsigned flag = 0;
    /* skip the first 4 header bytes: variationDataSize+tupleIndex */
    F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4);
    F2DOT14* end = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ());
    hb_array_t<F2DOT14> coords (p, end - p);

    /* encode peak coords */
    unsigned peak_count = 0;
    unsigned *shared_tuple_idx;
    if (shared_tuples_idx_map &&
        shared_tuples_idx_map->has (&compiled_peak_coords, &shared_tuple_idx))
    {
      flag = *shared_tuple_idx;
    }
    else
    {
      peak_count = encode_peak_coords(coords, flag, axes_index_map, axes_old_index_tag_map);
      if (!peak_count) return false;
    }

    /* encode intermediate coords; they're optional, so the returned count may be 0 */
    unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map);

    /* pointdata length = 0 implies "use shared points" */
    if (points_data_length)
      flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers;

    unsigned serialized_data_size = points_data_length + compiled_deltas.length;
    TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ());
    o->varDataSize = serialized_data_size;
    o->tupleIndex = flag;

    unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size);
    return compiled_tuple_header.resize (total_header_len);
  }
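  /* If the compiled peak coords match an entry in shared_tuples_idx_map, the
   * shared-tuple index is stored in the low bits of tupleIndex instead of
   * embedding the peak tuple; EmbeddedPeakTuple stays clear and the header
   * saves axis_count * 2 bytes for every tuple that reuses a shared peak. */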
  unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords,
                               unsigned& flag,
                               const hb_map_t& axes_index_map,
                               const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    auto it = peak_coords.iter ();
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      if (!axis_tuples.has (axis_tag, &coords))
        (*it).set_int (0);
      else
        (*it).set_float (coords->middle);
      it++;
      count++;
    }
    flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple;
    return count;
  }

  /* if there is no need to encode intermediate coords, return 0 */
  unsigned encode_interm_coords (hb_array_t<F2DOT14> coords,
                                 unsigned& flag,
                                 const hb_map_t& axes_index_map,
                                 const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    unsigned cur_axis_count = axes_index_map.get_population ();

    auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter ();
    auto end_coords_iter = coords.sub_array (cur_axis_count).iter ();
    bool encode_needed = false;
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      float min_val = 0.f, val = 0.f, max_val = 0.f;
      if (axis_tuples.has (axis_tag, &coords))
      {
        min_val = coords->minimum;
        val = coords->middle;
        max_val = coords->maximum;
      }

      (*start_coords_iter).set_float (min_val);
      (*end_coords_iter).set_float (max_val);

      start_coords_iter++;
      end_coords_iter++;
      count += 2;
      if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f))
        encode_needed = true;
    }

    if (encode_needed)
    {
      flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion;
      return count;
    }
    return 0;
  }

  bool compile_deltas ()
  { return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); }

  static bool compile_deltas (hb_array_t<const bool> point_indices,
                              hb_array_t<const float> x_deltas,
                              hb_array_t<const float> y_deltas,
                              hb_vector_t<unsigned char> &compiled_deltas /* OUT */)
  {
    hb_vector_t<int> rounded_deltas;
    if (unlikely (!rounded_deltas.alloc (point_indices.length)))
      return false;

    for (unsigned i = 0; i < point_indices.length; i++)
    {
      if (!point_indices[i]) continue;
      int rounded_delta = (int) roundf (x_deltas.arrayZ[i]);
      rounded_deltas.push (rounded_delta);
    }

    if (!rounded_deltas) return true;
    /* allocate enough memory: 5 * num_deltas */
    unsigned alloc_len = 5 * rounded_deltas.length;
    if (y_deltas)
      alloc_len *= 2;

    if (unlikely (!compiled_deltas.resize (alloc_len))) return false;

    unsigned encoded_len = compile_deltas (compiled_deltas, rounded_deltas);

    if (y_deltas)
    {
      /* reuse the rounded_deltas vector, check that y_deltas have the same num of deltas as x_deltas */
      unsigned j = 0;
      for (unsigned idx = 0; idx < point_indices.length; idx++)
      {
        if (!point_indices[idx]) continue;
        int rounded_delta = (int) roundf (y_deltas.arrayZ[idx]);

        if (j >= rounded_deltas.length) return false;

        rounded_deltas[j++] = rounded_delta;
      }

      if (j != rounded_deltas.length) return false;
      encoded_len += compile_deltas (compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
    }
    return compiled_deltas.resize (encoded_len);
  }

  static unsigned compile_deltas (hb_array_t<unsigned char> encoded_bytes,
                                  hb_array_t<const int> deltas)
  {
    return TupleValues::compile (deltas, encoded_bytes);
  }
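  /* TupleValues::compile () emits runs of packed deltas (zero runs, byte-sized
   * deltas, or wider deltas), each run prefixed with a control byte. The
   * "5 * num_deltas" allocation above is intended as a safe upper bound (one
   * control byte plus at most four value bytes per delta); it is doubled when
   * y deltas are compiled into the same buffer. */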
  bool calc_inferred_deltas (const contour_point_vector_t& orig_points)
  {
    unsigned point_count = orig_points.length;
    if (point_count != indices.length)
      return false;

    unsigned ref_count = 0;
    hb_vector_t<unsigned> end_points;

    for (unsigned i = 0; i < point_count; i++)
    {
      if (indices.arrayZ[i])
        ref_count++;
      if (orig_points.arrayZ[i].is_end_point)
        end_points.push (i);
    }
    /* all points are referenced, nothing to do */
    if (ref_count == point_count)
      return true;
    if (unlikely (end_points.in_error ())) return false;

    hb_set_t inferred_idxes;
    unsigned start_point = 0;
    for (unsigned end_point : end_points)
    {
      /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
      unsigned unref_count = 0;
      for (unsigned i = start_point; i < end_point + 1; i++)
        unref_count += indices.arrayZ[i];
      unref_count = (end_point - start_point + 1) - unref_count;

      unsigned j = start_point;
      if (unref_count == 0 || unref_count > end_point - start_point)
        goto no_more_gaps;
      for (;;)
      {
        /* Locate the next gap of unreferenced points between two referenced points prev and next.
         * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
         */
        unsigned int prev, next, i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (indices.arrayZ[i] && !indices.arrayZ[j]) break;
        }
        prev = j = i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (!indices.arrayZ[i] && indices.arrayZ[j]) break;
        }
        next = j;
        /* Infer deltas for all unref points in the gap between prev and next */
        i = prev;
        for (;;)
        {
          i = next_index (i, start_point, end_point);
          if (i == next) break;
          deltas_x.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].x,
                                            (double) orig_points.arrayZ[prev].x,
                                            (double) orig_points.arrayZ[next].x,
                                            (double) deltas_x.arrayZ[prev], (double) deltas_x.arrayZ[next]);
          deltas_y.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].y,
                                            (double) orig_points.arrayZ[prev].y,
                                            (double) orig_points.arrayZ[next].y,
                                            (double) deltas_y.arrayZ[prev], (double) deltas_y.arrayZ[next]);
          inferred_idxes.add (i);
          if (--unref_count == 0) goto no_more_gaps;
        }
      }
    no_more_gaps:
      start_point = end_point + 1;
    }

    for (unsigned i = 0; i < point_count; i++)
    {
      /* if points are not referenced and deltas are not inferred, set to 0.
       * reference all points for gvar */
      if (!indices[i])
      {
        if (!inferred_idxes.has (i))
        {
          deltas_x.arrayZ[i] = 0.0;
          deltas_y.arrayZ[i] = 0.0;
        }
        indices[i] = true;
      }
    }
    return true;
  }

  bool optimize (const contour_point_vector_t& contour_points,
                 bool is_composite,
                 double tolerance = 0.5 + 1e-10)
  {
    unsigned count = contour_points.length;
    if (deltas_x.length != count ||
        deltas_y.length != count)
      return false;

    hb_vector_t<bool> opt_indices;
    hb_vector_t<int> rounded_x_deltas, rounded_y_deltas;

    if (unlikely (!rounded_x_deltas.alloc (count) ||
                  !rounded_y_deltas.alloc (count)))
      return false;

    for (unsigned i = 0; i < count; i++)
    {
      int rounded_x_delta = (int) roundf (deltas_x.arrayZ[i]);
      int rounded_y_delta = (int) roundf (deltas_y.arrayZ[i]);
      rounded_x_deltas.push (rounded_x_delta);
      rounded_y_deltas.push (rounded_y_delta);
    }

    if (!iup_delta_optimize (contour_points, rounded_x_deltas, rounded_y_deltas, opt_indices, tolerance))
      return false;

    unsigned ref_count = 0;
    for (bool ref_flag : opt_indices)
       ref_count += ref_flag;

    if (ref_count == count) return true;

    hb_vector_t<float> opt_deltas_x, opt_deltas_y;
    bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0);
    if (is_comp_glyph_wo_deltas)
    {
      if (unlikely (!opt_deltas_x.resize (count) ||
                    !opt_deltas_y.resize (count)))
        return false;

      opt_indices.arrayZ[0] = true;
      for (unsigned i = 1; i < count; i++)
        opt_indices.arrayZ[i] = false;
    }

    hb_vector_t<unsigned char> opt_point_data;
    if (!compile_point_set (opt_indices, opt_point_data))
      return false;
    hb_vector_t<unsigned char> opt_deltas_data;
    if (!compile_deltas (opt_indices,
                         is_comp_glyph_wo_deltas ? opt_deltas_x : deltas_x,
                         is_comp_glyph_wo_deltas ? opt_deltas_y : deltas_y,
                         opt_deltas_data))
      return false;

    hb_vector_t<unsigned char> point_data;
    if (!compile_point_set (indices, point_data))
      return false;
    hb_vector_t<unsigned char> deltas_data;
    if (!compile_deltas (indices, deltas_x, deltas_y, deltas_data))
      return false;

    if (opt_point_data.length + opt_deltas_data.length < point_data.length + deltas_data.length)
    {
      indices.fini ();
      indices = std::move (opt_indices);

      if (is_comp_glyph_wo_deltas)
      {
        deltas_x.fini ();
        deltas_x = std::move (opt_deltas_x);

        deltas_y.fini ();
        deltas_y = std::move (opt_deltas_y);
      }
    }
    return !indices.in_error () && !deltas_x.in_error () && !deltas_y.in_error ();
  }

  static bool compile_point_set (const hb_vector_t<bool> &point_indices,
                                 hb_vector_t<unsigned char>& compiled_points /* OUT */)
  {
    unsigned num_points = 0;
    for (bool i : point_indices)
      if (i) num_points++;

    /* when iup optimization is enabled, num of referenced points could be 0 */
    if (!num_points) return true;

    unsigned indices_length = point_indices.length;
    /* If the points set consists of all points in the glyph, it's encoded with a
     * single zero byte */
    if (num_points == indices_length)
      return compiled_points.resize (1);

    /* allocate enough memory: 2 bytes for count + 3 bytes for each point */
    unsigned num_bytes = 2 + 3 * num_points;
    if (unlikely (!compiled_points.resize (num_bytes, false)))
      return false;

    unsigned pos = 0;
    /* binary data starts with the total number of referenced points */
    if (num_points < 0x80)
      compiled_points.arrayZ[pos++] = num_points;
    else
    {
      compiled_points.arrayZ[pos++] = ((num_points >> 8) | 0x80);
      compiled_points.arrayZ[pos++] = num_points & 0xFF;
    }

    const unsigned max_run_length = 0x7F;
    unsigned i = 0;
    unsigned last_value = 0;
    unsigned num_encoded = 0;
    while (i < indices_length && num_encoded < num_points)
    {
      unsigned run_length = 0;
      unsigned header_pos = pos;
      compiled_points.arrayZ[pos++] = 0;

      bool use_byte_encoding = false;
      bool new_run = true;
      while (i < indices_length && num_encoded < num_points &&
             run_length <= max_run_length)
      {
        // find out next referenced point index
        while (i < indices_length && !point_indices[i])
          i++;

        if (i >= indices_length) break;

        unsigned cur_value = i;
        unsigned delta = cur_value - last_value;

        if (new_run)
        {
          use_byte_encoding = (delta <= 0xFF);
          new_run = false;
        }

        if (use_byte_encoding && delta > 0xFF)
          break;

        if (use_byte_encoding)
          compiled_points.arrayZ[pos++] = delta;
        else
        {
          compiled_points.arrayZ[pos++] = delta >> 8;
          compiled_points.arrayZ[pos++] = delta & 0xFF;
        }
        i++;
        last_value = cur_value;
        run_length++;
        num_encoded++;
      }

      if (use_byte_encoding)
        compiled_points.arrayZ[header_pos] = run_length - 1;
      else
        compiled_points.arrayZ[header_pos] = (run_length - 1) | 0x80;
    }
    return compiled_points.resize (pos, false);
  }
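  /* Example of the encoding above: referenced points {2, 8, 275} compile to
   *
   *   03          point count (fits in one byte since < 0x80)
   *   01 02 06    run of 2 byte-sized deltas (header = count - 1): points 2, 8
   *   80 01 0B    run of 1 word-sized delta (0x80 flag): 275 - 8 = 267 = 0x010B
   *
   * A run switches to 16-bit deltas whenever the gap since the previously
   * encoded point exceeds 255. */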
  static double infer_delta (double target_val, double prev_val, double next_val, double prev_delta, double next_delta)
  {
    if (prev_val == next_val)
      return (prev_delta == next_delta) ? prev_delta : 0.0;
    else if (target_val <= hb_min (prev_val, next_val))
      return (prev_val < next_val) ? prev_delta : next_delta;
    else if (target_val >= hb_max (prev_val, next_val))
      return (prev_val > next_val) ? prev_delta : next_delta;

    double r = (target_val - prev_val) / (next_val - prev_val);
    return prev_delta + r * (next_delta - prev_delta);
  }
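  /* This is the gvar IUP (Interpolation of Untouched Points) rule used by
   * calc_inferred_deltas () above: an unreferenced point whose coordinate lies
   * between its referenced neighbours gets a linearly interpolated delta; a
   * point at or beyond the nearer neighbour copies that neighbour's delta, and
   * neighbours at the same coordinate with different deltas yield 0. */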
  static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
  { return (i >= end) ? start : (i + 1); }
};

template <typename OffType = HBUINT16>
struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // here check on min_size only, TupleVariationHeader and var data will be
    // checked while accessing through iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount.get_count ();
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t;
  struct tuple_variations_t
  {
    hb_vector_t<tuple_delta_t> tuple_vars;

    private:
    /* referenced point set->compiled point data map */
    hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<unsigned char>> point_data_map;
    /* referenced point set-> count map, used in finding shared points */
    hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;

    /* empty for non-gvar tuples.
     * shared_points_bytes is a pointer to some value in the point_data_map,
     * which will be freed during map destruction. Save it for serialization, so
     * no need to do find_shared_points () again */
    hb_vector_t<unsigned char> *shared_points_bytes = nullptr;

    /* total compiled byte size as TupleVariationData format, initialized to 0 */
    unsigned compiled_byte_size = 0;
    bool needs_padding = false;

    /* for gvar iup delta optimization: whether this is a composite glyph */
    bool is_composite = false;

    public:
    tuple_variations_t () = default;
    tuple_variations_t (const tuple_variations_t&) = delete;
    tuple_variations_t& operator=(const tuple_variations_t&) = delete;
    tuple_variations_t (tuple_variations_t&&) = default;
    tuple_variations_t& operator=(tuple_variations_t&&) = default;
    ~tuple_variations_t () = default;

    explicit operator bool () const { return bool (tuple_vars); }
    unsigned get_var_count () const
    {
      unsigned count = 0;
      /* when iup delta opt is enabled, compiled_deltas could be empty and we
       * should skip this tuple */
      for (auto& tuple: tuple_vars)
        if (tuple.compiled_deltas) count++;

      if (shared_points_bytes && shared_points_bytes->length)
        count |= TupleVarCount::SharedPointNumbers;
      return count;
    }

    unsigned get_compiled_byte_size () const
    { return compiled_byte_size; }

    bool create_from_tuple_var_data (tuple_iterator_t iterator,
                                     unsigned tuple_var_count,
                                     unsigned point_count,
                                     bool is_gvar,
                                     const hb_map_t *axes_old_index_tag_map,
                                     const hb_vector_t<unsigned> &shared_indices,
                                     const hb_array_t<const F2DOT14> shared_tuples,
                                     bool is_composite_glyph)
    {
      do
      {
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        hb_hashmap_t<hb_tag_t, Triple> axis_tuples;
        if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples)
            || axis_tuples.is_empty ())
          return false;

        hb_vector_t<unsigned> private_indices;
        bool has_private_points = iterator.current_tuple->has_private_points ();
        const HBUINT8 *end = p + length;
        if (has_private_points &&
            !TupleVariationData::decompile_points (p, private_indices, end))
          return false;

        const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices;
        bool apply_to_all = (indices.length == 0);
        unsigned num_deltas = apply_to_all ? point_count : indices.length;

        hb_vector_t<int> deltas_x;

        if (unlikely (!deltas_x.resize (num_deltas, false) ||
                      !TupleVariationData::decompile_deltas (p, deltas_x, end)))
          return false;

        hb_vector_t<int> deltas_y;
        if (is_gvar)
        {
          if (unlikely (!deltas_y.resize (num_deltas, false) ||
                        !TupleVariationData::decompile_deltas (p, deltas_y, end)))
            return false;
        }

        tuple_delta_t var;
        var.axis_tuples = std::move (axis_tuples);
        if (unlikely (!var.indices.resize (point_count) ||
                      !var.deltas_x.resize (point_count, false)))
          return false;

        if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false)))
          return false;

        for (unsigned i = 0; i < num_deltas; i++)
        {
          unsigned idx = apply_to_all ? i : indices[i];
          if (idx >= point_count) continue;
          var.indices[idx] = true;
          var.deltas_x[idx] = deltas_x[i];
          if (is_gvar)
            var.deltas_y[idx] = deltas_y[i];
        }
        tuple_vars.push (std::move (var));
      } while (iterator.move_to_next ());

      is_composite = is_composite_glyph;
      return true;
    }

    bool create_from_item_var_data (const VarData &var_data,
                                    const hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>>& regions,
                                    const hb_map_t& axes_old_index_tag_map,
                                    unsigned& item_count,
                                    const hb_inc_bimap_t* inner_map = nullptr)
    {
      /* NULL offset, to keep original varidx valid, just return */
      if (&var_data == &Null (VarData))
        return true;

      unsigned num_regions = var_data.get_region_index_count ();
      if (!tuple_vars.alloc (num_regions)) return false;

      item_count = inner_map ? inner_map->get_population () : var_data.get_item_count ();
      if (!item_count) return true;
      unsigned row_size = var_data.get_row_size ();
      const HBUINT8 *delta_bytes = var_data.get_delta_bytes ();

      for (unsigned r = 0; r < num_regions; r++)
      {
        /* In VarData, deltas are organized in rows, convert them into
         * column(region) based tuples, resize deltas_x first */
        tuple_delta_t tuple;
        if (!tuple.deltas_x.resize (item_count, false) ||
            !tuple.indices.resize (item_count, false))
          return false;

        for (unsigned i = 0; i < item_count; i++)
        {
          tuple.indices.arrayZ[i] = true;
          tuple.deltas_x.arrayZ[i] = var_data.get_item_delta_fast (inner_map ? inner_map->backward (i) : i,
                                                                   r, delta_bytes, row_size);
        }

        unsigned region_index = var_data.get_region_index (r);
        if (region_index >= regions.length) return false;
        tuple.axis_tuples = regions.arrayZ[region_index];

        tuple_vars.push (std::move (tuple));
      }
      return !tuple_vars.in_error ();
    }

    private:
    static int _cmp_axis_tag (const void *pa, const void *pb)
    {
      const hb_tag_t *a = (const hb_tag_t*) pa;
      const hb_tag_t *b = (const hb_tag_t*) pb;
      return (int)(*a) - (int)(*b);
    }

    bool change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      /* sort axis_tag/axis_limits, make result deterministic */
      hb_vector_t<hb_tag_t> axis_tags;
      if (!axis_tags.alloc (normalized_axes_location.get_population ()))
        return false;
      for (auto t : normalized_axes_location.keys ())
        axis_tags.push (t);

      axis_tags.qsort (_cmp_axis_tag);
      for (auto axis_tag : axis_tags)
      {
        Triple *axis_limit;
        if (!normalized_axes_location.has (axis_tag, &axis_limit))
          return false;
        TripleDistances axis_triple_distances{1.0, 1.0};
        if (axes_triple_distances.has (axis_tag))
          axis_triple_distances = axes_triple_distances.get (axis_tag);

        hb_vector_t<tuple_delta_t> new_vars;
        for (const tuple_delta_t& var : tuple_vars)
        {
          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, *axis_limit, axis_triple_distances);
          if (!out) continue;

          unsigned new_len = new_vars.length + out.length;

          if (unlikely (!new_vars.alloc (new_len, false)))
            return false;

          for (unsigned i = 0; i < out.length; i++)
            new_vars.push (std::move (out[i]));
        }
        tuple_vars.fini ();
        tuple_vars = std::move (new_vars);
      }
      return true;
    }

    /* merge tuple variations with overlapping tents, if iup delta optimization
     * is enabled, add default deltas to contour_points */
    bool merge_tuple_variations (contour_point_vector_t* contour_points = nullptr)
    {
      hb_vector_t<tuple_delta_t> new_vars;
      hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m;
      unsigned i = 0;
      for (const tuple_delta_t& var : tuple_vars)
      {
        /* if all axes are pinned, drop the tuple variation */
        if (var.axis_tuples.is_empty ())
        {
          /* if iup_delta_optimize is enabled, add deltas to contour coords */
          if (contour_points && !contour_points->add_deltas (var.deltas_x,
                                                             var.deltas_y,
                                                             var.indices))
            return false;
          continue;
        }

        unsigned *idx;
        if (m.has (&(var.axis_tuples), &idx))
        {
          new_vars[*idx] += var;
        }
        else
        {
          new_vars.push (var);
          if (!m.set (&(var.axis_tuples), i))
            return false;
          i++;
        }
      }
      tuple_vars.fini ();
      tuple_vars = std::move (new_vars);
      return true;
    }
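    /* Tuples that end up with identical axis_tuples after instancing are merged
     * by summing their deltas (tuple_delta_t::operator += above); tuples whose
     * region became empty are folded into the glyph's default outline via
     * add_deltas () when IUP optimization is requested, or dropped otherwise. */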
    /* compile all point sets and store byte data in a point_set->hb_bytes_t hashmap,
     * also update the point_set->count map, which will be used in finding the shared
     * point set */
    bool compile_all_point_sets ()
    {
      for (const auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        if (point_data_map.has (points_set))
        {
          unsigned *count;
          if (unlikely (!point_set_count_map.has (points_set, &count) ||
                        !point_set_count_map.set (points_set, (*count) + 1)))
            return false;
          continue;
        }

        hb_vector_t<unsigned char> compiled_point_data;
        if (!tuple_delta_t::compile_point_set (*points_set, compiled_point_data))
          return false;

        if (!point_data_map.set (points_set, std::move (compiled_point_data)) ||
            !point_set_count_map.set (points_set, 1))
          return false;
      }
      return true;
    }

    /* find the shared point set which saves the most bytes */
    void find_shared_points ()
    {
      unsigned max_saved_bytes = 0;

      for (const auto& _ : point_data_map.iter_ref ())
      {
        const hb_vector_t<bool>* points_set = _.first;
        unsigned data_length = _.second.length;
        if (!data_length) continue;
        unsigned *count;
        if (unlikely (!point_set_count_map.has (points_set, &count) ||
                      *count <= 1))
        {
          shared_points_bytes = nullptr;
          return;
        }

        unsigned saved_bytes = data_length * ((*count) - 1);
        if (saved_bytes > max_saved_bytes)
        {
          max_saved_bytes = saved_bytes;
          shared_points_bytes = &(_.second);
        }
      }
    }
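    /* A point set is worth sharing when several tuples reference exactly the
     * same points: emitting its compiled bytes once in the shared area saves
     * data_length * (count - 1) bytes, and the set maximizing that saving is
     * recorded in shared_points_bytes. */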
    bool calc_inferred_deltas (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
        if (!var.calc_inferred_deltas (contour_points))
          return false;

      return true;
    }

    bool iup_optimize (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
      {
        if (!var.optimize (contour_points, is_composite))
          return false;
      }
      return true;
    }

    public:
    bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                      const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances,
                      contour_point_vector_t* contour_points = nullptr,
                      bool optimize = false)
    {
      if (!tuple_vars) return true;
      if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances))
        return false;
      /* compute inferred deltas only for gvar */
      if (contour_points)
        if (!calc_inferred_deltas (*contour_points))
          return false;

      /* if iup delta opt is on, contour_points can't be null */
      if (optimize && !contour_points)
        return false;

      if (!merge_tuple_variations (optimize ? contour_points : nullptr))
        return false;

      if (optimize && !iup_optimize (*contour_points)) return false;
      return !tuple_vars.in_error ();
    }
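    /* instantiate () is the per-glyph instancing pipeline: rebase every tuple
     * onto the new axis limits, infer missing deltas (gvar only, when
     * contour_points is provided), merge tuples that now cover the same
     * region, and finally run IUP delta optimization when requested. */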
    bool compile_bytes (const hb_map_t& axes_index_map,
                        const hb_map_t& axes_old_index_tag_map,
                        bool use_shared_points,
                        bool is_gvar = false,
                        const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map = nullptr)
    {
      // return true for empty glyph
      if (!tuple_vars)
        return true;

      // compile point sets and store data in hashmap
      if (!compile_all_point_sets ())
        return false;

      /* total compiled byte size as TupleVariationData format, initialized to its
       * min_size: 4 */
      compiled_byte_size += 4;

      if (use_shared_points)
      {
        find_shared_points ();
        if (shared_points_bytes)
          compiled_byte_size += shared_points_bytes->length;
      }
      // compile delta and tuple var header for each tuple variation
      for (auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_vector_t<unsigned char> *points_data;
        if (unlikely (!point_data_map.has (points_set, &points_data)))
          return false;

        /* when iup optimization is enabled, num of referenced points could be 0
         * and thus the compiled point data is empty, so we should skip compiling
         * this tuple */
        if (!points_data->length)
          continue;
        if (!tuple.compile_deltas ())
          return false;

        unsigned points_data_length = (points_data != shared_points_bytes) ? points_data->length : 0;
        if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map,
                                             shared_tuples_idx_map))
          return false;
        compiled_byte_size += tuple.compiled_tuple_header.length + points_data_length + tuple.compiled_deltas.length;
      }

      if (is_gvar && (compiled_byte_size % 2))
      {
        needs_padding = true;
        compiled_byte_size += 1;
      }

      return true;
    }

    bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const
    {
      TRACE_SERIALIZE (this);
      for (const auto& tuple: tuple_vars)
      {
        tuple.compiled_tuple_header.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
        total_header_len += tuple.compiled_tuple_header.length;
      }
      return_trace (true);
    }

    bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const
    {
      TRACE_SERIALIZE (this);
      if (is_gvar && shared_points_bytes)
      {
        hb_ubytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length);
        s.copy (c);
      }

      for (const auto& tuple: tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_vector_t<unsigned char> *point_data;
        if (!point_data_map.has (points_set, &point_data))
          return_trace (false);

        if (!is_gvar || point_data != shared_points_bytes)
        {
          hb_ubytes_t s (point_data->arrayZ, point_data->length);
          s.copy (c);
        }

        tuple.compiled_deltas.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
      }

      /* padding for gvar */
      if (is_gvar && needs_padding)
      {
        HBUINT8 pad;
        pad = 0;
        if (!c->embed (pad)) return_trace (false);
      }
      return_trace (true);
    }
  };

  struct tuple_iterator_t
  {
    unsigned get_axis_count () const { return axis_count; }

    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<TupleVariationData> ();
      tuples_left = var_data->tupleVarCount.get_count ();
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
      table_base = table_base_;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(table_base+var_data->data);
        const HBUINT8 *p = base;
        if (!decompile_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid ()
    {
      if (unlikely (tuples_left <= 0))
        return false;

      current_tuple_size = TupleVariationHeader::min_size;
      if (unlikely (!var_data_bytes.check_range (current_tuple, current_tuple_size)))
        return false;

      current_tuple_size = current_tuple->get_size (axis_count);
      if (unlikely (!var_data_bytes.check_range (current_tuple, current_tuple_size)))
        return false;

      return true;
    }

    HB_ALWAYS_INLINE
    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &StructAtOffset<TupleVariationHeader> (current_tuple, current_tuple_size);
      tuples_left--;
      return is_valid ();
    }

    // TODO: Make it return (sanitized) hb_bytes_t
    const HBUINT8 *get_serialized_data () const
    { return &(table_base+var_data->data) + data_offset; }

    private:
    signed tuples_left;
    const TupleVariationData *var_data;
    unsigned int axis_count;
    unsigned int data_offset;
    unsigned int current_tuple_size;
    const void *table_base;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  const void *table_base,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count, table_base);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }
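  /* Typical decode loop over a glyph's serialized tuple variations (sketch;
   * mirrors create_from_tuple_var_data () above):
   *
   *   hb_vector_t<unsigned> shared_indices;
   *   tuple_iterator_t iter;
   *   if (get_tuple_iterator (var_data_bytes, axis_count, table_base,
   *                           shared_indices, &iter))
   *     do {
   *       const HBUINT8 *p = iter.get_serialized_data ();
   *       // unpack axis tuples, point numbers and deltas for iter.current_tuple
   *     } while (iter.move_to_next ());
   */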
1404
0
  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
1405
1406
  static bool decompile_points (const HBUINT8 *&p /* IN/OUT */,
1407
        hb_vector_t<unsigned int> &points /* OUT */,
1408
        const HBUINT8 *end)
1409
0
  {
1410
0
    enum packed_point_flag_t
1411
0
    {
1412
0
      POINTS_ARE_WORDS     = 0x80,
1413
0
      POINT_RUN_COUNT_MASK = 0x7F
1414
0
    };
1415
1416
0
    if (unlikely (p + 1 > end)) return false;
1417
1418
0
    unsigned count = *p++;
1419
0
    if (count & POINTS_ARE_WORDS)
1420
0
    {
1421
0
      if (unlikely (p + 1 > end)) return false;
1422
0
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
1423
0
    }
1424
0
    if (unlikely (!points.resize (count, false))) return false;
1425
1426
0
    unsigned n = 0;
1427
0
    unsigned i = 0;
1428
0
    while (i < count)
1429
0
    {
1430
0
      if (unlikely (p + 1 > end)) return false;
1431
0
      unsigned control = *p++;
1432
0
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
1433
0
      unsigned stop = i + run_count;
1434
0
      if (unlikely (stop > count)) return false;
1435
0
      if (control & POINTS_ARE_WORDS)
1436
0
      {
1437
0
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
1438
0
        for (; i < stop; i++)
1439
0
        {
1440
0
          n += *(const HBUINT16 *)p;
1441
0
          points.arrayZ[i] = n;
1442
0
          p += HBUINT16::static_size;
1443
0
        }
1444
0
      }
1445
0
      else
1446
0
      {
1447
0
        if (unlikely (p + run_count > end)) return false;
1448
0
        for (; i < stop; i++)
1449
0
        {
1450
0
          n += *p++;
1451
0
          points.arrayZ[i] = n;
1452
0
        }
1453
0
      }
1454
0
    }
1455
0
    return true;
1456
0
  }
1457
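For reference, a self-contained sketch of the same packed point-number format using only standard C++ types; the function name, the std::optional error handling and the worked example at the end are illustrative, not part of HarfBuzz:

#include <cstdint>
#include <optional>
#include <vector>

static std::optional<std::vector<unsigned>>
decode_packed_points (const uint8_t *p, const uint8_t *end)
{
  constexpr uint8_t POINTS_ARE_WORDS     = 0x80;
  constexpr uint8_t POINT_RUN_COUNT_MASK = 0x7F;

  /* 1- or 2-byte total point count */
  if (p >= end) return std::nullopt;
  unsigned count = *p++;
  if (count & POINTS_ARE_WORDS)
  {
    if (p >= end) return std::nullopt;
    count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
  }

  /* runs of deltas; each delta accumulates into the next point index */
  std::vector<unsigned> points (count);
  unsigned n = 0, i = 0;
  while (i < count)
  {
    if (p >= end) return std::nullopt;
    uint8_t control = *p++;
    unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
    if (i + run_count > count) return std::nullopt;
    for (unsigned r = 0; r < run_count; r++, i++)
    {
      if (control & POINTS_ARE_WORDS)
      {
        if (p + 2 > end) return std::nullopt;
        n += (unsigned (p[0]) << 8) | p[1];   /* big-endian 16-bit delta */
        p += 2;
      }
      else
      {
        if (p >= end) return std::nullopt;
        n += *p++;                            /* 8-bit delta */
      }
      points[i] = n;
    }
  }
  return points;
}

/* Worked example: {0x03, 0x02, 0x01, 0x02, 0x03} is a count of 3 followed by one
 * byte-sized run of deltas {1, 2, 3}, which decodes to point indices {1, 3, 6}. */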
1458
  template <typename T>
1459
  static bool decompile_deltas (const HBUINT8 *&p /* IN/OUT */,
1460
        hb_vector_t<T> &deltas /* IN/OUT */,
1461
        const HBUINT8 *end,
1462
        bool consume_all = false)
1463
0
  {
1464
0
    return TupleValues::decompile (p, deltas, end, consume_all);
1465
0
  }
1466
1467
0
  bool has_data () const { return tupleVarCount; }
1468
1469
  bool decompile_tuple_variations (unsigned point_count,
1470
                                   bool is_gvar,
1471
                                   tuple_iterator_t iterator,
1472
                                   const hb_map_t *axes_old_index_tag_map,
1473
                                   const hb_vector_t<unsigned> &shared_indices,
1474
                                   const hb_array_t<const F2DOT14> shared_tuples,
1475
                                   tuple_variations_t& tuple_variations, /* OUT */
1476
                                   bool is_composite_glyph = false) const
1477
  {
1478
    return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount,
1479
                                                        point_count, is_gvar,
1480
                                                        axes_old_index_tag_map,
1481
                                                        shared_indices,
1482
                                                        shared_tuples,
1483
                                                        is_composite_glyph);
1484
  }
1485
1486
  bool serialize (hb_serialize_context_t *c,
1487
                  bool is_gvar,
1488
                  const tuple_variations_t& tuple_variations) const
1489
  {
1490
    TRACE_SERIALIZE (this);
1491
    /* if the tuple variations are empty, just return and skip serialization. */
1492
    if (!tuple_variations) return_trace (true);
1493
1494
    auto *out = c->start_embed (this);
1495
    if (unlikely (!c->extend_min (out))) return_trace (false);
1496
1497
    if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (),
1498
                          HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1499
1500
    unsigned total_header_len = 0;
1501
1502
    if (!tuple_variations.serialize_var_headers (c, total_header_len))
1503
      return_trace (false);
1504
1505
    unsigned data_offset = min_size + total_header_len;
1506
    if (!is_gvar) data_offset += 4;
1507
    if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1508
1509
    return tuple_variations.serialize_var_data (c, is_gvar);
1510
  }
1511
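A worked example of the data offset computed above, assuming OffType is HBUINT16 (so min_size is 4) and a made-up 24 bytes of tuple variation headers:

/* Illustrative arithmetic only. */
unsigned min_size         = 2 + 2;                        /* tupleVarCount + HBUINT16 data offset */
unsigned total_header_len = 24;                           /* assumed size of all tuple headers */
unsigned gvar_offset      = min_size + total_header_len;  /* 28: gvar offsets are relative to this struct */
unsigned cvar_offset      = gvar_offset + 4;              /* 32: cvar offsets are measured from the table
                                                           * start, which begins with a 4-byte version field */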
1512
  protected:
1513
  struct TupleVarCount : HBUINT16
1514
  {
1515
    friend struct tuple_variations_t;
1516
0
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
1517
0
    unsigned int get_count () const { return (*this) & CountMask; }
1518
    TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
1519
    explicit operator bool () const { return get_count (); }
1520
1521
    protected:
1522
    enum Flags
1523
    {
1524
      SharedPointNumbers= 0x8000u,
1525
      CountMask         = 0x0FFFu
1526
    };
1527
    public:
1528
    DEFINE_SIZE_STATIC (2);
1529
  };
1530
1531
  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
1532
                                 * low 12 bits are the number of tuple variation tables
1533
                                 * for this glyph. The number of tuple variation tables
1534
                                 * can be any number between 1 and 4095. */
1535
  OffsetTo<HBUINT8, OffType>
1536
                data;           /* Offset from the start of the base table
1537
                                 * to the serialized data. */
1538
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
1539
  public:
1540
  DEFINE_SIZE_MIN (2 + OffType::static_size);
1541
};
1542
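A small illustration of the tupleVarCount packing described above, with a made-up raw value:

/* Illustrative only: decompose a raw tupleVarCount by hand. */
uint16_t raw         = 0x8003;
bool     shared_pts  = raw & 0x8000u;   /* SharedPointNumbers flag is set */
unsigned tuple_count = raw & 0x0FFFu;   /* CountMask -> 3 tuple variation tables */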
1543
// TODO: Move tuple_variations_t to outside of TupleVariationData
1544
using tuple_variations_t = TupleVariationData<HBUINT16>::tuple_variations_t;
1545
struct item_variations_t
1546
{
1547
  using region_t = const hb_hashmap_t<hb_tag_t, Triple>*;
1548
  private:
1549
  /* each subtable is decompiled into a tuple_variations_t, in which all tuples
1550
   * have the same number of deltas (rows) */
1551
  hb_vector_t<tuple_variations_t> vars;
1552
1553
  /* number of retained rows for each subtable; there are 2 cases when var_data is empty:
1554
   * 1. retained item_count is zero
1555
   * 2. regions is empty and item_count is non-zero.
1556
   * when converting to tuples, both will be dropped because the tuple is empty,
1557
   * however, we need to retain case 2 as all-zero rows to keep the original varidx
1558
   * valid, so we need a way to remember the number of rows for each subtable */
1559
  hb_vector_t<unsigned> var_data_num_rows;
1560
1561
  /* original region list, decompiled from item varstore, used when rebuilding
1562
   * region list after instantiation */
1563
  hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>> orig_region_list;
1564
1565
  /* region list: vector of Regions, maintain the original order for the regions
1566
   * that existed before instantiate (), append the new regions at the end.
1567
   * Regions are already stored in each tuple, so only pointers are saved here.
1568
   * When converting back to item varstore, unused regions will be pruned */
1569
  hb_vector_t<region_t> region_list;
1570
1571
  /* region -> idx map after instantiation and pruning unused regions */
1572
  hb_hashmap_t<region_t, unsigned> region_map;
1573
1574
  /* all delta rows after instantiation */
1575
  hb_vector_t<hb_vector_t<int>> delta_rows;
1576
  /* final optimized vector of encoding objects used to assemble the varstore */
1577
  hb_vector_t<delta_row_encoding_t> encodings;
1578
1579
  /* old varidxes -> new var_idxes map */
1580
  hb_map_t varidx_map;
1581
1582
  /* has long words */
1583
  bool has_long = false;
1584
1585
  public:
1586
  bool has_long_word () const
1587
0
  { return has_long; }
1588
1589
  const hb_vector_t<region_t>& get_region_list () const
1590
0
  { return region_list; }
1591
1592
  const hb_vector_t<delta_row_encoding_t>& get_vardata_encodings () const
1593
0
  { return encodings; }
1594
1595
  const hb_map_t& get_varidx_map () const
1596
0
  { return varidx_map; }
1597
1598
  bool instantiate (const ItemVariationStore& varStore,
1599
                    const hb_subset_plan_t *plan,
1600
                    bool optimize=true,
1601
                    bool use_no_variation_idx=true,
1602
                    const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1603
0
  {
1604
0
    if (!create_from_item_varstore (varStore, plan->axes_old_index_tag_map, inner_maps))
1605
0
      return false;
1606
0
    if (!instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances))
1607
0
      return false;
1608
0
    return as_item_varstore (optimize, use_no_variation_idx);
1609
0
  }
1610
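A sketch of the three stages instantiate () chains, written out explicitly; the rebuild_item_varstore wrapper and its surroundings are hypothetical, and callers would normally just use the single instantiate () call above:

static bool
rebuild_item_varstore (const ItemVariationStore &varStore, const hb_subset_plan_t *plan)
{
  item_variations_t item_vars;

  /* equivalent to item_vars.instantiate (varStore, plan) */
  if (!item_vars.create_from_item_varstore (varStore, plan->axes_old_index_tag_map))
    return false;
  if (!item_vars.instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances))
    return false;
  if (!item_vars.as_item_varstore (true /* optimize */, true /* use_no_variation_idx */))
    return false;

  /* outputs consumed when serializing the new store */
  const hb_map_t &varidx_map = item_vars.get_varidx_map ();    /* old VarIdx -> new VarIdx */
  const auto &regions        = item_vars.get_region_list ();
  const auto &encodings      = item_vars.get_vardata_encodings ();
  /* ... hand regions and encodings to the ItemVariationStore serializer ... */
  (void) varidx_map; (void) regions; (void) encodings;
  return true;
}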
1611
  /* keep the APIs below public only for the unit test: test-item-varstore */
1612
  bool create_from_item_varstore (const ItemVariationStore& varStore,
1613
                                  const hb_map_t& axes_old_index_tag_map,
1614
                                  const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1615
0
  {
1616
0
    const VarRegionList& regionList = varStore.get_region_list ();
1617
0
    if (!regionList.get_var_regions (axes_old_index_tag_map, orig_region_list))
1618
0
      return false;
1619
0
1620
0
    unsigned num_var_data = varStore.get_sub_table_count ();
1621
0
    if (inner_maps && inner_maps.length != num_var_data) return false;
1622
0
    if (!vars.alloc (num_var_data) ||
1623
0
        !var_data_num_rows.alloc (num_var_data)) return false;
1624
0
1625
0
    for (unsigned i = 0; i < num_var_data; i++)
1626
0
    {
1627
0
      if (inner_maps && !inner_maps.arrayZ[i].get_population ())
1628
0
          continue;
1629
0
      tuple_variations_t var_data_tuples;
1630
0
      unsigned item_count = 0;
1631
0
      if (!var_data_tuples.create_from_item_var_data (varStore.get_sub_table (i),
1632
0
                                                      orig_region_list,
1633
0
                                                      axes_old_index_tag_map,
1634
0
                                                      item_count,
1635
0
                                                      inner_maps ? &(inner_maps.arrayZ[i]) : nullptr))
1636
0
        return false;
1637
0
1638
0
      var_data_num_rows.push (item_count);
1639
0
      vars.push (std::move (var_data_tuples));
1640
0
    }
1641
0
    return !vars.in_error () && !var_data_num_rows.in_error () && vars.length == var_data_num_rows.length;
1642
0
  }
1643
1644
  bool instantiate_tuple_vars (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
1645
                               const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
1646
0
  {
1647
0
    for (tuple_variations_t& tuple_vars : vars)
1648
0
      if (!tuple_vars.instantiate (normalized_axes_location, axes_triple_distances))
1649
0
        return false;
1650
0
1651
0
    if (!build_region_list ()) return false;
1652
0
    return true;
1653
0
  }
1654
1655
  bool build_region_list ()
1656
0
  {
1657
0
    /* scan all tuples and collect all unique regions, prune unused regions */
1658
0
    hb_hashmap_t<region_t, unsigned> all_regions;
1659
0
    hb_hashmap_t<region_t, unsigned> used_regions;
1660
0
1661
0
    /* use a vector when inserting new regions to make the result deterministic */
1662
0
    hb_vector_t<region_t> all_unique_regions;
1663
0
    for (const tuple_variations_t& sub_table : vars)
1664
0
    {
1665
0
      for (const tuple_delta_t& tuple : sub_table.tuple_vars)
1666
0
      {
1667
0
        region_t r = &(tuple.axis_tuples);
1668
0
        if (!used_regions.has (r))
1669
0
        {
1670
0
          bool all_zeros = true;
1671
0
          for (float d : tuple.deltas_x)
1672
0
          {
1673
0
            int delta = (int) roundf (d);
1674
0
            if (delta != 0)
1675
0
            {
1676
0
              all_zeros = false;
1677
0
              break;
1678
0
            }
1679
0
          }
1680
0
          if (!all_zeros)
1681
0
          {
1682
0
            if (!used_regions.set (r, 1))
1683
0
              return false;
1684
0
          }
1685
0
        }
1686
0
        if (all_regions.has (r))
1687
0
          continue;
1688
0
        if (!all_regions.set (r, 1))
1689
0
          return false;
1690
0
        all_unique_regions.push (r);
1691
0
      }
1692
0
    }
1693
0
1694
0
    /* empty regions means there is no variation data; return true */
1695
0
    if (!all_regions || !all_unique_regions) return true;
1696
0
1697
0
    if (!region_list.alloc (all_regions.get_population ()))
1698
0
      return false;
1699
0
1700
0
    unsigned idx = 0;
1701
0
    /* append the original regions that pre-existed */
1702
0
    for (const auto& r : orig_region_list)
1703
0
    {
1704
0
      if (!all_regions.has (&r) || !used_regions.has (&r))
1705
0
        continue;
1706
0
1707
0
      region_list.push (&r);
1708
0
      if (!region_map.set (&r, idx))
1709
0
        return false;
1710
0
      all_regions.del (&r);
1711
0
      idx++;
1712
0
    }
1713
0
1714
0
    /* append the new regions at the end */
1715
0
    for (const auto& r: all_unique_regions)
1716
0
    {
1717
0
      if (!all_regions.has (r) || !used_regions.has (r))
1718
0
        continue;
1719
0
      region_list.push (r);
1720
0
      if (!region_map.set (r, idx))
1721
0
        return false;
1722
0
      all_regions.del (r);
1723
0
      idx++;
1724
0
    }
1725
0
    return (!region_list.in_error ()) && (!region_map.in_error ());
1726
0
  }
1727
1728
  /* main algorithm ported from the fonttools VarStore_optimize() method; the
1729
   * varstore is optimized by default */
1730
1731
  struct combined_gain_idx_tuple_t
1732
  {
1733
    int gain;
1734
    unsigned idx_1;
1735
    unsigned idx_2;
1736
1737
    combined_gain_idx_tuple_t () = default;
1738
    combined_gain_idx_tuple_t (int gain_, unsigned i, unsigned j)
1739
0
        :gain (gain_), idx_1 (i), idx_2 (j) {}
1740
1741
    bool operator < (const combined_gain_idx_tuple_t& o)
1742
0
    {
1743
0
      if (gain != o.gain)
1744
0
        return gain < o.gain;
1745
0
1746
0
      if (idx_1 != o.idx_1)
1747
0
        return idx_1 < o.idx_1;
1748
0
1749
0
      return idx_2 < o.idx_2;
1750
0
    }
1751
1752
    bool operator <= (const combined_gain_idx_tuple_t& o)
1753
0
    {
1754
0
      if (*this < o) return true;
1755
0
      return gain == o.gain && idx_1 == o.idx_1 && idx_2 == o.idx_2;
1756
0
    }
1757
  };
1758
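The merge loop below stores negated gains in a minimum-extraction queue, so the pair whose merge currently saves the most bytes is popped first; a standalone sketch of that ordering with std::priority_queue (illustrative values, not the hb_priority_queue_t API):

#include <cstdio>
#include <functional>
#include <queue>
#include <tuple>
#include <vector>

int main ()
{
  using candidate_t = std::tuple<int, unsigned, unsigned>;   /* (-gain, idx_1, idx_2) */
  std::priority_queue<candidate_t, std::vector<candidate_t>,
                      std::greater<candidate_t>> queue;      /* min-heap */

  queue.push ({-12, 0, 3});   /* merging encodings 0 and 3 would save 12 bytes */
  queue.push ({-40, 1, 2});   /* merging 1 and 2 would save 40 bytes */
  queue.push ({-40, 0, 1});   /* equal gain: lower indices win, as in operator< above */

  auto [neg_gain, i, j] = queue.top ();
  printf ("best merge: %u + %u, gain %d\n", i, j, -neg_gain);  /* best merge: 0 + 1, gain 40 */
  return 0;
}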
1759
  bool as_item_varstore (bool optimize=true, bool use_no_variation_idx=true)
1760
0
  {
1761
0
    /* return true if no variation data */
1762
0
    if (!region_list) return true;
1763
0
    unsigned num_cols = region_list.length;
1764
0
    /* pre-alloc a 2D vector for all sub_table's VarData rows */
1765
0
    unsigned total_rows = 0;
1766
0
    for (unsigned major = 0; major < var_data_num_rows.length; major++)
1767
0
      total_rows += var_data_num_rows[major];
1768
0
1769
0
    if (!delta_rows.resize (total_rows)) return false;
1770
0
    /* init all rows to [0]*num_cols */
1771
0
    for (unsigned i = 0; i < total_rows; i++)
1772
0
      if (!(delta_rows[i].resize (num_cols))) return false;
1773
0
1774
0
    /* old VarIdxes -> full encoding_row mapping */
1775
0
    hb_hashmap_t<unsigned, const hb_vector_t<int>*> front_mapping;
1776
0
    unsigned start_row = 0;
1777
0
    hb_vector_t<delta_row_encoding_t> encoding_objs;
1778
0
    hb_hashmap_t<hb_vector_t<uint8_t>, unsigned> chars_idx_map;
1779
0
1780
0
    /* delta_rows map, used for filtering out duplicate rows */
1781
0
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> delta_rows_map;
1782
0
    for (unsigned major = 0; major < vars.length; major++)
1783
0
    {
1784
0
      /* deltas are stored in tuples(column based), convert them back into items
1785
0
       * (row based) delta */
1786
0
      const tuple_variations_t& tuples = vars[major];
1787
0
      unsigned num_rows = var_data_num_rows[major];
1788
0
      for (const tuple_delta_t& tuple: tuples.tuple_vars)
1789
0
      {
1790
0
        if (tuple.deltas_x.length != num_rows)
1791
0
          return false;
1792
0
1793
0
        /* skip unused regions */
1794
0
        unsigned *col_idx;
1795
0
        if (!region_map.has (&(tuple.axis_tuples), &col_idx))
1796
0
          continue;
1797
0
1798
0
        for (unsigned i = 0; i < num_rows; i++)
1799
0
        {
1800
0
          int rounded_delta = roundf (tuple.deltas_x[i]);
1801
0
          delta_rows[start_row + i][*col_idx] += rounded_delta;
1802
0
          if ((!has_long) && (rounded_delta < -65536 || rounded_delta > 65535))
1803
0
            has_long = true;
1804
0
        }
1805
0
      }
1806
0
1807
0
      if (!optimize)
1808
0
      {
1809
0
        /* assemble a delta_row_encoding_t for this subtable; optimization is skipped, so
1810
0
         * chars is not initialized and we only need the delta rows for serialization */
1811
0
        delta_row_encoding_t obj;
1812
0
        for (unsigned r = start_row; r < start_row + num_rows; r++)
1813
0
          obj.add_row (&(delta_rows.arrayZ[r]));
1814
0
1815
0
        encodings.push (std::move (obj));
1816
0
        start_row += num_rows;
1817
0
        continue;
1818
0
      }
1819
0
1820
0
      for (unsigned minor = 0; minor < num_rows; minor++)
1821
0
      {
1822
0
        const hb_vector_t<int>& row = delta_rows[start_row + minor];
1823
0
        if (use_no_variation_idx)
1824
0
        {
1825
0
          bool all_zeros = true;
1826
0
          for (int delta : row)
1827
0
          {
1828
0
            if (delta != 0)
1829
0
            {
1830
0
              all_zeros = false;
1831
0
              break;
1832
0
            }
1833
0
          }
1834
0
          if (all_zeros)
1835
0
            continue;
1836
0
        }
1837
0
1838
0
        if (!front_mapping.set ((major<<16) + minor, &row))
1839
0
          return false;
1840
0
1841
0
        hb_vector_t<uint8_t> chars = delta_row_encoding_t::get_row_chars (row);
1842
0
        if (!chars) return false;
1843
0
1844
0
        if (delta_rows_map.has (&row))
1845
0
          continue;
1846
0
1847
0
        delta_rows_map.set (&row, 1);
1848
0
        unsigned *obj_idx;
1849
0
        if (chars_idx_map.has (chars, &obj_idx))
1850
0
        {
1851
0
          delta_row_encoding_t& obj = encoding_objs[*obj_idx];
1852
0
          if (!obj.add_row (&row))
1853
0
            return false;
1854
0
        }
1855
0
        else
1856
0
        {
1857
0
          if (!chars_idx_map.set (chars, encoding_objs.length))
1858
0
            return false;
1859
0
          delta_row_encoding_t obj (std::move (chars), &row);
1860
0
          encoding_objs.push (std::move (obj));
1861
0
        }
1862
0
      }
1863
0
1864
0
      start_row += num_rows;
1865
0
    }
1866
0
1867
0
    /* return directly if no optimization, maintain original VariationIndex so
1868
0
     * varidx_map would be empty */
1869
0
    if (!optimize) return !encodings.in_error ();
1870
0
1871
0
    /* sort encoding_objs */
1872
0
    encoding_objs.qsort ();
1873
0
1874
0
    /* main algorithm: repeatedly pick 2 best encodings to combine, and combine
1875
0
     * them */
1876
0
    hb_priority_queue_t<combined_gain_idx_tuple_t> queue;
1877
0
    unsigned num_todos = encoding_objs.length;
1878
0
    for (unsigned i = 0; i < num_todos; i++)
1879
0
    {
1880
0
      for (unsigned j = i + 1; j < num_todos; j++)
1881
0
      {
1882
0
        int combining_gain = encoding_objs.arrayZ[i].gain_from_merging (encoding_objs.arrayZ[j]);
1883
0
        if (combining_gain > 0)
1884
0
          queue.insert (combined_gain_idx_tuple_t (-combining_gain, i, j), 0);
1885
0
      }
1886
0
    }
1887
0
1888
0
    hb_set_t removed_todo_idxes;
1889
0
    while (queue)
1890
0
    {
1891
0
      auto t = queue.pop_minimum ().first;
1892
0
      unsigned i = t.idx_1;
1893
0
      unsigned j = t.idx_2;
1894
0
1895
0
      if (removed_todo_idxes.has (i) || removed_todo_idxes.has (j))
1896
0
        continue;
1897
0
1898
0
      delta_row_encoding_t& encoding = encoding_objs.arrayZ[i];
1899
0
      delta_row_encoding_t& other_encoding = encoding_objs.arrayZ[j];
1900
0
1901
0
      removed_todo_idxes.add (i);
1902
0
      removed_todo_idxes.add (j);
1903
0
1904
0
      hb_vector_t<uint8_t> combined_chars;
1905
0
      if (!combined_chars.alloc (encoding.chars.length))
1906
0
        return false;
1907
0
1908
0
      for (unsigned idx = 0; idx < encoding.chars.length; idx++)
1909
0
      {
1910
0
        uint8_t v = hb_max (encoding.chars.arrayZ[idx], other_encoding.chars.arrayZ[idx]);
1911
0
        combined_chars.push (v);
1912
0
      }
1913
0
1914
0
      delta_row_encoding_t combined_encoding_obj (std::move (combined_chars));
1915
0
      for (const auto& row : hb_concat (encoding.items, other_encoding.items))
1916
0
        combined_encoding_obj.add_row (row);
1917
0
1918
0
      for (unsigned idx = 0; idx < encoding_objs.length; idx++)
1919
0
      {
1920
0
        if (removed_todo_idxes.has (idx)) continue;
1921
0
1922
0
        const delta_row_encoding_t& obj = encoding_objs.arrayZ[idx];
1923
0
        if (obj.chars == combined_chars)
1924
0
        {
1925
0
          for (const auto& row : obj.items)
1926
0
            combined_encoding_obj.add_row (row);
1927
0
1928
0
          removed_todo_idxes.add (idx);
1929
0
          continue;
1930
0
        }
1931
0
1932
0
        int combined_gain = combined_encoding_obj.gain_from_merging (obj);
1933
0
        if (combined_gain > 0)
1934
0
          queue.insert (combined_gain_idx_tuple_t (-combined_gain, idx, encoding_objs.length), 0);
1935
0
      }
1936
0
1937
0
      encoding_objs.push (std::move (combined_encoding_obj));
1938
0
    }
1939
0
1940
0
    int num_final_encodings = (int) encoding_objs.length - (int) removed_todo_idxes.get_population ();
1941
0
    if (num_final_encodings <= 0) return false;
1942
0
1943
0
    if (!encodings.alloc (num_final_encodings)) return false;
1944
0
    for (unsigned i = 0; i < encoding_objs.length; i++)
1945
0
    {
1946
0
      if (removed_todo_idxes.has (i)) continue;
1947
0
      encodings.push (std::move (encoding_objs.arrayZ[i]));
1948
0
    }
1949
0
1950
0
    /* sort again based on width to make the result deterministic */
1951
0
    encodings.qsort (delta_row_encoding_t::cmp_width);
1952
0
1953
0
    return compile_varidx_map (front_mapping);
1954
0
  }
1955
1956
  private:
1957
  /* compile varidx_map: map each old VarIdx to its new (major << 16) + minor index */
1958
  bool compile_varidx_map (const hb_hashmap_t<unsigned, const hb_vector_t<int>*>& front_mapping)
1959
0
  {
1960
0
    /* full encoding_row -> new VarIdxes mapping */
1961
0
    hb_hashmap_t<const hb_vector_t<int>*, unsigned> back_mapping;
1962
0
1963
0
    for (unsigned major = 0; major < encodings.length; major++)
1964
0
    {
1965
0
      delta_row_encoding_t& encoding = encodings[major];
1966
0
      /* just sanity check, this shouldn't happen */
1967
0
      if (encoding.is_empty ())
1968
0
        return false;
1969
0
1970
0
      unsigned num_rows = encoding.items.length;
1971
0
1972
0
      /* sort rows to make the result deterministic */
1973
0
      encoding.items.qsort (_cmp_row);
1974
0
1975
0
      /* compile old to new var_idxes mapping */
1976
0
      for (unsigned minor = 0; minor < num_rows; minor++)
1977
0
      {
1978
0
        unsigned new_varidx = (major << 16) + minor;
1979
0
        back_mapping.set (encoding.items.arrayZ[minor], new_varidx);
1980
0
      }
1981
0
    }
1982
0
1983
0
    for (auto _ : front_mapping.iter ())
1984
0
    {
1985
0
      unsigned old_varidx = _.first;
1986
0
      unsigned *new_varidx;
1987
0
      if (back_mapping.has (_.second, &new_varidx))
1988
0
        varidx_map.set (old_varidx, *new_varidx);
1989
0
      else
1990
0
        varidx_map.set (old_varidx, HB_OT_LAYOUT_NO_VARIATIONS_INDEX);
1991
0
    }
1992
0
    return !varidx_map.in_error ();
1993
0
  }
1994
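The (major << 16) + minor packing used above is the usual outer/inner split of a 32-bit VariationIndex; a short illustration with a made-up value:

/* Illustrative only: the packed VarIdx 0x00030007 addresses row 7 (inner/minor)
 * of VarData subtable 3 (outer/major) in the rebuilt ItemVariationStore. */
uint32_t new_varidx = (3u << 16) + 7u;        /* == 0x00030007 */
unsigned outer      = new_varidx >> 16;       /* 3 */
unsigned inner      = new_varidx & 0xFFFFu;   /* 7 */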
1995
  static int _cmp_row (const void *pa, const void *pb)
1996
0
  {
1997
0
    /* compare pointers to vectors (const hb_vector_t<int> *) that each represent a row */
1998
0
    const hb_vector_t<int>** a = (const hb_vector_t<int>**) pa;
1999
0
    const hb_vector_t<int>** b = (const hb_vector_t<int>**) pb;
2000
0
2001
0
    for (unsigned i = 0; i < (*b)->length; i++)
2002
0
    {
2003
0
      int va = (*a)->arrayZ[i];
2004
0
      int vb = (*b)->arrayZ[i];
2005
0
      if (va != vb)
2006
0
        return va < vb ? -1 : 1;
2007
0
    }
2008
0
    return 0;
2009
0
  }
2010
};
2011
2012
2013
} /* namespace OT */
2014
2015
2016
#endif /* HB_OT_VAR_COMMON_HH */