Coverage Report

Created: 2026-03-12 07:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/libjxl/lib/jxl/modular/encoding/enc_ma.cc
Line
Count
Source
1
// Copyright (c) the JPEG XL Project Authors. All rights reserved.
2
//
3
// Use of this source code is governed by a BSD-style
4
// license that can be found in the LICENSE file.
5
6
#include "lib/jxl/modular/encoding/enc_ma.h"
7
8
#include <algorithm>
9
#include <cstdint>
10
#include <cstdlib>
11
#include <cstring>
12
#include <limits>
13
#include <numeric>
14
#include <queue>
15
#include <vector>
16
17
#include "lib/jxl/ans_params.h"
18
#include "lib/jxl/base/bits.h"
19
#include "lib/jxl/base/common.h"
20
#include "lib/jxl/base/compiler_specific.h"
21
#include "lib/jxl/base/status.h"
22
#include "lib/jxl/dec_ans.h"
23
#include "lib/jxl/modular/encoding/dec_ma.h"
24
#include "lib/jxl/modular/encoding/ma_common.h"
25
#include "lib/jxl/modular/modular_image.h"
26
27
#undef HWY_TARGET_INCLUDE
28
#define HWY_TARGET_INCLUDE "lib/jxl/modular/encoding/enc_ma.cc"
29
#include <hwy/foreach_target.h>
30
#include <hwy/highway.h>
31
32
#include "lib/jxl/base/fast_math-inl.h"
33
#include "lib/jxl/base/random.h"
34
#include "lib/jxl/enc_ans.h"
35
#include "lib/jxl/modular/encoding/context_predict.h"
36
#include "lib/jxl/modular/options.h"
37
#include "lib/jxl/pack_signed.h"
38
HWY_BEFORE_NAMESPACE();
39
namespace jxl {
40
namespace HWY_NAMESPACE {
41
42
// These templates are not found via ADL.
43
using hwy::HWY_NAMESPACE::Eq;
44
using hwy::HWY_NAMESPACE::IfThenElse;
45
using hwy::HWY_NAMESPACE::Lt;
46
using hwy::HWY_NAMESPACE::Max;
47
48
const HWY_FULL(float) df;
49
const HWY_FULL(int32_t) di;
50
0
size_t Padded(size_t x) { return RoundUpTo(x, Lanes(df)); }
Unexecuted instantiation: jxl::N_SSE4::Padded(unsigned long)
Unexecuted instantiation: jxl::N_AVX2::Padded(unsigned long)
Unexecuted instantiation: jxl::N_SSE2::Padded(unsigned long)
51
52
// Compute entropy of the histogram, taking into account the minimum probability
53
// for symbols with non-zero counts.
54
0
float EstimateBits(const int32_t *counts, size_t num_symbols) {
55
0
  int32_t total = std::accumulate(counts, counts + num_symbols, 0);
56
0
  const auto zero = Zero(df);
57
0
  const auto minprob = Set(df, 1.0f / ANS_TAB_SIZE);
58
0
  const auto inv_total = Set(df, 1.0f / total);
59
0
  auto bits_lanes = Zero(df);
60
0
  auto total_v = Set(di, total);
61
0
  for (size_t i = 0; i < num_symbols; i += Lanes(df)) {
62
0
    const auto counts_iv = LoadU(di, &counts[i]);
63
0
    const auto counts_fv = ConvertTo(df, counts_iv);
64
0
    const auto probs = Mul(counts_fv, inv_total);
65
0
    const auto mprobs = Max(probs, minprob);
66
0
    const auto nbps = IfThenElse(Eq(counts_iv, total_v), BitCast(di, zero),
67
0
                                 BitCast(di, FastLog2f(df, mprobs)));
68
0
    bits_lanes = Sub(bits_lanes, Mul(counts_fv, BitCast(df, nbps)));
69
0
  }
70
0
  return GetLane(SumOfLanes(df, bits_lanes));
71
0
}
Unexecuted instantiation: jxl::N_SSE4::EstimateBits(int const*, unsigned long)
Unexecuted instantiation: jxl::N_AVX2::EstimateBits(int const*, unsigned long)
Unexecuted instantiation: jxl::N_SSE2::EstimateBits(int const*, unsigned long)
72
73
void MakeSplitNode(size_t pos, int property, int splitval, Predictor lpred,
74
0
                   int64_t loff, Predictor rpred, int64_t roff, Tree *tree) {
75
  // Note that the tree splits on *strictly greater*.
76
0
  (*tree)[pos].lchild = tree->size();
77
0
  (*tree)[pos].rchild = tree->size() + 1;
78
0
  (*tree)[pos].splitval = splitval;
79
0
  (*tree)[pos].property = property;
80
0
  tree->emplace_back();
81
0
  tree->back().property = -1;
82
0
  tree->back().predictor = rpred;
83
0
  tree->back().predictor_offset = roff;
84
0
  tree->back().multiplier = 1;
85
0
  tree->emplace_back();
86
0
  tree->back().property = -1;
87
0
  tree->back().predictor = lpred;
88
0
  tree->back().predictor_offset = loff;
89
0
  tree->back().multiplier = 1;
90
0
}
Unexecuted instantiation: jxl::N_SSE4::MakeSplitNode(unsigned long, int, int, jxl::Predictor, long, jxl::Predictor, long, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
Unexecuted instantiation: jxl::N_AVX2::MakeSplitNode(unsigned long, int, int, jxl::Predictor, long, jxl::Predictor, long, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
Unexecuted instantiation: jxl::N_SSE2::MakeSplitNode(unsigned long, int, int, jxl::Predictor, long, jxl::Predictor, long, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
91
92
enum class IntersectionType { kNone, kPartial, kInside };
93
IntersectionType BoxIntersects(StaticPropRange needle, StaticPropRange haystack,
94
0
                               uint32_t &partial_axis, uint32_t &partial_val) {
95
0
  bool partial = false;
96
0
  for (size_t i = 0; i < kNumStaticProperties; i++) {
97
0
    if (haystack[i][0] >= needle[i][1]) {
98
0
      return IntersectionType::kNone;
99
0
    }
100
0
    if (haystack[i][1] <= needle[i][0]) {
101
0
      return IntersectionType::kNone;
102
0
    }
103
0
    if (haystack[i][0] <= needle[i][0] && haystack[i][1] >= needle[i][1]) {
104
0
      continue;
105
0
    }
106
0
    partial = true;
107
0
    partial_axis = i;
108
0
    if (haystack[i][0] > needle[i][0] && haystack[i][0] < needle[i][1]) {
109
0
      partial_val = haystack[i][0] - 1;
110
0
    } else {
111
0
      JXL_DASSERT(haystack[i][1] > needle[i][0] &&
112
0
                  haystack[i][1] < needle[i][1]);
113
0
      partial_val = haystack[i][1] - 1;
114
0
    }
115
0
  }
116
0
  return partial ? IntersectionType::kPartial : IntersectionType::kInside;
117
0
}
Unexecuted instantiation: jxl::N_SSE4::BoxIntersects(std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, unsigned int&, unsigned int&)
Unexecuted instantiation: jxl::N_AVX2::BoxIntersects(std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, unsigned int&, unsigned int&)
Unexecuted instantiation: jxl::N_SSE2::BoxIntersects(std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, unsigned int&, unsigned int&)
118
119
template<bool S>
120
void SplitTreeSamples(TreeSamples &tree_samples, size_t begin, size_t pos,
121
0
                      size_t end, size_t prop, uint32_t val) {
122
0
  size_t begin_pos = begin;
123
0
  size_t end_pos = pos;
124
0
  do {
125
0
    while (begin_pos < pos &&
126
0
           tree_samples.Property<S>(prop, begin_pos) <= val) {
127
0
      ++begin_pos;
128
0
    }
129
0
    while (end_pos < end && tree_samples.Property<S>(prop, end_pos) > val) {
130
0
      ++end_pos;
131
0
    }
132
0
    if (begin_pos < pos && end_pos < end) {
133
0
      tree_samples.Swap(begin_pos, end_pos);
134
0
    }
135
0
    ++begin_pos;
136
0
    ++end_pos;
137
0
  } while (begin_pos < pos && end_pos < end);
138
0
}
Unexecuted instantiation: void jxl::N_SSE4::SplitTreeSamples<true>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
Unexecuted instantiation: void jxl::N_SSE4::SplitTreeSamples<false>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
Unexecuted instantiation: void jxl::N_AVX2::SplitTreeSamples<true>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
Unexecuted instantiation: void jxl::N_AVX2::SplitTreeSamples<false>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
Unexecuted instantiation: void jxl::N_SSE2::SplitTreeSamples<true>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
Unexecuted instantiation: void jxl::N_SSE2::SplitTreeSamples<false>(jxl::TreeSamples&, unsigned long, unsigned long, unsigned long, unsigned long, unsigned int)
139
140
template <bool S>
141
void CollectExtraBitsIncrease(TreeSamples &tree_samples,
142
                              const std::vector<ResidualToken> &rtokens,
143
                              std::vector<int> &count_increase,
144
                              std::vector<size_t> &extra_bits_increase,
145
                              size_t begin, size_t end, size_t prop_idx,
146
0
                              size_t max_symbols) {
147
0
  for (size_t i2 = begin; i2 < end; i2++) {
148
0
    const ResidualToken &rt = rtokens[i2];
149
0
    size_t cnt = tree_samples.Count(i2);
150
0
    size_t p = tree_samples.Property<S>(prop_idx, i2);
151
0
    size_t sym = rt.tok;
152
0
    size_t ebi = rt.nbits * cnt;
153
0
    count_increase[p * max_symbols + sym] += cnt;
154
0
    extra_bits_increase[p] += ebi;
155
0
  }
156
0
}
Unexecuted instantiation: void jxl::N_SSE4::CollectExtraBitsIncrease<true>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
Unexecuted instantiation: void jxl::N_SSE4::CollectExtraBitsIncrease<false>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
Unexecuted instantiation: void jxl::N_AVX2::CollectExtraBitsIncrease<true>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
Unexecuted instantiation: void jxl::N_AVX2::CollectExtraBitsIncrease<false>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
Unexecuted instantiation: void jxl::N_SSE2::CollectExtraBitsIncrease<true>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
Unexecuted instantiation: void jxl::N_SSE2::CollectExtraBitsIncrease<false>(jxl::TreeSamples&, std::__1::vector<jxl::ResidualToken, std::__1::allocator<jxl::ResidualToken> > const&, std::__1::vector<int, std::__1::allocator<int> >&, std::__1::vector<unsigned long, std::__1::allocator<unsigned long> >&, unsigned long, unsigned long, unsigned long, unsigned long)
157
158
void FindBestSplit(TreeSamples &tree_samples, float threshold,
159
                   const std::vector<ModularMultiplierInfo> &mul_info,
160
                   StaticPropRange initial_static_prop_range,
161
0
                   float fast_decode_multiplier, Tree *tree) {
162
0
  struct NodeInfo {
163
0
    size_t pos;
164
0
    size_t begin;
165
0
    size_t end;
166
0
    StaticPropRange static_prop_range;
167
0
  };
168
0
  std::vector<NodeInfo> nodes;
169
0
  nodes.push_back(NodeInfo{0, 0, tree_samples.NumDistinctSamples(),
170
0
                           initial_static_prop_range});
171
172
0
  size_t num_predictors = tree_samples.NumPredictors();
173
0
  size_t num_properties = tree_samples.NumProperties();
174
175
  // TODO(veluca): consider parallelizing the search (processing multiple nodes
176
  // at a time).
177
0
  while (!nodes.empty()) {
178
0
    size_t pos = nodes.back().pos;
179
0
    size_t begin = nodes.back().begin;
180
0
    size_t end = nodes.back().end;
181
182
0
    StaticPropRange static_prop_range = nodes.back().static_prop_range;
183
0
    nodes.pop_back();
184
0
    if (begin == end) continue;
185
186
0
    struct SplitInfo {
187
0
      size_t prop = 0;
188
0
      uint32_t val = 0;
189
0
      size_t pos = 0;
190
0
      float lcost = std::numeric_limits<float>::max();
191
0
      float rcost = std::numeric_limits<float>::max();
192
0
      Predictor lpred = Predictor::Zero;
193
0
      Predictor rpred = Predictor::Zero;
194
0
      float Cost() const { return lcost + rcost; }
Unexecuted instantiation: enc_ma.cc:jxl::N_SSE4::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::SplitInfo::Cost() const
Unexecuted instantiation: enc_ma.cc:jxl::N_AVX2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::SplitInfo::Cost() const
Unexecuted instantiation: enc_ma.cc:jxl::N_SSE2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::SplitInfo::Cost() const
195
0
    };
196
197
0
    SplitInfo best_split_static_constant;
198
0
    SplitInfo best_split_static;
199
0
    SplitInfo best_split_nonstatic;
200
0
    SplitInfo best_split_nowp;
201
202
0
    JXL_DASSERT(begin <= end);
203
0
    JXL_DASSERT(end <= tree_samples.NumDistinctSamples());
204
205
    // Compute the maximum token in the range.
206
0
    size_t max_symbols = 0;
207
0
    for (size_t pred = 0; pred < num_predictors; pred++) {
208
0
      for (size_t i = begin; i < end; i++) {
209
0
        uint32_t tok = tree_samples.Token(pred, i);
210
0
        max_symbols = max_symbols > tok + 1 ? max_symbols : tok + 1;
211
0
      }
212
0
    }
213
0
    max_symbols = Padded(max_symbols);
214
0
    std::vector<int32_t> counts(max_symbols * num_predictors);
215
0
    std::vector<uint32_t> tot_extra_bits(num_predictors);
216
0
    for (size_t pred = 0; pred < num_predictors; pred++) {
217
0
      size_t extra_bits = 0;
218
0
      const std::vector<ResidualToken>& rtokens = tree_samples.RTokens(pred);
219
0
      for (size_t i = begin; i < end; i++) {
220
0
        const ResidualToken& rt = rtokens[i];
221
0
        size_t count = tree_samples.Count(i);
222
0
        size_t eb = rt.nbits * count;
223
0
        counts[pred * max_symbols + rt.tok] += count;
224
0
        extra_bits += eb;
225
0
      }
226
0
      tot_extra_bits[pred] = extra_bits;
227
0
    }
228
229
0
    float base_bits;
230
0
    {
231
0
      size_t pred = tree_samples.PredictorIndex((*tree)[pos].predictor);
232
0
      base_bits =
233
0
          EstimateBits(counts.data() + pred * max_symbols, max_symbols) +
234
0
          tot_extra_bits[pred];
235
0
    }
236
237
0
    SplitInfo *best = &best_split_nonstatic;
238
239
0
    SplitInfo forced_split;
240
    // The multiplier ranges cut halfway through the current ranges of static
241
    // properties. We do this even if the current node is not a leaf, to
242
    // minimize the number of nodes in the resulting tree.
243
0
    for (const auto &mmi : mul_info) {
244
0
      uint32_t axis;
245
0
      uint32_t val;
246
0
      IntersectionType t =
247
0
          BoxIntersects(static_prop_range, mmi.range, axis, val);
248
0
      if (t == IntersectionType::kNone) continue;
249
0
      if (t == IntersectionType::kInside) {
250
0
        (*tree)[pos].multiplier = mmi.multiplier;
251
0
        break;
252
0
      }
253
0
      if (t == IntersectionType::kPartial) {
254
0
        JXL_DASSERT(axis < kNumStaticProperties);
255
0
        forced_split.val = tree_samples.QuantizeStaticProperty(axis, val);
256
0
        forced_split.prop = axis;
257
0
        forced_split.lcost = forced_split.rcost = base_bits / 2 - threshold;
258
0
        forced_split.lpred = forced_split.rpred = (*tree)[pos].predictor;
259
0
        best = &forced_split;
260
0
        best->pos = begin;
261
0
        JXL_DASSERT(best->prop == tree_samples.PropertyFromIndex(best->prop));
262
0
        if (best->prop < tree_samples.NumStaticProps()) {
263
0
        for (size_t x = begin; x < end; x++) {
264
0
          if (tree_samples.Property<true>(best->prop, x) <= best->val) {
265
0
            best->pos++;
266
0
          }
267
0
        }
268
0
      } else {
269
0
        size_t prop = best->prop - tree_samples.NumStaticProps();
270
0
        for (size_t x = begin; x < end; x++) {
271
0
          if (tree_samples.Property<false>(prop, x) <= best->val) {
272
0
            best->pos++;
273
0
          }
274
0
        }
275
0
      }
276
0
        break;
277
0
      }
278
0
    }
279
280
0
    if (best != &forced_split) {
281
0
      std::vector<int> prop_value_used_count;
282
0
      std::vector<int> count_increase;
283
0
      std::vector<size_t> extra_bits_increase;
284
      // For each property, compute which of its values are used, and what
285
      // tokens correspond to those usages. Then, iterate through the values,
286
      // and compute the entropy of each side of the split (of the form `prop >
287
      // threshold`). Finally, find the split that minimizes the cost.
288
0
      struct CostInfo {
289
0
        float cost = std::numeric_limits<float>::max();
290
0
        float extra_cost = 0;
291
0
        float Cost() const { return cost + extra_cost; }
Unexecuted instantiation: enc_ma.cc:jxl::N_SSE4::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::CostInfo::Cost() const
Unexecuted instantiation: enc_ma.cc:jxl::N_AVX2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::CostInfo::Cost() const
Unexecuted instantiation: enc_ma.cc:jxl::N_SSE2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)::CostInfo::Cost() const
292
0
        Predictor pred;  // will be uninitialized in some cases, but never used.
293
0
      };
294
0
      std::vector<CostInfo> costs_l;
295
0
      std::vector<CostInfo> costs_r;
296
297
0
      std::vector<int32_t> counts_above(max_symbols);
298
0
      std::vector<int32_t> counts_below(max_symbols);
299
300
      // The lower the threshold, the higher the expected noisiness of the
301
      // estimate. Thus, discourage changing predictors.
302
0
      float change_pred_penalty = 800.0f / (100.0f + threshold);
303
0
      for (size_t prop = 0; prop < num_properties && base_bits > threshold;
304
0
           prop++) {
305
0
        costs_l.clear();
306
0
        costs_r.clear();
307
0
        size_t prop_size = tree_samples.NumPropertyValues(prop);
308
0
        if (extra_bits_increase.size() < prop_size) {
309
0
          count_increase.resize(prop_size * max_symbols);
310
0
          extra_bits_increase.resize(prop_size);
311
0
        }
312
        // Clear prop_value_used_count (which cannot be cleared "on the go")
313
0
        prop_value_used_count.clear();
314
0
        prop_value_used_count.resize(prop_size);
315
316
0
        size_t first_used = prop_size;
317
0
        size_t last_used = 0;
318
319
        // TODO(veluca): consider finding multiple splits along a single
320
        // property at the same time, possibly with a bottom-up approach.
321
0
        if (prop < tree_samples.NumStaticProps()) {
322
0
          for (size_t i = begin; i < end; i++) {
323
0
            size_t p = tree_samples.Property<true>(prop, i);
324
0
            prop_value_used_count[p]++;
325
0
            last_used = std::max(last_used, p);
326
0
            first_used = std::min(first_used, p);
327
0
          }
328
0
        } else {
329
0
          size_t prop_idx = prop - tree_samples.NumStaticProps();
330
0
          for (size_t i = begin; i < end; i++) {
331
0
            size_t p = tree_samples.Property<false>(prop_idx, i);
332
0
            prop_value_used_count[p]++;
333
0
            last_used = std::max(last_used, p);
334
0
            first_used = std::min(first_used, p);
335
0
          }
336
0
        }
337
0
        costs_l.resize(last_used - first_used);
338
0
        costs_r.resize(last_used - first_used);
339
        // For all predictors, compute the right and left costs of each split.
340
0
        for (size_t pred = 0; pred < num_predictors; pred++) {
341
          // Compute cost and histogram increments for each property value.
342
0
          const std::vector<ResidualToken> &rtokens =
343
0
              tree_samples.RTokens(pred);
344
0
          if (prop < tree_samples.NumStaticProps()) {
345
0
            CollectExtraBitsIncrease<true>(tree_samples, rtokens,
346
0
                                           count_increase, extra_bits_increase,
347
0
                                           begin, end, prop, max_symbols);
348
0
          } else {
349
0
            CollectExtraBitsIncrease<false>(
350
0
                tree_samples, rtokens, count_increase, extra_bits_increase,
351
0
                begin, end, prop - tree_samples.NumStaticProps(), max_symbols);
352
0
          }
353
0
          memcpy(counts_above.data(), counts.data() + pred * max_symbols,
354
0
                 max_symbols * sizeof counts_above[0]);
355
0
          memset(counts_below.data(), 0, max_symbols * sizeof counts_below[0]);
356
0
          size_t extra_bits_below = 0;
357
          // Exclude last used: this ensures neither counts_above nor
358
          // counts_below is empty.
359
0
          for (size_t i = first_used; i < last_used; i++) {
360
0
            if (!prop_value_used_count[i]) continue;
361
0
            extra_bits_below += extra_bits_increase[i];
362
            // The increase for this property value has been used, and will not
363
            // be used again: clear it. Also below.
364
0
            extra_bits_increase[i] = 0;
365
0
            for (size_t sym = 0; sym < max_symbols; sym++) {
366
0
              counts_above[sym] -= count_increase[i * max_symbols + sym];
367
0
              counts_below[sym] += count_increase[i * max_symbols + sym];
368
0
              count_increase[i * max_symbols + sym] = 0;
369
0
            }
370
0
            float rcost = EstimateBits(counts_above.data(), max_symbols) +
371
0
                          tot_extra_bits[pred] - extra_bits_below;
372
0
            float lcost = EstimateBits(counts_below.data(), max_symbols) +
373
0
                          extra_bits_below;
374
0
            JXL_DASSERT(extra_bits_below <= tot_extra_bits[pred]);
375
0
            float penalty = 0;
376
            // Never discourage moving away from the Weighted predictor.
377
0
            if (tree_samples.PredictorFromIndex(pred) !=
378
0
                    (*tree)[pos].predictor &&
379
0
                (*tree)[pos].predictor != Predictor::Weighted) {
380
0
              penalty = change_pred_penalty;
381
0
            }
382
            // If everything else is equal, disfavour Weighted (slower) and
383
            // favour Zero (faster if it's the only predictor used in a
384
            // group+channel combination)
385
0
            if (tree_samples.PredictorFromIndex(pred) == Predictor::Weighted) {
386
0
              penalty += 1e-8;
387
0
            }
388
0
            if (tree_samples.PredictorFromIndex(pred) == Predictor::Zero) {
389
0
              penalty -= 1e-8;
390
0
            }
391
0
            if (rcost + penalty < costs_r[i - first_used].Cost()) {
392
0
              costs_r[i - first_used].cost = rcost;
393
0
              costs_r[i - first_used].extra_cost = penalty;
394
0
              costs_r[i - first_used].pred =
395
0
                  tree_samples.PredictorFromIndex(pred);
396
0
            }
397
0
            if (lcost + penalty < costs_l[i - first_used].Cost()) {
398
0
              costs_l[i - first_used].cost = lcost;
399
0
              costs_l[i - first_used].extra_cost = penalty;
400
0
              costs_l[i - first_used].pred =
401
0
                  tree_samples.PredictorFromIndex(pred);
402
0
            }
403
0
          }
404
0
        }
405
        // Iterate through the possible splits and find the one with minimum sum
406
        // of costs of the two sides.
407
0
        size_t split = begin;
408
0
        for (size_t i = first_used; i < last_used; i++) {
409
0
          if (!prop_value_used_count[i]) continue;
410
0
          split += prop_value_used_count[i];
411
0
          float rcost = costs_r[i - first_used].cost;
412
0
          float lcost = costs_l[i - first_used].cost;
413
414
0
          bool uses_wp = tree_samples.PropertyFromIndex(prop) == kWPProp ||
415
0
                         costs_l[i - first_used].pred == Predictor::Weighted ||
416
0
                         costs_r[i - first_used].pred == Predictor::Weighted;
417
0
          bool zero_entropy_side = rcost == 0 || lcost == 0;
418
419
0
          SplitInfo &best_ref =
420
0
              tree_samples.PropertyFromIndex(prop) < kNumStaticProperties
421
0
                  ? (zero_entropy_side ? best_split_static_constant
422
0
                                       : best_split_static)
423
0
                  : (uses_wp ? best_split_nonstatic : best_split_nowp);
424
0
          if (lcost + rcost < best_ref.Cost()) {
425
0
            best_ref.prop = prop;
426
0
            best_ref.val = i;
427
0
            best_ref.pos = split;
428
0
            best_ref.lcost = lcost;
429
0
            best_ref.lpred = costs_l[i - first_used].pred;
430
0
            best_ref.rcost = rcost;
431
0
            best_ref.rpred = costs_r[i - first_used].pred;
432
0
          }
433
0
        }
434
        // Clear extra_bits_increase and cost_increase for last_used.
435
0
        extra_bits_increase[last_used] = 0;
436
0
        for (size_t sym = 0; sym < max_symbols; sym++) {
437
0
          count_increase[last_used * max_symbols + sym] = 0;
438
0
        }
439
0
      }
440
441
      // Try to avoid introducing WP.
442
0
      if (best_split_nowp.Cost() + threshold < base_bits &&
443
0
          best_split_nowp.Cost() <= fast_decode_multiplier * best->Cost()) {
444
0
        best = &best_split_nowp;
445
0
      }
446
      // Split along static props if possible and not significantly more
447
      // expensive.
448
0
      if (best_split_static.Cost() + threshold < base_bits &&
449
0
          best_split_static.Cost() <= fast_decode_multiplier * best->Cost()) {
450
0
        best = &best_split_static;
451
0
      }
452
      // Split along static props to create constant nodes if possible.
453
0
      if (best_split_static_constant.Cost() + threshold < base_bits) {
454
0
        best = &best_split_static_constant;
455
0
      }
456
0
    }
457
458
0
    if (best->Cost() + threshold < base_bits) {
459
0
      uint32_t p = tree_samples.PropertyFromIndex(best->prop);
460
0
      pixel_type dequant =
461
0
          tree_samples.UnquantizeProperty(best->prop, best->val);
462
      // Split node and try to split children.
463
0
      MakeSplitNode(pos, p, dequant, best->lpred, 0, best->rpred, 0, tree);
464
      // "Sort" according to winning property
465
0
      if (best->prop < tree_samples.NumStaticProps()) {
466
0
        SplitTreeSamples<true>(tree_samples, begin, best->pos, end, best->prop,
467
0
                               best->val);
468
0
      } else {
469
0
        SplitTreeSamples<false>(tree_samples, begin, best->pos, end,
470
0
                                best->prop - tree_samples.NumStaticProps(),
471
0
                                best->val);
472
0
      }
473
0
      auto new_sp_range = static_prop_range;
474
0
      if (p < kNumStaticProperties) {
475
0
        JXL_DASSERT(static_cast<uint32_t>(dequant + 1) <= new_sp_range[p][1]);
476
0
        new_sp_range[p][1] = dequant + 1;
477
0
        JXL_DASSERT(new_sp_range[p][0] < new_sp_range[p][1]);
478
0
      }
479
0
      nodes.push_back(
480
0
          NodeInfo{(*tree)[pos].rchild, begin, best->pos, new_sp_range});
481
0
      new_sp_range = static_prop_range;
482
0
      if (p < kNumStaticProperties) {
483
0
        JXL_DASSERT(new_sp_range[p][0] <= static_cast<uint32_t>(dequant + 1));
484
0
        new_sp_range[p][0] = dequant + 1;
485
0
        JXL_DASSERT(new_sp_range[p][0] < new_sp_range[p][1]);
486
0
      }
487
0
      nodes.push_back(
488
0
          NodeInfo{(*tree)[pos].lchild, best->pos, end, new_sp_range});
489
0
    }
490
0
  }
491
0
}
Unexecuted instantiation: jxl::N_SSE4::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
Unexecuted instantiation: jxl::N_AVX2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
Unexecuted instantiation: jxl::N_SSE2::FindBestSplit(jxl::TreeSamples&, float, std::__1::vector<jxl::ModularMultiplierInfo, std::__1::allocator<jxl::ModularMultiplierInfo> > const&, std::__1::array<std::__1::array<unsigned int, 2ul>, 2ul>, float, std::__1::vector<jxl::PropertyDecisionNode, std::__1::allocator<jxl::PropertyDecisionNode> >*)
492
493
// NOLINTNEXTLINE(google-readability-namespace-comments)
494
}  // namespace HWY_NAMESPACE
495
}  // namespace jxl
496
HWY_AFTER_NAMESPACE();
497
498
#if HWY_ONCE
499
namespace jxl {
500
501
HWY_EXPORT(FindBestSplit);  // Local function.
502
503
Status ComputeBestTree(TreeSamples &tree_samples, float threshold,
504
                       const std::vector<ModularMultiplierInfo> &mul_info,
505
                       StaticPropRange static_prop_range,
506
0
                       float fast_decode_multiplier, Tree *tree) {
507
  // TODO(veluca): take into account that different contexts can have different
508
  // uint configs.
509
  //
510
  // Initialize tree.
511
0
  tree->emplace_back();
512
0
  tree->back().property = -1;
513
0
  tree->back().predictor = tree_samples.PredictorFromIndex(0);
514
0
  tree->back().predictor_offset = 0;
515
0
  tree->back().multiplier = 1;
516
0
  JXL_ENSURE(tree_samples.NumProperties() < 64);
517
518
0
  JXL_ENSURE(tree_samples.NumDistinctSamples() <=
519
0
             std::numeric_limits<uint32_t>::max());
520
0
  HWY_DYNAMIC_DISPATCH(FindBestSplit)
521
0
  (tree_samples, threshold, mul_info, static_prop_range, fast_decode_multiplier,
522
0
   tree);
523
0
  return true;
524
0
}
525
526
#if JXL_CXX_LANG < JXL_CXX_17
527
constexpr int32_t TreeSamples::kPropertyRange;
528
constexpr uint32_t TreeSamples::kDedupEntryUnused;
529
#endif
530
531
Status TreeSamples::SetPredictor(Predictor predictor,
                                 ModularOptions::TreeMode wp_tree_mode) {
  // WP-only trees always use the single Weighted predictor.
  if (wp_tree_mode == ModularOptions::TreeMode::kWPOnly) {
    predictors = {Predictor::Weighted};
    residuals.resize(1);
    return true;
  }
  // Requesting Weighted while forbidding WP is contradictory.
  if (wp_tree_mode == ModularOptions::TreeMode::kNoWP &&
      predictor == Predictor::Weighted) {
    return JXL_FAILURE("Invalid predictor settings");
  }
  switch (predictor) {
    case Predictor::Variable:
      // Consider every modular predictor; move Weighted and Gradient to the
      // front so they are tried first.
      for (size_t idx = 0; idx < kNumModularPredictors; idx++) {
        predictors.push_back(static_cast<Predictor>(idx));
      }
      std::swap(predictors[0],
                predictors[static_cast<int>(Predictor::Weighted)]);
      std::swap(predictors[1],
                predictors[static_cast<int>(Predictor::Gradient)]);
      break;
    case Predictor::Best:
      predictors = {Predictor::Weighted, Predictor::Gradient};
      break;
    default:
      predictors = {predictor};
      break;
  }
  if (wp_tree_mode == ModularOptions::TreeMode::kNoWP) {
    // Weighted may have been added by the Variable/Best cases above.
    predictors.erase(
        std::remove(predictors.begin(), predictors.end(), Predictor::Weighted),
        predictors.end());
  }
  // One residual stream per candidate predictor.
  residuals.resize(predictors.size());
  return true;
}
561
562
Status TreeSamples::SetProperties(const std::vector<uint32_t> &properties,
                                  ModularOptions::TreeMode wp_tree_mode) {
  props_to_use = properties;
  // Restricted tree modes override the requested property set entirely.
  if (wp_tree_mode == ModularOptions::TreeMode::kWPOnly) {
    props_to_use = {static_cast<uint32_t>(kWPProp)};
  }
  if (wp_tree_mode == ModularOptions::TreeMode::kGradientOnly) {
    props_to_use = {static_cast<uint32_t>(kGradientProp)};
  }
  if (wp_tree_mode == ModularOptions::TreeMode::kNoWP) {
    auto new_end =
        std::remove(props_to_use.begin(), props_to_use.end(), kWPProp);
    props_to_use.erase(new_end, props_to_use.end());
  }
  if (props_to_use.empty()) {
    return JXL_FAILURE("Invalid property set configuration");
  }
  // Count static properties; when present they must form a prefix of
  // props_to_use (property id == position).
  num_static_props = 0;
  for (size_t idx = 0; idx < props_to_use.size(); ++idx) {
    uint32_t p = props_to_use[idx];
    if (p < kNumStaticProperties) {
      JXL_DASSERT(idx == p);
      num_static_props++;
    }
  }
  // Only the non-static properties get per-sample storage here.
  props.resize(props_to_use.size() - num_static_props);
  return true;
}
591
592
0
void TreeSamples::InitTable(size_t log_size) {
593
0
  size_t size = 1ULL << log_size;
594
0
  if (dedup_table_.size() == size) return;
595
0
  dedup_table_.resize(size, kDedupEntryUnused);
596
0
  for (size_t i = 0; i < NumDistinctSamples(); i++) {
597
0
    if (sample_counts[i] != std::numeric_limits<uint16_t>::max()) {
598
0
      AddToTable(i);
599
0
    }
600
0
  }
601
0
}
602
603
0
bool TreeSamples::AddToTableAndMerge(size_t a) {
604
0
  size_t pos1 = Hash1(a);
605
0
  size_t pos2 = Hash2(a);
606
0
  if (dedup_table_[pos1] != kDedupEntryUnused &&
607
0
      IsSameSample(a, dedup_table_[pos1])) {
608
0
    JXL_DASSERT(sample_counts[a] == 1);
609
0
    sample_counts[dedup_table_[pos1]]++;
610
    // Remove from hash table samples that are saturated.
611
0
    if (sample_counts[dedup_table_[pos1]] ==
612
0
        std::numeric_limits<uint16_t>::max()) {
613
0
      dedup_table_[pos1] = kDedupEntryUnused;
614
0
    }
615
0
    return true;
616
0
  }
617
0
  if (dedup_table_[pos2] != kDedupEntryUnused &&
618
0
      IsSameSample(a, dedup_table_[pos2])) {
619
0
    JXL_DASSERT(sample_counts[a] == 1);
620
0
    sample_counts[dedup_table_[pos2]]++;
621
    // Remove from hash table samples that are saturated.
622
0
    if (sample_counts[dedup_table_[pos2]] ==
623
0
        std::numeric_limits<uint16_t>::max()) {
624
0
      dedup_table_[pos2] = kDedupEntryUnused;
625
0
    }
626
0
    return true;
627
0
  }
628
0
  AddToTable(a);
629
0
  return false;
630
0
}
631
632
0
void TreeSamples::AddToTable(size_t a) {
633
0
  size_t pos1 = Hash1(a);
634
0
  size_t pos2 = Hash2(a);
635
0
  if (dedup_table_[pos1] == kDedupEntryUnused) {
636
0
    dedup_table_[pos1] = a;
637
0
  } else if (dedup_table_[pos2] == kDedupEntryUnused) {
638
0
    dedup_table_[pos2] = a;
639
0
  }
640
0
}
641
642
0
void TreeSamples::PrepareForSamples(size_t extra_num_samples) {
643
0
  for (auto &res : residuals) {
644
0
    res.reserve(res.size() + extra_num_samples);
645
0
  }
646
0
  for (size_t i = 0; i < num_static_props; ++i) {
647
0
    static_props[i].reserve(static_props[i].size() + extra_num_samples);
648
0
  }
649
0
  for (auto &p : props) {
650
0
    p.reserve(p.size() + extra_num_samples);
651
0
  }
652
0
  size_t total_num_samples = extra_num_samples + sample_counts.size();
653
0
  size_t next_size = CeilLog2Nonzero(total_num_samples * 3 / 2);
654
0
  InitTable(next_size);
655
0
}
656
657
0
size_t TreeSamples::Hash1(size_t a) const {
658
0
  constexpr uint64_t constant = 0x1e35a7bd;
659
0
  uint64_t h = constant;
660
0
  for (const auto &r : residuals) {
661
0
    h = h * constant + r[a].tok;
662
0
    h = h * constant + r[a].nbits;
663
0
  }
664
0
  for (size_t i = 0; i < num_static_props; ++i) {
665
0
    h = h * constant + static_props[i][a];
666
0
  }
667
0
  for (const auto &p : props) {
668
0
    h = h * constant + p[a];
669
0
  }
670
0
  return (h >> 16) & (dedup_table_.size() - 1);
671
0
}
672
0
size_t TreeSamples::Hash2(size_t a) const {
673
0
  constexpr uint64_t constant = 0x1e35a7bd1e35a7bd;
674
0
  uint64_t h = constant;
675
0
  for (size_t i = 0; i < num_static_props; ++i) {
676
0
    h = h * constant ^ static_props[i][a];
677
0
  }
678
0
  for (const auto &p : props) {
679
0
    h = h * constant ^ p[a];
680
0
  }
681
0
  for (const auto &r : residuals) {
682
0
    h = h * constant ^ r[a].tok;
683
0
    h = h * constant ^ r[a].nbits;
684
0
  }
685
0
  return (h >> 16) & (dedup_table_.size() - 1);
686
0
}
687
688
0
bool TreeSamples::IsSameSample(size_t a, size_t b) const {
689
0
  bool ret = true;
690
0
  for (const auto &r : residuals) {
691
0
    if (r[a].tok != r[b].tok) {
692
0
      ret = false;
693
0
    }
694
0
    if (r[a].nbits != r[b].nbits) {
695
0
      ret = false;
696
0
    }
697
0
  }
698
0
  for (size_t i = 0; i < num_static_props; ++i) {
699
0
    if (static_props[i][a] != static_props[i][b]) {
700
0
      ret = false;
701
0
    }
702
0
  }
703
0
  for (const auto &p : props) {
704
0
    if (p[a] != p[b]) {
705
0
      ret = false;
706
0
    }
707
0
  }
708
0
  return ret;
709
0
}
710
711
// Appends one training sample: the residual token of `pixel` under each
// candidate predictor, plus the quantized property values. If an identical
// sample already exists, its count is bumped and the freshly appended
// storage is rolled back instead.
void TreeSamples::AddSample(pixel_type_w pixel, const Properties &properties,
                            const pixel_type_w *predictions) {
  for (size_t i = 0; i < predictors.size(); i++) {
    // Residual of this pixel under predictor i, tokenized.
    pixel_type v = pixel - predictions[static_cast<int>(predictors[i])];
    uint32_t tok, nbits, bits;
    HybridUintConfig(4, 1, 2).Encode(PackSigned(v), &tok, &nbits, &bits);
    // ResidualToken stores tok/nbits as uint8_t; verify they fit.
    JXL_DASSERT(tok < 256);
    JXL_DASSERT(nbits < 256);
    ResidualToken token = {static_cast<uint8_t>(tok),
                           static_cast<uint8_t>(nbits)};
    residuals[i].push_back(token);
  }
  // Static properties occupy the first num_static_props slots of
  // props_to_use, so properties[i] is the right source here.
  for (size_t i = 0; i < num_static_props; ++i) {
    static_props[i].push_back(QuantizeStaticProperty(i, properties[i]));
  }
  for (size_t i = num_static_props; i < props_to_use.size(); i++) {
    props[i - num_static_props].push_back(QuantizeProperty(i, properties[props_to_use[i]]));
  }
  sample_counts.push_back(1);
  num_samples++;
  // If the sample was merged into an existing identical one, undo the
  // appends above (num_samples intentionally still counts it).
  if (AddToTableAndMerge(sample_counts.size() - 1)) {
    for (auto &r : residuals) r.pop_back();
    for (size_t i = 0; i < num_static_props; ++i) static_props[i].pop_back();
    for (auto &p : props) p.pop_back();
    sample_counts.pop_back();
  }
}
738
739
0
void TreeSamples::Swap(size_t a, size_t b) {
740
0
  if (a == b) return;
741
0
  for (auto &r : residuals) {
742
0
    std::swap(r[a], r[b]);
743
0
  }
744
0
  for (size_t i = 0; i < num_static_props; ++i) {
745
0
    std::swap(static_props[i][a], static_props[i][b]);
746
0
  }
747
0
  for (auto &p : props) {
748
0
    std::swap(p[a], p[b]);
749
0
  }
750
0
  std::swap(sample_counts[a], sample_counts[b]);
751
0
}
752
753
namespace {
754
std::vector<int32_t> QuantizeHistogram(const std::vector<uint32_t> &histogram,
755
0
                                       size_t num_chunks) {
756
0
  if (histogram.empty() || num_chunks == 0) return {};
757
0
  uint64_t sum = std::accumulate(histogram.begin(), histogram.end(), 0LU);
758
0
  if (sum == 0) return {};
759
  // TODO(veluca): selecting distinct quantiles is likely not the best
760
  // way to go about this.
761
0
  std::vector<int32_t> thresholds;
762
0
  uint64_t cumsum = 0;
763
0
  uint64_t threshold = 1;
764
0
  for (size_t i = 0; i < histogram.size(); i++) {
765
0
    cumsum += histogram[i];
766
0
    if (cumsum * num_chunks >= threshold * sum) {
767
0
      thresholds.push_back(i);
768
0
      while (cumsum * num_chunks >= threshold * sum) threshold++;
769
0
    }
770
0
  }
771
0
  JXL_DASSERT(thresholds.size() <= num_chunks);
772
  // last value collects all histogram and is not really a threshold
773
0
  thresholds.pop_back();
774
0
  return thresholds;
775
0
}
776
777
std::vector<int32_t> QuantizeSamples(const std::vector<int32_t> &samples,
778
0
                                     size_t num_chunks) {
779
0
  if (samples.empty()) return {};
780
0
  int min = *std::min_element(samples.begin(), samples.end());
781
0
  constexpr int kRange = 512;
782
0
  min = jxl::Clamp1(min, -kRange, kRange);
783
0
  std::vector<uint32_t> counts(2 * kRange + 1);
784
0
  for (int s : samples) {
785
0
    uint32_t sample_offset = jxl::Clamp1(s, -kRange, kRange) - min;
786
0
    counts[sample_offset]++;
787
0
  }
788
0
  std::vector<int32_t> thresholds = QuantizeHistogram(counts, num_chunks);
789
0
  for (auto &v : thresholds) v += min;
790
0
  return thresholds;
791
0
}
792
793
// `to[i]` is assigned value `v` conforming `from[v] <= i && from[v-1] > i`.
794
// This is because the decision node in the tree splits on (property) > i,
795
// hence everything that is not > of a threshold should be clustered
796
// together.
797
template <typename T>
void QuantMap(const std::vector<int32_t> &from, std::vector<T> &to,
              size_t num_pegs, int bias) {
  // Build a dense lookup table: to[i] is the index of the first threshold in
  // `from` that is >= (i - bias), i.e. the cluster the value falls into.
  to.resize(num_pegs);
  size_t cluster = 0;
  for (size_t i = 0; i < num_pegs; i++) {
    while (cluster < from.size() &&
           static_cast<int>(i) - bias > from[cluster]) {
      ++cluster;
    }
    // The cluster index must be representable in the map's element type.
    JXL_DASSERT(static_cast<T>(cluster) == cluster);
    to[i] = cluster;
  }
}
Unexecuted instantiation: enc_ma.cc:void jxl::(anonymous namespace)::QuantMap<unsigned short>(std::__1::vector<int, std::__1::allocator<int> > const&, std::__1::vector<unsigned short, std::__1::allocator<unsigned short> >&, unsigned long, int)
Unexecuted instantiation: enc_ma.cc:void jxl::(anonymous namespace)::QuantMap<unsigned char>(std::__1::vector<int, std::__1::allocator<int> > const&, std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >&, unsigned long, int)
810
}  // namespace
811
812
// Precomputes, for every property in props_to_use, the set of quantization
// thresholds (compact_properties) and the dense value->cluster lookup tables
// (static_property_mapping / property_mapping) used when adding samples.
// NOTE: the abs-variants below mutate pixel_samples/diff_samples in place
// (replacing values with their absolute values), so the non-abs thresholds
// are always computed first.
void TreeSamples::PreQuantizeProperties(
    const StaticPropRange &range,
    const std::vector<ModularMultiplierInfo> &multiplier_info,
    const std::vector<uint32_t> &group_pixel_count,
    const std::vector<uint32_t> &channel_pixel_count,
    std::vector<pixel_type> &pixel_samples,
    std::vector<pixel_type> &diff_samples, size_t max_property_values) {
  // If we have forced splits because of multipliers, choose channel and group
  // thresholds accordingly.
  std::vector<int32_t> group_multiplier_thresholds;
  std::vector<int32_t> channel_multiplier_thresholds;
  for (const auto &v : multiplier_info) {
    // range[0] is the channel range, range[1] the group range; a boundary
    // that differs from the global range becomes a forced threshold.
    if (v.range[0][0] != range[0][0]) {
      channel_multiplier_thresholds.push_back(v.range[0][0] - 1);
    }
    if (v.range[0][1] != range[0][1]) {
      channel_multiplier_thresholds.push_back(v.range[0][1] - 1);
    }
    if (v.range[1][0] != range[1][0]) {
      group_multiplier_thresholds.push_back(v.range[1][0] - 1);
    }
    if (v.range[1][1] != range[1][1]) {
      group_multiplier_thresholds.push_back(v.range[1][1] - 1);
    }
  }
  // Sort and deduplicate both forced-threshold lists.
  std::sort(channel_multiplier_thresholds.begin(),
            channel_multiplier_thresholds.end());
  channel_multiplier_thresholds.resize(
      std::unique(channel_multiplier_thresholds.begin(),
                  channel_multiplier_thresholds.end()) -
      channel_multiplier_thresholds.begin());
  std::sort(group_multiplier_thresholds.begin(),
            group_multiplier_thresholds.end());
  group_multiplier_thresholds.resize(
      std::unique(group_multiplier_thresholds.begin(),
                  group_multiplier_thresholds.end()) -
      group_multiplier_thresholds.begin());

  compact_properties.resize(props_to_use.size());
  // Forced multiplier thresholds take precedence over histogram quantiles.
  auto quantize_channel = [&]() {
    if (!channel_multiplier_thresholds.empty()) {
      return channel_multiplier_thresholds;
    }
    return QuantizeHistogram(channel_pixel_count, max_property_values);
  };
  auto quantize_group_id = [&]() {
    if (!group_multiplier_thresholds.empty()) {
      return group_multiplier_thresholds;
    }
    return QuantizeHistogram(group_pixel_count, max_property_values);
  };
  // Coordinates get fixed, evenly spaced thresholds over [0, 255].
  auto quantize_coordinate = [&]() {
    std::vector<int32_t> quantized;
    quantized.reserve(max_property_values - 1);
    for (size_t i = 0; i + 1 < max_property_values; i++) {
      quantized.push_back((i + 1) * 256 / max_property_values - 1);
    }
    return quantized;
  };
  // Pixel/diff thresholds are computed lazily and cached, since several
  // properties may share them.
  std::vector<int32_t> abs_pixel_thresholds;
  std::vector<int32_t> pixel_thresholds;
  auto quantize_pixel_property = [&]() {
    if (pixel_thresholds.empty()) {
      pixel_thresholds = QuantizeSamples(pixel_samples, max_property_values);
    }
    return pixel_thresholds;
  };
  auto quantize_abs_pixel_property = [&]() {
    if (abs_pixel_thresholds.empty()) {
      quantize_pixel_property();  // Compute the non-abs thresholds.
      // Destructive: pixel_samples is replaced by absolute values here.
      for (auto &v : pixel_samples) v = std::abs(v);
      abs_pixel_thresholds =
          QuantizeSamples(pixel_samples, max_property_values);
    }
    return abs_pixel_thresholds;
  };
  std::vector<int32_t> abs_diff_thresholds;
  std::vector<int32_t> diff_thresholds;
  auto quantize_diff_property = [&]() {
    if (diff_thresholds.empty()) {
      diff_thresholds = QuantizeSamples(diff_samples, max_property_values);
    }
    return diff_thresholds;
  };
  auto quantize_abs_diff_property = [&]() {
    if (abs_diff_thresholds.empty()) {
      quantize_diff_property();  // Compute the non-abs thresholds.
      // Destructive: diff_samples is replaced by absolute values here.
      for (auto &v : diff_samples) v = std::abs(v);
      abs_diff_thresholds = QuantizeSamples(diff_samples, max_property_values);
    }
    return abs_diff_thresholds;
  };
  // The WP error property uses fixed, roughly logarithmic threshold tables
  // whose resolution grows with max_property_values.
  auto quantize_wp = [&]() {
    if (max_property_values < 32) {
      return std::vector<int32_t>{-127, -63, -31, -15, -7, -3, -1, 0,
                                  1,    3,   7,   15,  31, 63, 127};
    }
    if (max_property_values < 64) {
      return std::vector<int32_t>{-255, -191, -127, -95, -63, -47, -31, -23,
                                  -15,  -11,  -7,   -5,  -3,  -1,  0,   1,
                                  3,    5,    7,    11,  15,  23,  31,  47,
                                  63,   95,   127,  191, 255};
    }
    return std::vector<int32_t>{
        -255, -223, -191, -159, -127, -111, -95, -79, -63, -55, -47,
        -39,  -31,  -27,  -23,  -19,  -15,  -13, -11, -9,  -7,  -6,
        -5,   -4,   -3,   -2,   -1,   0,    1,   2,   3,   4,   5,
        6,    7,    9,    11,   13,   15,   19,  23,  27,  31,  39,
        47,   55,   63,   79,   95,   111,  127, 159, 191, 223, 255};
  };

  property_mapping.resize(props_to_use.size() - num_static_props);
  // Select a quantizer per property. The numeric ids below match the modular
  // property order (0=channel, 1=group, 2/3=coordinates, ...); reference
  // properties start at kNumNonrefProperties and repeat in groups of 4.
  for (size_t i = 0; i < props_to_use.size(); i++) {
    if (props_to_use[i] == 0) {
      compact_properties[i] = quantize_channel();
    } else if (props_to_use[i] == 1) {
      compact_properties[i] = quantize_group_id();
    } else if (props_to_use[i] == 2 || props_to_use[i] == 3) {
      compact_properties[i] = quantize_coordinate();
    } else if (props_to_use[i] == 6 || props_to_use[i] == 7 ||
               props_to_use[i] == 8 ||
               (props_to_use[i] >= kNumNonrefProperties &&
                (props_to_use[i] - kNumNonrefProperties) % 4 == 1)) {
      compact_properties[i] = quantize_pixel_property();
    } else if (props_to_use[i] == 4 || props_to_use[i] == 5 ||
               (props_to_use[i] >= kNumNonrefProperties &&
                (props_to_use[i] - kNumNonrefProperties) % 4 == 0)) {
      compact_properties[i] = quantize_abs_pixel_property();
    } else if (props_to_use[i] >= kNumNonrefProperties &&
               (props_to_use[i] - kNumNonrefProperties) % 4 == 2) {
      compact_properties[i] = quantize_abs_diff_property();
    } else if (props_to_use[i] == kWPProp) {
      compact_properties[i] = quantize_wp();
    } else {
      compact_properties[i] = quantize_diff_property();
    }
    // Build the dense value->cluster table over [-kPropertyRange,
    // kPropertyRange]; static properties go into their own mapping array.
    if (i < num_static_props) {
      QuantMap(compact_properties[i], static_property_mapping[i],
               kPropertyRange * 2 + 1, kPropertyRange);
    } else {
      QuantMap(compact_properties[i], property_mapping[i - num_static_props],
               kPropertyRange * 2 + 1, kPropertyRange);
    }
  }
}
957
958
// Randomly samples pixels (and horizontal differences) from the usable
// channels of `image`, and accumulates per-group / per-channel pixel counts.
// The samples later feed property quantization in PreQuantizeProperties.
// Sampling uses geometric skips so that on average a `fraction` share of
// pixels is visited in a single pass.
void CollectPixelSamples(const Image &image, const ModularOptions &options,
                         uint32_t group_id,
                         std::vector<uint32_t> &group_pixel_count,
                         std::vector<uint32_t> &channel_pixel_count,
                         std::vector<pixel_type> &pixel_samples,
                         std::vector<pixel_type> &diff_samples) {
  if (options.nb_repeats == 0) return;
  if (group_pixel_count.size() <= group_id) {
    group_pixel_count.resize(group_id + 1);
  }
  if (channel_pixel_count.size() < image.channel.size()) {
    channel_pixel_count.resize(image.channel.size());
  }
  // Deterministic per-group RNG so results are reproducible.
  Rng rng(group_id);
  // Sample 10% of the final number of samples for property quantization.
  float fraction = std::min(options.nb_repeats * 0.1, 0.99);
  Rng::GeometricDistribution dist = Rng::MakeGeometric(fraction);
  size_t total_pixels = 0;
  std::vector<size_t> channel_ids;
  for (size_t i = 0; i < image.channel.size(); i++) {
    // Stop at the first non-meta channel that exceeds the configured size
    // limit (channels are assumed ordered; later ones are skipped too).
    if (i >= image.nb_meta_channels &&
        (image.channel[i].w > options.max_chan_size ||
         image.channel[i].h > options.max_chan_size)) {
      break;
    }
    if (image.channel[i].w <= 1 || image.channel[i].h == 0) {
      continue;  // skip empty or width-1 channels.
    }
    channel_ids.push_back(i);
    group_pixel_count[group_id] += image.channel[i].w * image.channel[i].h;
    channel_pixel_count[i] += image.channel[i].w * image.channel[i].h;
    total_pixels += image.channel[i].w * image.channel[i].h;
  }
  if (channel_ids.empty()) return;
  pixel_samples.reserve(pixel_samples.size() + fraction * total_pixels);
  diff_samples.reserve(diff_samples.size() + fraction * total_pixels);
  // (i, y, x) is a cursor over the concatenation of all selected channels;
  // `advance` moves it forward by `amount` pixels, wrapping rows/channels.
  size_t i = 0;
  size_t y = 0;
  size_t x = 0;
  auto advance = [&](size_t amount) {
    x += amount;
    // Detect row overflow (rare).
    while (x >= image.channel[channel_ids[i]].w) {
      x -= image.channel[channel_ids[i]].w;
      y++;
      // Detect end-of-channel (even rarer).
      if (y == image.channel[channel_ids[i]].h) {
        i++;
        y = 0;
        if (i >= channel_ids.size()) {
          return;
        }
      }
    }
  };
  advance(rng.Geometric(dist));
  for (; i < channel_ids.size(); advance(rng.Geometric(dist) + 1)) {
    const pixel_type *row = image.channel[channel_ids[i]].Row(y);
    pixel_samples.push_back(row[x]);
    // Horizontal difference; at x == 0 use the pixel to the right instead.
    size_t xp = x == 0 ? 1 : x - 1;
    diff_samples.push_back(static_cast<int64_t>(row[x]) - row[xp]);
  }
}
1021
1022
// TODO(veluca): very simple encoding scheme. This should be improved.
1023
// TODO(veluca): very simple encoding scheme. This should be improved.
// Serializes `tree` into `tokens` (for entropy coding) and simultaneously
// rebuilds it in breadth-first order into `decoder_tree`, the layout the
// decoder expects. Child indices are derived from the BFS position: the
// nodes still in the queue sit between this node and its children.
Status TokenizeTree(const Tree &tree, std::vector<Token> *tokens,
                    Tree *decoder_tree) {
  JXL_ENSURE(tree.size() <= kMaxTreeSize);
  std::queue<int> q;
  q.push(0);
  size_t leaf_id = 0;
  decoder_tree->clear();
  while (!q.empty()) {
    int cur = q.front();
    q.pop();
    JXL_ENSURE(tree[cur].property >= -1);
    // property + 1 so that the leaf marker -1 encodes as 0.
    tokens->emplace_back(kPropertyContext, tree[cur].property + 1);
    if (tree[cur].property == -1) {
      // Leaf: emit predictor, offset, and the multiplier split into its
      // power-of-two part (mul_log) and remaining odd factor (mul_bits).
      tokens->emplace_back(kPredictorContext,
                           static_cast<int>(tree[cur].predictor));
      tokens->emplace_back(kOffsetContext,
                           PackSigned(tree[cur].predictor_offset));
      uint32_t mul_log = Num0BitsBelowLS1Bit_Nonzero(tree[cur].multiplier);
      uint32_t mul_bits = (tree[cur].multiplier >> mul_log) - 1;
      tokens->emplace_back(kMultiplierLogContext, mul_log);
      tokens->emplace_back(kMultiplierBitsContext, mul_bits);
      // Variable/Best are encoder-side pseudo-predictors; a real predictor
      // must have been chosen by now.
      JXL_ENSURE(tree[cur].predictor < Predictor::Best);
      decoder_tree->emplace_back(
          -1, 0, static_cast<int>(leaf_id), 0, tree[cur].predictor,
          tree[cur].predictor_offset, tree[cur].multiplier);
      leaf_id++;
      continue;
    }
    // Inner node: children will be appended after the q.size() nodes that
    // are already queued, hence the +1 / +2 offsets.
    decoder_tree->emplace_back(
        tree[cur].property, tree[cur].splitval,
        static_cast<int>(decoder_tree->size() + q.size() + 1),
        static_cast<int>(decoder_tree->size() + q.size() + 2), Predictor::Zero,
        0, 1);
    q.push(tree[cur].lchild);
    q.push(tree[cur].rchild);
    tokens->emplace_back(kSplitValContext, PackSigned(tree[cur].splitval));
  }
  return true;
}
1062
1063
}  // namespace jxl
1064
#endif  // HWY_ONCE