/src/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
Line | Count | Source |
1 | | // Copyright 2016 The Draco Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | #ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED |
16 | | #ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ |
17 | | #define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ |
18 | | |
19 | | #include <math.h> |
20 | | |
21 | | #include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" |
22 | | #include "draco/compression/bit_coders/rans_bit_decoder.h" |
23 | | #include "draco/core/varint_decoding.h" |
24 | | #include "draco/core/vector_d.h" |
25 | | #include "draco/draco_features.h" |
26 | | #include "draco/mesh/corner_table.h" |
27 | | |
28 | | namespace draco { |
29 | | |
30 | | // Decoder for predictions of UV coordinates encoded by our specialized texture |
31 | | // coordinate predictor. See the corresponding encoder for more details. Note |
32 | | // that this predictor is not portable and should not be used anymore. See |
33 | | // MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version |
34 | | // of this prediction scheme. |
35 | | template <typename DataTypeT, class TransformT, class MeshDataT> |
36 | | class MeshPredictionSchemeTexCoordsDecoder |
37 | | : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> { |
38 | | public: |
39 | | using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT, |
40 | | MeshDataT>::CorrType; |
41 | | MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute, |
42 | | const TransformT &transform, |
43 | | const MeshDataT &mesh_data, int version) |
44 | | : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>( |
45 | | attribute, transform, mesh_data), |
46 | | pos_attribute_(nullptr), |
47 | | entry_to_point_id_map_(nullptr), |
48 | | num_components_(0), |
49 | 543 | version_(version) {} |
50 | | |
51 | | bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, |
52 | | int size, int num_components, |
53 | | const PointIndex *entry_to_point_id_map) override; |
54 | | |
55 | | bool DecodePredictionData(DecoderBuffer *buffer) override; |
56 | | |
57 | 0 | PredictionSchemeMethod GetPredictionMethod() const override { |
58 | 0 | return MESH_PREDICTION_TEX_COORDS_DEPRECATED; |
59 | 0 | } |
60 | | |
61 | 0 | bool IsInitialized() const override { |
62 | 0 | if (pos_attribute_ == nullptr) { |
63 | 0 | return false; |
64 | 0 | } |
65 | 0 | if (!this->mesh_data().IsInitialized()) { |
66 | 0 | return false; |
67 | 0 | } |
68 | 0 | return true; |
69 | 0 | } |
70 | | |
71 | 1.07k | int GetNumParentAttributes() const override { return 1; } |
72 | | |
73 | 543 | GeometryAttribute::Type GetParentAttributeType(int i) const override { |
74 | 543 | DRACO_DCHECK_EQ(i, 0); |
75 | 543 | (void)i; |
76 | 543 | return GeometryAttribute::POSITION; |
77 | 543 | } |
78 | | |
79 | 542 | bool SetParentAttribute(const PointAttribute *att) override { |
80 | 542 | if (att == nullptr) { |
81 | 0 | return false; |
82 | 0 | } |
83 | 542 | if (att->attribute_type() != GeometryAttribute::POSITION) { |
84 | 0 | return false; // Invalid attribute type. |
85 | 0 | } |
86 | 542 | if (att->num_components() != 3) { |
87 | 8 | return false; // Currently works only for 3 component positions. |
88 | 8 | } |
89 | 534 | pos_attribute_ = att; |
90 | 534 | return true; |
91 | 542 | } |
92 | | |
93 | | protected: |
94 | 80.2k | Vector3f GetPositionForEntryId(int entry_id) const { |
95 | 80.2k | const PointIndex point_id = entry_to_point_id_map_[entry_id]; |
96 | 80.2k | Vector3f pos; |
97 | 80.2k | pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), |
98 | 80.2k | &pos[0]); |
99 | 80.2k | return pos; |
100 | 80.2k | } |
101 | | |
102 | 922k | Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const { |
103 | 922k | const int data_offset = entry_id * num_components_; |
104 | 922k | return Vector2f(static_cast<float>(data[data_offset]), |
105 | 922k | static_cast<float>(data[data_offset + 1])); |
106 | 922k | } |
107 | | |
108 | | bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, |
109 | | int data_id); |
110 | | |
111 | | private: |
112 | | const PointAttribute *pos_attribute_; |
113 | | const PointIndex *entry_to_point_id_map_; |
114 | | std::unique_ptr<DataTypeT[]> predicted_value_; |
115 | | int num_components_; |
116 | | // Encoded / decoded array of UV flips. |
117 | | std::vector<bool> orientations_; |
118 | | int version_; |
119 | | }; |
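For orientation, the methods above are driven by Draco's sequential attribute decoders in a fixed order: attach the position parent attribute, decode the prediction data (orientations plus transform data), then recombine the corrections into the original values. The following is a hypothetical, hedged sketch of that call order using only the declarations shown in this header; DecodeTexCoordsSketch and its parameters are illustrative names, not part of the Draco API.

#include <vector>

#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h"

// Hypothetical helper illustrating the expected call order for this decoder.
// The real driver lives in Draco's sequential attribute decoders; this sketch
// assumes the caller already holds the decoded corrections.
template <class DecoderT>
bool DecodeTexCoordsSketch(DecoderT *decoder,
                           const draco::PointAttribute *pos_attribute,
                           draco::DecoderBuffer *buffer,
                           const std::vector<int> &corrections,
                           int num_entries,
                           const draco::PointIndex *entry_to_point_id_map,
                           std::vector<int> *out_uvs) {
  // 1. Attach the 3-component POSITION attribute used for prediction.
  if (!decoder->SetParentAttribute(pos_attribute)) {
    return false;
  }
  // 2. Read the delta-coded orientation bits and the transform data.
  if (!decoder->DecodePredictionData(buffer)) {
    return false;
  }
  // 3. Recombine per-entry predictions with the corrections into UVs.
  out_uvs->resize(num_entries * 2);
  return decoder->ComputeOriginalValues(corrections.data(), out_uvs->data(),
                                        num_entries * 2, /*num_components=*/2,
                                        entry_to_point_id_map);
}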
120 | | |
121 | | template <typename DataTypeT, class TransformT, class MeshDataT> |
122 | | bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>:: |
123 | | ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, |
124 | | int /* size */, int num_components, |
125 | 324 | const PointIndex *entry_to_point_id_map) { |
126 | 324 | if (num_components != 2) { |
127 | | // Corrupt/malformed input. Two output components are req'd. |
128 | 3 | return false; |
129 | 3 | } |
130 | 321 | num_components_ = num_components; |
131 | 321 | entry_to_point_id_map_ = entry_to_point_id_map; |
132 | 321 | predicted_value_ = |
133 | 321 | std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]); |
134 | 321 | this->transform().Init(num_components); |
135 | | |
136 | 321 | const int corner_map_size = |
137 | 321 | static_cast<int>(this->mesh_data().data_to_corner_map()->size()); |
138 | 894k | for (int p = 0; p < corner_map_size; ++p) { |
139 | 894k | const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); |
140 | 894k | if (!ComputePredictedValue(corner_id, out_data, p)) { |
141 | 66 | return false; |
142 | 66 | } |
143 | | |
144 | 894k | const int dst_offset = p * num_components; |
145 | 894k | this->transform().ComputeOriginalValue( |
146 | 894k | predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset); |
147 | 894k | } |
148 | 255 | return true; |
149 | 321 | } |
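The loop above is the standard prediction-scheme decode pattern: for each entry, compute predicted_value_ and let the transform fold the stored correction back in. A minimal standalone sketch of that recombination follows, assuming a plain additive transform (the real PredictionSchemeWrapDecodingTransform additionally wraps the sum back into the attribute's value range) and taking the predictions as precomputed inputs, whereas in the real decoder each prediction depends on previously decoded entries.

#include <vector>

// Simplified per-entry recombination: original = predicted + correction.
// Draco's wrap transform also wraps the sum into the attribute value range.
void RecombineSketch(const std::vector<int> &predicted,
                     const std::vector<int> &corrections,
                     int num_components, std::vector<int> *out) {
  out->resize(corrections.size());
  const int num_entries =
      static_cast<int>(corrections.size()) / num_components;
  for (int p = 0; p < num_entries; ++p) {
    const int offset = p * num_components;
    for (int c = 0; c < num_components; ++c) {
      (*out)[offset + c] = predicted[offset + c] + corrections[offset + c];
    }
  }
}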
150 | | |
151 | | template <typename DataTypeT, class TransformT, class MeshDataT> |
152 | | bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>:: |
153 | 527 | DecodePredictionData(DecoderBuffer *buffer) { |
154 | | // Decode the delta coded orientations. |
155 | 527 | uint32_t num_orientations = 0; |
156 | 527 | if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { |
157 | 74 | if (!buffer->Decode(&num_orientations)) { |
158 | 3 | return false; |
159 | 3 | } |
160 | 453 | } else { |
161 | 453 | if (!DecodeVarint(&num_orientations, buffer)) { |
162 | 5 | return false; |
163 | 5 | } |
164 | 453 | } |
165 | 519 | if (num_orientations == 0) { |
166 | 5 | return false; |
167 | 5 | } |
168 | 514 | if (num_orientations > this->mesh_data().corner_table()->num_corners()) { |
169 | | // We can't have more orientations than the maximum number of decoded |
170 | | // values. |
171 | 112 | return false; |
172 | 112 | } |
173 | 402 | orientations_.resize(num_orientations); |
174 | 402 | bool last_orientation = true; |
175 | 402 | RAnsBitDecoder decoder; |
176 | 402 | if (!decoder.StartDecoding(buffer)) { |
177 | 6 | return false; |
178 | 6 | } |
179 | 277k | for (uint32_t i = 0; i < num_orientations; ++i) { |
180 | 276k | if (!decoder.DecodeNextBit()) { |
181 | 49.2k | last_orientation = !last_orientation; |
182 | 49.2k | } |
183 | 276k | orientations_[i] = last_orientation; |
184 | 276k | } |
185 | 396 | decoder.EndDecoding(); |
186 | 396 | return MeshPredictionSchemeDecoder<DataTypeT, TransformT, |
187 | 396 | MeshDataT>::DecodePredictionData(buffer); |
188 | 402 | } |
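The orientation stream decoded above is delta coded: decoding starts from an implicit true orientation, and every rANS bit that comes back as 0 toggles it, so long runs of identical orientations compress well. Below is a small standalone sketch of the same logic, with a plain bit vector (keep_bits, a hypothetical stand-in for successive RAnsBitDecoder::DecodeNextBit() results) replacing the entropy decoder.

#include <vector>

// Delta-decodes orientations: a 0 bit flips the current orientation, a 1 bit
// keeps it; the sequence starts from an implicit 'true'.
std::vector<bool> DecodeOrientationsSketch(const std::vector<bool> &keep_bits) {
  std::vector<bool> orientations(keep_bits.size());
  bool last_orientation = true;
  for (size_t i = 0; i < keep_bits.size(); ++i) {
    if (!keep_bits[i]) {
      last_orientation = !last_orientation;  // 0 bit => orientation flip.
    }
    orientations[i] = last_orientation;
  }
  return orientations;
}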
189 | | |
190 | | template <typename DataTypeT, class TransformT, class MeshDataT> |
191 | | bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>:: |
192 | | ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, |
193 | 894k | int data_id) { |
194 | | // Compute the predicted UV coordinate from the positions on all corners |
195 | | // of the processed triangle. For the best prediction, the UV coordinates |
196 | | // on the next/previous corners need to be already encoded/decoded. |
197 | 894k | const CornerIndex next_corner_id = |
198 | 894k | this->mesh_data().corner_table()->Next(corner_id); |
199 | 894k | const CornerIndex prev_corner_id = |
200 | 894k | this->mesh_data().corner_table()->Previous(corner_id); |
201 | | // Get the encoded data ids from the next and previous corners. |
202 | | // The data id is the encoding order of the UV coordinates. |
203 | 894k | int next_data_id, prev_data_id; |
204 | | |
205 | 894k | int next_vert_id, prev_vert_id; |
206 | 894k | next_vert_id = |
207 | 894k | this->mesh_data().corner_table()->Vertex(next_corner_id).value(); |
208 | 894k | prev_vert_id = |
209 | 894k | this->mesh_data().corner_table()->Vertex(prev_corner_id).value(); |
210 | | |
211 | 894k | next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id); |
212 | 894k | prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id); |
213 | | |
214 | 894k | if (prev_data_id < data_id && next_data_id < data_id) { |
215 | | // Both other corners have available UV coordinates for prediction. |
216 | 461k | const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data); |
217 | 461k | const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); |
218 | 461k | if (p_uv == n_uv) { |
219 | | // We cannot do a reliable prediction on degenerate UV triangles. |
220 | | // Technically, converting floats > INT_MAX to int is undefined behavior, |
221 | | // but compilers will convert those values to INT_MIN. We are being explicit here for asan. |
222 | 869k | for (const int i : {0, 1}) { |
223 | 869k | if (std::isnan(p_uv[i]) || static_cast<double>(p_uv[i]) > INT_MAX || |
224 | 869k | static_cast<double>(p_uv[i]) < INT_MIN) { |
225 | 602 | predicted_value_[i] = INT_MIN; |
226 | 868k | } else { |
227 | 868k | predicted_value_[i] = static_cast<int>(p_uv[i]); |
228 | 868k | } |
229 | 869k | } |
230 | 434k | return true; |
231 | 434k | } |
232 | | |
233 | | // Get positions at all corners. |
234 | 26.7k | const Vector3f tip_pos = GetPositionForEntryId(data_id); |
235 | 26.7k | const Vector3f next_pos = GetPositionForEntryId(next_data_id); |
236 | 26.7k | const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); |
237 | | // Use the positions of the above triangle to predict the texture coordinate |
238 | | // on the tip corner C. |
239 | | // Convert the triangle into a new coordinate system defined by orthogonal |
240 | | // basis vectors S, T, where S is the vector prev_pos - next_pos and T is a |
241 | | // vector perpendicular to S in the same plane as the vector |
242 | | // tip_pos - next_pos. |
243 | | // The transformed triangle in the new coordinate system is then going to |
244 | | // be represented as: |
245 | | // |
246 | | //                1 ^ |
247 | | //                  | |
248 | | //                  | |
249 | | //                  |   C |
250 | | //                  |  / \ |
251 | | //                  | /    \ |
252 | | //                  |/      \ |
253 | | //                  N--------P |
254 | | //                  0        1 |
255 | | // |
256 | | // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is |
257 | | // at (1, 0). Our goal is to compute the position of the tip_pos point (C) |
258 | | // in this new coordinate space (s, t). |
259 | | // |
260 | 26.7k | const Vector3f pn = prev_pos - next_pos; |
261 | 26.7k | const Vector3f cn = tip_pos - next_pos; |
262 | 26.7k | const float pn_norm2_squared = pn.SquaredNorm(); |
263 | | // Coordinate s of the tip corner C is simply the dot product of vectors |
264 | | // |pn| and |cn|, each normalized by the length of |pn|. Since both |
265 | | // vectors are normalized by the same length, we don't need to perform the |
266 | | // normalization explicitly and can instead use the squared norm of |pn| |
267 | | // as the denominator of the resulting dot product of the non-normalized |
268 | | // vectors. |
269 | 26.7k | float s, t; |
270 | | // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are |
271 | | // the same positions (e.g. because they were quantized to the same |
272 | | // location). |
273 | 26.7k | if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) { |
274 | 2.50k | s = pn.Dot(cn) / pn_norm2_squared; |
275 | | // To get the coordinate t, we can use the formula: |
276 | | // t = |C-N - (P-N) * s| / |P-N| |
277 | | // Do not use std::sqrt to avoid changes in the bitstream. |
278 | 2.50k | t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared); |
279 | 24.2k | } else { |
280 | 24.2k | s = 0; |
281 | 24.2k | t = 0; |
282 | 24.2k | } |
283 | | |
284 | | // Now we need to transform the point (s, t) to the texture coordinate space |
285 | | // UV. We know the UV coordinates at points N and P (N_UV and P_UV). Let's |
286 | | // denote P_UV - N_UV = PN_UV. PN_UV is then a 2-dimensional vector that can |
287 | | // be used to define the transformation from the normalized coordinate system |
288 | | // to the texture coordinate system using a 3x3 affine matrix M: |
289 | | // |
290 | | // M = | PN_UV[0] -PN_UV[1] N_UV[0] | |
291 | | // | PN_UV[1] PN_UV[0] N_UV[1] | |
292 | | // | 0 0 1 | |
293 | | // |
294 | | // The predicted point C_UV in the texture space is then equal to |
295 | | // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped |
296 | | // around the PN_UV axis, we also need to consider the point C_UV' = M * (s, -t, 1) |
297 | | // as the prediction. |
298 | 26.7k | const Vector2f pn_uv = p_uv - n_uv; |
299 | 26.7k | const float pnus = pn_uv[0] * s + n_uv[0]; |
300 | 26.7k | const float pnut = pn_uv[0] * t; |
301 | 26.7k | const float pnvs = pn_uv[1] * s + n_uv[1]; |
302 | 26.7k | const float pnvt = pn_uv[1] * t; |
303 | 26.7k | Vector2f predicted_uv; |
304 | 26.7k | if (orientations_.empty()) { |
305 | 66 | return false; |
306 | 66 | } |
307 | | |
308 | | // When decoding the data, we already know which orientation to use. |
309 | 26.6k | const bool orientation = orientations_.back(); |
310 | 26.6k | orientations_.pop_back(); |
311 | 26.6k | if (orientation) { |
312 | 9.12k | predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut); |
313 | 17.5k | } else { |
314 | 17.5k | predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut); |
315 | 17.5k | } |
316 | 26.6k | if (std::is_integral<DataTypeT>::value) { |
317 | | // Round the predicted value for integer types. |
318 | | // Technically, converting floats > INT_MAX to int is undefined behavior, |
319 | | // but compilers will convert those values to INT_MIN. We are being explicit here for asan. |
320 | 26.6k | const double u = floor(predicted_uv[0] + 0.5); |
321 | 26.6k | if (std::isnan(u) || u > INT_MAX || u < INT_MIN) { |
322 | 840 | predicted_value_[0] = INT_MIN; |
323 | 25.8k | } else { |
324 | 25.8k | predicted_value_[0] = static_cast<int>(u); |
325 | 25.8k | } |
326 | 26.6k | const double v = floor(predicted_uv[1] + 0.5); |
327 | 26.6k | if (std::isnan(v) || v > INT_MAX || v < INT_MIN) { |
328 | 906 | predicted_value_[1] = INT_MIN; |
329 | 25.7k | } else { |
330 | 25.7k | predicted_value_[1] = static_cast<int>(v); |
331 | 25.7k | } |
332 | 26.6k | } else { |
333 | 0 | predicted_value_[0] = static_cast<int>(predicted_uv[0]); |
334 | 0 | predicted_value_[1] = static_cast<int>(predicted_uv[1]); |
335 | 0 | } |
336 | | |
337 | 26.6k | return true; |
338 | 26.7k | } |
339 | | // Otherwise we don't have texture coordinates available on both corners. In |
340 | | // that case we can't use positions to predict the uv value and we resort to |
341 | | // delta coding. |
342 | 433k | int data_offset = 0; |
343 | 433k | if (prev_data_id < data_id) { |
344 | | // Use the value on the previous corner as the prediction. |
345 | 216k | data_offset = prev_data_id * num_components_; |
346 | 216k | } |
347 | 433k | if (next_data_id < data_id) { |
348 | | // Use the value on the next corner as the prediction. |
349 | 78 | data_offset = next_data_id * num_components_; |
350 | 433k | } else { |
351 | | // None of the other corners have a valid value. Use the last encoded value |
352 | | // as the prediction if possible. |
353 | 433k | if (data_id > 0) { |
354 | 432k | data_offset = (data_id - 1) * num_components_; |
355 | 432k | } else { |
356 | | // We are encoding the first value. Predict 0. |
357 | 963 | for (int i = 0; i < num_components_; ++i) { |
358 | 642 | predicted_value_[i] = 0; |
359 | 642 | } |
360 | 321 | return true; |
361 | 321 | } |
362 | 433k | } |
363 | 1.29M | for (int i = 0; i < num_components_; ++i) { |
364 | 865k | predicted_value_[i] = data[data_offset + i]; |
365 | 865k | } |
366 | 432k | return true; |
367 | 433k | } |
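To summarize the geometry in ComputePredictedValue: the tip position C is projected onto the N-to-P edge to obtain local coordinates (s, t), and (s, +/-t) is then mapped into UV space through the UVs already decoded at N and P, with the sign chosen by the decoded orientation bit. Below is a hedged, self-contained sketch of just that mapping; the plain Vec2/Vec3 aliases are stand-ins for draco::Vector2f/Vector3f, and the degenerate-triangle, bitstream-version, rounding, and INT_MIN clamping paths of the real decoder are omitted.

#include <array>
#include <cmath>

using Vec2 = std::array<float, 2>;
using Vec3 = std::array<float, 3>;

static float Dot(const Vec3 &a, const Vec3 &b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
static Vec3 Sub(const Vec3 &a, const Vec3 &b) {
  return {a[0] - b[0], a[1] - b[1], a[2] - b[2]};
}

// Predicts the UV at the tip corner C from the three corner positions and the
// already decoded UVs at N and P. 'flip' plays the role of one decoded
// orientation bit.
Vec2 PredictTipUv(const Vec3 &n_pos, const Vec3 &p_pos, const Vec3 &c_pos,
                  const Vec2 &n_uv, const Vec2 &p_uv, bool flip) {
  const Vec3 pn = Sub(p_pos, n_pos);  // Edge N -> P.
  const Vec3 cn = Sub(c_pos, n_pos);  // Edge N -> C.
  const float pn_norm2 = Dot(pn, pn);
  float s = 0.f, t = 0.f;
  if (pn_norm2 > 0.f) {
    // Projection of C onto the N -> P edge, normalized by |P - N|.
    s = Dot(pn, cn) / pn_norm2;
    const Vec3 rest = {cn[0] - pn[0] * s, cn[1] - pn[1] * s,
                       cn[2] - pn[2] * s};
    t = std::sqrt(Dot(rest, rest) / pn_norm2);
  }
  // C_UV = N_UV + s * PN_UV +/- t * Rot90(PN_UV); the sign encodes whether
  // the UV triangle is flipped around the N_UV -> P_UV axis.
  const Vec2 pn_uv = {p_uv[0] - n_uv[0], p_uv[1] - n_uv[1]};
  if (flip) {
    return {pn_uv[0] * s - pn_uv[1] * t + n_uv[0],
            pn_uv[1] * s + pn_uv[0] * t + n_uv[1]};
  }
  return {pn_uv[0] * s + pn_uv[1] * t + n_uv[0],
          pn_uv[1] * s - pn_uv[0] * t + n_uv[1]};
}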
368 | | |
369 | | } // namespace draco |
370 | | |
371 | | #endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ |
372 | | #endif |