/src/assimp/code/AssetLib/Collada/ColladaLoader.cpp
Line | Count | Source |
1 | | /* |
2 | | --------------------------------------------------------------------------- |
3 | | Open Asset Import Library (assimp) |
4 | | --------------------------------------------------------------------------- |
5 | | |
6 | | Copyright (c) 2006-2025, assimp team |
7 | | |
8 | | All rights reserved. |
9 | | |
10 | | Redistribution and use of this software in source and binary forms, |
11 | | with or without modification, are permitted provided that the following |
12 | | conditions are met: |
13 | | |
14 | | * Redistributions of source code must retain the above |
15 | | copyright notice, this list of conditions and the |
16 | | following disclaimer. |
17 | | |
18 | | * Redistributions in binary form must reproduce the above |
19 | | copyright notice, this list of conditions and the |
20 | | following disclaimer in the documentation and/or other |
21 | | materials provided with the distribution. |
22 | | |
23 | | * Neither the name of the assimp team, nor the names of its |
24 | | contributors may be used to endorse or promote products |
25 | | derived from this software without specific prior |
26 | | written permission of the assimp team. |
27 | | |
28 | | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
29 | | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
30 | | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
31 | | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
32 | | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
33 | | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
34 | | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
35 | | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
36 | | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
37 | | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
38 | | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
39 | | --------------------------------------------------------------------------- |
40 | | */ |
41 | | |
42 | | /** @file Implementation of the Collada loader */ |
43 | | |
44 | | #ifndef ASSIMP_BUILD_NO_COLLADA_IMPORTER |
45 | | |
46 | | #include "ColladaLoader.h" |
47 | | #include "ColladaParser.h" |
48 | | #include <assimp/ColladaMetaData.h> |
49 | | #include <assimp/CreateAnimMesh.h> |
50 | | #include <assimp/ParsingUtils.h> |
51 | | #include <assimp/SkeletonMeshBuilder.h> |
52 | | #include <assimp/ZipArchiveIOSystem.h> |
53 | | #include <assimp/anim.h> |
54 | | #include <assimp/fast_atof.h> |
55 | | #include <assimp/importerdesc.h> |
56 | | #include <assimp/scene.h> |
57 | | #include <assimp/DefaultLogger.hpp> |
58 | | #include <assimp/Importer.hpp> |
59 | | |
60 | | #include <numeric> |
61 | | |
62 | | namespace Assimp { |
63 | | |
64 | | using namespace Assimp::Formatter; |
65 | | using namespace Assimp::Collada; |
66 | | |
67 | | static constexpr aiImporterDesc desc = { |
68 | | "Collada Importer", |
69 | | "", |
70 | | "", |
71 | | "http://collada.org", |
72 | | aiImporterFlags_SupportTextFlavour | aiImporterFlags_SupportCompressedFlavour, |
73 | | 1, |
74 | | 3, |
75 | | 1, |
76 | | 5, |
77 | | "dae xml zae" |
78 | | }; |
79 | | |
80 | | static constexpr float kMillisecondsFromSeconds = 1000.f; |
81 | | |
82 | | // Add an item of metadata to a node |
83 | | // Assumes the key is not already in the list |
84 | | template <typename T> |
85 | 0 | void AddNodeMetaData(aiNode *node, const std::string &key, const T &value) { |
86 | 0 | if (nullptr == node->mMetaData) { |
87 | 0 | node->mMetaData = new aiMetadata(); |
88 | 0 | } |
89 | 0 | node->mMetaData->Add(key, value); |
90 | 0 | } |
91 | | |
92 | | // ------------------------------------------------------------------------------------------------ |
93 | | // Reads a float value from an accessor and its data array. |
94 | 0 | static ai_real ReadFloat(const Accessor &pAccessor, const Data &pData, size_t pIndex, size_t pOffset) { |
95 | 0 | const size_t pos = pAccessor.mStride * pIndex + pAccessor.mOffset + pOffset; |
96 | 0 | ai_assert(pos < pData.mValues.size()); |
97 | 0 | return pData.mValues[pos]; |
98 | 0 | } |
99 | | |
100 | | // ------------------------------------------------------------------------------------------------ |
101 | | // Constructor to be privately used by Importer |
102 | | ColladaLoader::ColladaLoader() : |
103 | 327 | noSkeletonMesh(false), |
104 | 327 | removeEmptyBones(false), |
105 | 327 | ignoreUpDirection(false), |
106 | 327 | ignoreUnitSize(false), |
107 | 327 | useColladaName(false), |
108 | 327 | mNodeNameCounter(0) { |
109 | | // empty |
110 | 327 | } |
111 | | |
112 | | // ------------------------------------------------------------------------------------------------ |
113 | | // Returns whether the class can handle the format of the given file. |
114 | 159 | bool ColladaLoader::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool /*checkSig*/) const { |
115 | | // Look for a DAE file inside, but don't extract it |
116 | 159 | ZipArchiveIOSystem zip_archive(pIOHandler, pFile); |
117 | 159 | if (zip_archive.isOpen()) { |
118 | 7 | return !ColladaParser::ReadZaeManifest(zip_archive).empty(); |
119 | 7 | } |
120 | | |
121 | 152 | static const char *tokens[] = { "<collada" }; |
122 | 152 | return SearchFileHeaderForToken(pIOHandler, pFile, tokens, AI_COUNT_OF(tokens)); |
123 | 159 | } |
124 | | |
125 | | // ------------------------------------------------------------------------------------------------ |
126 | 0 | void ColladaLoader::SetupProperties(const Importer *pImp) { |
127 | 0 | noSkeletonMesh = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_NO_SKELETON_MESHES, 0) != 0; |
128 | 0 | removeEmptyBones = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, true) != 0; |
129 | 0 | ignoreUpDirection = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION, 0) != 0; |
130 | 0 | ignoreUnitSize = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UNIT_SIZE, 0) != 0; |
131 | 0 | useColladaName = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_USE_COLLADA_NAMES, 0) != 0; |
132 | 0 | } |
133 | | |
// ------------------------------------------------------------------------------------------------
// Get file extension list
const aiImporterDesc *ColladaLoader::GetInfo() const {
    // Returns the static importer description (handles the dae/xml/zae extensions).
    return &desc;
}
139 | | |
// ------------------------------------------------------------------------------------------------
// Imports the given file into the given scene structure.
void ColladaLoader::InternReadFile(const std::string &pFile, aiScene *pScene, IOSystem *pIOHandler) {
    mFileName = pFile;

    // clean all member arrays - just for safety, it should work even if we did not
    mMeshIndexByID.clear();
    mMaterialIndexByName.clear();
    mMeshes.clear();
    mTargetMeshes.clear();
    newMats.clear();
    mLights.clear();
    mCameras.clear();
    mTextures.clear();
    mAnims.clear();

    // parse the input file
    ColladaParser parser(pIOHandler, pFile);

    // A parse with no root node means there was nothing usable in the file.
    if (!parser.mRootNode) {
        throw DeadlyImportError("Collada: File came out empty. Something is wrong here.");
    }

    // reserve some storage to avoid unnecessary reallocates
    newMats.reserve(parser.mMaterialLibrary.size() * 2u);
    mMeshes.reserve(parser.mMeshLibrary.size() * 2u);

    mCameras.reserve(parser.mCameraLibrary.size());
    mLights.reserve(parser.mLightLibrary.size());

    // create the materials first, for the meshes to find
    BuildMaterials(parser, pScene);

    // build the node hierarchy from it
    pScene->mRootNode = BuildHierarchy(parser, parser.mRootNode);

    // ... then fill the materials with the now adjusted settings
    FillMaterials(parser, pScene);

    if (!ignoreUnitSize) {
        // Apply unit-size scale calculation: bake the document's unit scale
        // into the root node's transform as a uniform scaling matrix.
        pScene->mRootNode->mTransformation *= aiMatrix4x4(
                parser.mUnitSize, 0, 0, 0,
                0, parser.mUnitSize, 0, 0,
                0, 0, parser.mUnitSize, 0,
                0, 0, 0, 1);
    }

    if (!ignoreUpDirection) {
        // Convert to Y_UP, if different orientation: rotate the root so the
        // document's up axis maps onto Assimp's +Y convention.
        if (parser.mUpDirection == ColladaParser::UP_X) {
            pScene->mRootNode->mTransformation *= aiMatrix4x4(
                    0, -1, 0, 0,
                    1, 0, 0, 0,
                    0, 0, 1, 0,
                    0, 0, 0, 1);
        } else if (parser.mUpDirection == ColladaParser::UP_Z) {
            pScene->mRootNode->mTransformation *= aiMatrix4x4(
                    1, 0, 0, 0,
                    0, 0, 1, 0,
                    0, -1, 0, 0,
                    0, 0, 0, 1);
        }
    }

    // Store scene metadata (copied entry-by-entry from the parsed <asset> block)
    if (!parser.mAssetMetaData.empty()) {
        const size_t numMeta(parser.mAssetMetaData.size());
        pScene->mMetaData = aiMetadata::Alloc(static_cast<unsigned int>(numMeta));
        size_t i = 0;
        for (auto it = parser.mAssetMetaData.cbegin(); it != parser.mAssetMetaData.cend(); ++it, ++i) {
            pScene->mMetaData->Set(static_cast<unsigned int>(i), (*it).first, (*it).second);
        }
    }

    // Transfer the collected intermediate arrays into the aiScene.
    StoreSceneMeshes(pScene);
    StoreSceneMaterials(pScene);
    StoreSceneTextures(pScene);
    StoreSceneLights(pScene);
    StoreSceneCameras(pScene);
    StoreAnimations(pScene, parser);

    // If no meshes have been loaded, it's probably just an animated skeleton.
    if (0u == pScene->mNumMeshes) {
        if (!noSkeletonMesh) {
            // Build a dummy visualization mesh so the skeleton is displayable.
            SkeletonMeshBuilder hero(pScene);
        }
        pScene->mFlags |= AI_SCENE_FLAGS_INCOMPLETE;
    }
}
230 | | |
// ------------------------------------------------------------------------------------------------
// Recursively constructs a scene node for the given parser node and returns it.
aiNode *ColladaLoader::BuildHierarchy(const ColladaParser &pParser, const Collada::Node *pNode) {
    // create a node for it
    auto *node = new aiNode();

    // find a name for the new node. It's more complicated than you might think
    node->mName.Set(FindNameForNode(pNode));
    // if we're not using the unique IDs, hold onto them for reference and export
    if (useColladaName) {
        if (!pNode->mID.empty()) {
            AddNodeMetaData(node, AI_METADATA_COLLADA_ID, aiString(pNode->mID));
        }
        if (!pNode->mSID.empty()) {
            AddNodeMetaData(node, AI_METADATA_COLLADA_SID, aiString(pNode->mSID));
        }
    }

    // calculate the transformation matrix for it
    node->mTransformation = pParser.CalculateResultTransform(pNode->mTransforms);

    // now resolve node instances (<instance_node> references to library nodes)
    std::vector<const Node*> instances;
    ResolveNodeInstances(pParser, pNode, instances);

    // add children. first the *real* ones
    // NOTE: the child array holds both direct children and resolved instances;
    // instances are appended after pNode->mChildren.size() real children below.
    node->mNumChildren = static_cast<unsigned int>(pNode->mChildren.size() + instances.size());
    if (node->mNumChildren != 0) {
        node->mChildren = new aiNode * [node->mNumChildren];
    }

    for (size_t a = 0; a < pNode->mChildren.size(); ++a) {
        node->mChildren[a] = BuildHierarchy(pParser, pNode->mChildren[a]);
        node->mChildren[a]->mParent = node;
    }

    // ... and finally the resolved node instances
    for (size_t a = 0; a < instances.size(); ++a) {
        node->mChildren[pNode->mChildren.size() + a] = BuildHierarchy(pParser, instances[a]);
        node->mChildren[pNode->mChildren.size() + a]->mParent = node;
    }

    // Attach the meshes, cameras and lights referenced by this Collada node.
    BuildMeshesForNode(pParser, pNode, node);
    BuildCamerasForNode(pParser, pNode, node);
    BuildLightsForNode(pParser, pNode, node);

    return node;
}
279 | | |
// ------------------------------------------------------------------------------------------------
// Resolve node instances
void ColladaLoader::ResolveNodeInstances(const ColladaParser &pParser, const Node *pNode,
        std::vector<const Node*> &resolved) const {
    // reserve enough storage
    resolved.reserve(pNode->mNodeInstances.size());

    // ... and iterate through all nodes to be instanced as children of pNode
    // (structured binding extracts the referenced node id from each instance entry)
    for (const auto &[mNode] : pNode->mNodeInstances) {
        // find the corresponding node in the library
        const auto itt = pParser.mNodeLibrary.find(mNode);
        const Node *nd = itt == pParser.mNodeLibrary.end() ? nullptr : (*itt).second;

        // FIX for http://sourceforge.net/tracker/?func=detail&aid=3054873&group_id=226462&atid=1067632
        // need to check for both name and ID to catch all. To avoid breaking valid files,
        // the workaround is only enabled when the first attempt to resolve the node has failed.
        if (nullptr == nd) {
            nd = FindNode(pParser.mRootNode, mNode);
        }
        if (nullptr == nd) {
            // unresolvable reference: log and drop, the importer continues without it
            ASSIMP_LOG_ERROR("Collada: Unable to resolve reference to instanced node ", mNode);
        } else {
            // attach this node to the list of children
            resolved.push_back(nd);
        }
    }
}
307 | | |
308 | | // ------------------------------------------------------------------------------------------------ |
309 | | // Resolve UV channels |
310 | 0 | static void ApplyVertexToEffectSemanticMapping(Sampler &sampler, const SemanticMappingTable &table) { |
311 | 0 | const auto it = table.mMap.find(sampler.mUVChannel); |
312 | 0 | if (it == table.mMap.end()) { |
313 | 0 | return; |
314 | 0 | } |
315 | | |
316 | 0 | if (it->second.mType != IT_Texcoord) { |
317 | 0 | ASSIMP_LOG_ERROR("Collada: Unexpected effect input mapping"); |
318 | 0 | } |
319 | |
|
320 | 0 | sampler.mUVId = it->second.mSet; |
321 | 0 | } |
322 | | |
// ------------------------------------------------------------------------------------------------
// Builds lights for the given node and references them
void ColladaLoader::BuildLightsForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) {
    for (const LightInstance &lid : pNode->mLights) {
        // find the referred light
        auto srcLightIt = pParser.mLightLibrary.find(lid.mLight);
        if (srcLightIt == pParser.mLightLibrary.end()) {
            ASSIMP_LOG_WARN("Collada: Unable to find light for ID \"", lid.mLight, "\". Skipping.");
            continue;
        }
        const Collada::Light *srcLight = &srcLightIt->second;

        // now fill our ai data structure; the light is named after the node so
        // it can be matched back to its transform.
        auto out = new aiLight();
        out->mName = pTarget->mName;
        out->mType = (aiLightSourceType)srcLight->mType;

        // collada lights point in -Z by default, rest is specified in node transform
        out->mDirection = aiVector3D(0.f, 0.f, -1.f);

        out->mAttenuationConstant = srcLight->mAttConstant;
        out->mAttenuationLinear = srcLight->mAttLinear;
        out->mAttenuationQuadratic = srcLight->mAttQuadratic;

        // NOTE: this first assignment is immediately overwritten by both branches
        // below; kept as-is to preserve the original statement order.
        out->mColorDiffuse = out->mColorSpecular = out->mColorAmbient = srcLight->mColor * srcLight->mIntensity;
        if (out->mType == aiLightSource_AMBIENT) {
            out->mColorDiffuse = out->mColorSpecular = aiColor3D(0, 0, 0);
            out->mColorAmbient = srcLight->mColor * srcLight->mIntensity;
        } else {
            // collada doesn't differentiate between these color types
            out->mColorDiffuse = out->mColorSpecular = srcLight->mColor * srcLight->mIntensity;
            out->mColorAmbient = aiColor3D(0, 0, 0);
        }

        // convert falloff angle and falloff exponent in our representation, if given
        if (out->mType == aiLightSource_SPOT) {
            out->mAngleInnerCone = AI_DEG_TO_RAD(srcLight->mFalloffAngle);

            // ... some extension magic.
            // (ASSIMP_COLLADA_LIGHT_ANGLE_NOT_SET acts as the "not specified" sentinel;
            // the epsilon-scaled comparison tolerates float rounding of that sentinel)
            if (srcLight->mOuterAngle >= ASSIMP_COLLADA_LIGHT_ANGLE_NOT_SET * (1 - ai_epsilon)) {
                // ... some deprecation magic.
                if (srcLight->mPenumbraAngle >= ASSIMP_COLLADA_LIGHT_ANGLE_NOT_SET * (1 - ai_epsilon)) {
                    // Need to rely on falloff_exponent. I don't know how to interpret it, so I need to guess ....
                    // epsilon chosen to be 0.1
                    float f = 1.0f;
                    if ( 0.0f != srcLight->mFalloffExponent ) {
                        f = 1.f / srcLight->mFalloffExponent;
                    }
                    out->mAngleOuterCone = std::acos(std::pow(0.1f, f)) +
                                           out->mAngleInnerCone;
                } else {
                    out->mAngleOuterCone = out->mAngleInnerCone + AI_DEG_TO_RAD(srcLight->mPenumbraAngle);
                    // a negative penumbra would invert the cones; keep inner <= outer
                    if (out->mAngleOuterCone < out->mAngleInnerCone)
                        std::swap(out->mAngleInnerCone, out->mAngleOuterCone);
                }
            } else {
                out->mAngleOuterCone = AI_DEG_TO_RAD(srcLight->mOuterAngle);
            }
        }

        // add to light list
        mLights.push_back(out);
    }
}
387 | | |
// ------------------------------------------------------------------------------------------------
// Builds cameras for the given node and references them
void ColladaLoader::BuildCamerasForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) {
    for (const CameraInstance &cid : pNode->mCameras) {
        // find the referred camera
        auto srcCameraIt = pParser.mCameraLibrary.find(cid.mCamera);
        if (srcCameraIt == pParser.mCameraLibrary.end()) {
            ASSIMP_LOG_WARN("Collada: Unable to find camera for ID \"", cid.mCamera, "\". Skipping.");
            continue;
        }
        const Collada::Camera *srcCamera = &srcCameraIt->second;

        // orthographic cameras not yet supported in Assimp
        if (srcCamera->mOrtho) {
            ASSIMP_LOG_WARN("Collada: Orthographic cameras are not supported.");
        }

        // now fill our ai data structure; named after the node carrying it
        auto *out = new aiCamera();
        out->mName = pTarget->mName;

        // collada cameras point in -Z by default, rest is specified in node transform
        out->mLookAt = aiVector3D(0.f, 0.f, -1.f);

        // near/far z is already ok
        out->mClipPlaneFar = srcCamera->mZFar;
        out->mClipPlaneNear = srcCamera->mZNear;

        // ... but for the rest some values are optional
        // and we need to compute the others in any combination.
        // (10e10f is used throughout as the "value not given" sentinel)
        if (srcCamera->mAspect != 10e10f) {
            out->mAspect = srcCamera->mAspect;
        }

        if (srcCamera->mHorFov != 10e10f) {
            out->mHorizontalFOV = srcCamera->mHorFov;

            // derive the aspect ratio from both FOVs when it wasn't given explicitly
            // NOTE(review): uses tan of the full angles, not half-angles - verify intent
            if (srcCamera->mVerFov != 10e10f && srcCamera->mAspect == 10e10f) {
                out->mAspect = std::tan(AI_DEG_TO_RAD(srcCamera->mHorFov)) /
                               std::tan(AI_DEG_TO_RAD(srcCamera->mVerFov));
            }

        } else if (srcCamera->mAspect != 10e10f && srcCamera->mVerFov != 10e10f) {
            // no horizontal FOV given: compute it from aspect ratio and vertical FOV
            out->mHorizontalFOV = 2.0f * AI_RAD_TO_DEG(std::atan(srcCamera->mAspect *
                                                  std::tan(AI_DEG_TO_RAD(srcCamera->mVerFov) * 0.5f)));
        }

        // Collada uses degrees, we use radians
        out->mHorizontalFOV = AI_DEG_TO_RAD(out->mHorizontalFOV);

        // add to camera list
        mCameras.push_back(out);
    }
}
442 | | |
// ------------------------------------------------------------------------------------------------
// Builds meshes for the given node and references them
void ColladaLoader::BuildMeshesForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) {
    // accumulated mesh references by this node
    std::vector<size_t> newMeshRefs;
    newMeshRefs.reserve(pNode->mMeshes.size());

    // add a mesh for each subgroup in each collada mesh
    for (const MeshInstance &mid : pNode->mMeshes) {
        const Mesh *srcMesh = nullptr;
        const Controller *srcController = nullptr;

        // find the referred mesh
        auto srcMeshIt = pParser.mMeshLibrary.find(mid.mMeshOrController);
        if (srcMeshIt == pParser.mMeshLibrary.end()) {
            // if not found in the mesh-library, it might also be a controller referring to a mesh
            auto srcContrIt = pParser.mControllerLibrary.find(mid.mMeshOrController);
            if (srcContrIt != pParser.mControllerLibrary.end()) {
                srcController = &srcContrIt->second;
                srcMeshIt = pParser.mMeshLibrary.find(srcController->mMeshId);
                if (srcMeshIt != pParser.mMeshLibrary.end()) {
                    srcMesh = srcMeshIt->second;
                }
            }

            if (nullptr == srcMesh) {
                ASSIMP_LOG_WARN("Collada: Unable to find geometry for ID \"", mid.mMeshOrController, "\". Skipping.");
                continue;
            }
        } else {
            // ID found in the mesh library -> direct reference to a not skinned mesh
            srcMesh = srcMeshIt->second;
        }

        // build a mesh for each of its subgroups
        // vertexStart/faceStart track where each submesh's data begins inside
        // the source mesh's flat vertex/face arrays.
        size_t vertexStart = 0, faceStart = 0;
        for (size_t sm = 0; sm < srcMesh->mSubMeshes.size(); ++sm) {
            const Collada::SubMesh &submesh = srcMesh->mSubMeshes[sm];
            if (submesh.mNumFaces == 0) {
                continue;
            }

            // find material assigned to this submesh
            std::string meshMaterial;
            auto meshMatIt = mid.mMaterials.find(submesh.mMaterial);

            const Collada::SemanticMappingTable *table = nullptr;
            if (meshMatIt != mid.mMaterials.end()) {
                table = &meshMatIt->second;
                meshMaterial = table->mMatName;
            } else {
                // no binding for this material symbol: warn and fall back to the
                // first bound material, if any
                ASSIMP_LOG_WARN("Collada: No material specified for subgroup <", submesh.mMaterial, "> in geometry <",
                        mid.mMeshOrController, ">.");
                if (!mid.mMaterials.empty()) {
                    meshMaterial = mid.mMaterials.begin()->second.mMatName;
                }
            }

            // OK ... here the *real* fun starts ... we have the vertex-input-to-effect-semantic-table
            // given. The only mapping stuff which we do actually support is the UV channel.
            auto matIt = mMaterialIndexByName.find(meshMaterial);
            unsigned int matIdx = 0;
            if (matIt != mMaterialIndexByName.end()) {
                matIdx = static_cast<unsigned int>(matIt->second);
            }

            if (table && !table->mMap.empty()) {
                std::pair<Collada::Effect *, aiMaterial *> &mat = newMats[matIdx];

                // Iterate through all texture channels assigned to the effect and
                // check whether we have mapping information for it.
                ApplyVertexToEffectSemanticMapping(mat.first->mTexDiffuse, *table);
                ApplyVertexToEffectSemanticMapping(mat.first->mTexAmbient, *table);
                ApplyVertexToEffectSemanticMapping(mat.first->mTexSpecular, *table);
                ApplyVertexToEffectSemanticMapping(mat.first->mTexEmissive, *table);
                ApplyVertexToEffectSemanticMapping(mat.first->mTexTransparent, *table);
                ApplyVertexToEffectSemanticMapping(mat.first->mTexBump, *table);
            }

            // built lookup index of the Mesh-Submesh-Material combination
            ColladaMeshIndex index(mid.mMeshOrController, sm, meshMaterial);

            // if we already have the mesh at the library, just add its index to the node's array
            auto dstMeshIt = mMeshIndexByID.find(index);
            if (dstMeshIt != mMeshIndexByID.end()) {
                newMeshRefs.push_back(dstMeshIt->second);
            } else {
                // else we have to add the mesh to the collection and store its newly assigned index at the node
                aiMesh *dstMesh = CreateMesh(pParser, srcMesh, submesh, srcController, vertexStart, faceStart);

                // store the mesh, and store its new index in the node
                newMeshRefs.push_back(mMeshes.size());
                mMeshIndexByID[index] = mMeshes.size();
                mMeshes.push_back(dstMesh);
                vertexStart += dstMesh->mNumVertices;
                faceStart += submesh.mNumFaces;

                // assign the material index: prefer the submesh's own material
                // symbol, fall back to the bound material resolved above
                auto subMatIt = mMaterialIndexByName.find(submesh.mMaterial);
                if (subMatIt != mMaterialIndexByName.end()) {
                    dstMesh->mMaterialIndex = static_cast<unsigned int>(subMatIt->second);
                } else {
                    dstMesh->mMaterialIndex = matIdx;
                }
                if (dstMesh->mName.length == 0) {
                    dstMesh->mName = mid.mMeshOrController;
                }
            }
        }
    }

    // now place all mesh references we gathered in the target node
    pTarget->mNumMeshes = static_cast<unsigned int>(newMeshRefs.size());
    if (!newMeshRefs.empty()) {
        // functor narrowing size_t indices to the unsigned int aiNode expects
        struct UIntTypeConverter {
            unsigned int operator()(const size_t &v) const {
                return static_cast<unsigned int>(v);
            }
        };

        pTarget->mMeshes = new unsigned int[pTarget->mNumMeshes];
        std::transform(newMeshRefs.begin(), newMeshRefs.end(), pTarget->mMeshes, UIntTypeConverter());
    }
}
567 | | |
568 | | // ------------------------------------------------------------------------------------------------ |
569 | | // Find mesh from either meshes or morph target meshes |
570 | 0 | aiMesh *ColladaLoader::findMesh(const std::string &meshid) { |
571 | 0 | if (meshid.empty()) { |
572 | 0 | return nullptr; |
573 | 0 | } |
574 | | |
575 | 0 | for (auto & mMeshe : mMeshes) { |
576 | 0 | if (std::string(mMeshe->mName.data) == meshid) { |
577 | 0 | return mMeshe; |
578 | 0 | } |
579 | 0 | } |
580 | | |
581 | 0 | for (auto & mTargetMeshe : mTargetMeshes) { |
582 | 0 | if (std::string(mTargetMeshe->mName.data) == meshid) { |
583 | 0 | return mTargetMeshe; |
584 | 0 | } |
585 | 0 | } |
586 | | |
587 | 0 | return nullptr; |
588 | 0 | } |
589 | | |
590 | | // ------------------------------------------------------------------------------------------------ |
591 | | // Creates a mesh for the given ColladaMesh face subset and returns the newly created mesh |
592 | | aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Mesh *pSrcMesh, const SubMesh &pSubMesh, |
593 | 0 | const Controller *pSrcController, size_t pStartVertex, size_t pStartFace) { |
594 | 0 | std::unique_ptr<aiMesh> dstMesh(new aiMesh); |
595 | |
|
596 | 0 | if (useColladaName) { |
597 | 0 | dstMesh->mName = pSrcMesh->mName; |
598 | 0 | } else { |
599 | 0 | dstMesh->mName = pSrcMesh->mId; |
600 | 0 | } |
601 | |
|
602 | 0 | if (pSrcMesh->mPositions.empty()) { |
603 | 0 | return dstMesh.release(); |
604 | 0 | } |
605 | | |
606 | | // count the vertices addressed by its faces |
607 | 0 | const size_t numVertices = std::accumulate(pSrcMesh->mFaceSize.begin() + pStartFace, |
608 | 0 | pSrcMesh->mFaceSize.begin() + pStartFace + pSubMesh.mNumFaces, size_t(0)); |
609 | | |
610 | | // copy positions |
611 | 0 | dstMesh->mNumVertices = static_cast<unsigned int>(numVertices); |
612 | 0 | dstMesh->mVertices = new aiVector3D[numVertices]; |
613 | 0 | std::copy(pSrcMesh->mPositions.begin() + pStartVertex, pSrcMesh->mPositions.begin() + pStartVertex + numVertices, dstMesh->mVertices); |
614 | | |
615 | | // normals, if given. HACK: (thom) Due to the glorious Collada spec we never |
616 | | // know if we have the same number of normals as there are positions. So we |
617 | | // also ignore any vertex attribute if it has a different count |
618 | 0 | if (pSrcMesh->mNormals.size() >= pStartVertex + numVertices) { |
619 | 0 | dstMesh->mNormals = new aiVector3D[numVertices]; |
620 | 0 | std::copy(pSrcMesh->mNormals.begin() + pStartVertex, pSrcMesh->mNormals.begin() + pStartVertex + numVertices, dstMesh->mNormals); |
621 | 0 | } |
622 | | |
623 | | // tangents, if given. |
624 | 0 | if (pSrcMesh->mTangents.size() >= pStartVertex + numVertices) { |
625 | 0 | dstMesh->mTangents = new aiVector3D[numVertices]; |
626 | 0 | std::copy(pSrcMesh->mTangents.begin() + pStartVertex, pSrcMesh->mTangents.begin() + pStartVertex + numVertices, dstMesh->mTangents); |
627 | 0 | } |
628 | | |
629 | | // bi-tangents, if given. |
630 | 0 | if (pSrcMesh->mBitangents.size() >= pStartVertex + numVertices) { |
631 | 0 | dstMesh->mBitangents = new aiVector3D[numVertices]; |
632 | 0 | std::copy(pSrcMesh->mBitangents.begin() + pStartVertex, pSrcMesh->mBitangents.begin() + pStartVertex + numVertices, dstMesh->mBitangents); |
633 | 0 | } |
634 | | |
635 | | // same for texture coords, as many as we have |
636 | 0 | for (size_t a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a) { |
637 | 0 | if (pSrcMesh->mTexCoords[a].size() >= pStartVertex + numVertices) { |
638 | 0 | dstMesh->mTextureCoords[a] = new aiVector3D[numVertices]; |
639 | 0 | for (size_t b = 0; b < numVertices; ++b) { |
640 | 0 | dstMesh->mTextureCoords[a][b] = pSrcMesh->mTexCoords[a][pStartVertex + b]; |
641 | 0 | } |
642 | |
|
643 | 0 | dstMesh->mNumUVComponents[a] = pSrcMesh->mNumUVComponents[a]; |
644 | 0 | } |
645 | 0 | } |
646 | | |
647 | | // same for vertex colors, as many as we have. again the same packing to avoid empty slots |
648 | 0 | for (size_t a = 0, real = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; ++a) { |
649 | 0 | if (pSrcMesh->mColors[a].size() >= pStartVertex + numVertices) { |
650 | 0 | dstMesh->mColors[real] = new aiColor4D[numVertices]; |
651 | 0 | std::copy(pSrcMesh->mColors[a].begin() + pStartVertex, pSrcMesh->mColors[a].begin() + pStartVertex + numVertices, dstMesh->mColors[real]); |
652 | 0 | ++real; |
653 | 0 | } |
654 | 0 | } |
655 | | |
656 | | // create faces. Due to the fact that each face uses unique vertices, we can simply count up on each vertex |
657 | 0 | size_t vertex = 0; |
658 | 0 | dstMesh->mNumFaces = static_cast<unsigned int>(pSubMesh.mNumFaces); |
659 | 0 | dstMesh->mFaces = new aiFace[dstMesh->mNumFaces]; |
660 | 0 | for (size_t a = 0; a < dstMesh->mNumFaces; ++a) { |
661 | 0 | size_t s = pSrcMesh->mFaceSize[pStartFace + a]; |
662 | 0 | aiFace &face = dstMesh->mFaces[a]; |
663 | 0 | face.mNumIndices = static_cast<unsigned int>(s); |
664 | 0 | face.mIndices = new unsigned int[s]; |
665 | 0 | for (size_t b = 0; b < s; ++b) { |
666 | 0 | face.mIndices[b] = static_cast<unsigned int>(vertex++); |
667 | 0 | } |
668 | 0 | } |
669 | | |
670 | | // create morph target meshes if any |
671 | 0 | std::vector<aiMesh *> targetMeshes; |
672 | 0 | std::vector<float> targetWeights; |
673 | 0 | Collada::MorphMethod method = Normalized; |
674 | |
|
675 | 0 | for (auto it = pParser.mControllerLibrary.begin(); |
676 | 0 | it != pParser.mControllerLibrary.end(); ++it) { |
677 | 0 | const Controller &c = it->second; |
678 | 0 | const Collada::Mesh *baseMesh = pParser.ResolveLibraryReference(pParser.mMeshLibrary, c.mMeshId); |
679 | |
|
680 | 0 | if (c.mType == Collada::Morph && baseMesh->mName == pSrcMesh->mName) { |
681 | 0 | const Collada::Accessor &targetAccessor = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, c.mMorphTarget); |
682 | 0 | const Collada::Accessor &weightAccessor = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, c.mMorphWeight); |
683 | 0 | const Collada::Data &targetData = pParser.ResolveLibraryReference(pParser.mDataLibrary, targetAccessor.mSource); |
684 | 0 | const Collada::Data &weightData = pParser.ResolveLibraryReference(pParser.mDataLibrary, weightAccessor.mSource); |
685 | | |
686 | | // take method |
687 | 0 | method = c.mMethod; |
688 | |
|
689 | 0 | if (!targetData.mIsStringArray) { |
690 | 0 | throw DeadlyImportError("target data must contain id. "); |
691 | 0 | } |
692 | 0 | if (weightData.mIsStringArray) { |
693 | 0 | throw DeadlyImportError("target weight data must not be textual "); |
694 | 0 | } |
695 | | |
696 | 0 | for (const auto & mString : targetData.mStrings) { |
697 | 0 | const Mesh *targetMesh = pParser.ResolveLibraryReference(pParser.mMeshLibrary, mString); |
698 | |
|
699 | 0 | aiMesh *aimesh = findMesh(useColladaName ? targetMesh->mName : targetMesh->mId); |
700 | 0 | if (!aimesh) { |
701 | 0 | if (targetMesh->mSubMeshes.size() > 1) { |
702 | 0 | throw DeadlyImportError("Morphing target mesh must be a single"); |
703 | 0 | } |
704 | 0 | aimesh = CreateMesh(pParser, targetMesh, targetMesh->mSubMeshes.at(0), nullptr, 0, 0); |
705 | 0 | mTargetMeshes.push_back(aimesh); |
706 | 0 | } |
707 | 0 | targetMeshes.push_back(aimesh); |
708 | 0 | } |
709 | 0 | for (float mValue : weightData.mValues) { |
710 | 0 | targetWeights.push_back(mValue); |
711 | 0 | } |
712 | 0 | } |
713 | 0 | } |
714 | 0 | if (!targetMeshes.empty() && targetWeights.size() == targetMeshes.size()) { |
715 | 0 | std::vector<aiAnimMesh *> animMeshes; |
716 | 0 | for (unsigned int i = 0; i < targetMeshes.size(); ++i) { |
717 | 0 | aiMesh *targetMesh = targetMeshes.at(i); |
718 | 0 | aiAnimMesh *animMesh = aiCreateAnimMesh(targetMesh); |
719 | 0 | float weight = targetWeights[i]; |
720 | 0 | animMesh->mWeight = weight == 0 ? 1.0f : weight; |
721 | 0 | animMesh->mName = targetMesh->mName; |
722 | 0 | animMeshes.push_back(animMesh); |
723 | 0 | } |
724 | 0 | dstMesh->mMethod = (method == Relative) ? aiMorphingMethod_MORPH_RELATIVE : aiMorphingMethod_MORPH_NORMALIZED; |
725 | 0 | dstMesh->mAnimMeshes = new aiAnimMesh *[animMeshes.size()]; |
726 | 0 | dstMesh->mNumAnimMeshes = static_cast<unsigned int>(animMeshes.size()); |
727 | 0 | for (unsigned int i = 0; i < animMeshes.size(); ++i) { |
728 | 0 | dstMesh->mAnimMeshes[i] = animMeshes.at(i); |
729 | 0 | } |
730 | 0 | } |
731 | | |
732 | | // create bones if given |
733 | 0 | if (pSrcController && pSrcController->mType == Collada::Skin) { |
734 | | // resolve references - joint names |
735 | 0 | const Collada::Accessor &jointNamesAcc = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, pSrcController->mJointNameSource); |
736 | 0 | const Collada::Data &jointNames = pParser.ResolveLibraryReference(pParser.mDataLibrary, jointNamesAcc.mSource); |
737 | | // joint offset matrices |
738 | 0 | const Collada::Accessor &jointMatrixAcc = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, pSrcController->mJointOffsetMatrixSource); |
739 | 0 | const Collada::Data &jointMatrices = pParser.ResolveLibraryReference(pParser.mDataLibrary, jointMatrixAcc.mSource); |
740 | | // joint vertex_weight name list - should refer to the same list as the joint names above. If not, report and reconsider |
741 | 0 | const Collada::Accessor &weightNamesAcc = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, pSrcController->mWeightInputJoints.mAccessor); |
742 | 0 | if (&weightNamesAcc != &jointNamesAcc) |
743 | 0 | throw DeadlyImportError("Temporary implementational laziness. If you read this, please report to the author."); |
744 | | // vertex weights |
745 | 0 | const Collada::Accessor &weightsAcc = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, pSrcController->mWeightInputWeights.mAccessor); |
746 | 0 | const Collada::Data &weights = pParser.ResolveLibraryReference(pParser.mDataLibrary, weightsAcc.mSource); |
747 | |
|
748 | 0 | if (!jointNames.mIsStringArray || jointMatrices.mIsStringArray || weights.mIsStringArray) { |
749 | 0 | throw DeadlyImportError("Data type mismatch while resolving mesh joints"); |
750 | 0 | } |
751 | | // sanity check: we rely on the vertex weights always coming as pairs of BoneIndex-WeightIndex |
752 | 0 | if (pSrcController->mWeightInputJoints.mOffset != 0 || pSrcController->mWeightInputWeights.mOffset != 1) { |
753 | 0 | throw DeadlyImportError("Unsupported vertex_weight addressing scheme. "); |
754 | 0 | } |
755 | | |
756 | | // create containers to collect the weights for each bone |
757 | 0 | size_t numBones = jointNames.mStrings.size(); |
758 | 0 | std::vector<std::vector<aiVertexWeight>> dstBones(numBones); |
759 | | |
760 | | // build a temporary array of pointers to the start of each vertex's weights |
761 | 0 | using IndexPairVector = std::vector<std::pair<size_t, size_t>>; |
762 | 0 | std::vector<IndexPairVector::const_iterator> weightStartPerVertex; |
763 | 0 | weightStartPerVertex.resize(pSrcController->mWeightCounts.size(), pSrcController->mWeights.end()); |
764 | |
|
765 | 0 | auto pit = pSrcController->mWeights.begin(); |
766 | 0 | for (size_t a = 0; a < pSrcController->mWeightCounts.size(); ++a) { |
767 | 0 | weightStartPerVertex[a] = pit; |
768 | 0 | pit += pSrcController->mWeightCounts[a]; |
769 | 0 | } |
770 | | |
771 | | // now for each vertex put the corresponding vertex weights into each bone's weight collection |
772 | 0 | for (size_t a = pStartVertex; a < pStartVertex + numVertices; ++a) { |
773 | | // which position index was responsible for this vertex? that's also the index by which |
774 | | // the controller assigns the vertex weights |
775 | 0 | size_t orgIndex = pSrcMesh->mFacePosIndices[a]; |
776 | | // find the vertex weights for this vertex |
777 | 0 | auto iit = weightStartPerVertex[orgIndex]; |
778 | 0 | size_t pairCount = pSrcController->mWeightCounts[orgIndex]; |
779 | |
|
780 | 0 | for (size_t b = 0; b < pairCount; ++b, ++iit) { |
781 | 0 | const size_t jointIndex = iit->first; |
782 | 0 | const size_t vertexIndex = iit->second; |
783 | 0 | ai_real weight = 1.0f; |
784 | 0 | if (!weights.mValues.empty()) { |
785 | 0 | weight = ReadFloat(weightsAcc, weights, vertexIndex, 0); |
786 | 0 | } |
787 | | |
788 | | // one day I gonna kill that XSI Collada exporter |
789 | 0 | if (weight > 0.0f) { |
790 | 0 | aiVertexWeight w; |
791 | 0 | w.mVertexId = static_cast<unsigned int>(a - pStartVertex); |
792 | 0 | w.mWeight = weight; |
793 | 0 | dstBones[jointIndex].push_back(w); |
794 | 0 | } |
795 | 0 | } |
796 | 0 | } |
797 | | |
798 | | // count the number of bones which influence vertices of the current submesh |
799 | 0 | size_t numRemainingBones = 0; |
800 | 0 | for (const auto & dstBone : dstBones) { |
801 | 0 | if (dstBone.empty() && removeEmptyBones) { |
802 | 0 | continue; |
803 | 0 | } |
804 | 0 | ++numRemainingBones; |
805 | 0 | } |
806 | | |
807 | | // create bone array and copy bone weights one by one |
808 | 0 | dstMesh->mNumBones = static_cast<unsigned int>(numRemainingBones); |
809 | 0 | dstMesh->mBones = new aiBone *[numRemainingBones]; |
810 | 0 | size_t boneCount = 0; |
811 | 0 | for (size_t a = 0; a < numBones; ++a) { |
812 | | // omit bones without weights |
813 | 0 | if (dstBones[a].empty() && removeEmptyBones) { |
814 | 0 | continue; |
815 | 0 | } |
816 | | |
817 | | // create bone with its weights |
818 | 0 | auto bone = new aiBone; |
819 | 0 | bone->mName = ReadString(jointNamesAcc, jointNames, a); |
820 | 0 | bone->mOffsetMatrix.a1 = ReadFloat(jointMatrixAcc, jointMatrices, a, 0); |
821 | 0 | bone->mOffsetMatrix.a2 = ReadFloat(jointMatrixAcc, jointMatrices, a, 1); |
822 | 0 | bone->mOffsetMatrix.a3 = ReadFloat(jointMatrixAcc, jointMatrices, a, 2); |
823 | 0 | bone->mOffsetMatrix.a4 = ReadFloat(jointMatrixAcc, jointMatrices, a, 3); |
824 | 0 | bone->mOffsetMatrix.b1 = ReadFloat(jointMatrixAcc, jointMatrices, a, 4); |
825 | 0 | bone->mOffsetMatrix.b2 = ReadFloat(jointMatrixAcc, jointMatrices, a, 5); |
826 | 0 | bone->mOffsetMatrix.b3 = ReadFloat(jointMatrixAcc, jointMatrices, a, 6); |
827 | 0 | bone->mOffsetMatrix.b4 = ReadFloat(jointMatrixAcc, jointMatrices, a, 7); |
828 | 0 | bone->mOffsetMatrix.c1 = ReadFloat(jointMatrixAcc, jointMatrices, a, 8); |
829 | 0 | bone->mOffsetMatrix.c2 = ReadFloat(jointMatrixAcc, jointMatrices, a, 9); |
830 | 0 | bone->mOffsetMatrix.c3 = ReadFloat(jointMatrixAcc, jointMatrices, a, 10); |
831 | 0 | bone->mOffsetMatrix.c4 = ReadFloat(jointMatrixAcc, jointMatrices, a, 11); |
832 | 0 | bone->mNumWeights = static_cast<unsigned int>(dstBones[a].size()); |
833 | 0 | bone->mWeights = new aiVertexWeight[bone->mNumWeights]; |
834 | 0 | std::copy(dstBones[a].begin(), dstBones[a].end(), bone->mWeights); |
835 | | |
836 | | // apply bind shape matrix to offset matrix |
837 | 0 | aiMatrix4x4 bindShapeMatrix; |
838 | 0 | bindShapeMatrix.a1 = pSrcController->mBindShapeMatrix[0]; |
839 | 0 | bindShapeMatrix.a2 = pSrcController->mBindShapeMatrix[1]; |
840 | 0 | bindShapeMatrix.a3 = pSrcController->mBindShapeMatrix[2]; |
841 | 0 | bindShapeMatrix.a4 = pSrcController->mBindShapeMatrix[3]; |
842 | 0 | bindShapeMatrix.b1 = pSrcController->mBindShapeMatrix[4]; |
843 | 0 | bindShapeMatrix.b2 = pSrcController->mBindShapeMatrix[5]; |
844 | 0 | bindShapeMatrix.b3 = pSrcController->mBindShapeMatrix[6]; |
845 | 0 | bindShapeMatrix.b4 = pSrcController->mBindShapeMatrix[7]; |
846 | 0 | bindShapeMatrix.c1 = pSrcController->mBindShapeMatrix[8]; |
847 | 0 | bindShapeMatrix.c2 = pSrcController->mBindShapeMatrix[9]; |
848 | 0 | bindShapeMatrix.c3 = pSrcController->mBindShapeMatrix[10]; |
849 | 0 | bindShapeMatrix.c4 = pSrcController->mBindShapeMatrix[11]; |
850 | 0 | bindShapeMatrix.d1 = pSrcController->mBindShapeMatrix[12]; |
851 | 0 | bindShapeMatrix.d2 = pSrcController->mBindShapeMatrix[13]; |
852 | 0 | bindShapeMatrix.d3 = pSrcController->mBindShapeMatrix[14]; |
853 | 0 | bindShapeMatrix.d4 = pSrcController->mBindShapeMatrix[15]; |
854 | 0 | bone->mOffsetMatrix *= bindShapeMatrix; |
855 | | |
856 | | // HACK: (thom) Some exporters address the bone nodes by SID, others address them by ID or even name. |
857 | | // Therefore I added a little name replacement here: I search for the bone's node by either name, ID or SID, |
858 | | // and replace the bone's name by the node's name so that the user can use the standard |
859 | | // find-by-name method to associate nodes with bones. |
860 | 0 | const Collada::Node *bnode = FindNode(pParser.mRootNode, bone->mName.data); |
861 | 0 | if (nullptr == bnode) { |
862 | 0 | bnode = FindNodeBySID(pParser.mRootNode, bone->mName.data); |
863 | 0 | } |
864 | | |
865 | | // assign the name that we would have assigned for the source node |
866 | 0 | if (nullptr != bnode) { |
867 | 0 | bone->mName.Set(FindNameForNode(bnode)); |
868 | 0 | } else { |
869 | 0 | ASSIMP_LOG_WARN("ColladaLoader::CreateMesh(): could not find corresponding node for joint \"", bone->mName.data, "\"."); |
870 | 0 | } |
871 | | |
872 | | // and insert bone |
873 | 0 | dstMesh->mBones[boneCount++] = bone; |
874 | 0 | } |
875 | 0 | } |
876 | | |
877 | 0 | return dstMesh.release(); |
878 | 0 | } |
879 | | |
880 | | // ------------------------------------------------------------------------------------------------ |
881 | | // Stores all meshes in the given scene |
882 | 0 | void ColladaLoader::StoreSceneMeshes(aiScene *pScene) { |
883 | 0 | pScene->mNumMeshes = static_cast<unsigned int>(mMeshes.size()); |
884 | 0 | if (mMeshes.empty()) { |
885 | 0 | return; |
886 | 0 | } |
887 | 0 | pScene->mMeshes = new aiMesh *[mMeshes.size()]; |
888 | 0 | std::copy(mMeshes.begin(), mMeshes.end(), pScene->mMeshes); |
889 | 0 | mMeshes.clear(); |
890 | 0 | } |
891 | | |
892 | | // ------------------------------------------------------------------------------------------------ |
893 | | // Stores all cameras in the given scene |
894 | 0 | void ColladaLoader::StoreSceneCameras(aiScene *pScene) { |
895 | 0 | pScene->mNumCameras = static_cast<unsigned int>(mCameras.size()); |
896 | 0 | if (mCameras.empty()) { |
897 | 0 | return; |
898 | 0 | } |
899 | 0 | pScene->mCameras = new aiCamera *[mCameras.size()]; |
900 | 0 | std::copy(mCameras.begin(), mCameras.end(), pScene->mCameras); |
901 | 0 | mCameras.clear(); |
902 | 0 | } |
903 | | |
904 | | // ------------------------------------------------------------------------------------------------ |
905 | | // Stores all lights in the given scene |
906 | 0 | void ColladaLoader::StoreSceneLights(aiScene *pScene) { |
907 | 0 | pScene->mNumLights = static_cast<unsigned int>(mLights.size()); |
908 | 0 | if (mLights.empty()) { |
909 | 0 | return; |
910 | 0 | } |
911 | 0 | pScene->mLights = new aiLight *[mLights.size()]; |
912 | 0 | std::copy(mLights.begin(), mLights.end(), pScene->mLights); |
913 | 0 | mLights.clear(); |
914 | 0 | } |
915 | | |
916 | | // ------------------------------------------------------------------------------------------------ |
917 | | // Stores all textures in the given scene |
918 | 0 | void ColladaLoader::StoreSceneTextures(aiScene *pScene) { |
919 | 0 | pScene->mNumTextures = static_cast<unsigned int>(mTextures.size()); |
920 | 0 | if (mTextures.empty()) { |
921 | 0 | return; |
922 | 0 | } |
923 | 0 | pScene->mTextures = new aiTexture *[mTextures.size()]; |
924 | 0 | std::copy(mTextures.begin(), mTextures.end(), pScene->mTextures); |
925 | 0 | mTextures.clear(); |
926 | 0 | } |
927 | | |
928 | | // ------------------------------------------------------------------------------------------------ |
929 | | // Stores all materials in the given scene |
930 | 0 | void ColladaLoader::StoreSceneMaterials(aiScene *pScene) { |
931 | 0 | pScene->mNumMaterials = static_cast<unsigned int>(newMats.size()); |
932 | 0 | if (newMats.empty()) { |
933 | 0 | return; |
934 | 0 | } |
935 | 0 | pScene->mMaterials = new aiMaterial *[newMats.size()]; |
936 | 0 | for (unsigned int i = 0; i < newMats.size(); ++i) { |
937 | 0 | pScene->mMaterials[i] = newMats[i].second; |
938 | 0 | } |
939 | 0 | newMats.clear(); |
940 | 0 | } |
941 | | |
942 | | // ------------------------------------------------------------------------------------------------ |
943 | | // Stores all animations |
944 | 0 | void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParser) { |
945 | | // recursively collect all animations from the collada scene |
946 | 0 | StoreAnimations(pScene, pParser, &pParser.mAnims, ""); |
947 | | |
948 | | // catch special case: many animations with the same length, each affecting only a single node. |
949 | | // we need to unite all those single-node-anims to a proper combined animation |
950 | 0 | for (size_t a = 0; a < mAnims.size(); ++a) { |
951 | 0 | aiAnimation *templateAnim = mAnims[a]; |
952 | |
|
953 | 0 | if (templateAnim->mNumChannels == 1) { |
954 | | // search for other single-channel-anims with the same duration |
955 | 0 | std::vector<size_t> collectedAnimIndices; |
956 | 0 | for (size_t b = a + 1; b < mAnims.size(); ++b) { |
957 | 0 | aiAnimation *other = mAnims[b]; |
958 | 0 | if (other->mNumChannels == 1 && other->mDuration == templateAnim->mDuration && |
959 | 0 | other->mTicksPerSecond == templateAnim->mTicksPerSecond) |
960 | 0 | collectedAnimIndices.push_back(b); |
961 | 0 | } |
962 | | |
963 | | // We only want to combine the animations if they have different channels |
964 | 0 | std::set<std::string> animTargets; |
965 | 0 | animTargets.insert(templateAnim->mChannels[0]->mNodeName.C_Str()); |
966 | 0 | bool collectedAnimationsHaveDifferentChannels = true; |
967 | 0 | for (unsigned long long collectedAnimIndice : collectedAnimIndices) { |
968 | 0 | aiAnimation *srcAnimation = mAnims[(int)collectedAnimIndice]; |
969 | 0 | std::string channelName = std::string(srcAnimation->mChannels[0]->mNodeName.C_Str()); |
970 | 0 | if (animTargets.find(channelName) == animTargets.end()) { |
971 | 0 | animTargets.insert(channelName); |
972 | 0 | } else { |
973 | 0 | collectedAnimationsHaveDifferentChannels = false; |
974 | 0 | break; |
975 | 0 | } |
976 | 0 | } |
977 | |
|
978 | 0 | if (!collectedAnimationsHaveDifferentChannels) { |
979 | 0 | continue; |
980 | 0 | } |
981 | | |
982 | | // if there are other animations which fit the template anim, combine all channels into a single anim |
983 | 0 | if (!collectedAnimIndices.empty()) { |
984 | 0 | auto *combinedAnim = new aiAnimation(); |
985 | 0 | combinedAnim->mName = aiString(std::string("combinedAnim_") + char('0' + a)); |
986 | 0 | combinedAnim->mDuration = templateAnim->mDuration; |
987 | 0 | combinedAnim->mTicksPerSecond = templateAnim->mTicksPerSecond; |
988 | 0 | combinedAnim->mNumChannels = static_cast<unsigned int>(collectedAnimIndices.size() + 1); |
989 | 0 | combinedAnim->mChannels = new aiNodeAnim *[combinedAnim->mNumChannels]; |
990 | | // add the template anim as first channel by moving its aiNodeAnim to the combined animation |
991 | 0 | combinedAnim->mChannels[0] = templateAnim->mChannels[0]; |
992 | 0 | templateAnim->mChannels[0] = nullptr; |
993 | 0 | delete templateAnim; |
994 | | // combined animation replaces template animation in the anim array |
995 | 0 | mAnims[a] = combinedAnim; |
996 | | |
997 | | // move the memory of all other anims to the combined anim and erase them from the source anims |
998 | 0 | for (size_t b = 0; b < collectedAnimIndices.size(); ++b) { |
999 | 0 | aiAnimation *srcAnimation = mAnims[collectedAnimIndices[b]]; |
1000 | 0 | combinedAnim->mChannels[1 + b] = srcAnimation->mChannels[0]; |
1001 | 0 | srcAnimation->mChannels[0] = nullptr; |
1002 | 0 | delete srcAnimation; |
1003 | 0 | } |
1004 | | |
1005 | | // in a second go, delete all the single-channel-anims that we've stripped from their channels |
1006 | | // back to front to preserve indices - you know, removing an element from a vector moves all elements behind the removed one |
1007 | 0 | while (!collectedAnimIndices.empty()) { |
1008 | 0 | mAnims.erase(mAnims.begin() + collectedAnimIndices.back()); |
1009 | 0 | collectedAnimIndices.pop_back(); |
1010 | 0 | } |
1011 | 0 | } |
1012 | 0 | } |
1013 | 0 | } |
1014 | | |
1015 | | // now store all anims in the scene |
1016 | 0 | if (!mAnims.empty()) { |
1017 | 0 | pScene->mNumAnimations = static_cast<unsigned int>(mAnims.size()); |
1018 | 0 | pScene->mAnimations = new aiAnimation *[mAnims.size()]; |
1019 | 0 | std::copy(mAnims.begin(), mAnims.end(), pScene->mAnimations); |
1020 | 0 | } |
1021 | |
|
1022 | 0 | mAnims.clear(); |
1023 | 0 | } |
1024 | | |
1025 | | // ------------------------------------------------------------------------------------------------ |
1026 | | // Constructs the animations for the given source anim |
1027 | 0 | void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParser, const Animation *pSrcAnim, const std::string &pPrefix) { |
1028 | 0 | std::string animName = pPrefix.empty() ? pSrcAnim->mName : pPrefix + "_" + pSrcAnim->mName; |
1029 | | |
1030 | | // create nested animations, if given |
1031 | 0 | for (auto mSubAnim : pSrcAnim->mSubAnims) { |
1032 | 0 | StoreAnimations(pScene, pParser, mSubAnim, animName); |
1033 | 0 | } |
1034 | | |
1035 | | // create animation channels, if any |
1036 | 0 | if (!pSrcAnim->mChannels.empty()) { |
1037 | 0 | CreateAnimation(pScene, pParser, pSrcAnim, animName); |
1038 | 0 | } |
1039 | 0 | } |
1040 | | |
// Helper structure: gathers all morph keys that fall on one point of the timeline.
struct MorphTimeValues {
    float mTime; // key time; entries are kept sorted by this value (see insertMorphTimeValue)
    struct key {
        float mWeight;       // morph weight at mTime
        unsigned int mValue; // presumably the index of the morph target this weight applies to
                             // (matched against 'value' in getWeightAtKey) - TODO confirm
    };
    std::vector<key> mKeys; // all keys sharing this time stamp
};
1049 | | |
1050 | 0 | void insertMorphTimeValue(std::vector<MorphTimeValues> &values, float time, float weight, unsigned int value) { |
1051 | 0 | MorphTimeValues::key k{}; |
1052 | 0 | k.mValue = value; |
1053 | 0 | k.mWeight = weight; |
1054 | 0 | if (values.empty() || time < values[0].mTime) { |
1055 | 0 | MorphTimeValues val; |
1056 | 0 | val.mTime = time; |
1057 | 0 | val.mKeys.push_back(k); |
1058 | 0 | values.insert(values.begin(), val); |
1059 | 0 | return; |
1060 | 0 | } |
1061 | 0 | if (time > values.back().mTime) { |
1062 | 0 | MorphTimeValues val; |
1063 | 0 | val.mTime = time; |
1064 | 0 | val.mKeys.push_back(k); |
1065 | 0 | values.insert(values.end(), val); |
1066 | 0 | return; |
1067 | 0 | } |
1068 | 0 | for (unsigned int i = 0; i < values.size(); i++) { |
1069 | 0 | if (std::abs(time - values[i].mTime) < ai_epsilon) { |
1070 | 0 | values[i].mKeys.push_back(k); |
1071 | 0 | return; |
1072 | 0 | } else if (time > values[i].mTime && time < values[i + 1].mTime) { |
1073 | 0 | MorphTimeValues val; |
1074 | 0 | val.mTime = time; |
1075 | 0 | val.mKeys.push_back(k); |
1076 | 0 | values.insert(values.begin() + i, val); |
1077 | 0 | return; |
1078 | 0 | } |
1079 | 0 | } |
1080 | 0 | } |
1081 | | |
1082 | 0 | static float getWeightAtKey(const std::vector<MorphTimeValues> &values, int key, unsigned int value) { |
1083 | 0 | for (auto mKey : values[key].mKeys) { |
1084 | 0 | if (mKey.mValue == value) { |
1085 | 0 | return mKey.mWeight; |
1086 | 0 | } |
1087 | 0 | } |
1088 | | |
1089 | | // no value at key found, try to interpolate if present at other keys. if not, return zero |
1090 | | // TODO: interpolation |
1091 | 0 | return 0.0f; |
1092 | 0 | } |
1093 | | |
1094 | | // ------------------------------------------------------------------------------------------------ |
1095 | | // Constructs the animation for the given source anim |
1096 | 0 | void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParser, const Animation *pSrcAnim, const std::string &pName) { |
1097 | | // collect a list of animatable nodes |
1098 | 0 | std::vector<const aiNode *> nodes; |
1099 | 0 | CollectNodes(pScene->mRootNode, nodes); |
1100 | |
|
1101 | 0 | std::vector<aiNodeAnim *> anims; |
1102 | 0 | std::vector<aiMeshMorphAnim *> morphAnims; |
1103 | |
|
1104 | 0 | for (auto node : nodes) { |
1105 | | // find all the collada anim channels which refer to the current node |
1106 | 0 | std::vector<ChannelEntry> entries; |
1107 | 0 | std::string nodeName = node->mName.data; |
1108 | | |
1109 | | // find the collada node corresponding to the aiNode |
1110 | 0 | const Node *srcNode = FindNode(pParser.mRootNode, nodeName); |
1111 | 0 | if (!srcNode) { |
1112 | 0 | continue; |
1113 | 0 | } |
1114 | | |
1115 | | // now check all channels if they affect the current node |
1116 | 0 | std::string targetID, subElement; |
1117 | 0 | for (auto cit = pSrcAnim->mChannels.begin(); |
1118 | 0 | cit != pSrcAnim->mChannels.end(); ++cit) { |
1119 | 0 | const AnimationChannel &srcChannel = *cit; |
1120 | 0 | ChannelEntry entry; |
1121 | | |
1122 | | // we expect the animation target to be of type "nodeName/transformID.subElement". Ignore all others |
1123 | | // find the slash that separates the node name - there should be only one |
1124 | 0 | std::string::size_type slashPos = srcChannel.mTarget.find('/'); |
1125 | 0 | if (slashPos == std::string::npos) { |
1126 | 0 | std::string::size_type targetPos = srcChannel.mTarget.find(srcNode->mID); |
1127 | 0 | if (targetPos == std::string::npos) { |
1128 | 0 | continue; |
1129 | 0 | } |
1130 | | |
1131 | | // not node transform, but something else. store as unknown animation channel for now |
1132 | 0 | entry.mChannel = &(*cit); |
1133 | 0 | entry.mTargetId = srcChannel.mTarget.substr(targetPos + pSrcAnim->mName.length(), |
1134 | 0 | srcChannel.mTarget.length() - targetPos - pSrcAnim->mName.length()); |
1135 | 0 | if (entry.mTargetId.front() == '-') { |
1136 | 0 | entry.mTargetId = entry.mTargetId.substr(1); |
1137 | 0 | } |
1138 | 0 | entries.push_back(entry); |
1139 | 0 | continue; |
1140 | 0 | } |
1141 | 0 | if (srcChannel.mTarget.find('/', slashPos + 1) != std::string::npos) { |
1142 | 0 | continue; |
1143 | 0 | } |
1144 | | |
1145 | 0 | targetID.clear(); |
1146 | 0 | targetID = srcChannel.mTarget.substr(0, slashPos); |
1147 | 0 | if (targetID != srcNode->mID) { |
1148 | 0 | continue; |
1149 | 0 | } |
1150 | | |
1151 | | // find the dot that separates the transformID - there should be only one or zero |
1152 | 0 | std::string::size_type dotPos = srcChannel.mTarget.find('.'); |
1153 | 0 | if (dotPos != std::string::npos) { |
1154 | 0 | if (srcChannel.mTarget.find('.', dotPos + 1) != std::string::npos) { |
1155 | 0 | continue; |
1156 | 0 | } |
1157 | | |
1158 | 0 | entry.mTransformId = srcChannel.mTarget.substr(slashPos + 1, dotPos - slashPos - 1); |
1159 | |
|
1160 | 0 | subElement.clear(); |
1161 | 0 | subElement = srcChannel.mTarget.substr(dotPos + 1); |
1162 | 0 | if (subElement == "ANGLE") |
1163 | 0 | entry.mSubElement = 3; // last number in an Axis-Angle-Transform is the angle |
1164 | 0 | else if (subElement == "X") |
1165 | 0 | entry.mSubElement = 0; |
1166 | 0 | else if (subElement == "Y") |
1167 | 0 | entry.mSubElement = 1; |
1168 | 0 | else if (subElement == "Z") |
1169 | 0 | entry.mSubElement = 2; |
1170 | 0 | else |
1171 | 0 | ASSIMP_LOG_WARN("Unknown anim subelement <", subElement, ">. Ignoring"); |
1172 | 0 | } else { |
1173 | | // no sub-element following, transformId is remaining string |
1174 | 0 | entry.mTransformId = srcChannel.mTarget.substr(slashPos + 1); |
1175 | 0 | } |
1176 | | |
1177 | 0 | std::string::size_type bracketPos = srcChannel.mTarget.find('('); |
1178 | 0 | if (bracketPos != std::string::npos) { |
1179 | 0 | entry.mTransformId = srcChannel.mTarget.substr(slashPos + 1, bracketPos - slashPos - 1); |
1180 | 0 | subElement.clear(); |
1181 | 0 | subElement = srcChannel.mTarget.substr(bracketPos); |
1182 | |
|
1183 | 0 | if (subElement == "(0)(0)") |
1184 | 0 | entry.mSubElement = 0; |
1185 | 0 | else if (subElement == "(1)(0)") |
1186 | 0 | entry.mSubElement = 1; |
1187 | 0 | else if (subElement == "(2)(0)") |
1188 | 0 | entry.mSubElement = 2; |
1189 | 0 | else if (subElement == "(3)(0)") |
1190 | 0 | entry.mSubElement = 3; |
1191 | 0 | else if (subElement == "(0)(1)") |
1192 | 0 | entry.mSubElement = 4; |
1193 | 0 | else if (subElement == "(1)(1)") |
1194 | 0 | entry.mSubElement = 5; |
1195 | 0 | else if (subElement == "(2)(1)") |
1196 | 0 | entry.mSubElement = 6; |
1197 | 0 | else if (subElement == "(3)(1)") |
1198 | 0 | entry.mSubElement = 7; |
1199 | 0 | else if (subElement == "(0)(2)") |
1200 | 0 | entry.mSubElement = 8; |
1201 | 0 | else if (subElement == "(1)(2)") |
1202 | 0 | entry.mSubElement = 9; |
1203 | 0 | else if (subElement == "(2)(2)") |
1204 | 0 | entry.mSubElement = 10; |
1205 | 0 | else if (subElement == "(3)(2)") |
1206 | 0 | entry.mSubElement = 11; |
1207 | 0 | else if (subElement == "(0)(3)") |
1208 | 0 | entry.mSubElement = 12; |
1209 | 0 | else if (subElement == "(1)(3)") |
1210 | 0 | entry.mSubElement = 13; |
1211 | 0 | else if (subElement == "(2)(3)") |
1212 | 0 | entry.mSubElement = 14; |
1213 | 0 | else if (subElement == "(3)(3)") |
1214 | 0 | entry.mSubElement = 15; |
1215 | 0 | } |
1216 | | |
1217 | | // determine which transform step is affected by this channel |
1218 | 0 | entry.mTransformIndex = SIZE_MAX; |
1219 | 0 | for (size_t a = 0; a < srcNode->mTransforms.size(); ++a) |
1220 | 0 | if (srcNode->mTransforms[a].mID == entry.mTransformId) |
1221 | 0 | entry.mTransformIndex = a; |
1222 | |
|
1223 | 0 | if (entry.mTransformIndex == SIZE_MAX) { |
1224 | 0 | if (entry.mTransformId.find("morph-weights") == std::string::npos) { |
1225 | 0 | continue; |
1226 | 0 | } |
1227 | 0 | entry.mTargetId = entry.mTransformId; |
1228 | 0 | entry.mTransformId = std::string(); |
1229 | 0 | } |
1230 | | |
1231 | 0 | entry.mChannel = &(*cit); |
1232 | 0 | entries.push_back(entry); |
1233 | 0 | } |
1234 | | |
1235 | | // if there's no channel affecting the current node, we skip it |
1236 | 0 | if (entries.empty()) { |
1237 | 0 | continue; |
1238 | 0 | } |
1239 | | |
1240 | | // resolve the data pointers for all anim channels. Find the minimum time while we're at it |
1241 | 0 | ai_real startTime = ai_real(1e20), endTime = ai_real(-1e20); |
1242 | 0 | for (ChannelEntry & e : entries) { |
1243 | 0 | e.mTimeAccessor = &pParser.ResolveLibraryReference(pParser.mAccessorLibrary, e.mChannel->mSourceTimes); |
1244 | 0 | e.mTimeData = &pParser.ResolveLibraryReference(pParser.mDataLibrary, e.mTimeAccessor->mSource); |
1245 | 0 | e.mValueAccessor = &pParser.ResolveLibraryReference(pParser.mAccessorLibrary, e.mChannel->mSourceValues); |
1246 | 0 | e.mValueData = &pParser.ResolveLibraryReference(pParser.mDataLibrary, e.mValueAccessor->mSource); |
1247 | | |
1248 | | // time count and value count must match |
1249 | 0 | if (e.mTimeAccessor->mCount != e.mValueAccessor->mCount) { |
1250 | 0 | throw DeadlyImportError("Time count / value count mismatch in animation channel \"", e.mChannel->mTarget, "\"."); |
1251 | 0 | } |
1252 | | |
1253 | 0 | if (e.mTimeAccessor->mCount > 0) { |
1254 | | // find bounding times |
1255 | 0 | startTime = std::min(startTime, ReadFloat(*e.mTimeAccessor, *e.mTimeData, 0, 0)); |
1256 | 0 | endTime = std::max(endTime, ReadFloat(*e.mTimeAccessor, *e.mTimeData, e.mTimeAccessor->mCount - 1, 0)); |
1257 | 0 | } |
1258 | 0 | } |
1259 | | |
1260 | 0 | std::vector<aiMatrix4x4> resultTrafos; |
1261 | 0 | if (!entries.empty() && entries.front().mTimeAccessor->mCount > 0) { |
1262 | | // create a local transformation chain of the node's transforms |
1263 | 0 | std::vector<Collada::Transform> transforms = srcNode->mTransforms; |
1264 | | |
1265 | | // now for every unique point in time, find or interpolate the key values for that time |
1266 | | // and apply them to the transform chain. Then the node's present transformation can be calculated. |
1267 | 0 | ai_real time = startTime; |
1268 | 0 | while (true) { |
1269 | 0 | for (ChannelEntry & e : entries) { |
1270 | | // find the keyframe behind the current point in time |
1271 | 0 | size_t pos = 0; |
1272 | 0 | ai_real postTime = 0.0; |
1273 | 0 | while (true) { |
1274 | 0 | if (pos >= e.mTimeAccessor->mCount) { |
1275 | 0 | break; |
1276 | 0 | } |
1277 | 0 | postTime = ReadFloat(*e.mTimeAccessor, *e.mTimeData, pos, 0); |
1278 | 0 | if (postTime >= time) { |
1279 | 0 | break; |
1280 | 0 | } |
1281 | 0 | ++pos; |
1282 | 0 | } |
1283 | |
|
1284 | 0 | pos = std::min(pos, e.mTimeAccessor->mCount - 1); |
1285 | | |
1286 | | // read values from there |
1287 | 0 | ai_real temp[16]; |
1288 | 0 | for (size_t c = 0; c < e.mValueAccessor->mSize; ++c) { |
1289 | 0 | temp[c] = ReadFloat(*e.mValueAccessor, *e.mValueData, pos, c); |
1290 | 0 | } |
1291 | | |
1292 | | // if not exactly at the key time, interpolate with previous value set |
1293 | 0 | if (postTime > time && pos > 0) { |
1294 | 0 | ai_real preTime = ReadFloat(*e.mTimeAccessor, *e.mTimeData, pos - 1, 0); |
1295 | 0 | ai_real factor = (time - postTime) / (preTime - postTime); |
1296 | |
|
1297 | 0 | for (size_t c = 0; c < e.mValueAccessor->mSize; ++c) { |
1298 | 0 | ai_real v = ReadFloat(*e.mValueAccessor, *e.mValueData, pos - 1, c); |
1299 | 0 | temp[c] += (v - temp[c]) * factor; |
1300 | 0 | } |
1301 | 0 | } |
1302 | | |
1303 | | // Apply values to current transformation |
1304 | 0 | std::copy(temp, temp + e.mValueAccessor->mSize, transforms[e.mTransformIndex].f + e.mSubElement); |
1305 | 0 | } |
1306 | | |
1307 | | // Calculate resulting transformation |
1308 | 0 | aiMatrix4x4 mat = pParser.CalculateResultTransform(transforms); |
1309 | | |
1310 | | // out of laziness: we store the time in matrix.d4 |
1311 | 0 | mat.d4 = time; |
1312 | 0 | resultTrafos.push_back(mat); |
1313 | | |
1314 | | // find next point in time to evaluate. That's the closest frame larger than the current in any channel |
1315 | 0 | ai_real nextTime = ai_real(1e20); |
1316 | 0 | for (ChannelEntry & channelElement : entries) { |
1317 | | // find the next time value larger than the current |
1318 | 0 | size_t pos = 0; |
1319 | 0 | while (pos < channelElement.mTimeAccessor->mCount) { |
1320 | 0 | const ai_real t = ReadFloat(*channelElement.mTimeAccessor, *channelElement.mTimeData, pos, 0); |
1321 | 0 | if (t > time) { |
1322 | 0 | nextTime = std::min(nextTime, t); |
1323 | 0 | break; |
1324 | 0 | } |
1325 | 0 | ++pos; |
1326 | 0 | } |
1327 | | |
1328 | | // https://github.com/assimp/assimp/issues/458 |
1329 | | // Sub-sample axis-angle channels if the delta between two consecutive |
1330 | | // key-frame angles is >= 180 degrees. |
1331 | 0 | if (transforms[channelElement.mTransformIndex].mType == TF_ROTATE && channelElement.mSubElement == 3 && pos > 0 && pos < channelElement.mTimeAccessor->mCount) { |
1332 | 0 | const ai_real cur_key_angle = ReadFloat(*channelElement.mValueAccessor, *channelElement.mValueData, pos, 0); |
1333 | 0 | const ai_real last_key_angle = ReadFloat(*channelElement.mValueAccessor, *channelElement.mValueData, pos - 1, 0); |
1334 | 0 | const ai_real cur_key_time = ReadFloat(*channelElement.mTimeAccessor, *channelElement.mTimeData, pos, 0); |
1335 | 0 | const ai_real last_key_time = ReadFloat(*channelElement.mTimeAccessor, *channelElement.mTimeData, pos - 1, 0); |
1336 | 0 | const ai_real last_eval_angle = last_key_angle + (cur_key_angle - last_key_angle) * (time - last_key_time) / (cur_key_time - last_key_time); |
1337 | 0 | const ai_real delta = std::abs(cur_key_angle - last_eval_angle); |
1338 | 0 | if (delta >= 180.0) { |
1339 | 0 | const int subSampleCount = static_cast<int>(std::floor(delta / 90.0)); |
1340 | 0 | if (cur_key_time != time) { |
1341 | 0 | const ai_real nextSampleTime = time + (cur_key_time - time) / subSampleCount; |
1342 | 0 | nextTime = std::min(nextTime, nextSampleTime); |
1343 | 0 | } |
1344 | 0 | } |
1345 | 0 | } |
1346 | 0 | } |
1347 | | |
1348 | | // no more keys on any channel after the current time -> we're done |
1349 | 0 | if (nextTime > 1e19) { |
1350 | 0 | break; |
1351 | 0 | } |
1352 | | |
1353 | | // else construct next key-frame at this following time point |
1354 | 0 | time = nextTime; |
1355 | 0 | } |
1356 | 0 | } |
1357 | | |
1358 | | // build an animation channel for the given node out of these trafo keys |
1359 | 0 | if (!resultTrafos.empty()) { |
1360 | 0 | auto *dstAnim = new aiNodeAnim; |
1361 | 0 | dstAnim->mNodeName = nodeName; |
1362 | 0 | dstAnim->mNumPositionKeys = static_cast<unsigned int>(resultTrafos.size()); |
1363 | 0 | dstAnim->mNumRotationKeys = static_cast<unsigned int>(resultTrafos.size()); |
1364 | 0 | dstAnim->mNumScalingKeys = static_cast<unsigned int>(resultTrafos.size()); |
1365 | 0 | dstAnim->mPositionKeys = new aiVectorKey[resultTrafos.size()]; |
1366 | 0 | dstAnim->mRotationKeys = new aiQuatKey[resultTrafos.size()]; |
1367 | 0 | dstAnim->mScalingKeys = new aiVectorKey[resultTrafos.size()]; |
1368 | |
|
1369 | 0 | for (size_t a = 0; a < resultTrafos.size(); ++a) { |
1370 | 0 | aiMatrix4x4 mat = resultTrafos[a]; |
1371 | 0 | double time = double(mat.d4); // remember? time is stored in mat.d4 |
1372 | 0 | mat.d4 = 1.0f; |
1373 | |
|
1374 | 0 | dstAnim->mPositionKeys[a].mTime = time * kMillisecondsFromSeconds; |
1375 | 0 | dstAnim->mRotationKeys[a].mTime = time * kMillisecondsFromSeconds; |
1376 | 0 | dstAnim->mScalingKeys[a].mTime = time * kMillisecondsFromSeconds; |
1377 | 0 | mat.Decompose(dstAnim->mScalingKeys[a].mValue, dstAnim->mRotationKeys[a].mValue, dstAnim->mPositionKeys[a].mValue); |
1378 | 0 | } |
1379 | |
|
1380 | 0 | anims.push_back(dstAnim); |
1381 | 0 | } else { |
1382 | 0 | ASSIMP_LOG_WARN("Collada loader: found empty animation channel, ignored. Please check your exporter."); |
1383 | 0 | } |
1384 | |
|
1385 | 0 | if (!entries.empty() && entries.front().mTimeAccessor->mCount > 0) { |
1386 | 0 | std::vector<ChannelEntry> morphChannels; |
1387 | 0 | for (ChannelEntry & e : entries) { |
1388 | | // skip non-transform types |
1389 | 0 | if (e.mTargetId.empty()) { |
1390 | 0 | continue; |
1391 | 0 | } |
1392 | | |
1393 | 0 | if (e.mTargetId.find("morph-weights") != std::string::npos) { |
1394 | 0 | morphChannels.push_back(e); |
1395 | 0 | } |
1396 | 0 | } |
1397 | 0 | if (!morphChannels.empty()) { |
1398 | | // either 1) morph weight animation count should contain morph target count channels |
1399 | | // or 2) one channel with morph target count arrays |
1400 | | // assume first |
1401 | |
|
1402 | 0 | auto *morphAnim = new aiMeshMorphAnim; |
1403 | 0 | morphAnim->mName.Set(nodeName); |
1404 | |
|
1405 | 0 | std::vector<MorphTimeValues> morphTimeValues; |
1406 | 0 | int morphAnimChannelIndex = 0; |
1407 | 0 | for (ChannelEntry & e : morphChannels) { |
1408 | 0 | std::string::size_type apos = e.mTargetId.find('('); |
1409 | 0 | std::string::size_type bpos = e.mTargetId.find(')'); |
1410 | | |
1411 | | // If unknown way to specify weight -> ignore this animation |
1412 | 0 | if (apos == std::string::npos || bpos == std::string::npos) { |
1413 | 0 | continue; |
1414 | 0 | } |
1415 | | |
1416 | | // weight target can be in format Weight_M_N, Weight_N, WeightN, or some other way |
1417 | | // we ignore the name and just assume the channels are in the right order |
1418 | 0 | for (unsigned int i = 0; i < e.mTimeData->mValues.size(); i++) { |
1419 | 0 | insertMorphTimeValue(morphTimeValues, e.mTimeData->mValues[i], e.mValueData->mValues[i], morphAnimChannelIndex); |
1420 | 0 | } |
1421 | |
|
1422 | 0 | ++morphAnimChannelIndex; |
1423 | 0 | } |
1424 | |
|
1425 | 0 | morphAnim->mNumKeys = static_cast<unsigned int>(morphTimeValues.size()); |
1426 | 0 | morphAnim->mKeys = new aiMeshMorphKey[morphAnim->mNumKeys]; |
1427 | 0 | for (unsigned int key = 0; key < morphAnim->mNumKeys; key++) { |
1428 | 0 | morphAnim->mKeys[key].mNumValuesAndWeights = static_cast<unsigned int>(morphChannels.size()); |
1429 | 0 | morphAnim->mKeys[key].mValues = new unsigned int[morphChannels.size()]; |
1430 | 0 | morphAnim->mKeys[key].mWeights = new double[morphChannels.size()]; |
1431 | |
|
1432 | 0 | morphAnim->mKeys[key].mTime = morphTimeValues[key].mTime * kMillisecondsFromSeconds; |
1433 | 0 | for (unsigned int valueIndex = 0; valueIndex < morphChannels.size(); ++valueIndex) { |
1434 | 0 | morphAnim->mKeys[key].mValues[valueIndex] = valueIndex; |
1435 | 0 | morphAnim->mKeys[key].mWeights[valueIndex] = getWeightAtKey(morphTimeValues, key, valueIndex); |
1436 | 0 | } |
1437 | 0 | } |
1438 | |
|
1439 | 0 | morphAnims.push_back(morphAnim); |
1440 | 0 | } |
1441 | 0 | } |
1442 | 0 | } |
1443 | | |
1444 | 0 | if (!anims.empty() || !morphAnims.empty()) { |
1445 | 0 | auto anim = new aiAnimation; |
1446 | 0 | anim->mName.Set(pName); |
1447 | 0 | anim->mNumChannels = static_cast<unsigned int>(anims.size()); |
1448 | 0 | if (anim->mNumChannels > 0) { |
1449 | 0 | anim->mChannels = new aiNodeAnim *[anims.size()]; |
1450 | 0 | std::copy(anims.begin(), anims.end(), anim->mChannels); |
1451 | 0 | } |
1452 | 0 | anim->mNumMorphMeshChannels = static_cast<unsigned int>(morphAnims.size()); |
1453 | 0 | if (anim->mNumMorphMeshChannels > 0) { |
1454 | 0 | anim->mMorphMeshChannels = new aiMeshMorphAnim *[anim->mNumMorphMeshChannels]; |
1455 | 0 | std::copy(morphAnims.begin(), morphAnims.end(), anim->mMorphMeshChannels); |
1456 | 0 | } |
1457 | 0 | anim->mDuration = 0.0f; |
1458 | 0 | for (auto & a : anims) { |
1459 | 0 | anim->mDuration = std::max(anim->mDuration, a->mPositionKeys[a->mNumPositionKeys - 1].mTime); |
1460 | 0 | anim->mDuration = std::max(anim->mDuration, a->mRotationKeys[a->mNumRotationKeys - 1].mTime); |
1461 | 0 | anim->mDuration = std::max(anim->mDuration, a->mScalingKeys[a->mNumScalingKeys - 1].mTime); |
1462 | 0 | } |
1463 | 0 | for (auto & morphAnim : morphAnims) { |
1464 | 0 | anim->mDuration = std::max(anim->mDuration, morphAnim->mKeys[morphAnim->mNumKeys - 1].mTime); |
1465 | 0 | } |
1466 | 0 | anim->mTicksPerSecond = 1000.0; |
1467 | 0 | mAnims.push_back(anim); |
1468 | 0 | } |
1469 | 0 | } |
1470 | | |
1471 | | // ------------------------------------------------------------------------------------------------ |
1472 | | // Add a texture to a material structure |
1473 | | void ColladaLoader::AddTexture(aiMaterial &mat, |
1474 | | const ColladaParser &pParser, |
1475 | | const Effect &effect, |
1476 | | const Sampler &sampler, |
1477 | | aiTextureType type, |
1478 | 0 | unsigned int idx) { |
1479 | | // first of all, basic file name |
1480 | 0 | const aiString name = FindFilenameForEffectTexture(pParser, effect, sampler.mName); |
1481 | 0 | mat.AddProperty(&name, _AI_MATKEY_TEXTURE_BASE, type, idx); |
1482 | | |
1483 | | // mapping mode |
1484 | 0 | int map = aiTextureMapMode_Clamp; |
1485 | 0 | if (sampler.mWrapU) { |
1486 | 0 | map = aiTextureMapMode_Wrap; |
1487 | 0 | } |
1488 | 0 | if (sampler.mWrapU && sampler.mMirrorU) { |
1489 | 0 | map = aiTextureMapMode_Mirror; |
1490 | 0 | } |
1491 | |
|
1492 | 0 | mat.AddProperty(&map, 1, _AI_MATKEY_MAPPINGMODE_U_BASE, type, idx); |
1493 | |
|
1494 | 0 | map = aiTextureMapMode_Clamp; |
1495 | 0 | if (sampler.mWrapV) { |
1496 | 0 | map = aiTextureMapMode_Wrap; |
1497 | 0 | } |
1498 | 0 | if (sampler.mWrapV && sampler.mMirrorV) { |
1499 | 0 | map = aiTextureMapMode_Mirror; |
1500 | 0 | } |
1501 | |
|
1502 | 0 | mat.AddProperty(&map, 1, _AI_MATKEY_MAPPINGMODE_V_BASE, type, idx); |
1503 | | |
1504 | | // UV transformation |
1505 | 0 | mat.AddProperty(&sampler.mTransform, 1, |
1506 | 0 | _AI_MATKEY_UVTRANSFORM_BASE, type, idx); |
1507 | | |
1508 | | // Blend mode |
1509 | 0 | mat.AddProperty((int *)&sampler.mOp, 1, |
1510 | 0 | _AI_MATKEY_TEXBLEND_BASE, type, idx); |
1511 | | |
1512 | | // Blend factor |
1513 | 0 | mat.AddProperty((ai_real *)&sampler.mWeighting, 1, |
1514 | 0 | _AI_MATKEY_TEXBLEND_BASE, type, idx); |
1515 | | |
1516 | | // UV source index ... if we didn't resolve the mapping, it is actually just |
1517 | | // a guess but it works in most cases. We search for the frst occurrence of a |
1518 | | // number in the channel name. We assume it is the zero-based index into the |
1519 | | // UV channel array of all corresponding meshes. It could also be one-based |
1520 | | // for some exporters, but we won't care of it unless someone complains about. |
1521 | 0 | if (sampler.mUVId != UINT_MAX) { |
1522 | 0 | map = sampler.mUVId; |
1523 | 0 | } else { |
1524 | 0 | map = -1; |
1525 | 0 | for (auto it = sampler.mUVChannel.begin(); it != sampler.mUVChannel.end(); ++it) { |
1526 | 0 | if (IsNumeric(*it)) { |
1527 | 0 | map = strtoul10(&(*it)); |
1528 | 0 | break; |
1529 | 0 | } |
1530 | 0 | } |
1531 | 0 | if (-1 == map) { |
1532 | 0 | ASSIMP_LOG_WARN("Collada: unable to determine UV channel for texture"); |
1533 | 0 | map = 0; |
1534 | 0 | } |
1535 | 0 | } |
1536 | 0 | mat.AddProperty(&map, 1, _AI_MATKEY_UVWSRC_BASE, type, idx); |
1537 | 0 | } |
1538 | | |
1539 | | // ------------------------------------------------------------------------------------------------ |
1540 | | // Fills materials from the collada material definitions |
1541 | 0 | void ColladaLoader::FillMaterials(const ColladaParser &pParser, aiScene * /*pScene*/) { |
1542 | 0 | for (auto &elem : newMats) { |
1543 | 0 | auto &mat = (aiMaterial &)*elem.second; |
1544 | 0 | Collada::Effect &effect = *elem.first; |
1545 | | |
1546 | | // resolve shading mode |
1547 | 0 | int shadeMode; |
1548 | 0 | if (effect.mFaceted) { |
1549 | 0 | shadeMode = aiShadingMode_Flat; |
1550 | 0 | } else { |
1551 | 0 | switch (effect.mShadeType) { |
1552 | 0 | case Collada::Shade_Constant: |
1553 | 0 | shadeMode = aiShadingMode_NoShading; |
1554 | 0 | break; |
1555 | 0 | case Collada::Shade_Lambert: |
1556 | 0 | shadeMode = aiShadingMode_Gouraud; |
1557 | 0 | break; |
1558 | 0 | case Collada::Shade_Blinn: |
1559 | 0 | shadeMode = aiShadingMode_Blinn; |
1560 | 0 | break; |
1561 | 0 | case Collada::Shade_Phong: |
1562 | 0 | shadeMode = aiShadingMode_Phong; |
1563 | 0 | break; |
1564 | | |
1565 | 0 | default: |
1566 | 0 | ASSIMP_LOG_WARN("Collada: Unrecognized shading mode, using gouraud shading"); |
1567 | 0 | shadeMode = aiShadingMode_Gouraud; |
1568 | 0 | break; |
1569 | 0 | } |
1570 | 0 | } |
1571 | 0 | mat.AddProperty<int>(&shadeMode, 1, AI_MATKEY_SHADING_MODEL); |
1572 | | |
1573 | | // double-sided? |
1574 | 0 | shadeMode = effect.mDoubleSided; |
1575 | 0 | mat.AddProperty<int>(&shadeMode, 1, AI_MATKEY_TWOSIDED); |
1576 | | |
1577 | | // wire-frame? |
1578 | 0 | shadeMode = effect.mWireframe; |
1579 | 0 | mat.AddProperty<int>(&shadeMode, 1, AI_MATKEY_ENABLE_WIREFRAME); |
1580 | | |
1581 | | // add material colors |
1582 | 0 | mat.AddProperty(&effect.mAmbient, 1, AI_MATKEY_COLOR_AMBIENT); |
1583 | 0 | mat.AddProperty(&effect.mDiffuse, 1, AI_MATKEY_COLOR_DIFFUSE); |
1584 | 0 | mat.AddProperty(&effect.mSpecular, 1, AI_MATKEY_COLOR_SPECULAR); |
1585 | 0 | mat.AddProperty(&effect.mEmissive, 1, AI_MATKEY_COLOR_EMISSIVE); |
1586 | 0 | mat.AddProperty(&effect.mReflective, 1, AI_MATKEY_COLOR_REFLECTIVE); |
1587 | | |
1588 | | // scalar properties |
1589 | 0 | mat.AddProperty(&effect.mShininess, 1, AI_MATKEY_SHININESS); |
1590 | 0 | mat.AddProperty(&effect.mReflectivity, 1, AI_MATKEY_REFLECTIVITY); |
1591 | 0 | mat.AddProperty(&effect.mRefractIndex, 1, AI_MATKEY_REFRACTI); |
1592 | | |
1593 | | // transparency, a very hard one. seemingly not all files are following the |
1594 | | // specification here (1.0 transparency => completely opaque)... |
1595 | | // therefore, we let the opportunity for the user to manually invert |
1596 | | // the transparency if necessary and we add preliminary support for RGB_ZERO mode |
1597 | 0 | if (effect.mTransparency >= 0.f && effect.mTransparency <= 1.f) { |
1598 | | // handle RGB transparency completely, cf Collada specs 1.5.0 pages 249 and 304 |
1599 | 0 | if (effect.mRGBTransparency) { |
1600 | | // use luminance as defined by ISO/CIE color standards (see ITU-R Recommendation BT.709-4) |
1601 | 0 | effect.mTransparency *= (0.212671f * effect.mTransparent.r + |
1602 | 0 | 0.715160f * effect.mTransparent.g + |
1603 | 0 | 0.072169f * effect.mTransparent.b); |
1604 | |
|
1605 | 0 | effect.mTransparent.a = 1.f; |
1606 | |
|
1607 | 0 | mat.AddProperty(&effect.mTransparent, 1, AI_MATKEY_COLOR_TRANSPARENT); |
1608 | 0 | } else { |
1609 | 0 | effect.mTransparency *= effect.mTransparent.a; |
1610 | 0 | } |
1611 | |
|
1612 | 0 | if (effect.mInvertTransparency) { |
1613 | 0 | effect.mTransparency = 1.f - effect.mTransparency; |
1614 | 0 | } |
1615 | | |
1616 | | // Is the material finally transparent ? |
1617 | 0 | if (effect.mHasTransparency || effect.mTransparency < 1.f) { |
1618 | 0 | mat.AddProperty(&effect.mTransparency, 1, AI_MATKEY_OPACITY); |
1619 | 0 | } |
1620 | 0 | } |
1621 | | |
1622 | | // add textures, if given |
1623 | 0 | if (!effect.mTexAmbient.mName.empty()) { |
1624 | | // It is merely a light-map |
1625 | 0 | AddTexture(mat, pParser, effect, effect.mTexAmbient, aiTextureType_LIGHTMAP); |
1626 | 0 | } |
1627 | |
|
1628 | 0 | if (!effect.mTexEmissive.mName.empty()) |
1629 | 0 | AddTexture(mat, pParser, effect, effect.mTexEmissive, aiTextureType_EMISSIVE); |
1630 | |
|
1631 | 0 | if (!effect.mTexSpecular.mName.empty()) |
1632 | 0 | AddTexture(mat, pParser, effect, effect.mTexSpecular, aiTextureType_SPECULAR); |
1633 | |
|
1634 | 0 | if (!effect.mTexDiffuse.mName.empty()) |
1635 | 0 | AddTexture(mat, pParser, effect, effect.mTexDiffuse, aiTextureType_DIFFUSE); |
1636 | |
|
1637 | 0 | if (!effect.mTexBump.mName.empty()) |
1638 | 0 | AddTexture(mat, pParser, effect, effect.mTexBump, aiTextureType_NORMALS); |
1639 | |
|
1640 | 0 | if (!effect.mTexTransparent.mName.empty()) |
1641 | 0 | AddTexture(mat, pParser, effect, effect.mTexTransparent, aiTextureType_OPACITY); |
1642 | |
|
1643 | 0 | if (!effect.mTexReflective.mName.empty()) |
1644 | 0 | AddTexture(mat, pParser, effect, effect.mTexReflective, aiTextureType_REFLECTION); |
1645 | 0 | } |
1646 | 0 | } |
1647 | | |
1648 | | // ------------------------------------------------------------------------------------------------ |
1649 | | // Constructs materials from the collada material definitions |
1650 | 0 | void ColladaLoader::BuildMaterials(ColladaParser &pParser, aiScene * /*pScene*/) { |
1651 | 0 | newMats.reserve(pParser.mMaterialLibrary.size()); |
1652 | |
|
1653 | 0 | for (auto matIt = pParser.mMaterialLibrary.begin(); |
1654 | 0 | matIt != pParser.mMaterialLibrary.end(); ++matIt) { |
1655 | 0 | const Material &material = matIt->second; |
1656 | | // a material is only a reference to an effect |
1657 | 0 | auto effIt = pParser.mEffectLibrary.find(material.mEffect); |
1658 | 0 | if (effIt == pParser.mEffectLibrary.end()) |
1659 | 0 | continue; |
1660 | 0 | Effect &effect = effIt->second; |
1661 | | |
1662 | | // create material |
1663 | 0 | auto *mat = new aiMaterial; |
1664 | 0 | aiString name(material.mName.empty() ? matIt->first : material.mName); |
1665 | 0 | mat->AddProperty(&name, AI_MATKEY_NAME); |
1666 | | |
1667 | | // store the material |
1668 | 0 | mMaterialIndexByName[matIt->first] = newMats.size(); |
1669 | 0 | newMats.emplace_back(&effect, mat); |
1670 | 0 | } |
1671 | | // ScenePreprocessor generates a default material automatically if none is there. |
1672 | | // All further code here in this loader works well without a valid material so |
1673 | | // we can safely let it to ScenePreprocessor. |
1674 | 0 | } |
1675 | | |
1676 | | // ------------------------------------------------------------------------------------------------ |
1677 | | // Resolves the texture name for the given effect texture entry and loads the texture data |
1678 | | aiString ColladaLoader::FindFilenameForEffectTexture(const ColladaParser &pParser, |
1679 | 0 | const Effect &pEffect, const std::string &pName) { |
1680 | 0 | aiString result; |
1681 | | |
1682 | | // recurse through the param references until we end up at an image |
1683 | 0 | std::string name = pName; |
1684 | 0 | while (true) { |
1685 | | // the given string is a param entry. Find it |
1686 | 0 | auto it = pEffect.mParams.find(name); |
1687 | | // if not found, we're at the end of the recursion. The resulting string should be the image ID |
1688 | 0 | if (it == pEffect.mParams.end()) |
1689 | 0 | break; |
1690 | | |
1691 | | // else recurse on |
1692 | 0 | name = it->second.mReference; |
1693 | 0 | } |
1694 | | |
1695 | | // find the image referred by this name in the image library of the scene |
1696 | 0 | auto imIt = pParser.mImageLibrary.find(name); |
1697 | 0 | if (imIt == pParser.mImageLibrary.end()) { |
1698 | 0 | ASSIMP_LOG_WARN("Collada: Unable to resolve effect texture entry \"", pName, "\", ended up at ID \"", name, "\"."); |
1699 | | |
1700 | | //set default texture file name |
1701 | 0 | result.Set(name + ".jpg"); |
1702 | 0 | ColladaParser::UriDecodePath(result); |
1703 | 0 | return result; |
1704 | 0 | } |
1705 | | |
1706 | | // if this is an embedded texture image setup an aiTexture for it |
1707 | 0 | if (!imIt->second.mImageData.empty()) { |
1708 | 0 | auto *tex = new aiTexture(); |
1709 | | |
1710 | | // Store embedded texture name reference |
1711 | 0 | tex->mFilename.Set(imIt->second.mFileName.c_str()); |
1712 | 0 | result.Set(imIt->second.mFileName); |
1713 | | |
1714 | | // setup format hint |
1715 | 0 | if (imIt->second.mEmbeddedFormat.length() >= HINTMAXTEXTURELEN) { |
1716 | 0 | ASSIMP_LOG_WARN("Collada: texture format hint is too long, truncating to 3 characters"); |
1717 | 0 | } |
1718 | 0 | strncpy(tex->achFormatHint, imIt->second.mEmbeddedFormat.c_str(), 3); |
1719 | | |
1720 | | // and copy texture data |
1721 | 0 | tex->mHeight = 0; |
1722 | 0 | tex->mWidth = static_cast<unsigned int>(imIt->second.mImageData.size()); |
1723 | 0 | tex->pcData = (aiTexel *)new char[tex->mWidth]; |
1724 | 0 | memcpy(tex->pcData, &imIt->second.mImageData[0], tex->mWidth); |
1725 | | |
1726 | | // and add this texture to the list |
1727 | 0 | mTextures.push_back(tex); |
1728 | 0 | return result; |
1729 | 0 | } |
1730 | | |
1731 | 0 | if (imIt->second.mFileName.empty()) { |
1732 | 0 | throw DeadlyImportError("Collada: Invalid texture, no data or file reference given"); |
1733 | 0 | } |
1734 | | |
1735 | 0 | result.Set(imIt->second.mFileName); |
1736 | |
|
1737 | 0 | return result; |
1738 | 0 | } |
1739 | | |
1740 | | // ------------------------------------------------------------------------------------------------ |
1741 | | // Reads a string value from an accessor and its data array. |
1742 | 0 | const std::string &ColladaLoader::ReadString(const Accessor &pAccessor, const Data &pData, size_t pIndex) const { |
1743 | 0 | size_t pos = pAccessor.mStride * pIndex + pAccessor.mOffset; |
1744 | 0 | ai_assert(pos < pData.mStrings.size()); |
1745 | 0 | return pData.mStrings[pos]; |
1746 | 0 | } |
1747 | | |
1748 | | // ------------------------------------------------------------------------------------------------ |
1749 | | // Collects all nodes into the given array |
1750 | 0 | void ColladaLoader::CollectNodes(const aiNode *pNode, std::vector<const aiNode *> &poNodes) const { |
1751 | 0 | poNodes.push_back(pNode); |
1752 | 0 | for (size_t a = 0; a < pNode->mNumChildren; ++a) { |
1753 | 0 | CollectNodes(pNode->mChildren[a], poNodes); |
1754 | 0 | } |
1755 | 0 | } |
1756 | | |
1757 | | // ------------------------------------------------------------------------------------------------ |
1758 | | // Finds a node in the collada scene by the given name |
1759 | 0 | const Node *ColladaLoader::FindNode(const Node *pNode, const std::string &pName) const { |
1760 | 0 | if (pNode->mName == pName || pNode->mID == pName) |
1761 | 0 | return pNode; |
1762 | | |
1763 | 0 | for (auto a : pNode->mChildren) { |
1764 | 0 | const Collada::Node *node = FindNode(a, pName); |
1765 | 0 | if (node) { |
1766 | 0 | return node; |
1767 | 0 | } |
1768 | 0 | } |
1769 | | |
1770 | 0 | return nullptr; |
1771 | 0 | } |
1772 | | |
1773 | | // ------------------------------------------------------------------------------------------------ |
1774 | | // Finds a node in the collada scene by the given SID |
1775 | 0 | const Node *ColladaLoader::FindNodeBySID(const Node *pNode, const std::string &pSID) const { |
1776 | 0 | if (nullptr == pNode) { |
1777 | 0 | return nullptr; |
1778 | 0 | } |
1779 | | |
1780 | 0 | if (pNode->mSID == pSID) { |
1781 | 0 | return pNode; |
1782 | 0 | } |
1783 | | |
1784 | 0 | for (auto a : pNode->mChildren) { |
1785 | 0 | const Collada::Node *node = FindNodeBySID(a, pSID); |
1786 | 0 | if (node) { |
1787 | 0 | return node; |
1788 | 0 | } |
1789 | 0 | } |
1790 | | |
1791 | 0 | return nullptr; |
1792 | 0 | } |
1793 | | |
1794 | | // ------------------------------------------------------------------------------------------------ |
1795 | | // Finds a proper unique name for a node derived from the collada-node's properties. |
1796 | | // The name must be unique for proper node-bone association. |
1797 | 0 | std::string ColladaLoader::FindNameForNode(const Node *pNode) { |
1798 | | // If explicitly requested, just use the collada name. |
1799 | 0 | if (useColladaName) { |
1800 | 0 | if (!pNode->mName.empty()) { |
1801 | 0 | return pNode->mName; |
1802 | 0 | } else { |
1803 | 0 | return format() << "$ColladaAutoName$_" << mNodeNameCounter++; |
1804 | 0 | } |
1805 | 0 | } else { |
1806 | | // Now setup the name of the assimp node. The collada name might not be |
1807 | | // unique, so we use the collada ID. |
1808 | 0 | if (!pNode->mID.empty()) |
1809 | 0 | return pNode->mID; |
1810 | 0 | else if (!pNode->mSID.empty()) |
1811 | 0 | return pNode->mSID; |
1812 | 0 | else { |
1813 | | // No need to worry. Unnamed nodes are no problem at all, except |
1814 | | // if cameras or lights need to be assigned to them. |
1815 | 0 | return format() << "$ColladaAutoName$_" << mNodeNameCounter++; |
1816 | 0 | } |
1817 | 0 | } |
1818 | 0 | } |
1819 | | |
1820 | | } // Namespace Assimp |
1821 | | |
1822 | | #endif // !! ASSIMP_BUILD_NO_COLLADA_IMPORTER |