Coverage Report

Created: 2026-03-12 06:25

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/assimp/code/AssetLib/FBX/FBXTokenizer.cpp
Line
Count
Source
1
/*
2
Open Asset Import Library (assimp)
3
----------------------------------------------------------------------
4
5
Copyright (c) 2006-2026, assimp team
6
7
All rights reserved.
8
9
Redistribution and use of this software in source and binary forms,
10
with or without modification, are permitted provided that the
11
following conditions are met:
12
13
* Redistributions of source code must retain the above
14
  copyright notice, this list of conditions and the
15
  following disclaimer.
16
17
* Redistributions in binary form must reproduce the above
18
  copyright notice, this list of conditions and the
19
  following disclaimer in the documentation and/or other
20
  materials provided with the distribution.
21
22
* Neither the name of the assimp team, nor the names of its
23
  contributors may be used to endorse or promote products
24
  derived from this software without specific prior
25
  written permission of the assimp team.
26
27
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38
39
----------------------------------------------------------------------
40
*/
41
42
/** @file  FBXTokenizer.cpp
43
 *  @brief Implementation of the FBX broadphase lexer
44
 */
45
46
#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER
47
48
// tab width for logging columns
49
222k
#define ASSIMP_FBX_TAB_WIDTH 4
50
51
#include <assimp/ParsingUtils.h>
52
53
#include "FBXTokenizer.h"
54
#include "FBXUtil.h"
55
#include <assimp/Exceptional.h>
56
#include <assimp/DefaultLogger.hpp>
57
58
namespace Assimp {
59
namespace FBX {
60
61
// ------------------------------------------------------------------------------------------------
62
Token::Token(const char* sbegin, const char* send, TokenType type, unsigned int line, unsigned int column)
63
    :
64
#ifdef DEBUG
65
    contents(sbegin, static_cast<size_t>(send-sbegin)),
66
#endif
67
5.26M
    sbegin(sbegin)
68
5.26M
    , send(send)
69
5.26M
    , type(type)
70
5.26M
    , line(line)
71
5.26M
    , column(column)
72
5.26M
{
73
5.26M
    ai_assert(sbegin);
74
5.26M
    ai_assert(send);
75
76
    // tokens must be of non-zero length
77
5.26M
    ai_assert(static_cast<size_t>(send-sbegin) > 0);
78
5.26M
}
79
80
// ------------------------------------------------------------------------------------------------
81
82
83
namespace {
84
85
// ------------------------------------------------------------------------------------------------
86
// signal tokenization error, this is always unrecoverable. Throws DeadlyImportError.
87
AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column) AI_WONT_RETURN_SUFFIX;
88
AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column)
89
3
{
90
3
    throw DeadlyImportError("FBX-Tokenize", Util::GetLineAndColumnText(line,column), message);
91
3
}
92
93
94
// process a potential data token up to 'cur', adding it to 'output_tokens'.
95
// ------------------------------------------------------------------------------------------------
96
void ProcessDataToken(TokenList &output_tokens, StackAllocator &token_allocator,
97
                      const char*& start, const char*& end,
98
                      unsigned int line,
99
                      unsigned int column,
100
                      TokenType type = TokenType_DATA,
101
                      bool must_have_token = false)
102
3.59M
{
103
3.59M
    if (start && end) {
104
        // sanity check:
105
        // tokens should have no whitespace outside quoted text and [start,end] should
106
        // properly delimit the valid range.
107
1.84M
        bool in_double_quotes = false;
108
27.9M
        for (const char* c = start; c != end + 1; ++c) {
109
26.0M
            if (*c == '\"') {
110
222k
                in_double_quotes = !in_double_quotes;
111
222k
            }
112
113
26.0M
            if (!in_double_quotes && IsSpaceOrNewLine(*c)) {
114
0
                TokenizeError("unexpected whitespace in token", line, column);
115
0
            }
116
26.0M
        }
117
118
1.84M
        if (in_double_quotes) {
119
0
            TokenizeError("non-terminated double quotes", line, column);
120
0
        }
121
122
1.84M
        output_tokens.push_back(new_Token(start,end + 1,type,line,column));
123
1.84M
    }
124
1.74M
    else if (must_have_token) {
125
1
        TokenizeError("unexpected character, expected data token", line, column);
126
1
    }
127
128
3.59M
    start = end = nullptr;
129
3.59M
}
130
131
}
132
133
// ------------------------------------------------------------------------------------------------
134
113
void Tokenize(TokenList &output_tokens, const char *input, StackAllocator &token_allocator) {
135
113
  ai_assert(input);
136
113
  ASSIMP_LOG_DEBUG("Tokenizing ASCII FBX file");
137
138
    // line and column numbers are one-based
139
113
    unsigned int line = 1;
140
113
    unsigned int column = 1;
141
142
113
    bool comment = false;
143
113
    bool in_double_quotes = false;
144
113
    bool pending_data_token = false;
145
146
113
    const char *token_begin = nullptr, *token_end = nullptr;
147
31.2M
    for (const char* cur = input;*cur;column += (*cur == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1), ++cur) {
148
31.2M
        const char c = *cur;
149
150
31.2M
        if (IsLineEnd(c)) {
151
148k
            comment = false;
152
153
148k
            column = 0;
154
148k
            ++line;
155
148k
        }
156
157
31.2M
        if(comment) {
158
1.08M
            continue;
159
1.08M
        }
160
161
30.1M
        if(in_double_quotes) {
162
11.0M
            if (c == '\"') {
163
111k
                in_double_quotes = false;
164
111k
                token_end = cur;
165
166
111k
                ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column);
167
111k
                pending_data_token = false;
168
111k
            }
169
11.0M
            continue;
170
11.0M
        }
171
172
19.0M
        switch(c)
173
19.0M
        {
174
111k
        case '\"':
175
111k
            if (token_begin) {
176
1
                TokenizeError("unexpected double-quote", line, column);
177
1
            }
178
111k
            token_begin = cur;
179
111k
            in_double_quotes = true;
180
111k
            continue;
181
182
33.5k
        case ';':
183
33.5k
            ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column);
184
33.5k
            comment = true;
185
33.5k
            continue;
186
187
1.68M
        case '{':
188
1.68M
            ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column);
189
1.68M
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_OPEN_BRACKET,line,column));
190
1.68M
            continue;
191
192
126k
        case '}':
193
126k
            ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column);
194
126k
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_CLOSE_BRACKET,line,column));
195
126k
            continue;
196
197
1.60M
        case ',':
198
1.60M
            if (pending_data_token) {
199
1.50M
                ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column, TokenType_DATA, true);
200
1.50M
            }
201
1.60M
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_COMMA,line,column));
202
1.60M
            continue;
203
204
79.9k
        case ':':
205
79.9k
            if (pending_data_token) {
206
79.9k
                ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column, TokenType_KEY, true);
207
79.9k
            }
208
1
            else {
209
1
                TokenizeError("unexpected colon", line, column);
210
1
            }
211
79.9k
            continue;
212
19.0M
        }
213
214
15.4M
        if (IsSpaceOrNewLine(c)) {
215
216
511k
            if (token_begin) {
217
                // peek ahead and check if the next token is a colon in which
218
                // case this counts as KEY token.
219
56.3k
                TokenType type = TokenType_DATA;
220
249k
                for (const char* peek = cur;  *peek && IsSpaceOrNewLine(*peek); ++peek) {
221
193k
                    if (*peek == ':') {
222
0
                        type = TokenType_KEY;
223
0
                        cur = peek;
224
0
                        break;
225
0
                    }
226
193k
                }
227
228
56.3k
                ProcessDataToken(output_tokens, token_allocator, token_begin, token_end, line, column, type);
229
56.3k
            }
230
231
511k
            pending_data_token = false;
232
511k
        }
233
14.9M
        else {
234
14.9M
            token_end = cur;
235
14.9M
            if (!token_begin) {
236
1.73M
                token_begin = cur;
237
1.73M
            }
238
239
14.9M
            pending_data_token = true;
240
14.9M
        }
241
15.4M
    }
242
113
}
243
244
} // !FBX
245
} // !Assimp
246
247
#endif