Coverage Report

Created: 2023-03-26 07:54

/src/LPM/external.protobuf/include/google/protobuf/parse_context.h

// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
#define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__

#include <cstdint>
#include <cstring>
#include <string>
#include <type_traits>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/port.h>
#include <google/protobuf/stubs/strutil.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/endian.h>
#include <google/protobuf/implicit_weak_message.h>
#include <google/protobuf/inlined_string_field.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h>

// Must be included last.
#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {

class UnknownFieldSet;
class DescriptorPool;
class MessageFactory;

namespace internal {

// Template code below needs to know about the existence of these functions.
PROTOBUF_EXPORT void WriteVarint(uint32_t num, uint64_t val, std::string* s);
PROTOBUF_EXPORT void WriteLengthDelimited(uint32_t num, StringPiece val,
                                          std::string* s);
// Inline because it is just forwarding to s->WriteVarint
inline void WriteVarint(uint32_t num, uint64_t val, UnknownFieldSet* s);
inline void WriteLengthDelimited(uint32_t num, StringPiece val,
                                 UnknownFieldSet* s);

// The basic abstraction the parser is designed for is a slight modification
// of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
// stream as a series of buffers that concatenate to the full stream.
// Pictorially a ZCIS presents a stream in chunks like so
// [---------------------------------------------------------------]
// [---------------------] chunk 1
//                      [----------------------------] chunk 2
//                                          chunk 3 [--------------]
//
// where the '-' represent the bytes which are vertically lined up with the
// bytes of the stream. The proto parser requires its input to be presented
// similarly, with the extra property that each chunk has kSlopBytes past its
// end that overlap with the first kSlopBytes of the next chunk, or if there
// is no next chunk, at least it's still valid to read those bytes. Again,
// pictorially, we now have
//
// [---------------------------------------------------------------]
// [-------------------....] chunk 1
//                    [------------------------....] chunk 2
//                                    chunk 3 [------------------..**]
//                                                      chunk 4 [--****]
// Here '-' means the bytes of the stream or chunk and '.' means bytes past
// the chunk that match up with the start of the next chunk. Above, each chunk
// has 4 '.' after it. In case these 'overflow' bytes represent bytes past the
// stream, indicated by '*' above, their values are unspecified. It is still
// legal to read them (i.e. it should not segfault). Reading past the end
// should be detected by the user and indicated as an error.
//
// The reason for this admittedly unconventional invariant is to ruthlessly
// optimize the protobuf parser. Having an overlap helps in two important
// ways. Firstly it alleviates having to perform bounds checks in code that
// is guaranteed to not read more than kSlopBytes. Secondly, and more
// importantly, the protobuf wire format is such that reading a key/value pair
// always takes less than 16 bytes. This removes the need to switch to the
// next buffer in the middle of reading primitive values. Hence there is no
// need to store and load the current position.

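// Aside (illustrative sketch, not part of the original header; the function
// below is hypothetical). The payoff of the kSlopBytes overlap is that a
// fixed-size read never needs its own bounds check: as long as `ptr` obeys
// the invariant above, the 8 bytes of a fixed64 field are always readable,
// even when they straddle a chunk boundary or the end of the stream.
inline const char* ExampleParseFixed64(const char* ptr, uint64_t* value) {
  // No bounds check: the slop region guarantees these bytes are readable
  // (their contents may be unspecified past the stream, in which case the
  // parse loop's limit check reports the overrun as an error afterwards).
  std::memcpy(value, ptr, sizeof(*value));
  return ptr + sizeof(*value);
}
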
class PROTOBUF_EXPORT EpsCopyInputStream {
 public:
  enum { kSlopBytes = 16, kMaxCordBytesToCopy = 512 };

  explicit EpsCopyInputStream(bool enable_aliasing)
      : aliasing_(enable_aliasing ? kOnPatch : kNoAliasing) {}

  void BackUp(const char* ptr) {
    GOOGLE_DCHECK(ptr <= buffer_end_ + kSlopBytes);
    int count;
    if (next_chunk_ == buffer_) {
      count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    } else {
      count = size_ + static_cast<int>(buffer_end_ - ptr);
    }
    if (count > 0) StreamBackUp(count);
  }

  // If the return value is negative it's an error.
  PROTOBUF_NODISCARD int PushLimit(const char* ptr, int limit) {
    GOOGLE_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
    // This add is safe due to the invariant above, because
    // ptr - buffer_end_ <= kSlopBytes.
    limit += static_cast<int>(ptr - buffer_end_);
    limit_end_ = buffer_end_ + (std::min)(0, limit);
    auto old_limit = limit_;
    limit_ = limit;
    return old_limit - limit;
  }

  PROTOBUF_NODISCARD bool PopLimit(int delta) {
    if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit())) return false;
    limit_ = limit_ + delta;
    // TODO(gerbens) We could remove this line and hoist the code to
    // DoneFallback. Study the perf/bin-size effects.
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return true;
  }

  PROTOBUF_NODISCARD const char* Skip(const char* ptr, int size) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      return ptr + size;
    }
    return SkipFallback(ptr, size);
  }
  PROTOBUF_NODISCARD const char* ReadString(const char* ptr, int size,
                                            std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->assign(ptr, size);
      return ptr + size;
    }
    return ReadStringFallback(ptr, size, s);
  }
  PROTOBUF_NODISCARD const char* AppendString(const char* ptr, int size,
                                              std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->append(ptr, size);
      return ptr + size;
    }
    return AppendStringFallback(ptr, size, s);
  }
  // Implemented in arenastring.cc
  PROTOBUF_NODISCARD const char* ReadArenaString(const char* ptr,
                                                 ArenaStringPtr* s,
                                                 Arena* arena);

  template <typename Tag, typename T>
  PROTOBUF_NODISCARD const char* ReadRepeatedFixed(const char* ptr,
                                                   Tag expected_tag,
                                                   RepeatedField<T>* out);

  template <typename T>
  PROTOBUF_NODISCARD const char* ReadPackedFixed(const char* ptr, int size,
                                                 RepeatedField<T>* out);
  template <typename Add>
  PROTOBUF_NODISCARD const char* ReadPackedVarint(const char* ptr, Add add);

  uint32_t LastTag() const { return last_tag_minus_1_ + 1; }
  bool ConsumeEndGroup(uint32_t start_tag) {
    bool res = last_tag_minus_1_ == start_tag;
    last_tag_minus_1_ = 0;
    return res;
  }
  bool EndedAtLimit() const { return last_tag_minus_1_ == 0; }
  bool EndedAtEndOfStream() const { return last_tag_minus_1_ == 1; }
  void SetLastTag(uint32_t tag) { last_tag_minus_1_ = tag - 1; }
  void SetEndOfStream() { last_tag_minus_1_ = 1; }
  bool IsExceedingLimit(const char* ptr) {
    return ptr > limit_end_ &&
           (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  }
  bool AliasingEnabled() const { return aliasing_ != kNoAliasing; }
  int BytesUntilLimit(const char* ptr) const {
    return limit_ + static_cast<int>(buffer_end_ - ptr);
  }
  // Returns true if more data is available; if false is returned one has to
  // call Done for further checks.
  bool DataAvailable(const char* ptr) { return ptr < limit_end_; }

 protected:
  // Returns true if the limit (either an explicit limit or end of stream) is
  // reached. It aligns *ptr across buffer seams.
  // If the limit is exceeded it returns true and ptr is set to null.
  bool DoneWithCheck(const char** ptr, int d) {
    GOOGLE_DCHECK(*ptr);
    if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_)) return false;
    int overrun = static_cast<int>(*ptr - buffer_end_);
    GOOGLE_DCHECK_LE(overrun, kSlopBytes);  // Guaranteed by parse loop.
    if (overrun ==
        limit_) {  //  No need to flip buffers if we ended on a limit.
      // If we actually overran the buffer and next_chunk_ is null, it means
      // the stream ended and we passed the stream end.
      if (overrun > 0 && next_chunk_ == nullptr) *ptr = nullptr;
      return true;
    }
    auto res = DoneFallback(overrun, d);
    *ptr = res.first;
    return res.second;
  }

  const char* InitFrom(StringPiece flat) {
    overall_limit_ = 0;
    if (flat.size() > kSlopBytes) {
      limit_ = kSlopBytes;
      limit_end_ = buffer_end_ = flat.data() + flat.size() - kSlopBytes;
      next_chunk_ = buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return flat.data();
    } else {
      std::memcpy(buffer_, flat.data(), flat.size());
      limit_ = 0;
      limit_end_ = buffer_end_ = buffer_ + flat.size();
      next_chunk_ = nullptr;
      if (aliasing_ == kOnPatch) {
        aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
                    reinterpret_cast<std::uintptr_t>(buffer_);
      }
      return buffer_;
    }
  }

  const char* InitFrom(io::ZeroCopyInputStream* zcis);

  const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit) {
    if (limit == -1) return InitFrom(zcis);
    overall_limit_ = limit;
    auto res = InitFrom(zcis);
    limit_ = limit - static_cast<int>(buffer_end_ - res);
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return res;
  }

 private:
  const char* limit_end_;  // buffer_end_ + min(limit_, 0)
  const char* buffer_end_;
  const char* next_chunk_;
  int size_;
  int limit_;  // relative to buffer_end_;
  io::ZeroCopyInputStream* zcis_ = nullptr;
  char buffer_[2 * kSlopBytes] = {};
  enum { kNoAliasing = 0, kOnPatch = 1, kNoDelta = 2 };
  std::uintptr_t aliasing_ = kNoAliasing;
  // This variable is used to communicate how the parse ended, in order to
  // completely verify the parsed data. A wire-format parse can end because of
  // one of the following conditions:
  // 1) A parse can end on a pushed limit.
  // 2) A parse can end on End Of Stream (EOS).
  // 3) A parse can end on a 0 tag (only valid for a toplevel message).
  // 4) A parse can end on an end-group tag.
  // This variable should always be set to 0, which indicates case 1. If the
  // parse terminated due to EOS (case 2), it's set to 1. In case the parse
  // ended due to a terminating tag (case 3 and 4) it's set to (tag - 1).
  // This var doesn't really belong in EpsCopyInputStream and should be part of
  // the ParseContext, but case 2 is most easily and optimally implemented in
  // DoneFallback.
  uint32_t last_tag_minus_1_ = 0;
  int overall_limit_ = INT_MAX;  // Overall limit independent of pushed limits.
  // Pretty random large number that seems like a safe allocation on most
  // systems. TODO(gerbens) do we need to set this as a build flag?
  enum { kSafeStringSize = 50000000 };

  // Advances to the next buffer chunk and returns a pointer to the same
  // logical place in the stream as set by overrun. Overrun indicates the
  // position in the slop region at which the parse was left
  // (0 <= overrun <= kSlopBytes). Returns true if at a limit, at which point
  // the returned pointer may be null if there was an error. The invariant of
  // this function is that it's guaranteed that kSlopBytes bytes can be
  // accessed from the returned ptr. This function might advance more buffers
  // than one in the underlying ZeroCopyInputStream.
  std::pair<const char*, bool> DoneFallback(int overrun, int depth);
  // Advances to the next buffer; at most one call to Next() on the underlying
  // ZeroCopyInputStream is made. This function DOES NOT match the returned
  // pointer to where in the slop region the parse ends, hence no overrun
  // parameter. This is useful for string operations where you always copy
  // to the end of the buffer (including the slop region).
  const char* Next();
  // overrun is the location in the slop region where the stream currently is
  // (0 <= overrun <= kSlopBytes). It prevents flipping to the next buffer of
  // the ZeroCopyInputStream in case the parse will end in the last kSlopBytes
  // of the current buffer. depth is the current depth of nested groups (or
  // negative if the use case does not need careful tracking).
  inline const char* NextBuffer(int overrun, int depth);
  const char* SkipFallback(const char* ptr, int size);
  const char* AppendStringFallback(const char* ptr, int size, std::string* str);
  const char* ReadStringFallback(const char* ptr, int size, std::string* str);
  bool StreamNext(const void** data) {
    bool res = zcis_->Next(data, &size_);
    if (res) overall_limit_ -= size_;
    return res;
  }
  void StreamBackUp(int count) {
    zcis_->BackUp(count);
    overall_limit_ += count;
  }

  template <typename A>
  const char* AppendSize(const char* ptr, int size, const A& append) {
    int chunk_size = buffer_end_ + kSlopBytes - ptr;
    do {
      GOOGLE_DCHECK(size > chunk_size);
      if (next_chunk_ == nullptr) return nullptr;
      append(ptr, chunk_size);
      ptr += chunk_size;
      size -= chunk_size;
      // TODO(gerbens) Next calls NextBuffer which generates buffers with
      // overlap and thus incurs cost of copying the slop regions. This is not
      // necessary for reading strings. We should just call Next buffers.
      if (limit_ <= kSlopBytes) return nullptr;
      ptr = Next();
      if (ptr == nullptr) return nullptr;  // passed the limit
      ptr += kSlopBytes;
      chunk_size = buffer_end_ + kSlopBytes - ptr;
    } while (size > chunk_size);
    append(ptr, size);
    return ptr + size;
  }

  // AppendUntilEnd appends data until a limit (either a PushLimit or end of
  // stream). Normal payloads are from length delimited fields which have an
  // explicit size. Reading until limit only comes up when the string takes
  // the place of a protobuf, i.e. RawMessage/StringRawMessage, lazy fields
  // and implicit weak messages. We keep these methods private and friend them.
  template <typename A>
  const char* AppendUntilEnd(const char* ptr, const A& append) {
    if (ptr - buffer_end_ > limit_) return nullptr;
    while (limit_ > kSlopBytes) {
      size_t chunk_size = buffer_end_ + kSlopBytes - ptr;
      append(ptr, chunk_size);
      ptr = Next();
      if (ptr == nullptr) return limit_end_;
      ptr += kSlopBytes;
    }
    auto end = buffer_end_ + limit_;
    GOOGLE_DCHECK(end >= ptr);
    append(ptr, end - ptr);
    return end;
  }

  PROTOBUF_NODISCARD const char* AppendString(const char* ptr,
                                              std::string* str) {
    return AppendUntilEnd(
        ptr, [str](const char* p, ptrdiff_t s) { str->append(p, s); });
  }
  friend class ImplicitWeakMessage;
};

using LazyEagerVerifyFnType = const char* (*)(const char* ptr,
                                              ParseContext* ctx);
using LazyEagerVerifyFnRef = std::remove_pointer<LazyEagerVerifyFnType>::type&;

// ParseContext holds all data that is global to the entire parse. Most
// importantly it contains the input stream, but it also keeps the recursion
// depth and stores the end-group tag, in case a parser ended on an end-group,
// to verify matching start/end-group tags.
class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream {
 public:
  struct Data {
    const DescriptorPool* pool = nullptr;
    MessageFactory* factory = nullptr;
    Arena* arena = nullptr;
  };

  template <typename... T>
  ParseContext(int depth, bool aliasing, const char** start, T&&... args)
      : EpsCopyInputStream(aliasing), depth_(depth) {
    *start = InitFrom(std::forward<T>(args)...);
  }

  void TrackCorrectEnding() { group_depth_ = 0; }

  bool Done(const char** ptr) { return DoneWithCheck(ptr, group_depth_); }

  int depth() const { return depth_; }

  Data& data() { return data_; }
  const Data& data() const { return data_; }

  const char* ParseMessage(MessageLite* msg, const char* ptr);

  // Spawns a child parsing context that inherits key properties. The new
  // context inherits the following:
  // --depth_, data_, check_required_fields_, lazy_parse_mode_
  // The spawned context always disables aliasing (different input).
  template <typename... T>
  ParseContext Spawn(const char** start, T&&... args) {
    ParseContext spawned(depth_, false, start, std::forward<T>(args)...);
    // Transfer key context states.
    spawned.data_ = data_;
    return spawned;
  }

  // This overload supports those few cases where ParseMessage is called
  // on a class that is not actually a proto message.
  // TODO(jorg): Eliminate this use case.
  template <typename T,
            typename std::enable_if<!std::is_base_of<MessageLite, T>::value,
                                    bool>::type = true>
  PROTOBUF_NODISCARD const char* ParseMessage(T* msg, const char* ptr);

  template <typename T>
  PROTOBUF_NODISCARD PROTOBUF_NDEBUG_INLINE const char* ParseGroup(
      T* msg, const char* ptr, uint32_t tag) {
    if (--depth_ < 0) return nullptr;
    group_depth_++;
    ptr = msg->_InternalParse(ptr, this);
    group_depth_--;
    depth_++;
    if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag))) return nullptr;
    return ptr;
  }

 private:
  // Out-of-line routine to save space in ParseContext::ParseMessage<T>
  //   int old;
  //   ptr = ReadSizeAndPushLimitAndDepth(ptr, &old)
  // is equivalent to:
  //   int size = ReadSize(&ptr);
  //   if (!ptr) return nullptr;
  //   int old = PushLimit(ptr, size);
  //   if (--depth_ < 0) return nullptr;
  PROTOBUF_NODISCARD const char* ReadSizeAndPushLimitAndDepth(const char* ptr,
                                                              int* old_limit);

  // The context keeps an internal stack to keep track of the recursive
  // part of the parse state.
  // Current depth of the active parser, depth counts down.
  // This is used to limit recursion depth (to prevent overflow on malicious
  // data), but is also used to index into stack_ to store the current state.
  int depth_;
  // Unfortunately necessary for the fringe case of ending on a 0 or end-group
  // tag in the last kSlopBytes of a ZeroCopyInputStream chunk.
  int group_depth_ = INT_MIN;
  Data data_;
};

template <uint32_t tag>
bool ExpectTag(const char* ptr) {
  if (tag < 128) {
    return *ptr == static_cast<char>(tag);
  } else {
    static_assert(tag < 128 * 128, "We only expect tags for 1 or 2 bytes");
    char buf[2] = {static_cast<char>(tag | 0x80), static_cast<char>(tag >> 7)};
    return std::memcmp(ptr, buf, 2) == 0;
  }
}
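
// Aside (illustrative sketch, not part of the original header; the function
// below is hypothetical). A tag is (field_number << 3) | wire_type, varint
// encoded, so ExpectTag<26> matches field 3 with wire type 2
// (length-delimited) against the single byte 0x1A, while a tag >= 128 such as
// 154 (field 19, wire type 2) is matched against its two varint bytes
// {0x9A, 0x01}:
inline void ExampleExpectTag() {
  const char one_byte[1] = {0x1A};
  const char two_bytes[2] = {static_cast<char>(0x9A), 0x01};
  GOOGLE_DCHECK(ExpectTag<26>(one_byte));
  GOOGLE_DCHECK(ExpectTag<154>(two_bytes));
}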

template <int>
struct EndianHelper;

template <>
struct EndianHelper<1> {
  static uint8_t Load(const void* p) { return *static_cast<const uint8_t*>(p); }
};

template <>
struct EndianHelper<2> {
  static uint16_t Load(const void* p) {
    uint16_t tmp;
    std::memcpy(&tmp, p, 2);
    return little_endian::ToHost(tmp);
  }
};

template <>
struct EndianHelper<4> {
  static uint32_t Load(const void* p) {
    uint32_t tmp;
    std::memcpy(&tmp, p, 4);
    return little_endian::ToHost(tmp);
  }
};

template <>
struct EndianHelper<8> {
  static uint64_t Load(const void* p) {
    uint64_t tmp;
    std::memcpy(&tmp, p, 8);
    return little_endian::ToHost(tmp);
  }
};

template <typename T>
T UnalignedLoad(const char* p) {
  auto tmp = EndianHelper<sizeof(T)>::Load(p);
  T res;
  memcpy(&res, &tmp, sizeof(T));
  return res;
}
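
// Aside (illustrative sketch, not part of the original header; the function
// below is hypothetical). UnalignedLoad goes through memcpy rather than a
// reinterpret_cast dereference because wire bytes are not guaranteed to be
// aligned for T and a cast-based load would violate strict aliasing;
// compilers fold the memcpy into a single load instruction. Decoding a float
// from its little-endian wire bytes looks like:
inline float ExampleUnalignedLoad() {
  static const char wire[4] = {0x00, 0x00, 0x28, 0x42};  // 42.0f on the wire
  return UnalignedLoad<float>(wire);                     // yields 42.0f
}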

PROTOBUF_EXPORT
std::pair<const char*, uint32_t> VarintParseSlow32(const char* p, uint32_t res);
PROTOBUF_EXPORT
std::pair<const char*, uint64_t> VarintParseSlow64(const char* p, uint32_t res);

inline const char* VarintParseSlow(const char* p, uint32_t res, uint32_t* out) {
  auto tmp = VarintParseSlow32(p, res);
  *out = tmp.second;
  return tmp.first;
}

inline const char* VarintParseSlow(const char* p, uint32_t res, uint64_t* out) {
  auto tmp = VarintParseSlow64(p, res);
  *out = tmp.second;
  return tmp.first;
}

template <typename T>
PROTOBUF_NODISCARD const char* VarintParse(const char* p, T* out) {
  auto ptr = reinterpret_cast<const uint8_t*>(p);
  uint32_t res = ptr[0];
  if (!(res & 0x80)) {
    *out = res;
    return p + 1;
  }
  uint32_t byte = ptr[1];
  res += (byte - 1) << 7;
  if (!(byte & 0x80)) {
    *out = res;
    return p + 2;
  }
  return VarintParseSlow(p, res, out);
}
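
// Worked example (illustrative sketch, not part of the original header; the
// function below is hypothetical). A varint stores 7 payload bits per byte,
// least-significant group first, with the high bit of each byte as a
// continuation flag. The value 300 is encoded as {0xAC, 0x02}; note how
// `res += (byte - 1) << 7` above folds the removal of the first byte's
// continuation bit into the same add:
//   0xAC + (0x02 - 1) * 128 = 172 + 128 = 300.
inline void ExampleVarintParse() {
  const char data[2] = {static_cast<char>(0xAC), 0x02};
  uint32_t value = 0;
  const char* end = VarintParse(data, &value);
  GOOGLE_DCHECK(end == data + 2 && value == 300);
}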

// Used for tags, could read up to 5 bytes which must be available.
// Caller must ensure it's safe to call.

PROTOBUF_EXPORT
std::pair<const char*, uint32_t> ReadTagFallback(const char* p, uint32_t res);

// Same as VarintParse but accepts at most 5 bytes.
inline const char* ReadTag(const char* p, uint32_t* out,
                           uint32_t /*max_tag*/ = 0) {
  uint32_t res = static_cast<uint8_t>(p[0]);
  if (res < 128) {
    *out = res;
    return p + 1;
  }
  uint32_t second = static_cast<uint8_t>(p[1]);
  res += (second - 1) << 7;
  if (second < 128) {
    *out = res;
    return p + 2;
  }
  auto tmp = ReadTagFallback(p, res);
  *out = tmp.second;
  return tmp.first;
}

// As above, but optimized to consume very few registers while still being
// fast. ReadTagInlined is useful for callers that don't mind the extra code
// but would like to avoid an extern function call causing spills into the
// stack.
//
// Two support routines for ReadTagInlined come first...
template <class T>
PROTOBUF_NODISCARD PROTOBUF_ALWAYS_INLINE constexpr T RotateLeft(
    T x, int s) noexcept {
  return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
         static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
}

PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE uint64_t
RotRight7AndReplaceLowByte(uint64_t res, const char& byte) {
#if defined(__x86_64__) && defined(__GNUC__)
  // This will only use one register for `res`.
  // `byte` comes as a reference to allow the compiler to generate code like:
  //
  //   rorq    $7, %rcx
  //   movb    1(%rax), %cl
  //
  // which avoids loading the incoming bytes into a separate register first.
  asm("ror $7,%0\n\t"
      "movb %1,%b0"
      : "+r"(res)
      : "m"(byte));
#else
  res = RotateLeft(res, -7);
  res = res & ~0xFF;
  res |= 0xFF & byte;
#endif
  return res;
}

inline PROTOBUF_ALWAYS_INLINE
const char* ReadTagInlined(const char* ptr, uint32_t* out) {
  uint64_t res = 0xFF & ptr[0];
  if (PROTOBUF_PREDICT_FALSE(res >= 128)) {
    res = RotRight7AndReplaceLowByte(res, ptr[1]);
    if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
      res = RotRight7AndReplaceLowByte(res, ptr[2]);
      if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
        res = RotRight7AndReplaceLowByte(res, ptr[3]);
        if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
          // Note: this wouldn't work if res were 32-bit,
          // because then replacing the low byte would overwrite
          // the bottom 4 bits of the result.
          res = RotRight7AndReplaceLowByte(res, ptr[4]);
          if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
            // The proto format does not permit longer than 5-byte encodings
            // for tags.
            *out = 0;
            return nullptr;
          }
          *out = static_cast<uint32_t>(RotateLeft(res, 28));
#if defined(__GNUC__)
          // Note: this asm statement prevents the compiler from
          // trying to share the "return ptr + constant" among all
          // branches.
          asm("" : "+r"(ptr));
#endif
          return ptr + 5;
        }
        *out = static_cast<uint32_t>(RotateLeft(res, 21));
        return ptr + 4;
      }
      *out = static_cast<uint32_t>(RotateLeft(res, 14));
      return ptr + 3;
    }
    *out = static_cast<uint32_t>(RotateLeft(res, 7));
    return ptr + 2;
  }
  *out = static_cast<uint32_t>(res);
  return ptr + 1;
}
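
// Worked example (illustrative sketch, not part of the original header; the
// function below is hypothetical). The rotate trick keeps every accumulated
// payload bit in one 64-bit register: each RotRight7AndReplaceLowByte moves
// the bits gathered so far toward the top of the word and drops the next raw
// byte (continuation bit included) into the low byte, and the single final
// RotateLeft realigns all the 7-bit groups at once. Decoding the two-byte
// encoding of 300, {0xAC, 0x02}, produces the same result as the plain
// varint decoder:
inline void ExampleReadTagInlined() {
  const char data[2] = {static_cast<char>(0xAC), 0x02};
  uint32_t tag = 0;
  const char* end = ReadTagInlined(data, &tag);
  GOOGLE_DCHECK(end == data + 2 && tag == 300);
}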

// Decodes 2 consecutive bytes of a varint and returns the value, shifted left
// by 1. It simultaneously updates *ptr to *ptr + 1 or *ptr + 2, depending on
// whether the first byte's continuation bit is set.
// If bit 15 of the return value is set (equivalent to the continuation bits
// of both bytes being set) the varint continues; otherwise the parse is done.
// On x86:
// movsx eax, dil
// and edi, eax
// add eax, edi
// adc [rsi], 1
inline uint32_t DecodeTwoBytes(const char** ptr) {
  uint32_t value = UnalignedLoad<uint16_t>(*ptr);
  // Sign extend the low byte continuation bit
  uint32_t x = static_cast<int8_t>(value);
  value &= x;  // Mask out the high byte iff no continuation
  // This add is an amazing operation, it cancels the low byte continuation bit
  // from x transferring it to the carry. Simultaneously it also shifts the 7
  // LSB left by one tightly against high byte varint bits. Hence value now
  // contains the unpacked value shifted left by 1.
  value += x;
  // Use the carry to update the ptr appropriately.
  *ptr += value < x ? 2 : 1;
  return value;
}
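
// Worked example (illustrative sketch, not part of the original header; the
// function below is hypothetical). For the bytes {0xAC, 0x02} the 16-bit load
// gives value = 0x02AC and the sign-extended low byte is x = 0xFFFFFFAC; the
// add then wraps, cancelling the continuation bit and leaving the payload
// shifted left by one:
//   0x02AC + 0xFFFFFFAC = 0x258 (mod 2^32), i.e. 600 == 300 << 1,
// and the wrap (value < x) advances *ptr by 2.
inline void ExampleDecodeTwoBytes() {
  const char data[2] = {static_cast<char>(0xAC), 0x02};
  const char* p = data;
  uint32_t v = DecodeTwoBytes(&p);
  GOOGLE_DCHECK(v == 600 && p == data + 2);
}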

// More efficient varint parsing for big varints
inline const char* ParseBigVarint(const char* p, uint64_t* out) {
  auto pnew = p;
  auto tmp = DecodeTwoBytes(&pnew);
  uint64_t res = tmp >> 1;
  if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
    *out = res;
    return pnew;
  }
  for (std::uint32_t i = 1; i < 5; i++) {
    pnew = p + 2 * i;
    tmp = DecodeTwoBytes(&pnew);
    res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
    if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
      *out = res;
      return pnew;
    }
  }
  return nullptr;
}

PROTOBUF_EXPORT
std::pair<const char*, int32_t> ReadSizeFallback(const char* p, uint32_t first);
// Used for sizes; could read up to 5 bytes, which must be available.
// Additionally it makes sure the unsigned value fits in an int32_t, otherwise
// it returns nullptr. Caller must ensure it's safe to call.
inline uint32_t ReadSize(const char** pp) {
  auto p = *pp;
  uint32_t res = static_cast<uint8_t>(p[0]);
  if (res < 128) {
    *pp = p + 1;
    return res;
  }
  auto x = ReadSizeFallback(p, res);
  *pp = x.first;
  return x.second;
}

// Some convenience functions to simplify the generated parse loop code.
// Returning the value and updating the buffer pointer allows for nicer
// function composition. We rely on the compiler to inline these.
// Also, in debug compiles, having locally scoped variables tends to generate
// stack frames that scale as O(num fields).
inline uint64_t ReadVarint64(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return tmp;
}

inline uint32_t ReadVarint32(const char** p) {
  uint32_t tmp;
  *p = VarintParse(*p, &tmp);
  return tmp;
}

inline int64_t ReadVarintZigZag64(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode64(tmp);
}

inline int32_t ReadVarintZigZag32(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode32(static_cast<uint32_t>(tmp));
}
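
// Aside (illustrative sketch, not part of the original header; the function
// below is hypothetical). ZigZag interleaves negative and positive values so
// that small magnitudes encode as short varints: 0 -> 0, -1 -> 1, 1 -> 2,
// -2 -> 3, ... and decoding is (n >> 1) ^ -(n & 1). A one-byte varint 0x03
// read through ReadVarintZigZag32 therefore yields -2:
inline void ExampleZigZag() {
  const char data[1] = {0x03};
  const char* p = data;
  GOOGLE_DCHECK(ReadVarintZigZag32(&p) == -2 && p == data + 1);
}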

template <typename T, typename std::enable_if<
                          !std::is_base_of<MessageLite, T>::value, bool>::type>
PROTOBUF_NODISCARD const char* ParseContext::ParseMessage(T* msg,
                                                          const char* ptr) {
  int old;
  ptr = ReadSizeAndPushLimitAndDepth(ptr, &old);
  ptr = ptr ? msg->_InternalParse(ptr, this) : nullptr;
  depth_++;
  if (!PopLimit(old)) return nullptr;
  return ptr;
}

template <typename Tag, typename T>
const char* EpsCopyInputStream::ReadRepeatedFixed(const char* ptr,
                                                  Tag expected_tag,
                                                  RepeatedField<T>* out) {
  do {
    out->Add(UnalignedLoad<T>(ptr));
    ptr += sizeof(T);
    if (PROTOBUF_PREDICT_FALSE(ptr >= limit_end_)) return ptr;
  } while (UnalignedLoad<Tag>(ptr) == expected_tag && (ptr += sizeof(Tag)));
  return ptr;
}

// Add any of the following lines to debug which parse function is failing.

#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
  if (!(predicate)) {                                 \
    /*  ::raise(SIGINT);  */                          \
    /*  GOOGLE_LOG(ERROR) << "Parse failure";  */     \
    return ret;                                       \
  }

#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
  GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)

template <typename T>
const char* EpsCopyInputStream::ReadPackedFixed(const char* ptr, int size,
                                                RepeatedField<T>* out) {
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  int nbytes = buffer_end_ + kSlopBytes - ptr;
  while (size > nbytes) {
    int num = nbytes / sizeof(T);
    int old_entries = out->size();
    out->Reserve(old_entries + num);
    int block_size = num * sizeof(T);
    auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
    std::memcpy(dst, ptr, block_size);
#else
    for (int i = 0; i < num; i++)
      dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
    size -= block_size;
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    ptr += kSlopBytes - (nbytes - block_size);
    nbytes = buffer_end_ + kSlopBytes - ptr;
  }
  int num = size / sizeof(T);
  int old_entries = out->size();
  out->Reserve(old_entries + num);
  int block_size = num * sizeof(T);
  auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
  std::memcpy(dst, ptr, block_size);
#else
  for (int i = 0; i < num; i++) dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
  ptr += block_size;
  if (size != block_size) return nullptr;
  return ptr;
}

template <typename Add>
const char* ReadPackedVarintArray(const char* ptr, const char* end, Add add) {
  while (ptr < end) {
    uint64_t varint;
    ptr = VarintParse(ptr, &varint);
    if (ptr == nullptr) return nullptr;
    add(varint);
  }
  return ptr;
}

template <typename Add>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add) {
  int size = ReadSize(&ptr);
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  int chunk_size = buffer_end_ - ptr;
  while (size > chunk_size) {
    ptr = ReadPackedVarintArray(ptr, buffer_end_, add);
    if (ptr == nullptr) return nullptr;
    int overrun = ptr - buffer_end_;
    GOOGLE_DCHECK(overrun >= 0 && overrun <= kSlopBytes);
    if (size - chunk_size <= kSlopBytes) {
      // The current buffer contains all the information needed, so we don't
      // need to flip buffers. However, we must parse from a buffer with
      // enough space so we are not prone to a buffer overflow.
      char buf[kSlopBytes + 10] = {};
      std::memcpy(buf, buffer_end_, kSlopBytes);
      GOOGLE_CHECK_LE(size - chunk_size, kSlopBytes);
      auto end = buf + (size - chunk_size);
      auto res = ReadPackedVarintArray(buf + overrun, end, add);
      if (res == nullptr || res != end) return nullptr;
      return buffer_end_ + (res - buf);
    }
    size -= overrun + chunk_size;
    GOOGLE_DCHECK_GT(size, 0);
    // We must flip buffers.
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    ptr += overrun;
    chunk_size = buffer_end_ - ptr;
  }
  auto end = ptr + size;
  ptr = ReadPackedVarintArray(ptr, end, add);
  return end == ptr ? ptr : nullptr;
}

// Helper for verification of UTF-8.
PROTOBUF_EXPORT
bool VerifyUTF8(StringPiece s, const char* field_name);

inline bool VerifyUTF8(const std::string* s, const char* field_name) {
  return VerifyUTF8(*s, field_name);
}

// All the string parsers, with or without UTF-8 checking, and for all CTypes.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* InlineGreedyStringParser(
    std::string* s, const char* ptr, ParseContext* ctx);


template <typename T>
PROTOBUF_NODISCARD const char* FieldParser(uint64_t tag, T& field_parser,
                                           const char* ptr, ParseContext* ctx) {
  uint32_t number = tag >> 3;
  GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  using WireType = internal::WireFormatLite::WireType;
  switch (tag & 7) {
    case WireType::WIRETYPE_VARINT: {
      uint64_t value;
      ptr = VarintParse(ptr, &value);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      field_parser.AddVarint(number, value);
      break;
    }
    case WireType::WIRETYPE_FIXED64: {
      uint64_t value = UnalignedLoad<uint64_t>(ptr);
      ptr += 8;
      field_parser.AddFixed64(number, value);
      break;
    }
    case WireType::WIRETYPE_LENGTH_DELIMITED: {
      ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_START_GROUP: {
      ptr = field_parser.ParseGroup(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_END_GROUP: {
      GOOGLE_LOG(FATAL) << "Can't happen";
      break;
    }
    case WireType::WIRETYPE_FIXED32: {
      uint32_t value = UnalignedLoad<uint32_t>(ptr);
      ptr += 4;
      field_parser.AddFixed32(number, value);
      break;
    }
    default:
      return nullptr;
  }
  return ptr;
}

template <typename T>
PROTOBUF_NODISCARD const char* WireFormatParser(T& field_parser,
                                                const char* ptr,
                                                ParseContext* ctx) {
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ReadTag(ptr, &tag);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
    if (tag == 0 || (tag & 7) == 4) {
      ctx->SetLastTag(tag);
      return ptr;
    }
    ptr = FieldParser(tag, field_parser, ptr, ctx);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  }
  return ptr;
}
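
// Sketch (illustrative, not part of the original header; ExampleFieldSink is
// a hypothetical type, not a protobuf API). FieldParser and WireFormatParser
// are parameterized over a visitor-style `field_parser`. A minimal conforming
// visitor, assuming only scalar fields are of interest and that skipping a
// length-delimited payload is acceptable, could look like this:
struct ExampleFieldSink {
  void AddVarint(uint32_t /*number*/, uint64_t /*value*/) { /* record value */ }
  void AddFixed64(uint32_t /*number*/, uint64_t /*value*/) { /* record value */ }
  void AddFixed32(uint32_t /*number*/, uint32_t /*value*/) { /* record value */ }
  const char* ParseLengthDelimited(uint32_t /*number*/, const char* ptr,
                                   ParseContext* ctx) {
    int size = ReadSize(&ptr);  // Length prefix, then skip the payload.
    if (ptr == nullptr) return nullptr;
    return ctx->Skip(ptr, size);
  }
  const char* ParseGroup(uint32_t /*number*/, const char* ptr,
                         ParseContext* ctx) {
    (void)ptr;
    (void)ctx;
    return nullptr;  // Groups are not supported by this sketch.
  }
};
// Driving the loop as `WireFormatParser(sink, ptr, ctx)` then consumes fields
// until a pushed limit, end of stream, a zero tag, or an end-group tag
// terminates the parse.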

// The packed parsers parse repeated numeric primitives directly into the
// corresponding field.

// These are packed varints.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx);

template <typename T>
PROTOBUF_NODISCARD const char* PackedEnumParser(void* object, const char* ptr,
                                                ParseContext* ctx,
                                                bool (*is_valid)(int),
                                                InternalMetadata* metadata,
                                                int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, metadata, field_num](uint64_t val) {
        if (is_valid(val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
        }
      });
}

template <typename T>
PROTOBUF_NODISCARD const char* PackedEnumParserArg(
    void* object, const char* ptr, ParseContext* ctx,
    bool (*is_valid)(const void*, int), const void* data,
    InternalMetadata* metadata, int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, data, metadata, field_num](uint64_t val) {
        if (is_valid(data, val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
        }
      });
}

PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedBoolParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFloatParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedDoubleParser(
    void* object, const char* ptr, ParseContext* ctx);

// This is the only recursive parser.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownGroupLiteParse(
    std::string* unknown, const char* ptr, ParseContext* ctx);
// This is a helper for UnknownGroupLiteParse, but it is actually also
// useful in the generated code. It uses overloading on std::string* vs
// UnknownFieldSet* to make the generated code isomorphic between full and
// lite.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownFieldParse(
    uint32_t tag, std::string* unknown, const char* ptr, ParseContext* ctx);

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__