Coverage Report

Created: 2023-06-07 07:09

/src/LPM/external.protobuf/include/google/protobuf/map.h
Line | Count | Source
1
// Protocol Buffers - Google's data interchange format
2
// Copyright 2008 Google Inc.  All rights reserved.
3
// https://developers.google.com/protocol-buffers/
4
//
5
// Redistribution and use in source and binary forms, with or without
6
// modification, are permitted provided that the following conditions are
7
// met:
8
//
9
//     * Redistributions of source code must retain the above copyright
10
// notice, this list of conditions and the following disclaimer.
11
//     * Redistributions in binary form must reproduce the above
12
// copyright notice, this list of conditions and the following disclaimer
13
// in the documentation and/or other materials provided with the
14
// distribution.
15
//     * Neither the name of Google Inc. nor the names of its
16
// contributors may be used to endorse or promote products derived from
17
// this software without specific prior written permission.
18
//
19
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31
// This file defines the map container and its helpers to support protobuf maps.
32
//
33
// The Map and MapIterator types are provided by this header file.
34
// Please avoid using other types defined here, unless they are public
35
// types within Map or MapIterator, such as Map::value_type.
36
37
#ifndef GOOGLE_PROTOBUF_MAP_H__
38
#define GOOGLE_PROTOBUF_MAP_H__
39
40
#include <algorithm>
41
#include <functional>
42
#include <initializer_list>
43
#include <iterator>
44
#include <limits>  // To support Visual Studio 2008
45
#include <string>
46
#include <type_traits>
47
#include <utility>
48
49
#if !defined(GOOGLE_PROTOBUF_NO_RDTSC) && defined(__APPLE__)
50
#include <mach/mach_time.h>
51
#endif
52
53
#include "google/protobuf/stubs/common.h"
54
#include "absl/container/btree_map.h"
55
#include "absl/hash/hash.h"
56
#include "absl/meta/type_traits.h"
57
#include "absl/strings/string_view.h"
58
#include "google/protobuf/arena.h"
59
#include "google/protobuf/generated_enum_util.h"
60
#include "google/protobuf/map_type_handler.h"
61
#include "google/protobuf/port.h"
62
63
64
#ifdef SWIG
65
#error "You cannot SWIG proto headers"
66
#endif
67
68
// Must be included last.
69
#include "google/protobuf/port_def.inc"
70
71
namespace google {
72
namespace protobuf {
73
74
template <typename Key, typename T>
75
class Map;
76
77
class MapIterator;
78
79
template <typename Enum>
80
struct is_proto_enum;
81
82
namespace internal {
83
template <typename Derived, typename Key, typename T,
84
          WireFormatLite::FieldType key_wire_type,
85
          WireFormatLite::FieldType value_wire_type>
86
class MapFieldLite;
87
88
template <typename Derived, typename Key, typename T,
89
          WireFormatLite::FieldType key_wire_type,
90
          WireFormatLite::FieldType value_wire_type>
91
class MapField;
92
93
template <typename Key, typename T>
94
class TypeDefinedMapFieldBase;
95
96
class DynamicMapField;
97
98
class GeneratedMessageReflection;
99
100
// Internal type traits that can be used to define custom key/value types. These
101
// are only specialized by protobuf internals, and never by users.
102
template <typename T, typename VoidT = void>
103
struct is_internal_map_key_type : std::false_type {};
104
105
template <typename T, typename VoidT = void>
106
struct is_internal_map_value_type : std::false_type {};
107
108
// Re-implements std::allocator to use the arena allocator for memory allocation.
109
// Used for Map implementation. Users should not use this class
110
// directly.
111
template <typename U>
112
class MapAllocator {
113
 public:
114
  using value_type = U;
115
  using pointer = value_type*;
116
  using const_pointer = const value_type*;
117
  using reference = value_type&;
118
  using const_reference = const value_type&;
119
  using size_type = size_t;
120
  using difference_type = ptrdiff_t;
121
122
  constexpr MapAllocator() : arena_(nullptr) {}
123
  explicit constexpr MapAllocator(Arena* arena) : arena_(arena) {}
124
  template <typename X>
125
  MapAllocator(const MapAllocator<X>& allocator)  // NOLINT(runtime/explicit)
126
      : arena_(allocator.arena()) {}
127
128
  // MapAllocator does not support alignments beyond 8. Technically we should
129
  // support up to std::max_align_t, but this fails with ubsan and tcmalloc
130
  // debug allocation logic, which assume 8 as the default alignment.
131
  static_assert(alignof(value_type) <= 8, "");
132
133
0
  pointer allocate(size_type n, const void* /* hint */ = nullptr) {
134
0
    // If arena is not given, malloc needs to be called which doesn't
135
0
    // construct element object.
136
0
    if (arena_ == nullptr) {
137
0
      return static_cast<pointer>(::operator new(n * sizeof(value_type)));
138
0
    } else {
139
0
      return reinterpret_cast<pointer>(
140
0
          Arena::CreateArray<uint8_t>(arena_, n * sizeof(value_type)));
141
0
    }
142
0
  }
Unexecuted instantiation: google::protobuf::internal::MapAllocator<google::protobuf::internal::NodeBase>::allocate(unsigned long, void const*)
Unexecuted instantiation: google::protobuf::internal::MapAllocator<google::protobuf::internal::TableEntryPtr>::allocate(unsigned long, void const*)
143
144
0
  void deallocate(pointer p, size_type n) {
145
0
    if (arena_ == nullptr) {
146
0
      internal::SizedDelete(p, n * sizeof(value_type));
147
0
    }
148
0
  }
Unexecuted instantiation: google::protobuf::internal::MapAllocator<google::protobuf::internal::NodeBase>::deallocate(google::protobuf::internal::NodeBase*, unsigned long)
Unexecuted instantiation: google::protobuf::internal::MapAllocator<google::protobuf::internal::TableEntryPtr>::deallocate(google::protobuf::internal::TableEntryPtr*, unsigned long)
149
150
#if !defined(GOOGLE_PROTOBUF_OS_APPLE) && !defined(GOOGLE_PROTOBUF_OS_NACL) && \
151
    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
152
  template <class NodeType, class... Args>
153
  void construct(NodeType* p, Args&&... args) {
154
    // Clang 3.6 doesn't compile static casting to void* directly. (Issue
155
    // #1266) According to the C++ standard 5.2.9/1: "The static_cast operator shall
156
    // not cast away constness". So the possibly-const pointer is first cast to
157
    // const void*, and then the constness is removed with const_cast.
158
    new (const_cast<void*>(static_cast<const void*>(p)))
159
        NodeType(std::forward<Args>(args)...);
160
  }
161
162
  template <class NodeType>
163
  void destroy(NodeType* p) {
164
    p->~NodeType();
165
  }
166
#else
167
  void construct(pointer p, const_reference t) { new (p) value_type(t); }
168
169
  void destroy(pointer p) { p->~value_type(); }
170
#endif
171
172
  template <typename X>
173
  struct rebind {
174
    using other = MapAllocator<X>;
175
  };
176
177
  template <typename X>
178
  bool operator==(const MapAllocator<X>& other) const {
179
    return arena_ == other.arena_;
180
  }
181
182
  template <typename X>
183
  bool operator!=(const MapAllocator<X>& other) const {
184
    return arena_ != other.arena_;
185
  }
186
187
  // To support Visual Studio 2008
188
  size_type max_size() const {
189
    // parentheses around (std::...:max) prevents macro warning of max()
190
    return (std::numeric_limits<size_type>::max)();
191
  }
192
193
  // To support gcc-4.4, which does not properly
194
  // support templated friend classes
195
0
  Arena* arena() const { return arena_; }
196
197
 private:
198
  using DestructorSkippable_ = void;
199
  Arena* arena_;
200
};
201
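The allocator above has two paths: without an arena it calls plain operator new and must be paired with deallocate(), while arena-backed memory comes from Arena::CreateArray and deallocate() becomes a no-op. A minimal heap-backed round trip, as an illustrative sketch only (this internal class is not meant for user code):

// Illustrative sketch (not part of map.h).
inline void HeapBackedAllocatorSketch() {
  google::protobuf::internal::MapAllocator<int> alloc;  // arena_ == nullptr
  int* p = alloc.allocate(4);  // raw storage for four ints, nothing constructed yet
  alloc.construct(p, 42);      // placement-new the first element
  alloc.destroy(p);            // run its destructor explicitly
  alloc.deallocate(p, 4);      // actually frees only because no arena is attached
}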
202
// To save on binary size and simplify generic uses of the map types we collapse
203
// signed/unsigned versions of the same sized integer to the unsigned version.
204
template <typename T, typename = void>
205
struct KeyForBaseImpl {
206
  using type = T;
207
};
208
template <typename T>
209
struct KeyForBaseImpl<T, std::enable_if_t<std::is_integral<T>::value &&
210
                                          std::is_signed<T>::value>> {
211
  using type = std::make_unsigned_t<T>;
212
};
213
template <typename T>
214
using KeyForBase = typename KeyForBaseImpl<T>::type;
215
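The collapsing described above can be spelled out with a few static_asserts; a small sketch, assuming the usual fixed-width integer typedefs:

// Illustrative sketch (not part of map.h).
static_assert(std::is_same<google::protobuf::internal::KeyForBase<int32_t>, uint32_t>::value,
              "signed integer keys collapse to their unsigned counterpart");
static_assert(std::is_same<google::protobuf::internal::KeyForBase<uint64_t>, uint64_t>::value,
              "unsigned integer keys are unchanged");
static_assert(std::is_same<google::protobuf::internal::KeyForBase<std::string>, std::string>::value,
              "non-integral keys are unchanged");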
216
// Default case: Not transparent.
217
// We use std::hash<key_type>/std::less<key_type> and all the lookup functions
218
// only accept `key_type`.
219
template <typename key_type>
220
struct TransparentSupport {
221
  using hash = std::hash<key_type>;
222
  using less = std::less<key_type>;
223
224
  static bool Equals(const key_type& a, const key_type& b) { return a == b; }
225
226
  template <typename K>
227
  using key_arg = key_type;
228
};
229
230
// We add transparent support for std::string keys. We use
231
// std::hash<absl::string_view> as it supports the input types we care about.
232
// The lookup functions accept arbitrary `K`. This will include any key type
233
// that is convertible to absl::string_view.
234
template <>
235
struct TransparentSupport<std::string> {
236
  // If the element is not convertible to absl::string_view, try to convert to
237
  // std::string first, and then fall back to support for converting from
238
  // std::string_view. The ranked overload pattern is used to specify our
239
  // order of preference.
240
  struct Rank0 {};
241
  struct Rank1 : Rank0 {};
242
  struct Rank2 : Rank1 {};
243
  template <typename T, typename = std::enable_if_t<
244
                            std::is_convertible<T, absl::string_view>::value>>
245
  static absl::string_view ImplicitConvertImpl(T&& str, Rank2) {
246
    absl::string_view ref = str;
247
    return ref;
248
  }
249
  template <typename T, typename = std::enable_if_t<
250
                            std::is_convertible<T, const std::string&>::value>>
251
  static absl::string_view ImplicitConvertImpl(T&& str, Rank1) {
252
    const std::string& ref = str;
253
    return ref;
254
  }
255
  template <typename T>
256
  static absl::string_view ImplicitConvertImpl(T&& str, Rank0) {
257
    return {str.data(), str.size()};
258
  }
259
260
  template <typename T>
261
  static absl::string_view ImplicitConvert(T&& str) {
262
    return ImplicitConvertImpl(std::forward<T>(str), Rank2{});
263
  }
264
265
  struct hash : public absl::Hash<absl::string_view> {
266
    using is_transparent = void;
267
268
    template <typename T>
269
    size_t operator()(T&& str) const {
270
      return absl::Hash<absl::string_view>::operator()(
271
          ImplicitConvert(std::forward<T>(str)));
272
    }
273
  };
274
  struct less {
275
    using is_transparent = void;
276
277
    template <typename T, typename U>
278
    bool operator()(T&& t, U&& u) const {
279
      return ImplicitConvert(std::forward<T>(t)) <
280
             ImplicitConvert(std::forward<U>(u));
281
    }
282
  };
283
284
  template <typename T, typename U>
285
  static bool Equals(T&& t, U&& u) {
286
    return ImplicitConvert(std::forward<T>(t)) ==
287
           ImplicitConvert(std::forward<U>(u));
288
  }
289
290
  template <typename K>
291
  using key_arg = K;
292
};
293
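A short sketch of what the transparent functors above provide: one hasher and one comparator that accept std::string, absl::string_view, and string literals without materializing temporary std::strings. Illustrative only; assumes <cassert> in addition to the includes at the top of this header.

// Illustrative sketch (not part of map.h).
inline void TransparentLookupSketch() {
  using TS = google::protobuf::internal::TransparentSupport<std::string>;
  TS::hash h;
  TS::less lt;
  std::string owned = "alpha";
  absl::string_view view = "alpha";
  assert(h(owned) == h(view));        // both are hashed as absl::string_view
  assert(h(owned) == h("alpha"));     // string literals work too
  assert(!lt(owned, view) && !lt(view, owned));  // equivalent keys
  assert(TS::Equals(owned, "alpha"));
}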
294
struct NodeBase {
295
  // Align the node to allow KeyNode to predict the location of the key.
296
  // This way sizeof(NodeBase) contains any possible padding it was going to
297
  // have between NodeBase and the key.
298
  alignas(kMaxMessageAlignment) NodeBase* next;
299
};
300
301
0
inline NodeBase* EraseFromLinkedList(NodeBase* item, NodeBase* head) {
302
0
  if (head == item) {
303
0
    return head->next;
304
0
  } else {
305
0
    head->next = EraseFromLinkedList(item, head->next);
306
0
    return head;
307
0
  }
308
0
}
309
310
0
inline bool TableEntryIsTooLong(NodeBase* node) {
311
0
  const size_t kMaxLength = 8;
312
0
  size_t count = 0;
313
0
  do {
314
0
    ++count;
315
0
    node = node->next;
316
0
  } while (node != nullptr);
317
0
  // Invariant: no linked list is ever longer than kMaxLength.
318
0
  ABSL_DCHECK_LE(count, kMaxLength);
319
0
  return count >= kMaxLength;
320
0
}
321
322
template <typename T>
323
using KeyForTree = std::conditional_t<std::is_integral<T>::value, uint64_t,
324
                                      std::reference_wrapper<const T>>;
325
326
template <typename T>
327
using LessForTree = typename TransparentSupport<
328
    std::conditional_t<std::is_integral<T>::value, uint64_t, T>>::less;
329
330
template <typename Key>
331
using TreeForMap =
332
    absl::btree_map<KeyForTree<Key>, NodeBase*, LessForTree<Key>,
333
                    MapAllocator<std::pair<const KeyForTree<Key>, NodeBase*>>>;
334
335
// Type safe tagged pointer.
336
// We convert to/from nodes and trees using the operations below.
337
// They ensure that the tags are used correctly.
338
// There are three states:
339
//  - x == 0: the entry is empty
340
//  - x != 0 && (x&1) == 0: the entry is a node list
341
//  - x != 0 && (x&1) == 1: the entry is a tree
342
enum class TableEntryPtr : uintptr_t;
343
344
0
inline bool TableEntryIsEmpty(TableEntryPtr entry) {
345
0
  return entry == TableEntryPtr{};
346
0
}
347
0
inline bool TableEntryIsTree(TableEntryPtr entry) {
348
0
  return (static_cast<uintptr_t>(entry) & 1) == 1;
349
0
}
350
0
inline bool TableEntryIsList(TableEntryPtr entry) {
351
0
  return !TableEntryIsTree(entry);
352
0
}
353
0
inline bool TableEntryIsNonEmptyList(TableEntryPtr entry) {
354
0
  return !TableEntryIsEmpty(entry) && TableEntryIsList(entry);
355
0
}
356
0
inline NodeBase* TableEntryToNode(TableEntryPtr entry) {
357
0
  ABSL_DCHECK(TableEntryIsList(entry));
358
0
  return reinterpret_cast<NodeBase*>(static_cast<uintptr_t>(entry));
359
0
}
360
0
inline TableEntryPtr NodeToTableEntry(NodeBase* node) {
361
0
  ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
362
0
  return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node));
363
0
}
364
template <typename Tree>
365
Tree* TableEntryToTree(TableEntryPtr entry) {
366
  ABSL_DCHECK(TableEntryIsTree(entry));
367
  return reinterpret_cast<Tree*>(static_cast<uintptr_t>(entry) - 1);
368
}
369
template <typename Tree>
370
TableEntryPtr TreeToTableEntry(Tree* node) {
371
  ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
372
  return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node) | 1);
373
}
374
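A sketch of the tagging scheme above: a list entry stores the NodeBase* unchanged (low bit 0), a tree entry stores the tree pointer with its low bit set, and the helpers check and strip the tag on the way back out. Illustrative only; assumes <cassert>.

// Illustrative sketch (not part of map.h): the three TableEntryPtr states.
inline void TableEntryPtrTaggingSketch() {
  using namespace google::protobuf::internal;
  TableEntryPtr entry{};             // state 1: empty
  assert(TableEntryIsEmpty(entry));
  NodeBase node{};                   // node addresses are at least 8-byte aligned
  entry = NodeToTableEntry(&node);   // state 2: non-empty list (low bit stays 0)
  assert(TableEntryIsNonEmptyList(entry));
  assert(TableEntryToNode(entry) == &node);
  // State 3: a tree. TreeToTableEntry(tree) stores the pointer with its low bit
  // set, which is exactly what TableEntryIsTree() tests for.
}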
375
// This captures all numeric types.
376
0
inline size_t MapValueSpaceUsedExcludingSelfLong(bool) { return 0; }
377
0
inline size_t MapValueSpaceUsedExcludingSelfLong(const std::string& str) {
378
0
  return StringSpaceUsedExcludingSelfLong(str);
379
0
}
380
template <typename T,
381
          typename = decltype(std::declval<const T&>().SpaceUsedLong())>
382
size_t MapValueSpaceUsedExcludingSelfLong(const T& message) {
383
  return message.SpaceUsedLong() - sizeof(T);
384
}
385
386
constexpr size_t kGlobalEmptyTableSize = 1;
387
PROTOBUF_EXPORT extern const TableEntryPtr
388
    kGlobalEmptyTable[kGlobalEmptyTableSize];
389
390
// Space used for the table, trees, and nodes.
391
// Does not include the indirect space used, e.g. the data of a std::string.
392
template <typename Key>
393
PROTOBUF_NOINLINE size_t SpaceUsedInTable(TableEntryPtr* table,
394
                                          size_t num_buckets,
395
                                          size_t num_elements,
396
                                          size_t sizeof_node) {
397
  size_t size = 0;
398
  // The size of the table.
399
  size += sizeof(void*) * num_buckets;
400
  // All the nodes.
401
  size += sizeof_node * num_elements;
402
  // For each tree, count the overhead of those nodes.
403
  // Only buckets that hold trees contribute this extra overhead.
404
  for (size_t b = 0; b < num_buckets; ++b) {
405
    if (internal::TableEntryIsTree(table[b])) {
406
      using Tree = TreeForMap<Key>;
407
      Tree* tree = TableEntryToTree<Tree>(table[b]);
408
      // Estimated cost of the red-black tree nodes, 3 pointers plus a
409
      // bool (plus alignment, so 4 pointers).
410
      size += tree->size() *
411
              (sizeof(typename Tree::value_type) + sizeof(void*) * 4);
412
    }
413
  }
414
  return size;
415
}
416
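A back-of-the-envelope application of the accounting above, assuming a 64-bit build with 8 buckets, 4 elements, 48-byte nodes, and no tree-converted buckets:

// Illustrative example (not part of map.h).
//   table:  8 buckets  * sizeof(void*) =  64 bytes
//   nodes:  4 elements * 48 bytes      = 192 bytes
//   trees:  none                       =   0 bytes
//   SpaceUsedInTable                   = 256 bytes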
417
template <typename Map,
418
          typename = typename std::enable_if<
419
              !std::is_scalar<typename Map::key_type>::value ||
420
              !std::is_scalar<typename Map::mapped_type>::value>::type>
421
size_t SpaceUsedInValues(const Map* map) {
422
  size_t size = 0;
423
  for (const auto& v : *map) {
424
    size += internal::MapValueSpaceUsedExcludingSelfLong(v.first) +
425
            internal::MapValueSpaceUsedExcludingSelfLong(v.second);
426
  }
427
  return size;
428
}
429
430
0
inline size_t SpaceUsedInValues(const void*) { return 0; }
431
432
// Base class for all Map instantiations.
433
// This class holds all the data and provides the basic functionality shared
434
// among all instantiations.
435
// Having an untyped base class helps generic consumers (like the table-driven
436
// parser) by having non-template code that can handle all instantiations.
437
class PROTOBUF_EXPORT UntypedMapBase {
438
  using Allocator = internal::MapAllocator<void*>;
439
440
 public:
441
  using size_type = size_t;
442
443
  explicit constexpr UntypedMapBase(Arena* arena)
444
      : num_elements_(0),
445
        num_buckets_(internal::kGlobalEmptyTableSize),
446
        seed_(0),
447
        index_of_first_non_null_(internal::kGlobalEmptyTableSize),
448
        table_(const_cast<TableEntryPtr*>(internal::kGlobalEmptyTable)),
449
0
        alloc_(arena) {}
450
451
  UntypedMapBase(const UntypedMapBase&) = delete;
452
  UntypedMapBase& operator=(const UntypedMapBase&) = delete;
453
454
 protected:
455
  enum { kMinTableSize = 8 };
456
457
 public:
458
0
  Arena* arena() const { return this->alloc_.arena(); }
459
460
0
  void Swap(UntypedMapBase* other) {
461
0
    std::swap(num_elements_, other->num_elements_);
462
0
    std::swap(num_buckets_, other->num_buckets_);
463
0
    std::swap(seed_, other->seed_);
464
0
    std::swap(index_of_first_non_null_, other->index_of_first_non_null_);
465
0
    std::swap(table_, other->table_);
466
0
    std::swap(alloc_, other->alloc_);
467
0
  }
468
469
0
  static size_type max_size() {
470
0
    return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);
471
0
  }
472
0
  size_type size() const { return num_elements_; }
473
0
  bool empty() const { return size() == 0; }
474
475
 protected:
476
  friend class TcParser;
477
478
  struct NodeAndBucket {
479
    NodeBase* node;
480
    size_type bucket;
481
  };
482
483
  // Returns whether we should insert after the head of the list. For
484
  // non-optimized builds, we randomly decide whether to insert right at the
485
  // head of the list or just after the head. This helps add a little bit of
486
  // non-determinism to the map ordering.
487
0
  bool ShouldInsertAfterHead(void* node) {
488
0
#ifdef NDEBUG
489
0
    (void)node;
490
0
    return false;
491
0
#else
492
0
    // Doing modulo with a prime mixes the bits more.
493
0
    return (reinterpret_cast<uintptr_t>(node) ^ seed_) % 13 > 6;
494
0
#endif
495
0
  }
496
497
  // Helper for InsertUnique.  Handles the case where bucket b is a
498
  // not-too-long linked list.
499
0
  void InsertUniqueInList(size_type b, NodeBase* node) {
500
0
    if (!TableEntryIsEmpty(b) && ShouldInsertAfterHead(node)) {
501
0
      auto* first = TableEntryToNode(table_[b]);
502
0
      node->next = first->next;
503
0
      first->next = node;
504
0
    } else {
505
0
      node->next = TableEntryToNode(table_[b]);
506
0
      table_[b] = NodeToTableEntry(node);
507
0
    }
508
0
  }
509
510
0
  bool TableEntryIsEmpty(size_type b) const {
511
0
    return internal::TableEntryIsEmpty(table_[b]);
512
0
  }
513
0
  bool TableEntryIsNonEmptyList(size_type b) const {
514
0
    return internal::TableEntryIsNonEmptyList(table_[b]);
515
0
  }
516
0
  bool TableEntryIsTree(size_type b) const {
517
0
    return internal::TableEntryIsTree(table_[b]);
518
0
  }
519
0
  bool TableEntryIsList(size_type b) const {
520
0
    return internal::TableEntryIsList(table_[b]);
521
0
  }
522
523
  // Return whether table_[b] is a linked list that seems awfully long.
524
  // Requires table_[b] to point to a non-empty linked list.
525
0
  bool TableEntryIsTooLong(size_type b) {
526
0
    return internal::TableEntryIsTooLong(TableEntryToNode(table_[b]));
527
0
  }
528
529
  // Return a power of two no less than max(kMinTableSize, n).
530
  // Assumes either n < kMinTableSize or n is a power of two.
531
0
  size_type TableSize(size_type n) {
532
0
    return n < static_cast<size_type>(kMinTableSize)
533
0
               ? static_cast<size_type>(kMinTableSize)
534
0
               : n;
535
0
  }
536
537
  template <typename T>
538
  using AllocFor = absl::allocator_traits<Allocator>::template rebind_alloc<T>;
539
540
  // Alignment of the nodes is the same as alignment of NodeBase.
541
0
  NodeBase* AllocNode(size_t node_size) {
542
0
    PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
543
0
    return AllocFor<NodeBase>(alloc_).allocate(node_size / sizeof(NodeBase));
544
0
  }
545
546
0
  void DeallocNode(NodeBase* node, size_t node_size) {
547
0
    PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
548
0
    AllocFor<NodeBase>(alloc_).deallocate(node, node_size / sizeof(NodeBase));
549
0
  }
550
551
0
  void DeleteTable(TableEntryPtr* table, size_type n) {
552
0
    AllocFor<TableEntryPtr>(alloc_).deallocate(table, n);
553
0
  }
554
555
0
  TableEntryPtr* CreateEmptyTable(size_type n) {
556
0
    ABSL_DCHECK_GE(n, size_type{kMinTableSize});
557
0
    ABSL_DCHECK_EQ(n & (n - 1), 0u);
558
0
    TableEntryPtr* result = AllocFor<TableEntryPtr>(alloc_).allocate(n);
559
0
    memset(result, 0, n * sizeof(result[0]));
560
0
    return result;
561
0
  }
562
563
  // Return a randomish value.
564
0
  size_type Seed() const {
565
0
    // We get a little bit of randomness from the address of the map. The
566
0
    // lower bits are not very random, due to alignment, so we discard them
567
0
    // and shift the higher bits into their place.
568
0
    size_type s = reinterpret_cast<uintptr_t>(this) >> 4;
569
0
#if !defined(GOOGLE_PROTOBUF_NO_RDTSC)
570
0
#if defined(__APPLE__)
571
0
    // Use a commpage-based fast time function on Apple environments (MacOS,
572
0
    // iOS, tvOS, watchOS, etc).
573
0
    s += mach_absolute_time();
574
0
#elif defined(__x86_64__) && defined(__GNUC__)
575
0
    uint32_t hi, lo;
576
0
    asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
577
0
    s += ((static_cast<uint64_t>(hi) << 32) | lo);
578
0
#elif defined(__aarch64__) && defined(__GNUC__)
579
0
    // There is no rdtsc on ARMv8. CNTVCT_EL0 is the virtual counter of the
580
0
    // system timer. It runs at a different frequency than the CPU's, but is
581
0
    // the best source of time-based entropy we get.
582
0
    uint64_t virtual_timer_value;
583
0
    asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
584
0
    s += virtual_timer_value;
585
0
#endif
586
0
#endif  // !defined(GOOGLE_PROTOBUF_NO_RDTSC)
587
0
    return s;
588
0
  }
589
590
  size_type num_elements_;
591
  size_type num_buckets_;
592
  size_type seed_;
593
  size_type index_of_first_non_null_;
594
  TableEntryPtr* table_;  // an array with num_buckets_ entries
595
  Allocator alloc_;
596
};
597
598
// The value might be of different signedness, so use memcpy to extract it.
599
template <typename T, std::enable_if_t<std::is_integral<T>::value, int> = 0>
600
T ReadKey(const void* ptr) {
601
  T out;
602
  memcpy(&out, ptr, sizeof(T));
603
  return out;
604
}
605
606
template <typename T, std::enable_if_t<!std::is_integral<T>::value, int> = 0>
607
const T& ReadKey(const void* ptr) {
608
  return *reinterpret_cast<const T*>(ptr);
609
}
610
611
// KeyMapBase is a chaining hash map with the additional feature that some
612
// buckets can be converted to use an ordered container.  This ensures O(lg n)
613
// bounds on find, insert, and erase, while avoiding the overheads of ordered
614
// containers most of the time.
615
//
616
// The implementation doesn't need the full generality of unordered_map,
617
// and it doesn't have it.  More bells and whistles can be added as needed.
618
// Some implementation details:
619
// 1. The number of buckets is a power of two.
620
// 2. As is typical for hash_map and such, the Keys and Values are always
621
//    stored in linked list nodes.  Pointers to elements are never invalidated
622
//    until the element is deleted.
623
// 3. The trees' payload type is pointer to linked-list node.  Tree-converting
624
//    a bucket doesn't copy Key-Value pairs.
625
// 4. Once we've tree-converted a bucket, it is never converted back unless the
626
//    bucket is completely emptied out. Note that the items a tree contains may
627
//    wind up assigned to trees or lists upon a rehash.
628
// 5. Mutations to a map do not invalidate the map's iterators, pointers to
629
//    elements, or references to elements.
630
// 6. Except for erase(iterator), any non-const method can reorder iterators.
631
// 7. Uses KeyForTree<Key> when using the Tree representation, which
632
//    is either `uint64_t` if `Key` is an integer, or
633
//    `reference_wrapper<const Key>` otherwise. This avoids unnecessary copies
634
//    of string keys, for example.
635
636
template <typename Key>
637
class KeyMapBase : public UntypedMapBase {
638
  static_assert(!std::is_signed<Key>::value || !std::is_integral<Key>::value,
639
                "");
640
641
 public:
642
  using hasher = typename TransparentSupport<Key>::hash;
643
644
  using UntypedMapBase::UntypedMapBase;
645
646
 protected:
647
  struct KeyNode : NodeBase {
648
    static constexpr size_t kOffset = sizeof(NodeBase);
649
    decltype(auto) key() const {
650
      return ReadKey<Key>(reinterpret_cast<const char*>(this) + kOffset);
651
    }
652
  };
653
654
  // Trees. The payload type is a copy of Key, so that we can query the tree
655
  // with Keys that are not in any particular data structure.
656
  // The value is a void* pointing to Node. We use void* instead of Node* to
657
  // avoid code bloat. That way there is only one instantiation of the tree
658
  // class per key type.
659
  using Tree = internal::TreeForMap<Key>;
660
  using TreeIterator = typename Tree::iterator;
661
662
  class KeyIteratorBase {
663
   public:
664
    // Invariants:
665
    // node_ is always correct. This is handy because the most common
666
    // operations are operator* and operator-> and they only use node_.
667
    // When node_ is set to a non-null value, all the other non-const fields
668
    // are updated to be correct also, but those fields can become stale
669
    // if the underlying map is modified.  When those fields are needed they
670
    // are rechecked, and updated if necessary.
671
    KeyIteratorBase() : node_(nullptr), m_(nullptr), bucket_index_(0) {}
672
673
    explicit KeyIteratorBase(const KeyMapBase* m) : m_(m) {
674
      SearchFrom(m->index_of_first_non_null_);
675
    }
676
677
    KeyIteratorBase(KeyNode* n, const KeyMapBase* m, size_type index)
678
        : node_(n), m_(m), bucket_index_(index) {}
679
680
    KeyIteratorBase(TreeIterator tree_it, const KeyMapBase* m, size_type index)
681
        : node_(NodeFromTreeIterator(tree_it)), m_(m), bucket_index_(index) {}
682
683
    // Advance through buckets, looking for the first that isn't empty.
684
    // If nothing non-empty is found then leave node_ == nullptr.
685
    void SearchFrom(size_type start_bucket) {
686
      ABSL_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
687
                  !m_->TableEntryIsEmpty(m_->index_of_first_non_null_));
688
      for (size_type i = start_bucket; i < m_->num_buckets_; ++i) {
689
        TableEntryPtr entry = m_->table_[i];
690
        if (entry == TableEntryPtr{}) continue;
691
        bucket_index_ = i;
692
        if (PROTOBUF_PREDICT_TRUE(internal::TableEntryIsList(entry))) {
693
          node_ = static_cast<KeyNode*>(TableEntryToNode(entry));
694
        } else {
695
          Tree* tree = TableEntryToTree<Tree>(entry);
696
          ABSL_DCHECK(!tree->empty());
697
          node_ = static_cast<KeyNode*>(tree->begin()->second);
698
        }
699
        return;
700
      }
701
      node_ = nullptr;
702
      bucket_index_ = 0;
703
    }
704
705
    // The definition of operator== is handled by the derived type. If we were
706
    // to do it in this class it would allow comparing iterators of different
707
    // map types.
708
    bool Equals(const KeyIteratorBase& other) const {
709
      return node_ == other.node_;
710
    }
711
712
    // The definition of operator++ is handled in the derived type. We would not
713
    // be able to return the right type from here.
714
    void PlusPlus() {
715
      if (node_->next == nullptr) {
716
        SearchFrom(bucket_index_ + 1);
717
      } else {
718
        node_ = static_cast<KeyNode*>(node_->next);
719
      }
720
    }
721
722
    KeyNode* node_;
723
    const KeyMapBase* m_;
724
    size_type bucket_index_;
725
  };
726
727
 public:
728
  hasher hash_function() const { return {}; }
729
730
 protected:
731
  PROTOBUF_NOINLINE void erase_no_destroy(size_type b, KeyNode* node) {
732
    TreeIterator tree_it;
733
    const bool is_list = revalidate_if_necessary(b, node, &tree_it);
734
    if (is_list) {
735
      ABSL_DCHECK(TableEntryIsNonEmptyList(b));
736
      auto* head = TableEntryToNode(table_[b]);
737
      head = EraseFromLinkedList(node, head);
738
      table_[b] = NodeToTableEntry(head);
739
    } else {
740
      ABSL_DCHECK(this->TableEntryIsTree(b));
741
      Tree* tree = internal::TableEntryToTree<Tree>(this->table_[b]);
742
      if (tree_it != tree->begin()) {
743
        auto* prev = std::prev(tree_it)->second;
744
        prev->next = prev->next->next;
745
      }
746
      tree->erase(tree_it);
747
      if (tree->empty()) {
748
        this->DestroyTree(tree);
749
        this->table_[b] = TableEntryPtr{};
750
      }
751
    }
752
    --num_elements_;
753
    if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) {
754
      while (index_of_first_non_null_ < num_buckets_ &&
755
             TableEntryIsEmpty(index_of_first_non_null_)) {
756
        ++index_of_first_non_null_;
757
      }
758
    }
759
  }
760
761
  // TODO(sbenza): We can reduce duplication by coercing `K` to a common type.
762
  // Eg, for string keys we can coerce to string_view. Otherwise, we instantiate
763
  // this with all the different `char[N]` of the caller.
764
  template <typename K>
765
  NodeAndBucket FindHelper(const K& k, TreeIterator* it = nullptr) const {
766
    size_type b = BucketNumber(k);
767
    if (TableEntryIsNonEmptyList(b)) {
768
      auto* node = internal::TableEntryToNode(table_[b]);
769
      do {
770
        if (internal::TransparentSupport<Key>::Equals(
771
                static_cast<KeyNode*>(node)->key(), k)) {
772
          return {node, b};
773
        } else {
774
          node = node->next;
775
        }
776
      } while (node != nullptr);
777
    } else if (TableEntryIsTree(b)) {
778
      Tree* tree = internal::TableEntryToTree<Tree>(table_[b]);
779
      auto tree_it = tree->find(k);
780
      if (it != nullptr) *it = tree_it;
781
      if (tree_it != tree->end()) {
782
        return {tree_it->second, b};
783
      }
784
    }
785
    return {nullptr, b};
786
  }
787
788
  // Insert the given Node in bucket b.  If that would make bucket b too big,
789
  // and bucket b is not a tree, create a tree for bucket b.
790
  // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct
791
  // bucket.  num_elements_ is not modified.
792
  void InsertUnique(size_type b, KeyNode* node) {
793
    ABSL_DCHECK(index_of_first_non_null_ == num_buckets_ ||
794
                !TableEntryIsEmpty(index_of_first_non_null_));
795
    // In practice, the code that led to this point may have already
796
    // determined whether we are inserting into an empty list, a short list,
797
    // or whatever.  But it's probably cheap enough to recompute that here;
798
    // it's likely that we're inserting into an empty or short list.
799
    ABSL_DCHECK(FindHelper(node->key()).node == nullptr);
800
    if (TableEntryIsEmpty(b)) {
801
      InsertUniqueInList(b, node);
802
      index_of_first_non_null_ = (std::min)(index_of_first_non_null_, b);
803
    } else if (TableEntryIsNonEmptyList(b) && !TableEntryIsTooLong(b)) {
804
      InsertUniqueInList(b, node);
805
    } else {
806
      if (TableEntryIsNonEmptyList(b)) {
807
        TreeConvert(b);
808
      }
809
      ABSL_DCHECK(TableEntryIsTree(b))
810
          << (void*)table_[b] << " " << (uintptr_t)table_[b];
811
      InsertUniqueInTree(b, node);
812
      index_of_first_non_null_ = (std::min)(index_of_first_non_null_, b);
813
    }
814
  }
815
816
  // Helper for InsertUnique.  Handles the case where bucket b points to a
817
  // Tree.
818
  void InsertUniqueInTree(size_type b, KeyNode* node) {
819
    auto* tree = TableEntryToTree<Tree>(table_[b]);
820
    auto it = tree->insert({node->key(), node}).first;
821
    // Maintain the linked list of the nodes in the tree.
822
    // For simplicity, they are in the same order as the tree iteration.
823
    if (it != tree->begin()) {
824
      auto* prev = std::prev(it)->second;
825
      prev->next = node;
826
    }
827
    auto next = std::next(it);
828
    node->next = next != tree->end() ? next->second : nullptr;
829
  }
830
831
  // Returns whether it did resize.  Currently this is only used when
832
  // num_elements_ increases, though it could be used in other situations.
833
  // It checks for load too low as well as load too high: because any number
834
  // of erases can occur between inserts, the load could be as low as 0 here.
835
  // Resizing to a lower size is not always helpful, but failing to do so can
836
  // destroy the expected big-O bounds for some operations. By having the
837
  // policy that sometimes we resize down as well as up, clients can easily
838
  // keep O(size()) = O(number of buckets) if they want that.
839
  bool ResizeIfLoadIsOutOfRange(size_type new_size) {
840
    const size_type kMaxMapLoadTimes16 = 12;  // controls RAM vs CPU tradeoff
841
    const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;
842
    const size_type lo_cutoff = hi_cutoff / 4;
843
    // We don't care how many elements are in trees.  If a lot are,
844
    // we may resize even though there are many empty buckets.  In
845
    // practice, this seems fine.
846
    if (PROTOBUF_PREDICT_FALSE(new_size >= hi_cutoff)) {
847
      if (num_buckets_ <= max_size() / 2) {
848
        Resize(num_buckets_ * 2);
849
        return true;
850
      }
851
    } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff &&
852
                                      num_buckets_ > kMinTableSize)) {
853
      size_type lg2_of_size_reduction_factor = 1;
854
      // It's possible we want to shrink a lot here... size() could even be 0.
855
      // So, estimate how much to shrink by making sure we don't shrink so
856
      // much that we would need to grow the table after a few inserts.
857
      const size_type hypothetical_size = new_size * 5 / 4 + 1;
858
      while ((hypothetical_size << lg2_of_size_reduction_factor) < hi_cutoff) {
859
        ++lg2_of_size_reduction_factor;
860
      }
861
      size_type new_num_buckets = std::max<size_type>(
862
          kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
863
      if (new_num_buckets != num_buckets_) {
864
        Resize(new_num_buckets);
865
        return true;
866
      }
867
    }
868
    return false;
869
  }
870
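A concrete reading of the thresholds above for num_buckets_ == 16, as a worked example:

//   hi_cutoff = 16 * 12 / 16 = 12  -> inserting the 12th element (new_size == 12)
//                                     doubles the table to 32 buckets (~0.75 max load).
//   lo_cutoff = 12 / 4       = 3   -> if erases have left only 2 elements, the next
//                                     insert sees new_size == 3 and shrinks the table
//                                     back to kMinTableSize (8).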
871
  // Resize to the given number of buckets.
872
  void Resize(size_t new_num_buckets) {
873
    if (num_buckets_ == kGlobalEmptyTableSize) {
874
      // This is the global empty array.
875
      // Just overwrite with a new one. No need to transfer or free anything.
876
      num_buckets_ = index_of_first_non_null_ = kMinTableSize;
877
      table_ = CreateEmptyTable(num_buckets_);
878
      seed_ = Seed();
879
      return;
880
    }
881
882
    ABSL_DCHECK_GE(new_num_buckets, kMinTableSize);
883
    const auto old_table = table_;
884
    const size_type old_table_size = num_buckets_;
885
    num_buckets_ = new_num_buckets;
886
    table_ = CreateEmptyTable(num_buckets_);
887
    const size_type start = index_of_first_non_null_;
888
    index_of_first_non_null_ = num_buckets_;
889
    for (size_type i = start; i < old_table_size; ++i) {
890
      if (internal::TableEntryIsNonEmptyList(old_table[i])) {
891
        TransferList(static_cast<KeyNode*>(TableEntryToNode(old_table[i])));
892
      } else if (internal::TableEntryIsTree(old_table[i])) {
893
        TransferTree(TableEntryToTree<Tree>(old_table[i]));
894
      }
895
    }
896
    DeleteTable(old_table, old_table_size);
897
  }
898
899
  // Transfer all nodes in the list `node` into `this`.
900
  void TransferList(KeyNode* node) {
901
    do {
902
      auto* next = static_cast<KeyNode*>(node->next);
903
      InsertUnique(BucketNumber(node->key()), node);
904
      node = next;
905
    } while (node != nullptr);
906
  }
907
908
  // Transfer all nodes in the tree `tree` into `this` and destroy the tree.
909
  void TransferTree(Tree* tree) {
910
    auto* node = tree->begin()->second;
911
    DestroyTree(tree);
912
    TransferList(static_cast<KeyNode*>(node));
913
  }
914
915
  void TreeConvert(size_type b) {
916
    ABSL_DCHECK(!TableEntryIsTree(b));
917
    Tree* tree =
918
        Arena::Create<Tree>(alloc_.arena(), typename Tree::key_compare(),
919
                            typename Tree::allocator_type(alloc_));
920
    size_type count = CopyListToTree(b, tree);
921
    ABSL_DCHECK_EQ(count, tree->size());
922
    table_[b] = TreeToTableEntry(tree);
923
    // Relink the nodes.
924
    NodeBase* next = nullptr;
925
    auto it = tree->end();
926
    do {
927
      auto* node = (--it)->second;
928
      node->next = next;
929
      next = node;
930
    } while (it != tree->begin());
931
  }
932
933
  // Copy a linked list in the given bucket to a tree.
934
  // Returns the number of nodes it copied.
935
  size_type CopyListToTree(size_type b, Tree* tree) {
936
    size_type count = 0;
937
    auto* node = TableEntryToNode(table_[b]);
938
    while (node != nullptr) {
939
      tree->insert({static_cast<KeyNode*>(node)->key(), node});
940
      ++count;
941
      auto* next = node->next;
942
      node->next = nullptr;
943
      node = next;
944
    }
945
    return count;
946
  }
947
948
  template <typename K>
949
  size_type BucketNumber(const K& k) const {
950
    // We xor the hash value against the random seed so that we effectively
951
    // have a random hash function.
952
    uint64_t h = hash_function()(k) ^ seed_;
953
954
    // We use the multiplication method to determine the bucket number from
955
    // the hash value. The constant kPhi (suggested by Knuth) is roughly
956
    // (sqrt(5) - 1) / 2 * 2^64.
957
    constexpr uint64_t kPhi = uint64_t{0x9e3779b97f4a7c15};
958
    return ((kPhi * h) >> 32) & (num_buckets_ - 1);
959
  }
960
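A worked instance of the multiplicative step above (64-bit wrap-around arithmetic), for a seed-adjusted hash h == 1 and 8 buckets:

//   kPhi * h         = 0x9e3779b97f4a7c15
//   (kPhi * h) >> 32 = 0x9e3779b9
//   ... & (8 - 1)    = 0x9e3779b9 & 0x7 = 1   -> bucket 1
// Multiplying by kPhi mixes the low bits of h into the high word before the
// mask picks the bucket, so nearby hash values spread across buckets.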
961
  void DestroyTree(Tree* tree) {
962
    if (alloc_.arena() == nullptr) {
963
      delete tree;
964
    }
965
  }
966
967
  // Assumes node_ and m_ are correct and non-null, but other fields may be
968
  // stale.  Fix them as needed.  Then return true iff node_ points to a
969
  // Node in a list.  If false is returned then *it is modified to be
970
  // a valid iterator for node_.
971
  bool revalidate_if_necessary(size_t& bucket_index, KeyNode* node,
972
                               TreeIterator* it) const {
973
    // Force bucket_index to be in range.
974
    bucket_index &= (num_buckets_ - 1);
975
    // Common case: the bucket we think is relevant points to `node`.
976
    if (table_[bucket_index] == NodeToTableEntry(node)) return true;
977
    // Less common: the bucket is a linked list with node_ somewhere in it,
978
    // but not at the head.
979
    if (TableEntryIsNonEmptyList(bucket_index)) {
980
      auto* l = TableEntryToNode(table_[bucket_index]);
981
      while ((l = l->next) != nullptr) {
982
        if (l == node) {
983
          return true;
984
        }
985
      }
986
    }
987
    // Well, bucket_index_ still might be correct, but probably
988
    // not.  Revalidate just to be sure.  This case is rare enough that we
989
    // don't worry about potential optimizations, such as having a custom
990
    // find-like method that compares Node* instead of the key.
991
    auto res = FindHelper(node->key(), it);
992
    bucket_index = res.bucket;
993
    return TableEntryIsList(bucket_index);
994
  }
995
};
996
997
}  // namespace internal
998
999
// This is the class for Map's internal value_type.
1000
template <typename Key, typename T>
1001
using MapPair = std::pair<const Key, T>;
1002
1003
// Map is an associative container type used to store protobuf map
1004
// fields.  Each Map instance may or may not use a different hash function, a
1005
// different iteration order, and so on.  E.g., please don't examine
1006
// implementation details to decide if the following would work:
1007
//  Map<int, int> m0, m1;
1008
//  m0[0] = m1[0] = m0[1] = m1[1] = 0;
1009
//  assert(m0.begin()->first == m1.begin()->first);  // Bug!
1010
//
1011
// Map's interface is similar to std::unordered_map, except that Map is not
1012
// designed to play well with exceptions.
1013
template <typename Key, typename T>
1014
class Map : private internal::KeyMapBase<internal::KeyForBase<Key>> {
1015
  using Base = typename Map::KeyMapBase;
1016
1017
 public:
1018
  using key_type = Key;
1019
  using mapped_type = T;
1020
  using init_type = std::pair<Key, T>;
1021
  using value_type = MapPair<Key, T>;
1022
1023
  using pointer = value_type*;
1024
  using const_pointer = const value_type*;
1025
  using reference = value_type&;
1026
  using const_reference = const value_type&;
1027
1028
  using size_type = size_t;
1029
  using hasher = typename internal::TransparentSupport<Key>::hash;
1030
1031
  constexpr Map() : Base(nullptr) { StaticValidityCheck(); }
1032
  explicit Map(Arena* arena) : Base(arena) { StaticValidityCheck(); }
1033
1034
  Map(const Map& other) : Map() { insert(other.begin(), other.end()); }
1035
1036
  Map(Map&& other) noexcept : Map() {
1037
    if (other.arena() != nullptr) {
1038
      *this = other;
1039
    } else {
1040
      swap(other);
1041
    }
1042
  }
1043
1044
  Map& operator=(Map&& other) noexcept {
1045
    if (this != &other) {
1046
      if (arena() != other.arena()) {
1047
        *this = other;
1048
      } else {
1049
        swap(other);
1050
      }
1051
    }
1052
    return *this;
1053
  }
1054
1055
  template <class InputIt>
1056
  Map(const InputIt& first, const InputIt& last) : Map() {
1057
    insert(first, last);
1058
  }
1059
1060
  ~Map() {
1061
    // Fail-safe in case we miss calling this in a constructor.  Note: this one
1062
    // won't trigger for leaked maps that never get destructed.
1063
    StaticValidityCheck();
1064
1065
    if (this->alloc_.arena() == nullptr &&
1066
        this->num_buckets_ != internal::kGlobalEmptyTableSize) {
1067
      clear();
1068
      this->DeleteTable(this->table_, this->num_buckets_);
1069
    }
1070
  }
1071
1072
 private:
1073
  static_assert(!std::is_const<mapped_type>::value &&
1074
                    !std::is_const<key_type>::value,
1075
                "We do not support const types.");
1076
  static_assert(!std::is_volatile<mapped_type>::value &&
1077
                    !std::is_volatile<key_type>::value,
1078
                "We do not support volatile types.");
1079
  static_assert(!std::is_pointer<mapped_type>::value &&
1080
                    !std::is_pointer<key_type>::value,
1081
                "We do not support pointer types.");
1082
  static_assert(!std::is_reference<mapped_type>::value &&
1083
                    !std::is_reference<key_type>::value,
1084
                "We do not support reference types.");
1085
  static constexpr PROTOBUF_ALWAYS_INLINE void StaticValidityCheck() {
1086
    static_assert(alignof(internal::NodeBase) >= alignof(mapped_type),
1087
                  "Alignment of mapped type is too high.");
1088
    static_assert(
1089
        absl::disjunction<internal::is_supported_integral_type<key_type>,
1090
                          internal::is_supported_string_type<key_type>,
1091
                          internal::is_internal_map_key_type<key_type>>::value,
1092
        "We only support integer, string, or designated internal key "
1093
        "types.");
1094
    static_assert(absl::disjunction<
1095
                      internal::is_supported_scalar_type<mapped_type>,
1096
                      is_proto_enum<mapped_type>,
1097
                      internal::is_supported_message_type<mapped_type>,
1098
                      internal::is_internal_map_value_type<mapped_type>>::value,
1099
                  "We only support scalar, Message, and designated internal "
1100
                  "mapped types.");
1101
  }
1102
1103
  template <typename P>
1104
  struct SameAsElementReference
1105
      : std::is_same<typename std::remove_cv<
1106
                         typename std::remove_reference<reference>::type>::type,
1107
                     typename std::remove_cv<
1108
                         typename std::remove_reference<P>::type>::type> {};
1109
1110
  template <class P>
1111
  using RequiresInsertable =
1112
      typename std::enable_if<std::is_convertible<P, init_type>::value ||
1113
                                  SameAsElementReference<P>::value,
1114
                              int>::type;
1115
  template <class P>
1116
  using RequiresNotInit =
1117
      typename std::enable_if<!std::is_same<P, init_type>::value, int>::type;
1118
1119
  template <typename LookupKey>
1120
  using key_arg = typename internal::TransparentSupport<
1121
      key_type>::template key_arg<LookupKey>;
1122
1123
 public:
1124
  // Iterators
1125
  class const_iterator : private Base::KeyIteratorBase {
1126
    using BaseIt = typename Base::KeyIteratorBase;
1127
1128
   public:
1129
    using iterator_category = std::forward_iterator_tag;
1130
    using value_type = typename Map::value_type;
1131
    using difference_type = ptrdiff_t;
1132
    using pointer = const value_type*;
1133
    using reference = const value_type&;
1134
1135
    const_iterator() {}
1136
    const_iterator(const const_iterator&) = default;
1137
    const_iterator& operator=(const const_iterator&) = default;
1138
1139
    reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
1140
    pointer operator->() const { return &(operator*()); }
1141
1142
    const_iterator& operator++() {
1143
      this->PlusPlus();
1144
      return *this;
1145
    }
1146
    const_iterator operator++(int) {
1147
      auto copy = *this;
1148
      this->PlusPlus();
1149
      return copy;
1150
    }
1151
1152
    friend bool operator==(const const_iterator& a, const const_iterator& b) {
1153
      return a.Equals(b);
1154
    }
1155
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
1156
      return !a.Equals(b);
1157
    }
1158
1159
   private:
1160
    using BaseIt::BaseIt;
1161
    explicit const_iterator(const BaseIt& base) : BaseIt(base) {}
1162
    friend class Map;
1163
  };
1164
1165
  class iterator : private Base::KeyIteratorBase {
1166
    using BaseIt = typename Base::KeyIteratorBase;
1167
1168
   public:
1169
    using iterator_category = std::forward_iterator_tag;
1170
    using value_type = typename Map::value_type;
1171
    using difference_type = ptrdiff_t;
1172
    using pointer = value_type*;
1173
    using reference = value_type&;
1174
1175
    iterator() {}
1176
    iterator(const iterator&) = default;
1177
    iterator& operator=(const iterator&) = default;
1178
1179
    reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
1180
    pointer operator->() const { return &(operator*()); }
1181
1182
    iterator& operator++() {
1183
      this->PlusPlus();
1184
      return *this;
1185
    }
1186
    iterator operator++(int) {
1187
      auto copy = *this;
1188
      this->PlusPlus();
1189
      return copy;
1190
    }
1191
1192
    // Allow implicit conversion to const_iterator.
1193
    operator const_iterator() const {  // NOLINT(runtime/explicit)
1194
      return const_iterator(static_cast<const BaseIt&>(*this));
1195
    }
1196
1197
    friend bool operator==(const iterator& a, const iterator& b) {
1198
      return a.Equals(b);
1199
    }
1200
    friend bool operator!=(const iterator& a, const iterator& b) {
1201
      return !a.Equals(b);
1202
    }
1203
1204
   private:
1205
    using BaseIt::BaseIt;
1206
    friend class Map;
1207
  };
1208
1209
  iterator begin() { return iterator(this); }
1210
  iterator end() { return iterator(); }
1211
  const_iterator begin() const { return const_iterator(this); }
1212
  const_iterator end() const { return const_iterator(); }
1213
  const_iterator cbegin() const { return begin(); }
1214
  const_iterator cend() const { return end(); }
1215
1216
  using Base::empty;
1217
  using Base::size;
1218
1219
  // Element access
1220
  template <typename K = key_type>
1221
  T& operator[](const key_arg<K>& key) {
1222
    return try_emplace(key).first->second;
1223
  }
1224
  template <
1225
      typename K = key_type,
1226
      // Disable for integral types to reduce code bloat.
1227
      typename = typename std::enable_if<!std::is_integral<K>::value>::type>
1228
  T& operator[](key_arg<K>&& key) {
1229
    return try_emplace(std::forward<K>(key)).first->second;
1230
  }
1231
1232
  template <typename K = key_type>
1233
  const T& at(const key_arg<K>& key) const {
1234
    const_iterator it = find(key);
1235
    ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
1236
    return it->second;
1237
  }
1238
1239
  template <typename K = key_type>
1240
  T& at(const key_arg<K>& key) {
1241
    iterator it = find(key);
1242
    ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
1243
    return it->second;
1244
  }
1245
1246
  // Lookup
1247
  template <typename K = key_type>
1248
  size_type count(const key_arg<K>& key) const {
1249
    return find(key) == end() ? 0 : 1;
1250
  }
1251
1252
  template <typename K = key_type>
1253
  const_iterator find(const key_arg<K>& key) const {
1254
    return const_cast<Map*>(this)->find(key);
1255
  }
1256
  template <typename K = key_type>
1257
  iterator find(const key_arg<K>& key) {
1258
    auto res = this->FindHelper(key);
1259
    return iterator(static_cast<Node*>(res.node), this, res.bucket);
1260
  }
1261
1262
  template <typename K = key_type>
1263
  bool contains(const key_arg<K>& key) const {
1264
    return find(key) != end();
1265
  }
1266
1267
  template <typename K = key_type>
1268
  std::pair<const_iterator, const_iterator> equal_range(
1269
      const key_arg<K>& key) const {
1270
    const_iterator it = find(key);
1271
    if (it == end()) {
1272
      return std::pair<const_iterator, const_iterator>(it, it);
1273
    } else {
1274
      const_iterator begin = it++;
1275
      return std::pair<const_iterator, const_iterator>(begin, it);
1276
    }
1277
  }
1278
1279
  template <typename K = key_type>
1280
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
1281
    iterator it = find(key);
1282
    if (it == end()) {
1283
      return std::pair<iterator, iterator>(it, it);
1284
    } else {
1285
      iterator begin = it++;
1286
      return std::pair<iterator, iterator>(begin, it);
1287
    }
1288
  }
1289
1290
  // insert
1291
  template <typename K, typename... Args>
1292
  std::pair<iterator, bool> try_emplace(K&& k, Args&&... args) {
1293
    // Inserts a new element into the container if there is no element with the
1294
    // key in the container.
1295
    // The new element is:
1296
    //  (1) Constructed in-place with the given args, if mapped_type is not
1297
    //      arena constructible.
1298
    //  (2) Constructed in-place with the arena and then assigned with a
1299
    //      mapped_type temporary constructed with the given args, otherwise.
1300
    return ArenaAwareTryEmplace(Arena::is_arena_constructable<mapped_type>(),
1301
                                std::forward<K>(k),
1302
                                std::forward<Args>(args)...);
1303
  }
1304
  std::pair<iterator, bool> insert(init_type&& value) {
1305
    return try_emplace(std::move(value.first), std::move(value.second));
1306
  }
1307
  template <typename P, RequiresInsertable<P> = 0>
1308
  std::pair<iterator, bool> insert(P&& value) {
1309
    return try_emplace(std::forward<P>(value).first,
1310
                       std::forward<P>(value).second);
1311
  }
1312
  template <typename... Args>
1313
  std::pair<iterator, bool> emplace(Args&&... args) {
1314
    return EmplaceInternal(Rank0{}, std::forward<Args>(args)...);
1315
  }
1316
  template <class InputIt>
1317
  void insert(InputIt first, InputIt last) {
1318
    for (; first != last; ++first) {
1319
      auto&& pair = *first;
1320
      try_emplace(pair.first, pair.second);
1321
    }
1322
  }
1323
  void insert(std::initializer_list<init_type> values) {
1324
    insert(values.begin(), values.end());
1325
  }
1326
  template <typename P, RequiresNotInit<P> = 0,
1327
            RequiresInsertable<const P&> = 0>
1328
  void insert(std::initializer_list<P> values) {
1329
    insert(values.begin(), values.end());
1330
  }
1331
1332
  // Erase and clear
1333
  template <typename K = key_type>
1334
  size_type erase(const key_arg<K>& key) {
1335
    iterator it = find(key);
1336
    if (it == end()) {
1337
      return 0;
1338
    } else {
1339
      erase(it);
1340
      return 1;
1341
    }
1342
  }
1343
1344
  iterator erase(iterator pos) {
1345
    auto next = std::next(pos);
1346
    ABSL_DCHECK_EQ(pos.m_, static_cast<Base*>(this));
1347
    auto* node = static_cast<Node*>(pos.node_);
1348
    this->erase_no_destroy(pos.bucket_index_, node);
1349
    DestroyNode(node);
1350
    return next;
1351
  }
1352
1353
  void erase(iterator first, iterator last) {
1354
    while (first != last) {
1355
      first = erase(first);
1356
    }
1357
  }
1358
1359
  void clear() {
1360
    for (size_type b = 0; b < this->num_buckets_; b++) {
1361
      internal::NodeBase* node;
1362
      if (this->TableEntryIsNonEmptyList(b)) {
1363
        node = internal::TableEntryToNode(this->table_[b]);
1364
        this->table_[b] = TableEntryPtr{};
1365
      } else if (this->TableEntryIsTree(b)) {
1366
        Tree* tree = internal::TableEntryToTree<Tree>(this->table_[b]);
1367
        this->table_[b] = TableEntryPtr{};
1368
        node = NodeFromTreeIterator(tree->begin());
1369
        this->DestroyTree(tree);
1370
      } else {
1371
        continue;
1372
      }
1373
      do {
1374
        auto* next = node->next;
1375
        DestroyNode(static_cast<Node*>(node));
1376
        node = next;
1377
      } while (node != nullptr);
1378
    }
1379
    this->num_elements_ = 0;
1380
    this->index_of_first_non_null_ = this->num_buckets_;
1381
  }
1382
1383
  // Assign
1384
  Map& operator=(const Map& other) {
1385
    if (this != &other) {
1386
      clear();
1387
      insert(other.begin(), other.end());
1388
    }
1389
    return *this;
1390
  }
1391
1392
  void swap(Map& other) {
1393
    if (arena() == other.arena()) {
1394
      InternalSwap(&other);
1395
    } else {
1396
      // TODO(zuguang): optimize this. The temporary copy can be allocated
1397
      // in the same arena as the other message, and the "other = copy" can
1398
      // be replaced with the fast-path swap above.
1399
      Map copy = *this;
1400
      *this = other;
1401
      other = copy;
1402
    }
1403
  }
1404
1405
  void InternalSwap(Map* other) { this->Swap(other); }
1406
1407
  hasher hash_function() const { return {}; }
1408
1409
  size_t SpaceUsedExcludingSelfLong() const {
1410
    if (empty()) return 0;
1411
    return SpaceUsedInternal() + internal::SpaceUsedInValues(this);
1412
  }
1413
1414
 private:
1415
  struct Rank1 {};
1416
  struct Rank0 : Rank1 {};
1417
1418
  // Linked-list nodes, as one would expect for a chaining hash table.
1419
  struct Node : Base::KeyNode {
1420
    value_type kv;
1421
  };
1422
1423
  using Tree = internal::TreeForMap<Key>;
1424
  using TreeIterator = typename Tree::iterator;
1425
  using TableEntryPtr = internal::TableEntryPtr;
1426
1427
  static Node* NodeFromTreeIterator(TreeIterator it) {
1428
    static_assert(
1429
        PROTOBUF_FIELD_OFFSET(Node, kv.first) == Base::KeyNode::kOffset, "");
1430
    static_assert(alignof(Node) == alignof(internal::NodeBase), "");
1431
    return static_cast<Node*>(it->second);
1432
  }
1433
1434
  void DestroyNode(Node* node) {
1435
    if (this->alloc_.arena() == nullptr) {
1436
      node->kv.first.~key_type();
1437
      node->kv.second.~mapped_type();
1438
      this->DeallocNode(node, sizeof(Node));
1439
    }
1440
  }
1441
1442
  size_t SpaceUsedInternal() const {
1443
    return internal::SpaceUsedInTable<Key>(this->table_, this->num_buckets_,
1444
                                           this->num_elements_, sizeof(Node));
1445
  }
1446
1447
  // We try to construct `init_type` from `Args`, falling back to
1448
  // `value_type`. The latter is less desirable, as it unconditionally makes a copy
1449
  // of `value_type::first`.
1450
  template <typename... Args>
1451
  auto EmplaceInternal(Rank0, Args&&... args) ->
1452
      typename std::enable_if<std::is_constructible<init_type, Args...>::value,
1453
                              std::pair<iterator, bool>>::type {
1454
    return insert(init_type(std::forward<Args>(args)...));
1455
  }
1456
  template <typename... Args>
1457
  std::pair<iterator, bool> EmplaceInternal(Rank1, Args&&... args) {
1458
    return insert(value_type(std::forward<Args>(args)...));
1459
  }
1460
1461
  template <typename K, typename... Args>
1462
  std::pair<iterator, bool> TryEmplaceInternal(K&& k, Args&&... args) {
1463
    auto p = this->FindHelper(k);
1464
    // Case 1: key was already present.
1465
    if (p.node != nullptr)
1466
      return std::make_pair(
1467
          iterator(static_cast<Node*>(p.node), this, p.bucket), false);
1468
    // Case 2: insert.
1469
    if (this->ResizeIfLoadIsOutOfRange(this->num_elements_ + 1)) {
1470
      p = this->FindHelper(k);
1471
    }
1472
    const size_type b = p.bucket;  // bucket number
1473
    // If K is not key_type, make the conversion to key_type explicit.
1474
    using TypeToInit = typename std::conditional<
1475
        std::is_same<typename std::decay<K>::type, key_type>::value, K&&,
1476
        key_type>::type;
1477
    Node* node = static_cast<Node*>(this->AllocNode(sizeof(Node)));
1478
    // Even when arena is nullptr, CreateInArenaStorage is still used to
1479
    // ensure the arena of the submessage will be consistent. Otherwise, the
1480
    // submessage may have its own arena when message-owned arenas are enabled.
1481
    // Note: This only works if `Key` is not arena constructible.
1482
    Arena::CreateInArenaStorage(const_cast<Key*>(&node->kv.first),
1483
                                this->alloc_.arena(),
1484
                                static_cast<TypeToInit>(std::forward<K>(k)));
1485
    // Note: if `T` is arena constructible, `Args` needs to be empty.
1486
    Arena::CreateInArenaStorage(&node->kv.second, this->alloc_.arena(),
1487
                                std::forward<Args>(args)...);
1488
1489
    this->InsertUnique(b, node);
1490
    ++this->num_elements_;
1491
    return std::make_pair(iterator(node, this, b), true);
1492
  }
1493
1494
  // A helper function to perform an assignment of `mapped_type`.
1495
  // If the first argument is true, then it is a regular assignment.
1496
  // Otherwise, we first create a temporary and then perform an assignment.
1497
  template <typename V>
1498
  static void AssignMapped(std::true_type, mapped_type& mapped, V&& v) {
1499
    mapped = std::forward<V>(v);
1500
  }
1501
  template <typename... Args>
1502
  static void AssignMapped(std::false_type, mapped_type& mapped,
1503
                           Args&&... args) {
1504
    mapped = mapped_type(std::forward<Args>(args)...);
1505
  }
1506
1507
  // Case 1: `mapped_type` is arena constructible. A temporary object is
1508
  // created and then (if `Args` are not empty) assigned to a mapped value
1509
  // that was created with the arena.
1510
  template <typename K>
1511
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k) {
1512
    // case 1.1: "default" constructed (e.g. from arena only).
1513
    return TryEmplaceInternal(std::forward<K>(k));
1514
  }
1515
  template <typename K, typename... Args>
1516
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k,
1517
                                                 Args&&... args) {
1518
    // case 1.2: "default" constructed + copy/move assignment
1519
    auto p = TryEmplaceInternal(std::forward<K>(k));
1520
    if (p.second) {
1521
      AssignMapped(std::is_same<void(typename std::decay<Args>::type...),
1522
                                void(mapped_type)>(),
1523
                   p.first->second, std::forward<Args>(args)...);
1524
    }
1525
    return p;
1526
  }
1527
  // Case 2: `mapped_type` is not arena constructible. Using in-place
1528
  // construction.
1529
  template <typename... Args>
1530
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::false_type,
1531
                                                 Args&&... args) {
1532
    return TryEmplaceInternal(std::forward<Args>(args)...);
1533
  }
1534
1535
  using Base::arena;
1536
1537
  friend class Arena;
1538
  template <typename, typename>
1539
  friend class internal::TypeDefinedMapFieldBase;
1540
  using InternalArenaConstructable_ = void;
1541
  using DestructorSkippable_ = void;
1542
  template <typename Derived, typename K, typename V,
1543
            internal::WireFormatLite::FieldType key_wire_type,
1544
            internal::WireFormatLite::FieldType value_wire_type>
1545
  friend class internal::MapFieldLite;
1546
};
1547
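A short usage sketch of the public interface defined above; per the class comment, iteration order is unspecified and may differ between otherwise identical maps. Illustrative only:

// Illustrative sketch (not part of map.h): basic use of the public interface.
inline void MapUsageSketch() {
  google::protobuf::Map<std::string, int> counts;
  counts["apples"] = 3;             // operator[] default-constructs, then assigns
  counts.try_emplace("pears", 7);   // no-op if the key is already present
  if (counts.contains("apples")) {
    counts.at("apples") += 1;       // at() CHECK-fails on a missing key
  }
  for (const auto& kv : counts) {
    (void)kv;                       // kv.first is the key; order is unspecified
  }
  counts.erase("pears");            // returns the number of elements removed (0 or 1)
}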
1548
namespace internal {
1549
template <typename... T>
1550
PROTOBUF_NOINLINE void MapMergeFrom(Map<T...>& dest, const Map<T...>& src) {
1551
  for (const auto& elem : src) {
1552
    dest[elem.first] = elem.second;
1553
  }
1554
}
1555
}  // namespace internal
1556
1557
}  // namespace protobuf
1558
}  // namespace google
1559
1560
#include "google/protobuf/port_undef.inc"
1561
1562
#endif  // GOOGLE_PROTOBUF_MAP_H__