Coverage Report

Created: 2025-07-12 06:42

/src/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file defines the map container and its helpers to support protobuf
// maps.
//
// The Map and MapIterator types are provided by this header file.
// Please avoid using other types defined here, unless they are public
// types within Map or MapIterator, such as Map::value_type.

#ifndef GOOGLE_PROTOBUF_MAP_H__
#define GOOGLE_PROTOBUF_MAP_H__

#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>  // To support Visual Studio 2008
#include <map>
#include <string>
#include <type_traits>
#include <utility>

#if defined(__cpp_lib_string_view)
#include <string_view>
#endif  // defined(__cpp_lib_string_view)

#include <google/protobuf/stubs/common.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/generated_enum_util.h>
#include <google/protobuf/map_type_handler.h>
#include <google/protobuf/stubs/hash.h>

#ifdef SWIG
#error "You cannot SWIG proto headers"
#endif

#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {

template <typename Key, typename T>
class Map;

class MapIterator;

template <typename Enum>
struct is_proto_enum;

namespace internal {
template <typename Derived, typename Key, typename T,
          WireFormatLite::FieldType key_wire_type,
          WireFormatLite::FieldType value_wire_type>
class MapFieldLite;

template <typename Derived, typename Key, typename T,
          WireFormatLite::FieldType key_wire_type,
          WireFormatLite::FieldType value_wire_type>
class MapField;

template <typename Key, typename T>
class TypeDefinedMapFieldBase;

class DynamicMapField;

class GeneratedMessageReflection;

// Re-implements std::allocator so that memory can be drawn from an arena.
// Used for the Map implementation. Users should not use this class
// directly.
template <typename U>
class MapAllocator {
 public:
  using value_type = U;
  using pointer = value_type*;
  using const_pointer = const value_type*;
  using reference = value_type&;
  using const_reference = const value_type&;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  constexpr MapAllocator() : arena_(nullptr) {}
  explicit constexpr MapAllocator(Arena* arena) : arena_(arena) {}
  template <typename X>
  MapAllocator(const MapAllocator<X>& allocator)  // NOLINT(runtime/explicit)
      : arena_(allocator.arena()) {}

  pointer allocate(size_type n, const void* /* hint */ = nullptr) {
    // If no arena is given, raw memory is obtained with ::operator new,
    // which does not construct the element objects.
    if (arena_ == nullptr) {
      return static_cast<pointer>(::operator new(n * sizeof(value_type)));
    } else {
      return reinterpret_cast<pointer>(
          Arena::CreateArray<uint8>(arena_, n * sizeof(value_type)));
    }
  }

  void deallocate(pointer p, size_type n) {
    if (arena_ == nullptr) {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
      ::operator delete(p, n * sizeof(value_type));
#else
      (void)n;
      ::operator delete(p);
#endif
    }
  }

#if !defined(GOOGLE_PROTOBUF_OS_APPLE) && !defined(GOOGLE_PROTOBUF_OS_NACL) && \
    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
  template <class NodeType, class... Args>
  void construct(NodeType* p, Args&&... args) {
    // Clang 3.6 doesn't compile a static_cast to void* directly (Issue
    // #1266). According to the C++ standard, 5.2.9/1, "The static_cast
    // operator shall not cast away constness", so the possibly-const pointer
    // is first cast to const void*, and the constness is then removed with
    // const_cast.
    new (const_cast<void*>(static_cast<const void*>(p)))
        NodeType(std::forward<Args>(args)...);
  }

  template <class NodeType>
  void destroy(NodeType* p) {
    p->~NodeType();
  }
#else
  void construct(pointer p, const_reference t) { new (p) value_type(t); }

  void destroy(pointer p) { p->~value_type(); }
#endif

  template <typename X>
  struct rebind {
    using other = MapAllocator<X>;
  };

  template <typename X>
  bool operator==(const MapAllocator<X>& other) const {
    return arena_ == other.arena_;
  }

  template <typename X>
  bool operator!=(const MapAllocator<X>& other) const {
    return arena_ != other.arena_;
  }

  // To support Visual Studio 2008
  size_type max_size() const {
    // parentheses around (std::...:max) prevent the macro warning of max()
    return (std::numeric_limits<size_type>::max)();
  }

  // To support gcc-4.4, which does not properly
  // support templated friend classes
  Arena* arena() const { return arena_; }

 private:
  using DestructorSkippable_ = void;
  Arena* arena_;
};
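
// Editor's note: a minimal usage sketch of MapAllocator (not part of the
// original header; `arena` below is a hypothetical caller-owned arena).
// With no arena, memory comes from ::operator new and must be returned via
// deallocate(); with an arena, deallocate() is a no-op and the arena owns
// the memory.
//
//   Arena arena;
//   MapAllocator<int> heap_alloc;            // heap-backed
//   MapAllocator<int> arena_alloc(&arena);   // arena-backed
//   int* a = heap_alloc.allocate(4);
//   heap_alloc.deallocate(a, 4);             // actually frees
//   int* b = arena_alloc.allocate(4);
//   arena_alloc.deallocate(b, 4);            // no-op; arena reclaims later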

template <typename T>
using KeyForTree =
    typename std::conditional<std::is_scalar<T>::value, T,
                              std::reference_wrapper<const T>>::type;

// Default case: Not transparent.
// We use std::hash<key_type>/std::less<key_type> and all the lookup functions
// only accept `key_type`.
template <typename key_type>
struct TransparentSupport {
  using hash = std::hash<key_type>;
  using less = std::less<key_type>;

  static bool Equals(const key_type& a, const key_type& b) { return a == b; }

  template <typename K>
  using key_arg = key_type;
};

#if defined(__cpp_lib_string_view)
// If std::string_view is available, we add transparent support for std::string
// keys. We use std::hash<std::string_view> as it supports the input types we
// care about. The lookup functions accept arbitrary `K`. This will include any
// key type that is convertible to std::string_view.
template <>
struct TransparentSupport<std::string> {
  static std::string_view ImplicitConvert(std::string_view str) { return str; }
  // If the element is not convertible to std::string_view, try to convert to
  // std::string first.
  // The template makes this overload lose resolution when both have the same
  // rank otherwise.
  template <typename = void>
  static std::string_view ImplicitConvert(const std::string& str) {
    return str;
  }

  struct hash : private std::hash<std::string_view> {
    using is_transparent = void;

    template <typename T>
    size_t operator()(const T& str) const {
      return base()(ImplicitConvert(str));
    }

   private:
    const std::hash<std::string_view>& base() const { return *this; }
  };
  struct less {
    using is_transparent = void;

    template <typename T, typename U>
    bool operator()(const T& t, const U& u) const {
      return ImplicitConvert(t) < ImplicitConvert(u);
    }
  };

  template <typename T, typename U>
  static bool Equals(const T& t, const U& u) {
    return ImplicitConvert(t) == ImplicitConvert(u);
  }

  template <typename K>
  using key_arg = K;
};
#endif  // defined(__cpp_lib_string_view)
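
// Editor's note: an illustration of what the transparent specialization above
// buys (not part of the original header). With std::string keys, lookups may
// pass anything convertible to std::string_view without materializing a
// temporary std::string:
//
//   Map<std::string, int> m;
//   m["hello"] = 1;
//   std::string_view sv = "hello";
//   m.find(sv);       // hashes the view directly; no std::string is built
//   m.contains(sv);   // likewise, via the key_arg<K> forwarding alias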

template <typename Key>
using TreeForMap =
    std::map<KeyForTree<Key>, void*, typename TransparentSupport<Key>::less,
             MapAllocator<std::pair<const KeyForTree<Key>, void*>>>;

inline bool TableEntryIsEmpty(void* const* table, size_t b) {
  return table[b] == nullptr;
}
inline bool TableEntryIsNonEmptyList(void* const* table, size_t b) {
  return table[b] != nullptr && table[b] != table[b ^ 1];
}
inline bool TableEntryIsTree(void* const* table, size_t b) {
  return !TableEntryIsEmpty(table, b) && !TableEntryIsNonEmptyList(table, b);
}
inline bool TableEntryIsList(void* const* table, size_t b) {
  return !TableEntryIsTree(table, b);
}

// This captures all numeric types.
inline size_t MapValueSpaceUsedExcludingSelfLong(bool) { return 0; }
inline size_t MapValueSpaceUsedExcludingSelfLong(const std::string& str) {
  return StringSpaceUsedExcludingSelfLong(str);
}
template <typename T,
          typename = decltype(std::declval<const T&>().SpaceUsedLong())>
size_t MapValueSpaceUsedExcludingSelfLong(const T& message) {
  return message.SpaceUsedLong() - sizeof(T);
}

constexpr size_t kGlobalEmptyTableSize = 1;
PROTOBUF_EXPORT extern void* const kGlobalEmptyTable[kGlobalEmptyTableSize];

// Space used for the table, trees, and nodes.
// Does not include the indirectly owned space, e.g. the data of a std::string.
template <typename Key>
PROTOBUF_NOINLINE size_t SpaceUsedInTable(void** table, size_t num_buckets,
                                          size_t num_elements,
                                          size_t sizeof_node) {
  size_t size = 0;
  // The size of the table.
  size += sizeof(void*) * num_buckets;
  // All the nodes.
  size += sizeof_node * num_elements;
  // For each tree, count the overhead of those nodes.
  // Two buckets at a time because we only care about trees.
  for (size_t b = 0; b < num_buckets; b += 2) {
    if (internal::TableEntryIsTree(table, b)) {
      using Tree = TreeForMap<Key>;
      Tree* tree = static_cast<Tree*>(table[b]);
      // Estimated cost of the red-black tree nodes, 3 pointers plus a
      // bool (plus alignment, so 4 pointers).
      size += tree->size() *
              (sizeof(typename Tree::value_type) + sizeof(void*) * 4);
    }
  }
  return size;
}

template <typename Map,
          typename = typename std::enable_if<
              !std::is_scalar<typename Map::key_type>::value ||
              !std::is_scalar<typename Map::mapped_type>::value>::type>
size_t SpaceUsedInValues(const Map* map) {
  size_t size = 0;
  for (const auto& v : *map) {
    size += internal::MapValueSpaceUsedExcludingSelfLong(v.first) +
            internal::MapValueSpaceUsedExcludingSelfLong(v.second);
  }
  return size;
}

inline size_t SpaceUsedInValues(const void*) { return 0; }

}  // namespace internal

// This is the class for Map's internal value_type. Instead of using std::pair
// as value_type, we use this class, which gives us more control over its
// construction and destruction.
template <typename Key, typename T>
struct MapPair {
  using first_type = const Key;
  using second_type = T;

  MapPair(const Key& other_first, const T& other_second)
      : first(other_first), second(other_second) {}
  explicit MapPair(const Key& other_first) : first(other_first), second() {}
  explicit MapPair(Key&& other_first)
      : first(std::move(other_first)), second() {}
  MapPair(const MapPair& other) : first(other.first), second(other.second) {}

  ~MapPair() {}

  // Implicitly convertible to std::pair of compatible types.
  template <typename T1, typename T2>
  operator std::pair<T1, T2>() const {  // NOLINT(runtime/explicit)
    return std::pair<T1, T2>(first, second);
  }

  const Key first;
  T second;

 private:
  friend class Arena;
  friend class Map<Key, T>;
};

// Map is an associative container type used to store protobuf map
// fields.  Each Map instance may or may not use a different hash function, a
// different iteration order, and so on.  E.g., please don't examine
// implementation details to decide if the following would work:
//  Map<int, int> m0, m1;
//  m0[0] = m1[0] = m0[1] = m1[1] = 0;
//  assert(m0.begin()->first == m1.begin()->first);  // Bug!
//
// Map's interface is similar to std::unordered_map, except that Map is not
// designed to play well with exceptions.
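//
// Editor's note: a short usage sketch (not part of the original header),
// relying only on documented behavior; iteration order is never assumed:
//
//   Map<std::string, int> counts;
//   counts["apple"] += 1;           // operator[] value-initializes, then +=
//   counts.insert({{"pear", 2}});   // insert() does not overwrite existing keys
//   int total = 0;
//   for (const auto& kv : counts) { // unspecified order
//     total += kv.second;
//   }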
template <typename Key, typename T>
class Map {
 public:
  using key_type = Key;
  using mapped_type = T;
  using value_type = MapPair<Key, T>;

  using pointer = value_type*;
  using const_pointer = const value_type*;
  using reference = value_type&;
  using const_reference = const value_type&;

  using size_type = size_t;
  using hasher = typename internal::TransparentSupport<Key>::hash;

  constexpr Map() : elements_(nullptr) {}
  explicit Map(Arena* arena) : elements_(arena) {}

  Map(const Map& other) : Map() { insert(other.begin(), other.end()); }

  Map(Map&& other) noexcept : Map() {
    if (other.arena() != nullptr) {
      *this = other;
    } else {
      swap(other);
    }
  }

  Map& operator=(Map&& other) noexcept {
    if (this != &other) {
      if (arena() != other.arena()) {
        *this = other;
      } else {
        swap(other);
      }
    }
    return *this;
  }

  template <class InputIt>
  Map(const InputIt& first, const InputIt& last) : Map() {
    insert(first, last);
  }

  ~Map() {}

 private:
  using Allocator = internal::MapAllocator<void*>;

  // InnerMap is a generic hash-based map.  It doesn't contain any
  // protocol-buffer-specific logic.  It is a chaining hash map with the
  // additional feature that some buckets can be converted to use an ordered
  // container.  This ensures O(lg n) bounds on find, insert, and erase, while
  // avoiding the overheads of ordered containers most of the time.
  //
  // The implementation doesn't need the full generality of unordered_map,
  // and it doesn't have it.  More bells and whistles can be added as needed.
  // Some implementation details:
  // 1. The hash function has type hasher and the equality function
  //    equal_to<Key>.  We inherit from hasher to save space
  //    (empty-base-class optimization).
  // 2. The number of buckets is a power of two.
  // 3. Buckets are converted to trees in pairs: if we convert bucket b then
  //    buckets b and b^1 will share a tree.  Invariant: buckets b and b^1 have
  //    the same non-null value iff they are sharing a tree.  (An alternative
  //    implementation strategy would be to have a tag bit per bucket.)  See
  //    the sketch just below this comment.
  // 4. As is typical for hash_map and such, the Keys and Values are always
  //    stored in linked list nodes.  Pointers to elements are never invalidated
  //    until the element is deleted.
  // 5. The trees' payload type is pointer to linked-list node.  Tree-converting
  //    a bucket doesn't copy Key-Value pairs.
  // 6. Once we've tree-converted a bucket, it is never converted back. However,
  //    the items a tree contains may wind up assigned to trees or lists upon a
  //    rehash.
  // 7. The code requires no C++ features from C++14 or later.
  // 8. Mutations to a map do not invalidate the map's iterators, pointers to
  //    elements, or references to elements.
  // 9. Except for erase(iterator), any non-const method can reorder iterators.
  // 10. InnerMap uses KeyForTree<Key> when using the Tree representation, which
  //    is either `Key`, if Key is a scalar, or `reference_wrapper<const Key>`
  //    otherwise. This avoids unnecessary copies of string keys, for example.
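  //
  // Editor's note: a sketch of the bucket-pairing invariant in point 3 (not
  // part of the original header). With 8 buckets:
  //
  //   table_[0] -> Node chain     table_[1] -> Node chain    // two lists
  //   table_[2] == table_[3] -> Tree*                        // shared tree
  //
  // TableEntryIsTree(b) therefore reduces to: table_[b] != nullptr &&
  // table_[b] == table_[b ^ 1], with no per-bucket tag bit needed.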
  class InnerMap : private hasher {
   public:
    explicit constexpr InnerMap(Arena* arena)
        : hasher(),
          num_elements_(0),
          num_buckets_(internal::kGlobalEmptyTableSize),
          seed_(0),
          index_of_first_non_null_(internal::kGlobalEmptyTableSize),
          table_(const_cast<void**>(internal::kGlobalEmptyTable)),
          alloc_(arena) {}

    ~InnerMap() {
      if (alloc_.arena() == nullptr &&
          num_buckets_ != internal::kGlobalEmptyTableSize) {
        clear();
        Dealloc<void*>(table_, num_buckets_);
      }
    }

   private:
    enum { kMinTableSize = 8 };

    // Linked-list nodes, as one would expect for a chaining hash table.
    struct Node {
      value_type kv;
      Node* next;
    };

    // Trees. The payload type is a copy of Key, so that we can query the tree
    // with Keys that are not in any particular data structure.
    // The value is a void* pointing to Node. We use void* instead of Node* to
    // avoid code bloat. That way there is only one instantiation of the tree
    // class per key type.
    using Tree = internal::TreeForMap<Key>;
    using TreeIterator = typename Tree::iterator;

    static Node* NodeFromTreeIterator(TreeIterator it) {
      return static_cast<Node*>(it->second);
    }

    // iterator and const_iterator are instantiations of iterator_base.
    template <typename KeyValueType>
    class iterator_base {
     public:
      using reference = KeyValueType&;
      using pointer = KeyValueType*;

      // Invariants:
      // node_ is always correct. This is handy because the most common
      // operations are operator* and operator-> and they only use node_.
      // When node_ is set to a non-null value, all the other non-const fields
      // are updated to be correct also, but those fields can become stale
      // if the underlying map is modified.  When those fields are needed they
      // are rechecked, and updated if necessary.
      iterator_base() : node_(nullptr), m_(nullptr), bucket_index_(0) {}

      explicit iterator_base(const InnerMap* m) : m_(m) {
        SearchFrom(m->index_of_first_non_null_);
      }

      // Any iterator_base can convert to any other.  This is overkill, and we
      // rely on the enclosing class to use it wisely.  The standard "iterator
      // can convert to const_iterator" is OK but the reverse direction is not.
      template <typename U>
      explicit iterator_base(const iterator_base<U>& it)
          : node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {}

      iterator_base(Node* n, const InnerMap* m, size_type index)
          : node_(n), m_(m), bucket_index_(index) {}

      iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index)
          : node_(NodeFromTreeIterator(tree_it)), m_(m), bucket_index_(index) {
        // Invariant: iterators that use buckets with trees have an even
        // bucket_index_.
        GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0u);
      }

      // Advance through buckets, looking for the first that isn't empty.
      // If nothing non-empty is found then leave node_ == nullptr.
      void SearchFrom(size_type start_bucket) {
        GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
                      m_->table_[m_->index_of_first_non_null_] != nullptr);
        node_ = nullptr;
        for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_;
             bucket_index_++) {
          if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
            node_ = static_cast<Node*>(m_->table_[bucket_index_]);
            break;
          } else if (m_->TableEntryIsTree(bucket_index_)) {
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            GOOGLE_DCHECK(!tree->empty());
            node_ = NodeFromTreeIterator(tree->begin());
            break;
          }
        }
      }

      reference operator*() const { return node_->kv; }
      pointer operator->() const { return &(operator*()); }

      friend bool operator==(const iterator_base& a, const iterator_base& b) {
        return a.node_ == b.node_;
      }
      friend bool operator!=(const iterator_base& a, const iterator_base& b) {
        return a.node_ != b.node_;
      }

      iterator_base& operator++() {
        if (node_->next == nullptr) {
          TreeIterator tree_it;
          const bool is_list = revalidate_if_necessary(&tree_it);
          if (is_list) {
            SearchFrom(bucket_index_ + 1);
          } else {
            GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0u);
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            if (++tree_it == tree->end()) {
              SearchFrom(bucket_index_ + 2);
            } else {
              node_ = NodeFromTreeIterator(tree_it);
            }
          }
        } else {
          node_ = node_->next;
        }
        return *this;
      }

      iterator_base operator++(int /* unused */) {
        iterator_base tmp = *this;
        ++*this;
        return tmp;
      }

      // Assumes node_ and m_ are correct and non-null, but other fields may be
      // stale.  Fix them as needed.  Then return true iff node_ points to a
      // Node in a list.  If false is returned then *it is modified to be
      // a valid iterator for node_.
      bool revalidate_if_necessary(TreeIterator* it) {
        GOOGLE_DCHECK(node_ != nullptr && m_ != nullptr);
        // Force bucket_index_ to be in range.
        bucket_index_ &= (m_->num_buckets_ - 1);
        // Common case: the bucket we think is relevant points to node_.
        if (m_->table_[bucket_index_] == static_cast<void*>(node_)) return true;
        // Less common: the bucket is a linked list with node_ somewhere in it,
        // but not at the head.
        if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
          Node* l = static_cast<Node*>(m_->table_[bucket_index_]);
          while ((l = l->next) != nullptr) {
            if (l == node_) {
              return true;
            }
          }
        }
        // Well, bucket_index_ still might be correct, but probably
        // not.  Revalidate just to be sure.  This case is rare enough that we
        // don't worry about potential optimizations, such as having a custom
        // find-like method that compares Node* instead of the key.
        iterator_base i(m_->find(node_->kv.first, it));
        bucket_index_ = i.bucket_index_;
        return m_->TableEntryIsList(bucket_index_);
      }

      Node* node_;
      const InnerMap* m_;
      size_type bucket_index_;
    };

   public:
    using iterator = iterator_base<value_type>;
    using const_iterator = iterator_base<const value_type>;

    Arena* arena() const { return alloc_.arena(); }

    void Swap(InnerMap* other) {
      std::swap(num_elements_, other->num_elements_);
      std::swap(num_buckets_, other->num_buckets_);
      std::swap(seed_, other->seed_);
      std::swap(index_of_first_non_null_, other->index_of_first_non_null_);
      std::swap(table_, other->table_);
      std::swap(alloc_, other->alloc_);
    }

    iterator begin() { return iterator(this); }
    iterator end() { return iterator(); }
    const_iterator begin() const { return const_iterator(this); }
    const_iterator end() const { return const_iterator(); }

    void clear() {
      for (size_type b = 0; b < num_buckets_; b++) {
        if (TableEntryIsNonEmptyList(b)) {
          Node* node = static_cast<Node*>(table_[b]);
          table_[b] = nullptr;
          do {
            Node* next = node->next;
            DestroyNode(node);
            node = next;
          } while (node != nullptr);
        } else if (TableEntryIsTree(b)) {
          Tree* tree = static_cast<Tree*>(table_[b]);
          GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0);
          table_[b] = table_[b + 1] = nullptr;
          typename Tree::iterator tree_it = tree->begin();
          do {
            Node* node = NodeFromTreeIterator(tree_it);
            typename Tree::iterator next = tree_it;
            ++next;
            tree->erase(tree_it);
            DestroyNode(node);
            tree_it = next;
          } while (tree_it != tree->end());
          DestroyTree(tree);
          b++;
        }
      }
      num_elements_ = 0;
      index_of_first_non_null_ = num_buckets_;
    }

    const hasher& hash_function() const { return *this; }

    static size_type max_size() {
      return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);
    }
    size_type size() const { return num_elements_; }
    bool empty() const { return size() == 0; }

    template <typename K>
    iterator find(const K& k) {
      return iterator(FindHelper(k).first);
    }

    template <typename K>
    const_iterator find(const K& k) const {
      return FindHelper(k).first;
    }

    // Inserts the key into the map, if not already present. In that case, the
    // value will be value-initialized.
    template <typename K>
    std::pair<iterator, bool> insert(K&& k) {
      std::pair<const_iterator, size_type> p = FindHelper(k);
      // Case 1: key was already present.
      if (p.first.node_ != nullptr)
        return std::make_pair(iterator(p.first), false);
      // Case 2: insert.
      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
        p = FindHelper(k);
      }
      const size_type b = p.second;  // bucket number
      Node* node;
      // If K is not key_type, make the conversion to key_type explicit.
      using TypeToInit = typename std::conditional<
          std::is_same<typename std::decay<K>::type, key_type>::value, K&&,
          key_type>::type;
      if (alloc_.arena() == nullptr) {
        node = new Node{value_type(static_cast<TypeToInit>(std::forward<K>(k))),
                        nullptr};
      } else {
        node = Alloc<Node>(1);
        Arena::CreateInArenaStorage(
            const_cast<Key*>(&node->kv.first), alloc_.arena(),
            static_cast<TypeToInit>(std::forward<K>(k)));
        Arena::CreateInArenaStorage(&node->kv.second, alloc_.arena());
      }

      iterator result = InsertUnique(b, node);
      ++num_elements_;
      return std::make_pair(result, true);
    }

    template <typename K>
    value_type& operator[](K&& k) {
      return *insert(std::forward<K>(k)).first;
    }

    void erase(iterator it) {
      GOOGLE_DCHECK_EQ(it.m_, this);
      typename Tree::iterator tree_it;
      const bool is_list = it.revalidate_if_necessary(&tree_it);
      size_type b = it.bucket_index_;
      Node* const item = it.node_;
      if (is_list) {
        GOOGLE_DCHECK(TableEntryIsNonEmptyList(b));
        Node* head = static_cast<Node*>(table_[b]);
        head = EraseFromLinkedList(item, head);
        table_[b] = static_cast<void*>(head);
      } else {
        GOOGLE_DCHECK(TableEntryIsTree(b));
        Tree* tree = static_cast<Tree*>(table_[b]);
        tree->erase(tree_it);
        if (tree->empty()) {
          // Force b to be the minimum of b and b ^ 1.  This is important
          // only because we want index_of_first_non_null_ to be correct.
          b &= ~static_cast<size_type>(1);
          DestroyTree(tree);
          table_[b] = table_[b + 1] = nullptr;
        }
      }
      DestroyNode(item);
      --num_elements_;
      if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) {
        while (index_of_first_non_null_ < num_buckets_ &&
               table_[index_of_first_non_null_] == nullptr) {
          ++index_of_first_non_null_;
        }
      }
    }

    size_t SpaceUsedInternal() const {
      return internal::SpaceUsedInTable<Key>(table_, num_buckets_,
                                             num_elements_, sizeof(Node));
    }

   private:
    const_iterator find(const Key& k, TreeIterator* it) const {
      return FindHelper(k, it).first;
    }
    template <typename K>
    std::pair<const_iterator, size_type> FindHelper(const K& k) const {
      return FindHelper(k, nullptr);
    }
    template <typename K>
    std::pair<const_iterator, size_type> FindHelper(const K& k,
                                                    TreeIterator* it) const {
      size_type b = BucketNumber(k);
      if (TableEntryIsNonEmptyList(b)) {
        Node* node = static_cast<Node*>(table_[b]);
        do {
          if (internal::TransparentSupport<Key>::Equals(node->kv.first, k)) {
            return std::make_pair(const_iterator(node, this, b), b);
          } else {
            node = node->next;
          }
        } while (node != nullptr);
      } else if (TableEntryIsTree(b)) {
        GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
        b &= ~static_cast<size_t>(1);
        Tree* tree = static_cast<Tree*>(table_[b]);
        auto tree_it = tree->find(k);
        if (tree_it != tree->end()) {
          if (it != nullptr) *it = tree_it;
          return std::make_pair(const_iterator(tree_it, this, b), b);
        }
      }
      return std::make_pair(end(), b);
    }

    // Insert the given Node in bucket b.  If that would make bucket b too big,
    // and bucket b is not a tree, create a tree for buckets b and b^1 to share.
    // Requires that node's key is not already in the map and that b is the
    // correct bucket for it.  num_elements_ is not modified.
    iterator InsertUnique(size_type b, Node* node) {
      GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ ||
                    table_[index_of_first_non_null_] != nullptr);
      // In practice, the code that led to this point may have already
      // determined whether we are inserting into an empty list, a short list,
      // or whatever.  But it's probably cheap enough to recompute that here;
      // it's likely that we're inserting into an empty or short list.
      iterator result;
      GOOGLE_DCHECK(find(node->kv.first) == end());
      if (TableEntryIsEmpty(b)) {
        result = InsertUniqueInList(b, node);
      } else if (TableEntryIsNonEmptyList(b)) {
        if (PROTOBUF_PREDICT_FALSE(TableEntryIsTooLong(b))) {
          TreeConvert(b);
          result = InsertUniqueInTree(b, node);
          GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast<size_type>(1));
        } else {
          // Insert into a pre-existing list.  This case cannot modify
          // index_of_first_non_null_, so we skip the code to update it.
          return InsertUniqueInList(b, node);
        }
      } else {
        // Insert into a pre-existing tree.  This case cannot modify
        // index_of_first_non_null_, so we skip the code to update it.
        return InsertUniqueInTree(b, node);
      }
      // parentheses around (std::min) prevent macro expansion of min(...)
      index_of_first_non_null_ =
          (std::min)(index_of_first_non_null_, result.bucket_index_);
      return result;
    }

    // Returns whether we should insert after the head of the list. For
    // non-optimized builds, we randomly decide whether to insert right at the
    // head of the list or just after the head. This helps add a little bit of
    // non-determinism to the map ordering.
    bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG
      (void)node;
      return false;
#else
      // Doing modulo with a prime mixes the bits more.
      return (reinterpret_cast<uintptr_t>(node) ^ seed_) % 13 > 6;
#endif
    }

    // Helper for InsertUnique.  Handles the case where bucket b is a
    // not-too-long linked list.
    iterator InsertUniqueInList(size_type b, Node* node) {
      if (table_[b] != nullptr && ShouldInsertAfterHead(node)) {
        Node* first = static_cast<Node*>(table_[b]);
        node->next = first->next;
        first->next = node;
        return iterator(node, this, b);
      }

      node->next = static_cast<Node*>(table_[b]);
      table_[b] = static_cast<void*>(node);
      return iterator(node, this, b);
    }

    // Helper for InsertUnique.  Handles the case where bucket b points to a
    // Tree.
    iterator InsertUniqueInTree(size_type b, Node* node) {
      GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
      // Maintain the invariant that node->next is null for all Nodes in Trees.
      node->next = nullptr;
      return iterator(
          static_cast<Tree*>(table_[b])->insert({node->kv.first, node}).first,
          this, b & ~static_cast<size_t>(1));
    }

    // Returns whether it did resize.  Currently this is only used when
    // num_elements_ increases, though it could be used in other situations.
    // It checks for load too low as well as load too high: because any number
    // of erases can occur between inserts, the load could be as low as 0 here.
    // Resizing to a lower size is not always helpful, but failing to do so can
    // destroy the expected big-O bounds for some operations. By having the
    // policy that sometimes we resize down as well as up, clients can easily
    // keep O(size()) = O(number of buckets) if they want that.
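    //
    // Editor's note: a worked example of this policy (not part of the
    // original header). Assume num_buckets_ == 64, so hi_cutoff ==
    // 64 * 12 / 16 == 48 and lo_cutoff == 12. An insert that pushes new_size
    // to 48 doubles the table to 128 buckets. Erasing down to new_size == 12
    // shrinks it: hypothetical_size == 12 * 5 / 4 + 1 == 16, and since
    // 16 << 1 == 32 < 48 but 16 << 2 == 64 >= 48, the reduction factor is 2
    // and the table shrinks to 64 >> 2 == 16 buckets (never below
    // kMinTableSize).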
    bool ResizeIfLoadIsOutOfRange(size_type new_size) {
      const size_type kMaxMapLoadTimes16 = 12;  // controls RAM vs CPU tradeoff
      const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;
      const size_type lo_cutoff = hi_cutoff / 4;
      // We don't care how many elements are in trees.  If a lot are,
      // we may resize even though there are many empty buckets.  In
      // practice, this seems fine.
      if (PROTOBUF_PREDICT_FALSE(new_size >= hi_cutoff)) {
        if (num_buckets_ <= max_size() / 2) {
          Resize(num_buckets_ * 2);
          return true;
        }
      } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff &&
                                        num_buckets_ > kMinTableSize)) {
        size_type lg2_of_size_reduction_factor = 1;
        // It's possible we want to shrink a lot here... size() could even be 0.
        // So, estimate how much to shrink by making sure we don't shrink so
        // much that we would need to grow the table after a few inserts.
        const size_type hypothetical_size = new_size * 5 / 4 + 1;
        while ((hypothetical_size << lg2_of_size_reduction_factor) <
               hi_cutoff) {
          ++lg2_of_size_reduction_factor;
        }
        size_type new_num_buckets = std::max<size_type>(
            kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
        if (new_num_buckets != num_buckets_) {
          Resize(new_num_buckets);
          return true;
        }
      }
      return false;
    }

    // Resize to the given number of buckets.
    void Resize(size_t new_num_buckets) {
      if (num_buckets_ == internal::kGlobalEmptyTableSize) {
        // This is the global empty array.
        // Just overwrite with a new one. No need to transfer or free anything.
        num_buckets_ = index_of_first_non_null_ = kMinTableSize;
        table_ = CreateEmptyTable(num_buckets_);
        seed_ = Seed();
        return;
      }

      GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);
      void** const old_table = table_;
      const size_type old_table_size = num_buckets_;
      num_buckets_ = new_num_buckets;
      table_ = CreateEmptyTable(num_buckets_);
      const size_type start = index_of_first_non_null_;
      index_of_first_non_null_ = num_buckets_;
      for (size_type i = start; i < old_table_size; i++) {
        if (internal::TableEntryIsNonEmptyList(old_table, i)) {
          TransferList(old_table, i);
        } else if (internal::TableEntryIsTree(old_table, i)) {
          TransferTree(old_table, i++);
        }
      }
      Dealloc<void*>(old_table, old_table_size);
    }

    void TransferList(void* const* table, size_type index) {
      Node* node = static_cast<Node*>(table[index]);
      do {
        Node* next = node->next;
        InsertUnique(BucketNumber(node->kv.first), node);
        node = next;
      } while (node != nullptr);
    }

    void TransferTree(void* const* table, size_type index) {
      Tree* tree = static_cast<Tree*>(table[index]);
      typename Tree::iterator tree_it = tree->begin();
      do {
        InsertUnique(BucketNumber(std::cref(tree_it->first).get()),
                     NodeFromTreeIterator(tree_it));
      } while (++tree_it != tree->end());
      DestroyTree(tree);
    }

    Node* EraseFromLinkedList(Node* item, Node* head) {
      if (head == item) {
        return head->next;
      } else {
        head->next = EraseFromLinkedList(item, head->next);
        return head;
      }
    }

    bool TableEntryIsEmpty(size_type b) const {
      return internal::TableEntryIsEmpty(table_, b);
    }
    bool TableEntryIsNonEmptyList(size_type b) const {
      return internal::TableEntryIsNonEmptyList(table_, b);
    }
    bool TableEntryIsTree(size_type b) const {
      return internal::TableEntryIsTree(table_, b);
    }
    bool TableEntryIsList(size_type b) const {
      return internal::TableEntryIsList(table_, b);
    }

    void TreeConvert(size_type b) {
      GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));
      Tree* tree =
          Arena::Create<Tree>(alloc_.arena(), typename Tree::key_compare(),
                              typename Tree::allocator_type(alloc_));
      size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);
      GOOGLE_DCHECK_EQ(count, tree->size());
      table_[b] = table_[b ^ 1] = static_cast<void*>(tree);
    }

    // Copy a linked list in the given bucket to a tree.
    // Returns the number of things it copied.
    size_type CopyListToTree(size_type b, Tree* tree) {
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      while (node != nullptr) {
        tree->insert({node->kv.first, node});
        ++count;
        Node* next = node->next;
        node->next = nullptr;
        node = next;
      }
      return count;
    }

    // Return whether table_[b] is a linked list that seems awfully long.
    // Requires table_[b] to point to a non-empty linked list.
    bool TableEntryIsTooLong(size_type b) {
      const size_type kMaxLength = 8;
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      do {
        ++count;
        node = node->next;
      } while (node != nullptr);
      // Invariant: no linked list is ever more than kMaxLength in length.
      GOOGLE_DCHECK_LE(count, kMaxLength);
      return count >= kMaxLength;
    }

    template <typename K>
    size_type BucketNumber(const K& k) const {
      // We xor the hash value against the random seed so that we effectively
      // have a random hash function.
      uint64 h = hash_function()(k) ^ seed_;

      // We use the multiplication method to determine the bucket number from
      // the hash value. The constant kPhi (suggested by Knuth) is roughly
      // (sqrt(5) - 1) / 2 * 2^64.
      constexpr uint64 kPhi = uint64{0x9e3779b97f4a7c15};
      return ((kPhi * h) >> 32) & (num_buckets_ - 1);
    }
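
    // Editor's note: a brief sketch of the multiplication method above (not
    // part of the original header). With num_buckets_ == 8, the returned
    // value is bits 32..34 of the product kPhi * h:
    //
    //   bucket = ((kPhi * h) >> 32) & 7;   // an index in [0, 8)
    //
    // Multiplying by kPhi (Fibonacci hashing) mixes every bit of h into the
    // high bits, so even weak std::hash implementations (e.g. identity for
    // integers) still spread keys across buckets.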

    // Return a power of two no less than max(kMinTableSize, n).
    // Assumes either n < kMinTableSize or n is a power of two.
    size_type TableSize(size_type n) {
      return n < static_cast<size_type>(kMinTableSize)
                 ? static_cast<size_type>(kMinTableSize)
                 : n;
    }

    // Use alloc_ to allocate an array of n objects of type U.
    template <typename U>
    U* Alloc(size_type n) {
      using alloc_type = typename Allocator::template rebind<U>::other;
      return alloc_type(alloc_).allocate(n);
    }

    // Use alloc_ to deallocate an array of n objects of type U.
    template <typename U>
    void Dealloc(U* t, size_type n) {
      using alloc_type = typename Allocator::template rebind<U>::other;
      alloc_type(alloc_).deallocate(t, n);
    }

    void DestroyNode(Node* node) {
      if (alloc_.arena() == nullptr) {
        delete node;
      }
    }

    void DestroyTree(Tree* tree) {
      if (alloc_.arena() == nullptr) {
        delete tree;
      }
    }

    void** CreateEmptyTable(size_type n) {
      GOOGLE_DCHECK(n >= kMinTableSize);
      GOOGLE_DCHECK_EQ(n & (n - 1), 0);
      void** result = Alloc<void*>(n);
      memset(result, 0, n * sizeof(result[0]));
      return result;
    }

    // Return a randomish value.
    size_type Seed() const {
      // We get a little bit of randomness from the address of the map. The
      // lower bits are not very random, due to alignment, so we discard them
      // and shift the higher bits into their place.
      size_type s = reinterpret_cast<uintptr_t>(this) >> 12;
#if defined(__x86_64__) && defined(__GNUC__) && \
    !defined(GOOGLE_PROTOBUF_NO_RDTSC)
      uint32 hi, lo;
      asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
      s += ((static_cast<uint64>(hi) << 32) | lo);
#endif
      return s;
    }

    friend class Arena;
    using InternalArenaConstructable_ = void;
    using DestructorSkippable_ = void;

    size_type num_elements_;
    size_type num_buckets_;
    size_type seed_;
    size_type index_of_first_non_null_;
    void** table_;  // an array with num_buckets_ entries
    Allocator alloc_;
    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);
  };  // end of class InnerMap

  template <typename LookupKey>
  using key_arg = typename internal::TransparentSupport<
      key_type>::template key_arg<LookupKey>;

 public:
  // Iterators
  class const_iterator {
    using InnerIt = typename InnerMap::const_iterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = const value_type*;
    using reference = const value_type&;

    const_iterator() {}
    explicit const_iterator(const InnerIt& it) : it_(it) {}

    const_reference operator*() const { return *it_; }
    const_pointer operator->() const { return &(operator*()); }

    const_iterator& operator++() {
      ++it_;
      return *this;
    }
    const_iterator operator++(int) { return const_iterator(it_++); }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    InnerIt it_;
  };

  class iterator {
    using InnerIt = typename InnerMap::iterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    iterator() {}
    explicit iterator(const InnerIt& it) : it_(it) {}

    reference operator*() const { return *it_; }
    pointer operator->() const { return &(operator*()); }

    iterator& operator++() {
      ++it_;
      return *this;
    }
    iterator operator++(int) { return iterator(it_++); }

    // Allow implicit conversion to const_iterator.
    operator const_iterator() const {  // NOLINT(runtime/explicit)
      return const_iterator(typename InnerMap::const_iterator(it_));
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    friend class Map;

    InnerIt it_;
  };

  iterator begin() { return iterator(elements_.begin()); }
  iterator end() { return iterator(elements_.end()); }
  const_iterator begin() const { return const_iterator(elements_.begin()); }
  const_iterator end() const { return const_iterator(elements_.end()); }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  // Capacity
  size_type size() const { return elements_.size(); }
  bool empty() const { return size() == 0; }

  // Element access
  template <typename K = key_type>
  T& operator[](const key_arg<K>& key) {
    return elements_[key].second;
  }
  template <
      typename K = key_type,
      // Disable for integral types to reduce code bloat.
      typename = typename std::enable_if<!std::is_integral<K>::value>::type>
  T& operator[](key_arg<K>&& key) {
    return elements_[std::forward<K>(key)].second;
  }

  template <typename K = key_type>
  const T& at(const key_arg<K>& key) const {
    const_iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  template <typename K = key_type>
  T& at(const key_arg<K>& key) {
    iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  // Lookup
  template <typename K = key_type>
  size_type count(const key_arg<K>& key) const {
    return find(key) == end() ? 0 : 1;
  }

  template <typename K = key_type>
  const_iterator find(const key_arg<K>& key) const {
    return const_iterator(elements_.find(key));
  }
  template <typename K = key_type>
  iterator find(const key_arg<K>& key) {
    return iterator(elements_.find(key));
  }

  template <typename K = key_type>
  bool contains(const key_arg<K>& key) const {
    return find(key) != end();
  }

  template <typename K = key_type>
  std::pair<const_iterator, const_iterator> equal_range(
      const key_arg<K>& key) const {
    const_iterator it = find(key);
    if (it == end()) {
      return std::pair<const_iterator, const_iterator>(it, it);
    } else {
      const_iterator begin = it++;
      return std::pair<const_iterator, const_iterator>(begin, it);
    }
  }

  template <typename K = key_type>
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
    iterator it = find(key);
    if (it == end()) {
      return std::pair<iterator, iterator>(it, it);
    } else {
      iterator begin = it++;
      return std::pair<iterator, iterator>(begin, it);
    }
  }

  // insert
  std::pair<iterator, bool> insert(const value_type& value) {
    std::pair<typename InnerMap::iterator, bool> p =
        elements_.insert(value.first);
    if (p.second) {
      p.first->second = value.second;
    }
    return std::pair<iterator, bool>(iterator(p.first), p.second);
  }
  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (InputIt it = first; it != last; ++it) {
      iterator exist_it = find(it->first);
      if (exist_it == end()) {
        operator[](it->first) = it->second;
      }
    }
  }
  void insert(std::initializer_list<value_type> values) {
    insert(values.begin(), values.end());
  }

  // Erase and clear
  template <typename K = key_type>
  size_type erase(const key_arg<K>& key) {
    iterator it = find(key);
    if (it == end()) {
      return 0;
    } else {
      erase(it);
      return 1;
    }
  }
  iterator erase(iterator pos) {
    iterator i = pos++;
    elements_.erase(i.it_);
    return pos;
  }
  void erase(iterator first, iterator last) {
    while (first != last) {
      first = erase(first);
    }
  }
  void clear() { elements_.clear(); }

  // Assign
  Map& operator=(const Map& other) {
    if (this != &other) {
      clear();
      insert(other.begin(), other.end());
    }
    return *this;
  }

  void swap(Map& other) {
    if (arena() == other.arena()) {
      elements_.Swap(&other.elements_);
    } else {
      // TODO(zuguang): optimize this. The temporary copy can be allocated
      // in the same arena as the other message, and the "other = copy" can
      // be replaced with the fast-path swap above.
      Map copy = *this;
      *this = other;
      other = copy;
    }
  }

  // Access to hasher.  Currently this returns a copy, but it may
  // be modified to return a const reference in the future.
  hasher hash_function() const { return elements_.hash_function(); }

  size_t SpaceUsedExcludingSelfLong() const {
    if (empty()) return 0;
    return elements_.SpaceUsedInternal() + internal::SpaceUsedInValues(this);
  }

 private:
  Arena* arena() const { return elements_.arena(); }
  InnerMap elements_;

  friend class Arena;
  using InternalArenaConstructable_ = void;
  using DestructorSkippable_ = void;
  template <typename Derived, typename K, typename V,
            internal::WireFormatLite::FieldType key_wire_type,
            internal::WireFormatLite::FieldType value_wire_type>
  friend class internal::MapFieldLite;
};

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_MAP_H__