/src/rocksdb/util/aligned_buffer.h
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>

#include "port/port.h"

namespace ROCKSDB_NAMESPACE {

// This file contains utilities to handle the alignment of pages and buffers.

// Truncate to a multiple of page_size, which is also a page boundary. This
// helps in figuring out the right alignment. page_size must be a power of
// two.
// Example:
//   TruncateToPageBoundary(4096, 5000)  => 4096
//   TruncateToPageBoundary(4096, 10000) => 8192
inline size_t TruncateToPageBoundary(size_t page_size, size_t s) {
  s -= (s & (page_size - 1));
  assert((s % page_size) == 0);
  return s;
}

// Round up x to a multiple of y.
// Example:
//   Roundup(13, 5)   => 15
//   Roundup(201, 16) => 208
inline size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }

// Round down x to a multiple of y.
// Example:
//   Rounddown(13, 5)   => 10
//   Rounddown(201, 16) => 192
inline size_t Rounddown(size_t x, size_t y) { return (x / y) * y; }
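
// Illustrative only: these helpers are typically combined to widen a byte
// range to aligned boundaries for direct I/O. With a hypothetical 4096-byte
// alignment, a request covering bytes [5000, 7000) becomes:
//   size_t first = Rounddown(5000, 4096);        // => 4096
//   size_t last  = Roundup(5000 + 2000, 4096);   // => 8192
// so the aligned window to read is [4096, 8192).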

// AlignedBuffer manages a buffer by taking alignment into consideration, and
// aligns the buffer start and end positions. It is mainly used for direct I/O,
// though it can be used for other purposes as well.
// It also supports expanding the managed buffer, and copying whole or part of
// the data from the old buffer into the new expanded buffer. Such a copy is
// especially helpful in avoiding an IO to re-fetch the data from disk.
//
// Example:
//   AlignedBuffer buf;
//   buf.Alignment(alignment);
//   buf.AllocateNewBuffer(user_requested_buf_size);
//   ...
//   buf.AllocateNewBuffer(2 * user_requested_buf_size, /*copy_data*/ true,
//                         copy_offset, copy_len);
class AlignedBuffer {
  size_t alignment_;
  std::unique_ptr<char[]> buf_;
  size_t capacity_;
  size_t cursize_;
  char* bufstart_;

 public:
  AlignedBuffer()
      : alignment_(), capacity_(0), cursize_(0), bufstart_(nullptr) {}

  AlignedBuffer(AlignedBuffer&& o) noexcept { *this = std::move(o); }

  AlignedBuffer& operator=(AlignedBuffer&& o) noexcept {
    alignment_ = std::move(o.alignment_);
    buf_ = std::move(o.buf_);
    capacity_ = std::move(o.capacity_);
    cursize_ = std::move(o.cursize_);
    bufstart_ = std::move(o.bufstart_);
    return *this;
  }

  AlignedBuffer(const AlignedBuffer&) = delete;

  AlignedBuffer& operator=(const AlignedBuffer&) = delete;

  static bool isAligned(const void* ptr, size_t alignment) {
    return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
  }

  static bool isAligned(size_t n, size_t alignment) {
    return n % alignment == 0;
  }

  size_t Alignment() const { return alignment_; }

  size_t Capacity() const { return capacity_; }

  size_t CurrentSize() const { return cursize_; }

  const char* BufferStart() const { return bufstart_; }

  char* BufferStart() { return bufstart_; }

  void Clear() { cursize_ = 0; }

  char* Release() {
    cursize_ = 0;
    capacity_ = 0;
    bufstart_ = nullptr;
    return buf_.release();
  }

  void Alignment(size_t alignment) {
    assert(alignment > 0);
    assert((alignment & (alignment - 1)) == 0);
    alignment_ = alignment;
  }

  // Allocates a new buffer and sets the start position to the first aligned
  // byte.
  //
  // requested_capacity: requested new buffer capacity. This capacity will be
  //     rounded up based on alignment.
  // copy_data: Copy data from the old buffer to the new buffer. If copy_offset
  //     and copy_len are not passed in and the new requested capacity is
  //     bigger than the existing buffer's capacity, the data in the existing
  //     buffer is fully copied over to the new buffer.
  // copy_offset: Copy data from this offset in the old buffer.
  // copy_len: Number of bytes to copy.
  //
  // The function does nothing if copy_data is true and the new
  // requested_capacity is smaller than the data to be copied (copy_len, which
  // defaults to the current size), i.e. the old buffer is retained as is.
  void AllocateNewBuffer(size_t requested_capacity, bool copy_data = false,
                         uint64_t copy_offset = 0, size_t copy_len = 0) {
    assert(alignment_ > 0);
    assert((alignment_ & (alignment_ - 1)) == 0);

    copy_len = copy_len > 0 ? copy_len : cursize_;
    if (copy_data && requested_capacity < copy_len) {
      // If we are downsizing to a capacity that is smaller than the current
      // data in the buffer -- ignore the request.
      return;
    }

    size_t new_capacity = Roundup(requested_capacity, alignment_);
    // Over-allocate by one alignment unit so that the buffer start can be
    // rounded up to an aligned address within the allocation.
    char* new_buf = new char[new_capacity + alignment_];
    char* new_bufstart = reinterpret_cast<char*>(
        (reinterpret_cast<uintptr_t>(new_buf) + (alignment_ - 1)) &
        ~static_cast<uintptr_t>(alignment_ - 1));

    if (copy_data) {
      assert(bufstart_ + copy_offset + copy_len <= bufstart_ + cursize_);
      memcpy(new_bufstart, bufstart_ + copy_offset, copy_len);
      cursize_ = copy_len;
    } else {
      cursize_ = 0;
    }

    bufstart_ = new_bufstart;
    capacity_ = new_capacity;
    buf_.reset(new_buf);
  }
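
  // Illustrative only: a hypothetical sequence (sizes and `src` are made up)
  // showing how the buffer can be grown while preserving already-appended
  // data.
  //   buf.AllocateNewBuffer(8 * 1024);                       // empty buffer
  //   buf.Append(src, 4096);                                  // CurrentSize() == 4096
  //   buf.AllocateNewBuffer(16 * 1024, /*copy_data=*/true);   // keeps those 4096 bytes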

  // Append to the buffer.
  //
  // src         : source to copy the data from.
  // append_size : number of bytes to copy from src.
  // Returns the number of bytes appended.
  //
  // If append_size is more than the remaining buffer size, only the
  // remaining-size worth of bytes are copied.
  size_t Append(const char* src, size_t append_size) {
    size_t buffer_remaining = capacity_ - cursize_;
    size_t to_copy = std::min(append_size, buffer_remaining);

    if (to_copy > 0) {
      memcpy(bufstart_ + cursize_, src, to_copy);
      cursize_ += to_copy;
    }
    return to_copy;
  }

  // Read from the buffer.
  //
  // dest      : destination buffer to copy the data to.
  // offset    : the buffer offset to start reading from.
  // read_size : the number of bytes to copy from the buffer to dest.
  // Returns the number of bytes read/copied to dest.
  size_t Read(char* dest, size_t offset, size_t read_size) const {
    assert(offset < cursize_);

    size_t to_read = 0;
    if (offset < cursize_) {
      to_read = std::min(cursize_ - offset, read_size);
    }
    if (to_read > 0) {
      memcpy(dest, bufstart_ + offset, to_read);
    }
    return to_read;
  }

  // Pad from the current size up to the next alignment boundary with
  // "padding".
  void PadToAlignmentWith(int padding) {
    size_t total_size = Roundup(cursize_, alignment_);
    size_t pad_size = total_size - cursize_;

    if (pad_size > 0) {
      assert((pad_size + cursize_) <= capacity_);
      memset(bufstart_ + cursize_, padding, pad_size);
      cursize_ += pad_size;
    }
  }

  // Append pad_size bytes of "padding" to the buffer.
  void PadWith(size_t pad_size, int padding) {
    assert((pad_size + cursize_) <= capacity_);
    memset(bufstart_ + cursize_, padding, pad_size);
    cursize_ += pad_size;
  }

  // After a partial flush, move the tail to the beginning of the buffer.
  void RefitTail(size_t tail_offset, size_t tail_size) {
    if (tail_size > 0) {
      memmove(bufstart_, bufstart_ + tail_offset, tail_size);
    }
    cursize_ = tail_size;
  }

  // Returns a place to start appending.
  // WARNING: Note that it is possible to write past the end of the buffer if
  // the buffer is modified without using the write APIs or the encapsulation
  // offered by AlignedBuffer. It is up to the user to guard against such
  // errors.
  char* Destination() { return bufstart_ + cursize_; }

  // Set the current data size of the buffer, e.g. after writing into it
  // directly through Destination().
  void Size(size_t cursize) { cursize_ = cursize; }
};
}  // namespace ROCKSDB_NAMESPACE
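
// Illustrative end-to-end sketch (not part of this header; the sizes, `data`,
// `data_size`, and the WriteAligned() call are hypothetical): preparing an
// aligned write for direct I/O.
//
//   AlignedBuffer buf;
//   buf.Alignment(4096);                 // alignment must be a power of two
//   buf.AllocateNewBuffer(64 * 1024);    // capacity rounded up to 4096
//   buf.Append(data, data_size);         // copies at most the free capacity
//   buf.PadToAlignmentWith(0);           // zero-fill up to the next boundary
//   // buf.BufferStart() is aligned and buf.CurrentSize() is a multiple of
//   // the alignment, as typically required by direct I/O writes.
//   WriteAligned(buf.BufferStart(), buf.CurrentSize());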