Line data Source code
1 : // Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package sstable
6 :
7 : import (
8 : "bytes"
9 : "encoding/binary"
10 : "fmt"
11 : "math"
12 : "reflect"
13 : "sort"
14 : "unsafe"
15 :
16 : "github.com/cockroachdb/pebble/internal/intern"
17 : )
18 :
// propertiesBlockRestartInterval is the restart interval used when writing
// the properties block. math.MaxInt32 effectively disables intermediate
// restart points within the block.
const propertiesBlockRestartInterval = math.MaxInt32

// propTagMap maps a property tag (e.g. "rocksdb.num.entries") to the
// corresponding Properties struct field. Populated by generateTagMaps at init.
var propTagMap = make(map[string]reflect.StructField)

// propBoolTrue/propBoolFalse are the on-disk encodings of boolean properties.
var propBoolTrue = []byte{'1'}
var propBoolFalse = []byte{'0'}

// propOffsetTagMap is the inverse lookup: the byte offset of a field within
// Properties (or the embedded CommonProperties) to its property tag.
// Populated by generateTagMaps at init.
var propOffsetTagMap = make(map[uintptr]string)
26 :
27 1 : func generateTagMaps(t reflect.Type, indexPrefix []int) {
28 1 : for i := 0; i < t.NumField(); i++ {
29 1 : f := t.Field(i)
30 1 : if f.Type.Kind() == reflect.Struct {
31 1 : if tag := f.Tag.Get("prop"); i == 0 && tag == "pebble.embbeded_common_properties" {
32 1 : // CommonProperties struct embedded in Properties. Note that since
33 1 : // CommonProperties is placed at the top of properties we can use
34 1 : // the offsets of the fields within CommonProperties to determine
35 1 : // the offsets of those fields within Properties.
36 1 : generateTagMaps(f.Type, []int{i})
37 1 : continue
38 : }
39 0 : panic("pebble: unknown struct type in Properties")
40 : }
41 1 : if tag := f.Tag.Get("prop"); tag != "" {
42 1 : switch f.Type.Kind() {
43 1 : case reflect.Bool:
44 1 : case reflect.Uint32:
45 1 : case reflect.Uint64:
46 1 : case reflect.String:
47 0 : default:
48 0 : panic(fmt.Sprintf("unsupported property field type: %s %s", f.Name, f.Type))
49 : }
50 1 : if len(indexPrefix) > 0 {
51 1 : // Prepend the index prefix so that we can use FieldByIndex on the top-level struct.
52 1 : f.Index = append(indexPrefix[:len(indexPrefix):len(indexPrefix)], f.Index...)
53 1 : }
54 1 : propTagMap[tag] = f
55 1 : propOffsetTagMap[f.Offset] = tag
56 : }
57 : }
58 : }
59 :
// init builds the property tag/offset lookup tables from the Properties
// struct definition before any sstable properties are read or written.
func init() {
	generateTagMaps(reflect.TypeOf(Properties{}), nil)
}
63 :
// CommonProperties holds properties for either a virtual or a physical sstable. This
// can be used by code which doesn't care to make the distinction between physical
// and virtual sstables properties.
//
// For virtual sstables, fields are constructed through extrapolation upon virtual
// reader construction. See MakeVirtualReader for implementation details.
//
// NB: The values of these properties can affect correctness. For example,
// if NumRangeKeySets == 0, but the sstable actually contains range keys, then
// the iterators will behave incorrectly.
//
// NB: Field byte offsets within this struct are used as keys into
// propOffsetTagMap (see generateTagMaps), and the struct must remain the
// first field of Properties for those offsets to be valid there too. Do not
// reorder fields without auditing that machinery.
type CommonProperties struct {
	// The number of entries in this table.
	NumEntries uint64 `prop:"rocksdb.num.entries"`
	// Total raw key size.
	RawKeySize uint64 `prop:"rocksdb.raw.key.size"`
	// Total raw value size.
	RawValueSize uint64 `prop:"rocksdb.raw.value.size"`
	// Total raw key size of point deletion tombstones. This value is comparable
	// to RawKeySize.
	RawPointTombstoneKeySize uint64 `prop:"pebble.raw.point-tombstone.key.size"`
	// Sum of the raw value sizes carried by point deletion tombstones
	// containing size estimates. See the DeleteSized key kind. This value is
	// comparable to Raw{Key,Value}Size.
	RawPointTombstoneValueSize uint64 `prop:"pebble.raw.point-tombstone.value.size"`
	// The number of point deletion entries ("tombstones") in this table that
	// carry a size hint indicating the size of the value the tombstone deletes.
	NumSizedDeletions uint64 `prop:"pebble.num.deletions.sized"`
	// The number of deletion entries in this table, including both point and
	// range deletions.
	NumDeletions uint64 `prop:"rocksdb.deleted.keys"`
	// The number of range deletions in this table.
	NumRangeDeletions uint64 `prop:"rocksdb.num.range-deletions"`
	// The number of RANGEKEYDELs in this table.
	NumRangeKeyDels uint64 `prop:"pebble.num.range-key-dels"`
	// The number of RANGEKEYSETs in this table.
	NumRangeKeySets uint64 `prop:"pebble.num.range-key-sets"`
	// Total size of value blocks and value index block. Only serialized if > 0.
	ValueBlocksSize uint64 `prop:"pebble.value-blocks.size"`
}
103 :
104 : // String is only used for testing purposes.
105 0 : func (c *CommonProperties) String() string {
106 0 : var buf bytes.Buffer
107 0 : v := reflect.ValueOf(*c)
108 0 : loaded := make(map[uintptr]struct{})
109 0 : writeProperties(loaded, v, &buf)
110 0 : return buf.String()
111 0 : }
112 :
113 : // NumPointDeletions is the number of point deletions in the sstable. For virtual
114 : // sstables, this is an estimate.
115 1 : func (c *CommonProperties) NumPointDeletions() uint64 {
116 1 : return c.NumDeletions - c.NumRangeDeletions
117 1 : }
118 :
// Properties holds the sstable property values. The properties are
// automatically populated during sstable creation and load from the properties
// meta block when an sstable is opened.
type Properties struct {
	// CommonProperties needs to be at the top of the Properties struct so that the
	// offsets of the fields in CommonProperties match the offsets of the embedded
	// fields of CommonProperties in Properties.
	//
	// NB: the tag value (including the "embbeded" misspelling) is matched
	// verbatim by generateTagMaps; do not change one without the other.
	CommonProperties `prop:"pebble.embbeded_common_properties"`

	// The name of the comparer used in this table.
	ComparerName string `prop:"rocksdb.comparator"`
	// The compression algorithm used to compress blocks.
	CompressionName string `prop:"rocksdb.compression"`
	// The compression options used to compress blocks.
	CompressionOptions string `prop:"rocksdb.compression_options"`
	// The total size of all data blocks.
	DataSize uint64 `prop:"rocksdb.data.size"`
	// The external sstable version format. Version 2 is the one RocksDB has been
	// using since 5.13. RocksDB only uses the global sequence number for an
	// sstable if this property has been set.
	ExternalFormatVersion uint32 `prop:"rocksdb.external_sst_file.version"`
	// The name of the filter policy used in this table. Empty if no filter
	// policy is used.
	FilterPolicyName string `prop:"rocksdb.filter.policy"`
	// The size of filter block.
	FilterSize uint64 `prop:"rocksdb.filter.size"`
	// Total number of index partitions if kTwoLevelIndexSearch is used.
	IndexPartitions uint64 `prop:"rocksdb.index.partitions"`
	// The size of index block.
	IndexSize uint64 `prop:"rocksdb.index.size"`
	// The index type. TODO(peter): add a more detailed description.
	IndexType uint32 `prop:"rocksdb.block.based.table.index.type"`
	// For formats >= TableFormatPebblev4, this is set to true if the obsolete
	// bit is strict for all the point keys.
	IsStrictObsolete bool `prop:"pebble.obsolete.is_strict"`
	// The name of the merger used in this table. Empty if no merger is used.
	MergerName string `prop:"rocksdb.merge.operator"`
	// The number of blocks in this table.
	NumDataBlocks uint64 `prop:"rocksdb.num.data.blocks"`
	// The number of merge operands in the table.
	NumMergeOperands uint64 `prop:"rocksdb.merge.operands"`
	// The number of RANGEKEYUNSETs in this table.
	NumRangeKeyUnsets uint64 `prop:"pebble.num.range-key-unsets"`
	// The number of value blocks in this table. Only serialized if > 0.
	NumValueBlocks uint64 `prop:"pebble.num.value-blocks"`
	// The number of values stored in value blocks. Only serialized if > 0.
	NumValuesInValueBlocks uint64 `prop:"pebble.num.values.in.value-blocks"`
	// A comma separated list of names of the property collectors used in this
	// table.
	PropertyCollectorNames string `prop:"rocksdb.property.collectors"`
	// Total raw rangekey key size.
	RawRangeKeyKeySize uint64 `prop:"pebble.raw.range-key.key.size"`
	// Total raw rangekey value size.
	RawRangeKeyValueSize uint64 `prop:"pebble.raw.range-key.value.size"`
	// The total number of keys in this table that were pinned by open snapshots.
	SnapshotPinnedKeys uint64 `prop:"pebble.num.snapshot-pinned-keys"`
	// The cumulative bytes of keys in this table that were pinned by
	// open snapshots. This value is comparable to RawKeySize.
	SnapshotPinnedKeySize uint64 `prop:"pebble.raw.snapshot-pinned-keys.size"`
	// The cumulative bytes of values in this table that were pinned by
	// open snapshots. This value is comparable to RawValueSize.
	SnapshotPinnedValueSize uint64 `prop:"pebble.raw.snapshot-pinned-values.size"`
	// Size of the top-level index if kTwoLevelIndexSearch is used.
	TopLevelIndexSize uint64 `prop:"rocksdb.top-level.index.size"`
	// User collected properties. Currently, we only use them to store block
	// properties aggregated at the table level.
	UserProperties map[string]string

	// Loaded set indicating which fields have been loaded from disk. Indexed by
	// the field's byte offset within the struct
	// (reflect.StructField.Offset). Only set if the properties have been loaded
	// from a file. Only exported for testing purposes.
	Loaded map[uintptr]struct{}
}
193 :
194 : // NumPointDeletions returns the number of point deletions in this table.
195 0 : func (p *Properties) NumPointDeletions() uint64 {
196 0 : return p.NumDeletions - p.NumRangeDeletions
197 0 : }
198 :
199 : // NumRangeKeys returns a count of the number of range keys in this table.
200 1 : func (p *Properties) NumRangeKeys() uint64 {
201 1 : return p.NumRangeKeyDels + p.NumRangeKeySets + p.NumRangeKeyUnsets
202 1 : }
203 :
204 0 : func writeProperties(loaded map[uintptr]struct{}, v reflect.Value, buf *bytes.Buffer) {
205 0 : vt := v.Type()
206 0 : for i := 0; i < v.NumField(); i++ {
207 0 : ft := vt.Field(i)
208 0 : if ft.Type.Kind() == reflect.Struct {
209 0 : // Embedded struct within the properties.
210 0 : writeProperties(loaded, v.Field(i), buf)
211 0 : continue
212 : }
213 0 : tag := ft.Tag.Get("prop")
214 0 : if tag == "" {
215 0 : continue
216 : }
217 :
218 0 : f := v.Field(i)
219 0 : // TODO(peter): Use f.IsZero() when we can rely on go1.13.
220 0 : if zero := reflect.Zero(f.Type()); zero.Interface() == f.Interface() {
221 0 : // Skip printing of zero values which were not loaded from disk.
222 0 : if _, ok := loaded[ft.Offset]; !ok {
223 0 : continue
224 : }
225 : }
226 :
227 0 : fmt.Fprintf(buf, "%s: ", tag)
228 0 : switch ft.Type.Kind() {
229 0 : case reflect.Bool:
230 0 : fmt.Fprintf(buf, "%t\n", f.Bool())
231 0 : case reflect.Uint32:
232 0 : fmt.Fprintf(buf, "%d\n", f.Uint())
233 0 : case reflect.Uint64:
234 0 : fmt.Fprintf(buf, "%d\n", f.Uint())
235 0 : case reflect.String:
236 0 : fmt.Fprintf(buf, "%s\n", f.String())
237 0 : default:
238 0 : panic("not reached")
239 : }
240 : }
241 : }
242 :
243 0 : func (p *Properties) String() string {
244 0 : var buf bytes.Buffer
245 0 : v := reflect.ValueOf(*p)
246 0 : writeProperties(p.Loaded, v, &buf)
247 0 :
248 0 : // Write the UserProperties.
249 0 : keys := make([]string, 0, len(p.UserProperties))
250 0 : for key := range p.UserProperties {
251 0 : keys = append(keys, key)
252 0 : }
253 0 : sort.Strings(keys)
254 0 : for _, key := range keys {
255 0 : fmt.Fprintf(&buf, "%s: %s\n", key, p.UserProperties[key])
256 0 : }
257 0 : return buf.String()
258 : }
259 :
// load populates p from the raw properties block b. Every key in the block
// that matches a known `prop` tag is decoded into the corresponding struct
// field (booleans compare against propBoolTrue, uint32s are little-endian
// fixed-width, uint64s are uvarints, strings are interned), and the field's
// offset is recorded in p.Loaded. All remaining keys are stored as user
// properties unless listed in deniedUserProperties.
//
// NOTE(review): blockOffset is accepted but unused in this body — presumably
// retained for signature compatibility; confirm before removing.
func (p *Properties) load(
	b block, blockOffset uint64, deniedUserProperties map[string]struct{},
) error {
	i, err := newRawBlockIter(bytes.Compare, b)
	if err != nil {
		return err
	}
	p.Loaded = make(map[uintptr]struct{})
	v := reflect.ValueOf(p).Elem()

	for valid := i.First(); valid; valid = i.Next() {
		if f, ok := propTagMap[string(i.Key().UserKey)]; ok {
			// Known property: record that it was present on disk, then
			// decode according to the field's kind.
			p.Loaded[f.Offset] = struct{}{}
			field := v.FieldByIndex(f.Index)
			switch f.Type.Kind() {
			case reflect.Bool:
				field.SetBool(bytes.Equal(i.Value(), propBoolTrue))
			case reflect.Uint32:
				field.SetUint(uint64(binary.LittleEndian.Uint32(i.Value())))
			case reflect.Uint64:
				n, _ := binary.Uvarint(i.Value())
				field.SetUint(n)
			case reflect.String:
				// Interned: values like comparator names recur across tables.
				field.SetString(intern.Bytes(i.Value()))
			default:
				// generateTagMaps only admits the four kinds above.
				panic("not reached")
			}
			continue
		}
		if p.UserProperties == nil {
			p.UserProperties = make(map[string]string)
		}

		if _, denied := deniedUserProperties[string(i.Key().UserKey)]; !denied {
			p.UserProperties[intern.Bytes(i.Key().UserKey)] = string(i.Value())
		}
	}
	return nil
}
299 :
300 0 : func (p *Properties) saveBool(m map[string][]byte, offset uintptr, value bool) {
301 0 : tag := propOffsetTagMap[offset]
302 0 : if value {
303 0 : m[tag] = propBoolTrue
304 0 : } else {
305 0 : m[tag] = propBoolFalse
306 0 : }
307 : }
308 :
309 1 : func (p *Properties) saveUint32(m map[string][]byte, offset uintptr, value uint32) {
310 1 : var buf [4]byte
311 1 : binary.LittleEndian.PutUint32(buf[:], value)
312 1 : m[propOffsetTagMap[offset]] = buf[:]
313 1 : }
314 :
315 0 : func (p *Properties) saveUint64(m map[string][]byte, offset uintptr, value uint64) {
316 0 : var buf [8]byte
317 0 : binary.LittleEndian.PutUint64(buf[:], value)
318 0 : m[propOffsetTagMap[offset]] = buf[:]
319 0 : }
320 :
321 : var _ = (*Properties).saveUint64
322 :
323 1 : func (p *Properties) saveUvarint(m map[string][]byte, offset uintptr, value uint64) {
324 1 : var buf [10]byte
325 1 : n := binary.PutUvarint(buf[:], value)
326 1 : m[propOffsetTagMap[offset]] = buf[:n]
327 1 : }
328 :
329 1 : func (p *Properties) saveString(m map[string][]byte, offset uintptr, value string) {
330 1 : m[propOffsetTagMap[offset]] = []byte(value)
331 1 : }
332 :
// save serializes the properties into the given raw block writer. All values
// (user properties included) are first collected into a map keyed by tag,
// then written out in sorted key order, as the properties block requires
// sorted keys. Several fields are serialized conditionally (only when
// non-zero, or only for certain table formats) to preserve byte-for-byte
// compatibility with RocksDB-written files.
func (p *Properties) save(tblFormat TableFormat, w *rawBlockWriter) {
	m := make(map[string][]byte)
	for k, v := range p.UserProperties {
		m[k] = []byte(v)
	}

	// Strings are written only when non-empty; a known property key is never
	// emitted with an empty value.
	if p.ComparerName != "" {
		p.saveString(m, unsafe.Offsetof(p.ComparerName), p.ComparerName)
	}
	if p.CompressionName != "" {
		p.saveString(m, unsafe.Offsetof(p.CompressionName), p.CompressionName)
	}
	if p.CompressionOptions != "" {
		p.saveString(m, unsafe.Offsetof(p.CompressionOptions), p.CompressionOptions)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.DataSize), p.DataSize)
	if p.ExternalFormatVersion != 0 {
		p.saveUint32(m, unsafe.Offsetof(p.ExternalFormatVersion), p.ExternalFormatVersion)
	}
	if p.FilterPolicyName != "" {
		p.saveString(m, unsafe.Offsetof(p.FilterPolicyName), p.FilterPolicyName)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.FilterSize), p.FilterSize)
	if p.IndexPartitions != 0 {
		// Partitioned (two-level) index: the top-level index size only has
		// meaning when partitions exist.
		p.saveUvarint(m, unsafe.Offsetof(p.IndexPartitions), p.IndexPartitions)
		p.saveUvarint(m, unsafe.Offsetof(p.TopLevelIndexSize), p.TopLevelIndexSize)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.IndexSize), p.IndexSize)
	p.saveUint32(m, unsafe.Offsetof(p.IndexType), p.IndexType)
	if p.IsStrictObsolete {
		p.saveBool(m, unsafe.Offsetof(p.IsStrictObsolete), p.IsStrictObsolete)
	}
	if p.MergerName != "" {
		p.saveString(m, unsafe.Offsetof(p.MergerName), p.MergerName)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.NumDataBlocks), p.NumDataBlocks)
	p.saveUvarint(m, unsafe.Offsetof(p.NumEntries), p.NumEntries)
	p.saveUvarint(m, unsafe.Offsetof(p.NumDeletions), p.NumDeletions)
	if p.NumSizedDeletions > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumSizedDeletions), p.NumSizedDeletions)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.NumMergeOperands), p.NumMergeOperands)
	p.saveUvarint(m, unsafe.Offsetof(p.NumRangeDeletions), p.NumRangeDeletions)
	// NB: We only write out some properties for Pebble formats. This isn't
	// strictly necessary because unrecognized properties are interpreted as
	// user-defined properties, however writing them prevents byte-for-byte
	// equivalence with RocksDB files that some of our testing requires.
	if p.RawPointTombstoneKeySize > 0 && tblFormat >= TableFormatPebblev1 {
		p.saveUvarint(m, unsafe.Offsetof(p.RawPointTombstoneKeySize), p.RawPointTombstoneKeySize)
	}
	// NOTE(review): unlike the key-size property above, this one is not gated
	// on tblFormat — confirm whether the asymmetry is intentional.
	if p.RawPointTombstoneValueSize > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.RawPointTombstoneValueSize), p.RawPointTombstoneValueSize)
	}
	if p.NumRangeKeys() > 0 {
		// Range-key properties are written all-or-nothing: if any range key
		// exists, all five related properties are serialized.
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeyDels), p.NumRangeKeyDels)
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeySets), p.NumRangeKeySets)
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeyUnsets), p.NumRangeKeyUnsets)
		p.saveUvarint(m, unsafe.Offsetof(p.RawRangeKeyKeySize), p.RawRangeKeyKeySize)
		p.saveUvarint(m, unsafe.Offsetof(p.RawRangeKeyValueSize), p.RawRangeKeyValueSize)
	}
	if p.NumValueBlocks > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumValueBlocks), p.NumValueBlocks)
	}
	if p.NumValuesInValueBlocks > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumValuesInValueBlocks), p.NumValuesInValueBlocks)
	}
	if p.PropertyCollectorNames != "" {
		p.saveString(m, unsafe.Offsetof(p.PropertyCollectorNames), p.PropertyCollectorNames)
	}
	if p.SnapshotPinnedKeys > 0 {
		// Snapshot-pinned stats are likewise written as a group.
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedKeys), p.SnapshotPinnedKeys)
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedKeySize), p.SnapshotPinnedKeySize)
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedValueSize), p.SnapshotPinnedValueSize)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.RawKeySize), p.RawKeySize)
	p.saveUvarint(m, unsafe.Offsetof(p.RawValueSize), p.RawValueSize)
	if p.ValueBlocksSize > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.ValueBlocksSize), p.ValueBlocksSize)
	}

	if tblFormat < TableFormatPebblev1 {
		// Pre-Pebble (RocksDB-compatible) formats: emit the fixed RocksDB
		// properties with placeholder values for byte-for-byte equivalence.
		m["rocksdb.column.family.id"] = binary.AppendUvarint([]byte(nil), math.MaxInt32)
		m["rocksdb.fixed.key.length"] = []byte{0x00}
		m["rocksdb.index.key.is.user.key"] = []byte{0x00}
		m["rocksdb.index.value.is.delta.encoded"] = []byte{0x00}
		m["rocksdb.oldest.key.time"] = []byte{0x00}
		m["rocksdb.creation.time"] = []byte{0x00}
		m["rocksdb.format.version"] = []byte{0x00}
	}

	// The properties block requires its keys in sorted order.
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		w.add(InternalKey{UserKey: []byte(key)}, m[key])
	}
}
|