Line data Source code
1 : // Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package sstable
6 :
7 : import (
8 : "bytes"
9 : "encoding/binary"
10 : "fmt"
11 : "math"
12 : "reflect"
13 : "sort"
14 : "unsafe"
15 :
16 : "github.com/cockroachdb/pebble/internal/intern"
17 : )
18 :
// propertiesBlockRestartInterval is the block restart interval used for the
// properties block. math.MaxInt32 effectively disables restart points: the
// block is written as one run and read via linear iteration (see load).
const propertiesBlockRestartInterval = math.MaxInt32

// propTagMap maps an on-disk property tag (e.g. "rocksdb.num.entries") to the
// reflect.StructField of the Properties field holding that property's value.
// Populated once by generateTagMaps via init.
var propTagMap = make(map[string]reflect.StructField)
// propBoolTrue/propBoolFalse are the on-disk encodings of boolean properties.
var propBoolTrue = []byte{'1'}
var propBoolFalse = []byte{'0'}

// propOffsetTagMap is the inverse of propTagMap: the byte offset of a field
// within Properties maps to the tag it is serialized under. Used by the
// save* helpers, which identify fields via unsafe.Offsetof.
var propOffsetTagMap = make(map[uintptr]string)
26 :
27 2 : func generateTagMaps(t reflect.Type, indexPrefix []int) {
28 2 : for i := 0; i < t.NumField(); i++ {
29 2 : f := t.Field(i)
30 2 : if f.Type.Kind() == reflect.Struct {
31 2 : if tag := f.Tag.Get("prop"); i == 0 && tag == "pebble.embbeded_common_properties" {
32 2 : // CommonProperties struct embedded in Properties. Note that since
33 2 : // CommonProperties is placed at the top of properties we can use
34 2 : // the offsets of the fields within CommonProperties to determine
35 2 : // the offsets of those fields within Properties.
36 2 : generateTagMaps(f.Type, []int{i})
37 2 : continue
38 : }
39 0 : panic("pebble: unknown struct type in Properties")
40 : }
41 2 : if tag := f.Tag.Get("prop"); tag != "" {
42 2 : switch f.Type.Kind() {
43 2 : case reflect.Bool:
44 2 : case reflect.Uint32:
45 2 : case reflect.Uint64:
46 2 : case reflect.String:
47 0 : default:
48 0 : panic(fmt.Sprintf("unsupported property field type: %s %s", f.Name, f.Type))
49 : }
50 2 : if len(indexPrefix) > 0 {
51 2 : // Prepend the index prefix so that we can use FieldByIndex on the top-level struct.
52 2 : f.Index = append(indexPrefix[:len(indexPrefix):len(indexPrefix)], f.Index...)
53 2 : }
54 2 : propTagMap[tag] = f
55 2 : propOffsetTagMap[f.Offset] = tag
56 : }
57 : }
58 : }
59 :
// init builds the tag lookup tables from the Properties struct definition
// before any sstable properties are loaded or saved.
func init() {
	generateTagMaps(reflect.TypeOf(Properties{}), nil)
}
63 :
64 : // CommonProperties holds properties for either a virtual or a physical sstable. This
65 : // can be used by code which doesn't care to make the distinction between physical
66 : // and virtual sstables properties.
67 : //
68 : // For virtual sstables, fields are constructed through extrapolation upon virtual
69 : // reader construction. See MakeVirtualReader for implementation details.
70 : //
71 : // NB: The values of these properties can affect correctness. For example,
72 : // if NumRangeKeySets == 0, but the sstable actually contains range keys, then
73 : // the iterators will behave incorrectly.
// CommonProperties holds properties for either a virtual or a physical sstable. This
// can be used by code which doesn't care to make the distinction between physical
// and virtual sstables properties.
//
// For virtual sstables, fields are constructed through extrapolation upon virtual
// reader construction. See MakeVirtualReader for implementation details.
//
// NB: The values of these properties can affect correctness. For example,
// if NumRangeKeySets == 0, but the sstable actually contains range keys, then
// the iterators will behave incorrectly.
//
// Each `prop` tag is the key the field is serialized under in the properties
// block; generateTagMaps indexes fields by these tags at init time.
type CommonProperties struct {
	// The number of entries in this table.
	NumEntries uint64 `prop:"rocksdb.num.entries"`
	// Total raw key size.
	RawKeySize uint64 `prop:"rocksdb.raw.key.size"`
	// Total raw value size.
	RawValueSize uint64 `prop:"rocksdb.raw.value.size"`
	// Total raw key size of point deletion tombstones. This value is comparable
	// to RawKeySize.
	RawPointTombstoneKeySize uint64 `prop:"pebble.raw.point-tombstone.key.size"`
	// Sum of the raw value sizes carried by point deletion tombstones
	// containing size estimates. See the DeleteSized key kind. This value is
	// comparable to Raw{Key,Value}Size.
	RawPointTombstoneValueSize uint64 `prop:"pebble.raw.point-tombstone.value.size"`
	// The number of point deletion entries ("tombstones") in this table that
	// carry a size hint indicating the size of the value the tombstone deletes.
	NumSizedDeletions uint64 `prop:"pebble.num.deletions.sized"`
	// The number of deletion entries in this table, including both point and
	// range deletions.
	NumDeletions uint64 `prop:"rocksdb.deleted.keys"`
	// The number of range deletions in this table.
	NumRangeDeletions uint64 `prop:"rocksdb.num.range-deletions"`
	// The number of RANGEKEYDELs in this table.
	NumRangeKeyDels uint64 `prop:"pebble.num.range-key-dels"`
	// The number of RANGEKEYSETs in this table.
	NumRangeKeySets uint64 `prop:"pebble.num.range-key-sets"`
	// Total size of value blocks and value index block. Only serialized if > 0.
	ValueBlocksSize uint64 `prop:"pebble.value-blocks.size"`
}
103 :
104 : // String is only used for testing purposes.
105 1 : func (c *CommonProperties) String() string {
106 1 : var buf bytes.Buffer
107 1 : v := reflect.ValueOf(*c)
108 1 : loaded := make(map[uintptr]struct{})
109 1 : writeProperties(loaded, v, &buf)
110 1 : return buf.String()
111 1 : }
112 :
113 : // NumPointDeletions is the number of point deletions in the sstable. For virtual
114 : // sstables, this is an estimate.
115 2 : func (c *CommonProperties) NumPointDeletions() uint64 {
116 2 : return c.NumDeletions - c.NumRangeDeletions
117 2 : }
118 :
119 : // Properties holds the sstable property values. The properties are
120 : // automatically populated during sstable creation and load from the properties
121 : // meta block when an sstable is opened.
// Properties holds the sstable property values. The properties are
// automatically populated during sstable creation and load from the properties
// meta block when an sstable is opened.
type Properties struct {
	// CommonProperties needs to be at the top of the Properties struct so that the
	// offsets of the fields in CommonProperties match the offsets of the embedded
	// fields of CommonProperties in Properties.
	//
	// NB: the "embbeded" misspelling in the tag below is matched verbatim by
	// generateTagMaps; do not "fix" one without the other.
	CommonProperties `prop:"pebble.embbeded_common_properties"`

	// The name of the comparer used in this table.
	ComparerName string `prop:"rocksdb.comparator"`
	// The compression algorithm used to compress blocks.
	CompressionName string `prop:"rocksdb.compression"`
	// The compression options used to compress blocks.
	CompressionOptions string `prop:"rocksdb.compression_options"`
	// The total size of all data blocks.
	DataSize uint64 `prop:"rocksdb.data.size"`
	// The external sstable version format. Version 2 is the one RocksDB has been
	// using since 5.13. RocksDB only uses the global sequence number for an
	// sstable if this property has been set.
	ExternalFormatVersion uint32 `prop:"rocksdb.external_sst_file.version"`
	// The name of the filter policy used in this table. Empty if no filter
	// policy is used.
	FilterPolicyName string `prop:"rocksdb.filter.policy"`
	// The size of filter block.
	FilterSize uint64 `prop:"rocksdb.filter.size"`
	// Total number of index partitions if kTwoLevelIndexSearch is used.
	IndexPartitions uint64 `prop:"rocksdb.index.partitions"`
	// The size of index block.
	IndexSize uint64 `prop:"rocksdb.index.size"`
	// The index type. TODO(peter): add a more detailed description.
	IndexType uint32 `prop:"rocksdb.block.based.table.index.type"`
	// For formats >= TableFormatPebblev4, this is set to true if the obsolete
	// bit is strict for all the point keys.
	IsStrictObsolete bool `prop:"pebble.obsolete.is_strict"`
	// The name of the merger used in this table. Empty if no merger is used.
	MergerName string `prop:"rocksdb.merge.operator"`
	// The number of blocks in this table.
	NumDataBlocks uint64 `prop:"rocksdb.num.data.blocks"`
	// The number of merge operands in the table.
	NumMergeOperands uint64 `prop:"rocksdb.merge.operands"`
	// The number of RANGEKEYUNSETs in this table.
	NumRangeKeyUnsets uint64 `prop:"pebble.num.range-key-unsets"`
	// The number of value blocks in this table. Only serialized if > 0.
	NumValueBlocks uint64 `prop:"pebble.num.value-blocks"`
	// The number of values stored in value blocks. Only serialized if > 0.
	NumValuesInValueBlocks uint64 `prop:"pebble.num.values.in.value-blocks"`
	// A comma separated list of names of the property collectors used in this
	// table.
	PropertyCollectorNames string `prop:"rocksdb.property.collectors"`
	// Total raw rangekey key size.
	RawRangeKeyKeySize uint64 `prop:"pebble.raw.range-key.key.size"`
	// Total raw rangekey value size.
	RawRangeKeyValueSize uint64 `prop:"pebble.raw.range-key.value.size"`
	// The total number of keys in this table that were pinned by open snapshots.
	SnapshotPinnedKeys uint64 `prop:"pebble.num.snapshot-pinned-keys"`
	// The cumulative bytes of keys in this table that were pinned by
	// open snapshots. This value is comparable to RawKeySize.
	SnapshotPinnedKeySize uint64 `prop:"pebble.raw.snapshot-pinned-keys.size"`
	// The cumulative bytes of values in this table that were pinned by
	// open snapshots. This value is comparable to RawValueSize.
	SnapshotPinnedValueSize uint64 `prop:"pebble.raw.snapshot-pinned-values.size"`
	// Size of the top-level index if kTwoLevelIndexSearch is used.
	TopLevelIndexSize uint64 `prop:"rocksdb.top-level.index.size"`
	// User collected properties. No `prop` tag: entries are serialized under
	// their own keys (see save) rather than via the tag maps.
	UserProperties map[string]string

	// Loaded set indicating which fields have been loaded from disk. Indexed by
	// the field's byte offset within the struct
	// (reflect.StructField.Offset). Only set if the properties have been loaded
	// from a file. Only exported for testing purposes.
	Loaded map[uintptr]struct{}
}
192 :
193 : // NumPointDeletions returns the number of point deletions in this table.
194 1 : func (p *Properties) NumPointDeletions() uint64 {
195 1 : return p.NumDeletions - p.NumRangeDeletions
196 1 : }
197 :
198 : // NumRangeKeys returns a count of the number of range keys in this table.
199 2 : func (p *Properties) NumRangeKeys() uint64 {
200 2 : return p.NumRangeKeyDels + p.NumRangeKeySets + p.NumRangeKeyUnsets
201 2 : }
202 :
203 1 : func writeProperties(loaded map[uintptr]struct{}, v reflect.Value, buf *bytes.Buffer) {
204 1 : vt := v.Type()
205 1 : for i := 0; i < v.NumField(); i++ {
206 1 : ft := vt.Field(i)
207 1 : if ft.Type.Kind() == reflect.Struct {
208 1 : // Embedded struct within the properties.
209 1 : writeProperties(loaded, v.Field(i), buf)
210 1 : continue
211 : }
212 1 : tag := ft.Tag.Get("prop")
213 1 : if tag == "" {
214 1 : continue
215 : }
216 :
217 1 : f := v.Field(i)
218 1 : // TODO(peter): Use f.IsZero() when we can rely on go1.13.
219 1 : if zero := reflect.Zero(f.Type()); zero.Interface() == f.Interface() {
220 1 : // Skip printing of zero values which were not loaded from disk.
221 1 : if _, ok := loaded[ft.Offset]; !ok {
222 1 : continue
223 : }
224 : }
225 :
226 1 : fmt.Fprintf(buf, "%s: ", tag)
227 1 : switch ft.Type.Kind() {
228 0 : case reflect.Bool:
229 0 : fmt.Fprintf(buf, "%t\n", f.Bool())
230 1 : case reflect.Uint32:
231 1 : fmt.Fprintf(buf, "%d\n", f.Uint())
232 1 : case reflect.Uint64:
233 1 : fmt.Fprintf(buf, "%d\n", f.Uint())
234 1 : case reflect.String:
235 1 : fmt.Fprintf(buf, "%s\n", f.String())
236 0 : default:
237 0 : panic("not reached")
238 : }
239 : }
240 : }
241 :
242 1 : func (p *Properties) String() string {
243 1 : var buf bytes.Buffer
244 1 : v := reflect.ValueOf(*p)
245 1 : writeProperties(p.Loaded, v, &buf)
246 1 :
247 1 : // Write the UserProperties.
248 1 : keys := make([]string, 0, len(p.UserProperties))
249 1 : for key := range p.UserProperties {
250 1 : keys = append(keys, key)
251 1 : }
252 1 : sort.Strings(keys)
253 1 : for _, key := range keys {
254 1 : fmt.Fprintf(&buf, "%s: %s\n", key, p.UserProperties[key])
255 1 : }
256 1 : return buf.String()
257 : }
258 :
// load populates p from the raw contents of a properties block. Entries whose
// key matches a known `prop` tag are decoded into the corresponding struct
// field and recorded in p.Loaded; all other entries become UserProperties,
// except keys present in deniedUserProperties, which are dropped.
//
// NOTE(review): blockOffset is unused in this body — presumably kept for
// signature compatibility with callers; confirm before removing.
func (p *Properties) load(
	b block, blockOffset uint64, deniedUserProperties map[string]struct{},
) error {
	i, err := newRawBlockIter(bytes.Compare, b)
	if err != nil {
		return err
	}
	p.Loaded = make(map[uintptr]struct{})
	v := reflect.ValueOf(p).Elem()

	for valid := i.First(); valid; valid = i.Next() {
		if f, ok := propTagMap[string(i.Key().UserKey)]; ok {
			// Known property: record it as loaded and decode the value into
			// the field located via the index path built by generateTagMaps.
			p.Loaded[f.Offset] = struct{}{}
			field := v.FieldByIndex(f.Index)
			switch f.Type.Kind() {
			case reflect.Bool:
				// Bools are serialized as a single '1'/'0' byte (see saveBool).
				field.SetBool(bytes.Equal(i.Value(), propBoolTrue))
			case reflect.Uint32:
				// Uint32s are fixed-width little-endian (see saveUint32).
				field.SetUint(uint64(binary.LittleEndian.Uint32(i.Value())))
			case reflect.Uint64:
				// Uint64s are varint-encoded (see saveUvarint).
				n, _ := binary.Uvarint(i.Value())
				field.SetUint(n)
			case reflect.String:
				// Intern the value: strings such as comparer and merger names
				// repeat across many open sstables.
				field.SetString(intern.Bytes(i.Value()))
			default:
				// generateTagMaps only admits the four kinds above.
				panic("not reached")
			}
			continue
		}
		// Unknown key: surface it as a user property unless denied.
		if p.UserProperties == nil {
			p.UserProperties = make(map[string]string)
		}

		if _, denied := deniedUserProperties[string(i.Key().UserKey)]; !denied {
			p.UserProperties[intern.Bytes(i.Key().UserKey)] = string(i.Value())
		}
	}
	return nil
}
298 :
299 1 : func (p *Properties) saveBool(m map[string][]byte, offset uintptr, value bool) {
300 1 : tag := propOffsetTagMap[offset]
301 1 : if value {
302 1 : m[tag] = propBoolTrue
303 1 : } else {
304 0 : m[tag] = propBoolFalse
305 0 : }
306 : }
307 :
308 2 : func (p *Properties) saveUint32(m map[string][]byte, offset uintptr, value uint32) {
309 2 : var buf [4]byte
310 2 : binary.LittleEndian.PutUint32(buf[:], value)
311 2 : m[propOffsetTagMap[offset]] = buf[:]
312 2 : }
313 :
314 0 : func (p *Properties) saveUint64(m map[string][]byte, offset uintptr, value uint64) {
315 0 : var buf [8]byte
316 0 : binary.LittleEndian.PutUint64(buf[:], value)
317 0 : m[propOffsetTagMap[offset]] = buf[:]
318 0 : }
319 :
320 : var _ = (*Properties).saveUint64
321 :
322 2 : func (p *Properties) saveUvarint(m map[string][]byte, offset uintptr, value uint64) {
323 2 : var buf [10]byte
324 2 : n := binary.PutUvarint(buf[:], value)
325 2 : m[propOffsetTagMap[offset]] = buf[:n]
326 2 : }
327 :
328 2 : func (p *Properties) saveString(m map[string][]byte, offset uintptr, value string) {
329 2 : m[propOffsetTagMap[offset]] = []byte(value)
330 2 : }
331 :
// save serializes the properties into w as sorted key/value pairs. Values are
// first staged in a map (user properties included; a user property sharing a
// key with a typed property would be overwritten by the typed value), then
// emitted in sorted key order as required by the raw block format. Zero or
// empty values are elided for most optional properties.
func (p *Properties) save(tblFormat TableFormat, w *rawBlockWriter) {
	m := make(map[string][]byte)
	for k, v := range p.UserProperties {
		m[k] = []byte(v)
	}

	if p.ComparerName != "" {
		p.saveString(m, unsafe.Offsetof(p.ComparerName), p.ComparerName)
	}
	if p.CompressionName != "" {
		p.saveString(m, unsafe.Offsetof(p.CompressionName), p.CompressionName)
	}
	if p.CompressionOptions != "" {
		p.saveString(m, unsafe.Offsetof(p.CompressionOptions), p.CompressionOptions)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.DataSize), p.DataSize)
	if p.ExternalFormatVersion != 0 {
		p.saveUint32(m, unsafe.Offsetof(p.ExternalFormatVersion), p.ExternalFormatVersion)
	}
	if p.FilterPolicyName != "" {
		p.saveString(m, unsafe.Offsetof(p.FilterPolicyName), p.FilterPolicyName)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.FilterSize), p.FilterSize)
	if p.IndexPartitions != 0 {
		// Partition count and top-level index size only make sense for
		// two-level indexes, so they are written together.
		p.saveUvarint(m, unsafe.Offsetof(p.IndexPartitions), p.IndexPartitions)
		p.saveUvarint(m, unsafe.Offsetof(p.TopLevelIndexSize), p.TopLevelIndexSize)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.IndexSize), p.IndexSize)
	p.saveUint32(m, unsafe.Offsetof(p.IndexType), p.IndexType)
	if p.IsStrictObsolete {
		p.saveBool(m, unsafe.Offsetof(p.IsStrictObsolete), p.IsStrictObsolete)
	}
	if p.MergerName != "" {
		p.saveString(m, unsafe.Offsetof(p.MergerName), p.MergerName)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.NumDataBlocks), p.NumDataBlocks)
	p.saveUvarint(m, unsafe.Offsetof(p.NumEntries), p.NumEntries)
	p.saveUvarint(m, unsafe.Offsetof(p.NumDeletions), p.NumDeletions)
	if p.NumSizedDeletions > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumSizedDeletions), p.NumSizedDeletions)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.NumMergeOperands), p.NumMergeOperands)
	p.saveUvarint(m, unsafe.Offsetof(p.NumRangeDeletions), p.NumRangeDeletions)
	// NB: We only write out some properties for Pebble formats. This isn't
	// strictly necessary because unrecognized properties are interpreted as
	// user-defined properties, however writing them prevents byte-for-byte
	// equivalence with RocksDB files that some of our testing requires.
	if p.RawPointTombstoneKeySize > 0 && tblFormat >= TableFormatPebblev1 {
		p.saveUvarint(m, unsafe.Offsetof(p.RawPointTombstoneKeySize), p.RawPointTombstoneKeySize)
	}
	if p.RawPointTombstoneValueSize > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.RawPointTombstoneValueSize), p.RawPointTombstoneValueSize)
	}
	if p.NumRangeKeys() > 0 {
		// Range-key properties are written as a group whenever any range key
		// is present, so readers see a complete set.
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeyDels), p.NumRangeKeyDels)
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeySets), p.NumRangeKeySets)
		p.saveUvarint(m, unsafe.Offsetof(p.NumRangeKeyUnsets), p.NumRangeKeyUnsets)
		p.saveUvarint(m, unsafe.Offsetof(p.RawRangeKeyKeySize), p.RawRangeKeyKeySize)
		p.saveUvarint(m, unsafe.Offsetof(p.RawRangeKeyValueSize), p.RawRangeKeyValueSize)
	}
	if p.NumValueBlocks > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumValueBlocks), p.NumValueBlocks)
	}
	if p.NumValuesInValueBlocks > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.NumValuesInValueBlocks), p.NumValuesInValueBlocks)
	}
	if p.PropertyCollectorNames != "" {
		p.saveString(m, unsafe.Offsetof(p.PropertyCollectorNames), p.PropertyCollectorNames)
	}
	if p.SnapshotPinnedKeys > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedKeys), p.SnapshotPinnedKeys)
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedKeySize), p.SnapshotPinnedKeySize)
		p.saveUvarint(m, unsafe.Offsetof(p.SnapshotPinnedValueSize), p.SnapshotPinnedValueSize)
	}
	p.saveUvarint(m, unsafe.Offsetof(p.RawKeySize), p.RawKeySize)
	p.saveUvarint(m, unsafe.Offsetof(p.RawValueSize), p.RawValueSize)
	if p.ValueBlocksSize > 0 {
		p.saveUvarint(m, unsafe.Offsetof(p.ValueBlocksSize), p.ValueBlocksSize)
	}

	if tblFormat < TableFormatPebblev1 {
		// RocksDB-compatible formats carry a handful of fixed properties so
		// that output remains byte-for-byte identical to RocksDB's.
		m["rocksdb.column.family.id"] = binary.AppendUvarint([]byte(nil), math.MaxInt32)
		m["rocksdb.fixed.key.length"] = []byte{0x00}
		m["rocksdb.index.key.is.user.key"] = []byte{0x00}
		m["rocksdb.index.value.is.delta.encoded"] = []byte{0x00}
		m["rocksdb.oldest.key.time"] = []byte{0x00}
		m["rocksdb.creation.time"] = []byte{0x00}
		m["rocksdb.format.version"] = []byte{0x00}
	}

	// Emit in sorted key order: block keys must be added in ascending order.
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		w.add(InternalKey{UserKey: []byte(key)}, m[key])
	}
}
|