Line data Source code
// Copyright 2013 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

// Package bloom implements Bloom filters.
package bloom // import "github.com/cockroachdb/pebble/bloom"

import (
	"encoding/binary"
	"fmt"
	"sync"

	"github.com/cockroachdb/pebble/internal/base"
)

const (
	// cacheLineSize is the assumed CPU cache line size in bytes. All probes
	// for a given key are confined to one cache-line-sized region of the
	// filter, so a membership test touches at most one cache line.
	cacheLineSize = 64
	cacheLineBits = cacheLineSize * 8
)

// tableFilter is a serialized table-level bloom filter, matching the RocksDB
// full-file filter format. Layout (as written by tableFilterWriter.Finish):
//
//	[nLines * cacheLineSize bytes of filter bits]
//	[1 byte: number of probes]
//	[4 bytes: little-endian number of cache lines]
type tableFilter []byte

// MayContain reports whether key is possibly in the set: a false result means
// the key is definitely absent; a true result may be a false positive.
func (f tableFilter) MayContain(key []byte) bool {
	if len(f) <= 5 {
		// Not even room for the 5-byte trailer plus one byte of filter
		// data; treat as an empty filter that matches nothing.
		return false
	}
	n := len(f) - 5
	nProbes := f[n]
	nLines := binary.LittleEndian.Uint32(f[n+1:])
	// Derive bits-per-line from the encoded data rather than the local
	// constant, so the on-disk format is self-describing.
	// NOTE(review): assumes nLines > 0 when len(f) > 5; Finish always
	// writes an odd nLines >= 1 whenever any filter bytes are emitted.
	cacheLineBits := 8 * (uint32(n) / nLines)

	h := hash(key)
	delta := h>>17 | h<<15 // rotate right 17 bits
	// b is the bit offset of the cache line this key maps to; every probe
	// for the key stays inside that one line.
	b := (h % nLines) * cacheLineBits

	for j := uint8(0); j < nProbes; j++ {
		// Double hashing: probe i examines bit h + i*delta within the line.
		bitPos := b + (h % cacheLineBits)
		if f[bitPos/8]&(1<<(bitPos%8)) == 0 {
			return false
		}
		h += delta
	}
	return true
}

// calculateProbes returns the number of probes to use for the given
// bits-per-key setting, clamped to [1, 30]. The theoretical optimum is
// ln(2) * bitsPerKey.
func calculateProbes(bitsPerKey int) uint32 {
	// We intentionally round down to reduce probing cost a little bit
	n := uint32(float64(bitsPerKey) * 0.69) // 0.69 =~ ln(2)
	if n < 1 {
		n = 1
	}
	if n > 30 {
		n = 30
	}
	return n
}

// extend appends n zero bytes to b. It returns the overall slice (of length
// n+len(originalB)) and the slice of n trailing zeroes.
func extend(b []byte, n int) (overall, trailer []byte) {
	want := n + len(b)
	if want <= cap(b) {
		// Reuse the existing capacity; the reslice may expose stale bytes,
		// so zero the trailer explicitly.
		overall = b[:want]
		trailer = overall[len(b):]
		clear(trailer)
	} else {
		// Grow the capacity exponentially, with a 1KiB minimum.
		c := 1024
		for c < want {
			c += c / 4
		}
		overall = make([]byte, want, c)
		trailer = overall[len(b):]
		copy(overall, b)
	}
	return overall, trailer
}

// hash implements a hashing algorithm similar to the Murmur hash. It must
// produce exactly the same values as the RocksDB/LevelDB bloom hash so that
// filters are interchangeable between implementations.
func hash(b []byte) uint32 {
	const (
		seed = 0xbc9f1d34
		m    = 0xc6a4a793
	)
	h := uint32(seed) ^ uint32(uint64(uint32(len(b))*m))
	// Consume the input four bytes (little-endian) at a time.
	for ; len(b) >= 4; b = b[4:] {
		h += uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
		h *= m
		h ^= h >> 16
	}

	// The code below first casts each byte to a signed 8-bit integer. This is
	// necessary to match RocksDB's behavior. Note that the `byte` type in Go is
	// unsigned. What is the difference between casting a signed 8-bit value vs
	// unsigned 8-bit value into an unsigned 32-bit value?
	// Sign-extension. Consider the value 250 which has the bit pattern 11111010:
	//
	//	uint32(250)       = 00000000000000000000000011111010
	//	uint32(int8(250)) = 11111111111111111111111111111010
	//
	// Note that the original LevelDB code did not explicitly cast to a signed
	// 8-bit value which left the behavior dependent on whether C characters were
	// signed or unsigned which is a compiler flag for gcc (-funsigned-char).
	switch len(b) {
	case 3:
		h += uint32(int8(b[2])) << 16
		fallthrough
	case 2:
		h += uint32(int8(b[1])) << 8
		fallthrough
	case 1:
		h += uint32(int8(b[0]))
		h *= m
		h ^= h >> 24
	}
	return h
}

// hashBlockLen is the number of key hashes buffered per pooled block.
const hashBlockLen = 16384

// hashBlock is a fixed-size buffer of key hashes, pooled to avoid repeated
// large allocations across filter writers.
type hashBlock [hashBlockLen]uint32

var hashBlockPool = sync.Pool{
	New: func() interface{} {
		return &hashBlock{}
	},
}

// tableFilterWriter accumulates key hashes and serializes them into a
// tableFilter in Finish. It stores only 32-bit hashes (not the keys
// themselves), in fixed-size pooled blocks.
type tableFilterWriter struct {
	bitsPerKey int

	numHashes int
	// We store the hashes in blocks.
	blocks   []*hashBlock
	lastHash uint32

	// Initial "in-line" storage for the blocks slice (to avoid some small
	// allocations).
	blocksBuf [16]*hashBlock
}

// newTableFilterWriter returns a writer that builds a table filter using the
// given number of bits per key.
func newTableFilterWriter(bitsPerKey int) *tableFilterWriter {
	w := &tableFilterWriter{
		bitsPerKey: bitsPerKey,
	}
	w.blocks = w.blocksBuf[:0]
	return w
}

// AddKey implements the base.FilterWriter interface.
func (w *tableFilterWriter) AddKey(key []byte) {
	h := hash(key)
	// Skip a hash identical to the immediately preceding one; sorted input
	// makes consecutive duplicate keys common, and duplicates set no new bits.
	if w.numHashes != 0 && h == w.lastHash {
		return
	}
	ofs := w.numHashes % hashBlockLen
	if ofs == 0 {
		// Time for a new block.
		w.blocks = append(w.blocks, hashBlockPool.Get().(*hashBlock))
	}
	w.blocks[len(w.blocks)-1][ofs] = h
	w.numHashes++
	w.lastHash = h
}

// Finish implements the base.FilterWriter interface. It appends the encoded
// filter to buf, returns the result, and resets the writer for reuse.
func (w *tableFilterWriter) Finish(buf []byte) []byte {
	// The table filter format matches the RocksDB full-file filter format.
	var nLines int
	if w.numHashes != 0 {
		// Round the bit count up to whole cache lines.
		nLines = (w.numHashes*w.bitsPerKey + cacheLineBits - 1) / (cacheLineBits)
		// Make nLines an odd number to make sure more bits are involved when
		// determining which block.
		if nLines%2 == 0 {
			nLines++
		}
	}

	nBytes := nLines * cacheLineSize
	// +5: 4 bytes for num-lines, 1 byte for num-probes
	buf, filter := extend(buf, nBytes+5)

	if nLines != 0 {
		nProbes := calculateProbes(w.bitsPerKey)
		for bIdx, b := range w.blocks {
			// The final block may be only partially full.
			length := hashBlockLen
			if bIdx == len(w.blocks)-1 && w.numHashes%hashBlockLen != 0 {
				length = w.numHashes % hashBlockLen
			}
			for _, h := range b[:length] {
				// Mirror of tableFilter.MayContain: same double-hashing
				// scheme, setting bits instead of testing them.
				delta := h>>17 | h<<15 // rotate right 17 bits
				b := (h % uint32(nLines)) * (cacheLineBits)
				for i := uint32(0); i < nProbes; i++ {
					bitPos := b + (h % cacheLineBits)
					filter[bitPos/8] |= (1 << (bitPos % 8))
					h += delta
				}
			}
		}
		filter[nBytes] = byte(nProbes)
		binary.LittleEndian.PutUint32(filter[nBytes+1:], uint32(nLines))
	}

	// Release the hash blocks.
	for i, b := range w.blocks {
		hashBlockPool.Put(b)
		w.blocks[i] = nil
	}
	w.blocks = w.blocks[:0]
	w.numHashes = 0
	return buf
}

// FilterPolicy implements the FilterPolicy interface from the pebble package.
//
// The integer value is the approximate number of bits used per key. A good
// value is 10, which yields a filter with ~ 1% false positive rate.
type FilterPolicy int

var _ base.FilterPolicy = FilterPolicy(0)

// Name implements the pebble.FilterPolicy interface.
func (p FilterPolicy) Name() string {
	// This string looks arbitrary, but its value is written to LevelDB .sst
	// files, and should be this exact value to be compatible with those files
	// and with the C++ LevelDB code.
	return "rocksdb.BuiltinBloomFilter"
}

// MayContain implements the pebble.FilterPolicy interface.
231 2 : func (p FilterPolicy) MayContain(ftype base.FilterType, f, key []byte) bool { 232 2 : switch ftype { 233 2 : case base.TableFilter: 234 2 : return tableFilter(f).MayContain(key) 235 0 : default: 236 0 : panic(fmt.Sprintf("unknown filter type: %v", ftype)) 237 : } 238 : } 239 : 240 : // NewWriter implements the pebble.FilterPolicy interface. 241 2 : func (p FilterPolicy) NewWriter(ftype base.FilterType) base.FilterWriter { 242 2 : switch ftype { 243 2 : case base.TableFilter: 244 2 : return newTableFilterWriter(int(p)) 245 0 : default: 246 0 : panic(fmt.Sprintf("unknown filter type: %v", ftype)) 247 : } 248 : }