Line data Source code
// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package cache

import (
	"fmt"
	"os"
	"runtime"
	"sync"
	"sync/atomic"
	"unsafe"

	"github.com/cockroachdb/pebble/internal/buildtags"
	"github.com/cockroachdb/pebble/internal/invariants"
	"github.com/cockroachdb/pebble/internal/manual"
)

// entryType classifies a cache entry for the clock-pro style policy: test,
// cold, or hot.
type entryType int8

const (
	etTest entryType = iota
	etCold
	etHot
)

// String returns a human-readable name for the entry type ("test", "cold",
// "hot"), or "unknown" for any value outside the declared constants.
func (p entryType) String() string {
	switch p {
	case etTest:
		return "test"
	case etCold:
		return "cold"
	case etHot:
		return "hot"
	}
	return "unknown"
}

// entry holds the metadata for a cache entry. The memory for an entry is
// allocated from manually managed memory.
//
// Using manual memory management for entries may seem to be a violation of
// the Cgo pointer rules:
//
//	https://golang.org/cmd/cgo/#hdr-Passing_pointers
//
// Specifically, Go pointers should not be stored in C allocated memory. The
// reason for this rule is that the Go GC will not look at C allocated memory
// to find pointers to Go objects. If the only reference to a Go object is
// stored in C allocated memory, the object will be reclaimed. The entry
// contains various pointers to other entries. This does not violate the Go
// pointer rules because either all entries are manually allocated or none
// are. Also, even if we had a mix of C and Go allocated memory, which would
// violate the rule, we would not have this reclamation problem since the
// lifetime of the entry is managed by the shard containing it, and not
// reliant on the entry pointers.
type entry struct {
	key key
	// The value associated with the entry. The entry holds a reference on the
	// value which is maintained by entry.setValue().
	val *Value
	// blockLink chains entries into an intrusive circular doubly-linked list.
	// A free-standing entry is self-linked (next == prev == the entry itself).
	blockLink struct {
		next *entry
		prev *entry
	}
	// fileLink is a second, independent intrusive circular list, so an entry
	// can be a member of both lists at once.
	fileLink struct {
		next *entry
		prev *entry
	}
	size  int64
	ptype entryType
	// referenced is atomically set to indicate that this entry has been accessed
	// since the last time one of the clock hands swept it.
	referenced atomic.Bool
}

// newEntry allocates an entry (see entryAllocNew), initializes it as a cold
// entry with the given key and size, and self-links both of its intrusive
// lists so it forms a singleton circular list in each.
func newEntry(key key, size int64) *entry {
	e := entryAllocNew()
	*e = entry{
		key:   key,
		size:  size,
		ptype: etCold,
	}
	e.blockLink.next = e
	e.blockLink.prev = e
	e.fileLink.next = e
	e.fileLink.prev = e
	return e
}

// free releases the entry's reference on its value (if any) and returns the
// entry's memory to the allocator. The entry must not be used afterwards.
func (e *entry) free() {
	e.setValue(nil)
	entryAllocFree(e)
}

// next returns the successor of e in the block list, or nil if e is nil.
func (e *entry) next() *entry {
	if e == nil {
		return nil
	}
	return e.blockLink.next
}

// prev returns the predecessor of e in the block list, or nil if e is nil.
func (e *entry) prev() *entry {
	if e == nil {
		return nil
	}
	return e.blockLink.prev
}

// link inserts s into the circular block list immediately before e. s is
// assumed to be self-linked (i.e. not a member of another list).
func (e *entry) link(s *entry) {
	s.blockLink.prev = e.blockLink.prev
	s.blockLink.prev.blockLink.next = s
	s.blockLink.next = e
	s.blockLink.next.blockLink.prev = s
}

// unlink removes e from its circular block list, leaving e self-linked, and
// returns the entry that followed e (which is e itself if e was a singleton).
func (e *entry) unlink() *entry {
	next := e.blockLink.next
	e.blockLink.prev.blockLink.next = e.blockLink.next
	e.blockLink.next.blockLink.prev = e.blockLink.prev
	// Re-self-link so e is a valid singleton list and holds no stale pointers
	// into the list it just left.
	e.blockLink.prev = e
	e.blockLink.next = e
	return next
}

// linkFile inserts s into the circular file list immediately before e.
// Mirrors link, but operates on the fileLink pointers.
func (e *entry) linkFile(s *entry) {
	s.fileLink.prev = e.fileLink.prev
	s.fileLink.prev.fileLink.next = s
	s.fileLink.next = e
	s.fileLink.next.fileLink.prev = s
}

// unlinkFile removes e from its circular file list, leaving e self-linked,
// and returns the entry that followed e. Mirrors unlink for fileLink.
func (e *entry) unlinkFile() *entry {
	next := e.fileLink.next
	e.fileLink.prev.fileLink.next = e.fileLink.next
	e.fileLink.next.fileLink.prev = e.fileLink.prev
	e.fileLink.prev = e
	e.fileLink.next = e
	return next
}

// setValue replaces the entry's value with v, transferring the entry's
// reference from the old value to the new one. The new value is acquired
// before the old one is released, so the swap is safe even if v == e.val.
// old (and v) may be nil; release/acquire are expected to tolerate that —
// NOTE(review): relies on Value.release handling a nil receiver; confirm in
// Value's implementation.
func (e *entry) setValue(v *Value) {
	if v != nil {
		v.acquire()
	}
	old := e.val
	e.val = v
	old.release()
}

// acquireValue returns the entry's value after taking a new reference on it,
// or nil if the entry has no value. The caller is responsible for releasing
// the returned reference.
func (e *entry) acquireValue() *Value {
	v := e.val
	if v != nil {
		v.acquire()
	}
	return v
}

// The entries are normally allocated using the manual package. We use a
// sync.Pool with each item in the pool holding multiple entries that can be
// reused.
//
// We cannot use manual memory when the Value is allocated using the Go
// allocator: in this case, we use the Go allocator because we need the entry
// pointers to the Values to be discoverable by the GC.
//
// We also use the Go allocator in race mode because the normal path relies on a
// finalizer (and historically there have been some finalizer-related bugs in
// the race detector, in go1.15 and earlier).
const entriesGoAllocated = valueEntryGoAllocated || buildtags.Race

// entrySize is the in-memory size of an entry, used to size manual
// allocations.
const entrySize = int(unsafe.Sizeof(entry{}))

// entryAllocNew returns a zeroed entry. In invariants builds each entry is a
// separate Go allocation with a finalizer that verifies it was freed (zeroed)
// before being garbage collected; otherwise entries come from a pooled
// entryAllocCache.
func entryAllocNew() *entry {
	if invariants.UseFinalizers {
		// We want to allocate each entry independently to check that it has been
		// properly cleaned up.
		e := &entry{}
		invariants.SetFinalizer(e, func(obj interface{}) {
			e := obj.(*entry)
			if *e != (entry{}) {
				fmt.Fprintf(os.Stderr, "%p: entry was not freed", e)
				os.Exit(1)
			}
		})
		return e
	}
	a := entryAllocPool.Get().(*entryAllocCache)
	e := a.alloc()
	entryAllocPool.Put(a)
	return e
}

// entryAllocFree returns an entry obtained from entryAllocNew. The entry is
// zeroed in all modes: in invariants builds the zeroing is what the finalizer
// checks for; otherwise it prevents the pooled entry from retaining stale
// pointers.
func entryAllocFree(e *entry) {
	if invariants.UseFinalizers {
		*e = entry{}
		return
	}
	a := entryAllocPool.Get().(*entryAllocCache)
	*e = entry{}
	a.free(e)
	entryAllocPool.Put(a)
}

// entryAllocPool pools entryAllocCache objects, each of which caches up to
// entryAllocCacheLimit reusable entries.
var entryAllocPool = sync.Pool{
	New: func() interface{} {
		return newEntryAllocCache()
	},
}

// entryAllocCacheLimit is the maximum number of entries that are cached inside
// a pooled object.
const entryAllocCacheLimit = 128

// entryAllocCache holds a bounded stack of entries available for reuse.
type entryAllocCache struct {
	entries []*entry
}

// newEntryAllocCache constructs an empty cache. When entries are manually
// allocated, a finalizer frees any entries still held when the cache itself
// is dropped by the sync.Pool and collected.
func newEntryAllocCache() *entryAllocCache {
	c := &entryAllocCache{}
	if !entriesGoAllocated {
		// Note the use of a "real" finalizer here (as opposed to a build tag-gated
		// no-op finalizer). Without the finalizer, objects released from the pool
		// and subsequently GC'd by the Go runtime would fail to have their manually
		// allocated memory freed, which results in a memory leak.
		// lint:ignore SetFinalizer
		runtime.SetFinalizer(c, freeEntryAllocCache)
	}
	return c
}

// freeEntryAllocCache is the finalizer for entryAllocCache: it deallocates
// every cached entry and nils out the slots.
func freeEntryAllocCache(obj interface{}) {
	c := obj.(*entryAllocCache)
	for i, e := range c.entries {
		c.dealloc(e)
		c.entries[i] = nil
	}
}

// alloc pops a cached entry, or allocates a fresh one (from the Go heap or
// from manual memory, depending on entriesGoAllocated) when the cache is
// empty.
func (c *entryAllocCache) alloc() *entry {
	n := len(c.entries)
	if n == 0 {
		if entriesGoAllocated {
			return &entry{}
		}
		b := manual.New(manual.BlockCacheEntry, entrySize)
		return (*entry)(unsafe.Pointer(&b[0]))
	}
	e := c.entries[n-1]
	c.entries = c.entries[:n-1]
	return e
}

// dealloc returns the entry's memory to the manual allocator. It is a no-op
// when entries are Go allocated (the GC reclaims them).
func (c *entryAllocCache) dealloc(e *entry) {
	if !entriesGoAllocated {
		buf := unsafe.Slice((*byte)(unsafe.Pointer(e)), entrySize)
		manual.Free(manual.BlockCacheEntry, buf)
	}
}

// free caches e for reuse, or deallocates it immediately if the cache is
// already at entryAllocCacheLimit.
func (c *entryAllocCache) free(e *entry) {
	if len(c.entries) == entryAllocCacheLimit {
		c.dealloc(e)
		return
	}
	c.entries = append(c.entries, e)
}