/src/postgres/src/backend/utils/cache/catcache.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * catcache.c |
4 | | * System catalog cache for tuples matching a key. |
5 | | * |
6 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
7 | | * Portions Copyright (c) 1994, Regents of the University of California |
8 | | * |
9 | | * |
10 | | * IDENTIFICATION |
11 | | * src/backend/utils/cache/catcache.c |
12 | | * |
13 | | *------------------------------------------------------------------------- |
14 | | */ |
15 | | #include "postgres.h" |
16 | | |
17 | | #include "access/genam.h" |
18 | | #include "access/heaptoast.h" |
19 | | #include "access/relscan.h" |
20 | | #include "access/table.h" |
21 | | #include "access/xact.h" |
22 | | #include "catalog/catalog.h" |
23 | | #include "catalog/pg_collation.h" |
24 | | #include "catalog/pg_type.h" |
25 | | #include "common/hashfn.h" |
26 | | #include "common/pg_prng.h" |
27 | | #include "miscadmin.h" |
28 | | #include "port/pg_bitutils.h" |
29 | | #ifdef CATCACHE_STATS |
30 | | #include "storage/ipc.h" /* for on_proc_exit */ |
31 | | #endif |
32 | | #include "storage/lmgr.h" |
33 | | #include "utils/builtins.h" |
34 | | #include "utils/catcache.h" |
35 | | #include "utils/datum.h" |
36 | | #include "utils/fmgroids.h" |
37 | | #include "utils/injection_point.h" |
38 | | #include "utils/inval.h" |
39 | | #include "utils/memutils.h" |
40 | | #include "utils/rel.h" |
41 | | #include "utils/resowner.h" |
42 | | #include "utils/syscache.h" |
43 | | |
44 | | /* |
45 | | * If a catcache invalidation is processed while we are in the middle of |
46 | | * creating a catcache entry (or list), it might apply to the entry we're |
47 | | * creating, making it invalid before it's been inserted into the catcache. To |
48 | | * catch such cases, we have a stack of "create-in-progress" entries. Cache |
49 | | * invalidation marks any matching entries in the stack as dead, in addition |
50 | | * to the actual CatCTup and CatCList entries. |
51 | | */ |
52 | | typedef struct CatCInProgress |
53 | | { |
54 | | CatCache *cache; /* cache that the entry belongs to */ |
55 | | uint32 hash_value; /* hash of the entry; ignored for lists */ |
56 | | bool list; /* is it a list entry? */ |
57 | | bool dead; /* set when the entry is invalidated */ |
58 | | struct CatCInProgress *next; |
59 | | } CatCInProgress; |
60 | | |
61 | | static CatCInProgress *catcache_in_progress_stack = NULL; |
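The push/check/pop protocol this stack implies is sketched below in simplified form. The real logic lives in CatalogCacheCreateEntry(), outside the portion of the file shown here, so treat this as an illustration rather than an excerpt.

```c
/*
 * Illustrative sketch only (not an excerpt): how an entry builder would use
 * the in-progress stack to detect invalidations that arrive mid-build.
 */
static void
sketch_build_entry(CatCache *cache, uint32 hashValue)
{
	CatCInProgress in_progress;

	/* push: make this build visible to CatCacheInvalidate() */
	in_progress.cache = cache;
	in_progress.hash_value = hashValue;
	in_progress.list = false;
	in_progress.dead = false;
	in_progress.next = catcache_in_progress_stack;
	catcache_in_progress_stack = &in_progress;

	/* ... build the entry; invalidation messages may be processed here ... */

	/* pop, then honor any invalidation that hit us while building */
	catcache_in_progress_stack = in_progress.next;
	if (in_progress.dead)
		return;					/* discard the half-built entry */
	/* ... otherwise link the finished CatCTup into its hash bucket ... */
}
```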
62 | | |
63 | | /* #define CACHEDEBUG */ /* turns DEBUG elogs on */ |
64 | | |
65 | | /* |
66 | | * Given a hash value and the size of the hash table, find the bucket |
67 | | * in which the hash value belongs. Since the hash table must contain |
68 | | * a power-of-2 number of elements, this is a simple bitmask. |
69 | | */ |
70 | 0 | #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1))) |
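For a power-of-two table size, the bitmask is equivalent to a modulo but cheaper; a small illustration (the hash value is made up):

```c
uint32	h = 0xDEADBEEF;				/* arbitrary hash value */
Index	i1 = HASH_INDEX(h, 256);	/* 0xDEADBEEF & 0xFF = 0xEF */
Index	i2 = (Index) (h % 256);		/* same result via modulo */

Assert(i1 == i2);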
71 | | |
72 | | |
73 | | /* |
74 | | * variables, macros and other stuff |
75 | | */ |
76 | | |
77 | | #ifdef CACHEDEBUG |
78 | | #define CACHE_elog(...) elog(__VA_ARGS__) |
79 | | #else |
80 | | #define CACHE_elog(...) |
81 | | #endif |
82 | | |
83 | | /* Cache management header --- pointer is NULL until created */ |
84 | | static CatCacheHeader *CacheHdr = NULL; |
85 | | |
86 | | static inline HeapTuple SearchCatCacheInternal(CatCache *cache, |
87 | | int nkeys, |
88 | | Datum v1, Datum v2, |
89 | | Datum v3, Datum v4); |
90 | | |
91 | | static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache, |
92 | | int nkeys, |
93 | | uint32 hashValue, |
94 | | Index hashIndex, |
95 | | Datum v1, Datum v2, |
96 | | Datum v3, Datum v4); |
97 | | |
98 | | static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, |
99 | | Datum v1, Datum v2, Datum v3, Datum v4); |
100 | | static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, |
101 | | HeapTuple tuple); |
102 | | static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys, |
103 | | const Datum *cachekeys, |
104 | | const Datum *searchkeys); |
105 | | |
106 | | #ifdef CATCACHE_STATS |
107 | | static void CatCachePrintStats(int code, Datum arg); |
108 | | #endif |
109 | | static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct); |
110 | | static void CatCacheRemoveCList(CatCache *cache, CatCList *cl); |
111 | | static void RehashCatCache(CatCache *cp); |
112 | | static void RehashCatCacheLists(CatCache *cp); |
113 | | static void CatalogCacheInitializeCache(CatCache *cache); |
114 | | static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, |
115 | | Datum *arguments, |
116 | | uint32 hashValue, Index hashIndex); |
117 | | |
118 | | static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner); |
119 | | static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner); |
120 | | static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, |
121 | | Datum *keys); |
122 | | static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos, |
123 | | Datum *srckeys, Datum *dstkeys); |
124 | | |
125 | | |
126 | | /* |
127 | | * internal support functions |
128 | | */ |
129 | | |
130 | | /* ResourceOwner callbacks to hold catcache references */ |
131 | | |
132 | | static void ResOwnerReleaseCatCache(Datum res); |
133 | | static char *ResOwnerPrintCatCache(Datum res); |
134 | | static void ResOwnerReleaseCatCacheList(Datum res); |
135 | | static char *ResOwnerPrintCatCacheList(Datum res); |
136 | | |
137 | | static const ResourceOwnerDesc catcache_resowner_desc = |
138 | | { |
139 | | /* catcache references */ |
140 | | .name = "catcache reference", |
141 | | .release_phase = RESOURCE_RELEASE_AFTER_LOCKS, |
142 | | .release_priority = RELEASE_PRIO_CATCACHE_REFS, |
143 | | .ReleaseResource = ResOwnerReleaseCatCache, |
144 | | .DebugPrint = ResOwnerPrintCatCache |
145 | | }; |
146 | | |
147 | | static const ResourceOwnerDesc catlistref_resowner_desc = |
148 | | { |
149 | | /* catcache-list pins */ |
150 | | .name = "catcache list reference", |
151 | | .release_phase = RESOURCE_RELEASE_AFTER_LOCKS, |
152 | | .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS, |
153 | | .ReleaseResource = ResOwnerReleaseCatCacheList, |
154 | | .DebugPrint = ResOwnerPrintCatCacheList |
155 | | }; |
156 | | |
157 | | /* Convenience wrappers over ResourceOwnerRemember/Forget */ |
158 | | static inline void |
159 | | ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple) |
160 | 0 | { |
161 | 0 | ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc); |
162 | 0 | } |
163 | | static inline void |
164 | | ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple) |
165 | 0 | { |
166 | 0 | ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc); |
167 | 0 | } |
168 | | static inline void |
169 | | ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list) |
170 | 0 | { |
171 | 0 | ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc); |
172 | 0 | } |
173 | | static inline void |
174 | | ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list) |
175 | 0 | { |
176 | 0 | ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc); |
177 | 0 | } |
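These wrappers are always used in matched pairs around a refcount, as the search and release paths later in this file show. The condensed pattern, paraphrased from SearchCatCacheInternal() and ReleaseCatCacheWithOwner():

```c
/* acquire: reserve resowner space before bumping the refcount, so that
 * remembering the reference cannot fail afterwards */
ResourceOwnerEnlarge(CurrentResourceOwner);
ct->refcount++;
ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

/* release: the exact mirror image */
ct->refcount--;
ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
```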
178 | | |
179 | | |
180 | | /* |
181 | | * Hash and equality functions for system types that are used as cache key |
182 | | * fields. In some cases, we just call the regular SQL-callable functions for |
183 | | * the appropriate data type, but that tends to be a little slow, and the |
184 | | * speed of these functions is performance-critical. Therefore, for data |
185 | | * types that frequently occur as catcache keys, we hard-code the logic here. |
186 | | * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and |
187 | | * in certain cases (like int4) we can adopt a faster hash algorithm as well. |
188 | | */ |
189 | | |
190 | | static bool |
191 | | chareqfast(Datum a, Datum b) |
192 | 0 | { |
193 | 0 | return DatumGetChar(a) == DatumGetChar(b); |
194 | 0 | } |
195 | | |
196 | | static uint32 |
197 | | charhashfast(Datum datum) |
198 | 0 | { |
199 | 0 | return murmurhash32((int32) DatumGetChar(datum)); |
200 | 0 | } |
201 | | |
202 | | static bool |
203 | | nameeqfast(Datum a, Datum b) |
204 | 0 | { |
205 | 0 | char *ca = NameStr(*DatumGetName(a)); |
206 | 0 | char *cb = NameStr(*DatumGetName(b)); |
207 | |
|
208 | 0 | return strncmp(ca, cb, NAMEDATALEN) == 0; |
209 | 0 | } |
210 | | |
211 | | static uint32 |
212 | | namehashfast(Datum datum) |
213 | 0 | { |
214 | 0 | char *key = NameStr(*DatumGetName(datum)); |
215 | |
216 | 0 | return hash_any((unsigned char *) key, strlen(key)); |
217 | 0 | } |
218 | | |
219 | | static bool |
220 | | int2eqfast(Datum a, Datum b) |
221 | 0 | { |
222 | 0 | return DatumGetInt16(a) == DatumGetInt16(b); |
223 | 0 | } |
224 | | |
225 | | static uint32 |
226 | | int2hashfast(Datum datum) |
227 | 0 | { |
228 | 0 | return murmurhash32((int32) DatumGetInt16(datum)); |
229 | 0 | } |
230 | | |
231 | | static bool |
232 | | int4eqfast(Datum a, Datum b) |
233 | 0 | { |
234 | 0 | return DatumGetInt32(a) == DatumGetInt32(b); |
235 | 0 | } |
236 | | |
237 | | static uint32 |
238 | | int4hashfast(Datum datum) |
239 | 0 | { |
240 | 0 | return murmurhash32((int32) DatumGetInt32(datum)); |
241 | 0 | } |
242 | | |
243 | | static bool |
244 | | texteqfast(Datum a, Datum b) |
245 | 0 | { |
246 | | /* |
247 | | * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just |
248 | | * want to take the fast "deterministic" path in texteq(). |
249 | | */ |
250 | 0 | return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b)); |
251 | 0 | } |
252 | | |
253 | | static uint32 |
254 | | texthashfast(Datum datum) |
255 | 0 | { |
256 | | /* same arbitrary collation choice as in texteqfast() above */ |
257 | 0 | return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum)); |
258 | 0 | } |
259 | | |
260 | | static bool |
261 | | oidvectoreqfast(Datum a, Datum b) |
262 | 0 | { |
263 | 0 | return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b)); |
264 | 0 | } |
265 | | |
266 | | static uint32 |
267 | | oidvectorhashfast(Datum datum) |
268 | 0 | { |
269 | 0 | return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum)); |
270 | 0 | } |
271 | | |
272 | | /* Lookup support functions for a type. */ |
273 | | static void |
274 | | GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc) |
275 | 0 | { |
276 | 0 | switch (keytype) |
277 | 0 | { |
278 | 0 | case BOOLOID: |
279 | 0 | *hashfunc = charhashfast; |
280 | 0 | *fasteqfunc = chareqfast; |
281 | 0 | *eqfunc = F_BOOLEQ; |
282 | 0 | break; |
283 | 0 | case CHAROID: |
284 | 0 | *hashfunc = charhashfast; |
285 | 0 | *fasteqfunc = chareqfast; |
286 | 0 | *eqfunc = F_CHAREQ; |
287 | 0 | break; |
288 | 0 | case NAMEOID: |
289 | 0 | *hashfunc = namehashfast; |
290 | 0 | *fasteqfunc = nameeqfast; |
291 | 0 | *eqfunc = F_NAMEEQ; |
292 | 0 | break; |
293 | 0 | case INT2OID: |
294 | 0 | *hashfunc = int2hashfast; |
295 | 0 | *fasteqfunc = int2eqfast; |
296 | 0 | *eqfunc = F_INT2EQ; |
297 | 0 | break; |
298 | 0 | case INT4OID: |
299 | 0 | *hashfunc = int4hashfast; |
300 | 0 | *fasteqfunc = int4eqfast; |
301 | 0 | *eqfunc = F_INT4EQ; |
302 | 0 | break; |
303 | 0 | case TEXTOID: |
304 | 0 | *hashfunc = texthashfast; |
305 | 0 | *fasteqfunc = texteqfast; |
306 | 0 | *eqfunc = F_TEXTEQ; |
307 | 0 | break; |
308 | 0 | case OIDOID: |
309 | 0 | case REGPROCOID: |
310 | 0 | case REGPROCEDUREOID: |
311 | 0 | case REGOPEROID: |
312 | 0 | case REGOPERATOROID: |
313 | 0 | case REGCLASSOID: |
314 | 0 | case REGTYPEOID: |
315 | 0 | case REGCOLLATIONOID: |
316 | 0 | case REGCONFIGOID: |
317 | 0 | case REGDICTIONARYOID: |
318 | 0 | case REGROLEOID: |
319 | 0 | case REGNAMESPACEOID: |
320 | 0 | *hashfunc = int4hashfast; |
321 | 0 | *fasteqfunc = int4eqfast; |
322 | 0 | *eqfunc = F_OIDEQ; |
323 | 0 | break; |
324 | 0 | case OIDVECTOROID: |
325 | 0 | *hashfunc = oidvectorhashfast; |
326 | 0 | *fasteqfunc = oidvectoreqfast; |
327 | 0 | *eqfunc = F_OIDVECTOREQ; |
328 | 0 | break; |
329 | 0 | default: |
330 | 0 | elog(FATAL, "type %u not supported as catcache key", keytype); |
331 | 0 | *hashfunc = NULL; /* keep compiler quiet */ |
332 | |
333 | 0 | *eqfunc = InvalidOid; |
334 | 0 | break; |
335 | 0 | } |
336 | 0 | } |
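The three outputs serve different consumers; a condensed sketch of how CatalogCacheInitializeCache() (later in this section) wires them up for one key column:

```c
/* Sketch, paraphrasing CatalogCacheInitializeCache(); "cache" and "i" are
 * assumed context (the cache being initialized and the key index). */
CCHashFN		hashfunc;
CCFastEqualFN	fasteqfunc;
RegProcedure	eqfunc;

GetCCHashEqFuncs(INT4OID, &hashfunc, &eqfunc, &fasteqfunc);

cache->cc_hashfunc[i] = hashfunc;		/* used by CatalogCacheComputeHashValue */
cache->cc_fastequal[i] = fasteqfunc;	/* used by CatalogCacheCompareTuple */
/* the equality proc OID becomes the scan key's function for catalog scans */
fmgr_info_cxt(eqfunc, &cache->cc_skey[i].sk_func, CacheMemoryContext);
```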
337 | | |
338 | | /* |
339 | | * CatalogCacheComputeHashValue |
340 | | * |
341 | | * Compute the hash value associated with a given set of lookup keys |
342 | | */ |
343 | | static uint32 |
344 | | CatalogCacheComputeHashValue(CatCache *cache, int nkeys, |
345 | | Datum v1, Datum v2, Datum v3, Datum v4) |
346 | 0 | { |
347 | 0 | uint32 hashValue = 0; |
348 | 0 | uint32 oneHash; |
349 | 0 | CCHashFN *cc_hashfunc = cache->cc_hashfunc; |
350 | |
351 | 0 | CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p", |
352 | 0 | cache->cc_relname, nkeys, cache); |
353 | |
354 | 0 | switch (nkeys) |
355 | 0 | { |
356 | 0 | case 4: |
357 | 0 | oneHash = (cc_hashfunc[3]) (v4); |
358 | 0 | hashValue ^= pg_rotate_left32(oneHash, 24); |
359 | | /* FALLTHROUGH */ |
360 | 0 | case 3: |
361 | 0 | oneHash = (cc_hashfunc[2]) (v3); |
362 | 0 | hashValue ^= pg_rotate_left32(oneHash, 16); |
363 | | /* FALLTHROUGH */ |
364 | 0 | case 2: |
365 | 0 | oneHash = (cc_hashfunc[1]) (v2); |
366 | 0 | hashValue ^= pg_rotate_left32(oneHash, 8); |
367 | | /* FALLTHROUGH */ |
368 | 0 | case 1: |
369 | 0 | oneHash = (cc_hashfunc[0]) (v1); |
370 | 0 | hashValue ^= oneHash; |
371 | 0 | break; |
372 | 0 | default: |
373 | 0 | elog(FATAL, "wrong number of hash keys: %d", nkeys); |
374 | 0 | break; |
375 | 0 | } |
376 | | |
377 | 0 | return hashValue; |
378 | 0 | } |
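Each key's hash is rotated left by 8 bits per key position before being XORed in, so the same values appearing in different key positions combine to different results. A worked illustration with made-up per-key hashes:

```c
/* With no rotation, keys (A,B) and (B,A) would collide, since
 * hA ^ hB == hB ^ hA.  With per-position rotation they differ: */
uint32	hA = 0x000000FF;
uint32	hB = 0x0000FF00;
uint32	h_ab = pg_rotate_left32(hB, 8) ^ hA;	/* keys (A,B): 0x00FF00FF */
uint32	h_ba = pg_rotate_left32(hA, 8) ^ hB;	/* keys (B,A): 0x00000000 */

Assert(h_ab != h_ba);
```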
379 | | |
380 | | /* |
381 | | * CatalogCacheComputeTupleHashValue |
382 | | * |
383 | | * Compute the hash value associated with a given tuple to be cached |
384 | | */ |
385 | | static uint32 |
386 | | CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple) |
387 | 0 | { |
388 | 0 | Datum v1 = 0, |
389 | 0 | v2 = 0, |
390 | 0 | v3 = 0, |
391 | 0 | v4 = 0; |
392 | 0 | bool isNull = false; |
393 | 0 | int *cc_keyno = cache->cc_keyno; |
394 | 0 | TupleDesc cc_tupdesc = cache->cc_tupdesc; |
395 | | |
396 | | /* Now extract key fields from tuple, insert into scankey */ |
397 | 0 | switch (nkeys) |
398 | 0 | { |
399 | 0 | case 4: |
400 | 0 | v4 = fastgetattr(tuple, |
401 | 0 | cc_keyno[3], |
402 | 0 | cc_tupdesc, |
403 | 0 | &isNull); |
404 | 0 | Assert(!isNull); |
405 | | /* FALLTHROUGH */ |
406 | 0 | case 3: |
407 | 0 | v3 = fastgetattr(tuple, |
408 | 0 | cc_keyno[2], |
409 | 0 | cc_tupdesc, |
410 | 0 | &isNull); |
411 | 0 | Assert(!isNull); |
412 | | /* FALLTHROUGH */ |
413 | 0 | case 2: |
414 | 0 | v2 = fastgetattr(tuple, |
415 | 0 | cc_keyno[1], |
416 | 0 | cc_tupdesc, |
417 | 0 | &isNull); |
418 | 0 | Assert(!isNull); |
419 | | /* FALLTHROUGH */ |
420 | 0 | case 1: |
421 | 0 | v1 = fastgetattr(tuple, |
422 | 0 | cc_keyno[0], |
423 | 0 | cc_tupdesc, |
424 | 0 | &isNull); |
425 | 0 | Assert(!isNull); |
426 | 0 | break; |
427 | 0 | default: |
428 | 0 | elog(FATAL, "wrong number of hash keys: %d", nkeys); |
429 | 0 | break; |
430 | 0 | } |
431 | | |
432 | 0 | return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); |
433 | 0 | } |
434 | | |
435 | | /* |
436 | | * CatalogCacheCompareTuple |
437 | | * |
438 | | * Compare a tuple to the passed arguments. |
439 | | */ |
440 | | static inline bool |
441 | | CatalogCacheCompareTuple(const CatCache *cache, int nkeys, |
442 | | const Datum *cachekeys, |
443 | | const Datum *searchkeys) |
444 | 0 | { |
445 | 0 | const CCFastEqualFN *cc_fastequal = cache->cc_fastequal; |
446 | 0 | int i; |
447 | |
448 | 0 | for (i = 0; i < nkeys; i++) |
449 | 0 | { |
450 | 0 | if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i])) |
451 | 0 | return false; |
452 | 0 | } |
453 | 0 | return true; |
454 | 0 | } |
455 | | |
456 | | |
457 | | #ifdef CATCACHE_STATS |
458 | | |
459 | | static void |
460 | | CatCachePrintStats(int code, Datum arg) |
461 | | { |
462 | | slist_iter iter; |
463 | | long cc_searches = 0; |
464 | | long cc_hits = 0; |
465 | | long cc_neg_hits = 0; |
466 | | long cc_newloads = 0; |
467 | | long cc_invals = 0; |
468 | | long cc_nlists = 0; |
469 | | long cc_lsearches = 0; |
470 | | long cc_lhits = 0; |
471 | | |
472 | | slist_foreach(iter, &CacheHdr->ch_caches) |
473 | | { |
474 | | CatCache *cache = slist_container(CatCache, cc_next, iter.cur); |
475 | | |
476 | | if (cache->cc_ntup == 0 && cache->cc_searches == 0) |
477 | | continue; /* don't print unused caches */ |
478 | | elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits", |
479 | | cache->cc_relname, |
480 | | cache->cc_indexoid, |
481 | | cache->cc_ntup, |
482 | | cache->cc_searches, |
483 | | cache->cc_hits, |
484 | | cache->cc_neg_hits, |
485 | | cache->cc_hits + cache->cc_neg_hits, |
486 | | cache->cc_newloads, |
487 | | cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads, |
488 | | cache->cc_searches - cache->cc_hits - cache->cc_neg_hits, |
489 | | cache->cc_invals, |
490 | | cache->cc_nlist, |
491 | | cache->cc_lsearches, |
492 | | cache->cc_lhits); |
493 | | cc_searches += cache->cc_searches; |
494 | | cc_hits += cache->cc_hits; |
495 | | cc_neg_hits += cache->cc_neg_hits; |
496 | | cc_newloads += cache->cc_newloads; |
497 | | cc_invals += cache->cc_invals; |
498 | | cc_nlists += cache->cc_nlist; |
499 | | cc_lsearches += cache->cc_lsearches; |
500 | | cc_lhits += cache->cc_lhits; |
501 | | } |
502 | | elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits", |
503 | | CacheHdr->ch_ntup, |
504 | | cc_searches, |
505 | | cc_hits, |
506 | | cc_neg_hits, |
507 | | cc_hits + cc_neg_hits, |
508 | | cc_newloads, |
509 | | cc_searches - cc_hits - cc_neg_hits - cc_newloads, |
510 | | cc_searches - cc_hits - cc_neg_hits, |
511 | | cc_invals, |
512 | | cc_nlists, |
513 | | cc_lsearches, |
514 | | cc_lhits); |
515 | | } |
516 | | #endif /* CATCACHE_STATS */ |
517 | | |
518 | | |
519 | | /* |
520 | | * CatCacheRemoveCTup |
521 | | * |
522 | | * Unlink and delete the given cache entry |
523 | | * |
524 | | * NB: if it is a member of a CatCList, the CatCList is deleted too. |
525 | | * Both the cache entry and the list had better have zero refcount. |
526 | | */ |
527 | | static void |
528 | | CatCacheRemoveCTup(CatCache *cache, CatCTup *ct) |
529 | 0 | { |
530 | 0 | Assert(ct->refcount == 0); |
531 | 0 | Assert(ct->my_cache == cache); |
532 | |
533 | 0 | if (ct->c_list) |
534 | 0 | { |
535 | | /* |
536 | | * The cleanest way to handle this is to call CatCacheRemoveCList, |
537 | | * which will recurse back to me, and the recursive call will do the |
538 | | * work. Set the "dead" flag to make sure it does recurse. |
539 | | */ |
540 | 0 | ct->dead = true; |
541 | 0 | CatCacheRemoveCList(cache, ct->c_list); |
542 | 0 | return; /* nothing left to do */ |
543 | 0 | } |
544 | | |
545 | | /* delink from linked list */ |
546 | 0 | dlist_delete(&ct->cache_elem); |
547 | | |
548 | | /* |
549 | | * Free keys only when we're dealing with a negative entry; normal entries' |
550 | | * keys just point into the tuple, which is allocated together with the CatCTup. |
551 | | */ |
552 | 0 | if (ct->negative) |
553 | 0 | CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys, |
554 | 0 | cache->cc_keyno, ct->keys); |
555 | |
556 | 0 | pfree(ct); |
557 | |
558 | 0 | --cache->cc_ntup; |
559 | 0 | --CacheHdr->ch_ntup; |
560 | 0 | } |
561 | | |
562 | | /* |
563 | | * CatCacheRemoveCList |
564 | | * |
565 | | * Unlink and delete the given cache list entry |
566 | | * |
567 | | * NB: any dead member entries that become unreferenced are deleted too. |
568 | | */ |
569 | | static void |
570 | | CatCacheRemoveCList(CatCache *cache, CatCList *cl) |
571 | 0 | { |
572 | 0 | int i; |
573 | |
574 | 0 | Assert(cl->refcount == 0); |
575 | 0 | Assert(cl->my_cache == cache); |
576 | | |
577 | | /* delink from member tuples */ |
578 | 0 | for (i = cl->n_members; --i >= 0;) |
579 | 0 | { |
580 | 0 | CatCTup *ct = cl->members[i]; |
581 | |
582 | 0 | Assert(ct->c_list == cl); |
583 | 0 | ct->c_list = NULL; |
584 | | /* if the member is dead and now has no references, remove it */ |
585 | 0 | if ( |
586 | 0 | #ifndef CATCACHE_FORCE_RELEASE |
587 | 0 | ct->dead && |
588 | 0 | #endif |
589 | 0 | ct->refcount == 0) |
590 | 0 | CatCacheRemoveCTup(cache, ct); |
591 | 0 | } |
592 | | |
593 | | /* delink from linked list */ |
594 | 0 | dlist_delete(&cl->cache_elem); |
595 | | |
596 | | /* free associated column data */ |
597 | 0 | CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys, |
598 | 0 | cache->cc_keyno, cl->keys); |
599 | |
600 | 0 | pfree(cl); |
601 | |
602 | 0 | --cache->cc_nlist; |
603 | 0 | } |
604 | | |
605 | | |
606 | | /* |
607 | | * CatCacheInvalidate |
608 | | * |
609 | | * Invalidate entries in the specified cache, given a hash value. |
610 | | * |
611 | | * We delete cache entries that match the hash value, whether positive |
612 | | * or negative. We don't care whether the invalidation is the result |
613 | | * of a tuple insertion or a deletion. |
614 | | * |
615 | | * We used to try to match positive cache entries by TID, but that is |
616 | | * unsafe after a VACUUM FULL on a system catalog: an inval event could |
617 | | * be queued before VACUUM FULL, and then processed afterwards, when the |
618 | | * target tuple that has to be invalidated has a different TID than it |
619 | | * did when the event was created. So now we just compare hash values and |
620 | | * accept the small risk of unnecessary invalidations due to false matches. |
621 | | * |
622 | | * This routine is only quasi-public: it should only be used by inval.c. |
623 | | */ |
624 | | void |
625 | | CatCacheInvalidate(CatCache *cache, uint32 hashValue) |
626 | 0 | { |
627 | 0 | Index hashIndex; |
628 | 0 | dlist_mutable_iter iter; |
629 | |
630 | 0 | CACHE_elog(DEBUG2, "CatCacheInvalidate: called"); |
631 | | |
632 | | /* |
633 | | * We don't bother to check whether the cache has finished initialization |
634 | | * yet; if not, there will be no entries in it so no problem. |
635 | | */ |
636 | | |
637 | | /* |
638 | | * Invalidate *all* CatCLists in this cache; it's too hard to tell which |
639 | | * searches might still be correct, so just zap 'em all. |
640 | | */ |
641 | 0 | for (int i = 0; i < cache->cc_nlbuckets; i++) |
642 | 0 | { |
643 | 0 | dlist_head *bucket = &cache->cc_lbucket[i]; |
644 | |
645 | 0 | dlist_foreach_modify(iter, bucket) |
646 | 0 | { |
647 | 0 | CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur); |
648 | |
649 | 0 | if (cl->refcount > 0) |
650 | 0 | cl->dead = true; |
651 | 0 | else |
652 | 0 | CatCacheRemoveCList(cache, cl); |
653 | 0 | } |
654 | 0 | } |
655 | | |
656 | | /* |
657 | | * inspect the proper hash bucket for tuple matches |
658 | | */ |
659 | 0 | hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets); |
660 | 0 | dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex]) |
661 | 0 | { |
662 | 0 | CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); |
663 | |
664 | 0 | if (hashValue == ct->hash_value) |
665 | 0 | { |
666 | 0 | if (ct->refcount > 0 || |
667 | 0 | (ct->c_list && ct->c_list->refcount > 0)) |
668 | 0 | { |
669 | 0 | ct->dead = true; |
670 | | /* list, if any, was marked dead above */ |
671 | 0 | Assert(ct->c_list == NULL || ct->c_list->dead); |
672 | 0 | } |
673 | 0 | else |
674 | 0 | CatCacheRemoveCTup(cache, ct); |
675 | 0 | CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated"); |
676 | | #ifdef CATCACHE_STATS |
677 | | cache->cc_invals++; |
678 | | #endif |
679 | | /* could be multiple matches, so keep looking! */ |
680 | 0 | } |
681 | 0 | } |
682 | | |
683 | | /* Also invalidate any entries that are being built */ |
684 | 0 | for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next) |
685 | 0 | { |
686 | 0 | if (e->cache == cache) |
687 | 0 | { |
688 | 0 | if (e->list || e->hash_value == hashValue) |
689 | 0 | e->dead = true; |
690 | 0 | } |
691 | 0 | } |
692 | 0 | } |
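For orientation, a simplified sketch of how a hash value reaches this function. PrepareToInvalidateCacheTuple() appears later in this file, beyond the portion shown, so this is a paraphrase of the call chain rather than an excerpt:

```c
/* When a catalog tuple is modified, the hash of its key columns is
 * computed once and travels with the sinval message ("cache" and
 * "tuple" are assumed context here): */
uint32	hashValue = CatalogCacheComputeTupleHashValue(cache,
													  cache->cc_nkeys,
													  tuple);

/* ... inval.c queues (cache->id, hashValue); on delivery it calls: */
CatCacheInvalidate(cache, hashValue);
```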
693 | | |
694 | | /* ---------------------------------------------------------------- |
695 | | * public functions |
696 | | * ---------------------------------------------------------------- |
697 | | */ |
698 | | |
699 | | |
700 | | /* |
701 | | * Standard routine for creating cache context if it doesn't exist yet |
702 | | * |
703 | | * There are a lot of places (probably far more than necessary) that check |
704 | | * whether CacheMemoryContext exists yet and want to create it if not. |
705 | | * We centralize knowledge of exactly how to create it here. |
706 | | */ |
707 | | void |
708 | | CreateCacheMemoryContext(void) |
709 | 0 | { |
710 | | /* |
711 | | * Purely for paranoia, check that context doesn't exist; caller probably |
712 | | * did so already. |
713 | | */ |
714 | 0 | if (!CacheMemoryContext) |
715 | 0 | CacheMemoryContext = AllocSetContextCreate(TopMemoryContext, |
716 | 0 | "CacheMemoryContext", |
717 | 0 | ALLOCSET_DEFAULT_SIZES); |
718 | 0 | } |
719 | | |
720 | | |
721 | | /* |
722 | | * ResetCatalogCache |
723 | | * |
724 | | * Reset one catalog cache to empty. |
725 | | * |
726 | | * This is not very efficient if the target cache is nearly empty. |
727 | | * However, it shouldn't need to be efficient; we don't invoke it often. |
728 | | * |
729 | | * If 'debug_discard' is true, we are being called as part of |
730 | | * debug_discard_caches. In that case, the cache is not reset for |
731 | | * correctness, but just to get more testing of cache invalidation. We skip |
732 | | * resetting in-progress build entries in that case, or we'd never make any |
733 | | * progress. |
734 | | */ |
735 | | static void |
736 | | ResetCatalogCache(CatCache *cache, bool debug_discard) |
737 | 0 | { |
738 | 0 | dlist_mutable_iter iter; |
739 | 0 | int i; |
740 | | |
741 | | /* Remove each list in this cache, or at least mark it dead */ |
742 | 0 | for (i = 0; i < cache->cc_nlbuckets; i++) |
743 | 0 | { |
744 | 0 | dlist_head *bucket = &cache->cc_lbucket[i]; |
745 | |
746 | 0 | dlist_foreach_modify(iter, bucket) |
747 | 0 | { |
748 | 0 | CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur); |
749 | |
750 | 0 | if (cl->refcount > 0) |
751 | 0 | cl->dead = true; |
752 | 0 | else |
753 | 0 | CatCacheRemoveCList(cache, cl); |
754 | 0 | } |
755 | 0 | } |
756 | | |
757 | | /* Remove each tuple in this cache, or at least mark it dead */ |
758 | 0 | for (i = 0; i < cache->cc_nbuckets; i++) |
759 | 0 | { |
760 | 0 | dlist_head *bucket = &cache->cc_bucket[i]; |
761 | |
762 | 0 | dlist_foreach_modify(iter, bucket) |
763 | 0 | { |
764 | 0 | CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); |
765 | |
766 | 0 | if (ct->refcount > 0 || |
767 | 0 | (ct->c_list && ct->c_list->refcount > 0)) |
768 | 0 | { |
769 | 0 | ct->dead = true; |
770 | | /* list, if any, was marked dead above */ |
771 | 0 | Assert(ct->c_list == NULL || ct->c_list->dead); |
772 | 0 | } |
773 | 0 | else |
774 | 0 | CatCacheRemoveCTup(cache, ct); |
775 | | #ifdef CATCACHE_STATS |
776 | | cache->cc_invals++; |
777 | | #endif |
778 | 0 | } |
779 | 0 | } |
780 | | |
781 | | /* Also invalidate any entries that are being built */ |
782 | 0 | if (!debug_discard) |
783 | 0 | { |
784 | 0 | for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next) |
785 | 0 | { |
786 | 0 | if (e->cache == cache) |
787 | 0 | e->dead = true; |
788 | 0 | } |
789 | 0 | } |
790 | 0 | } |
791 | | |
792 | | /* |
793 | | * ResetCatalogCaches |
794 | | * |
795 | | * Reset all caches when a shared cache inval event forces it |
796 | | */ |
797 | | void |
798 | | ResetCatalogCaches(void) |
799 | 0 | { |
800 | 0 | ResetCatalogCachesExt(false); |
801 | 0 | } |
802 | | |
803 | | void |
804 | | ResetCatalogCachesExt(bool debug_discard) |
805 | 0 | { |
806 | 0 | slist_iter iter; |
807 | |
808 | 0 | CACHE_elog(DEBUG2, "ResetCatalogCaches called"); |
809 | |
810 | 0 | slist_foreach(iter, &CacheHdr->ch_caches) |
811 | 0 | { |
812 | 0 | CatCache *cache = slist_container(CatCache, cc_next, iter.cur); |
813 | |
814 | 0 | ResetCatalogCache(cache, debug_discard); |
815 | 0 | } |
816 | |
817 | 0 | CACHE_elog(DEBUG2, "end of ResetCatalogCaches call"); |
818 | 0 | } |
819 | | |
820 | | /* |
821 | | * CatalogCacheFlushCatalog |
822 | | * |
823 | | * Flush all catcache entries that came from the specified system catalog. |
824 | | * This is needed after VACUUM FULL/CLUSTER on the catalog, since the |
825 | | * tuples very likely now have different TIDs than before. (At one point |
826 | | * we also tried to force re-execution of CatalogCacheInitializeCache for |
827 | | * the cache(s) on that catalog. This is a bad idea since it leads to all |
828 | | * kinds of trouble if a cache flush occurs while loading cache entries. |
829 | | * We now avoid the need to do it by copying cc_tupdesc out of the relcache, |
830 | | * rather than relying on the relcache to keep a tupdesc for us. Of course |
831 | | * this assumes the tupdesc of a cachable system table will not change...) |
832 | | */ |
833 | | void |
834 | | CatalogCacheFlushCatalog(Oid catId) |
835 | 0 | { |
836 | 0 | slist_iter iter; |
837 | |
838 | 0 | CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId); |
839 | |
840 | 0 | slist_foreach(iter, &CacheHdr->ch_caches) |
841 | 0 | { |
842 | 0 | CatCache *cache = slist_container(CatCache, cc_next, iter.cur); |
843 | | |
844 | | /* Does this cache store tuples of the target catalog? */ |
845 | 0 | if (cache->cc_reloid == catId) |
846 | 0 | { |
847 | | /* Yes, so flush all its contents */ |
848 | 0 | ResetCatalogCache(cache, false); |
849 | | |
850 | | /* Tell inval.c to call syscache callbacks for this cache */ |
851 | 0 | CallSyscacheCallbacks(cache->id, 0); |
852 | 0 | } |
853 | 0 | } |
854 | |
855 | 0 | CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call"); |
856 | 0 | } |
857 | | |
858 | | /* |
859 | | * InitCatCache |
860 | | * |
861 | | * This allocates and initializes a cache for a system catalog relation. |
862 | | * Actually, the cache is only partially initialized to avoid opening the |
863 | | * relation. The relation will be opened and the rest of the cache |
864 | | * structure initialized on the first access. |
865 | | */ |
866 | | #ifdef CACHEDEBUG |
867 | | #define InitCatCache_DEBUG2 \ |
868 | | do { \ |
869 | | elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \ |
870 | | cp->cc_reloid, cp->cc_indexoid, cp->id, \ |
871 | | cp->cc_nkeys, cp->cc_nbuckets); \ |
872 | | } while(0) |
873 | | #else |
874 | | #define InitCatCache_DEBUG2 |
875 | | #endif |
876 | | |
877 | | CatCache * |
878 | | InitCatCache(int id, |
879 | | Oid reloid, |
880 | | Oid indexoid, |
881 | | int nkeys, |
882 | | const int *key, |
883 | | int nbuckets) |
884 | 0 | { |
885 | 0 | CatCache *cp; |
886 | 0 | MemoryContext oldcxt; |
887 | 0 | int i; |
888 | | |
889 | | /* |
890 | | * nbuckets is the initial number of hash buckets to use in this catcache. |
891 | | * It will be enlarged later if it becomes too full. |
892 | | * |
893 | | * nbuckets must be a power of two. We check this via Assert rather than |
894 | | * a full runtime check because the values will be coming from constant |
895 | | * tables. |
896 | | * |
897 | | * If you're confused by the power-of-two check, see comments in |
898 | | * bitmapset.c for an explanation. |
899 | | */ |
900 | 0 | Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets); |
901 | | |
902 | | /* |
903 | | * first switch to the cache context so our allocations do not vanish at |
904 | | * the end of a transaction |
905 | | */ |
906 | 0 | if (!CacheMemoryContext) |
907 | 0 | CreateCacheMemoryContext(); |
908 | |
909 | 0 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
910 | | |
911 | | /* |
912 | | * if first time through, initialize the cache group header |
913 | | */ |
914 | 0 | if (CacheHdr == NULL) |
915 | 0 | { |
916 | 0 | CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader)); |
917 | 0 | slist_init(&CacheHdr->ch_caches); |
918 | 0 | CacheHdr->ch_ntup = 0; |
919 | | #ifdef CATCACHE_STATS |
920 | | /* set up to dump stats at backend exit */ |
921 | | on_proc_exit(CatCachePrintStats, 0); |
922 | | #endif |
923 | 0 | } |
924 | | |
925 | | /* |
926 | | * Allocate a new cache structure, aligning to a cacheline boundary |
927 | | * |
928 | | * Note: we rely on zeroing to initialize all the dlist headers correctly |
929 | | */ |
930 | 0 | cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE, |
931 | 0 | MCXT_ALLOC_ZERO); |
932 | 0 | cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head)); |
933 | | |
934 | | /* |
935 | | * Many catcaches never receive any list searches. Therefore, we don't |
936 | | * allocate the cc_lbucket array till we get a list search. |
937 | | */ |
938 | 0 | cp->cc_lbucket = NULL; |
939 | | |
940 | | /* |
941 | | * initialize the cache's relation information for the relation |
942 | | * corresponding to this cache, and initialize some of the new cache's |
943 | | * other internal fields. But don't open the relation yet. |
944 | | */ |
945 | 0 | cp->id = id; |
946 | 0 | cp->cc_relname = "(not known yet)"; |
947 | 0 | cp->cc_reloid = reloid; |
948 | 0 | cp->cc_indexoid = indexoid; |
949 | 0 | cp->cc_relisshared = false; /* temporary */ |
950 | 0 | cp->cc_tupdesc = (TupleDesc) NULL; |
951 | 0 | cp->cc_ntup = 0; |
952 | 0 | cp->cc_nlist = 0; |
953 | 0 | cp->cc_nbuckets = nbuckets; |
954 | 0 | cp->cc_nlbuckets = 0; |
955 | 0 | cp->cc_nkeys = nkeys; |
956 | 0 | for (i = 0; i < nkeys; ++i) |
957 | 0 | { |
958 | 0 | Assert(AttributeNumberIsValid(key[i])); |
959 | 0 | cp->cc_keyno[i] = key[i]; |
960 | 0 | } |
961 | | |
962 | | /* |
963 | | * new cache is initialized as far as we can go for now. print some |
964 | | * debugging information, if appropriate. |
965 | | */ |
966 | 0 | InitCatCache_DEBUG2; |
967 | | |
968 | | /* |
969 | | * add completed cache to top of group header's list |
970 | | */ |
971 | 0 | slist_push_head(&CacheHdr->ch_caches, &cp->cc_next); |
972 | | |
973 | | /* |
974 | | * back to the old context before we return... |
975 | | */ |
976 | 0 | MemoryContextSwitchTo(oldcxt); |
977 | |
978 | 0 | return cp; |
979 | 0 | } |
980 | | |
981 | | /* |
982 | | * Enlarge a catcache, doubling the number of buckets. |
983 | | */ |
984 | | static void |
985 | | RehashCatCache(CatCache *cp) |
986 | 0 | { |
987 | 0 | dlist_head *newbucket; |
988 | 0 | int newnbuckets; |
989 | 0 | int i; |
990 | |
991 | 0 | elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets", |
992 | 0 | cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets); |
993 | | |
994 | | /* Allocate a new, larger, hash table. */ |
995 | 0 | newnbuckets = cp->cc_nbuckets * 2; |
996 | 0 | newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head)); |
997 | | |
998 | | /* Move all entries from old hash table to new. */ |
999 | 0 | for (i = 0; i < cp->cc_nbuckets; i++) |
1000 | 0 | { |
1001 | 0 | dlist_mutable_iter iter; |
1002 | |
1003 | 0 | dlist_foreach_modify(iter, &cp->cc_bucket[i]) |
1004 | 0 | { |
1005 | 0 | CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); |
1006 | 0 | int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets); |
1007 | |
1008 | 0 | dlist_delete(iter.cur); |
1009 | 0 | dlist_push_head(&newbucket[hashIndex], &ct->cache_elem); |
1010 | 0 | } |
1011 | 0 | } |
1012 | | |
1013 | | /* Switch to the new array. */ |
1014 | 0 | pfree(cp->cc_bucket); |
1015 | 0 | cp->cc_nbuckets = newnbuckets; |
1016 | 0 | cp->cc_bucket = newbucket; |
1017 | 0 | } |
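Because the table size doubles and HASH_INDEX() masks with size - 1, each rehashed entry can only move to one of two new buckets: its old index, or its old index plus the old table size. An illustration with made-up numbers:

```c
uint32	h = 0x5A;							/* arbitrary entry hash value */
int		oldn = 16;
int		oldidx = HASH_INDEX(h, oldn);		/* 0x5A & 0x0F = 10           */
int		newidx = HASH_INDEX(h, oldn * 2);	/* 0x5A & 0x1F = 26 = 10 + 16 */

Assert(newidx == oldidx || newidx == oldidx + oldn);
```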
1018 | | |
1019 | | /* |
1020 | | * Enlarge a catcache's list storage, doubling the number of buckets. |
1021 | | */ |
1022 | | static void |
1023 | | RehashCatCacheLists(CatCache *cp) |
1024 | 0 | { |
1025 | 0 | dlist_head *newbucket; |
1026 | 0 | int newnbuckets; |
1027 | 0 | int i; |
1028 | |
|
1029 | 0 | elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets", |
1030 | 0 | cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets); |
1031 | | |
1032 | | /* Allocate a new, larger, hash table. */ |
1033 | 0 | newnbuckets = cp->cc_nlbuckets * 2; |
1034 | 0 | newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head)); |
1035 | | |
1036 | | /* Move all entries from old hash table to new. */ |
1037 | 0 | for (i = 0; i < cp->cc_nlbuckets; i++) |
1038 | 0 | { |
1039 | 0 | dlist_mutable_iter iter; |
1040 | |
1041 | 0 | dlist_foreach_modify(iter, &cp->cc_lbucket[i]) |
1042 | 0 | { |
1043 | 0 | CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur); |
1044 | 0 | int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets); |
1045 | |
1046 | 0 | dlist_delete(iter.cur); |
1047 | 0 | dlist_push_head(&newbucket[hashIndex], &cl->cache_elem); |
1048 | 0 | } |
1049 | 0 | } |
1050 | | |
1051 | | /* Switch to the new array. */ |
1052 | 0 | pfree(cp->cc_lbucket); |
1053 | 0 | cp->cc_nlbuckets = newnbuckets; |
1054 | 0 | cp->cc_lbucket = newbucket; |
1055 | 0 | } |
1056 | | |
1057 | | /* |
1058 | | * ConditionalCatalogCacheInitializeCache |
1059 | | * |
1060 | | * Call CatalogCacheInitializeCache() if not yet done. |
1061 | | */ |
1062 | | pg_attribute_always_inline |
1063 | | static void |
1064 | | ConditionalCatalogCacheInitializeCache(CatCache *cache) |
1065 | 0 | { |
1066 | | #ifdef USE_ASSERT_CHECKING |
1067 | | /* |
1068 | | * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID |
1069 | | * for hashing. This isn't ideal. Since lookup_type_cache() both |
1070 | | * registers the callback and searches TYPEOID, reaching trouble likely |
1071 | | * requires OOM at an unlucky moment. |
1072 | | * |
1073 | | * InvalidateAttoptCacheCallback() runs outside transactions and likewise |
1074 | | * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable. |
1075 | | */ |
1076 | | if (!(cache->id == TYPEOID || cache->id == ATTNUM) || |
1077 | | IsTransactionState()) |
1078 | | AssertCouldGetRelation(); |
1079 | | else |
1080 | | Assert(cache->cc_tupdesc != NULL); |
1081 | | #endif |
1082 | |
1083 | 0 | if (unlikely(cache->cc_tupdesc == NULL)) |
1084 | 0 | CatalogCacheInitializeCache(cache); |
1085 | 0 | } |
1086 | | |
1087 | | /* |
1088 | | * CatalogCacheInitializeCache |
1089 | | * |
1090 | | * This function does final initialization of a catcache: obtain the tuple |
1091 | | * descriptor and set up the hash and equality function links. |
1092 | | */ |
1093 | | #ifdef CACHEDEBUG |
1094 | | #define CatalogCacheInitializeCache_DEBUG1 \ |
1095 | | elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \ |
1096 | | cache->cc_reloid) |
1097 | | |
1098 | | #define CatalogCacheInitializeCache_DEBUG2 \ |
1099 | | do { \ |
1100 | | if (cache->cc_keyno[i] > 0) { \ |
1101 | | elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \ |
1102 | | i+1, cache->cc_nkeys, cache->cc_keyno[i], \ |
1103 | | TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \ |
1104 | | } else { \ |
1105 | | elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \ |
1106 | | i+1, cache->cc_nkeys, cache->cc_keyno[i]); \ |
1107 | | } \ |
1108 | | } while(0) |
1109 | | #else |
1110 | | #define CatalogCacheInitializeCache_DEBUG1 |
1111 | | #define CatalogCacheInitializeCache_DEBUG2 |
1112 | | #endif |
1113 | | |
1114 | | static void |
1115 | | CatalogCacheInitializeCache(CatCache *cache) |
1116 | 0 | { |
1117 | 0 | Relation relation; |
1118 | 0 | MemoryContext oldcxt; |
1119 | 0 | TupleDesc tupdesc; |
1120 | 0 | int i; |
1121 | |
1122 | 0 | CatalogCacheInitializeCache_DEBUG1; |
1123 | |
1124 | 0 | relation = table_open(cache->cc_reloid, AccessShareLock); |
1125 | | |
1126 | | /* |
1127 | | * switch to the cache context so our allocations do not vanish at the end |
1128 | | * of a transaction |
1129 | | */ |
1130 | 0 | Assert(CacheMemoryContext != NULL); |
1131 | |
1132 | 0 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
1133 | | |
1134 | | /* |
1135 | | * copy the relcache's tuple descriptor to permanent cache storage |
1136 | | */ |
1137 | 0 | tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation)); |
1138 | | |
1139 | | /* |
1140 | | * save the relation's name and relisshared flag, too (cc_relname is used |
1141 | | * only for debugging purposes) |
1142 | | */ |
1143 | 0 | cache->cc_relname = pstrdup(RelationGetRelationName(relation)); |
1144 | 0 | cache->cc_relisshared = RelationGetForm(relation)->relisshared; |
1145 | | |
1146 | | /* |
1147 | | * return to the caller's memory context and close the rel |
1148 | | */ |
1149 | 0 | MemoryContextSwitchTo(oldcxt); |
1150 | |
1151 | 0 | table_close(relation, AccessShareLock); |
1152 | |
1153 | 0 | CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys", |
1154 | 0 | cache->cc_relname, cache->cc_nkeys); |
1155 | | |
1156 | | /* |
1157 | | * initialize cache's key information |
1158 | | */ |
1159 | 0 | for (i = 0; i < cache->cc_nkeys; ++i) |
1160 | 0 | { |
1161 | 0 | Oid keytype; |
1162 | 0 | RegProcedure eqfunc; |
1163 | |
1164 | 0 | CatalogCacheInitializeCache_DEBUG2; |
1165 | |
1166 | 0 | if (cache->cc_keyno[i] > 0) |
1167 | 0 | { |
1168 | 0 | Form_pg_attribute attr = TupleDescAttr(tupdesc, |
1169 | 0 | cache->cc_keyno[i] - 1); |
1170 | |
1171 | 0 | keytype = attr->atttypid; |
1172 | | /* cache key columns should always be NOT NULL */ |
1173 | 0 | Assert(attr->attnotnull); |
1174 | 0 | } |
1175 | 0 | else |
1176 | 0 | { |
1177 | 0 | if (cache->cc_keyno[i] < 0) |
1178 | 0 | elog(FATAL, "sys attributes are not supported in caches"); |
1179 | 0 | keytype = OIDOID; |
1180 | 0 | } |
1181 | | |
1182 | 0 | GetCCHashEqFuncs(keytype, |
1183 | 0 | &cache->cc_hashfunc[i], |
1184 | 0 | &eqfunc, |
1185 | 0 | &cache->cc_fastequal[i]); |
1186 | | |
1187 | | /* |
1188 | | * Do equality-function lookup (we assume this won't need a catalog |
1189 | | * lookup for any supported type) |
1190 | | */ |
1191 | 0 | fmgr_info_cxt(eqfunc, |
1192 | 0 | &cache->cc_skey[i].sk_func, |
1193 | 0 | CacheMemoryContext); |
1194 | | |
1195 | | /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */ |
1196 | 0 | cache->cc_skey[i].sk_attno = cache->cc_keyno[i]; |
1197 | | |
1198 | | /* Fill in sk_strategy as well --- always standard equality */ |
1199 | 0 | cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber; |
1200 | 0 | cache->cc_skey[i].sk_subtype = InvalidOid; |
1201 | | /* If a catcache key requires a collation, it must be C collation */ |
1202 | 0 | cache->cc_skey[i].sk_collation = C_COLLATION_OID; |
1203 | |
1204 | 0 | CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p", |
1205 | 0 | cache->cc_relname, i, cache); |
1206 | 0 | } |
1207 | | |
1208 | | /* |
1209 | | * mark this cache fully initialized |
1210 | | */ |
1211 | 0 | cache->cc_tupdesc = tupdesc; |
1212 | 0 | } |
1213 | | |
1214 | | /* |
1215 | | * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache |
1216 | | * |
1217 | | * One reason to call this routine is to ensure that the relcache has |
1218 | | * created entries for all the catalogs and indexes referenced by catcaches. |
1219 | | * Therefore, provide an option to open the index as well as fix the |
1220 | | * cache itself. An exception is the indexes on pg_am, which we don't use |
1221 | | * (cf. IndexScanOK). |
1222 | | */ |
1223 | | void |
1224 | | InitCatCachePhase2(CatCache *cache, bool touch_index) |
1225 | 0 | { |
1226 | 0 | ConditionalCatalogCacheInitializeCache(cache); |
1227 | |
1228 | 0 | if (touch_index && |
1229 | 0 | cache->id != AMOID && |
1230 | 0 | cache->id != AMNAME) |
1231 | 0 | { |
1232 | 0 | Relation idesc; |
1233 | | |
1234 | | /* |
1235 | | * We must lock the underlying catalog before opening the index to |
1236 | | * avoid deadlock, since index_open could possibly result in reading |
1237 | | * this same catalog, and if anyone else is exclusive-locking this |
1238 | | * catalog and index they'll be doing it in that order. |
1239 | | */ |
1240 | 0 | LockRelationOid(cache->cc_reloid, AccessShareLock); |
1241 | 0 | idesc = index_open(cache->cc_indexoid, AccessShareLock); |
1242 | | |
1243 | | /* |
1244 | | * While we've got the index open, let's check that it's unique (and |
1245 | | * not just deferrable-unique, thank you very much). This is just to |
1246 | | * catch thinkos in definitions of new catcaches, so we don't worry |
1247 | | * about the pg_am indexes not getting tested. |
1248 | | */ |
1249 | 0 | Assert(idesc->rd_index->indisunique && |
1250 | 0 | idesc->rd_index->indimmediate); |
1251 | |
1252 | 0 | index_close(idesc, AccessShareLock); |
1253 | 0 | UnlockRelationOid(cache->cc_reloid, AccessShareLock); |
1254 | 0 | } |
1255 | 0 | } |
1256 | | |
1257 | | |
1258 | | /* |
1259 | | * IndexScanOK |
1260 | | * |
1261 | | * This function checks for tuples that will be fetched by |
1262 | | * IndexSupportInitialize() during relcache initialization for |
1263 | | * certain system indexes that support critical syscaches. |
1264 | | * We can't use an indexscan to fetch these, else we'll get into |
1265 | | * infinite recursion. A plain heap scan will work, however. |
1266 | | * Once we have completed relcache initialization (signaled by |
1267 | | * criticalRelcachesBuilt), we don't have to worry anymore. |
1268 | | * |
1269 | | * Similarly, during backend startup we have to be able to use the |
1270 | | * pg_authid, pg_auth_members and pg_database syscaches for |
1271 | | * authentication even if we don't yet have relcache entries for those |
1272 | | * catalogs' indexes. |
1273 | | */ |
1274 | | static bool |
1275 | | IndexScanOK(CatCache *cache) |
1276 | 0 | { |
1277 | 0 | switch (cache->id) |
1278 | 0 | { |
1279 | 0 | case INDEXRELID: |
1280 | | |
1281 | | /* |
1282 | | * Rather than tracking exactly which indexes have to be loaded |
1283 | | * before we can use indexscans (which changes from time to time), |
1284 | | * just force all pg_index searches to be heap scans until we've |
1285 | | * built the critical relcaches. |
1286 | | */ |
1287 | 0 | if (!criticalRelcachesBuilt) |
1288 | 0 | return false; |
1289 | 0 | break; |
1290 | | |
1291 | 0 | case AMOID: |
1292 | 0 | case AMNAME: |
1293 | | |
1294 | | /* |
1295 | | * Always do heap scans in pg_am, because it's so small there's |
1296 | | * not much point in an indexscan anyway. We *must* do this when |
1297 | | * initially building critical relcache entries, but we might as |
1298 | | * well just always do it. |
1299 | | */ |
1300 | 0 | return false; |
1301 | | |
1302 | 0 | case AUTHNAME: |
1303 | 0 | case AUTHOID: |
1304 | 0 | case AUTHMEMMEMROLE: |
1305 | 0 | case DATABASEOID: |
1306 | | |
1307 | | /* |
1308 | | * Protect authentication lookups occurring before relcache has |
1309 | | * collected entries for shared indexes. |
1310 | | */ |
1311 | 0 | if (!criticalSharedRelcachesBuilt) |
1312 | 0 | return false; |
1313 | 0 | break; |
1314 | | |
1315 | 0 | default: |
1316 | 0 | break; |
1317 | 0 | } |
1318 | | |
1319 | | /* Normal case, allow index scan */ |
1320 | 0 | return true; |
1321 | 0 | } |
1322 | | |
1323 | | /* |
1324 | | * SearchCatCache |
1325 | | * |
1326 | | * This call searches a system cache for a tuple, opening the relation |
1327 | | * if necessary (on the first access to a particular cache). |
1328 | | * |
1329 | | * The result is NULL if not found, or a pointer to a HeapTuple in |
1330 | | * the cache. The caller must not modify the tuple, and must call |
1331 | | * ReleaseCatCache() when done with it. |
1332 | | * |
1333 | | * The search key values should be expressed as Datums of the key columns' |
1334 | | * datatype(s). (Pass zeroes for any unused parameters.) As a special |
1335 | | * exception, the passed-in key for a NAME column can be just a C string; |
1336 | | * the caller need not go to the trouble of converting it to a fully |
1337 | | * null-padded NAME. |
1338 | | */ |
1339 | | HeapTuple |
1340 | | SearchCatCache(CatCache *cache, |
1341 | | Datum v1, |
1342 | | Datum v2, |
1343 | | Datum v3, |
1344 | | Datum v4) |
1345 | 0 | { |
1346 | 0 | return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4); |
1347 | 0 | } |
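A hedged usage sketch: most code reaches SearchCatCache() through the wrappers in syscache.c, and the caller contract (don't modify the tuple, release it when done) typically looks like the following. sketch_get_typtype() is a hypothetical helper; the other identifiers are standard PostgreSQL APIs.

```c
#include "postgres.h"
#include "access/htup_details.h"	/* GETSTRUCT */
#include "catalog/pg_type.h"		/* Form_pg_type, TYPEOID cache users */
#include "utils/syscache.h"			/* SearchSysCache1, ReleaseSysCache */

static char
sketch_get_typtype(Oid typid)		/* hypothetical helper */
{
	HeapTuple	tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	char		typtype;

	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for type %u", typid);
	typtype = ((Form_pg_type) GETSTRUCT(tup))->typtype;
	ReleaseSysCache(tup);			/* pairs with the successful search */
	return typtype;
}
```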
1348 | | |
1349 | | |
1350 | | /* |
1351 | | * SearchCatCacheN() are SearchCatCache() versions for a specific number of |
1352 | | * arguments. The compiler can inline the body and unroll loops, making them a |
1353 | | * bit faster than SearchCatCache(). |
1354 | | */ |
1355 | | |
1356 | | HeapTuple |
1357 | | SearchCatCache1(CatCache *cache, |
1358 | | Datum v1) |
1359 | 0 | { |
1360 | 0 | return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0); |
1361 | 0 | } |
1362 | | |
1363 | | |
1364 | | HeapTuple |
1365 | | SearchCatCache2(CatCache *cache, |
1366 | | Datum v1, Datum v2) |
1367 | 0 | { |
1368 | 0 | return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0); |
1369 | 0 | } |
1370 | | |
1371 | | |
1372 | | HeapTuple |
1373 | | SearchCatCache3(CatCache *cache, |
1374 | | Datum v1, Datum v2, Datum v3) |
1375 | 0 | { |
1376 | 0 | return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0); |
1377 | 0 | } |
1378 | | |
1379 | | |
1380 | | HeapTuple |
1381 | | SearchCatCache4(CatCache *cache, |
1382 | | Datum v1, Datum v2, Datum v3, Datum v4) |
1383 | 0 | { |
1384 | 0 | return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4); |
1385 | 0 | } |
1386 | | |
1387 | | /* |
1388 | | * Work-horse for SearchCatCache/SearchCatCacheN. |
1389 | | */ |
1390 | | static inline HeapTuple |
1391 | | SearchCatCacheInternal(CatCache *cache, |
1392 | | int nkeys, |
1393 | | Datum v1, |
1394 | | Datum v2, |
1395 | | Datum v3, |
1396 | | Datum v4) |
1397 | 0 | { |
1398 | 0 | Datum arguments[CATCACHE_MAXKEYS]; |
1399 | 0 | uint32 hashValue; |
1400 | 0 | Index hashIndex; |
1401 | 0 | dlist_iter iter; |
1402 | 0 | dlist_head *bucket; |
1403 | 0 | CatCTup *ct; |
1404 | |
1405 | 0 | Assert(cache->cc_nkeys == nkeys); |
1406 | | |
1407 | | /* |
1408 | | * one-time startup overhead for each cache |
1409 | | */ |
1410 | 0 | ConditionalCatalogCacheInitializeCache(cache); |
1411 | |
1412 | | #ifdef CATCACHE_STATS |
1413 | | cache->cc_searches++; |
1414 | | #endif |
1415 | | |
1416 | | /* Initialize local parameter array */ |
1417 | 0 | arguments[0] = v1; |
1418 | 0 | arguments[1] = v2; |
1419 | 0 | arguments[2] = v3; |
1420 | 0 | arguments[3] = v4; |
1421 | | |
1422 | | /* |
1423 | | * find the hash bucket in which to look for the tuple |
1424 | | */ |
1425 | 0 | hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); |
1426 | 0 | hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets); |
1427 | | |
1428 | | /* |
1429 | | * scan the hash bucket until we find a match or exhaust our tuples |
1430 | | * |
1431 | | * Note: it's okay to use dlist_foreach here, even though we modify the |
1432 | | * dlist within the loop, because we don't continue the loop afterwards. |
1433 | | */ |
1434 | 0 | bucket = &cache->cc_bucket[hashIndex]; |
1435 | 0 | dlist_foreach(iter, bucket) |
1436 | 0 | { |
1437 | 0 | ct = dlist_container(CatCTup, cache_elem, iter.cur); |
1438 | |
1439 | 0 | if (ct->dead) |
1440 | 0 | continue; /* ignore dead entries */ |
1441 | | |
1442 | 0 | if (ct->hash_value != hashValue) |
1443 | 0 | continue; /* quickly skip entry if wrong hash val */ |
1444 | | |
1445 | 0 | if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments)) |
1446 | 0 | continue; |
1447 | | |
1448 | | /* |
1449 | | * We found a match in the cache. Move it to the front of the list |
1450 | | * for its hashbucket, in order to speed subsequent searches. (The |
1451 | | * most frequently accessed elements in any hashbucket will tend to be |
1452 | | * near the front of the hashbucket's list.) |
1453 | | */ |
1454 | 0 | dlist_move_head(bucket, &ct->cache_elem); |
1455 | | |
1456 | | /* |
1457 | | * If it's a positive entry, bump its refcount and return it. If it's |
1458 | | * negative, we can report failure to the caller. |
1459 | | */ |
1460 | 0 | if (!ct->negative) |
1461 | 0 | { |
1462 | 0 | ResourceOwnerEnlarge(CurrentResourceOwner); |
1463 | 0 | ct->refcount++; |
1464 | 0 | ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple); |
1465 | |
1466 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d", |
1467 | 0 | cache->cc_relname, hashIndex); |
1468 | |
1469 | | #ifdef CATCACHE_STATS |
1470 | | cache->cc_hits++; |
1471 | | #endif |
1472 | |
1473 | 0 | return &ct->tuple; |
1474 | 0 | } |
1475 | 0 | else |
1476 | 0 | { |
1477 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d", |
1478 | 0 | cache->cc_relname, hashIndex); |
1479 | |
1480 | | #ifdef CATCACHE_STATS |
1481 | | cache->cc_neg_hits++; |
1482 | | #endif |
1483 | |
1484 | 0 | return NULL; |
1485 | 0 | } |
1486 | 0 | } |
1487 | | |
1488 | 0 | return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4); |
1489 | 0 | } |
1490 | | |
1491 | | /* |
1492 | | * Search the actual catalogs, rather than the cache. |
1493 | | * |
1494 | | * This is kept separate from SearchCatCacheInternal() to keep the fast-path |
1495 | | * as small as possible. To avoid that effort being undone by a helpful |
1496 | | * compiler, try to explicitly forbid inlining. |
1497 | | */ |
1498 | | static pg_noinline HeapTuple |
1499 | | SearchCatCacheMiss(CatCache *cache, |
1500 | | int nkeys, |
1501 | | uint32 hashValue, |
1502 | | Index hashIndex, |
1503 | | Datum v1, |
1504 | | Datum v2, |
1505 | | Datum v3, |
1506 | | Datum v4) |
1507 | 0 | { |
1508 | 0 | ScanKeyData cur_skey[CATCACHE_MAXKEYS]; |
1509 | 0 | Relation relation; |
1510 | 0 | SysScanDesc scandesc; |
1511 | 0 | HeapTuple ntp; |
1512 | 0 | CatCTup *ct; |
1513 | 0 | bool stale; |
1514 | 0 | Datum arguments[CATCACHE_MAXKEYS]; |
1515 | | |
1516 | | /* Initialize local parameter array */ |
1517 | 0 | arguments[0] = v1; |
1518 | 0 | arguments[1] = v2; |
1519 | 0 | arguments[2] = v3; |
1520 | 0 | arguments[3] = v4; |
1521 | | |
1522 | | /* |
1523 | | * Tuple was not found in cache, so we have to try to retrieve it directly |
1524 | | * from the relation. If found, we will add it to the cache; if not |
1525 | | * found, we will add a negative cache entry instead. |
1526 | | * |
1527 | | * NOTE: it is possible for recursive cache lookups to occur while reading |
1528 | | * the relation --- for example, due to shared-cache-inval messages being |
1529 | | * processed during table_open(). This is OK. It's even possible for one |
1530 | | * of those lookups to find and enter the very same tuple we are trying to |
1531 | | * fetch here. If that happens, we will enter a second copy of the tuple |
1532 | | * into the cache. The first copy will never be referenced again, and |
1533 | | * will eventually age out of the cache, so there's no functional problem. |
1534 | | * This case is rare enough that it's not worth expending extra cycles to |
1535 | | * detect. |
1536 | | * |
1537 | | * Another case, which we *must* handle, is that the tuple could become |
1538 | | * outdated during CatalogCacheCreateEntry's attempt to detoast it (since |
1539 | | * AcceptInvalidationMessages can run during TOAST table access). We do |
1540 | | * not want to return already-stale catcache entries, so we loop around |
1541 | | * and do the table scan again if that happens. |
1542 | | */ |
1543 | 0 | relation = table_open(cache->cc_reloid, AccessShareLock); |
1544 | | |
1545 | | /* |
1546 | | * Ok, need to make a lookup in the relation, copy the scankey and fill |
1547 | | * out any per-call fields. |
1548 | | */ |
1549 | 0 | memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys); |
1550 | 0 | cur_skey[0].sk_argument = v1; |
1551 | 0 | cur_skey[1].sk_argument = v2; |
1552 | 0 | cur_skey[2].sk_argument = v3; |
1553 | 0 | cur_skey[3].sk_argument = v4; |
1554 | |
1555 | 0 | do |
1556 | 0 | { |
1557 | 0 | scandesc = systable_beginscan(relation, |
1558 | 0 | cache->cc_indexoid, |
1559 | 0 | IndexScanOK(cache), |
1560 | 0 | NULL, |
1561 | 0 | nkeys, |
1562 | 0 | cur_skey); |
1563 | |
1564 | 0 | ct = NULL; |
1565 | 0 | stale = false; |
1566 | |
1567 | 0 | while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) |
1568 | 0 | { |
1569 | 0 | ct = CatalogCacheCreateEntry(cache, ntp, NULL, |
1570 | 0 | hashValue, hashIndex); |
1571 | | /* upon failure, we must start the scan over */ |
1572 | 0 | if (ct == NULL) |
1573 | 0 | { |
1574 | 0 | stale = true; |
1575 | 0 | break; |
1576 | 0 | } |
1577 | | /* immediately set the refcount to 1 */ |
1578 | 0 | ResourceOwnerEnlarge(CurrentResourceOwner); |
1579 | 0 | ct->refcount++; |
1580 | 0 | ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple); |
1581 | 0 | break; /* assume only one match */ |
1582 | 0 | } |
1583 | |
1584 | 0 | systable_endscan(scandesc); |
1585 | 0 | } while (stale); |
1586 | |
1587 | 0 | table_close(relation, AccessShareLock); |
1588 | | |
1589 | | /* |
1590 | | * If tuple was not found, we need to build a negative cache entry |
1591 | | * containing a fake tuple. The fake tuple has the correct key columns, |
1592 | | * but nulls everywhere else. |
1593 | | * |
1594 | | * In bootstrap mode, we don't build negative entries, because the cache |
1595 | | * invalidation mechanism isn't alive and can't clear them if the tuple |
1596 | | * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need |
1597 | | * cache inval for that.) |
1598 | | */ |
1599 | 0 | if (ct == NULL) |
1600 | 0 | { |
1601 | 0 | if (IsBootstrapProcessingMode()) |
1602 | 0 | return NULL; |
1603 | | |
1604 | 0 | ct = CatalogCacheCreateEntry(cache, NULL, arguments, |
1605 | 0 | hashValue, hashIndex); |
1606 | | |
1607 | | /* Creating a negative cache entry shouldn't fail */ |
1608 | 0 | Assert(ct != NULL); |
1609 | |
1610 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples", |
1611 | 0 | cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup); |
1612 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d", |
1613 | 0 | cache->cc_relname, hashIndex); |
1614 | | |
1615 | | /* |
1616 | | * We are not returning the negative entry to the caller, so leave its |
1617 | | * refcount zero. |
1618 | | */ |
1619 | |
1620 | 0 | return NULL; |
1621 | 0 | } |
1622 | | |
1623 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples", |
1624 | 0 | cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup); |
1625 | 0 | CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d", |
1626 | 0 | cache->cc_relname, hashIndex); |
1627 | |
1628 | | #ifdef CATCACHE_STATS |
1629 | | cache->cc_newloads++; |
1630 | | #endif |
1631 | |
1632 | 0 | return &ct->tuple; |
1633 | 0 | } |
1634 | | |
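
[Editor's note: for orientation, here is a minimal sketch of how this slow path is normally reached from a caller's perspective. SearchSysCache1(), ReleaseSysCache(), TYPEOID, GETSTRUCT() and Form_pg_type are the standard PostgreSQL APIs (utils/syscache.h, access/htup_details.h, catalog/pg_type.h); the surrounding function is hypothetical.]

    /* Hypothetical caller: a cache hit returns at once; a miss funnels
     * through SearchCatCacheMiss() to scan pg_type and build the entry. */
    static char
    get_type_category_sketch(Oid typid)
    {
        HeapTuple   tup;
        char        category;

        tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
        if (!HeapTupleIsValid(tup))
            elog(ERROR, "cache lookup failed for type %u", typid);
        category = ((Form_pg_type) GETSTRUCT(tup))->typcategory;
        ReleaseSysCache(tup);       /* drops the refcount taken on our behalf */
        return category;
    }
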
1635 | | /* |
1636 | | * ReleaseCatCache |
1637 | | * |
1638 | | * Decrement the reference count of a catcache entry (releasing the |
1639 | | * hold grabbed by a successful SearchCatCache). |
1640 | | * |
1641 | | * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries |
1642 | | * will be freed as soon as their refcount goes to zero. In combination |
1643 | | * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test |
1644 | | * to catch references to already-released catcache entries. |
1645 | | */ |
1646 | | void |
1647 | | ReleaseCatCache(HeapTuple tuple) |
1648 | 0 | { |
1649 | 0 | ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner); |
1650 | 0 | } |
1651 | | |
1652 | | static void |
1653 | | ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner) |
1654 | 0 | { |
1655 | 0 | CatCTup *ct = (CatCTup *) (((char *) tuple) - |
1656 | 0 | offsetof(CatCTup, tuple)); |
1657 | | |
1658 | | /* Safety checks to ensure we were handed a cache entry */ |
1659 | 0 | Assert(ct->ct_magic == CT_MAGIC); |
1660 | 0 | Assert(ct->refcount > 0); |
1661 | |
1662 | 0 | ct->refcount--; |
1663 | 0 | if (resowner) |
1664 | 0 | ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple); |
1665 | |
1666 | 0 | if ( |
1667 | 0 | #ifndef CATCACHE_FORCE_RELEASE |
1668 | 0 | ct->dead && |
1669 | 0 | #endif |
1670 | 0 | ct->refcount == 0 && |
1671 | 0 | (ct->c_list == NULL || ct->c_list->refcount == 0)) |
1672 | 0 | CatCacheRemoveCTup(ct->my_cache, ct); |
1673 | 0 | } |
1674 | | |
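
[Editor's note: the cast above recovers the CatCTup from its embedded HeapTupleData via offsetof(). A standalone sketch of that idiom, using illustrative struct names rather than the real catcache types:]

    #include <stddef.h>

    struct outer
    {
        int         magic;
        struct inner
        {
            int     payload;
        }           member;
    };

    /* Given a pointer to the embedded member, step back to its container. */
    static struct outer *
    outer_from_member(struct inner *p)
    {
        return (struct outer *) ((char *) p - offsetof(struct outer, member));
    }
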
1675 | | |
1676 | | /* |
1677 | | * GetCatCacheHashValue |
1678 | | * |
1679 | | * Compute the hash value for a given set of search keys. |
1680 | | * |
1681 | | * The reason for exposing this as part of the API is that the hash value is |
1682 | | * used in cache invalidation operations, so there are places outside the |
1683 | | * catcache code that need to be able to compute the hash values. |
1684 | | */ |
1685 | | uint32 |
1686 | | GetCatCacheHashValue(CatCache *cache, |
1687 | | Datum v1, |
1688 | | Datum v2, |
1689 | | Datum v3, |
1690 | | Datum v4) |
1691 | 0 | { |
1692 | | /* |
1693 | | * one-time startup overhead for each cache |
1694 | | */ |
1695 | 0 | ConditionalCatalogCacheInitializeCache(cache); |
1696 | | |
1697 | | /* |
1698 | | * calculate the hash value |
1699 | | */ |
1700 | 0 | return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4); |
1701 | 0 | } |
1702 | | |
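
[Editor's note: a sketch of the consumer side. Modules outside the catcache compare these hash values inside syscache invalidation callbacks; CacheRegisterSyscacheCallback(), GetSysCacheHashValue1() and PROCOID are the standard APIs, while the callback body and variable are illustrative.]

    static uint32 watched_hash = 0;     /* hash of one pg_proc row we track */

    static void
    proc_inval_callback(Datum arg, int cacheid, uint32 hashvalue)
    {
        /* hashvalue == 0 is the "flush everything" signal */
        if (hashvalue == 0 || hashvalue == watched_hash)
            watched_hash = 0;           /* forget our derived state */
    }

    /* At setup time (hypothetical):
     *   watched_hash = GetSysCacheHashValue1(PROCOID, ObjectIdGetDatum(fn));
     *   CacheRegisterSyscacheCallback(PROCOID, proc_inval_callback, (Datum) 0);
     */
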
1703 | | |
1704 | | /* |
1705 | | * SearchCatCacheList |
1706 | | * |
1707 | | * Generate a list of all tuples matching a partial key (that is, |
1708 | | * a key specifying just the first K of the cache's N key columns). |
1709 | | * |
1710 | | * It doesn't make any sense to specify all of the cache's key columns |
1711 | | * here: since the key is unique, there could be at most one match, so |
1712 | | * you ought to use SearchCatCache() instead. Hence this function takes |
1713 | | * one fewer Datum argument than SearchCatCache() does. |
1714 | | * |
1715 | | * The caller must not modify the list object or the pointed-to tuples, |
1716 | | * and must call ReleaseCatCacheList() when done with the list. |
1717 | | */ |
1718 | | CatCList * |
1719 | | SearchCatCacheList(CatCache *cache, |
1720 | | int nkeys, |
1721 | | Datum v1, |
1722 | | Datum v2, |
1723 | | Datum v3) |
1724 | 0 | { |
1725 | 0 | Datum v4 = 0; /* dummy last-column value */ |
1726 | 0 | Datum arguments[CATCACHE_MAXKEYS]; |
1727 | 0 | uint32 lHashValue; |
1728 | 0 | Index lHashIndex; |
1729 | 0 | dlist_iter iter; |
1730 | 0 | dlist_head *lbucket; |
1731 | 0 | CatCList *cl; |
1732 | 0 | CatCTup *ct; |
1733 | 0 | List *volatile ctlist; |
1734 | 0 | ListCell *ctlist_item; |
1735 | 0 | int nmembers; |
1736 | 0 | bool ordered; |
1737 | 0 | HeapTuple ntp; |
1738 | 0 | MemoryContext oldcxt; |
1739 | 0 | int i; |
1740 | 0 | CatCInProgress *save_in_progress; |
1741 | 0 | CatCInProgress in_progress_ent; |
1742 | | |
1743 | | /* |
1744 | | * one-time startup overhead for each cache |
1745 | | */ |
1746 | 0 | ConditionalCatalogCacheInitializeCache(cache); |
1747 | |
1748 | 0 | Assert(nkeys > 0 && nkeys < cache->cc_nkeys); |
1749 | |
1750 | | #ifdef CATCACHE_STATS |
1751 | | cache->cc_lsearches++; |
1752 | | #endif |
1753 | | |
1754 | | /* Initialize local parameter array */ |
1755 | 0 | arguments[0] = v1; |
1756 | 0 | arguments[1] = v2; |
1757 | 0 | arguments[2] = v3; |
1758 | 0 | arguments[3] = v4; |
1759 | | |
1760 | | /* |
1761 | | * If we haven't previously done a list search in this cache, create the |
1762 | | * bucket header array; otherwise, consider whether it's time to enlarge |
1763 | | * it. |
1764 | | */ |
1765 | 0 | if (cache->cc_lbucket == NULL) |
1766 | 0 | { |
1767 | | /* Arbitrary initial size --- must be a power of 2 */ |
1768 | 0 | int nbuckets = 16; |
1769 | |
1770 | 0 | cache->cc_lbucket = (dlist_head *) |
1771 | 0 | MemoryContextAllocZero(CacheMemoryContext, |
1772 | 0 | nbuckets * sizeof(dlist_head)); |
1773 | | /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */ |
1774 | 0 | cache->cc_nlbuckets = nbuckets; |
1775 | 0 | } |
1776 | 0 | else |
1777 | 0 | { |
1778 | | /* |
1779 | | * If the hash table has become too full, enlarge the buckets array. |
1780 | | * Quite arbitrarily, we enlarge when fill factor > 2. |
1781 | | */ |
1782 | 0 | if (cache->cc_nlist > cache->cc_nlbuckets * 2) |
1783 | 0 | RehashCatCacheLists(cache); |
1784 | 0 | } |
1785 | | |
1786 | | /* |
1787 | | * Find the hash bucket in which to look for the CatCList. |
1788 | | */ |
1789 | 0 | lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); |
1790 | 0 | lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets); |
1791 | | |
1792 | | /* |
1793 | | * scan the items until we find a match or exhaust our list |
1794 | | * |
1795 | | * Note: it's okay to use dlist_foreach here, even though we modify the |
1796 | | * dlist within the loop, because we don't continue the loop afterwards. |
1797 | | */ |
1798 | 0 | lbucket = &cache->cc_lbucket[lHashIndex]; |
1799 | 0 | dlist_foreach(iter, lbucket) |
1800 | 0 | { |
1801 | 0 | cl = dlist_container(CatCList, cache_elem, iter.cur); |
1802 | |
1803 | 0 | if (cl->dead) |
1804 | 0 | continue; /* ignore dead entries */ |
1805 | | |
1806 | 0 | if (cl->hash_value != lHashValue) |
1807 | 0 | continue; /* quickly skip entry if wrong hash val */ |
1808 | | |
1809 | | /* |
1810 | | * see if the cached list matches our key. |
1811 | | */ |
1812 | 0 | if (cl->nkeys != nkeys) |
1813 | 0 | continue; |
1814 | | |
1815 | 0 | if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments)) |
1816 | 0 | continue; |
1817 | | |
1818 | | /* |
1819 | | * We found a matching list. Move the list to the front of the list |
1820 | | * for its hashbucket, so as to speed subsequent searches. (We do not |
1821 | | * move the members to the fronts of their hashbucket lists, however, |
1822 | | * since there's no point in that unless they are searched for |
1823 | | * individually.) |
1824 | | */ |
1825 | 0 | dlist_move_head(lbucket, &cl->cache_elem); |
1826 | | |
1827 | | /* Bump the list's refcount and return it */ |
1828 | 0 | ResourceOwnerEnlarge(CurrentResourceOwner); |
1829 | 0 | cl->refcount++; |
1830 | 0 | ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl); |
1831 | |
1832 | 0 | CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list", |
1833 | 0 | cache->cc_relname); |
1834 | |
1835 | | #ifdef CATCACHE_STATS |
1836 | | cache->cc_lhits++; |
1837 | | #endif |
1838 | |
1839 | 0 | return cl; |
1840 | 0 | } |
1841 | | |
1842 | | /* |
1843 | | * List was not found in cache, so we have to build it by reading the |
1844 | | * relation. For each matching tuple found in the relation, use an |
1845 | | * existing cache entry if possible, else build a new one. |
1846 | | * |
1847 | | * We have to bump the member refcounts temporarily to ensure they won't |
1848 | | * get dropped from the cache while loading other members. We use a PG_TRY |
1849 | | * block to ensure we can undo those refcounts if we get an error before |
1850 | | * we finish constructing the CatCList. ctlist must be valid throughout |
1851 | | * the PG_TRY block. |
1852 | | */ |
1853 | 0 | ctlist = NIL; |
1854 | | |
1855 | | /* |
1856 | | * Cache invalidation can happen while we're building the list. |
1857 | | * CatalogCacheCreateEntry() handles concurrent invalidation of individual |
1858 | | * tuples, but it's also possible that a new entry is concurrently added |
1859 | | * that should be part of the list we're building. Register an |
1860 | | * "in-progress" entry that will receive the invalidation, until we have |
1861 | | * built the final list entry. |
1862 | | */ |
1863 | 0 | save_in_progress = catcache_in_progress_stack; |
1864 | 0 | in_progress_ent.next = catcache_in_progress_stack; |
1865 | 0 | in_progress_ent.cache = cache; |
1866 | 0 | in_progress_ent.hash_value = lHashValue; |
1867 | 0 | in_progress_ent.list = true; |
1868 | 0 | in_progress_ent.dead = false; |
1869 | 0 | catcache_in_progress_stack = &in_progress_ent; |
1870 | |
1871 | 0 | PG_TRY(); |
1872 | 0 | { |
1873 | 0 | ScanKeyData cur_skey[CATCACHE_MAXKEYS]; |
1874 | 0 | Relation relation; |
1875 | 0 | SysScanDesc scandesc; |
1876 | 0 | bool first_iter = true; |
1877 | |
1878 | 0 | relation = table_open(cache->cc_reloid, AccessShareLock); |
1879 | | |
1880 | | /* |
1881 | | * Ok, we need to do a lookup in the relation; copy the scankey and |
1882 | | * fill out any per-call fields. |
1883 | | */ |
1884 | 0 | memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys); |
1885 | 0 | cur_skey[0].sk_argument = v1; |
1886 | 0 | cur_skey[1].sk_argument = v2; |
1887 | 0 | cur_skey[2].sk_argument = v3; |
1888 | 0 | cur_skey[3].sk_argument = v4; |
1889 | | |
1890 | | /* |
1891 | | * Scan the table for matching entries. If an invalidation arrives |
1892 | | * mid-build, we will loop back here to retry. |
1893 | | */ |
1894 | 0 | do |
1895 | 0 | { |
1896 | | /* |
1897 | | * If we are retrying, release refcounts on any items created on |
1898 | | * the previous iteration. We dare not try to free them if |
1899 | | * they're now unreferenced, since an error while doing that would |
1900 | | * result in the PG_CATCH below doing extra refcount decrements. |
1901 | | * Besides, we'll likely re-adopt those items in the next |
1902 | | * iteration, so it's not worth complicating matters to try to get |
1903 | | * rid of them. |
1904 | | */ |
1905 | 0 | foreach(ctlist_item, ctlist) |
1906 | 0 | { |
1907 | 0 | ct = (CatCTup *) lfirst(ctlist_item); |
1908 | 0 | Assert(ct->c_list == NULL); |
1909 | 0 | Assert(ct->refcount > 0); |
1910 | 0 | ct->refcount--; |
1911 | 0 | } |
1912 | | /* Reset ctlist in preparation for new try */ |
1913 | 0 | ctlist = NIL; |
1914 | 0 | in_progress_ent.dead = false; |
1915 | |
1916 | 0 | scandesc = systable_beginscan(relation, |
1917 | 0 | cache->cc_indexoid, |
1918 | 0 | IndexScanOK(cache), |
1919 | 0 | NULL, |
1920 | 0 | nkeys, |
1921 | 0 | cur_skey); |
1922 | | |
1923 | | /* The list will be ordered iff we are doing an index scan */ |
1924 | 0 | ordered = (scandesc->irel != NULL); |
1925 | | |
1926 | | /* Injection point to help test the recursive invalidation case */ |
1927 | 0 | if (first_iter) |
1928 | 0 | { |
1929 | 0 | INJECTION_POINT("catcache-list-miss-systable-scan-started", NULL); |
1930 | 0 | first_iter = false; |
1931 | 0 | } |
1932 | |
1933 | 0 | while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) && |
1934 | 0 | !in_progress_ent.dead) |
1935 | 0 | { |
1936 | 0 | uint32 hashValue; |
1937 | 0 | Index hashIndex; |
1938 | 0 | bool found = false; |
1939 | 0 | dlist_head *bucket; |
1940 | | |
1941 | | /* |
1942 | | * See if there's an entry for this tuple already. |
1943 | | */ |
1944 | 0 | ct = NULL; |
1945 | 0 | hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp); |
1946 | 0 | hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets); |
1947 | |
1948 | 0 | bucket = &cache->cc_bucket[hashIndex]; |
1949 | 0 | dlist_foreach(iter, bucket) |
1950 | 0 | { |
1951 | 0 | ct = dlist_container(CatCTup, cache_elem, iter.cur); |
1952 | |
1953 | 0 | if (ct->dead || ct->negative) |
1954 | 0 | continue; /* ignore dead and negative entries */ |
1955 | | |
1956 | 0 | if (ct->hash_value != hashValue) |
1957 | 0 | continue; /* quickly skip entry if wrong hash val */ |
1958 | | |
1959 | 0 | if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self))) |
1960 | 0 | continue; /* not same tuple */ |
1961 | | |
1962 | | /* |
1963 | | * Found a match, but can't use it if it belongs to |
1964 | | * another list already |
1965 | | */ |
1966 | 0 | if (ct->c_list) |
1967 | 0 | continue; |
1968 | | |
1969 | 0 | found = true; |
1970 | 0 | break; /* A-OK */ |
1971 | 0 | } |
1972 | |
1973 | 0 | if (!found) |
1974 | 0 | { |
1975 | | /* We didn't find a usable entry, so make a new one */ |
1976 | 0 | ct = CatalogCacheCreateEntry(cache, ntp, NULL, |
1977 | 0 | hashValue, hashIndex); |
1978 | | |
1979 | | /* upon failure, we must start the scan over */ |
1980 | 0 | if (ct == NULL) |
1981 | 0 | { |
1982 | 0 | in_progress_ent.dead = true; |
1983 | 0 | break; |
1984 | 0 | } |
1985 | 0 | } |
1986 | | |
1987 | | /* Careful here: add entry to ctlist, then bump its refcount */ |
1988 | | /* This way leaves state correct if lappend runs out of memory */ |
1989 | 0 | ctlist = lappend(ctlist, ct); |
1990 | 0 | ct->refcount++; |
1991 | 0 | } |
1992 | |
1993 | 0 | systable_endscan(scandesc); |
1994 | 0 | } while (in_progress_ent.dead); |
1995 | |
1996 | 0 | table_close(relation, AccessShareLock); |
1997 | | |
1998 | | /* Make sure the resource owner has room to remember this entry. */ |
1999 | 0 | ResourceOwnerEnlarge(CurrentResourceOwner); |
2000 | | |
2001 | | /* Now we can build the CatCList entry. */ |
2002 | 0 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
2003 | 0 | nmembers = list_length(ctlist); |
2004 | 0 | cl = (CatCList *) |
2005 | 0 | palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *)); |
2006 | | |
2007 | | /* Extract key values */ |
2008 | 0 | CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno, |
2009 | 0 | arguments, cl->keys); |
2010 | 0 | MemoryContextSwitchTo(oldcxt); |
2011 | | |
2012 | | /* |
2013 | | * We are now past the last thing that could trigger an elog before we |
2014 | | * have finished building the CatCList and remembering it in the |
2015 | | * resource owner. So it's OK to fall out of the PG_TRY, and indeed |
2016 | | * we'd better do so before we start marking the members as belonging |
2017 | | * to the list. |
2018 | | */ |
2019 | 0 | } |
2020 | 0 | PG_CATCH(); |
2021 | 0 | { |
2022 | 0 | Assert(catcache_in_progress_stack == &in_progress_ent); |
2023 | 0 | catcache_in_progress_stack = save_in_progress; |
2024 | |
2025 | 0 | foreach(ctlist_item, ctlist) |
2026 | 0 | { |
2027 | 0 | ct = (CatCTup *) lfirst(ctlist_item); |
2028 | 0 | Assert(ct->c_list == NULL); |
2029 | 0 | Assert(ct->refcount > 0); |
2030 | 0 | ct->refcount--; |
2031 | 0 | if ( |
2032 | 0 | #ifndef CATCACHE_FORCE_RELEASE |
2033 | 0 | ct->dead && |
2034 | 0 | #endif |
2035 | 0 | ct->refcount == 0 && |
2036 | 0 | (ct->c_list == NULL || ct->c_list->refcount == 0)) |
2037 | 0 | CatCacheRemoveCTup(cache, ct); |
2038 | 0 | } |
2039 | |
2040 | 0 | PG_RE_THROW(); |
2041 | 0 | } |
2042 | 0 | PG_END_TRY(); |
2043 | 0 | Assert(catcache_in_progress_stack == &in_progress_ent); |
2044 | 0 | catcache_in_progress_stack = save_in_progress; |
2045 | |
2046 | 0 | cl->cl_magic = CL_MAGIC; |
2047 | 0 | cl->my_cache = cache; |
2048 | 0 | cl->refcount = 0; /* for the moment */ |
2049 | 0 | cl->dead = false; |
2050 | 0 | cl->ordered = ordered; |
2051 | 0 | cl->nkeys = nkeys; |
2052 | 0 | cl->hash_value = lHashValue; |
2053 | 0 | cl->n_members = nmembers; |
2054 | |
2055 | 0 | i = 0; |
2056 | 0 | foreach(ctlist_item, ctlist) |
2057 | 0 | { |
2058 | 0 | cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item); |
2059 | 0 | Assert(ct->c_list == NULL); |
2060 | 0 | ct->c_list = cl; |
2061 | | /* release the temporary refcount on the member */ |
2062 | 0 | Assert(ct->refcount > 0); |
2063 | 0 | ct->refcount--; |
2064 | | /* mark list dead if any members already dead */ |
2065 | 0 | if (ct->dead) |
2066 | 0 | cl->dead = true; |
2067 | 0 | } |
2068 | 0 | Assert(i == nmembers); |
2069 | | |
2070 | | /* |
2071 | | * Add the CatCList to the appropriate bucket, and count it. |
2072 | | */ |
2073 | 0 | dlist_push_head(lbucket, &cl->cache_elem); |
2074 | |
2075 | 0 | cache->cc_nlist++; |
2076 | | |
2077 | | /* Finally, bump the list's refcount and return it */ |
2078 | 0 | cl->refcount++; |
2079 | 0 | ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl); |
2080 | |
2081 | 0 | CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members", |
2082 | 0 | cache->cc_relname, nmembers); |
2083 | |
2084 | 0 | return cl; |
2085 | 0 | } |
2086 | | |
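
[Editor's note: a sketch of typical use through the syscache wrapper, in the style of amvalidate.c: search on the leading key column only, walk the members, and release. SearchSysCacheList1(), AMOPSTRATEGY and Form_pg_amop (catalog/pg_amop.h) are the standard APIs; the surrounding function is hypothetical.]

    /* Hypothetical walker over all pg_amop rows of one operator family. */
    static void
    walk_opfamily_operators(Oid opfamilyoid)
    {
        CatCList   *oplist;

        oplist = SearchSysCacheList1(AMOPSTRATEGY,
                                     ObjectIdGetDatum(opfamilyoid));
        for (int i = 0; i < oplist->n_members; i++)
        {
            HeapTuple    tup = &oplist->members[i]->tuple;
            Form_pg_amop amop = (Form_pg_amop) GETSTRUCT(tup);

            /* ... inspect amop->amopstrategy, amop->amopopr, ... */
        }
        ReleaseCatCacheList(oplist);
    }
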
2087 | | /* |
2088 | | * ReleaseCatCacheList |
2089 | | * |
2090 | | * Decrement the reference count of a catcache list. |
2091 | | */ |
2092 | | void |
2093 | | ReleaseCatCacheList(CatCList *list) |
2094 | 0 | { |
2095 | 0 | ReleaseCatCacheListWithOwner(list, CurrentResourceOwner); |
2096 | 0 | } |
2097 | | |
2098 | | static void |
2099 | | ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner) |
2100 | 0 | { |
2101 | | /* Safety checks to ensure we were handed a cache entry */ |
2102 | 0 | Assert(list->cl_magic == CL_MAGIC); |
2103 | 0 | Assert(list->refcount > 0); |
2104 | 0 | list->refcount--; |
2105 | 0 | if (resowner) |
2106 | 0 | ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list); |
2107 | |
2108 | 0 | if ( |
2109 | 0 | #ifndef CATCACHE_FORCE_RELEASE |
2110 | 0 | list->dead && |
2111 | 0 | #endif |
2112 | 0 | list->refcount == 0) |
2113 | 0 | CatCacheRemoveCList(list->my_cache, list); |
2114 | 0 | } |
2115 | | |
2116 | | |
2117 | | /* |
2118 | | * CatalogCacheCreateEntry |
2119 | | * Create a new CatCTup entry, copying the given HeapTuple and other |
2120 | | * supplied data into it. The new entry initially has refcount 0. |
2121 | | * |
2122 | | * To create a normal cache entry, ntp must be the HeapTuple just fetched |
2123 | | * from scandesc, and "arguments" is not used. To create a negative cache |
2124 | | * entry, pass NULL for ntp; then "arguments" is the cache keys to use. |
2125 | | * In either case, hashValue/hashIndex are the hash values computed from |
2126 | | * the cache keys. |
2127 | | * |
2128 | | * Returns NULL if we attempt to detoast the tuple and observe that it |
2129 | | * became stale. (This cannot happen for a negative entry.) Caller must |
2130 | | * retry the tuple lookup in that case. |
2131 | | */ |
2132 | | static CatCTup * |
2133 | | CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments, |
2134 | | uint32 hashValue, Index hashIndex) |
2135 | 0 | { |
2136 | 0 | CatCTup *ct; |
2137 | 0 | MemoryContext oldcxt; |
2138 | |
2139 | 0 | if (ntp) |
2140 | 0 | { |
2141 | 0 | int i; |
2142 | 0 | HeapTuple dtp = NULL; |
2143 | | |
2144 | | /* |
2145 | | * The invalidation of the in-progress entry essentially never happens |
2146 | | * during our regression tests, and there's no easy way to force it to |
2147 | | * fail for testing purposes. To ensure we have test coverage for the |
2148 | | * retry paths in our callers, make debug builds randomly fail about |
2149 | | * 0.1% of the time through this code path, even when there are no |
2150 | | * toasted fields. |
2151 | | */ |
2152 | | #ifdef USE_ASSERT_CHECKING |
2153 | | if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000)) |
2154 | | return NULL; |
2155 | | #endif |
2156 | | |
2157 | | /* |
2158 | | * If there are any out-of-line toasted fields in the tuple, expand |
2159 | | * them in-line. This saves cycles during later use of the catcache |
2160 | | * entry, and also protects us against the possibility of the toast |
2161 | | * tuples being freed before we attempt to fetch them, in case of |
2162 | | * something using a slightly stale catcache entry. |
2163 | | */ |
2164 | 0 | if (HeapTupleHasExternal(ntp)) |
2165 | 0 | { |
2166 | 0 | CatCInProgress *save_in_progress; |
2167 | 0 | CatCInProgress in_progress_ent; |
2168 | | |
2169 | | /* |
2170 | | * The tuple could become stale while we are doing toast table |
2171 | | * access (since AcceptInvalidationMessages can run then). The |
2172 | | * invalidation will mark our in-progress entry as dead. |
2173 | | */ |
2174 | 0 | save_in_progress = catcache_in_progress_stack; |
2175 | 0 | in_progress_ent.next = catcache_in_progress_stack; |
2176 | 0 | in_progress_ent.cache = cache; |
2177 | 0 | in_progress_ent.hash_value = hashValue; |
2178 | 0 | in_progress_ent.list = false; |
2179 | 0 | in_progress_ent.dead = false; |
2180 | 0 | catcache_in_progress_stack = &in_progress_ent; |
2181 | |
2182 | 0 | PG_TRY(); |
2183 | 0 | { |
2184 | 0 | dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc); |
2185 | 0 | } |
2186 | 0 | PG_FINALLY(); |
2187 | 0 | { |
2188 | 0 | Assert(catcache_in_progress_stack == &in_progress_ent); |
2189 | 0 | catcache_in_progress_stack = save_in_progress; |
2190 | 0 | } |
2191 | 0 | PG_END_TRY(); |
2192 | |
2193 | 0 | if (in_progress_ent.dead) |
2194 | 0 | { |
2195 | 0 | heap_freetuple(dtp); |
2196 | 0 | return NULL; |
2197 | 0 | } |
2198 | 0 | } |
2199 | 0 | else |
2200 | 0 | dtp = ntp; |
2201 | | |
2202 | | /* Allocate memory for CatCTup and the cached tuple in one go */ |
2203 | 0 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
2204 | |
2205 | 0 | ct = (CatCTup *) palloc(sizeof(CatCTup) + |
2206 | 0 | MAXIMUM_ALIGNOF + dtp->t_len); |
2207 | 0 | ct->tuple.t_len = dtp->t_len; |
2208 | 0 | ct->tuple.t_self = dtp->t_self; |
2209 | 0 | ct->tuple.t_tableOid = dtp->t_tableOid; |
2210 | 0 | ct->tuple.t_data = (HeapTupleHeader) |
2211 | 0 | MAXALIGN(((char *) ct) + sizeof(CatCTup)); |
2212 | | /* copy tuple contents */ |
2213 | 0 | memcpy((char *) ct->tuple.t_data, |
2214 | 0 | (const char *) dtp->t_data, |
2215 | 0 | dtp->t_len); |
2216 | 0 | MemoryContextSwitchTo(oldcxt); |
2217 | |
2218 | 0 | if (dtp != ntp) |
2219 | 0 | heap_freetuple(dtp); |
2220 | | |
2221 | | /* extract keys - they'll point into the tuple if not by-value */ |
2222 | 0 | for (i = 0; i < cache->cc_nkeys; i++) |
2223 | 0 | { |
2224 | 0 | Datum atp; |
2225 | 0 | bool isnull; |
2226 | |
2227 | 0 | atp = heap_getattr(&ct->tuple, |
2228 | 0 | cache->cc_keyno[i], |
2229 | 0 | cache->cc_tupdesc, |
2230 | 0 | &isnull); |
2231 | 0 | Assert(!isnull); |
2232 | 0 | ct->keys[i] = atp; |
2233 | 0 | } |
2234 | 0 | } |
2235 | 0 | else |
2236 | 0 | { |
2237 | | /* Set up keys for a negative cache entry */ |
2238 | 0 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
2239 | 0 | ct = (CatCTup *) palloc(sizeof(CatCTup)); |
2240 | | |
2241 | | /* |
2242 | | * Store keys - they'll point into separately allocated memory if not |
2243 | | * by-value. |
2244 | | */ |
2245 | 0 | CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno, |
2246 | 0 | arguments, ct->keys); |
2247 | 0 | MemoryContextSwitchTo(oldcxt); |
2248 | 0 | } |
2249 | | |
2250 | | /* |
2251 | | * Finish initializing the CatCTup header, and add it to the cache's |
2252 | | * linked list and counts. |
2253 | | */ |
2254 | 0 | ct->ct_magic = CT_MAGIC; |
2255 | 0 | ct->my_cache = cache; |
2256 | 0 | ct->c_list = NULL; |
2257 | 0 | ct->refcount = 0; /* for the moment */ |
2258 | 0 | ct->dead = false; |
2259 | 0 | ct->negative = (ntp == NULL); |
2260 | 0 | ct->hash_value = hashValue; |
2261 | |
2262 | 0 | dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem); |
2263 | |
2264 | 0 | cache->cc_ntup++; |
2265 | 0 | CacheHdr->ch_ntup++; |
2266 | | |
2267 | | /* |
2268 | | * If the hash table has become too full, enlarge the buckets array. Quite |
2269 | | * arbitrarily, we enlarge when fill factor > 2. |
2270 | | */ |
2271 | 0 | if (cache->cc_ntup > cache->cc_nbuckets * 2) |
2272 | 0 | RehashCatCache(cache); |
2273 | |
2274 | 0 | return ct; |
2275 | 0 | } |
2276 | | |
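
[Editor's note: the single-palloc layout above (header plus tuple body in one chunk, body placed at the first maxaligned offset past the header) can be shown in isolation. MAXALIGN and MAXIMUM_ALIGNOF are the standard PostgreSQL macros; the struct and function here are illustrative.]

    typedef struct blob
    {
        int         magic;
        char       *data;           /* points just past the header */
    } blob;

    static blob *
    make_blob(const char *src, Size len)
    {
        /* MAXIMUM_ALIGNOF of slack guarantees room to round data up */
        blob       *b = (blob *) palloc(sizeof(blob) + MAXIMUM_ALIGNOF + len);

        b->magic = 0x424c4f42;
        b->data = (char *) MAXALIGN(((char *) b) + sizeof(blob));
        memcpy(b->data, src, len);
        return b;
    }
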
2277 | | /* |
2278 | | * Helper routine that frees keys stored in the keys array. |
2279 | | */ |
2280 | | static void |
2281 | | CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys) |
2282 | 0 | { |
2283 | 0 | int i; |
2284 | |
2285 | 0 | for (i = 0; i < nkeys; i++) |
2286 | 0 | { |
2287 | 0 | int attnum = attnos[i]; |
2288 | 0 | Form_pg_attribute att; |
2289 | | |
2290 | | /* system attributes are not supported in caches */ |
2291 | 0 | Assert(attnum > 0); |
2292 | |
2293 | 0 | att = TupleDescAttr(tupdesc, attnum - 1); |
2294 | |
2295 | 0 | if (!att->attbyval) |
2296 | 0 | pfree(DatumGetPointer(keys[i])); |
2297 | 0 | } |
2298 | 0 | } |
2299 | | |
2300 | | /* |
2301 | | * Helper routine that copies the keys in the srckeys array into the dstkeys |
2302 | | * one, guaranteeing that the datums are fully allocated in the current memory |
2303 | | * context. |
2304 | | */ |
2305 | | static void |
2306 | | CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos, |
2307 | | Datum *srckeys, Datum *dstkeys) |
2308 | 0 | { |
2309 | 0 | int i; |
2310 | | |
2311 | | /* |
2312 | | * XXX: memory and lookup performance could possibly be improved by |
2313 | | * storing all keys in one allocation. |
2314 | | */ |
2315 | |
2316 | 0 | for (i = 0; i < nkeys; i++) |
2317 | 0 | { |
2318 | 0 | int attnum = attnos[i]; |
2319 | 0 | Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1); |
2320 | 0 | Datum src = srckeys[i]; |
2321 | 0 | NameData srcname; |
2322 | | |
2323 | | /* |
2324 | | * Must be careful in case the caller passed a C string where a NAME |
2325 | | * is wanted: convert the given argument to a correctly padded NAME. |
2326 | | * Otherwise the memcpy() done by datumCopy() could fall off the end |
2327 | | * of memory. |
2328 | | */ |
2329 | 0 | if (att->atttypid == NAMEOID) |
2330 | 0 | { |
2331 | 0 | namestrcpy(&srcname, DatumGetCString(src)); |
2332 | 0 | src = NameGetDatum(&srcname); |
2333 | 0 | } |
2334 | |
2335 | 0 | dstkeys[i] = datumCopy(src, |
2336 | 0 | att->attbyval, |
2337 | 0 | att->attlen); |
2338 | 0 | } |
2339 | 0 | } |
2340 | | |
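
[Editor's note: the NAME-padding hazard described in the comment above can be isolated as follows. namestrcpy() zero-pads to NAMEDATALEN, which makes the subsequent fixed-length datumCopy() safe; the helper function itself is hypothetical.]

    /* Hypothetical helper: turn a caller-supplied C string key into a
     * separately allocated, fully padded NAME datum. */
    static Datum
    cstring_key_to_name_datum(const char *key)
    {
        NameData    padded;

        namestrcpy(&padded, key);   /* zero-pads out to NAMEDATALEN bytes */
        return datumCopy(NameGetDatum(&padded), false, NAMEDATALEN);
    }
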
2341 | | /* |
2342 | | * PrepareToInvalidateCacheTuple() |
2343 | | * |
2344 | | * This is part of a rather subtle chain of events, so pay attention: |
2345 | | * |
2346 | | * When a tuple is inserted or deleted, it cannot be flushed from the |
2347 | | * catcaches immediately, for reasons explained at the top of cache/inval.c. |
2348 | | * Instead we have to add entries for the tuple to a list of pending tuple |
2349 | | * invalidations that will be done at the end of the command or transaction. |
2350 | | * |
2351 | | * The lists of tuples that need to be flushed are kept by inval.c. This |
2352 | | * routine is a helper routine for inval.c. Given a tuple belonging to |
2353 | | * the specified relation, find all catcaches it could be in, compute the |
2354 | | * correct hash value for each such catcache, and call the specified |
2355 | | * function to record the cache id and hash value in inval.c's lists. |
2356 | | * SysCacheInvalidate will be called later, if appropriate, |
2357 | | * using the recorded information. |
2358 | | * |
2359 | | * For an insert or delete, tuple is the target tuple and newtuple is NULL. |
2360 | | * For an update, we are called just once, with tuple being the old tuple |
2361 | | * version and newtuple the new version. We should make two list entries |
2362 | | * if the tuple's hash value changed, but only one if it didn't. |
2363 | | * |
2364 | | * Note that it is irrelevant whether the given tuple is actually loaded |
2365 | | * into the catcache at the moment. Even if it's not there now, it might |
2366 | | * be by the end of the command, or there might be a matching negative entry |
2367 | | * to flush --- or other backends' caches might have such entries --- so |
2368 | | * we have to make list entries to flush it later. |
2369 | | * |
2370 | | * Also note that it's not an error if there are no catcaches for the |
2371 | | * specified relation. inval.c doesn't know exactly which rels have |
2372 | | * catcaches --- it will call this routine for any tuple that's in a |
2373 | | * system relation. |
2374 | | */ |
2375 | | void |
2376 | | PrepareToInvalidateCacheTuple(Relation relation, |
2377 | | HeapTuple tuple, |
2378 | | HeapTuple newtuple, |
2379 | | void (*function) (int, uint32, Oid, void *), |
2380 | | void *context) |
2381 | 0 | { |
2382 | 0 | slist_iter iter; |
2383 | 0 | Oid reloid; |
2384 | |
2385 | 0 | CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called"); |
2386 | | |
2387 | | /* |
2388 | | * sanity checks |
2389 | | */ |
2390 | 0 | Assert(RelationIsValid(relation)); |
2391 | 0 | Assert(HeapTupleIsValid(tuple)); |
2392 | 0 | Assert(PointerIsValid(function)); |
2393 | 0 | Assert(CacheHdr != NULL); |
2394 | |
2395 | 0 | reloid = RelationGetRelid(relation); |
2396 | | |
2397 | | /* ---------------- |
2398 | | * for each cache |
2399 | | * if the cache contains tuples from the specified relation |
2400 | | * compute the tuple's hash value(s) in this cache, |
2401 | | * and call the passed function to register the information. |
2402 | | * ---------------- |
2403 | | */ |
2404 | |
2405 | 0 | slist_foreach(iter, &CacheHdr->ch_caches) |
2406 | 0 | { |
2407 | 0 | CatCache *ccp = slist_container(CatCache, cc_next, iter.cur); |
2408 | 0 | uint32 hashvalue; |
2409 | 0 | Oid dbid; |
2410 | |
2411 | 0 | if (ccp->cc_reloid != reloid) |
2412 | 0 | continue; |
2413 | | |
2414 | | /* Just in case cache hasn't finished initialization yet... */ |
2415 | 0 | ConditionalCatalogCacheInitializeCache(ccp); |
2416 | |
2417 | 0 | hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple); |
2418 | 0 | dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId; |
2419 | |
2420 | 0 | (*function) (ccp->id, hashvalue, dbid, context); |
2421 | |
2422 | 0 | if (newtuple) |
2423 | 0 | { |
2424 | 0 | uint32 newhashvalue; |
2425 | |
2426 | 0 | newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple); |
2427 | |
2428 | 0 | if (newhashvalue != hashvalue) |
2429 | 0 | (*function) (ccp->id, newhashvalue, dbid, context); |
2430 | 0 | } |
2431 | 0 | } |
2432 | 0 | } |
2433 | | |
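
[Editor's note: the callback contract is just the four-argument function pointer taken above. A sketch of a recorder that an inval.c-like caller could pass; all names here are hypothetical, and the usual list headers (nodes/pg_list.h) are assumed.]

    typedef struct pending_inval
    {
        int         cache_id;
        uint32      hash_value;
        Oid         db_id;
    } pending_inval;

    /* Matches the (int, uint32, Oid, void *) signature expected above. */
    static void
    record_catcache_inval(int cacheId, uint32 hashValue, Oid dbId, void *context)
    {
        List      **pending = (List **) context;
        pending_inval *ent = (pending_inval *) palloc(sizeof(pending_inval));

        ent->cache_id = cacheId;
        ent->hash_value = hashValue;
        ent->db_id = dbId;
        *pending = lappend(*pending, ent);
    }
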
2434 | | /* ResourceOwner callbacks */ |
2435 | | |
2436 | | static void |
2437 | | ResOwnerReleaseCatCache(Datum res) |
2438 | 0 | { |
2439 | 0 | ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL); |
2440 | 0 | } |
2441 | | |
2442 | | static char * |
2443 | | ResOwnerPrintCatCache(Datum res) |
2444 | 0 | { |
2445 | 0 | HeapTuple tuple = (HeapTuple) DatumGetPointer(res); |
2446 | 0 | CatCTup *ct = (CatCTup *) (((char *) tuple) - |
2447 | 0 | offsetof(CatCTup, tuple)); |
2448 | | |
2449 | | /* Safety check to ensure we were handed a cache entry */ |
2450 | 0 | Assert(ct->ct_magic == CT_MAGIC); |
2451 | |
2452 | 0 | return psprintf("cache %s (%d), tuple %u/%u has count %d", |
2453 | 0 | ct->my_cache->cc_relname, ct->my_cache->id, |
2454 | 0 | ItemPointerGetBlockNumber(&(tuple->t_self)), |
2455 | 0 | ItemPointerGetOffsetNumber(&(tuple->t_self)), |
2456 | 0 | ct->refcount); |
2457 | 0 | } |
2458 | | |
2459 | | static void |
2460 | | ResOwnerReleaseCatCacheList(Datum res) |
2461 | 0 | { |
2462 | 0 | ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL); |
2463 | 0 | } |
2464 | | |
2465 | | static char * |
2466 | | ResOwnerPrintCatCacheList(Datum res) |
2467 | 0 | { |
2468 | 0 | CatCList *list = (CatCList *) DatumGetPointer(res); |
2469 | |
2470 | 0 | return psprintf("cache %s (%d), list %p has count %d", |
2471 | 0 | list->my_cache->cc_relname, list->my_cache->id, |
2472 | 0 | list, list->refcount); |
2473 | 0 | } |