Coverage Report

Created: 2025-08-12 06:43

/src/postgres/src/backend/utils/cache/catcache.c
/*-------------------------------------------------------------------------
 *
 * catcache.c
 *    System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heaptoast.h"
#include "access/relscan.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/pg_prng.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"    /* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner.h"
#include "utils/syscache.h"

/*
 * If a catcache invalidation is processed while we are in the middle of
 * creating a catcache entry (or list), it might apply to the entry we're
 * creating, making it invalid before it has been inserted into the catcache.
 * To catch such cases, we keep a stack of "create-in-progress" entries.
 * Cache invalidation marks any matching entries in the stack as dead, in
 * addition to the actual CatCTup and CatCList entries.
 */
typedef struct CatCInProgress
{
    CatCache   *cache;          /* cache that the entry belongs to */
    uint32      hash_value;     /* hash of the entry; ignored for lists */
    bool        list;           /* is it a list entry? */
    bool        dead;           /* set when the entry is invalidated */
    struct CatCInProgress *next;
} CatCInProgress;

static CatCInProgress *catcache_in_progress_stack = NULL;
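
/*
 * Condensed, illustrative sketch of the push/pop protocol around the stack
 * above (see CatalogCacheCreateEntry and SearchCatCacheList for the real
 * code; the flag values here are for a plain tuple entry):
 *
 *     CatCInProgress in_progress_ent;
 *
 *     in_progress_ent.cache = cache;
 *     in_progress_ent.hash_value = hashValue;
 *     in_progress_ent.list = false;
 *     in_progress_ent.dead = false;
 *     in_progress_ent.next = catcache_in_progress_stack;
 *     catcache_in_progress_stack = &in_progress_ent;
 *
 *     ... do work that might process invalidation messages ...
 *
 *     catcache_in_progress_stack = in_progress_ent.next;
 *     if (in_progress_ent.dead)
 *         ... discard the half-built entry and retry ...
 */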

/* #define CACHEDEBUG */    /* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs.  Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
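
/*
 * Worked example: with sz = 256 the mask is 0xFF, so
 * HASH_INDEX(0xDEADBEEF, 256) == 0xEF --- the same result as h % sz, but
 * computed without a division.
 */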

/*
 *    variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE_elog(...)     elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;

static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
                                               int nkeys,
                                               Datum v1, Datum v2,
                                               Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
                                                int nkeys,
                                                uint32 hashValue,
                                                Index hashIndex,
                                                Datum v1, Datum v2,
                                                Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                                           Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
                                                HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
                                            const Datum *cachekeys,
                                            const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void RehashCatCache(CatCache *cp);
static void RehashCatCacheLists(CatCache *cp);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                                        Datum *arguments,
                                        uint32 hashValue, Index hashIndex);

static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
                             Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
                             Datum *srckeys, Datum *dstkeys);


/*
 *          internal support functions
 */

/* ResourceOwner callbacks to hold catcache references */

static void ResOwnerReleaseCatCache(Datum res);
static char *ResOwnerPrintCatCache(Datum res);
static void ResOwnerReleaseCatCacheList(Datum res);
static char *ResOwnerPrintCatCacheList(Datum res);

static const ResourceOwnerDesc catcache_resowner_desc =
{
    /* catcache references */
    .name = "catcache reference",
    .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
    .release_priority = RELEASE_PRIO_CATCACHE_REFS,
    .ReleaseResource = ResOwnerReleaseCatCache,
    .DebugPrint = ResOwnerPrintCatCache
};

static const ResourceOwnerDesc catlistref_resowner_desc =
{
    /* catcache-list pins */
    .name = "catcache list reference",
    .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
    .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
    .ReleaseResource = ResOwnerReleaseCatCacheList,
    .DebugPrint = ResOwnerPrintCatCacheList
};

/* Convenience wrappers over ResourceOwnerRemember/Forget */
static inline void
ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
    ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static inline void
ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
    ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static inline void
ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
{
    ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
static inline void
ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
{
    ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}

/*
 * Hash and equality functions for system types that are used as cache key
 * fields.  In some cases, we just call the regular SQL-callable functions
 * for the appropriate data type, but that tends to be a little slow, and
 * the speed of these functions is performance-critical.  Therefore, for
 * data types that frequently occur as catcache keys, we hard-code the logic
 * here.  Avoiding the overhead of DirectFunctionCallN(...) is a substantial
 * win, and in certain cases (like int4) we can adopt a faster hash
 * algorithm as well.
 */

static bool
chareqfast(Datum a, Datum b)
{
    return DatumGetChar(a) == DatumGetChar(b);
}

static uint32
charhashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetChar(datum));
}

static bool
nameeqfast(Datum a, Datum b)
{
    char       *ca = NameStr(*DatumGetName(a));
    char       *cb = NameStr(*DatumGetName(b));

    return strncmp(ca, cb, NAMEDATALEN) == 0;
}

static uint32
namehashfast(Datum datum)
{
    char       *key = NameStr(*DatumGetName(datum));

    return hash_bytes((unsigned char *) key, strlen(key));
}

static bool
int2eqfast(Datum a, Datum b)
{
    return DatumGetInt16(a) == DatumGetInt16(b);
}

static uint32
int2hashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetInt16(datum));
}

static bool
int4eqfast(Datum a, Datum b)
{
    return DatumGetInt32(a) == DatumGetInt32(b);
}

static uint32
int4hashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetInt32(datum));
}

static bool
texteqfast(Datum a, Datum b)
{
    /*
     * The use of DEFAULT_COLLATION_OID is fairly arbitrary here.  We just
     * want to take the fast "deterministic" path in texteq().
     */
    return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
}

static uint32
texthashfast(Datum datum)
{
    /* as in texteqfast(), take the fast deterministic path */
    return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
}

static bool
oidvectoreqfast(Datum a, Datum b)
{
    return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
}

static uint32
oidvectorhashfast(Datum datum)
{
    return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
}

/* Lookup support functions for a type. */
static void
GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc,
                 CCFastEqualFN *fasteqfunc)
{
    switch (keytype)
    {
        case BOOLOID:
            *hashfunc = charhashfast;
            *fasteqfunc = chareqfast;
            *eqfunc = F_BOOLEQ;
            break;
        case CHAROID:
            *hashfunc = charhashfast;
            *fasteqfunc = chareqfast;
            *eqfunc = F_CHAREQ;
            break;
        case NAMEOID:
            *hashfunc = namehashfast;
            *fasteqfunc = nameeqfast;
            *eqfunc = F_NAMEEQ;
            break;
        case INT2OID:
            *hashfunc = int2hashfast;
            *fasteqfunc = int2eqfast;
            *eqfunc = F_INT2EQ;
            break;
        case INT4OID:
            *hashfunc = int4hashfast;
            *fasteqfunc = int4eqfast;
            *eqfunc = F_INT4EQ;
            break;
        case TEXTOID:
            *hashfunc = texthashfast;
            *fasteqfunc = texteqfast;
            *eqfunc = F_TEXTEQ;
            break;
        case OIDOID:
        case REGPROCOID:
        case REGPROCEDUREOID:
        case REGOPEROID:
        case REGOPERATOROID:
        case REGCLASSOID:
        case REGTYPEOID:
        case REGCOLLATIONOID:
        case REGCONFIGOID:
        case REGDICTIONARYOID:
        case REGROLEOID:
        case REGNAMESPACEOID:
        case REGDATABASEOID:
            *hashfunc = int4hashfast;
            *fasteqfunc = int4eqfast;
            *eqfunc = F_OIDEQ;
            break;
        case OIDVECTOROID:
            *hashfunc = oidvectorhashfast;
            *fasteqfunc = oidvectoreqfast;
            *eqfunc = F_OIDVECTOREQ;
            break;
        default:
            elog(FATAL, "type %u not supported as catcache key", keytype);
            *hashfunc = NULL;   /* keep compiler quiet */

            *eqfunc = InvalidOid;
            break;
    }
}
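
#ifdef CATCACHE_EXAMPLES            /* hypothetical guard, never defined */
/*
 * Minimal sketch of resolving and using the support functions for an int4
 * key column; the Datum values are arbitrary.
 */
static void
example_cc_funcs(void)
{
    CCHashFN    hashfunc;
    RegProcedure eqfunc;
    CCFastEqualFN fasteqfunc;
    uint32      h;

    GetCCHashEqFuncs(INT4OID, &hashfunc, &eqfunc, &fasteqfunc);
    h = hashfunc(Int32GetDatum(42));    /* murmurhash32 under the hood */
    Assert(fasteqfunc(Int32GetDatum(42), Int32GetDatum(42)));
    (void) h;
    (void) eqfunc;
}
#endif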

/*
 *    CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys.
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                             Datum v1, Datum v2, Datum v3, Datum v4)
{
    uint32      hashValue = 0;
    uint32      oneHash;
    CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

    CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
               cache->cc_relname, nkeys, cache);

    switch (nkeys)
    {
        case 4:
            oneHash = (cc_hashfunc[3]) (v4);
            hashValue ^= pg_rotate_left32(oneHash, 24);
            /* FALLTHROUGH */
        case 3:
            oneHash = (cc_hashfunc[2]) (v3);
            hashValue ^= pg_rotate_left32(oneHash, 16);
            /* FALLTHROUGH */
        case 2:
            oneHash = (cc_hashfunc[1]) (v2);
            hashValue ^= pg_rotate_left32(oneHash, 8);
            /* FALLTHROUGH */
        case 1:
            oneHash = (cc_hashfunc[0]) (v1);
            hashValue ^= oneHash;
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return hashValue;
}
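
/*
 * In effect, for nkeys = 2 the result above is
 *
 *     hashValue = pg_rotate_left32(cc_hashfunc[1](v2), 8) ^ cc_hashfunc[0](v1)
 *
 * Giving each key position its own rotation keeps equal key values in
 * different columns from cancelling each other out in the XOR.
 */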

/*
 *    CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached.
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
    Datum       v1 = 0,
                v2 = 0,
                v3 = 0,
                v4 = 0;
    bool        isNull = false;
    int        *cc_keyno = cache->cc_keyno;
    TupleDesc   cc_tupdesc = cache->cc_tupdesc;

    /* Now extract key fields from tuple, insert into scankey */
    switch (nkeys)
    {
        case 4:
            v4 = fastgetattr(tuple,
                             cc_keyno[3],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 3:
            v3 = fastgetattr(tuple,
                             cc_keyno[2],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 2:
            v2 = fastgetattr(tuple,
                             cc_keyno[1],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 1:
            v1 = fastgetattr(tuple,
                             cc_keyno[0],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}

/*
 *    CatalogCacheCompareTuple
 *
 * Compare a tuple to the passed arguments.
 */
static inline bool
CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
                         const Datum *cachekeys,
                         const Datum *searchkeys)
{
    const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
            return false;
    }
    return true;
}


#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
    slist_iter  iter;
    long        cc_searches = 0;
    long        cc_hits = 0;
    long        cc_neg_hits = 0;
    long        cc_newloads = 0;
    long        cc_invals = 0;
    long        cc_nlists = 0;
    long        cc_lsearches = 0;
    long        cc_lhits = 0;

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        if (cache->cc_ntup == 0 && cache->cc_searches == 0)
            continue;           /* don't print unused caches */
        elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
             cache->cc_relname,
             cache->cc_indexoid,
             cache->cc_ntup,
             cache->cc_searches,
             cache->cc_hits,
             cache->cc_neg_hits,
             cache->cc_hits + cache->cc_neg_hits,
             cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
             cache->cc_invals,
             cache->cc_nlist,
             cache->cc_lsearches,
             cache->cc_lhits);
        cc_searches += cache->cc_searches;
        cc_hits += cache->cc_hits;
        cc_neg_hits += cache->cc_neg_hits;
        cc_newloads += cache->cc_newloads;
        cc_invals += cache->cc_invals;
        cc_nlists += cache->cc_nlist;
        cc_lsearches += cache->cc_lsearches;
        cc_lhits += cache->cc_lhits;
    }
    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
         CacheHdr->ch_ntup,
         cc_searches,
         cc_hits,
         cc_neg_hits,
         cc_hits + cc_neg_hits,
         cc_newloads,
         cc_searches - cc_hits - cc_neg_hits - cc_newloads,
         cc_searches - cc_hits - cc_neg_hits,
         cc_invals,
         cc_nlists,
         cc_lsearches,
         cc_lhits);
}
#endif                          /* CATCACHE_STATS */


/*
 *    CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
    Assert(ct->refcount == 0);
    Assert(ct->my_cache == cache);

    if (ct->c_list)
    {
        /*
         * The cleanest way to handle this is to call CatCacheRemoveCList,
         * which will recurse back to me, and the recursive call will do the
         * work.  Set the "dead" flag to make sure it does recurse.
         */
        ct->dead = true;
        CatCacheRemoveCList(cache, ct->c_list);
        return;                 /* nothing left to do */
    }

    /* delink from linked list */
    dlist_delete(&ct->cache_elem);

    /*
     * Free the keys only for negative entries; normal entries' keys just
     * point into the tuple, which is allocated together with the CatCTup.
     */
    if (ct->negative)
        CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
                         cache->cc_keyno, ct->keys);

    pfree(ct);

    --cache->cc_ntup;
    --CacheHdr->ch_ntup;
}

/*
 *    CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
    int         i;

    Assert(cl->refcount == 0);
    Assert(cl->my_cache == cache);

    /* delink from member tuples */
    for (i = cl->n_members; --i >= 0;)
    {
        CatCTup    *ct = cl->members[i];

        Assert(ct->c_list == cl);
        ct->c_list = NULL;
        /* if the member is dead and now has no references, remove it */
        if (
#ifndef CATCACHE_FORCE_RELEASE
            ct->dead &&
#endif
            ct->refcount == 0)
            CatCacheRemoveCTup(cache, ct);
    }

    /* delink from linked list */
    dlist_delete(&cl->cache_elem);

    /* free associated column data */
    CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
                     cache->cc_keyno, cl->keys);

    pfree(cl);

    --cache->cc_nlist;
}


/*
 *  CatCacheInvalidate
 *
 *  Invalidate entries in the specified cache, given a hash value.
 *
 *  We delete cache entries that match the hash value, whether positive
 *  or negative.  We don't care whether the invalidation is the result
 *  of a tuple insertion or a deletion.
 *
 *  We used to try to match positive cache entries by TID, but that is
 *  unsafe after a VACUUM FULL on a system catalog: an inval event could
 *  be queued before VACUUM FULL, and then processed afterwards, when the
 *  target tuple that has to be invalidated has a different TID than it
 *  did when the event was created.  So now we just compare hash values and
 *  accept the small risk of unnecessary invalidations due to false matches.
 *
 *  This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
    Index       hashIndex;
    dlist_mutable_iter iter;

    CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

    /*
     * We don't bother to check whether the cache has finished initialization
     * yet; if not, there will be no entries in it so no problem.
     */

    /*
     * Invalidate *all* CatCLists in this cache; it's too hard to tell which
     * searches might still be correct, so just zap 'em all.
     */
    for (int i = 0; i < cache->cc_nlbuckets; i++)
    {
        dlist_head *bucket = &cache->cc_lbucket[i];

        dlist_foreach_modify(iter, bucket)
        {
            CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

            if (cl->refcount > 0)
                cl->dead = true;
            else
                CatCacheRemoveCList(cache, cl);
        }
    }

    /*
     * inspect the proper hash bucket for tuple matches
     */
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
    dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
    {
        CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

        if (hashValue == ct->hash_value)
        {
            if (ct->refcount > 0 ||
                (ct->c_list && ct->c_list->refcount > 0))
            {
                ct->dead = true;
                /* list, if any, was marked dead above */
                Assert(ct->c_list == NULL || ct->c_list->dead);
            }
            else
                CatCacheRemoveCTup(cache, ct);
            CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
            /* could be multiple matches, so keep looking! */
        }
    }

    /* Also invalidate any entries that are being built */
    for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
    {
        if (e->cache == cache)
        {
            if (e->list || e->hash_value == hashValue)
                e->dead = true;
        }
    }
}
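
#ifdef CATCACHE_EXAMPLES            /* hypothetical guard, never defined */
/*
 * Sketch of the quasi-public calling pattern: inval.c computes the hash
 * value the same way the cache itself does, then targets just that value.
 */
static void
example_invalidate(CatCache *cache, Datum key1)
{
    uint32      hashValue = GetCatCacheHashValue(cache, key1, 0, 0, 0);

    CatCacheInvalidate(cache, hashValue);
}
#endif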

/* ----------------------------------------------------------------
 *                     public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
    /*
     * Purely for paranoia, check that context doesn't exist; caller probably
     * did so already.
     */
    if (!CacheMemoryContext)
        CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                   "CacheMemoryContext",
                                                   ALLOCSET_DEFAULT_SIZES);
}


/*
 *    ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 *
 * If 'debug_discard' is true, we are being called as part of
 * debug_discard_caches.  In that case, the cache is not reset for
 * correctness, but just to get more testing of cache invalidation.  We skip
 * resetting in-progress build entries in that case, or we'd never make any
 * progress.
 */
static void
ResetCatalogCache(CatCache *cache, bool debug_discard)
{
    dlist_mutable_iter iter;
    int         i;

    /* Remove each list in this cache, or at least mark it dead */
    for (i = 0; i < cache->cc_nlbuckets; i++)
    {
        dlist_head *bucket = &cache->cc_lbucket[i];

        dlist_foreach_modify(iter, bucket)
        {
            CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

            if (cl->refcount > 0)
                cl->dead = true;
            else
                CatCacheRemoveCList(cache, cl);
        }
    }

    /* Remove each tuple in this cache, or at least mark it dead */
    for (i = 0; i < cache->cc_nbuckets; i++)
    {
        dlist_head *bucket = &cache->cc_bucket[i];

        dlist_foreach_modify(iter, bucket)
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

            if (ct->refcount > 0 ||
                (ct->c_list && ct->c_list->refcount > 0))
            {
                ct->dead = true;
                /* list, if any, was marked dead above */
                Assert(ct->c_list == NULL || ct->c_list->dead);
            }
            else
                CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
        }
    }

    /* Also invalidate any entries that are being built */
    if (!debug_discard)
    {
        for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
        {
            if (e->cache == cache)
                e->dead = true;
        }
    }
}

/*
 *    ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
    ResetCatalogCachesExt(false);
}

void
ResetCatalogCachesExt(bool debug_discard)
{
    slist_iter  iter;

    CACHE_elog(DEBUG2, "ResetCatalogCaches called");

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        ResetCatalogCache(cache, debug_discard);
    }

    CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}

/*
 *    CatalogCacheFlushCatalog
 *
 *  Flush all catcache entries that came from the specified system catalog.
 *  This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *  tuples very likely now have different TIDs than before.  (At one point
 *  we also tried to force re-execution of CatalogCacheInitializeCache for
 *  the cache(s) on that catalog.  This is a bad idea since it leads to all
 *  kinds of trouble if a cache flush occurs while loading cache entries.
 *  We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *  rather than relying on the relcache to keep a tupdesc for us.  Of course
 *  this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
    slist_iter  iter;

    CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        /* Does this cache store tuples of the target catalog? */
        if (cache->cc_reloid == catId)
        {
            /* Yes, so flush all its contents */
            ResetCatalogCache(cache, false);

            /* Tell inval.c to call syscache callbacks for this cache */
            CallSyscacheCallbacks(cache->id, 0);
        }
    }

    CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
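
/*
 * For example, after a VACUUM FULL of pg_type, processing the resulting
 * invalidation would (schematically) reach CatalogCacheFlushCatalog with
 * catId = TypeRelationId, so no entry holding a now-stale TID survives.
 */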

/*
 *    InitCatCache
 *
 *  This allocates and initializes a cache for a system catalog relation.
 *  Actually, the cache is only partially initialized to avoid opening the
 *  relation.  The relation will be opened and the rest of the cache
 *  structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
    elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
         cp->cc_reloid, cp->cc_indexoid, cp->id, \
         cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
             Oid reloid,
             Oid indexoid,
             int nkeys,
             const int *key,
             int nbuckets)
{
    CatCache   *cp;
    MemoryContext oldcxt;
    int         i;

    /*
     * nbuckets is the initial number of hash buckets to use in this
     * catcache.  It will be enlarged later if it becomes too full.
     *
     * nbuckets must be a power of two.  We check this via Assert rather than
     * a full runtime check because the values will be coming from constant
     * tables.
     *
     * If you're confused by the power-of-two check, see comments in
     * bitmapset.c for an explanation.
     */
    Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

    /*
     * first switch to the cache context so our allocations do not vanish at
     * the end of a transaction
     */
    if (!CacheMemoryContext)
        CreateCacheMemoryContext();

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * if first time through, initialize the cache group header
     */
    if (CacheHdr == NULL)
    {
        CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
        slist_init(&CacheHdr->ch_caches);
        CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
        /* set up to dump stats at backend exit */
        on_proc_exit(CatCachePrintStats, 0);
#endif
    }

    /*
     * Allocate a new cache structure, aligning to a cacheline boundary
     *
     * Note: we rely on zeroing to initialize all the dlist headers correctly
     */
    cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
                                     MCXT_ALLOC_ZERO);
    cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

    /*
     * Many catcaches never receive any list searches.  Therefore, we don't
     * allocate the cc_lbucket array until we get a list search.
     */
    cp->cc_lbucket = NULL;

    /*
     * initialize the cache's relation information for the relation
     * corresponding to this cache, and initialize some of the new cache's
     * other internal fields.  But don't open the relation yet.
     */
    cp->id = id;
    cp->cc_relname = "(not known yet)";
    cp->cc_reloid = reloid;
    cp->cc_indexoid = indexoid;
    cp->cc_relisshared = false; /* temporary */
    cp->cc_tupdesc = (TupleDesc) NULL;
    cp->cc_ntup = 0;
    cp->cc_nlist = 0;
    cp->cc_nbuckets = nbuckets;
    cp->cc_nlbuckets = 0;
    cp->cc_nkeys = nkeys;
    for (i = 0; i < nkeys; ++i)
    {
        Assert(AttributeNumberIsValid(key[i]));
        cp->cc_keyno[i] = key[i];
    }

    /*
     * new cache is initialized as far as we can go for now. print some
     * debugging information, if appropriate.
     */
    InitCatCache_DEBUG2;

    /*
     * add completed cache to top of group header's list
     */
    slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

    /*
     * back to the old context before we return...
     */
    MemoryContextSwitchTo(oldcxt);

    return cp;
}
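
#ifdef CATCACHE_EXAMPLES            /* hypothetical guard, never defined */
/*
 * Sketch of a call in the style of syscache.c's initialization tables; the
 * cache id, key column and bucket count here are illustrative only.
 */
static void
example_init(void)
{
    static const int key[1] = {Anum_pg_type_oid};
    CatCache   *cc;

    /* nbuckets must be a power of two */
    cc = InitCatCache(TYPEOID, TypeRelationId, TypeOidIndexId, 1, key, 128);
    (void) cc;
}
#endif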

/*
 * Enlarge a catcache, doubling the number of buckets.
 */
static void
RehashCatCache(CatCache *cp)
{
    dlist_head *newbucket;
    int         newnbuckets;
    int         i;

    elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
         cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);

    /* Allocate a new, larger, hash table. */
    newnbuckets = cp->cc_nbuckets * 2;
    newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext,
                                                      newnbuckets * sizeof(dlist_head));

    /* Move all entries from old hash table to new. */
    for (i = 0; i < cp->cc_nbuckets; i++)
    {
        dlist_mutable_iter iter;

        dlist_foreach_modify(iter, &cp->cc_bucket[i])
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);
            int         hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

            dlist_delete(iter.cur);
            dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
        }
    }

    /* Switch to the new array. */
    pfree(cp->cc_bucket);
    cp->cc_nbuckets = newnbuckets;
    cp->cc_bucket = newbucket;
}
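
/*
 * Worked example: an entry with hash 0x1F sits in bucket
 * HASH_INDEX(0x1F, 16) = 0xF; after doubling to 32 buckets it moves to
 * HASH_INDEX(0x1F, 32) = 0x1F.  Only the newly exposed mask bit decides
 * whether an entry stays put or moves to its twin bucket.
 */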

/*
 * Enlarge a catcache's list storage, doubling the number of buckets.
 */
static void
RehashCatCacheLists(CatCache *cp)
{
    dlist_head *newbucket;
    int         newnbuckets;
    int         i;

    elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
         cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);

    /* Allocate a new, larger, hash table. */
    newnbuckets = cp->cc_nlbuckets * 2;
    newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext,
                                                      newnbuckets * sizeof(dlist_head));

    /* Move all entries from old hash table to new. */
    for (i = 0; i < cp->cc_nlbuckets; i++)
    {
        dlist_mutable_iter iter;

        dlist_foreach_modify(iter, &cp->cc_lbucket[i])
        {
            CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);
            int         hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);

            dlist_delete(iter.cur);
            dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
        }
    }

    /* Switch to the new array. */
    pfree(cp->cc_lbucket);
    cp->cc_nlbuckets = newnbuckets;
    cp->cc_lbucket = newbucket;
}

/*
 *    ConditionalCatalogCacheInitializeCache
 *
 * Call CatalogCacheInitializeCache() if not yet done.
 */
pg_attribute_always_inline
static void
ConditionalCatalogCacheInitializeCache(CatCache *cache)
{
#ifdef USE_ASSERT_CHECKING
    /*
     * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
     * for hashing.  This isn't ideal.  Since lookup_type_cache() both
     * registers the callback and searches TYPEOID, reaching trouble likely
     * requires OOM at an unlucky moment.
     *
     * InvalidateAttoptCacheCallback() runs outside transactions and likewise
     * relies on ATTNUM.  InitPostgres() initializes ATTNUM, so it's reliable.
     */
    if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
        IsTransactionState())
        AssertCouldGetRelation();
    else
        Assert(cache->cc_tupdesc != NULL);
#endif

    if (unlikely(cache->cc_tupdesc == NULL))
        CatalogCacheInitializeCache(cache);
}

/*
 *    CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
    elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
         cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
        if (cache->cc_keyno[i] > 0) { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
                 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
                 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
        } else { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
                 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
        } \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
    Relation    relation;
    MemoryContext oldcxt;
    TupleDesc   tupdesc;
    int         i;

    CatalogCacheInitializeCache_DEBUG1;

    relation = table_open(cache->cc_reloid, AccessShareLock);

    /*
     * switch to the cache context so our allocations do not vanish at the
     * end of a transaction
     */
    Assert(CacheMemoryContext != NULL);

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * copy the relcache's tuple descriptor to permanent cache storage
     */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

    /*
     * save the relation's name and relisshared flag, too (cc_relname is
     * used only for debugging purposes)
     */
    cache->cc_relname = pstrdup(RelationGetRelationName(relation));
    cache->cc_relisshared = RelationGetForm(relation)->relisshared;

    /*
     * return to the caller's memory context and close the rel
     */
    MemoryContextSwitchTo(oldcxt);

    table_close(relation, AccessShareLock);

    CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
               cache->cc_relname, cache->cc_nkeys);

    /*
     * initialize cache's key information
     */
    for (i = 0; i < cache->cc_nkeys; ++i)
    {
        Oid         keytype;
        RegProcedure eqfunc;

        CatalogCacheInitializeCache_DEBUG2;

        if (cache->cc_keyno[i] > 0)
        {
            Form_pg_attribute attr = TupleDescAttr(tupdesc,
                                                   cache->cc_keyno[i] - 1);

            keytype = attr->atttypid;
            /* cache key columns should always be NOT NULL */
            Assert(attr->attnotnull);
        }
        else
        {
            if (cache->cc_keyno[i] < 0)
                elog(FATAL, "sys attributes are not supported in caches");
            keytype = OIDOID;
        }

        GetCCHashEqFuncs(keytype,
                         &cache->cc_hashfunc[i],
                         &eqfunc,
                         &cache->cc_fastequal[i]);

        /*
         * Do equality-function lookup (we assume this won't need a catalog
         * lookup for any supported type)
         */
        fmgr_info_cxt(eqfunc,
                      &cache->cc_skey[i].sk_func,
                      CacheMemoryContext);

        /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
        cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

        /* Fill in sk_strategy as well --- always standard equality */
        cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
        cache->cc_skey[i].sk_subtype = InvalidOid;
        /* If a catcache key requires a collation, it must be C collation */
        cache->cc_skey[i].sk_collation = C_COLLATION_OID;

        CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
                   cache->cc_relname, i, cache);
    }

    /*
     * mark this cache fully initialized
     */
    cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
    ConditionalCatalogCacheInitializeCache(cache);

    if (touch_index &&
        cache->id != AMOID &&
        cache->id != AMNAME)
    {
        Relation    idesc;

        /*
         * We must lock the underlying catalog before opening the index to
         * avoid deadlock, since index_open could possibly result in reading
         * this same catalog, and if anyone else is exclusive-locking this
         * catalog and index they'll be doing it in that order.
         */
        LockRelationOid(cache->cc_reloid, AccessShareLock);
        idesc = index_open(cache->cc_indexoid, AccessShareLock);

        /*
         * While we've got the index open, let's check that it's unique (and
         * not just deferrable-unique, thank you very much).  This is just to
         * catch thinkos in definitions of new catcaches, so we don't worry
         * about the pg_am indexes not getting tested.
         */
        Assert(idesc->rd_index->indisunique &&
               idesc->rd_index->indimmediate);

        index_close(idesc, AccessShareLock);
        UnlockRelationOid(cache->cc_reloid, AccessShareLock);
    }
}


/*
 *    IndexScanOK
 *
 *    This function checks for tuples that will be fetched by
 *    IndexSupportInitialize() during relcache initialization for
 *    certain system indexes that support critical syscaches.
 *    We can't use an indexscan to fetch these, else we'll get into
 *    infinite recursion.  A plain heap scan will work, however.
 *    Once we have completed relcache initialization (signaled by
 *    criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *    Similarly, during backend startup we have to be able to use the
 *    pg_authid, pg_auth_members and pg_database syscaches for
 *    authentication even if we don't yet have relcache entries for those
 *    catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache)
{
    switch (cache->id)
    {
        case INDEXRELID:

            /*
             * Rather than tracking exactly which indexes have to be loaded
             * before we can use indexscans (which changes from time to
             * time), just force all pg_index searches to be heap scans
             * until we've built the critical relcaches.
             */
            if (!criticalRelcachesBuilt)
                return false;
            break;

        case AMOID:
        case AMNAME:

            /*
             * Always do heap scans in pg_am, because it's so small there's
             * not much point in an indexscan anyway.  We *must* do this when
             * initially building critical relcache entries, but we might as
             * well just always do it.
             */
            return false;

        case AUTHNAME:
        case AUTHOID:
        case AUTHMEMMEMROLE:
        case DATABASEOID:

            /*
             * Protect authentication lookups occurring before relcache has
             * collected entries for shared indexes.
             */
            if (!criticalSharedRelcachesBuilt)
                return false;
            break;

        default:
            break;
    }

    /* Normal case, allow index scan */
    return true;
}

/*
 *  SearchCatCache
 *
 *    This call searches a system cache for a tuple, opening the relation
 *    if necessary (on the first access to a particular cache).
 *
 *    The result is NULL if not found, or a pointer to a HeapTuple in
 *    the cache.  The caller must not modify the tuple, and must call
 *    ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
               Datum v1,
               Datum v2,
               Datum v3,
               Datum v4)
{
    return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
 * arguments.  The compiler can inline the body and unroll loops, making
 * them a bit faster than SearchCatCache().
 */

HeapTuple
SearchCatCache1(CatCache *cache,
                Datum v1)
{
    return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}


HeapTuple
SearchCatCache2(CatCache *cache,
                Datum v1, Datum v2)
{
    return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}


HeapTuple
SearchCatCache3(CatCache *cache,
                Datum v1, Datum v2, Datum v3)
{
    return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}


HeapTuple
SearchCatCache4(CatCache *cache,
                Datum v1, Datum v2, Datum v3, Datum v4)
{
    return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
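
#ifdef CATCACHE_EXAMPLES            /* hypothetical guard, never defined */
/*
 * Sketch of the lookup/release discipline; real callers normally go through
 * the syscache.c wrappers (e.g. SearchSysCache1) rather than calling these
 * directly.
 */
static void
example_search(CatCache *cache, Oid typoid)
{
    HeapTuple   tup = SearchCatCache1(cache, ObjectIdGetDatum(typoid));

    if (HeapTupleIsValid(tup))
    {
        /* ... inspect the tuple here; never modify it ... */
        ReleaseCatCache(tup);
    }
}
#endif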

/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
                       int nkeys,
                       Datum v1,
                       Datum v2,
                       Datum v3,
                       Datum v4)
{
    Datum       arguments[CATCACHE_MAXKEYS];
    uint32      hashValue;
    Index       hashIndex;
    dlist_iter  iter;
    dlist_head *bucket;
    CatCTup    *ct;

    Assert(cache->cc_nkeys == nkeys);

    /*
     * one-time startup overhead for each cache
     */
    ConditionalCatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
    cache->cc_searches++;
#endif

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * find the hash bucket in which to look for the tuple
     */
    hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

    /*
     * scan the hash bucket until we find a match or exhaust our tuples
     *
     * Note: it's okay to use dlist_foreach here, even though we modify the
     * dlist within the loop, because we don't continue the loop afterwards.
     */
    bucket = &cache->cc_bucket[hashIndex];
    dlist_foreach(iter, bucket)
    {
        ct = dlist_container(CatCTup, cache_elem, iter.cur);

        if (ct->dead)
            continue;           /* ignore dead entries */

        if (ct->hash_value != hashValue)
            continue;           /* quickly skip entry if wrong hash val */

        if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
            continue;

        /*
         * We found a match in the cache.  Move it to the front of the list
         * for its hashbucket, in order to speed subsequent searches.  (The
         * most frequently accessed elements in any hashbucket will tend to
         * be near the front of the hashbucket's list.)
         */
        dlist_move_head(bucket, &ct->cache_elem);

        /*
         * If it's a positive entry, bump its refcount and return it.  If
         * it's negative, we can report failure to the caller.
         */
        if (!ct->negative)
        {
            ResourceOwnerEnlarge(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

            CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
                       cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_hits++;
#endif

            return &ct->tuple;
        }
        else
        {
            CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
                       cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_neg_hits++;
#endif

            return NULL;
        }
    }

    return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}

/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
                   int nkeys,
                   uint32 hashValue,
                   Index hashIndex,
                   Datum v1,
                   Datum v2,
                   Datum v3,
                   Datum v4)
{
    ScanKeyData cur_skey[CATCACHE_MAXKEYS];
    Relation    relation;
    SysScanDesc scandesc;
    HeapTuple   ntp;
    CatCTup    *ct;
    bool        stale;
    Datum       arguments[CATCACHE_MAXKEYS];

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * Tuple was not found in cache, so we have to try to retrieve it
     * directly from the relation.  If found, we will add it to the cache;
     * if not found, we will add a negative cache entry instead.
     *
     * NOTE: it is possible for recursive cache lookups to occur while
     * reading the relation --- for example, due to shared-cache-inval
     * messages being processed during table_open().  This is OK.  It's even
     * possible for one of those lookups to find and enter the very same
     * tuple we are trying to fetch here.  If that happens, we will enter a
     * second copy of the tuple into the cache.  The first copy will never be
     * referenced again, and will eventually age out of the cache, so there's
     * no functional problem.  This case is rare enough that it's not worth
     * expending extra cycles to detect.
     *
     * Another case, which we *must* handle, is that the tuple could become
     * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
     * AcceptInvalidationMessages can run during TOAST table access).  We do
     * not want to return already-stale catcache entries, so we loop around
     * and do the table scan again if that happens.
     */
    relation = table_open(cache->cc_reloid, AccessShareLock);

    /*
     * Ok, need to make a lookup in the relation, copy the scankey and fill
     * out any per-call fields.
     */
    memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
    cur_skey[0].sk_argument = v1;
    cur_skey[1].sk_argument = v2;
    cur_skey[2].sk_argument = v3;
    cur_skey[3].sk_argument = v4;

    do
    {
        scandesc = systable_beginscan(relation,
                                      cache->cc_indexoid,
                                      IndexScanOK(cache),
                                      NULL,
                                      nkeys,
                                      cur_skey);

        ct = NULL;
        stale = false;

        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
        {
            ct = CatalogCacheCreateEntry(cache, ntp, NULL,
                                         hashValue, hashIndex);
            /* upon failure, we must start the scan over */
            if (ct == NULL)
            {
                stale = true;
                break;
            }
            /* immediately set the refcount to 1 */
            ResourceOwnerEnlarge(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
            break;              /* assume only one match */
        }

        systable_endscan(scandesc);
    } while (stale);

    table_close(relation, AccessShareLock);

    /*
     * If tuple was not found, we need to build a negative cache entry
     * containing a fake tuple.  The fake tuple has the correct key columns,
     * but nulls everywhere else.
     *
     * In bootstrap mode, we don't build negative entries, because the cache
     * invalidation mechanism isn't alive and can't clear them if the tuple
     * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
     * cache inval for that.)
     */
    if (ct == NULL)
    {
        if (IsBootstrapProcessingMode())
            return NULL;

        ct = CatalogCacheCreateEntry(cache, NULL, arguments,
                                     hashValue, hashIndex);

        /* Creating a negative cache entry shouldn't fail */
        Assert(ct != NULL);

        CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
        CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
                   cache->cc_relname, hashIndex);

        /*
         * We are not returning the negative entry to the caller, so leave
         * its refcount zero.
         */

        return NULL;
    }

    CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
               cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
    CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
               cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
    cache->cc_newloads++;
#endif

    return &ct->tuple;
}

/*
 *  ReleaseCatCache
 *
 *  Decrement the reference count of a catcache entry (releasing the
 *  hold grabbed by a successful SearchCatCache).
 *
 *  NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *  will be freed as soon as their refcount goes to zero.  In combination
 *  with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *  to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
    ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
}

static void
ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety checks to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);
    Assert(ct->refcount > 0);

    ct->refcount--;
    if (resowner)
        ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

    if (
#ifndef CATCACHE_FORCE_RELEASE
        ct->dead &&
#endif
        ct->refcount == 0 &&
        (ct->c_list == NULL || ct->c_list->refcount == 0))
        CatCacheRemoveCTup(ct->my_cache, ct);
}


/*
 *  GetCatCacheHashValue
 *
 *    Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
                     Datum v1,
                     Datum v2,
                     Datum v3,
                     Datum v4)
{
    /*
     * one-time startup overhead for each cache
     */
    ConditionalCatalogCacheInitializeCache(cache);

    /*
     * calculate the hash value
     */
    return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 *  SearchCatCacheList
 *
 *    Generate a list of all tuples matching a partial key (that is,
 *    a key specifying just the first K of the cache's N key columns).
 *
 *    It doesn't make any sense to specify all of the cache's key columns
 *    here: since the key is unique, there could be at most one match, so
 *    you ought to use SearchCatCache() instead.  Hence this function takes
 *    one fewer Datum argument than SearchCatCache() does.
 *
 *    The caller must not modify the list object or the pointed-to tuples,
 *    and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
                   int nkeys,
                   Datum v1,
                   Datum v2,
                   Datum v3)
{
    Datum       v4 = 0;         /* dummy last-column value */
    Datum       arguments[CATCACHE_MAXKEYS];
    uint32      lHashValue;
    Index       lHashIndex;
    dlist_iter  iter;
    dlist_head *lbucket;
    CatCList   *cl;
    CatCTup    *ct;
    List       *volatile ctlist;
    ListCell   *ctlist_item;
    int         nmembers;
    bool        ordered;
    HeapTuple   ntp;
    MemoryContext oldcxt;
    int         i;
    CatCInProgress *save_in_progress;
    CatCInProgress in_progress_ent;

    /*
     * one-time startup overhead for each cache
     */
    ConditionalCatalogCacheInitializeCache(cache);

    Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
    cache->cc_lsearches++;
#endif

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * If we haven't previously done a list search in this cache, create the
     * bucket header array; otherwise, consider whether it's time to enlarge
     * it.
     */
    if (cache->cc_lbucket == NULL)
    {
        /* Arbitrary initial size --- must be a power of 2 */
        int         nbuckets = 16;

        cache->cc_lbucket = (dlist_head *)
            MemoryContextAllocZero(CacheMemoryContext,
                                   nbuckets * sizeof(dlist_head));
        /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
        cache->cc_nlbuckets = nbuckets;
    }
    else
    {
        /*
         * If the hash table has become too full, enlarge the buckets array.
         * Quite arbitrarily, we enlarge when fill factor > 2.
         */
        if (cache->cc_nlist > cache->cc_nlbuckets * 2)
            RehashCatCacheLists(cache);
    }

    /*
     * Find the hash bucket in which to look for the CatCList.
     */
    lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
    lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);

    /*
     * scan the items until we find a match or exhaust our list
     *
     * Note: it's okay to use dlist_foreach here, even though we modify the
     * dlist within the loop, because we don't continue the loop afterwards.
     */
    lbucket = &cache->cc_lbucket[lHashIndex];
    dlist_foreach(iter, lbucket)
    {
        cl = dlist_container(CatCList, cache_elem, iter.cur);

        if (cl->dead)
            continue;           /* ignore dead entries */

        if (cl->hash_value != lHashValue)
            continue;           /* quickly skip entry if wrong hash val */

        /*
         * see if the cached list matches our key.
         */
        if (cl->nkeys != nkeys)
            continue;

        if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
            continue;

        /*
         * We found a matching list.  Move the list to the front of the list
         * for its hashbucket, so as to speed subsequent searches.  (We do not
1822
     * move the members to the fronts of their hashbucket lists, however,
1823
     * since there's no point in that unless they are searched for
1824
     * individually.)
1825
     */
1826
0
    dlist_move_head(lbucket, &cl->cache_elem);
1827
1828
    /* Bump the list's refcount and return it */
1829
0
    ResourceOwnerEnlarge(CurrentResourceOwner);
1830
0
    cl->refcount++;
1831
0
    ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1832
1833
0
    CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1834
0
           cache->cc_relname);
1835
1836
#ifdef CATCACHE_STATS
1837
    cache->cc_lhits++;
1838
#endif
1839
1840
0
    return cl;
1841
0
  }
1842
1843
  /*
1844
   * List was not found in cache, so we have to build it by reading the
1845
   * relation.  For each matching tuple found in the relation, use an
1846
   * existing cache entry if possible, else build a new one.
1847
   *
1848
   * We have to bump the member refcounts temporarily to ensure they won't
1849
   * get dropped from the cache while loading other members. We use a PG_TRY
1850
   * block to ensure we can undo those refcounts if we get an error before
1851
   * we finish constructing the CatCList.  ctlist must be valid throughout
1852
   * the PG_TRY block.
1853
   */
1854
0
  ctlist = NIL;
1855
1856
  /*
1857
   * Cache invalidation can happen while we're building the list.
1858
   * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1859
   * tuples, but it's also possible that a new entry is concurrently added
1860
   * that should be part of the list we're building.  Register an
1861
   * "in-progress" entry that will receive the invalidation, until we have
1862
   * built the final list entry.
1863
   */
1864
0
  save_in_progress = catcache_in_progress_stack;
1865
0
  in_progress_ent.next = catcache_in_progress_stack;
1866
0
  in_progress_ent.cache = cache;
1867
0
  in_progress_ent.hash_value = lHashValue;
1868
0
  in_progress_ent.list = true;
1869
0
  in_progress_ent.dead = false;
1870
0
  catcache_in_progress_stack = &in_progress_ent;
1871
1872
0
  PG_TRY();
1873
0
  {
1874
0
    ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1875
0
    Relation  relation;
1876
0
    SysScanDesc scandesc;
1877
0
    bool    first_iter = true;
1878
1879
0
    relation = table_open(cache->cc_reloid, AccessShareLock);
1880
1881
    /*
1882
     * Ok, we need to do a lookup in the relation; copy the scankey and
1883
     * fill out any per-call fields.
1884
     */
1885
0
    memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1886
0
    cur_skey[0].sk_argument = v1;
1887
0
    cur_skey[1].sk_argument = v2;
1888
0
    cur_skey[2].sk_argument = v3;
1889
0
    cur_skey[3].sk_argument = v4;
1890
1891
    /*
1892
     * Scan the table for matching entries.  If an invalidation arrives
1893
     * mid-build, we will loop back here to retry.
1894
     */
1895
0
    do
1896
0
    {
1897
      /*
1898
       * If we are retrying, release refcounts on any items created on
1899
       * the previous iteration.  We dare not try to free them if
1900
       * they're now unreferenced, since an error while doing that would
1901
       * result in the PG_CATCH below doing extra refcount decrements.
1902
       * Besides, we'll likely re-adopt those items in the next
1903
       * iteration, so it's not worth complicating matters to try to get
1904
       * rid of them.
1905
       */
1906
0
      foreach(ctlist_item, ctlist)
1907
0
      {
1908
0
        ct = (CatCTup *) lfirst(ctlist_item);
1909
0
        Assert(ct->c_list == NULL);
1910
0
        Assert(ct->refcount > 0);
1911
0
        ct->refcount--;
1912
0
      }
1913
      /* Reset ctlist in preparation for new try */
1914
0
      ctlist = NIL;
1915
0
      in_progress_ent.dead = false;
1916
1917
0
      scandesc = systable_beginscan(relation,
1918
0
                      cache->cc_indexoid,
1919
0
                      IndexScanOK(cache),
1920
0
                      NULL,
1921
0
                      nkeys,
1922
0
                      cur_skey);
1923
1924
      /* The list will be ordered iff we are doing an index scan */
1925
0
      ordered = (scandesc->irel != NULL);
1926
1927
      /* Injection point to help testing the recursive invalidation case */
1928
0
      if (first_iter)
1929
0
      {
1930
0
        INJECTION_POINT("catcache-list-miss-systable-scan-started", NULL);
1931
0
        first_iter = false;
1932
0
      }
1933
1934
0
      while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1935
0
           !in_progress_ent.dead)
1936
0
      {
1937
0
        uint32    hashValue;
1938
0
        Index   hashIndex;
1939
0
        bool    found = false;
1940
0
        dlist_head *bucket;
1941
1942
        /*
1943
         * See if there's an entry for this tuple already.
1944
         */
1945
0
        ct = NULL;
1946
0
        hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1947
0
        hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1948
1949
0
        bucket = &cache->cc_bucket[hashIndex];
1950
0
        dlist_foreach(iter, bucket)
1951
0
        {
1952
0
          ct = dlist_container(CatCTup, cache_elem, iter.cur);
1953
1954
0
          if (ct->dead || ct->negative)
1955
0
            continue; /* ignore dead and negative entries */
1956
1957
0
          if (ct->hash_value != hashValue)
1958
0
            continue; /* quickly skip entry if wrong hash val */
1959
1960
0
          if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1961
0
            continue; /* not same tuple */
1962
1963
          /*
1964
           * Found a match, but can't use it if it belongs to
1965
           * another list already
1966
           */
1967
0
          if (ct->c_list)
1968
0
            continue;
1969
1970
0
          found = true;
1971
0
          break;   /* A-OK */
1972
0
        }
1973
1974
0
        if (!found)
1975
0
        {
1976
          /* We didn't find a usable entry, so make a new one */
1977
0
          ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1978
0
                         hashValue, hashIndex);
1979
1980
          /* upon failure, we must start the scan over */
1981
0
          if (ct == NULL)
1982
0
          {
1983
0
            in_progress_ent.dead = true;
1984
0
            break;
1985
0
          }
1986
0
        }
1987
1988
        /* Careful here: add entry to ctlist, then bump its refcount */
1989
        /* This way leaves state correct if lappend runs out of memory */
1990
0
        ctlist = lappend(ctlist, ct);
1991
0
        ct->refcount++;
1992
0
      }
1993
1994
0
      systable_endscan(scandesc);
1995
0
    } while (in_progress_ent.dead);
1996
1997
0
    table_close(relation, AccessShareLock);
1998
1999
    /* Make sure the resource owner has room to remember this entry. */
2000
0
    ResourceOwnerEnlarge(CurrentResourceOwner);
2001
2002
    /* Now we can build the CatCList entry. */
2003
0
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2004
0
    nmembers = list_length(ctlist);
2005
0
    cl = (CatCList *)
2006
0
      palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2007
2008
    /* Extract key values */
2009
0
    CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2010
0
             arguments, cl->keys);
2011
0
    MemoryContextSwitchTo(oldcxt);
2012
2013
    /*
2014
     * We are now past the last thing that could trigger an elog before we
2015
     * have finished building the CatCList and remembering it in the
2016
     * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
2017
     * we'd better do so before we start marking the members as belonging
2018
     * to the list.
2019
     */
2020
0
  }
2021
0
  PG_CATCH();
2022
0
  {
2023
0
    Assert(catcache_in_progress_stack == &in_progress_ent);
2024
0
    catcache_in_progress_stack = save_in_progress;
2025
2026
0
    foreach(ctlist_item, ctlist)
2027
0
    {
2028
0
      ct = (CatCTup *) lfirst(ctlist_item);
2029
0
      Assert(ct->c_list == NULL);
2030
0
      Assert(ct->refcount > 0);
2031
0
      ct->refcount--;
2032
0
      if (
2033
0
#ifndef CATCACHE_FORCE_RELEASE
2034
0
        ct->dead &&
2035
0
#endif
2036
0
        ct->refcount == 0 &&
2037
0
        (ct->c_list == NULL || ct->c_list->refcount == 0))
2038
0
        CatCacheRemoveCTup(cache, ct);
2039
0
    }
2040
2041
0
    PG_RE_THROW();
2042
0
  }
2043
0
  PG_END_TRY();
2044
0
  Assert(catcache_in_progress_stack == &in_progress_ent);
2045
0
  catcache_in_progress_stack = save_in_progress;
2046
2047
0
  cl->cl_magic = CL_MAGIC;
2048
0
  cl->my_cache = cache;
2049
0
  cl->refcount = 0;     /* for the moment */
2050
0
  cl->dead = false;
2051
0
  cl->ordered = ordered;
2052
0
  cl->nkeys = nkeys;
2053
0
  cl->hash_value = lHashValue;
2054
0
  cl->n_members = nmembers;
2055
2056
0
  i = 0;
2057
0
  foreach(ctlist_item, ctlist)
2058
0
  {
2059
0
    cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2060
0
    Assert(ct->c_list == NULL);
2061
0
    ct->c_list = cl;
2062
    /* release the temporary refcount on the member */
2063
0
    Assert(ct->refcount > 0);
2064
0
    ct->refcount--;
2065
    /* mark list dead if any members already dead */
2066
0
    if (ct->dead)
2067
0
      cl->dead = true;
2068
0
  }
2069
0
  Assert(i == nmembers);
2070
2071
  /*
2072
   * Add the CatCList to the appropriate bucket, and count it.
2073
   */
2074
0
  dlist_push_head(lbucket, &cl->cache_elem);
2075
2076
0
  cache->cc_nlist++;
2077
2078
  /* Finally, bump the list's refcount and return it */
2079
0
  cl->refcount++;
2080
0
  ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
2081
2082
0
  CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2083
0
         cache->cc_relname, nmembers);
2084
2085
0
  return cl;
2086
0
}
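A hedged usage sketch of the partial-key list search via the syscache
wrappers ("opno" is a hypothetical operator OID):

    CatCList   *catlist = SearchSysCacheList1(AMOPOPID,
                                              ObjectIdGetDatum(opno));

    for (int i = 0; i < catlist->n_members; i++)
    {
        HeapTuple   tup = &catlist->members[i]->tuple;
        Form_pg_amop amopform = (Form_pg_amop) GETSTRUCT(tup);

        /* ... read-only inspection of each member ... */
    }
    ReleaseSysCacheList(catlist);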
2087
2088
/*
2089
 *  ReleaseCatCacheList
2090
 *
2091
 *  Decrement the reference count of a catcache list.
2092
 */
2093
void
2094
ReleaseCatCacheList(CatCList *list)
2095
0
{
2096
0
  ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
2097
0
}
2098
2099
static void
2100
ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
2101
0
{
2102
  /* Safety checks to ensure we were handed a cache entry */
2103
0
  Assert(list->cl_magic == CL_MAGIC);
2104
0
  Assert(list->refcount > 0);
2105
0
  list->refcount--;
2106
0
  if (resowner)
2107
0
    ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
2108
2109
0
  if (
2110
0
#ifndef CATCACHE_FORCE_RELEASE
2111
0
    list->dead &&
2112
0
#endif
2113
0
    list->refcount == 0)
2114
0
    CatCacheRemoveCList(list->my_cache, list);
2115
0
}
2116
2117
2118
/*
2119
 * CatalogCacheCreateEntry
2120
 *    Create a new CatCTup entry, copying the given HeapTuple and other
2121
 *    supplied data into it.  The new entry initially has refcount 0.
2122
 *
2123
 * To create a normal cache entry, ntp must be the HeapTuple just fetched
2124
 * from scandesc, and "arguments" is not used.  To create a negative cache
2125
 * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2126
 * In either case, hashValue/hashIndex are the hash values computed from
2127
 * the cache keys.
2128
 *
2129
 * Returns NULL if we attempt to detoast the tuple and observe that it
2130
 * became stale.  (This cannot happen for a negative entry.)  Caller must
2131
 * retry the tuple lookup in that case.
2132
 */
2133
static CatCTup *
2134
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
2135
            uint32 hashValue, Index hashIndex)
2136
0
{
2137
0
  CatCTup    *ct;
2138
0
  MemoryContext oldcxt;
2139
2140
0
  if (ntp)
2141
0
  {
2142
0
    int     i;
2143
0
    HeapTuple dtp = NULL;
2144
2145
    /*
2146
     * The invalidation of the in-progress entry essentially never happens
2147
     * during our regression tests, and there's no easy way to force it to
2148
     * fail for testing purposes.  To ensure we have test coverage for the
2149
     * retry paths in our callers, make debug builds randomly fail about
2150
     * 0.1% of the time through this code path, even when there are no
2151
     * toasted fields.
2152
     */
2153
#ifdef USE_ASSERT_CHECKING
2154
    if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
2155
      return NULL;
2156
#endif
2157
2158
    /*
2159
     * If there are any out-of-line toasted fields in the tuple, expand
2160
     * them in-line.  This saves cycles during later use of the catcache
2161
     * entry, and also protects us against the possibility of the toast
2162
     * tuples being freed before we attempt to fetch them, in case of
2163
     * something using a slightly stale catcache entry.
2164
     */
2165
0
    if (HeapTupleHasExternal(ntp))
2166
0
    {
2167
0
      CatCInProgress *save_in_progress;
2168
0
      CatCInProgress in_progress_ent;
2169
2170
      /*
2171
       * The tuple could become stale while we are doing toast table
2172
       * access (since AcceptInvalidationMessages can run then).  The
2173
       * invalidation will mark our in-progress entry as dead.
2174
       */
2175
0
      save_in_progress = catcache_in_progress_stack;
2176
0
      in_progress_ent.next = catcache_in_progress_stack;
2177
0
      in_progress_ent.cache = cache;
2178
0
      in_progress_ent.hash_value = hashValue;
2179
0
      in_progress_ent.list = false;
2180
0
      in_progress_ent.dead = false;
2181
0
      catcache_in_progress_stack = &in_progress_ent;
2182
2183
0
      PG_TRY();
2184
0
      {
2185
0
        dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
2186
0
      }
2187
0
      PG_FINALLY();
2188
0
      {
2189
0
        Assert(catcache_in_progress_stack == &in_progress_ent);
2190
0
        catcache_in_progress_stack = save_in_progress;
2191
0
      }
2192
0
      PG_END_TRY();
2193
2194
0
      if (in_progress_ent.dead)
2195
0
      {
2196
0
        heap_freetuple(dtp);
2197
0
        return NULL;
2198
0
      }
2199
0
    }
2200
0
    else
2201
0
      dtp = ntp;
2202
2203
    /* Allocate memory for CatCTup and the cached tuple in one go */
2204
0
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2205
2206
0
    ct = (CatCTup *) palloc(sizeof(CatCTup) +
2207
0
                MAXIMUM_ALIGNOF + dtp->t_len);
2208
0
    ct->tuple.t_len = dtp->t_len;
2209
0
    ct->tuple.t_self = dtp->t_self;
2210
0
    ct->tuple.t_tableOid = dtp->t_tableOid;
2211
0
    ct->tuple.t_data = (HeapTupleHeader)
2212
0
      MAXALIGN(((char *) ct) + sizeof(CatCTup));
2213
    /* copy tuple contents */
2214
0
    memcpy((char *) ct->tuple.t_data,
2215
0
         (const char *) dtp->t_data,
2216
0
         dtp->t_len);
2217
0
    MemoryContextSwitchTo(oldcxt);
2218
2219
0
    if (dtp != ntp)
2220
0
      heap_freetuple(dtp);
2221
2222
    /* extract keys - they'll point into the tuple if not by-value */
2223
0
    for (i = 0; i < cache->cc_nkeys; i++)
2224
0
    {
2225
0
      Datum   atp;
2226
0
      bool    isnull;
2227
2228
0
      atp = heap_getattr(&ct->tuple,
2229
0
                 cache->cc_keyno[i],
2230
0
                 cache->cc_tupdesc,
2231
0
                 &isnull);
2232
0
      Assert(!isnull);
2233
0
      ct->keys[i] = atp;
2234
0
    }
2235
0
  }
2236
0
  else
2237
0
  {
2238
    /* Set up keys for a negative cache entry */
2239
0
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2240
0
    ct = (CatCTup *) palloc(sizeof(CatCTup));
2241
2242
    /*
2243
     * Store keys - they'll point into separately allocated memory if not
2244
     * by-value.
2245
     */
2246
0
    CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
2247
0
             arguments, ct->keys);
2248
0
    MemoryContextSwitchTo(oldcxt);
2249
0
  }
2250
2251
  /*
2252
   * Finish initializing the CatCTup header, and add it to the cache's
2253
   * linked list and counts.
2254
   */
2255
0
  ct->ct_magic = CT_MAGIC;
2256
0
  ct->my_cache = cache;
2257
0
  ct->c_list = NULL;
2258
0
  ct->refcount = 0;     /* for the moment */
2259
0
  ct->dead = false;
2260
0
  ct->negative = (ntp == NULL);
2261
0
  ct->hash_value = hashValue;
2262
2263
0
  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
2264
2265
0
  cache->cc_ntup++;
2266
0
  CacheHdr->ch_ntup++;
2267
2268
  /*
2269
   * If the hash table has become too full, enlarge the buckets array. Quite
2270
   * arbitrarily, we enlarge when fill factor > 2.
2271
   */
2272
0
  if (cache->cc_ntup > cache->cc_nbuckets * 2)
2273
0
    RehashCatCache(cache);
2274
2275
0
  return ct;
2276
0
}
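The NULL-on-stale contract above implies a retry loop in every caller; a
condensed sketch of that contract (the elided scan calls are as in
SearchCatCacheList above):

    bool        stale;
    CatCTup    *ct;

    do
    {
        stale = false;
        /* ... systable_beginscan() / systable_getnext() yield ntp ... */
        ct = CatalogCacheCreateEntry(cache, ntp, NULL, hashValue, hashIndex);
        if (ct == NULL)
            stale = true;       /* tuple went stale mid-build: rescan */
        /* ... systable_endscan() ... */
    } while (stale);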
2277
2278
/*
2279
 * Helper routine that frees keys stored in the keys array.
2280
 */
2281
static void
2282
CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2283
0
{
2284
0
  int     i;
2285
2286
0
  for (i = 0; i < nkeys; i++)
2287
0
  {
2288
0
    int     attnum = attnos[i];
2289
0
    Form_pg_attribute att;
2290
2291
    /* system attributes are not supported in caches */
2292
0
    Assert(attnum > 0);
2293
2294
0
    att = TupleDescAttr(tupdesc, attnum - 1);
2295
2296
0
    if (!att->attbyval)
2297
0
      pfree(DatumGetPointer(keys[i]));
2298
0
  }
2299
0
}
2300
2301
/*
2302
 * Helper routine that copies the keys in the srckeys array into the dstkeys
2303
 * one, guaranteeing that the datums are fully allocated in the current memory
2304
 * context.
2305
 */
2306
static void
2307
CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2308
         Datum *srckeys, Datum *dstkeys)
2309
0
{
2310
0
  int     i;
2311
2312
  /*
2313
   * XXX: memory and lookup performance could possibly be improved by
2314
   * storing all keys in one allocation.
2315
   */
2316
2317
0
  for (i = 0; i < nkeys; i++)
2318
0
  {
2319
0
    int     attnum = attnos[i];
2320
0
    Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2321
0
    Datum   src = srckeys[i];
2322
0
    NameData  srcname;
2323
2324
    /*
2325
     * Must be careful in case the caller passed a C string where a NAME
2326
     * is wanted: convert the given argument to a correctly padded NAME.
2327
     * Otherwise the memcpy() done by datumCopy() could fall off the end
2328
     * of memory.
2329
     */
2330
0
    if (att->atttypid == NAMEOID)
2331
0
    {
2332
0
      namestrcpy(&srcname, DatumGetCString(src));
2333
0
      src = NameGetDatum(&srcname);
2334
0
    }
2335
2336
0
    dstkeys[i] = datumCopy(src,
2337
0
                 att->attbyval,
2338
0
                 att->attlen);
2339
0
  }
2340
0
}
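The NAME-padding hazard handled above, in miniature (a sketch; "key" is a
hypothetical Datum):

    NameData    padded;
    Datum       key;

    namestrcpy(&padded, "pg_class");    /* zero-pads to NAMEDATALEN */
    key = datumCopy(NameGetDatum(&padded),
                    false,              /* NAME is not by-value */
                    NAMEDATALEN);       /* fixed length, safe to copy */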
2341
2342
/*
2343
 *  PrepareToInvalidateCacheTuple()
2344
 *
2345
 *  This is part of a rather subtle chain of events, so pay attention:
2346
 *
2347
 *  When a tuple is inserted or deleted, it cannot be flushed from the
2348
 *  catcaches immediately, for reasons explained at the top of cache/inval.c.
2349
 *  Instead we have to add entries for the tuple to a list of pending tuple
2350
 *  invalidations that will be done at the end of the command or transaction.
2351
 *
2352
 *  The lists of tuples that need to be flushed are kept by inval.c.  This
2353
 *  routine is a helper routine for inval.c.  Given a tuple belonging to
2354
 *  the specified relation, find all catcaches it could be in, compute the
2355
 *  correct hash value for each such catcache, and call the specified
2356
 *  function to record the cache id and hash value in inval.c's lists.
2357
 *  SysCacheInvalidate will be called later, if appropriate,
2358
 *  using the recorded information.
2359
 *
2360
 *  For an insert or delete, tuple is the target tuple and newtuple is NULL.
2361
 *  For an update, we are called just once, with tuple being the old tuple
2362
 *  version and newtuple the new version.  We should make two list entries
2363
 *  if the tuple's hash value changed, but only one if it didn't.
2364
 *
2365
 *  Note that it is irrelevant whether the given tuple is actually loaded
2366
 *  into the catcache at the moment.  Even if it's not there now, it might
2367
 *  be by the end of the command, or there might be a matching negative entry
2368
 *  to flush --- or other backends' caches might have such entries --- so
2369
 *  we have to make list entries to flush it later.
2370
 *
2371
 *  Also note that it's not an error if there are no catcaches for the
2372
 *  specified relation.  inval.c doesn't know exactly which rels have
2373
 *  catcaches --- it will call this routine for any tuple that's in a
2374
 *  system relation.
2375
 */
2376
void
2377
PrepareToInvalidateCacheTuple(Relation relation,
2378
                HeapTuple tuple,
2379
                HeapTuple newtuple,
2380
                void (*function) (int, uint32, Oid, void *),
2381
                void *context)
2382
0
{
2383
0
  slist_iter  iter;
2384
0
  Oid     reloid;
2385
2386
0
  CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2387
2388
  /*
2389
   * sanity checks
2390
   */
2391
0
  Assert(RelationIsValid(relation));
2392
0
  Assert(HeapTupleIsValid(tuple));
2393
0
  Assert(PointerIsValid(function));
2394
0
  Assert(CacheHdr != NULL);
2395
2396
0
  reloid = RelationGetRelid(relation);
2397
2398
  /* ----------------
2399
   *  for each cache
2400
   *     if the cache contains tuples from the specified relation
2401
   *       compute the tuple's hash value(s) in this cache,
2402
   *       and call the passed function to register the information.
2403
   * ----------------
2404
   */
2405
2406
0
  slist_foreach(iter, &CacheHdr->ch_caches)
2407
0
  {
2408
0
    CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
2409
0
    uint32    hashvalue;
2410
0
    Oid     dbid;
2411
2412
0
    if (ccp->cc_reloid != reloid)
2413
0
      continue;
2414
2415
    /* Just in case the cache hasn't finished initialization yet... */
2416
0
    ConditionalCatalogCacheInitializeCache(ccp);
2417
2418
0
    hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2419
0
    dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2420
2421
0
    (*function) (ccp->id, hashvalue, dbid, context);
2422
2423
0
    if (newtuple)
2424
0
    {
2425
0
      uint32    newhashvalue;
2426
2427
0
      newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2428
2429
0
      if (newhashvalue != hashvalue)
2430
0
        (*function) (ccp->id, newhashvalue, dbid, context);
2431
0
    }
2432
0
  }
2433
0
}
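A minimal recording function with the signature this routine expects (a
sketch; inval.c's real callback queues the event for end-of-command
processing rather than just logging it):

    static void
    RecordCatcacheInval(int cacheid, uint32 hashvalue, Oid dbid,
                        void *context)
    {
        elog(DEBUG2, "pending catcache inval: cache %d hash %u db %u",
             cacheid, hashvalue, dbid);
    }

    ...
    PrepareToInvalidateCacheTuple(relation, tuple, NULL,
                                  RecordCatcacheInval, NULL);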
2434
2435
/* ResourceOwner callbacks */
2436
2437
static void
2438
ResOwnerReleaseCatCache(Datum res)
2439
0
{
2440
0
  ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
2441
0
}
2442
2443
static char *
2444
ResOwnerPrintCatCache(Datum res)
2445
0
{
2446
0
  HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2447
0
  CatCTup    *ct = (CatCTup *) (((char *) tuple) -
2448
0
                  offsetof(CatCTup, tuple));
2449
2450
  /* Safety check to ensure we were handed a cache entry */
2451
0
  Assert(ct->ct_magic == CT_MAGIC);
2452
2453
0
  return psprintf("cache %s (%d), tuple %u/%u has count %d",
2454
0
          ct->my_cache->cc_relname, ct->my_cache->id,
2455
0
          ItemPointerGetBlockNumber(&(tuple->t_self)),
2456
0
          ItemPointerGetOffsetNumber(&(tuple->t_self)),
2457
0
          ct->refcount);
2458
0
}
2459
2460
static void
2461
ResOwnerReleaseCatCacheList(Datum res)
2462
0
{
2463
0
  ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
2464
0
}
2465
2466
static char *
2467
ResOwnerPrintCatCacheList(Datum res)
2468
0
{
2469
0
  CatCList   *list = (CatCList *) DatumGetPointer(res);
2470
2471
0
  return psprintf("cache %s (%d), list %p has count %d",
2472
0
          list->my_cache->cc_relname, list->my_cache->id,
2473
0
          list, list->refcount);
2474
0
}
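For context, callbacks like these are wired into the resource-owner
machinery through a descriptor; a sketch assuming the ResourceOwnerDesc
API from utils/resowner.h (catcache.c defines descriptors of this shape
earlier in the file):

    static const ResourceOwnerDesc catlistref_resowner_desc =
    {
        .name = "catcache list reference",
        .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
        .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
        .ReleaseResource = ResOwnerReleaseCatCacheList,
        .DebugPrint = ResOwnerPrintCatCacheList
    };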