/src/postgres/src/backend/utils/cache/funccache.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * funccache.c |
4 | | * Function cache management. |
5 | | * |
6 | | * funccache.c manages a cache of function execution data. The cache |
7 | | * is used by SQL-language and PL/pgSQL functions, and could be used by |
8 | | * other function languages. Each cache entry is specific to the execution |
9 | | * of a particular function (identified by OID) with specific input data |
10 | | * types; so a polymorphic function could have many associated cache entries. |
11 | | * Trigger functions similarly have a cache entry per trigger. These rules |
12 | | * allow the cached data to be specific to the particular data types the |
13 | | * function call will be dealing with. |
14 | | * |
15 | | * |
16 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
17 | | * Portions Copyright (c) 1994, Regents of the University of California |
18 | | * |
19 | | * IDENTIFICATION |
20 | | * src/backend/utils/cache/funccache.c |
21 | | * |
22 | | *------------------------------------------------------------------------- |
23 | | */ |
24 | | #include "postgres.h" |
25 | | |
26 | | #include "catalog/pg_proc.h" |
27 | | #include "commands/event_trigger.h" |
28 | | #include "commands/trigger.h" |
29 | | #include "common/hashfn.h" |
30 | | #include "funcapi.h" |
31 | | #include "utils/funccache.h" |
32 | | #include "utils/hsearch.h" |
33 | | #include "utils/syscache.h" |
34 | | |
35 | | |
36 | | /* |
37 | | * Hash table for cached functions |
38 | | */ |
39 | | static HTAB *cfunc_hashtable = NULL; |
40 | | |
41 | | typedef struct CachedFunctionHashEntry |
42 | | { |
43 | | CachedFunctionHashKey key; /* hash key, must be first */ |
44 | | CachedFunction *function; /* points to data of language-specific size */ |
45 | | } CachedFunctionHashEntry; |
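/*
 * Illustrative sketch, not part of this file: a function language that uses
 * this cache is expected to embed CachedFunction as the first member of its
 * own, larger per-function struct, and to pass the size of that struct as
 * cacheEntrySize to cached_function_compile().  The struct and field names
 * below are hypothetical.
 */
typedef struct MyLangFunction
{
	CachedFunction cfunc;		/* must be the first member */
	char	   *fn_signature;	/* language-specific fields follow */
	void	   *compiled_body;
} MyLangFunction;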
46 | | |
47 | 0 | #define FUNCS_PER_USER 128 /* initial table size */ |
48 | | |
49 | | static uint32 cfunc_hash(const void *key, Size keysize); |
50 | | static int cfunc_match(const void *key1, const void *key2, Size keysize); |
51 | | |
52 | | |
53 | | /* |
54 | | * Initialize the hash table on first use. |
55 | | * |
56 | | * The hash table will be in TopMemoryContext regardless of caller's context. |
57 | | */ |
58 | | static void |
59 | | cfunc_hashtable_init(void) |
60 | 0 | { |
61 | 0 | HASHCTL ctl; |
62 | | |
63 | | /* don't allow double-initialization */ |
64 | 0 | Assert(cfunc_hashtable == NULL); |
65 | |
66 | 0 | ctl.keysize = sizeof(CachedFunctionHashKey); |
67 | 0 | ctl.entrysize = sizeof(CachedFunctionHashEntry); |
68 | 0 | ctl.hash = cfunc_hash; |
69 | 0 | ctl.match = cfunc_match; |
70 | 0 | cfunc_hashtable = hash_create("Cached function hash", |
71 | 0 | FUNCS_PER_USER, |
72 | 0 | &ctl, |
73 | 0 | HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); |
74 | 0 | } |
75 | | |
76 | | /* |
77 | | * cfunc_hash: hash function for cfunc hash table |
78 | | * |
79 | | * We need special hash and match functions to deal with the optional |
80 | | * presence of a TupleDesc in the hash keys. As long as we have to do |
81 | | * that, we might as well also be smart about not comparing unused |
82 | | * elements of the argtypes arrays. |
83 | | */ |
84 | | static uint32 |
85 | | cfunc_hash(const void *key, Size keysize) |
86 | 0 | { |
87 | 0 | const CachedFunctionHashKey *k = (const CachedFunctionHashKey *) key; |
88 | 0 | uint32 h; |
89 | |
90 | 0 | Assert(keysize == sizeof(CachedFunctionHashKey)); |
91 | | /* Hash all the fixed fields except callResultType */ |
92 | 0 | h = DatumGetUInt32(hash_any((const unsigned char *) k, |
93 | 0 | offsetof(CachedFunctionHashKey, callResultType))); |
94 | | /* Incorporate input argument types */ |
95 | 0 | if (k->nargs > 0) |
96 | 0 | h = hash_combine(h, |
97 | 0 | DatumGetUInt32(hash_any((const unsigned char *) k->argtypes, |
98 | 0 | k->nargs * sizeof(Oid)))); |
99 | | /* Incorporate callResultType if present */ |
100 | 0 | if (k->callResultType) |
101 | 0 | h = hash_combine(h, hashRowType(k->callResultType)); |
102 | 0 | return h; |
103 | 0 | } |
104 | | |
105 | | /* |
106 | | * cfunc_match: match function to use with cfunc_hash |
107 | | */ |
108 | | static int |
109 | | cfunc_match(const void *key1, const void *key2, Size keysize) |
110 | 0 | { |
111 | 0 | const CachedFunctionHashKey *k1 = (const CachedFunctionHashKey *) key1; |
112 | 0 | const CachedFunctionHashKey *k2 = (const CachedFunctionHashKey *) key2; |
113 | |
114 | 0 | Assert(keysize == sizeof(CachedFunctionHashKey)); |
115 | | /* Compare all the fixed fields except callResultType */ |
116 | 0 | if (memcmp(k1, k2, offsetof(CachedFunctionHashKey, callResultType)) != 0) |
117 | 0 | return 1; /* not equal */ |
118 | | /* Compare input argument types (we just verified that nargs matches) */ |
119 | 0 | if (k1->nargs > 0 && |
120 | 0 | memcmp(k1->argtypes, k2->argtypes, k1->nargs * sizeof(Oid)) != 0) |
121 | 0 | return 1; /* not equal */ |
122 | | /* Compare callResultType */ |
123 | 0 | if (k1->callResultType) |
124 | 0 | { |
125 | 0 | if (k2->callResultType) |
126 | 0 | { |
127 | 0 | if (!equalRowTypes(k1->callResultType, k2->callResultType)) |
128 | 0 | return 1; /* not equal */ |
129 | 0 | } |
130 | 0 | else |
131 | 0 | return 1; /* not equal */ |
132 | 0 | } |
133 | 0 | else |
134 | 0 | { |
135 | 0 | if (k2->callResultType) |
136 | 0 | return 1; /* not equal */ |
137 | 0 | } |
138 | 0 | return 0; /* equal */ |
139 | 0 | } |
140 | | |
141 | | /* |
142 | | * Look up the CachedFunction for the given hash key. |
143 | | * Returns NULL if not present. |
144 | | */ |
145 | | static CachedFunction * |
146 | | cfunc_hashtable_lookup(CachedFunctionHashKey *func_key) |
147 | 0 | { |
148 | 0 | CachedFunctionHashEntry *hentry; |
149 | |
150 | 0 | if (cfunc_hashtable == NULL) |
151 | 0 | return NULL; |
152 | | |
153 | 0 | hentry = (CachedFunctionHashEntry *) hash_search(cfunc_hashtable, |
154 | 0 | func_key, |
155 | 0 | HASH_FIND, |
156 | 0 | NULL); |
157 | 0 | if (hentry) |
158 | 0 | return hentry->function; |
159 | 0 | else |
160 | 0 | return NULL; |
161 | 0 | } |
162 | | |
163 | | /* |
164 | | * Insert a hash table entry. |
165 | | */ |
166 | | static void |
167 | | cfunc_hashtable_insert(CachedFunction *function, |
168 | | CachedFunctionHashKey *func_key) |
169 | 0 | { |
170 | 0 | CachedFunctionHashEntry *hentry; |
171 | 0 | bool found; |
172 | |
173 | 0 | if (cfunc_hashtable == NULL) |
174 | 0 | cfunc_hashtable_init(); |
175 | |
176 | 0 | hentry = (CachedFunctionHashEntry *) hash_search(cfunc_hashtable, |
177 | 0 | func_key, |
178 | 0 | HASH_ENTER, |
179 | 0 | &found); |
180 | 0 | if (found) |
181 | 0 | elog(WARNING, "trying to insert a function that already exists"); |
182 | | |
183 | | /* |
184 | | * If there's a callResultType, copy it into TopMemoryContext. If we're |
185 | | * unlucky enough for that to fail, leave the entry with null |
186 | | * callResultType, which will probably never match anything. |
187 | | */ |
188 | 0 | if (func_key->callResultType) |
189 | 0 | { |
190 | 0 | MemoryContext oldcontext = MemoryContextSwitchTo(TopMemoryContext); |
191 | |
192 | 0 | hentry->key.callResultType = NULL; |
193 | 0 | hentry->key.callResultType = CreateTupleDescCopy(func_key->callResultType); |
194 | 0 | MemoryContextSwitchTo(oldcontext); |
195 | 0 | } |
196 | |
197 | 0 | hentry->function = function; |
198 | | |
199 | | /* Set back-link from function to hashtable key */ |
200 | 0 | function->fn_hashkey = &hentry->key; |
201 | 0 | } |
202 | | |
203 | | /* |
204 | | * Delete a hash table entry. |
205 | | */ |
206 | | static void |
207 | | cfunc_hashtable_delete(CachedFunction *function) |
208 | 0 | { |
209 | 0 | CachedFunctionHashEntry *hentry; |
210 | 0 | TupleDesc tupdesc; |
211 | | |
212 | | /* do nothing if not in table */ |
213 | 0 | if (function->fn_hashkey == NULL) |
214 | 0 | return; |
215 | | |
216 | | /* |
217 | | * We need to free the callResultType if present, which is slightly tricky |
218 | | * because it has to be valid during the hashtable search. Fortunately, |
219 | | * because we have the hashkey back-link, we can grab that pointer before |
220 | | * deleting the hashtable entry. |
221 | | */ |
222 | 0 | tupdesc = function->fn_hashkey->callResultType; |
223 | |
224 | 0 | hentry = (CachedFunctionHashEntry *) hash_search(cfunc_hashtable, |
225 | 0 | function->fn_hashkey, |
226 | 0 | HASH_REMOVE, |
227 | 0 | NULL); |
228 | 0 | if (hentry == NULL) |
229 | 0 | elog(WARNING, "trying to delete function that does not exist"); |
230 | | |
231 | | /* Remove back link, which no longer points to allocated storage */ |
232 | 0 | function->fn_hashkey = NULL; |
233 | | |
234 | | /* Release the callResultType if present */ |
235 | 0 | if (tupdesc) |
236 | 0 | FreeTupleDesc(tupdesc); |
237 | 0 | } |
238 | | |
239 | | /* |
240 | | * Compute the hashkey for a given function invocation |
241 | | * |
242 | | * The hashkey is returned into the caller-provided storage at *hashkey. |
243 | | * Note however that if a callResultType is incorporated, we've not done |
244 | | * anything about copying that. |
245 | | */ |
246 | | static void |
247 | | compute_function_hashkey(FunctionCallInfo fcinfo, |
248 | | Form_pg_proc procStruct, |
249 | | CachedFunctionHashKey *hashkey, |
250 | | Size cacheEntrySize, |
251 | | bool includeResultType, |
252 | | bool forValidator) |
253 | 0 | { |
254 | | /* Make sure pad bytes within fixed part of the struct are zero */ |
255 | 0 | memset(hashkey, 0, offsetof(CachedFunctionHashKey, argtypes)); |
256 | | |
257 | | /* get function OID */ |
258 | 0 | hashkey->funcOid = fcinfo->flinfo->fn_oid; |
259 | | |
260 | | /* get call context */ |
261 | 0 | hashkey->isTrigger = CALLED_AS_TRIGGER(fcinfo); |
262 | 0 | hashkey->isEventTrigger = CALLED_AS_EVENT_TRIGGER(fcinfo); |
263 | | |
264 | | /* record cacheEntrySize so multiple languages can share hash table */ |
265 | 0 | hashkey->cacheEntrySize = cacheEntrySize; |
266 | | |
267 | | /* |
268 | | * If DML trigger, include trigger's OID in the hash, so that each trigger |
269 | | * usage gets a different hash entry, allowing for e.g. different relation |
270 | | * rowtypes or transition table names. In validation mode we do not know |
271 | | * what relation or transition table names are intended to be used, so we |
272 | | * leave trigOid zero; the hash entry built in this case will never be |
273 | | * used for any actual calls. |
274 | | * |
275 | | * We don't currently need to distinguish different event trigger usages |
276 | | * in the same way, since the special parameter variables don't vary in |
277 | | * type in that case. |
278 | | */ |
279 | 0 | if (hashkey->isTrigger && !forValidator) |
280 | 0 | { |
281 | 0 | TriggerData *trigdata = (TriggerData *) fcinfo->context; |
282 | |
283 | 0 | hashkey->trigOid = trigdata->tg_trigger->tgoid; |
284 | 0 | } |
285 | | |
286 | | /* get input collation, if known */ |
287 | 0 | hashkey->inputCollation = fcinfo->fncollation; |
288 | | |
289 | | /* |
290 | | * We include only input arguments in the hash key, since output argument |
291 | | * types can be deduced from those, and it would require extra cycles to |
292 | | * include the output arguments. But we have to resolve any polymorphic |
293 | | * argument types to the real types for the call. |
294 | | */ |
295 | 0 | if (procStruct->pronargs > 0) |
296 | 0 | { |
297 | 0 | hashkey->nargs = procStruct->pronargs; |
298 | 0 | memcpy(hashkey->argtypes, procStruct->proargtypes.values, |
299 | 0 | procStruct->pronargs * sizeof(Oid)); |
300 | 0 | cfunc_resolve_polymorphic_argtypes(procStruct->pronargs, |
301 | 0 | hashkey->argtypes, |
302 | 0 | NULL, /* all args are inputs */ |
303 | 0 | fcinfo->flinfo->fn_expr, |
304 | 0 | forValidator, |
305 | 0 | NameStr(procStruct->proname)); |
306 | 0 | } |
307 | | |
308 | | /* |
309 | | * While regular OUT arguments are sufficiently represented by the |
310 | | * resolved input arguments, a function returning composite has additional |
311 | | * variability: ALTER TABLE/ALTER TYPE could affect what it returns. Also, |
312 | | * a function returning RECORD may depend on a column definition list to |
313 | | * determine its output rowtype. If the caller needs the exact result |
314 | | * type to be part of the hash lookup key, we must run |
315 | | * get_call_result_type() to find that out. |
316 | | */ |
317 | 0 | if (includeResultType) |
318 | 0 | { |
319 | 0 | Oid resultTypeId; |
320 | 0 | TupleDesc tupdesc; |
321 | |
322 | 0 | switch (get_call_result_type(fcinfo, &resultTypeId, &tupdesc)) |
323 | 0 | { |
324 | 0 | case TYPEFUNC_COMPOSITE: |
325 | 0 | case TYPEFUNC_COMPOSITE_DOMAIN: |
326 | 0 | hashkey->callResultType = tupdesc; |
327 | 0 | break; |
328 | 0 | default: |
329 | | /* scalar result, or indeterminate rowtype */ |
330 | 0 | break; |
331 | 0 | } |
332 | 0 | } |
333 | 0 | } |
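/*
 * Illustrative example, not part of this file: with includeResultType = true,
 * a function declared RETURNS record and called as
 *		SELECT * FROM f(...) AS t(a int, b text)
 * gets a cache entry distinct from the same function called with
 *		... AS t(x float8)
 * because the two column definition lists yield different callResultType
 * descriptors.
 */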
334 | | |
335 | | /* |
336 | | * This is the same as the standard resolve_polymorphic_argtypes() function, |
337 | | * except that: |
338 | | * 1. We go ahead and report the error if we can't resolve the types. |
339 | | * 2. We treat RECORD-type input arguments (not output arguments) as if |
340 | | * they were polymorphic, replacing their types with the actual input |
341 | | * types if we can determine those. This allows us to create a separate |
342 | | * function cache entry for each named composite type passed to such an |
343 | | * argument. |
344 | | * 3. In validation mode, we have no inputs to look at, so assume that |
345 | | * polymorphic arguments are integer, integer-array or integer-range. |
346 | | */ |
347 | | void |
348 | | cfunc_resolve_polymorphic_argtypes(int numargs, |
349 | | Oid *argtypes, char *argmodes, |
350 | | Node *call_expr, bool forValidator, |
351 | | const char *proname) |
352 | 0 | { |
353 | 0 | int i; |
354 | |
355 | 0 | if (!forValidator) |
356 | 0 | { |
357 | 0 | int inargno; |
358 | | |
359 | | /* normal case, pass to standard routine */ |
360 | 0 | if (!resolve_polymorphic_argtypes(numargs, argtypes, argmodes, |
361 | 0 | call_expr)) |
362 | 0 | ereport(ERROR, |
363 | 0 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
364 | 0 | errmsg("could not determine actual argument " |
365 | 0 | "type for polymorphic function \"%s\"", |
366 | 0 | proname))); |
367 | | /* also, treat RECORD inputs (but not outputs) as polymorphic */ |
368 | 0 | inargno = 0; |
369 | 0 | for (i = 0; i < numargs; i++) |
370 | 0 | { |
371 | 0 | char argmode = argmodes ? argmodes[i] : PROARGMODE_IN; |
372 | |
373 | 0 | if (argmode == PROARGMODE_OUT || argmode == PROARGMODE_TABLE) |
374 | 0 | continue; |
375 | 0 | if (argtypes[i] == RECORDOID || argtypes[i] == RECORDARRAYOID) |
376 | 0 | { |
377 | 0 | Oid resolvedtype = get_call_expr_argtype(call_expr, |
378 | 0 | inargno); |
379 | |
380 | 0 | if (OidIsValid(resolvedtype)) |
381 | 0 | argtypes[i] = resolvedtype; |
382 | 0 | } |
383 | 0 | inargno++; |
384 | 0 | } |
385 | 0 | } |
386 | 0 | else |
387 | 0 | { |
388 | | /* special validation case (no need to do anything for RECORD) */ |
389 | 0 | for (i = 0; i < numargs; i++) |
390 | 0 | { |
391 | 0 | switch (argtypes[i]) |
392 | 0 | { |
393 | 0 | case ANYELEMENTOID: |
394 | 0 | case ANYNONARRAYOID: |
395 | 0 | case ANYENUMOID: /* XXX dubious */ |
396 | 0 | case ANYCOMPATIBLEOID: |
397 | 0 | case ANYCOMPATIBLENONARRAYOID: |
398 | 0 | argtypes[i] = INT4OID; |
399 | 0 | break; |
400 | 0 | case ANYARRAYOID: |
401 | 0 | case ANYCOMPATIBLEARRAYOID: |
402 | 0 | argtypes[i] = INT4ARRAYOID; |
403 | 0 | break; |
404 | 0 | case ANYRANGEOID: |
405 | 0 | case ANYCOMPATIBLERANGEOID: |
406 | 0 | argtypes[i] = INT4RANGEOID; |
407 | 0 | break; |
408 | 0 | case ANYMULTIRANGEOID: |
409 | 0 | argtypes[i] = INT4MULTIRANGEOID; |
410 | 0 | break; |
411 | 0 | default: |
412 | 0 | break; |
413 | 0 | } |
414 | 0 | } |
415 | 0 | } |
416 | 0 | } |
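/*
 * Worked example, not part of this file: for a function declared as
 * f(x anyelement, y anyarray) and called as f(42::int4, ARRAY[1,2]::int4[]),
 * the argtypes array resolves to {INT4OID, INT4ARRAYOID}.  In validation
 * mode the same declaration also resolves to {INT4OID, INT4ARRAYOID}, per
 * the integer/integer-array assumption described above.
 */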
417 | | |
418 | | /* |
419 | | * delete_function - clean up as much as possible of a stale function cache |
420 | | * |
421 | | * We can't release the CachedFunction struct itself, because of the |
422 | | * possibility that there are fn_extra pointers to it. We can release |
423 | | * the subsidiary storage, but only if there are no active evaluations |
424 | | * in progress. Otherwise we'll just leak that storage. Since the |
425 | | * case would only occur if a pg_proc update is detected during a nested |
426 | | * recursive call on the function, a leak seems acceptable. |
427 | | * |
428 | | * Note that this can be called more than once if there are multiple fn_extra |
429 | | * pointers to the same function cache. Hence be careful not to do things |
430 | | * twice. |
431 | | */ |
432 | | static void |
433 | | delete_function(CachedFunction *func) |
434 | 0 | { |
435 | | /* remove function from hash table (might be done already) */ |
436 | 0 | cfunc_hashtable_delete(func); |
437 | | |
438 | | /* release the function's storage if safe and not done already */ |
439 | 0 | if (func->use_count == 0 && |
440 | 0 | func->dcallback != NULL) |
441 | 0 | { |
442 | 0 | func->dcallback(func); |
443 | 0 | func->dcallback = NULL; |
444 | 0 | } |
445 | 0 | } |
446 | | |
447 | | /* |
448 | | * Compile a cached function, if no existing cache entry is suitable. |
449 | | * |
450 | | * fcinfo is the current call information. |
451 | | * |
452 | | * function should be NULL or the result of a previous call of |
453 | | * cached_function_compile() for the same fcinfo. The caller will |
454 | | * typically save the result in fcinfo->flinfo->fn_extra, or in a |
455 | | * field of a struct pointed to by fn_extra, to re-use in later |
456 | | * calls within the same query. |
457 | | * |
458 | | * ccallback and dcallback are function-language-specific callbacks to |
459 | | * compile and delete a cached function entry. dcallback can be NULL |
460 | | * if there's nothing for it to do. |
461 | | * |
462 | | * cacheEntrySize is the function-language-specific size of the cache entry |
463 | | * (which embeds a CachedFunction struct and typically has many more fields |
464 | | * after that). |
465 | | * |
466 | | * If includeResultType is true and the function returns composite, |
467 | | * include the actual result descriptor in the cache lookup key. |
468 | | * |
469 | | * If forValidator is true, we're only compiling for validation purposes, |
470 | | * and so some checks are skipped. |
471 | | * |
472 | | * Note: it's important for this to fall through quickly if the function |
473 | | * has already been compiled. |
474 | | * |
475 | | * Note: this function leaves the "use_count" field as zero. The caller |
476 | | * is expected to increment the use_count and decrement it when done with |
477 | | * the cache entry. |
478 | | */ |
479 | | CachedFunction * |
480 | | cached_function_compile(FunctionCallInfo fcinfo, |
481 | | CachedFunction *function, |
482 | | CachedFunctionCompileCallback ccallback, |
483 | | CachedFunctionDeleteCallback dcallback, |
484 | | Size cacheEntrySize, |
485 | | bool includeResultType, |
486 | | bool forValidator) |
487 | 0 | { |
488 | 0 | Oid funcOid = fcinfo->flinfo->fn_oid; |
489 | 0 | HeapTuple procTup; |
490 | 0 | Form_pg_proc procStruct; |
491 | 0 | CachedFunctionHashKey hashkey; |
492 | 0 | bool function_valid = false; |
493 | 0 | bool hashkey_valid = false; |
494 | 0 | bool new_function = false; |
495 | | |
496 | | /* |
497 | | * Lookup the pg_proc tuple by Oid; we'll need it in any case |
498 | | */ |
499 | 0 | procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); |
500 | 0 | if (!HeapTupleIsValid(procTup)) |
501 | 0 | elog(ERROR, "cache lookup failed for function %u", funcOid); |
502 | 0 | procStruct = (Form_pg_proc) GETSTRUCT(procTup); |
503 | | |
504 | | /* |
505 | | * Do we already have a cache entry for the current FmgrInfo? If not, try |
506 | | * to find one in the hash table. |
507 | | */ |
508 | 0 | recheck: |
509 | 0 | if (!function) |
510 | 0 | { |
511 | | /* Compute hashkey using function signature and actual arg types */ |
512 | 0 | compute_function_hashkey(fcinfo, procStruct, &hashkey, |
513 | 0 | cacheEntrySize, includeResultType, |
514 | 0 | forValidator); |
515 | 0 | hashkey_valid = true; |
516 | | |
517 | | /* And do the lookup */ |
518 | 0 | function = cfunc_hashtable_lookup(&hashkey); |
519 | 0 | } |
520 | |
521 | 0 | if (function) |
522 | 0 | { |
523 | | /* We have a compiled function, but is it still valid? */ |
524 | 0 | if (function->fn_xmin == HeapTupleHeaderGetRawXmin(procTup->t_data) && |
525 | 0 | ItemPointerEquals(&function->fn_tid, &procTup->t_self)) |
526 | 0 | function_valid = true; |
527 | 0 | else |
528 | 0 | { |
529 | | /* |
530 | | * Nope, so remove it from hashtable and try to drop associated |
531 | | * storage (if not done already). |
532 | | */ |
533 | 0 | delete_function(function); |
534 | | |
535 | | /* |
536 | | * If the function isn't in active use then we can overwrite the |
537 | | * func struct with new data, allowing any other existing fn_extra |
538 | | * pointers to make use of the new definition on their next use. |
539 | | * If it is in use then just leave it alone and make a new one. |
540 | | * (The active invocations will run to completion using the |
541 | | * previous definition, and then the cache entry will just be |
542 | | * leaked; doesn't seem worth adding code to clean it up, given |
543 | | * what a corner case this is.) |
544 | | * |
545 | | * If we found the function struct via fn_extra then it's possible |
546 | | * a replacement has already been made, so go back and recheck the |
547 | | * hashtable. |
548 | | */ |
549 | 0 | if (function->use_count != 0) |
550 | 0 | { |
551 | 0 | function = NULL; |
552 | 0 | if (!hashkey_valid) |
553 | 0 | goto recheck; |
554 | 0 | } |
555 | 0 | } |
556 | 0 | } |
557 | | |
558 | | /* |
559 | | * If the function wasn't found or was out-of-date, we have to compile it. |
560 | | */ |
561 | 0 | if (!function_valid) |
562 | 0 | { |
563 | | /* |
564 | | * Calculate hashkey if we didn't already; we'll need it to store the |
565 | | * completed function. |
566 | | */ |
567 | 0 | if (!hashkey_valid) |
568 | 0 | compute_function_hashkey(fcinfo, procStruct, &hashkey, |
569 | 0 | cacheEntrySize, includeResultType, |
570 | 0 | forValidator); |
571 | | |
572 | | /* |
573 | | * Create the new function struct, if not done already. The function |
574 | | * cache entry will be kept for the life of the backend, so put it in |
575 | | * TopMemoryContext. |
576 | | */ |
577 | 0 | Assert(cacheEntrySize >= sizeof(CachedFunction)); |
578 | 0 | if (function == NULL) |
579 | 0 | { |
580 | 0 | function = (CachedFunction *) |
581 | 0 | MemoryContextAllocZero(TopMemoryContext, cacheEntrySize); |
582 | 0 | new_function = true; |
583 | 0 | } |
584 | 0 | else |
585 | 0 | { |
586 | | /* re-using a previously existing struct, so clear it out */ |
587 | 0 | memset(function, 0, cacheEntrySize); |
588 | 0 | } |
589 | | |
590 | | /* |
591 | | * However, if function compilation fails, we'd like not to leak the |
592 | | * function struct, so use a PG_TRY block to prevent that. (It's up |
593 | | * to the compile callback function to avoid its own internal leakage |
594 | | * in such cases.) Unfortunately, freeing the struct is only safe if |
595 | | * we just allocated it: otherwise there are probably fn_extra |
596 | | * pointers to it. |
597 | | */ |
598 | 0 | PG_TRY(); |
599 | 0 | { |
600 | | /* |
601 | | * Do the hard, language-specific part. |
602 | | */ |
603 | 0 | ccallback(fcinfo, procTup, &hashkey, function, forValidator); |
604 | 0 | } |
605 | 0 | PG_CATCH(); |
606 | 0 | { |
607 | 0 | if (new_function) |
608 | 0 | pfree(function); |
609 | 0 | PG_RE_THROW(); |
610 | 0 | } |
611 | 0 | PG_END_TRY(); |
612 | | |
613 | | /* |
614 | | * Fill in the CachedFunction part. (We do this last to prevent the |
615 | | * function from looking valid before it's fully built.) fn_hashkey |
616 | | * will be set by cfunc_hashtable_insert; use_count remains zero. |
617 | | */ |
618 | 0 | function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data); |
619 | 0 | function->fn_tid = procTup->t_self; |
620 | 0 | function->dcallback = dcallback; |
621 | | |
622 | | /* |
623 | | * Add the completed struct to the hash table. |
624 | | */ |
625 | 0 | cfunc_hashtable_insert(function, &hashkey); |
626 | 0 | } |
627 | |
628 | 0 | ReleaseSysCache(procTup); |
629 | | |
630 | | /* |
631 | | * Finally return the compiled function |
632 | | */ |
633 | 0 | return function; |
634 | 0 | } |
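/*
 * Illustrative usage sketch, not part of this file: how a call handler for a
 * hypothetical language might obtain and pin a cache entry.  MyLangFunction
 * (sketched above), my_compile_callback(), my_delete_callback(), and
 * mylang_exec_function() are placeholders for the language-specific pieces;
 * only cached_function_compile() and the use_count contract come from this
 * file.
 */
static Datum
mylang_call_handler_sketch(FunctionCallInfo fcinfo)
{
	MyLangFunction *func;
	Datum		retval;

	/* Find or (re)build the cache entry; remember it in fn_extra */
	func = (MyLangFunction *)
		cached_function_compile(fcinfo,
								(CachedFunction *) fcinfo->flinfo->fn_extra,
								my_compile_callback,
								my_delete_callback,
								sizeof(MyLangFunction),
								false,
								false);
	fcinfo->flinfo->fn_extra = func;

	/* Pin the entry while executing, per the use_count contract above */
	func->cfunc.use_count++;
	PG_TRY();
	{
		retval = mylang_exec_function(func, fcinfo);
	}
	PG_FINALLY();
	{
		func->cfunc.use_count--;
	}
	PG_END_TRY();

	return retval;
}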