/src/postgres/src/backend/access/gist/gistxlog.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * gistxlog.c |
4 | | * WAL replay logic for GiST. |
5 | | * |
6 | | * |
7 | | * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group |
8 | | * Portions Copyright (c) 1994, Regents of the University of California |
9 | | * |
10 | | * IDENTIFICATION |
11 | | * src/backend/access/gist/gistxlog.c |
12 | | *------------------------------------------------------------------------- |
13 | | */ |
14 | | #include "postgres.h" |
15 | | |
16 | | #include "access/bufmask.h" |
17 | | #include "access/gist_private.h" |
18 | | #include "access/gistxlog.h" |
19 | | #include "access/transam.h" |
20 | | #include "access/xloginsert.h" |
21 | | #include "access/xlogutils.h" |
22 | | #include "storage/standby.h" |
23 | | #include "utils/memutils.h" |
24 | | #include "utils/rel.h" |
25 | | |
26 | | static MemoryContext opCtx; /* working memory for operations */ |
27 | | |
28 | | /* |
29 | | * Replay the clearing of F_FOLLOW_RIGHT flag on a child page. |
30 | | * |
31 | | * Even if the WAL record includes a full-page image, we have to update the |
32 | | * follow-right flag, because that change is not included in the full-page |
33 | | * image. To be sure that the intermediate state with the wrong flag value is |
34 | | * not visible to concurrent Hot Standby queries, this function handles |
35 | | * restoring the full-page image as well as updating the flag. (Note that |
36 | | * we never need to do anything else to the child page in the current WAL |
37 | | * action.) |
38 | | */ |
39 | | static void |
40 | | gistRedoClearFollowRight(XLogReaderState *record, uint8 block_id) |
41 | 0 | { |
42 | 0 | XLogRecPtr lsn = record->EndRecPtr; |
43 | 0 | Buffer buffer; |
44 | 0 | Page page; |
45 | 0 | XLogRedoAction action; |
46 | | |
47 | | /* |
48 | | * Note that we still update the page even if it was restored from a full |
49 | | * page image, because the updated NSN is not included in the image. |
50 | | */ |
51 | 0 | action = XLogReadBufferForRedo(record, block_id, &buffer); |
52 | 0 | if (action == BLK_NEEDS_REDO || action == BLK_RESTORED) |
53 | 0 | { |
54 | 0 | page = BufferGetPage(buffer); |
55 | |
56 | 0 | GistPageSetNSN(page, lsn); |
57 | 0 | GistClearFollowRight(page); |
58 | |
59 | 0 | PageSetLSN(page, lsn); |
60 | 0 | MarkBufferDirty(buffer); |
61 | 0 | } |
62 | 0 | if (BufferIsValid(buffer)) |
63 | 0 | UnlockReleaseBuffer(buffer); |
64 | 0 | } |
65 | | |
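This replay path mirrors the order of operations on the primary: the NSN and F_FOLLOW_RIGHT changes on the left child are applied only after XLogInsert() has returned, because the child's new NSN is the LSN of the record just written, so they can never be captured by the full-page image. A rough sketch of that primary-side sequence (approximating gistplacetopage() in gist.c; variable names illustrative):

    /* Sketch only -- the real code is in gistplacetopage() (gist.c). */
    recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE);
    if (BufferIsValid(leftchildbuf))
    {
        Page        leftpg = BufferGetPage(leftchildbuf);

        GistPageSetNSN(leftpg, recptr);    /* NSN = LSN of the record just written */
        GistClearFollowRight(leftpg);      /* parent now carries the downlink */
        PageSetLSN(leftpg, recptr);
    }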
66 | | /* |
67 | | * redo any page update (except page split) |
68 | | */ |
69 | | static void |
70 | | gistRedoPageUpdateRecord(XLogReaderState *record) |
71 | 0 | { |
72 | 0 | XLogRecPtr lsn = record->EndRecPtr; |
73 | 0 | gistxlogPageUpdate *xldata = (gistxlogPageUpdate *) XLogRecGetData(record); |
74 | 0 | Buffer buffer; |
75 | 0 | Page page; |
76 | |
77 | 0 | if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) |
78 | 0 | { |
79 | 0 | char *begin; |
80 | 0 | char *data; |
81 | 0 | Size datalen; |
82 | 0 | int ninserted PG_USED_FOR_ASSERTS_ONLY = 0; |
83 | |
84 | 0 | data = begin = XLogRecGetBlockData(record, 0, &datalen); |
85 | |
86 | 0 | page = BufferGetPage(buffer); |
87 | |
88 | 0 | if (xldata->ntodelete == 1 && xldata->ntoinsert == 1) |
89 | 0 | { |
90 | | /* |
91 | | * When replacing one tuple with one other tuple, we must use |
92 | | * PageIndexTupleOverwrite for consistency with gistplacetopage. |
93 | | */ |
94 | 0 | OffsetNumber offnum = *((OffsetNumber *) data); |
95 | 0 | IndexTuple itup; |
96 | 0 | Size itupsize; |
97 | |
98 | 0 | data += sizeof(OffsetNumber); |
99 | 0 | itup = (IndexTuple) data; |
100 | 0 | itupsize = IndexTupleSize(itup); |
101 | 0 | if (!PageIndexTupleOverwrite(page, offnum, (Item) itup, itupsize)) |
102 | 0 | elog(ERROR, "failed to add item to GiST index page, size %d bytes", |
103 | 0 | (int) itupsize); |
104 | 0 | data += itupsize; |
105 | | /* should be nothing left after consuming 1 tuple */ |
106 | 0 | Assert(data - begin == datalen); |
107 | | /* update insertion count for assert check below */ |
108 | 0 | ninserted++; |
109 | 0 | } |
110 | 0 | else if (xldata->ntodelete > 0) |
111 | 0 | { |
112 | | /* Otherwise, delete old tuples if any */ |
113 | 0 | OffsetNumber *todelete = (OffsetNumber *) data; |
114 | |
115 | 0 | data += sizeof(OffsetNumber) * xldata->ntodelete; |
116 | |
117 | 0 | PageIndexMultiDelete(page, todelete, xldata->ntodelete); |
118 | 0 | if (GistPageIsLeaf(page)) |
119 | 0 | GistMarkTuplesDeleted(page); |
120 | 0 | } |
121 | | |
122 | | /* Add new tuples if any */ |
123 | 0 | if (data - begin < datalen) |
124 | 0 | { |
125 | 0 | OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : |
126 | 0 | OffsetNumberNext(PageGetMaxOffsetNumber(page)); |
127 | |
128 | 0 | while (data - begin < datalen) |
129 | 0 | { |
130 | 0 | IndexTuple itup = (IndexTuple) data; |
131 | 0 | Size sz = IndexTupleSize(itup); |
132 | 0 | OffsetNumber l; |
133 | |
134 | 0 | data += sz; |
135 | |
136 | 0 | l = PageAddItem(page, (Item) itup, sz, off, false, false); |
137 | 0 | if (l == InvalidOffsetNumber) |
138 | 0 | elog(ERROR, "failed to add item to GiST index page, size %d bytes", |
139 | 0 | (int) sz); |
140 | 0 | off++; |
141 | 0 | ninserted++; |
142 | 0 | } |
143 | 0 | } |
144 | | |
145 | | /* Check that XLOG record contained expected number of tuples */ |
146 | 0 | Assert(ninserted == xldata->ntoinsert); |
147 | |
148 | 0 | PageSetLSN(page, lsn); |
149 | 0 | MarkBufferDirty(buffer); |
150 | 0 | } |
151 | | |
152 | | /* |
153 | | * Fix follow-right data on left child page |
154 | | * |
155 | | * This must be done while still holding the lock on the target page. Note |
156 | | * that even if the target page no longer exists, we still attempt to |
157 | | * replay the change on the child page. |
158 | | */ |
159 | 0 | if (XLogRecHasBlockRef(record, 1)) |
160 | 0 | gistRedoClearFollowRight(record, 1); |
161 | |
162 | 0 | if (BufferIsValid(buffer)) |
163 | 0 | UnlockReleaseBuffer(buffer); |
164 | 0 | } |
165 | | |
166 | | |
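The block-0 data that the loop above walks is laid out by gistXLogUpdate() later in this file: the array of offsets to delete comes first, followed by the new tuples packed back to back, which is why a single byte pointer can consume the whole chunk. A minimal sketch of that layout, in comment form:

    /*
     * Block-0 data as registered by gistXLogUpdate():
     *
     *     OffsetNumber todelete[ntodelete];   -- offsets of tuples to remove
     *     <IndexTupleData> tuples...;         -- ntoinsert variable-length tuples
     *
     * In the one-delete/one-insert case the same bytes are read as a single
     * OffsetNumber plus one tuple and applied with PageIndexTupleOverwrite(),
     * which is why "data - begin == datalen" must hold once it is consumed.
     */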
167 | | /* |
168 | | * redo delete on gist index page to remove tuples marked as DEAD during index |
169 | | * tuple insertion |
170 | | */ |
171 | | static void |
172 | | gistRedoDeleteRecord(XLogReaderState *record) |
173 | 0 | { |
174 | 0 | XLogRecPtr lsn = record->EndRecPtr; |
175 | 0 | gistxlogDelete *xldata = (gistxlogDelete *) XLogRecGetData(record); |
176 | 0 | Buffer buffer; |
177 | 0 | Page page; |
178 | 0 | OffsetNumber *toDelete = xldata->offsets; |
179 | | |
180 | | /* |
181 | | * If we have any conflict processing to do, it must happen before we |
182 | | * update the page. |
183 | | * |
184 | | * GiST delete records can conflict with standby queries. You might think |
185 | | * that vacuum records would conflict as well, but we've handled that |
186 | | * already. XLOG_HEAP2_PRUNE_VACUUM_SCAN records provide the highest xid |
187 | | * cleaned by the vacuum of the heap and so we can resolve any conflicts |
188 | | * just once when that arrives. After that we know that no conflicts |
189 | | * exist from individual gist vacuum records on that index. |
190 | | */ |
191 | 0 | if (InHotStandby) |
192 | 0 | { |
193 | 0 | RelFileLocator rlocator; |
194 | |
195 | 0 | XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); |
196 | |
197 | 0 | ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon, |
198 | 0 | xldata->isCatalogRel, |
199 | 0 | rlocator); |
200 | 0 | } |
201 | |
202 | 0 | if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) |
203 | 0 | { |
204 | 0 | page = BufferGetPage(buffer); |
205 | |
206 | 0 | PageIndexMultiDelete(page, toDelete, xldata->ntodelete); |
207 | |
208 | 0 | GistClearPageHasGarbage(page); |
209 | 0 | GistMarkTuplesDeleted(page); |
210 | |
211 | 0 | PageSetLSN(page, lsn); |
212 | 0 | MarkBufferDirty(buffer); |
213 | 0 | } |
214 | |
215 | 0 | if (BufferIsValid(buffer)) |
216 | 0 | UnlockReleaseBuffer(buffer); |
217 | 0 | } |
218 | | |
219 | | /* |
220 | | * Returns an array of index pointers. |
221 | | */ |
222 | | static IndexTuple * |
223 | | decodePageSplitRecord(char *begin, int len, int *n) |
224 | 0 | { |
225 | 0 | char *ptr; |
226 | 0 | int i = 0; |
227 | 0 | IndexTuple *tuples; |
228 | | |
229 | | /* extract the number of tuples */ |
230 | 0 | memcpy(n, begin, sizeof(int)); |
231 | 0 | ptr = begin + sizeof(int); |
232 | |
233 | 0 | tuples = palloc(*n * sizeof(IndexTuple)); |
234 | |
235 | 0 | for (i = 0; i < *n; i++) |
236 | 0 | { |
237 | 0 | Assert(ptr - begin < len); |
238 | 0 | tuples[i] = (IndexTuple) ptr; |
239 | 0 | ptr += IndexTupleSize((IndexTuple) ptr); |
240 | 0 | } |
241 | 0 | Assert(ptr - begin == len); |
242 | |
243 | 0 | return tuples; |
244 | 0 | } |
245 | | |
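This is the inverse of the per-page registration in gistXLogSplit() later in this file: each block's data begins with an int tuple count, followed by the tuples themselves packed back to back. A minimal sketch of that layout, in comment form:

    /*
     * Per-page block data as registered by gistXLogSplit():
     *
     *     int              num;        -- number of tuples placed on this page
     *     <IndexTupleData> tuples...;  -- 'num' variable-length tuples
     *
     * The returned array points straight into the WAL data; nothing is
     * copied, so the pointers are only valid while the record data is.
     */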
246 | | static void |
247 | | gistRedoPageSplitRecord(XLogReaderState *record) |
248 | 0 | { |
249 | 0 | XLogRecPtr lsn = record->EndRecPtr; |
250 | 0 | gistxlogPageSplit *xldata = (gistxlogPageSplit *) XLogRecGetData(record); |
251 | 0 | Buffer firstbuffer = InvalidBuffer; |
252 | 0 | Buffer buffer; |
253 | 0 | Page page; |
254 | 0 | int i; |
255 | 0 | bool isrootsplit = false; |
256 | | |
257 | | /* |
258 | | * We must hold lock on the first-listed page throughout the action, |
259 | | * including while updating the left child page (if any). We can unlock |
260 | | * remaining pages in the list as soon as they've been written, because |
261 | | * there is no path for concurrent queries to reach those pages without |
262 | | * first visiting the first-listed page. |
263 | | */ |
264 | | |
265 | | /* loop around all pages */ |
266 | 0 | for (i = 0; i < xldata->npage; i++) |
267 | 0 | { |
268 | 0 | int flags; |
269 | 0 | char *data; |
270 | 0 | Size datalen; |
271 | 0 | int num; |
272 | 0 | BlockNumber blkno; |
273 | 0 | IndexTuple *tuples; |
274 | |
275 | 0 | XLogRecGetBlockTag(record, i + 1, NULL, NULL, &blkno); |
276 | 0 | if (blkno == GIST_ROOT_BLKNO) |
277 | 0 | { |
278 | 0 | Assert(i == 0); |
279 | 0 | isrootsplit = true; |
280 | 0 | } |
281 | |
282 | 0 | buffer = XLogInitBufferForRedo(record, i + 1); |
283 | 0 | page = BufferGetPage(buffer); |
284 | 0 | data = XLogRecGetBlockData(record, i + 1, &datalen); |
285 | |
286 | 0 | tuples = decodePageSplitRecord(data, datalen, &num); |
287 | | |
288 | | /* ok, clear buffer */ |
289 | 0 | if (xldata->origleaf && blkno != GIST_ROOT_BLKNO) |
290 | 0 | flags = F_LEAF; |
291 | 0 | else |
292 | 0 | flags = 0; |
293 | 0 | GISTInitBuffer(buffer, flags); |
294 | | |
295 | | /* and fill it */ |
296 | 0 | gistfillbuffer(page, tuples, num, FirstOffsetNumber); |
297 | |
298 | 0 | if (blkno == GIST_ROOT_BLKNO) |
299 | 0 | { |
300 | 0 | GistPageGetOpaque(page)->rightlink = InvalidBlockNumber; |
301 | 0 | GistPageSetNSN(page, xldata->orignsn); |
302 | 0 | GistClearFollowRight(page); |
303 | 0 | } |
304 | 0 | else |
305 | 0 | { |
306 | 0 | if (i < xldata->npage - 1) |
307 | 0 | { |
308 | 0 | BlockNumber nextblkno; |
309 | |
310 | 0 | XLogRecGetBlockTag(record, i + 2, NULL, NULL, &nextblkno); |
311 | 0 | GistPageGetOpaque(page)->rightlink = nextblkno; |
312 | 0 | } |
313 | 0 | else |
314 | 0 | GistPageGetOpaque(page)->rightlink = xldata->origrlink; |
315 | 0 | GistPageSetNSN(page, xldata->orignsn); |
316 | 0 | if (i < xldata->npage - 1 && !isrootsplit && |
317 | 0 | xldata->markfollowright) |
318 | 0 | GistMarkFollowRight(page); |
319 | 0 | else |
320 | 0 | GistClearFollowRight(page); |
321 | 0 | } |
322 | |
323 | 0 | PageSetLSN(page, lsn); |
324 | 0 | MarkBufferDirty(buffer); |
325 | |
326 | 0 | if (i == 0) |
327 | 0 | firstbuffer = buffer; |
328 | 0 | else |
329 | 0 | UnlockReleaseBuffer(buffer); |
330 | 0 | } |
331 | | |
332 | | /* Fix follow-right data on left child page, if any */ |
333 | 0 | if (XLogRecHasBlockRef(record, 0)) |
334 | 0 | gistRedoClearFollowRight(record, 0); |
335 | | |
336 | | /* Finally, release lock on the first page */ |
337 | 0 | UnlockReleaseBuffer(firstbuffer); |
338 | 0 | } |
339 | | |
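Replay links the split pages into a chain before the parent has any downlinks for them: page i's rightlink points at page i+1, the last page inherits the original page's rightlink, and (when markfollowright is set and this is not a root split) every page except the last keeps F_FOLLOW_RIGHT until a later downlink insertion clears it. A small sketch of the resulting chain, with illustrative block numbers:

    /*
     *   orig page (blk 10) --> new page (blk 11) --> new page (blk 12) --> origrlink
     *   FOLLOW_RIGHT set       FOLLOW_RIGHT set       flag clear
     *
     * Concurrent scans that looked at the parent before the downlinks were
     * inserted detect the split from the follow-right flag and the pages'
     * NSN and walk the rightlinks instead.
     */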
340 | | /* redo page deletion */ |
341 | | static void |
342 | | gistRedoPageDelete(XLogReaderState *record) |
343 | 0 | { |
344 | 0 | XLogRecPtr lsn = record->EndRecPtr; |
345 | 0 | gistxlogPageDelete *xldata = (gistxlogPageDelete *) XLogRecGetData(record); |
346 | 0 | Buffer parentBuffer; |
347 | 0 | Buffer leafBuffer; |
348 | |
349 | 0 | if (XLogReadBufferForRedo(record, 0, &leafBuffer) == BLK_NEEDS_REDO) |
350 | 0 | { |
351 | 0 | Page page = BufferGetPage(leafBuffer); |
352 | |
353 | 0 | GistPageSetDeleted(page, xldata->deleteXid); |
354 | |
355 | 0 | PageSetLSN(page, lsn); |
356 | 0 | MarkBufferDirty(leafBuffer); |
357 | 0 | } |
358 | |
359 | 0 | if (XLogReadBufferForRedo(record, 1, &parentBuffer) == BLK_NEEDS_REDO) |
360 | 0 | { |
361 | 0 | Page page = BufferGetPage(parentBuffer); |
362 | |
363 | 0 | PageIndexTupleDelete(page, xldata->downlinkOffset); |
364 | |
365 | 0 | PageSetLSN(page, lsn); |
366 | 0 | MarkBufferDirty(parentBuffer); |
367 | 0 | } |
368 | |
369 | 0 | if (BufferIsValid(parentBuffer)) |
370 | 0 | UnlockReleaseBuffer(parentBuffer); |
371 | 0 | if (BufferIsValid(leafBuffer)) |
372 | 0 | UnlockReleaseBuffer(leafBuffer); |
373 | 0 | } |
374 | | |
375 | | static void |
376 | | gistRedoPageReuse(XLogReaderState *record) |
377 | 0 | { |
378 | 0 | gistxlogPageReuse *xlrec = (gistxlogPageReuse *) XLogRecGetData(record); |
379 | | |
380 | | /* |
381 | | * PAGE_REUSE records exist to provide a conflict point when we reuse |
382 | | * pages in the index via the FSM. That's all they do though. |
383 | | * |
384 | | * snapshotConflictHorizon was the page's deleteXid. The |
385 | | * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable() |
386 | | * conceptually mirrors the PGPROC->xmin > limitXmin test in |
387 | | * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the |
388 | | * same exclusion effect on primary and standby. |
389 | | */ |
390 | 0 | if (InHotStandby) |
391 | 0 | ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon, |
392 | 0 | xlrec->isCatalogRel, |
393 | 0 | xlrec->locator); |
394 | 0 | } |
395 | | |
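The horizon carried by this record is the deleteXid that page deletion stamped on the page; on the primary a deleted page goes back to the FSM only once gistPageRecyclable() decides no snapshot can still see it. A rough sketch of that check (the real code is in gistutil.c; argument details abbreviated):

    /* Sketch: a deleted page may be recycled once its deleteXid is older
     * than every snapshot that could still be running. */
    if (GistPageIsDeleted(page) &&
        GlobalVisCheckRemovableFullXid(NULL, GistPageGetDeleteXid(page)))
    {
        /* page can be handed out again via the FSM */
    }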
396 | | void |
397 | | gist_redo(XLogReaderState *record) |
398 | 0 | { |
399 | 0 | uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; |
400 | 0 | MemoryContext oldCxt; |
401 | | |
402 | | /* |
403 | | * GiST indexes do not require any conflict processing. NB: If we ever |
404 | | * implement a similar optimization to the one we have in b-tree, and remove killed |
405 | | * tuples outside VACUUM, we'll need to handle that here. |
406 | | */ |
407 | |
408 | 0 | oldCxt = MemoryContextSwitchTo(opCtx); |
409 | 0 | switch (info) |
410 | 0 | { |
411 | 0 | case XLOG_GIST_PAGE_UPDATE: |
412 | 0 | gistRedoPageUpdateRecord(record); |
413 | 0 | break; |
414 | 0 | case XLOG_GIST_DELETE: |
415 | 0 | gistRedoDeleteRecord(record); |
416 | 0 | break; |
417 | 0 | case XLOG_GIST_PAGE_REUSE: |
418 | 0 | gistRedoPageReuse(record); |
419 | 0 | break; |
420 | 0 | case XLOG_GIST_PAGE_SPLIT: |
421 | 0 | gistRedoPageSplitRecord(record); |
422 | 0 | break; |
423 | 0 | case XLOG_GIST_PAGE_DELETE: |
424 | 0 | gistRedoPageDelete(record); |
425 | 0 | break; |
426 | 0 | case XLOG_GIST_ASSIGN_LSN: |
427 | | /* nop. See gistGetFakeLSN(). */ |
428 | 0 | break; |
429 | 0 | default: |
430 | 0 | elog(PANIC, "gist_redo: unknown op code %u", info); |
431 | 0 | } |
432 | | |
433 | 0 | MemoryContextSwitchTo(oldCxt); |
434 | 0 | MemoryContextReset(opCtx); |
435 | 0 | } |
436 | | |
437 | | void |
438 | | gist_xlog_startup(void) |
439 | 0 | { |
440 | 0 | opCtx = createTempGistContext(); |
441 | 0 | } |
442 | | |
443 | | void |
444 | | gist_xlog_cleanup(void) |
445 | 0 | { |
446 | 0 | MemoryContextDelete(opCtx); |
447 | 0 | } |
448 | | |
449 | | /* |
450 | | * Mask a Gist page before running consistency checks on it. |
451 | | */ |
452 | | void |
453 | | gist_mask(char *pagedata, BlockNumber blkno) |
454 | 0 | { |
455 | 0 | Page page = (Page) pagedata; |
456 | |
457 | 0 | mask_page_lsn_and_checksum(page); |
458 | |
459 | 0 | mask_page_hint_bits(page); |
460 | 0 | mask_unused_space(page); |
461 | | |
462 | | /* |
463 | | * NSN is nothing but a special purpose LSN. Hence, mask it for the same |
464 | | * reason as mask_page_lsn_and_checksum. |
465 | | */ |
466 | 0 | GistPageSetNSN(page, (uint64) MASK_MARKER); |
467 | | |
468 | | /* |
469 | | * We update F_FOLLOW_RIGHT flag on the left child after writing WAL |
470 | | * record. Hence, mask this flag. See gistplacetopage() for details. |
471 | | */ |
472 | 0 | GistMarkFollowRight(page); |
473 | |
474 | 0 | if (GistPageIsLeaf(page)) |
475 | 0 | { |
476 | | /* |
477 | | * In gist leaf pages, it is possible to modify the LP_FLAGS without |
478 | | * emitting any WAL record. Hence, mask the line pointer flags. See |
479 | | * gistkillitems() for details. |
480 | | */ |
481 | 0 | mask_lp_flags(page); |
482 | 0 | } |
483 | | |
484 | | /* |
485 | | * During gist redo, we never mark a page as garbage. Hence, mask it to |
486 | | * ignore any differences. |
487 | | */ |
488 | 0 | GistClearPageHasGarbage(page); |
489 | 0 | } |
490 | | |
491 | | /* |
492 | | * Write WAL record of a page split. |
493 | | */ |
494 | | XLogRecPtr |
495 | | gistXLogSplit(bool page_is_leaf, |
496 | | SplitPageLayout *dist, |
497 | | BlockNumber origrlink, GistNSN orignsn, |
498 | | Buffer leftchildbuf, bool markfollowright) |
499 | 0 | { |
500 | 0 | gistxlogPageSplit xlrec; |
501 | 0 | SplitPageLayout *ptr; |
502 | 0 | int npage = 0; |
503 | 0 | XLogRecPtr recptr; |
504 | 0 | int i; |
505 | |
506 | 0 | for (ptr = dist; ptr; ptr = ptr->next) |
507 | 0 | npage++; |
508 | |
509 | 0 | xlrec.origrlink = origrlink; |
510 | 0 | xlrec.orignsn = orignsn; |
511 | 0 | xlrec.origleaf = page_is_leaf; |
512 | 0 | xlrec.npage = (uint16) npage; |
513 | 0 | xlrec.markfollowright = markfollowright; |
514 | |
515 | 0 | XLogBeginInsert(); |
516 | | |
517 | | /* |
518 | | * Include a full page image of the child buf. (only necessary if a |
519 | | * checkpoint happened since the child page was split) |
520 | | */ |
521 | 0 | if (BufferIsValid(leftchildbuf)) |
522 | 0 | XLogRegisterBuffer(0, leftchildbuf, REGBUF_STANDARD); |
523 | | |
524 | | /* |
525 | | * NOTE: We register a lot of data. The caller must've called |
526 | | * XLogEnsureRecordSpace() to prepare for that. We cannot do it here, |
527 | | * because we're already in a critical section. If you change the number |
528 | | * of buffer or data registrations here, make sure you modify the |
529 | | * XLogEnsureRecordSpace() calls accordingly! |
530 | | */ |
531 | 0 | XLogRegisterData(&xlrec, sizeof(gistxlogPageSplit)); |
532 | |
533 | 0 | i = 1; |
534 | 0 | for (ptr = dist; ptr; ptr = ptr->next) |
535 | 0 | { |
536 | 0 | XLogRegisterBuffer(i, ptr->buffer, REGBUF_WILL_INIT); |
537 | 0 | XLogRegisterBufData(i, &(ptr->block.num), sizeof(int)); |
538 | 0 | XLogRegisterBufData(i, ptr->list, ptr->lenlist); |
539 | 0 | i++; |
540 | 0 | } |
541 | |
542 | 0 | recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT); |
543 | |
544 | 0 | return recptr; |
545 | 0 | } |
546 | | |
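The registrations above use block IDs 0 through npage plus one main-data chunk and two buffer-data chunks per page, so the caller must reserve registration slots before entering its critical section; gistXLogSplit() cannot do it itself at this point. A sketch of the kind of call the NOTE above refers to, with the sizing inferred from the registrations in this function (the actual figures used by the caller live in gist.c):

    /* Before entering the critical section, roughly: */
    XLogEnsureRecordSpace(npage,            /* highest block ID registered */
                          1 + npage * 2);   /* data chunks: header + 2 per page */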
547 | | /* |
548 | | * Write XLOG record describing a page deletion. This also includes removal of |
549 | | * downlink from the parent page. |
550 | | */ |
551 | | XLogRecPtr |
552 | | gistXLogPageDelete(Buffer buffer, FullTransactionId xid, |
553 | | Buffer parentBuffer, OffsetNumber downlinkOffset) |
554 | 0 | { |
555 | 0 | gistxlogPageDelete xlrec; |
556 | 0 | XLogRecPtr recptr; |
557 | |
558 | 0 | xlrec.deleteXid = xid; |
559 | 0 | xlrec.downlinkOffset = downlinkOffset; |
560 | |
561 | 0 | XLogBeginInsert(); |
562 | 0 | XLogRegisterData(&xlrec, SizeOfGistxlogPageDelete); |
563 | |
564 | 0 | XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); |
565 | 0 | XLogRegisterBuffer(1, parentBuffer, REGBUF_STANDARD); |
566 | |
567 | 0 | recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_DELETE); |
568 | |
569 | 0 | return recptr; |
570 | 0 | } |
571 | | |
572 | | /* |
573 | | * Write an empty XLOG record to assign a distinct LSN. |
574 | | */ |
575 | | XLogRecPtr |
576 | | gistXLogAssignLSN(void) |
577 | 0 | { |
578 | 0 | int dummy = 0; |
579 | | |
580 | | /* |
581 | | * Records other than XLOG_SWITCH must have content. We use an integer 0 |
582 | | * to follow the restriction. |
583 | | */ |
584 | 0 | XLogBeginInsert(); |
585 | 0 | XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT); |
586 | 0 | XLogRegisterData(&dummy, sizeof(dummy)); |
587 | 0 | return XLogInsert(RM_GIST_ID, XLOG_GIST_ASSIGN_LSN); |
588 | 0 | } |
589 | | |
590 | | /* |
591 | | * Write XLOG record about reuse of a deleted page. |
592 | | */ |
593 | | void |
594 | | gistXLogPageReuse(Relation rel, Relation heaprel, |
595 | | BlockNumber blkno, FullTransactionId deleteXid) |
596 | 0 | { |
597 | 0 | gistxlogPageReuse xlrec_reuse; |
598 | | |
599 | | /* |
600 | | * Note that we don't register the buffer with the record, because this |
601 | | * operation doesn't modify the page. This record only exists to provide a |
602 | | * conflict point for Hot Standby. |
603 | | */ |
604 | | |
605 | | /* XLOG stuff */ |
606 | 0 | xlrec_reuse.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel); |
607 | 0 | xlrec_reuse.locator = rel->rd_locator; |
608 | 0 | xlrec_reuse.block = blkno; |
609 | 0 | xlrec_reuse.snapshotConflictHorizon = deleteXid; |
610 | |
611 | 0 | XLogBeginInsert(); |
612 | 0 | XLogRegisterData(&xlrec_reuse, SizeOfGistxlogPageReuse); |
613 | |
614 | 0 | XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_REUSE); |
615 | 0 | } |
616 | | |
617 | | /* |
618 | | * Write XLOG record describing a page update. The update can include any |
619 | | * number of deletions and/or insertions of tuples on a single index page. |
620 | | * |
621 | | * If this update inserts a downlink for a split page, also record that |
622 | | * the F_FOLLOW_RIGHT flag on the child page is cleared and NSN set. |
623 | | * |
624 | | * Note that both the todelete array and the tuples are marked as belonging |
625 | | * to the target buffer; they need not be stored in XLOG if XLogInsert decides |
626 | | * to log the whole buffer contents instead. |
627 | | */ |
628 | | XLogRecPtr |
629 | | gistXLogUpdate(Buffer buffer, |
630 | | OffsetNumber *todelete, int ntodelete, |
631 | | IndexTuple *itup, int ituplen, |
632 | | Buffer leftchildbuf) |
633 | 0 | { |
634 | 0 | gistxlogPageUpdate xlrec; |
635 | 0 | int i; |
636 | 0 | XLogRecPtr recptr; |
637 | |
638 | 0 | xlrec.ntodelete = ntodelete; |
639 | 0 | xlrec.ntoinsert = ituplen; |
640 | |
641 | 0 | XLogBeginInsert(); |
642 | 0 | XLogRegisterData(&xlrec, sizeof(gistxlogPageUpdate)); |
643 | |
644 | 0 | XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); |
645 | 0 | XLogRegisterBufData(0, todelete, sizeof(OffsetNumber) * ntodelete); |
646 | | |
647 | | /* new tuples */ |
648 | 0 | for (i = 0; i < ituplen; i++) |
649 | 0 | XLogRegisterBufData(0, itup[i], IndexTupleSize(itup[i])); |
650 | | |
651 | | /* |
652 | | * Include a full page image of the child buf. (only necessary if a |
653 | | * checkpoint happened since the child page was split) |
654 | | */ |
655 | 0 | if (BufferIsValid(leftchildbuf)) |
656 | 0 | XLogRegisterBuffer(1, leftchildbuf, REGBUF_STANDARD); |
657 | |
658 | 0 | recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE); |
659 | |
660 | 0 | return recptr; |
661 | 0 | } |
662 | | |
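A caller of gistXLogUpdate() already holds an exclusive lock on the buffer and is inside a critical section, having applied the change to the page first; it then stamps the page with the returned LSN (or with a fake LSN when the relation is not WAL-logged). A minimal usage sketch with illustrative variables, roughly as the insertion path in gist.c does it:

    /* page already modified; buffer exclusively locked; critical section open */
    if (RelationNeedsWAL(rel))
        recptr = gistXLogUpdate(buffer, todelete, ntodelete,
                                itup, ituplen, InvalidBuffer);
    else
        recptr = gistGetFakeLSN(rel);
    PageSetLSN(BufferGetPage(buffer), recptr);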
663 | | /* |
664 | | * Write XLOG record describing a delete of leaf index tuples marked as DEAD |
665 | | * during new tuple insertion. One may think that this case is already covered |
666 | | * by gistXLogUpdate(). But deletion of index tuples might conflict with |
667 | | * standby queries and needs special handling. |
668 | | */ |
669 | | XLogRecPtr |
670 | | gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, |
671 | | TransactionId snapshotConflictHorizon, Relation heaprel) |
672 | 0 | { |
673 | 0 | gistxlogDelete xlrec; |
674 | 0 | XLogRecPtr recptr; |
675 | |
676 | 0 | xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel); |
677 | 0 | xlrec.snapshotConflictHorizon = snapshotConflictHorizon; |
678 | 0 | xlrec.ntodelete = ntodelete; |
679 | |
680 | 0 | XLogBeginInsert(); |
681 | 0 | XLogRegisterData(&xlrec, SizeOfGistxlogDelete); |
682 | | |
683 | | /* |
684 | | * We need the target-offsets array whether or not we store the whole |
685 | | * buffer, to allow us to find the snapshotConflictHorizon on a standby |
686 | | * server. |
687 | | */ |
688 | 0 | XLogRegisterData(todelete, ntodelete * sizeof(OffsetNumber)); |
689 | |
690 | 0 | XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); |
691 | |
692 | 0 | recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_DELETE); |
693 | |
694 | 0 | return recptr; |
695 | 0 | } |