Coverage Report

Created: 2025-06-10 07:27

/src/ghostpdl/psi/ireclaim.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Interpreter's interface to garbage collector */
18
#include "ghost.h"
19
#include "ierrors.h"
20
#include "gsstruct.h"
21
#include "iastate.h"
22
#include "icontext.h"
23
#include "interp.h"
24
#include "isave.h"    /* for isstate.h */
25
#include "isstate.h"    /* for mem->saved->state */
26
#include "dstack.h"   /* for dsbot, dsp, dict_set_top */
27
#include "estack.h"   /* for esbot, esp */
28
#include "ostack.h"   /* for osbot, osp */
29
#include "opdef.h"    /* for defining init procedure */
30
#include "store.h"    /* for make_array */
31
32
/* Import preparation and cleanup routines. */
33
extern void ialloc_gc_prepare(gs_ref_memory_t *);
34
35
/* Forward references */
36
static int gs_vmreclaim(gs_dual_memory_t *, bool);
37
38
/* Initialize the GC hook in the allocator. */
39
static int ireclaim(gs_dual_memory_t *, int);
40
static int
41
ireclaim_init(i_ctx_t *i_ctx_p)
42
10.8k
{
43
10.8k
    gs_imemory.reclaim = ireclaim;
44
10.8k
    return 0;
45
10.8k
}
46
47
/* GC hook called when the allocator signals a GC is needed (space = -1), */
48
/* or for vmreclaim (space = the space to collect). */
49
static int
50
ireclaim(gs_dual_memory_t * dmem, int space)
51
23.0k
{
52
23.0k
    bool global;
53
23.0k
    gs_ref_memory_t *mem = NULL;
54
23.0k
    int code;
55
56
23.0k
    if (space < 0) {
57
        /* Determine which allocator exceeded the limit. */
58
1.37k
        int i;
59
60
5.43k
        for (i = 0; i < countof(dmem->spaces_indexed); i++) {
61
5.43k
            mem = dmem->spaces_indexed[i];
62
5.43k
            if (mem == 0)
63
1.37k
                continue;
64
4.05k
            if (mem->gc_status.requested > 0 ||
65
4.05k
                ((gs_ref_memory_t *)mem->stable_memory)->gc_status.requested > 0
66
4.05k
                )
67
1.37k
                break;
68
4.05k
        }
69
1.37k
        if (!mem) {
70
0
            mem = dmem->space_global; /* just in case */
71
0
        }
72
21.6k
    } else {
73
21.6k
        mem = dmem->spaces_indexed[space >> r_space_shift];
74
21.6k
    }
75
23.0k
    if_debug3m('0', (gs_memory_t *)mem, "[0]GC called, space=%d, requestor=%d, requested=%ld\n",
76
23.0k
               space, mem->space, (long)mem->gc_status.requested);
77
23.0k
    global = mem->space != avm_local;
78
    /* Since dmem may move, reset the request now. */
79
23.0k
    ialloc_reset_requested(dmem);
80
23.0k
    code = gs_vmreclaim(dmem, global);
81
23.0k
    if (code < 0)
82
0
        return code;
83
23.0k
    ialloc_set_limit(mem);
84
23.0k
    if (space < 0) {
85
1.37k
        gs_memory_status_t stats;
86
1.37k
        size_t allocated;
87
88
        /* If the amount still allocated after the GC is complete */
89
        /* exceeds the max_vm setting, then return a VMerror       */
90
1.37k
        gs_memory_status((gs_memory_t *) mem, &stats);
91
1.37k
        allocated = stats.allocated;
92
1.37k
        if (mem->stable_memory != (gs_memory_t *)mem) {
93
1.37k
            gs_memory_status(mem->stable_memory, &stats);
94
1.37k
            allocated += stats.allocated;
95
1.37k
        }
96
1.37k
        if (allocated >= mem->gc_status.max_vm) {
97
            /* We can't satisfy this request within max_vm. */
98
0
            return_error(gs_error_VMerror);
99
0
        }
100
1.37k
    }
101
23.0k
    return 0;
102
23.0k
}
103
104
/* Interpreter entry to garbage collector. */
105
static int
106
gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
107
23.0k
{
108
    /* HACK: we know the gs_dual_memory_t is embedded in a context state. */
109
23.0k
    i_ctx_t *i_ctx_p =
110
23.0k
        (i_ctx_t *)((char *)dmem - offset_of(i_ctx_t, memory));
111
23.0k
    gs_ref_memory_t *lmem = dmem->space_local;
112
23.0k
    int code = context_state_store(i_ctx_p);
113
23.0k
    gs_ref_memory_t *memories[5];
114
23.0k
    gs_ref_memory_t *mem;
115
23.0k
    int nmem, i;
116
117
23.0k
    if (code < 0)
118
0
        return code;
119
120
23.0k
    memories[0] = dmem->space_system;
121
23.0k
    memories[1] = mem = dmem->space_global;
122
23.0k
    nmem = 2;
123
23.0k
    if (lmem != dmem->space_global)
124
23.0k
        memories[nmem++] = lmem;
125
92.1k
    for (i = nmem; --i >= 0;) {
126
69.0k
        mem = memories[i];
127
69.0k
        if (mem->stable_memory != (gs_memory_t *)mem)
128
46.0k
            memories[nmem++] = (gs_ref_memory_t *)mem->stable_memory;
129
69.0k
    }
130
131
    /****** ABORT IF code < 0 ******/
132
138k
    for (i = nmem; --i >= 0; )
133
115k
        alloc_close_clump(memories[i]);
134
135
    /* Prune the file list so it won't retain potentially collectible */
136
    /* files. */
137
138
23.0k
    for (i = (global ? i_vm_system : i_vm_local);
139
89.5k
         i < countof(dmem->spaces_indexed);
140
66.4k
         ++i
141
66.4k
         ) {
142
66.4k
        gs_ref_memory_t *mem = dmem->spaces_indexed[i];
143
144
        /* Always safe to subtract 1 from i here, as i is always at
145
         * least i_vm_system (1) or i_vm_local (2). */
146
66.4k
        if (mem == 0 || (mem == dmem->spaces_indexed[i - 1]))
147
0
            continue;
148
66.4k
        if (mem->stable_memory != (gs_memory_t *)mem)
149
44.7k
            ialloc_gc_prepare((gs_ref_memory_t *)mem->stable_memory);
150
1.39M
        for (;; mem = &mem->saved->state) {
151
1.39M
            ialloc_gc_prepare(mem);
152
1.39M
            if (mem->saved == 0)
153
66.4k
                break;
154
1.39M
        }
155
66.4k
    }
156
157
    /* Do the actual collection. */
158
159
23.0k
    {
160
23.0k
        void *ctxp = i_ctx_p;
161
23.0k
        gs_gc_root_t context_root, *r = &context_root;
162
163
23.0k
        gs_register_struct_root((gs_memory_t *)lmem, &r,
164
23.0k
                                &ctxp, "i_ctx_p root");
165
23.0k
        GS_RECLAIM(&dmem->spaces, global);
166
23.0k
        gs_unregister_root((gs_memory_t *)lmem, r, "i_ctx_p root");
167
23.0k
        i_ctx_p = ctxp;
168
23.0k
        dmem = &i_ctx_p->memory;
169
23.0k
    }
170
171
    /* Update caches not handled by context_state_load. */
172
173
23.0k
    *systemdict = *ref_stack_index(&d_stack, ref_stack_count(&d_stack) - 1);
174
175
    /* Update the cached value pointers in names. */
176
177
23.0k
    dicts_gc_cleanup();
178
179
    /* Reopen the active clumps. */
180
181
138k
    for (i = 0; i < nmem; ++i)
182
115k
        alloc_open_clump(memories[i]);
183
184
    /* Reload the context state.  Note this should be done
185
       AFTER the clumps are reopened, since the context state
186
       load could do allocations that must remain.
187
       If it were done while the clumps were still closed,
188
       we would lose those allocations when the clumps were opened */
189
190
23.0k
    code = context_state_load(i_ctx_p);
191
23.0k
    return code;
192
23.0k
}
193
194
/* ------ Initialization procedure ------ */
195
196
const op_def ireclaim_l2_op_defs[] =
197
{
198
    op_def_end(ireclaim_init)
199
};