Coverage Report

Created: 2022-10-31 07:00

/src/ghostpdl/psi/ireclaim.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2021 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  1305 Grant Avenue - Suite 200, Novato,
13
   CA 94945, U.S.A., +1(415)492-9861, for further information.
14
*/
15
16
17
/* Interpreter's interface to garbage collector */
18
#include "ghost.h"
19
#include "ierrors.h"
20
#include "gsstruct.h"
21
#include "iastate.h"
22
#include "icontext.h"
23
#include "interp.h"
24
#include "isave.h"    /* for isstate.h */
25
#include "isstate.h"    /* for mem->saved->state */
26
#include "dstack.h"   /* for dsbot, dsp, dict_set_top */
27
#include "estack.h"   /* for esbot, esp */
28
#include "ostack.h"   /* for osbot, osp */
29
#include "opdef.h"    /* for defining init procedure */
30
#include "store.h"    /* for make_array */
31
32
/* Import preparation and cleanup routines. */
33
extern void ialloc_gc_prepare(gs_ref_memory_t *);
34
35
/* Forward references */
36
static int gs_vmreclaim(gs_dual_memory_t *, bool);
37
38
/* Initialize the GC hook in the allocator. */
39
static int ireclaim(gs_dual_memory_t *, int);
40
static int
41
ireclaim_init(i_ctx_t *i_ctx_p)
42
89.2k
{
43
89.2k
    gs_imemory.reclaim = ireclaim;
44
89.2k
    return 0;
45
89.2k
}
46
47
/* GC hook called when the allocator signals a GC is needed (space = -1), */
48
/* or for vmreclaim (space = the space to collect). */
49
static int
50
ireclaim(gs_dual_memory_t * dmem, int space)
51
182k
{
52
182k
    bool global;
53
182k
    gs_ref_memory_t *mem = NULL;
54
182k
    int code;
55
56
182k
    if (space < 0) {
57
        /* Determine which allocator exceeded the limit. */
58
4.00k
        int i;
59
60
15.9k
        for (i = 0; i < countof(dmem->spaces_indexed); i++) {
61
15.9k
            mem = dmem->spaces_indexed[i];
62
15.9k
            if (mem == 0)
63
4.00k
                continue;
64
11.9k
            if (mem->gc_status.requested > 0 ||
65
11.9k
                ((gs_ref_memory_t *)mem->stable_memory)->gc_status.requested > 0
66
11.9k
                )
67
4.00k
                break;
68
11.9k
        }
69
4.00k
        if (!mem) {
70
0
            mem = dmem->space_global; /* just in case */
71
0
        }
72
178k
    } else {
73
178k
        mem = dmem->spaces_indexed[space >> r_space_shift];
74
178k
    }
75
182k
    if_debug3m('0', (gs_memory_t *)mem, "[0]GC called, space=%d, requestor=%d, requested=%ld\n",
76
182k
               space, mem->space, (long)mem->gc_status.requested);
77
182k
    global = mem->space != avm_local;
78
    /* Since dmem may move, reset the request now. */
79
182k
    ialloc_reset_requested(dmem);
80
182k
    code = gs_vmreclaim(dmem, global);
81
182k
    if (code < 0)
82
0
        return code;
83
182k
    ialloc_set_limit(mem);
84
182k
    if (space < 0) {
85
4.00k
        gs_memory_status_t stats;
86
4.00k
        size_t allocated;
87
88
        /* If the amount still allocated after the GC is complete */
89
        /* exceeds the max_vm setting, then return a VMerror       */
90
4.00k
        gs_memory_status((gs_memory_t *) mem, &stats);
91
4.00k
        allocated = stats.allocated;
92
4.00k
        if (mem->stable_memory != (gs_memory_t *)mem) {
93
4.00k
            gs_memory_status(mem->stable_memory, &stats);
94
4.00k
            allocated += stats.allocated;
95
4.00k
        }
96
4.00k
        if (allocated >= mem->gc_status.max_vm) {
97
            /* We can't satisfy this request within max_vm. */
98
0
            return_error(gs_error_VMerror);
99
0
        }
100
4.00k
    }
101
182k
    return 0;
102
182k
}
103
104
/* Interpreter entry to garbage collector. */
105
static int
106
gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
107
182k
{
108
    /* HACK: we know the gs_dual_memory_t is embedded in a context state. */
109
182k
    i_ctx_t *i_ctx_p =
110
182k
        (i_ctx_t *)((char *)dmem - offset_of(i_ctx_t, memory));
111
182k
    gs_ref_memory_t *lmem = dmem->space_local;
112
182k
    int code = context_state_store(i_ctx_p);
113
182k
    gs_ref_memory_t *memories[5];
114
182k
    gs_ref_memory_t *mem;
115
182k
    int nmem, i;
116
117
182k
    if (code < 0)
118
0
        return code;
119
120
182k
    memories[0] = dmem->space_system;
121
182k
    memories[1] = mem = dmem->space_global;
122
182k
    nmem = 2;
123
182k
    if (lmem != dmem->space_global)
124
182k
        memories[nmem++] = lmem;
125
729k
    for (i = nmem; --i >= 0;) {
126
547k
        mem = memories[i];
127
547k
        if (mem->stable_memory != (gs_memory_t *)mem)
128
364k
            memories[nmem++] = (gs_ref_memory_t *)mem->stable_memory;
129
547k
    }
130
131
    /****** ABORT IF code < 0 ******/
132
1.09M
    for (i = nmem; --i >= 0; )
133
912k
        alloc_close_clump(memories[i]);
134
135
    /* Prune the file list so it won't retain potentially collectible */
136
    /* files. */
137
138
182k
    for (i = (global ? i_vm_system : i_vm_local);
139
721k
         i < countof(dmem->spaces_indexed);
140
539k
         ++i
141
539k
         ) {
142
539k
        gs_ref_memory_t *mem = dmem->spaces_indexed[i];
143
144
        /* Always safe to subtract 1 from i here, as i is always at
145
         * least i_vm_system (1) or i_vm_local (2). */
146
539k
        if (mem == 0 || (mem == dmem->spaces_indexed[i - 1]))
147
0
            continue;
148
539k
        if (mem->stable_memory != (gs_memory_t *)mem)
149
360k
            ialloc_gc_prepare((gs_ref_memory_t *)mem->stable_memory);
150
805k
        for (;; mem = &mem->saved->state) {
151
805k
            ialloc_gc_prepare(mem);
152
805k
            if (mem->saved == 0)
153
539k
                break;
154
805k
        }
155
539k
    }
156
157
    /* Do the actual collection. */
158
159
182k
    {
160
182k
        void *ctxp = i_ctx_p;
161
182k
        gs_gc_root_t context_root, *r = &context_root;
162
163
182k
        gs_register_struct_root((gs_memory_t *)lmem, &r,
164
182k
                                &ctxp, "i_ctx_p root");
165
182k
        GS_RECLAIM(&dmem->spaces, global);
166
182k
        gs_unregister_root((gs_memory_t *)lmem, r, "i_ctx_p root");
167
182k
        i_ctx_p = ctxp;
168
182k
        dmem = &i_ctx_p->memory;
169
182k
    }
170
171
    /* Update caches not handled by context_state_load. */
172
173
182k
    *systemdict = *ref_stack_index(&d_stack, ref_stack_count(&d_stack) - 1);
174
175
    /* Update the cached value pointers in names. */
176
177
182k
    dicts_gc_cleanup();
178
179
    /* Reopen the active clumps. */
180
181
1.09M
    for (i = 0; i < nmem; ++i)
182
912k
        alloc_open_clump(memories[i]);
183
184
    /* Reload the context state.  Note this should be done
185
       AFTER the clumps are reopened, since the context state
186
       load could do allocations that must remain.
187
       If it were done while the clumps were still closed,
188
       we would lose those allocations when the clumps were opened */
189
190
182k
    code = context_state_load(i_ctx_p);
191
182k
    return code;
192
182k
}
193
194
/* ------ Initialization procedure ------ */
195
196
const op_def ireclaim_l2_op_defs[] =
197
{
198
    op_def_end(ireclaim_init)
199
};