Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2025 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* CIE color operators */ |
18 | | #include "math_.h" |
19 | | #include "memory_.h" |
20 | | #include "ghost.h" |
21 | | #include "oper.h" |
22 | | #include "gsstruct.h" |
23 | | #include "gxcspace.h" /* gscolor2.h requires gscspace.h */ |
24 | | #include "gscolor2.h" |
25 | | #include "gscie.h" |
26 | | #include "estack.h" |
27 | | #include "ialloc.h" |
28 | | #include "idict.h" |
29 | | #include "idparam.h" |
30 | | #include "igstate.h" |
31 | | #include "icie.h" |
32 | | #include "isave.h" |
33 | | #include "ivmspace.h" |
34 | | #include "store.h" /* for make_null */ |
35 | | #include "zcie.h" |
36 | | #include "gsicc_create.h" |
37 | | #include "gsicc_manage.h" |
38 | | #include "gsicc_profilecache.h" |
39 | | |
40 | | /* Prototype */ |
41 | | int cieicc_prepare_caches(i_ctx_t *i_ctx_p, const gs_range * domains, |
42 | | const ref * procs, |
43 | | cie_cache_floats * pc0, cie_cache_floats * pc1, |
44 | | cie_cache_floats * pc2, cie_cache_floats * pc3, |
45 | | void *container, |
46 | | const gs_ref_memory_t * imem, client_name_t cname); |
47 | | static int |
48 | | cie_prepare_iccproc(i_ctx_t *i_ctx_p, const gs_range * domain, const ref * proc, |
49 | | cie_cache_floats * pcache, void *container, |
50 | | const gs_ref_memory_t * imem, client_name_t cname); |
51 | | |
/* Empty procedures */
/* Shared read-only, executable, zero-length arrays used as the default when a
 * dictionary omits an optional Decode* procedure array (see
 * dict_proc_array_param); callers treat the defaulted case as an identity
 * transform (see e.g. cie_abc_param). */
static const ref empty_procs[4] =
{
    empty_ref_data(t_array, a_readonly | a_executable),
    empty_ref_data(t_array, a_readonly | a_executable),
    empty_ref_data(t_array, a_readonly | a_executable),
    empty_ref_data(t_array, a_readonly | a_executable)
};
60 | | |
61 | | /* ------ Parameter extraction utilities ------ */ |
62 | | |
63 | | /* Get a range array parameter from a dictionary. */ |
64 | | /* We know that count <= 4. */ |
65 | | int |
66 | | dict_ranges_param(const gs_memory_t *mem, |
67 | | const ref * pdref, const char *kstr, int count, |
68 | | gs_range * prange) |
69 | 487k | { |
70 | 487k | int code = dict_floats_param(mem, pdref, kstr, count * 2, |
71 | 487k | (float *)prange, NULL); |
72 | | |
73 | 487k | if (code < 0) |
74 | 0 | return code; |
75 | 487k | else if (code == 0) |
76 | 324k | memcpy(prange, Range4_default.ranges, count * sizeof(gs_range)); |
77 | 487k | return 0; |
78 | 487k | } |
79 | | |
80 | | /* Get an array of procedures from a dictionary. */ |
81 | | /* We know count <= countof(empty_procs). */ |
/* Get an array of procedures from a dictionary. */
/* We know count <= countof(empty_procs). */
/* Returns 0 if the key supplied the procedures, 1 if it was absent and the
 * shared empty procedures were substituted, or a negative error code. */
int
dict_proc_array_param(const gs_memory_t *mem,
                      const ref *pdict, const char *kstr,
                      uint count, ref *pparray)
{
    ref *pvalue;

    if (dict_find_string(pdict, kstr, &pvalue) > 0) {
        uint i;

        /* The value must be an array of exactly 'count' procedures. */
        check_array_only(*pvalue);
        if (r_size(pvalue) != count)
            return_error(gs_error_rangecheck);
        for (i = 0; i < count; i++) {
            ref proc;

            array_get(mem, pvalue, (long)i, &proc);
            check_proc_only(proc);      /* errors out if not a procedure */
        }
        *pparray = *pvalue;
        return 0;
    } else {
        /* Key absent: default to the shared empty procedure array. */
        make_const_array(pparray, a_readonly | avm_foreign,
                         count, &empty_procs[0]);
        return 1;               /* lets the caller detect the defaulted case */
    }
}
109 | | |
110 | | /* Get 3 ranges from a dictionary. */ |
111 | | int |
112 | | dict_range3_param(const gs_memory_t *mem, |
113 | | const ref *pdref, const char *kstr, |
114 | | gs_range3 *prange3) |
115 | 487k | { |
116 | 487k | return dict_ranges_param(mem, pdref, kstr, 3, prange3->ranges); |
117 | 487k | } |
118 | | |
119 | | /* Get a 3x3 matrix from a dictionary. */ |
120 | | int |
121 | | dict_matrix3_param(const gs_memory_t *mem, |
122 | | const ref *pdref, const char *kstr, gs_matrix3 *pmat3) |
123 | 487k | { |
124 | | /* |
125 | | * We can't simply call dict_float_array_param with the matrix |
126 | | * cast to a 9-element float array, because compilers may insert |
127 | | * padding elements after each of the vectors. However, we can be |
128 | | * confident that there is no padding within a single vector. |
129 | | */ |
130 | 487k | float values[9], defaults[9]; |
131 | 487k | int code; |
132 | | |
133 | 487k | memcpy(&defaults[0], &Matrix3_default.cu, 3 * sizeof(float)); |
134 | 487k | memcpy(&defaults[3], &Matrix3_default.cv, 3 * sizeof(float)); |
135 | 487k | memcpy(&defaults[6], &Matrix3_default.cw, 3 * sizeof(float)); |
136 | 487k | code = dict_floats_param(mem, pdref, kstr, 9, values, defaults); |
137 | 487k | if (code < 0) |
138 | 0 | return code; |
139 | 487k | memcpy(&pmat3->cu, &values[0], 3 * sizeof(float)); |
140 | 487k | memcpy(&pmat3->cv, &values[3], 3 * sizeof(float)); |
141 | 487k | memcpy(&pmat3->cw, &values[6], 3 * sizeof(float)); |
142 | 487k | return 0; |
143 | 487k | } |
144 | | |
145 | | /* Get 3 procedures from a dictionary. */ |
146 | | int |
147 | | dict_proc3_param(const gs_memory_t *mem, const ref *pdref, const char *kstr, ref *proc3) |
148 | 974k | { |
149 | 974k | return dict_proc_array_param(mem, pdref, kstr, 3, proc3); |
150 | 974k | } |
151 | | |
152 | | /* Get WhitePoint and BlackPoint values. */ |
153 | | int |
154 | | cie_points_param(const gs_memory_t *mem, |
155 | | const ref * pdref, gs_cie_wb * pwb) |
156 | 162k | { |
157 | 162k | int code; |
158 | | |
159 | 162k | if ((code = dict_floats_param(mem, pdref, "WhitePoint", 3, |
160 | 162k | (float *)&pwb->WhitePoint, NULL)) < 0 || |
161 | 162k | (code = dict_floats_param(mem, pdref, "BlackPoint", 3, |
162 | 162k | (float *)&pwb->BlackPoint, (const float *)&BlackPoint_default)) < 0 |
163 | 162k | ) |
164 | 0 | return code; |
165 | 162k | if (pwb->WhitePoint.u <= 0 || |
166 | 162k | pwb->WhitePoint.v != 1 || |
167 | 162k | pwb->WhitePoint.w <= 0 || |
168 | 162k | pwb->BlackPoint.u < 0 || |
169 | 162k | pwb->BlackPoint.v < 0 || |
170 | 162k | pwb->BlackPoint.w < 0 |
171 | 162k | ) |
172 | 0 | return_error(gs_error_rangecheck); |
173 | 162k | return 0; |
174 | 162k | } |
175 | | |
176 | | /* Process a 3- or 4-dimensional lookup table from a dictionary. */ |
177 | | /* The caller has set pclt->n and pclt->m. */ |
178 | | /* ptref is known to be a readable array of size at least n+1. */ |
179 | | static int cie_3d_table_param(const ref * ptable, uint count, uint nbytes, |
180 | | gs_const_string * strings, const gs_memory_t *mem); |
/* Parse the Table entry of a CIEBasedDEF(G) dictionary into pclt.
 * pclt->n (3 or 4) and pclt->m have been set by the caller; ptref is a
 * readable array of at least n+1 elements: n dimension counts followed by
 * the string (n==3) or array-of-strings (n==4) table data. */
int
cie_table_param(const ref * ptref, gx_color_lookup_table * pclt,
                const gs_memory_t * mem)
{
    int n = pclt->n, m = pclt->m;
    const ref *pta = ptref->value.const_refs;
    int i;
    uint nbytes;
    int code;
    gs_const_string *table;

    /* The first n elements are the table dimensions, each in (1, 65535]. */
    for (i = 0; i < n; ++i) {
        check_type_only(pta[i], t_integer);
        if (pta[i].value.intval <= 1 || pta[i].value.intval > max_ushort)
            return_error(gs_error_rangecheck);
        pclt->dims[i] = (int)pta[i].value.intval;
    }
    /* Each leaf string covers the last two dimensions times m components. */
    nbytes = m * pclt->dims[n - 2] * pclt->dims[n - 1];
    if (n == 3) {
        table =
            gs_alloc_struct_array(mem->stable_memory, pclt->dims[0], gs_const_string,
                                  &st_const_string_element, "cie_table_param");
        if (table == 0)
            return_error(gs_error_VMerror);
        code = cie_3d_table_param(pta + 3, pclt->dims[0], nbytes, table, mem);
    } else {                    /* n == 4 */
        int d0 = pclt->dims[0], d1 = pclt->dims[1];
        uint ntables = d0 * d1;
        const ref *psuba;

        /* Element 4 is an array of d0 sub-tables, each of d1 strings. */
        check_read_type(pta[4], t_array);
        if (r_size(pta + 4) != d0)
            return_error(gs_error_rangecheck);
        table =
            gs_alloc_struct_array(mem->stable_memory, ntables, gs_const_string,
                                  &st_const_string_element, "cie_table_param");
        if (table == 0)
            return_error(gs_error_VMerror);
        psuba = pta[4].value.const_refs;
        /*
         * We know that d0 > 0, so code will always be set in the loop:
         * we initialize code to 0 here solely to pacify stupid compilers.
         */
        for (code = 0, i = 0; i < d0; ++i) {
            code = cie_3d_table_param(psuba + i, d1, nbytes, table + d1 * i, mem);
            if (code < 0)
                break;
        }
    }
    if (code < 0) {
        /* Parsing failed: release the partially filled table. */
        gs_free_object((gs_memory_t *)mem, table, "cie_table_param");
        return code;
    }
    pclt->table = table;
    return 0;
}
/* Parse one 3-D sub-table: 'count' strings of exactly 'nbytes' bytes each,
 * copying each string into stable memory and recording it in strings[]. */
static int
cie_3d_table_param(const ref * ptable, uint count, uint nbytes,
                   gs_const_string * strings, const gs_memory_t *mem)
{
    const ref *rstrings;
    uint i;

    check_read_type(*ptable, t_array);
    if (r_size(ptable) != count)
        return_error(gs_error_rangecheck);
    rstrings = ptable->value.const_refs;
    for (i = 0; i < count; ++i) {
        const ref *const prt2 = rstrings + i;
        byte *tmpstr;

        check_read_type(*prt2, t_string);
        if (r_size(prt2) != nbytes)
            return_error(gs_error_rangecheck);
        /* Here we need to get a string in stable_memory (like the rest of the CIEDEF(G)
         * structure). It _may_ already be in global or stable memory, but we don't know
         * that, so just allocate and copy it so we don't end up with stale pointers after
         * a "restore" that frees localVM. Rely on GC to collect the strings.
         */
        tmpstr = gs_alloc_string(mem->stable_memory, nbytes, "cie_3d_table_param");
        if (tmpstr == NULL)
            return_error(gs_error_VMerror);
        memcpy(tmpstr, prt2->value.const_bytes, nbytes);
        strings[i].data = tmpstr;
        strings[i].size = nbytes;
    }
    return 0;
}
269 | | |
270 | | /* ------ CIE setcolorspace ------ */ |
271 | | |
272 | | /* Common code for the CIEBased* cases of setcolorspace. */ |
273 | | static int |
274 | | cie_lmnp_param(const gs_memory_t *mem, const ref * pdref, gs_cie_common * pcie, |
275 | | ref_cie_procs * pcprocs, bool *has_lmn_procs) |
276 | 0 | { |
277 | 0 | int code; |
278 | |
|
279 | 0 | if ((code = dict_range3_param(mem, pdref, "RangeLMN", &pcie->RangeLMN)) < 0 || |
280 | 0 | (code = dict_matrix3_param(mem, pdref, "MatrixLMN", &pcie->MatrixLMN)) < 0 || |
281 | 0 | (code = cie_points_param(mem, pdref, &pcie->points)) < 0 |
282 | 0 | ) |
283 | 0 | return code; |
284 | 0 | code = dict_proc3_param(mem, pdref, "DecodeLMN", &pcprocs->DecodeLMN); |
285 | 0 | if (code < 0) |
286 | 0 | return code; |
287 | 0 | *has_lmn_procs = !code; /* Need to know for efficient creation of ICC profile */ |
288 | 0 | pcie->DecodeLMN = DecodeLMN_default; |
289 | 0 | return 0; |
290 | 0 | } |
291 | | |
292 | | /* Get objects associated with cie color space */ |
293 | | static int |
294 | | cie_a_param(const gs_memory_t *mem, const ref * pdref, gs_cie_a * pcie, |
295 | | ref_cie_procs * pcprocs, bool *has_a_procs, bool *has_lmn_procs) |
296 | 0 | { |
297 | 0 | int code; |
298 | |
|
299 | 0 | code = dict_floats_param(mem, pdref, "RangeA", 2, (float *)&pcie->RangeA, |
300 | 0 | (const float *)&RangeA_default); |
301 | 0 | if (code < 0) |
302 | 0 | return code; |
303 | 0 | code = dict_floats_param(mem, pdref, "MatrixA", 3, (float *)&pcie->MatrixA, |
304 | 0 | (const float *)&MatrixA_default); |
305 | 0 | if (code < 0) |
306 | 0 | return code; |
307 | 0 | code = cie_lmnp_param(mem, pdref, &pcie->common, pcprocs, has_lmn_procs); |
308 | 0 | if (code < 0) |
309 | 0 | return code; |
310 | 0 | if ((code = dict_proc_param(pdref, "DecodeA", &(pcprocs->Decode.A), true)) < 0) |
311 | 0 | return code; |
312 | 0 | *has_a_procs = !code; |
313 | 0 | return 0; |
314 | 0 | } |
315 | | |
316 | | /* Common code for the CIEBasedABC/DEF[G] cases of setcolorspace. */ |
317 | | static int |
318 | | cie_abc_param(i_ctx_t *i_ctx_p, const gs_memory_t *mem, const ref * pdref, |
319 | | gs_cie_abc * pcie, ref_cie_procs * pcprocs, |
320 | | bool *has_abc_procs, bool *has_lmn_procs) |
321 | 0 | { |
322 | 0 | int code; |
323 | 0 | gs_ref_memory_t *imem = (gs_ref_memory_t *)mem; |
324 | |
|
325 | 0 | if ((code = dict_range3_param(mem, pdref, "RangeABC", &pcie->RangeABC)) < 0 || |
326 | 0 | (code = dict_matrix3_param(mem, pdref, "MatrixABC", &pcie->MatrixABC)) < 0 || |
327 | 0 | (code = cie_lmnp_param(mem, pdref, &pcie->common, pcprocs, has_lmn_procs)) < 0 |
328 | 0 | ) |
329 | 0 | return code; |
330 | 0 | code = dict_proc3_param(mem, pdref, "DecodeABC", &pcprocs->Decode.ABC); |
331 | 0 | if (code < 0) |
332 | 0 | return code; |
333 | 0 | *has_abc_procs = !code; |
334 | 0 | pcie->DecodeABC = DecodeABC_default; |
335 | | /* At this point, we have all the parameters in pcie including knowing if |
336 | | there |
337 | | are procedures present. If there are no procedures, life is simple for us. |
338 | | If there are procedures, we can not create the ICC profile until we have the procedures |
339 | | sampled, which requires pushing the appropriate commands upon the postscript execution stack |
340 | | to create the sampled procs and then having a follow up operation to create the ICC profile. |
341 | | Because the procs may have to be merged with other operators and/or packed |
342 | | in a particular form, we will have the PS operators stuff them in the already |
343 | | existing static buffers that already exist for this purpose in the cie structures |
344 | | e.g. gx_cie_vector_cache3_t that are in the common (params.abc.common.caches.DecodeLMN) |
345 | | and unique entries (e.g. params.abc.caches.DecodeABC.caches) */ |
346 | 0 | if (*has_abc_procs) { |
347 | 0 | cieicc_prepare_caches(i_ctx_p, (&pcie->RangeABC)->ranges, |
348 | 0 | pcprocs->Decode.ABC.value.const_refs, |
349 | 0 | &(pcie->caches.DecodeABC.caches)->floats, |
350 | 0 | &(pcie->caches.DecodeABC.caches)[1].floats, |
351 | 0 | &(pcie->caches.DecodeABC.caches)[2].floats, |
352 | 0 | NULL, pcie, imem, "Decode.ABC(ICC)"); |
353 | 0 | } else { |
354 | 0 | pcie->caches.DecodeABC.caches->floats.params.is_identity = true; |
355 | 0 | (pcie->caches.DecodeABC.caches)[1].floats.params.is_identity = true; |
356 | 0 | (pcie->caches.DecodeABC.caches)[2].floats.params.is_identity = true; |
357 | 0 | } |
358 | 0 | if (*has_lmn_procs) { |
359 | 0 | cieicc_prepare_caches(i_ctx_p, (&pcie->common.RangeLMN)->ranges, |
360 | 0 | pcprocs->DecodeLMN.value.const_refs, |
361 | 0 | &(pcie->common.caches.DecodeLMN)->floats, |
362 | 0 | &(pcie->common.caches.DecodeLMN)[1].floats, |
363 | 0 | &(pcie->common.caches.DecodeLMN)[2].floats, |
364 | 0 | NULL, pcie, imem, "Decode.LMN(ICC)"); |
365 | 0 | } else { |
366 | 0 | pcie->common.caches.DecodeLMN->floats.params.is_identity = true; |
367 | 0 | (pcie->common.caches.DecodeLMN)[1].floats.params.is_identity = true; |
368 | 0 | (pcie->common.caches.DecodeLMN)[2].floats.params.is_identity = true; |
369 | 0 | } |
370 | 0 | return 0; |
371 | 0 | } |
372 | | |
373 | | /* Finish setting a CIE space (successful or not). */ |
374 | | int |
375 | | cie_set_finish(i_ctx_t *i_ctx_p, gs_color_space * pcs, |
376 | | const ref_cie_procs * pcprocs, int edepth, int code) |
377 | 0 | { |
378 | 0 | if (code >= 0) |
379 | 0 | code = gs_setcolorspace(igs, pcs); |
380 | | /* Delete the extra reference to the parameter tables. */ |
381 | 0 | rc_decrement_only_cs(pcs, "cie_set_finish"); |
382 | 0 | if (code < 0) { |
383 | 0 | ref_stack_pop_to(&e_stack, edepth); |
384 | 0 | return code; |
385 | 0 | } |
386 | 0 | istate->colorspace[0].procs.cie = *pcprocs; |
387 | 0 | pop(1); |
388 | 0 | return (ref_stack_count(&e_stack) == edepth ? 0 : o_push_estack); |
389 | 0 | } |
390 | | |
391 | | /* Forward references */ |
392 | | static int cie_defg_finish(i_ctx_t *); |
393 | | |
394 | | static int |
395 | | cie_defg_param(i_ctx_t *i_ctx_p, const gs_memory_t *mem, const ref * pdref, |
396 | | gs_cie_defg * pcie, ref_cie_procs * pcprocs, bool *has_abc_procs, |
397 | | bool *has_lmn_procs, bool *has_defg_procs, ref *ptref) |
398 | 0 | { |
399 | 0 | int code; |
400 | 0 | gs_ref_memory_t *imem = (gs_ref_memory_t *)mem; |
401 | | |
402 | | /* First get all the ABC and LMN information related to this space */ |
403 | 0 | code = cie_abc_param(i_ctx_p, mem, pdref, (gs_cie_abc *) pcie, pcprocs, |
404 | 0 | has_abc_procs, has_lmn_procs); |
405 | 0 | if (code < 0) |
406 | 0 | return code; |
407 | 0 | code = dict_ranges_param(mem, pdref, "RangeDEFG", 4, pcie->RangeDEFG.ranges); |
408 | 0 | if (code < 0) |
409 | 0 | return code; |
410 | 0 | code = dict_ranges_param(mem, pdref, "RangeHIJK", 4, pcie->RangeHIJK.ranges); |
411 | 0 | if (code < 0) |
412 | 0 | return code; |
413 | 0 | code = cie_table_param(ptref, &pcie->Table, mem); |
414 | 0 | if (code < 0) |
415 | 0 | return code; |
416 | 0 | code = dict_proc_array_param(mem, pdref, "DecodeDEFG", 4, |
417 | 0 | &(pcprocs->PreDecode.DEFG)); |
418 | 0 | if (code < 0) |
419 | 0 | return code; |
420 | 0 | *has_defg_procs = !code; |
421 | 0 | if (*has_defg_procs) { |
422 | 0 | cieicc_prepare_caches(i_ctx_p, (&pcie->RangeDEFG)->ranges, |
423 | 0 | pcprocs->PreDecode.DEFG.value.const_refs, |
424 | 0 | &(pcie->caches_defg.DecodeDEFG)->floats, |
425 | 0 | &(pcie->caches_defg.DecodeDEFG)[1].floats, |
426 | 0 | &(pcie->caches_defg.DecodeDEFG)[2].floats, |
427 | 0 | &(pcie->caches_defg.DecodeDEFG)[3].floats, |
428 | 0 | pcie, imem, "Decode.DEFG(ICC)"); |
429 | 0 | } else { |
430 | 0 | pcie->caches_defg.DecodeDEFG->floats.params.is_identity = true; |
431 | 0 | (pcie->caches_defg.DecodeDEFG)[1].floats.params.is_identity = true; |
432 | 0 | (pcie->caches_defg.DecodeDEFG)[2].floats.params.is_identity = true; |
433 | 0 | (pcie->caches_defg.DecodeDEFG)[3].floats.params.is_identity = true; |
434 | 0 | } |
435 | 0 | return(0); |
436 | 0 | } |
437 | | int |
438 | | ciedefgspace(i_ctx_t *i_ctx_p, ref *CIEDict, uint64_t dictkey) |
439 | 0 | { |
440 | 0 | os_ptr op = osp; |
441 | 0 | int edepth = ref_stack_count(&e_stack); |
442 | 0 | gs_memory_t *mem = gs_gstate_memory(igs); |
443 | 0 | gs_color_space *pcs; |
444 | 0 | ref_cie_procs procs; |
445 | 0 | gs_cie_defg *pcie; |
446 | 0 | int code = 0; |
447 | 0 | ref *ptref; |
448 | 0 | bool has_defg_procs, has_abc_procs, has_lmn_procs; |
449 | 0 | gs_ref_memory_t *imem = (gs_ref_memory_t *)mem; |
450 | |
|
451 | 0 | if (dictkey != 0) { |
452 | 0 | pcs = gsicc_find_cs(dictkey, igs); |
453 | 0 | if (pcs && gs_color_space_num_components(pcs) != 4) |
454 | 0 | pcs = NULL; |
455 | 0 | } |
456 | 0 | else |
457 | 0 | pcs = NULL; |
458 | 0 | push(1); /* Sacrificial */ |
459 | 0 | procs = istate->colorspace[0].procs.cie; |
460 | 0 | if (pcs == NULL ) { |
461 | 0 | if ((code = dict_find_string(CIEDict, "Table", &ptref)) <= 0) { |
462 | 0 | if (code == 0) |
463 | 0 | gs_note_error(cie_set_finish(i_ctx_p, pcs, &procs, edepth, gs_error_rangecheck)); |
464 | 0 | else |
465 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
466 | 0 | } |
467 | 0 | check_read_type(*ptref, t_array); |
468 | 0 | if (r_size(ptref) != 5) |
469 | 0 | return_error(gs_error_rangecheck); |
470 | | /* Stable memory due to current caching of color space */ |
471 | 0 | code = gs_cspace_build_CIEDEFG(&pcs, NULL, mem->stable_memory); |
472 | 0 | if (code < 0) |
473 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
474 | 0 | pcie = pcs->params.defg; |
475 | 0 | pcie->Table.n = 4; |
476 | 0 | pcie->Table.m = 3; |
477 | 0 | code = cie_cache_push_finish(i_ctx_p, cie_defg_finish, imem, pcie); |
478 | 0 | if (code < 0) |
479 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
480 | 0 | code = cie_defg_param(i_ctx_p, imemory, CIEDict, pcie, &procs, |
481 | 0 | &has_abc_procs, &has_lmn_procs, &has_defg_procs,ptref); |
482 | 0 | if (code < 0) |
483 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
484 | | /* Add the color space to the profile cache */ |
485 | 0 | code = gsicc_add_cs(igs, pcs,dictkey); |
486 | 0 | if (code < 0) |
487 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
488 | 0 | } else { |
489 | 0 | rc_increment(pcs); |
490 | 0 | } |
491 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
492 | 0 | } |
493 | | |
494 | | static int |
495 | | cie_defg_finish(i_ctx_t *i_ctx_p) |
496 | 0 | { |
497 | 0 | os_ptr op = osp; |
498 | 0 | gs_cie_defg *pcie = r_ptr(op, gs_cie_defg); |
499 | |
|
500 | 0 | pcie->DecodeDEFG = DecodeDEFG_from_cache; |
501 | 0 | pcie->DecodeABC = DecodeABC_from_cache; |
502 | 0 | pcie->common.DecodeLMN = DecodeLMN_from_cache; |
503 | 0 | gs_cie_defg_complete(pcie); |
504 | 0 | pop(1); |
505 | 0 | return 0; |
506 | 0 | } |
507 | | |
508 | | static int |
509 | | cie_def_param(i_ctx_t *i_ctx_p, const gs_memory_t *mem, const ref * pdref, |
510 | | gs_cie_def * pcie, ref_cie_procs * pcprocs, |
511 | | bool *has_abc_procs, bool *has_lmn_procs, |
512 | | bool *has_def_procs, ref *ptref) |
513 | 0 | { |
514 | 0 | int code; |
515 | 0 | gs_ref_memory_t *imem = (gs_ref_memory_t *)mem; |
516 | | |
517 | | /* First get all the ABC and LMN information related to this space */ |
518 | 0 | code = cie_abc_param(i_ctx_p, mem, pdref, (gs_cie_abc *) pcie, pcprocs, |
519 | 0 | has_abc_procs, has_lmn_procs); |
520 | 0 | if (code < 0) |
521 | 0 | return code; |
522 | 0 | code = dict_range3_param(mem, pdref, "RangeDEF", &pcie->RangeDEF); |
523 | 0 | if (code < 0) |
524 | 0 | return code; |
525 | 0 | code = dict_range3_param(mem, pdref, "RangeHIJ", &pcie->RangeHIJ); |
526 | 0 | if (code < 0) |
527 | 0 | return code; |
528 | 0 | code = cie_table_param(ptref, &pcie->Table, mem); |
529 | 0 | if (code < 0) |
530 | 0 | return code; |
531 | | /* The DEF procs */ |
532 | 0 | code = dict_proc3_param(mem, pdref, "DecodeDEF", &(pcprocs->PreDecode.DEF)); |
533 | 0 | if (code < 0) |
534 | 0 | return code; |
535 | 0 | *has_def_procs = !code; |
536 | 0 | if (*has_def_procs) { |
537 | 0 | cieicc_prepare_caches(i_ctx_p, (&pcie->RangeDEF)->ranges, |
538 | 0 | pcprocs->PreDecode.DEF.value.const_refs, |
539 | 0 | &(pcie->caches_def.DecodeDEF)->floats, |
540 | 0 | &(pcie->caches_def.DecodeDEF)[1].floats, |
541 | 0 | &(pcie->caches_def.DecodeDEF)[2].floats, |
542 | 0 | NULL, pcie, imem, "Decode.DEF(ICC)"); |
543 | 0 | } else { |
544 | 0 | pcie->caches_def.DecodeDEF->floats.params.is_identity = true; |
545 | 0 | (pcie->caches_def.DecodeDEF)[1].floats.params.is_identity = true; |
546 | 0 | (pcie->caches_def.DecodeDEF)[2].floats.params.is_identity = true; |
547 | 0 | } |
548 | 0 | return(0); |
549 | 0 | } |
550 | | |
551 | | static int cie_def_finish(i_ctx_t *); |
552 | | int |
553 | | ciedefspace(i_ctx_t *i_ctx_p, ref *CIEDict, uint64_t dictkey) |
554 | 0 | { |
555 | 0 | os_ptr op = osp; |
556 | 0 | int edepth = ref_stack_count(&e_stack); |
557 | 0 | gs_memory_t *mem = gs_gstate_memory(igs); |
558 | 0 | gs_color_space *pcs; |
559 | 0 | ref_cie_procs procs; |
560 | 0 | gs_cie_def *pcie; |
561 | 0 | int code = 0; |
562 | 0 | ref *ptref; |
563 | 0 | bool has_def_procs, has_lmn_procs, has_abc_procs; |
564 | 0 | gs_ref_memory_t *imem = (gs_ref_memory_t *)mem; |
565 | |
|
566 | 0 | if (dictkey != 0) { |
567 | 0 | pcs = gsicc_find_cs(dictkey, igs); |
568 | 0 | if (pcs && gs_color_space_num_components(pcs) != 3) |
569 | 0 | pcs = NULL; |
570 | 0 | } |
571 | 0 | else |
572 | 0 | pcs = NULL; |
573 | 0 | push(1); /* Sacrificial */ |
574 | 0 | procs = istate->colorspace[0].procs.cie; |
575 | 0 | if (pcs == NULL ) { |
576 | 0 | if ((code = dict_find_string(CIEDict, "Table", &ptref)) <= 0) { |
577 | 0 | if (code == 0) |
578 | 0 | gs_note_error(cie_set_finish(i_ctx_p, pcs, &procs, edepth, gs_error_rangecheck)); |
579 | 0 | else |
580 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
581 | 0 | } |
582 | 0 | check_read_type(*ptref, t_array); |
583 | 0 | if (r_size(ptref) != 4) |
584 | 0 | return_error(gs_error_rangecheck); |
585 | | /* Stable memory due to current caching of color space */ |
586 | 0 | code = gs_cspace_build_CIEDEF(&pcs, NULL, mem->stable_memory); |
587 | 0 | if (code < 0) |
588 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
589 | 0 | pcie = pcs->params.def; |
590 | 0 | pcie->Table.n = 3; |
591 | 0 | pcie->Table.m = 3; |
592 | 0 | code = cie_cache_push_finish(i_ctx_p, cie_def_finish, imem, pcie); |
593 | 0 | if (code < 0) |
594 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
595 | 0 | code = cie_def_param(i_ctx_p, imemory, CIEDict, pcie, &procs, |
596 | 0 | &has_abc_procs, &has_lmn_procs, &has_def_procs, ptref); |
597 | 0 | if (code < 0) |
598 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
599 | | /* Add the color space to the profile cache */ |
600 | 0 | code = gsicc_add_cs(igs, pcs,dictkey); |
601 | 0 | if (code < 0) |
602 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
603 | 0 | } else { |
604 | 0 | rc_increment(pcs); |
605 | 0 | } |
606 | 0 | return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code); |
607 | 0 | } |
608 | | |
609 | | static int |
610 | | cie_def_finish(i_ctx_t *i_ctx_p) |
611 | 0 | { |
612 | 0 | os_ptr op = osp; |
613 | 0 | gs_cie_def *pcie = r_ptr(op, gs_cie_def); |
614 | |
|
615 | 0 | pcie->DecodeDEF = DecodeDEF_from_cache; |
616 | 0 | pcie->DecodeABC = DecodeABC_from_cache; |
617 | 0 | pcie->common.DecodeLMN = DecodeLMN_from_cache; |
618 | 0 | gs_cie_def_complete(pcie); |
619 | 0 | pop(1); |
620 | 0 | return 0; |
621 | 0 | } |
622 | | |
623 | | static int cie_abc_finish(i_ctx_t *); |
624 | | |
/* Build and install a CIEBasedABC color space from CIEDict, reusing an
 * equivalent cached ICC space when dictkey matches one already built. */
int
cieabcspace(i_ctx_t *i_ctx_p, ref *CIEDict, uint64_t dictkey)
{
    os_ptr op = osp;
    int edepth = ref_stack_count(&e_stack);
    gs_memory_t *mem = gs_gstate_memory(igs);
    gs_color_space *pcs;
    ref_cie_procs procs;
    gs_cie_abc *pcie;
    int code = 0;
    bool has_lmn_procs, has_abc_procs;
    gs_ref_memory_t *imem = (gs_ref_memory_t *)mem;

    /* See if the color space is in the profile cache */
    /* (a hit with the wrong component count is a key collision: reject it) */
    if (dictkey != 0) {
        pcs = gsicc_find_cs(dictkey, igs);
        if (pcs && gs_color_space_num_components(pcs) != 3)
            pcs = NULL;
    }
    else
        pcs = NULL;

    push(1);                    /* Sacrificial */
    procs = istate->colorspace[0].procs.cie;
    if (pcs == NULL ) {
        /* Stable memory due to current caching of color space */
        code = gs_cspace_build_CIEABC(&pcs, NULL, mem->stable_memory);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        pcie = pcs->params.abc;
        code = cie_cache_push_finish(i_ctx_p, cie_abc_finish, imem, pcie);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        code = cie_abc_param(i_ctx_p, imemory, CIEDict, pcie, &procs,
                             &has_abc_procs, &has_lmn_procs);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        /* Set the color space in the graphic state.  The ICC profile
           will be set later if we actually use the space.  Procs will be
           sampled now though. Also, the finish procedure is on the stack
           since that is where the vector cache is completed from the scalar
           caches.  We may need the vector cache if we are going to go
           ahead and create an MLUT for this thing */
        /* Add the color space to the profile cache */
        code = gsicc_add_cs(igs, pcs,dictkey);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
    } else {
        /* Cache hit: just take another reference to the existing space. */
        rc_increment(pcs);
    }
    return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
}
677 | | |
678 | | static int |
679 | | cie_abc_finish(i_ctx_t *i_ctx_p) |
680 | 0 | { |
681 | 0 | os_ptr op = osp; |
682 | 0 | gs_cie_abc *pcie = r_ptr(op, gs_cie_abc); |
683 | |
|
684 | 0 | pcie->DecodeABC = DecodeABC_from_cache; |
685 | 0 | pcie->common.DecodeLMN = DecodeLMN_from_cache; |
686 | 0 | gs_cie_abc_complete(pcie); |
687 | 0 | pop(1); |
688 | 0 | return 0; |
689 | 0 | } |
690 | | |
691 | | static int cie_a_finish(i_ctx_t *); |
692 | | |
/* Build and install a CIEBasedA color space from CIEdict, reusing an
 * equivalent cached ICC space when dictkey matches one already built. */
int
cieaspace(i_ctx_t *i_ctx_p, ref *CIEdict, uint64_t dictkey)
{
    os_ptr op = osp;
    int edepth = ref_stack_count(&e_stack);
    gs_memory_t *mem = gs_gstate_memory(igs);
    const gs_ref_memory_t *imem = (gs_ref_memory_t *)mem;
    gs_color_space *pcs;
    ref_cie_procs procs;
    gs_cie_a *pcie;
    int code = 0;
    bool has_a_procs = false;
    bool has_lmn_procs;

    /* See if the color space is in the profile cache */
    /* (a hit with the wrong component count is a key collision: reject it) */
    if (dictkey != 0) {
        pcs = gsicc_find_cs(dictkey, igs);
        if (pcs && gs_color_space_num_components(pcs) != 1)
            pcs = NULL;
    }
    else
        pcs = NULL;
    push(1);                    /* Sacrificial */
    procs = istate->colorspace[0].procs.cie;
    if (pcs == NULL ) {
        /* Stable memory due to current caching of color space */
        code = gs_cspace_build_CIEA(&pcs, NULL, mem->stable_memory);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        pcie = pcs->params.a;
        code = cie_a_param(imemory, CIEdict, pcie, &procs, &has_a_procs,
                           &has_lmn_procs);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        /* Push finalize procedure on the execution stack */
        code = cie_cache_push_finish(i_ctx_p, cie_a_finish, (gs_ref_memory_t *)imem, pcie);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
        if (!has_a_procs && !has_lmn_procs) {
            /* No procedures at all: every cache is an identity transform. */
            pcie->common.caches.DecodeLMN->floats
                .params.is_identity = true;
            (pcie->common.caches.DecodeLMN)[1].floats.params.is_identity = true;
            (pcie->common.caches.DecodeLMN)[2].floats.params.is_identity = true;
            pcie->caches.DecodeA.floats.params.is_identity = true;
        } else {
            if (has_a_procs) {
                code = cie_prepare_iccproc(i_ctx_p, &pcie->RangeA,
                    &procs.Decode.A, &pcie->caches.DecodeA.floats, pcie, imem, "Decode.A");
                if (code < 0)
                    return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
            } else {
                pcie->caches.DecodeA.floats.params.is_identity = true;
            }
            if (has_lmn_procs) {
                /* NOTE(review): the return code of cieicc_prepare_caches is
                 * ignored here, unlike cie_prepare_iccproc above — confirm
                 * whether a failure should be propagated. */
                cieicc_prepare_caches(i_ctx_p, (&pcie->common.RangeLMN)->ranges,
                                      procs.DecodeLMN.value.const_refs,
                                      &(pcie->common.caches.DecodeLMN)->floats,
                                      &(pcie->common.caches.DecodeLMN)[1].floats,
                                      &(pcie->common.caches.DecodeLMN)[2].floats,
                                      NULL, pcie, imem, "Decode.LMN(ICC)");
            } else {
                pcie->common.caches.DecodeLMN->floats.params.is_identity = true;
                (pcie->common.caches.DecodeLMN)[1].floats.params.is_identity = true;
                (pcie->common.caches.DecodeLMN)[2].floats.params.is_identity = true;
            }
        }
        /* Add the color space to the profile cache */
        code = gsicc_add_cs(igs, pcs,dictkey);
        if (code < 0)
            return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
    } else {
        /* Cache hit: just take another reference to the existing space. */
        rc_increment(pcs);
    }
    /* Set the color space in the graphic state.  The ICC profile may be set after this
       due to the needed sampled procs */
    return cie_set_finish(i_ctx_p, pcs, &procs, edepth, code);
}
770 | | |
771 | | static int |
772 | | cie_a_finish(i_ctx_t *i_ctx_p) |
773 | 0 | { |
774 | 0 | os_ptr op = osp; |
775 | 0 | gs_cie_a *pcie = r_ptr(op, gs_cie_a); |
776 | |
|
777 | 0 | pcie->DecodeA = DecodeA_from_cache; |
778 | 0 | pcie->common.DecodeLMN = DecodeLMN_from_cache; |
779 | 0 | gs_cie_a_complete(pcie); |
780 | 0 | pop(1); |
781 | 0 | return 0; |
782 | 0 | } |
783 | | |
784 | | /* ------ Internal routines ------ */ |
785 | | |
786 | | /* Prepare to cache the values for one or more procedures. */ |
787 | | /* RJW: No longer used, but keeping it around in case it becomes useful |
788 | | * again in future. |
789 | | * static int cie_cache_finish1(i_ctx_t *); |
790 | | */ |
791 | | static int cie_cache_finish(i_ctx_t *); |
792 | | int |
793 | | cie_prepare_cache(i_ctx_t *i_ctx_p, const gs_range * domain, const ref * proc, |
794 | | cie_cache_floats * pcache, void *container, |
795 | | gs_ref_memory_t * imem, client_name_t cname) |
796 | 0 | { |
797 | 0 | int space = imemory_space(imem); |
798 | 0 | gs_sample_loop_params_t lp; |
799 | 0 | es_ptr ep; |
800 | |
|
801 | 0 | gs_cie_cache_init(&pcache->params, &lp, domain, cname); |
802 | 0 | pcache->params.is_identity = r_size(proc) == 0; |
803 | 0 | check_estack(9); |
804 | 0 | ep = esp; |
805 | 0 | make_real(ep + 9, lp.A); |
806 | 0 | make_int(ep + 8, lp.N); |
807 | 0 | make_real(ep + 7, lp.B); |
808 | 0 | ep[6] = *proc; |
809 | 0 | r_clear_attrs(ep + 6, a_executable); |
810 | 0 | make_op_estack(ep + 5, zcvx); |
811 | 0 | make_op_estack(ep + 4, zfor_samples); |
812 | 0 | make_op_estack(ep + 3, cie_cache_finish); |
813 | 0 | esp += 9; |
814 | | /* |
815 | | * The caches are embedded in the middle of other |
816 | | * structures, so we represent the pointer to the cache |
817 | | * as a pointer to the container plus an offset. |
818 | | */ |
819 | 0 | make_int(ep + 2, (char *)pcache - (char *)container); |
820 | 0 | make_struct(ep + 1, space, container); |
821 | 0 | return o_push_estack; |
822 | 0 | } |
823 | | /* Note that pc3 may be 0, indicating that there are only 3 caches to load. */ |
824 | | int |
825 | | cie_prepare_caches_4(i_ctx_t *i_ctx_p, const gs_range * domains, |
826 | | const ref * procs, |
827 | | cie_cache_floats * pc0, cie_cache_floats * pc1, |
828 | | cie_cache_floats * pc2, cie_cache_floats * pc3, |
829 | | void *container, |
830 | | gs_ref_memory_t * imem, client_name_t cname) |
831 | 0 | { |
832 | 0 | cie_cache_floats *pcn[4]; |
833 | 0 | int i, n, code = 0; |
834 | |
|
835 | 0 | pcn[0] = pc0, pcn[1] = pc1, pcn[2] = pc2; |
836 | 0 | if (pc3 == 0) |
837 | 0 | n = 3; |
838 | 0 | else |
839 | 0 | pcn[3] = pc3, n = 4; |
840 | 0 | for (i = 0; i < n && code >= 0; ++i) |
841 | 0 | code = cie_prepare_cache(i_ctx_p, domains + i, procs + i, pcn[i], |
842 | 0 | container, imem, cname); |
843 | 0 | return code; |
844 | 0 | } |
845 | | |
/* Store the result of caching one procedure. */
/*
 * On entry the e-stack top holds [container ref, offset] identifying the
 * cie_cache_floats to fill (see cie_prepare_cache), and the o-stack holds
 * the gx_cie_cache_size sampled values (just one value when 'replicate'
 * is true: that single value is copied into every cache slot).
 * Returns o_pop_estack on success after consuming both stacks' entries.
 */
static int
cie_cache_finish_store(i_ctx_t *i_ctx_p, bool replicate)
{
    os_ptr op = osp;
    cie_cache_floats *pcache;
    int code;

    check_esp(2);
    /* See above for the container + offset representation of */
    /* the pointer to the cache. */
    pcache = (cie_cache_floats *) (r_ptr(esp - 1, char) + esp->value.intval);

    pcache->params.is_identity = false;	/* cache_set_linear computes this */
    if_debug3m('c', imemory, "[c]cache "PRI_INTPTR" base=%g, factor=%g:\n",
               (intptr_t) pcache, pcache->params.base, pcache->params.factor);
    /* Fast path: pull all values off the o-stack in one call.  It can
       fail if the values straddle an o-stack block boundary. */
    if (replicate ||
        (code = float_params(op, gx_cie_cache_size, &pcache->values[0])) < 0
        ) {
        /* We might have underflowed the current stack block. */
        /* Handle the parameters one-by-one. */
        uint i;

        for (i = 0; i < gx_cie_cache_size; i++) {
            /* replicate: keep re-reading the top element; otherwise walk
               from the deepest sampled value up to the top. */
            ref *o = ref_stack_index(&o_stack, (replicate ? 0 : gx_cie_cache_size - 1 - i));
            if (o == NULL)
                /* NOTE(review): this path returns without the esp -= 2
                   cleanup done below — confirm whether the e-stack entries
                   should also be popped here. */
                return_error(gs_error_stackunderflow);

            code = float_param(o, &pcache->values[i]);
            if (code < 0) {
                esp -= 2;		/* pop pointer to cache */
                return code;
            }
        }
    }
#ifdef DEBUG
    if (gs_debug_c('c')) {
        int i;

        for (i = 0; i < gx_cie_cache_size; i += 4)
            dmlprintf5(imemory, "[c] cache[%3d]=%g, %g, %g, %g\n", i,
                       pcache->values[i], pcache->values[i + 1],
                       pcache->values[i + 2], pcache->values[i + 3]);
    }
#endif
    ref_stack_pop(&o_stack, (replicate ? 1 : gx_cie_cache_size));
    esp -= 2;			/* pop pointer to cache */
    return o_pop_estack;
}
895 | | static int |
896 | | cie_cache_finish(i_ctx_t *i_ctx_p) |
897 | 0 | { |
898 | 0 | return cie_cache_finish_store(i_ctx_p, false); |
899 | 0 | } |
900 | | #if 0 |
901 | | /* RJW: No longer used, but might be useful in future. */ |
902 | | static int |
903 | | cie_cache_finish1(i_ctx_t *i_ctx_p) |
904 | | { |
905 | | return cie_cache_finish_store(i_ctx_p, true); |
906 | | } |
907 | | #endif |
908 | | |
/* Push a finishing procedure on the e-stack. */
/* ptr will be the top element of the o-stack. */
/*
 * Pushes two entries: the operator 'finish_proc' and, above it, a struct
 * ref wrapping 'data' in the VM space of 'imem'.  The finishing procedure
 * therefore finds 'data' on the o-stack top when the interpreter runs it.
 */
int
cie_cache_push_finish(i_ctx_t *i_ctx_p, op_proc_t finish_proc,
                      gs_ref_memory_t * imem, void *data)
{
    check_estack(2);
    push_op_estack(finish_proc);
    ++esp;
    make_struct(esp, imemory_space(imem), data);
    return o_push_estack;
}
921 | | |
922 | | /* Special functions related to the creation of ICC profiles |
923 | | from the PS CIE color management objects. These basically |
924 | | make use of the existing objects in the CIE stuctures to |
925 | | store the sampled procs. These sampled procs are then |
926 | | used in the creation of the ICC profiles */ |
927 | | |
928 | | /* Push the sequence of commands onto the execution stack |
929 | | so that we sample the procs */ |
930 | | static int cie_create_icc(i_ctx_t *); |
931 | | static int |
932 | | cie_prepare_iccproc(i_ctx_t *i_ctx_p, const gs_range * domain, const ref * proc, |
933 | | cie_cache_floats * pcache, void *container, |
934 | | const gs_ref_memory_t * imem, client_name_t cname) |
935 | 0 | { |
936 | 0 | int space = imemory_space(imem); |
937 | 0 | gs_sample_loop_params_t lp; |
938 | 0 | es_ptr ep; |
939 | |
|
940 | 0 | gs_cie_cache_init(&pcache->params, &lp, domain, cname); |
941 | 0 | pcache->params.is_identity = r_size(proc) == 0; |
942 | 0 | check_estack(9); |
943 | 0 | ep = esp; |
944 | 0 | make_real(ep + 9, lp.A); |
945 | 0 | make_int(ep + 8, lp.N); |
946 | 0 | make_real(ep + 7, lp.B); |
947 | 0 | ep[6] = *proc; |
948 | 0 | r_clear_attrs(ep + 6, a_executable); |
949 | 0 | make_op_estack(ep + 5, zcvx); |
950 | 0 | make_op_estack(ep + 4, zfor_samples); |
951 | 0 | make_op_estack(ep + 3, cie_create_icc); |
952 | 0 | esp += 9; |
953 | | /* |
954 | | * The caches are embedded in the middle of other |
955 | | * structures, so we represent the pointer to the cache |
956 | | * as a pointer to the container plus an offset. |
957 | | */ |
958 | 0 | make_int(ep + 2, (char *)pcache - (char *)container); |
959 | 0 | make_struct(ep + 1, space, container); |
960 | 0 | return o_push_estack; |
961 | 0 | } |
962 | | |
963 | | int |
964 | | cieicc_prepare_caches(i_ctx_t *i_ctx_p, const gs_range * domains, |
965 | | const ref * procs, |
966 | | cie_cache_floats * pc0, cie_cache_floats * pc1, |
967 | | cie_cache_floats * pc2, cie_cache_floats * pc3, |
968 | | void *container, |
969 | | const gs_ref_memory_t * imem, client_name_t cname) |
970 | 0 | { |
971 | 0 | cie_cache_floats *pcn[4]; |
972 | 0 | int i, n, code = 0; |
973 | |
|
974 | 0 | pcn[0] = pc0, pcn[1] = pc1, pcn[2] = pc2; |
975 | 0 | if (pc3 == 0) |
976 | 0 | n = 3; |
977 | 0 | else |
978 | 0 | pcn[3] = pc3, n = 4; |
979 | 0 | for (i = 0; i < n && code >= 0; ++i) |
980 | 0 | code = cie_prepare_iccproc(i_ctx_p, domains + i, procs + i, pcn[i], |
981 | 0 | container, imem, cname); |
982 | 0 | return code; |
983 | 0 | } |
984 | | |
985 | | /* We have sampled the procs. Go ahead and create the ICC profile. */ |
986 | | static int |
987 | | cie_create_icc(i_ctx_t *i_ctx_p) |
988 | 0 | { |
989 | 0 | os_ptr op = osp; |
990 | 0 | cie_cache_floats *pcache; |
991 | 0 | int code; |
992 | |
|
993 | 0 | check_esp(2); |
994 | | /* See above for the container + offset representation of */ |
995 | | /* the pointer to the cache. */ |
996 | 0 | pcache = (cie_cache_floats *) (r_ptr(esp - 1, char) + esp->value.intval); |
997 | |
|
998 | 0 | pcache->params.is_identity = false; /* cache_set_linear computes this */ |
999 | 0 | if_debug3m('c', imemory, "[c]icc_sample_proc "PRI_INTPTR" base=%g, factor=%g:\n", |
1000 | 0 | (intptr_t) pcache, pcache->params.base, pcache->params.factor); |
1001 | 0 | if ((code = float_params(op, gx_cie_cache_size, &pcache->values[0])) < 0) { |
1002 | | /* We might have underflowed the current stack block. */ |
1003 | | /* Handle the parameters one-by-one. */ |
1004 | 0 | uint i; |
1005 | |
|
1006 | 0 | for (i = 0; i < gx_cie_cache_size; i++) { |
1007 | 0 | const ref *o = ref_stack_index(&o_stack,gx_cie_cache_size - 1 - i); |
1008 | |
|
1009 | 0 | if (o == NULL) |
1010 | 0 | code = gs_note_error(gs_error_stackunderflow); |
1011 | 0 | else |
1012 | 0 | code = float_param(o, &pcache->values[i]); |
1013 | 0 | if (code < 0) |
1014 | 0 | return code; |
1015 | 0 | } |
1016 | 0 | } |
1017 | | #ifdef DEBUG |
1018 | | if (gs_debug_c('c')) { |
1019 | | int i; |
1020 | | |
1021 | | for (i = 0; i < gx_cie_cache_size; i += 4) |
1022 | | dmlprintf5(imemory, "[c] icc_sample_proc[%3d]=%g, %g, %g, %g\n", i, |
1023 | | pcache->values[i], pcache->values[i + 1], |
1024 | | pcache->values[i + 2], pcache->values[i + 3]); |
1025 | | } |
1026 | | #endif |
1027 | 0 | ref_stack_pop(&o_stack, gx_cie_cache_size); |
1028 | 0 | esp -= 2; /* pop pointer to cache */ |
1029 | 0 | return o_pop_estack; |
1030 | 0 | } |