/src/c-blosc2/blosc/blosc2.c
Line | Count | Source |
1 | | /********************************************************************* |
2 | | Blosc - Blocked Shuffling and Compression Library |
3 | | |
4 | | Copyright (c) 2021 Blosc Development Team <blosc@blosc.org> |
5 | | https://blosc.org |
6 | | License: BSD 3-Clause (see LICENSE.txt) |
7 | | |
8 | | See LICENSE.txt for details about copyright and rights to use. |
9 | | **********************************************************************/ |
10 | | |
11 | | |
12 | | #include "blosc2.h" |
13 | | #include "blosc-private.h" |
14 | | #include "../plugins/codecs/zfp/blosc2-zfp.h" |
15 | | #include "frame.h" |
16 | | #include "b2nd-private.h" |
17 | | #include "schunk-private.h" |
18 | | |
19 | | #if defined(USING_CMAKE) |
20 | | #include "config.h" |
21 | | #endif /* USING_CMAKE */ |
22 | | #include "context.h" |
23 | | |
24 | | #include "shuffle.h" |
25 | | #include "delta.h" |
26 | | #include "trunc-prec.h" |
27 | | #include "blosclz.h" |
28 | | #include "stune.h" |
29 | | #include "blosc2/codecs-registry.h" |
30 | | #include "blosc2/filters-registry.h" |
31 | | #include "blosc2/tuners-registry.h" |
32 | | |
33 | | #include "lz4.h" |
34 | | #define LZ4_HC_STATIC_LINKING_ONLY |
35 | | #include "lz4hc.h" |
36 | | #ifdef HAVE_IPP |
37 | | #include <ipps.h> |
38 | | #include <ippdc.h> |
39 | | #endif |
40 | | #if defined(HAVE_ZLIB_NG) |
41 | | #ifdef ZLIB_COMPAT |
42 | | #include "zlib.h" |
43 | | #else |
44 | | #include "zlib-ng.h" |
45 | | #endif |
46 | | #elif defined(HAVE_ZLIB) |
47 | | #include "zlib.h" |
48 | | #endif /* HAVE_MINIZ */ |
49 | | #if defined(HAVE_ZSTD) |
50 | | #include "zstd.h" |
51 | | #include "zstd_errors.h" |
52 | | // #include "cover.h" // for experimenting with fast cover training for building dicts |
53 | | #include "zdict.h" |
54 | | #endif /* HAVE_ZSTD */ |
55 | | |
56 | | #if defined(_WIN32) && !defined(__MINGW32__) |
57 | | #include <windows.h> |
58 | | #include <malloc.h> |
59 | | #include <process.h> |
60 | | #define getpid _getpid |
61 | | #endif /* _WIN32 */ |
62 | | |
63 | | #include "threading.h" |
64 | | |
65 | | #include <stdio.h> |
66 | | #include <stdlib.h> |
67 | | #include <errno.h> |
68 | | #include <string.h> |
69 | | #include <sys/types.h> |
70 | | #include <assert.h> |
71 | | #include <math.h> |
72 | | #include <stdint.h> |
73 | | |
74 | | |
/* Synchronization variables */

/* Global context for non-contextual API */
static blosc2_context* g_global_context;
static blosc2_pthread_mutex_t global_comp_mutex;
/* The compressor to use by default */
static int g_compressor = BLOSC_BLOSCLZ;
/* Whether the delta filter is enabled by default (0 = off) */
static int g_delta = 0;
/* The default splitmode */
static int32_t g_splitmode = BLOSC_FORWARD_COMPAT_SPLIT;
/* The number of threads to use by default */
static int16_t g_nthreads = 1;
/* Forced block size; 0 means let the library choose */
static int32_t g_force_blocksize = 0;
/* NOTE(review): presumably flips once the library is initialized — set/read outside this view */
static int g_initlib = 0;
static blosc2_schunk* g_schunk = NULL; /* the pointer to super-chunk */

/* Registry of codecs (built-in ones are resolved directly; these hold registered/user-defined entries) */
blosc2_codec g_codecs[256] = {0};
uint8_t g_ncodecs = 0;

/* Registry of filters */
static blosc2_filter g_filters[256] = {0};
static uint64_t g_nfilters = 0;

/* Registry of I/O backends */
static blosc2_io_cb g_ios[256] = {0};
static uint64_t g_nio = 0;

/* Registry of tuners */
blosc2_tuner g_tuners[256] = {0};
int g_ntuners = 0;

/* The tuner to use by default */
static int g_tuner = BLOSC_STUNE;

// Forward declarations
int init_threadpool(blosc2_context *context);
int release_threadpool(blosc2_context *context);
107 | | |
/* Macros for synchronization */

/* Wait until all threads are initialized */
#ifdef BLOSC_POSIX_BARRIERS
/* POSIX-barrier flavour.  NOTE: expands an unqualified `rc`, so the calling
   scope must already declare an int `rc`; on barrier failure the CALLER
   returns RET_VAL. */
#define WAIT_INIT(RET_VAL, CONTEXT_PTR) \
  do { \
    rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_init); \
    if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \
      BLOSC_TRACE_ERROR("Could not wait on barrier (init): %d", rc); \
      return((RET_VAL)); \
    } \
  } while (0)
#else
/* Portable flavour: emulate the barrier with a counter plus a condition
   variable; the last arriving thread broadcasts to release the others. */
#define WAIT_INIT(RET_VAL, CONTEXT_PTR) \
  do { \
    blosc2_pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \
    if ((CONTEXT_PTR)->count_threads < (CONTEXT_PTR)->nthreads) { \
      (CONTEXT_PTR)->count_threads++; \
      blosc2_pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \
                               &(CONTEXT_PTR)->count_threads_mutex); \
    } \
    else { \
      blosc2_pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \
    } \
    blosc2_pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); \
  } while (0)
#endif

/* Wait for all threads to finish */
#ifdef BLOSC_POSIX_BARRIERS
/* POSIX-barrier flavour; same `rc` requirement as WAIT_INIT above. */
#define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \
  do { \
    rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_finish); \
    if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \
      BLOSC_TRACE_ERROR("Could not wait on barrier (finish)"); \
      return((RET_VAL)); \
    } \
  } while (0)
#else
/* Portable flavour: counter counts DOWN here; the last thread to arrive
   (counter already 0) broadcasts to release the waiters. */
#define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \
  do { \
    blosc2_pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \
    if ((CONTEXT_PTR)->count_threads > 0) { \
      (CONTEXT_PTR)->count_threads--; \
      blosc2_pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \
                               &(CONTEXT_PTR)->count_threads_mutex); \
    } \
    else { \
      blosc2_pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \
    } \
    blosc2_pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); \
  } while (0)
#endif
161 | | |
162 | | |
/* global variable to change threading backend from Blosc-managed to caller-managed */
static blosc_threads_callback threads_callback = 0;
static void *threads_callback_data = 0;


/* Non-threadsafe; should be called before any other Blosc function in
   order to change how threads are managed.  Stores the caller-provided
   callback plus an opaque data pointer that is handed back to it. */
void blosc2_set_threads_callback(blosc_threads_callback callback, void *callback_data)
{
  threads_callback = callback;
  threads_callback_data = callback_data;
}
175 | | |
176 | | |
/* Portable allocator that hands back a 32-byte aligned buffer (AVX2-friendly).
   Returns NULL (after tracing) when the underlying allocation fails. */
static uint8_t* my_malloc(size_t size) {
  void* buf = NULL;
  int rc = 0;

  /* Keep aligned allocations valid under Valgrind and POSIX wrappers. */
  if (size == 0) {
    size = 1;
  }

  /* Do an alignment to 32 bytes because AVX2 is supported */
#if defined(_WIN32)
  /* A (void *) cast needed for avoiding a warning with MINGW :-/ */
  buf = (void *)_aligned_malloc(size, 32);
#elif _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600
  /* Platform does have an implementation of posix_memalign */
  rc = posix_memalign(&buf, 32, size);
#else
  buf = malloc(size);
#endif /* _WIN32 */

  if (rc != 0 || buf == NULL) {
    BLOSC_TRACE_ERROR("Error allocating memory!");
    return NULL;
  }
  return (uint8_t*)buf;
}
204 | | |
205 | | |
/* Release memory booked by my_malloc */
static void my_free(void* block) {
#if defined(_WIN32)
  /* my_malloc uses _aligned_malloc on Windows, so the matching
     _aligned_free must be used here. */
  _aligned_free(block);
#else
  free(block);
#endif /* _WIN32 */
}
214 | | |
215 | | |
216 | | /* |
217 | | * Conversion routines between compressor and compression libraries |
218 | | */ |
219 | | |
220 | | /* Return the library code associated with the compressor name */ |
221 | 0 | static int compname_to_clibcode(const char* compname) { |
222 | 0 | if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) |
223 | 0 | return BLOSC_BLOSCLZ_LIB; |
224 | 0 | if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) |
225 | 0 | return BLOSC_LZ4_LIB; |
226 | 0 | if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) |
227 | 0 | return BLOSC_LZ4_LIB; |
228 | 0 | if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) |
229 | 0 | return BLOSC_ZLIB_LIB; |
230 | 0 | if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) |
231 | 0 | return BLOSC_ZSTD_LIB; |
232 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
233 | 0 | if (strcmp(compname, g_codecs[i].compname) == 0) |
234 | 0 | return g_codecs[i].complib; |
235 | 0 | } |
236 | 0 | return BLOSC2_ERROR_NOT_FOUND; |
237 | 0 | } |
238 | | |
239 | | /* Return the library name associated with the compressor code */ |
240 | 0 | static const char* clibcode_to_clibname(int clibcode) { |
241 | 0 | if (clibcode == BLOSC_BLOSCLZ_LIB) return BLOSC_BLOSCLZ_LIBNAME; |
242 | 0 | if (clibcode == BLOSC_LZ4_LIB) return BLOSC_LZ4_LIBNAME; |
243 | 0 | if (clibcode == BLOSC_ZLIB_LIB) return BLOSC_ZLIB_LIBNAME; |
244 | 0 | if (clibcode == BLOSC_ZSTD_LIB) return BLOSC_ZSTD_LIBNAME; |
245 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
246 | 0 | if (clibcode == g_codecs[i].complib) |
247 | 0 | return g_codecs[i].compname; |
248 | 0 | } |
249 | 0 | return NULL; /* should never happen */ |
250 | 0 | } |
251 | | |
252 | | |
253 | | /* |
254 | | * Conversion routines between compressor names and compressor codes |
255 | | */ |
256 | | |
257 | | /* Get the compressor name associated with the compressor code */ |
258 | 0 | int blosc2_compcode_to_compname(int compcode, const char** compname) { |
259 | 0 | int code = -1; /* -1 means non-existent compressor code */ |
260 | 0 | const char* name = NULL; |
261 | | |
262 | | /* Map the compressor code */ |
263 | 0 | if (compcode == BLOSC_BLOSCLZ) |
264 | 0 | name = BLOSC_BLOSCLZ_COMPNAME; |
265 | 0 | else if (compcode == BLOSC_LZ4) |
266 | 0 | name = BLOSC_LZ4_COMPNAME; |
267 | 0 | else if (compcode == BLOSC_LZ4HC) |
268 | 0 | name = BLOSC_LZ4HC_COMPNAME; |
269 | 0 | else if (compcode == BLOSC_ZLIB) |
270 | 0 | name = BLOSC_ZLIB_COMPNAME; |
271 | 0 | else if (compcode == BLOSC_ZSTD) |
272 | 0 | name = BLOSC_ZSTD_COMPNAME; |
273 | 0 | else { |
274 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
275 | 0 | if (compcode == g_codecs[i].compcode) { |
276 | 0 | name = g_codecs[i].compname; |
277 | 0 | break; |
278 | 0 | } |
279 | 0 | } |
280 | 0 | } |
281 | |
|
282 | 0 | *compname = name; |
283 | | |
284 | | /* Guess if there is support for this code */ |
285 | 0 | if (compcode == BLOSC_BLOSCLZ) |
286 | 0 | code = BLOSC_BLOSCLZ; |
287 | 0 | else if (compcode == BLOSC_LZ4) |
288 | 0 | code = BLOSC_LZ4; |
289 | 0 | else if (compcode == BLOSC_LZ4HC) |
290 | 0 | code = BLOSC_LZ4HC; |
291 | 0 | #if defined(HAVE_ZLIB) |
292 | 0 | else if (compcode == BLOSC_ZLIB) |
293 | 0 | code = BLOSC_ZLIB; |
294 | 0 | #endif /* HAVE_ZLIB */ |
295 | 0 | #if defined(HAVE_ZSTD) |
296 | 0 | else if (compcode == BLOSC_ZSTD) |
297 | 0 | code = BLOSC_ZSTD; |
298 | 0 | #endif /* HAVE_ZSTD */ |
299 | 0 | else if (compcode >= BLOSC_LAST_CODEC) |
300 | 0 | code = compcode; |
301 | 0 | return code; |
302 | 0 | } |
303 | | |
304 | | /* Get the compressor code for the compressor name. -1 if it is not available */ |
305 | 2.74k | int blosc2_compname_to_compcode(const char* compname) { |
306 | 2.74k | int code = -1; /* -1 means non-existent compressor code */ |
307 | | |
308 | 2.74k | if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) { |
309 | 2.74k | code = BLOSC_BLOSCLZ; |
310 | 2.74k | } |
311 | 0 | else if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) { |
312 | 0 | code = BLOSC_LZ4; |
313 | 0 | } |
314 | 0 | else if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) { |
315 | 0 | code = BLOSC_LZ4HC; |
316 | 0 | } |
317 | 0 | #if defined(HAVE_ZLIB) |
318 | 0 | else if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) { |
319 | 0 | code = BLOSC_ZLIB; |
320 | 0 | } |
321 | 0 | #endif /* HAVE_ZLIB */ |
322 | 0 | #if defined(HAVE_ZSTD) |
323 | 0 | else if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) { |
324 | 0 | code = BLOSC_ZSTD; |
325 | 0 | } |
326 | 0 | #endif /* HAVE_ZSTD */ |
327 | 0 | else{ |
328 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
329 | 0 | if (strcmp(compname, g_codecs[i].compname) == 0) { |
330 | 0 | code = g_codecs[i].compcode; |
331 | 0 | break; |
332 | 0 | } |
333 | 0 | } |
334 | 0 | } |
335 | 2.74k | return code; |
336 | 2.74k | } |
337 | | |
338 | | |
339 | | /* Convert compressor code to blosc compressor format code */ |
340 | 51.5k | static int compcode_to_compformat(int compcode) { |
341 | 51.5k | switch (compcode) { |
342 | 51.5k | case BLOSC_BLOSCLZ: return BLOSC_BLOSCLZ_FORMAT; |
343 | 0 | case BLOSC_LZ4: return BLOSC_LZ4_FORMAT; |
344 | 0 | case BLOSC_LZ4HC: return BLOSC_LZ4HC_FORMAT; |
345 | | |
346 | 0 | #if defined(HAVE_ZLIB) |
347 | 0 | case BLOSC_ZLIB: return BLOSC_ZLIB_FORMAT; |
348 | 0 | #endif /* HAVE_ZLIB */ |
349 | | |
350 | 0 | #if defined(HAVE_ZSTD) |
351 | 0 | case BLOSC_ZSTD: return BLOSC_ZSTD_FORMAT; |
352 | 0 | break; |
353 | 0 | #endif /* HAVE_ZSTD */ |
354 | 0 | default: |
355 | 0 | return BLOSC_UDCODEC_FORMAT; |
356 | 51.5k | } |
357 | 0 | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
358 | 0 | } |
359 | | |
360 | | |
361 | | /* Convert compressor code to blosc compressor format version */ |
362 | 52.0k | static int compcode_to_compversion(int compcode) { |
363 | | /* Write compressor format */ |
364 | 52.0k | switch (compcode) { |
365 | 52.0k | case BLOSC_BLOSCLZ: |
366 | 52.0k | return BLOSC_BLOSCLZ_VERSION_FORMAT; |
367 | 0 | case BLOSC_LZ4: |
368 | 0 | return BLOSC_LZ4_VERSION_FORMAT; |
369 | 0 | case BLOSC_LZ4HC: |
370 | 0 | return BLOSC_LZ4HC_VERSION_FORMAT; |
371 | | |
372 | 0 | #if defined(HAVE_ZLIB) |
373 | 0 | case BLOSC_ZLIB: |
374 | 0 | return BLOSC_ZLIB_VERSION_FORMAT; |
375 | 0 | break; |
376 | 0 | #endif /* HAVE_ZLIB */ |
377 | | |
378 | 0 | #if defined(HAVE_ZSTD) |
379 | 0 | case BLOSC_ZSTD: |
380 | 0 | return BLOSC_ZSTD_VERSION_FORMAT; |
381 | 0 | break; |
382 | 0 | #endif /* HAVE_ZSTD */ |
383 | 0 | default: |
384 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
385 | 0 | if (compcode == g_codecs[i].compcode) { |
386 | 0 | return g_codecs[i].version; |
387 | 0 | } |
388 | 0 | } |
389 | 52.0k | } |
390 | 0 | return BLOSC2_ERROR_FAILURE; |
391 | 52.0k | } |
392 | | |
393 | | |
/* Compress one block with LZ4 (or Intel IPP's LZ4 when built with HAVE_IPP).
 * Returns the compressed size in bytes, 0 when the data cannot fit in
 * `maxout`, or a negative BLOSC2_ERROR_* code on hard failure. */
static int lz4_wrap_compress(const char* input, size_t input_length,
                             char* output, size_t maxout, int accel, void* hash_table,
                             struct thread_context* thread_context) {
  BLOSC_UNUSED_PARAM(accel);
  int cbytes;
  blosc2_context* context = thread_context->parent_context;
#ifdef HAVE_IPP
  if (context->use_dict) {
    BLOSC_TRACE_WARNING("LZ4 dict compression is not supported with IPP. Ignoring dict.");
  }
  if (hash_table == NULL) {
    return BLOSC2_ERROR_INVALID_PARAM; // the hash table should always be initialized
  }
  int outlen = (int)maxout;
  int inlen = (int)input_length;
  // I have not found any function that uses `accel` like in `LZ4_compress_fast`, but
  // the IPP LZ4Safe call does a pretty good job on compressing well, so let's use it
  IppStatus status = ippsEncodeLZ4Safe_8u((const Ipp8u*)input, &inlen,
                                          (Ipp8u*)output, &outlen, (Ipp8u*)hash_table);
  if (status == ippStsDstSizeLessExpected) {
    return 0; // we cannot compress in required outlen
  }
  else if (status != ippStsNoErr) {
    return BLOSC2_ERROR_FAILURE; // an unexpected error happened
  }
  cbytes = outlen;
#else
  BLOSC_UNUSED_PARAM(hash_table);
  /* `accel` is overridden here on purpose, not ignored entirely */
  accel = 1; // deactivate acceleration to match IPP behaviour
  if (context->use_dict && context->dict_cdict != NULL) {
    /* Lazily create one LZ4 stream per thread and reuse it across blocks */
    if (thread_context->lz4_cstream == NULL) {
      thread_context->lz4_cstream = LZ4_createStream();
    }
    // Reset the thread stream to dict-only context before each block so that
    // each compressed block references only the dictionary (not prior blocks).
    // This ensures independent decompressibility with LZ4_decompress_safe_usingDict.
    LZ4_loadDict((LZ4_stream_t*)thread_context->lz4_cstream,
                 (const char*)context->dict_buffer, (int)context->dict_size);
    cbytes = LZ4_compress_fast_continue((LZ4_stream_t*)thread_context->lz4_cstream,
                                        input, output, (int)input_length, (int)maxout, accel);
  } else {
    cbytes = LZ4_compress_fast(input, output, (int)input_length, (int)maxout, accel);
  }
#endif
  return cbytes;
}
440 | | |
441 | | |
/* Compress one block with LZ4HC (high-compression LZ4 variant).
 * Returns the compressed size, or a negative BLOSC2_ERROR_* code; inputs
 * larger than 2 GB are rejected up front. */
static int lz4hc_wrap_compress(const char* input, size_t input_length,
                               char* output, size_t maxout, int clevel,
                               struct thread_context* thread_context) {
  int cbytes;
  /* LZ4HC takes `int` sizes, so refuse anything at or above 2^31 bytes */
  if (input_length > (size_t)(UINT32_C(2) << 30))
    return BLOSC2_ERROR_2GB_LIMIT;
  blosc2_context* context = thread_context->parent_context;
  /* clevel for lz4hc goes up to 12, at least in LZ4 1.7.5
   * but levels larger than 9 do not buy much compression. */
  if (context->use_dict && context->dict_cdict != NULL) {
    /* Lazily create one LZ4HC stream per thread and reuse it across blocks */
    if (thread_context->lz4hc_cstream == NULL) {
      thread_context->lz4hc_cstream = LZ4_createStreamHC();
    }
    // Reset to dict-only context (with correct clevel) before each block.
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)thread_context->lz4hc_cstream, clevel);
    LZ4_loadDictHC((LZ4_streamHC_t*)thread_context->lz4hc_cstream,
                   (const char*)context->dict_buffer, (int)context->dict_size);
    cbytes = LZ4_compress_HC_continue((LZ4_streamHC_t*)thread_context->lz4hc_cstream,
                                      input, output, (int)input_length, (int)maxout);
  } else {
    cbytes = LZ4_compress_HC(input, output, (int)input_length, (int)maxout, clevel);
  }
  return cbytes;
}
466 | | |
467 | | |
468 | | static int lz4_wrap_decompress(const char* input, size_t compressed_length, |
469 | | char* output, size_t maxout, |
470 | 0 | struct thread_context* thread_context) { |
471 | 0 | int nbytes; |
472 | 0 | blosc2_context* context = thread_context->parent_context; |
473 | | #ifdef HAVE_IPP |
474 | | int outlen = (int)maxout; |
475 | | int inlen = (int)compressed_length; |
476 | | IppStatus status; |
477 | | status = ippsDecodeLZ4_8u((const Ipp8u*)input, inlen, (Ipp8u*)output, &outlen); |
478 | | nbytes = (status == ippStsNoErr) ? outlen : -outlen; |
479 | | #else |
480 | 0 | if (context->use_dict && context->dict_buffer != NULL && context->dict_size > 0) { |
481 | 0 | nbytes = LZ4_decompress_safe_usingDict(input, output, |
482 | 0 | (int)compressed_length, (int)maxout, |
483 | 0 | (const char*)context->dict_buffer, |
484 | 0 | (int)context->dict_size); |
485 | 0 | } else { |
486 | 0 | nbytes = LZ4_decompress_safe(input, output, (int)compressed_length, (int)maxout); |
487 | 0 | } |
488 | 0 | #endif |
489 | 0 | if (nbytes != (int)maxout) { |
490 | 0 | return 0; |
491 | 0 | } |
492 | 0 | return (int)maxout; |
493 | 0 | } |
494 | | |
495 | | #if defined(HAVE_ZLIB) |
496 | | /* zlib is not very respectful with sharing name space with others. |
497 | | Fortunately, its names do not collide with those already in blosc. */ |
498 | | static int zlib_wrap_compress(const char* input, size_t input_length, |
499 | 0 | char* output, size_t maxout, int clevel) { |
500 | 0 | int status; |
501 | | #if defined(HAVE_ZLIB_NG) && ! defined(ZLIB_COMPAT) |
502 | | size_t cl = maxout; |
503 | | status = zng_compress2( |
504 | | (uint8_t*)output, &cl, (uint8_t*)input, input_length, clevel); |
505 | | #else |
506 | 0 | uLongf cl = (uLongf)maxout; |
507 | 0 | status = compress2( |
508 | 0 | (Bytef*)output, &cl, (Bytef*)input, (uLong)input_length, clevel); |
509 | 0 | #endif |
510 | 0 | if (status != Z_OK) { |
511 | 0 | return 0; |
512 | 0 | } |
513 | 0 | return (int)cl; |
514 | 0 | } |
515 | | |
516 | | static int zlib_wrap_decompress(const char* input, size_t compressed_length, |
517 | 0 | char* output, size_t maxout) { |
518 | 0 | int status; |
519 | | #if defined(HAVE_ZLIB_NG) && ! defined(ZLIB_COMPAT) |
520 | | size_t ul = maxout; |
521 | | status = zng_uncompress( |
522 | | (uint8_t*)output, &ul, (uint8_t*)input, compressed_length); |
523 | | #else |
524 | 0 | uLongf ul = (uLongf)maxout; |
525 | 0 | status = uncompress( |
526 | 0 | (Bytef*)output, &ul, (Bytef*)input, (uLong)compressed_length); |
527 | 0 | #endif |
528 | 0 | if (status != Z_OK) { |
529 | 0 | return 0; |
530 | 0 | } |
531 | 0 | return (int)ul; |
532 | 0 | } |
533 | | #endif /* HAVE_ZLIB */ |
534 | | |
535 | | |
536 | | #if defined(HAVE_ZSTD) |
537 | | static int zstd_wrap_compress(struct thread_context* thread_context, |
538 | | const char* input, size_t input_length, |
539 | 0 | char* output, size_t maxout, int clevel) { |
540 | 0 | size_t code; |
541 | 0 | blosc2_context* context = thread_context->parent_context; |
542 | |
|
543 | 0 | clevel = (clevel < 9) ? clevel * 2 - 1 : ZSTD_maxCLevel(); |
544 | | /* Make the level 8 close enough to maxCLevel */ |
545 | 0 | if (clevel == 8) clevel = ZSTD_maxCLevel() - 2; |
546 | |
|
547 | 0 | if (thread_context->zstd_cctx == NULL) { |
548 | 0 | thread_context->zstd_cctx = ZSTD_createCCtx(); |
549 | 0 | } |
550 | |
|
551 | 0 | if (context->use_dict) { |
552 | 0 | assert(context->dict_cdict != NULL); |
553 | 0 | code = ZSTD_compress_usingCDict( |
554 | 0 | thread_context->zstd_cctx, (void*)output, maxout, (void*)input, |
555 | 0 | input_length, context->dict_cdict); |
556 | 0 | } else { |
557 | 0 | code = ZSTD_compressCCtx(thread_context->zstd_cctx, |
558 | 0 | (void*)output, maxout, (void*)input, input_length, clevel); |
559 | 0 | } |
560 | 0 | if (ZSTD_isError(code) != ZSTD_error_no_error) { |
561 | | // Blosc will just memcpy this buffer |
562 | 0 | return 0; |
563 | 0 | } |
564 | 0 | return (int)code; |
565 | 0 | } |
566 | | |
567 | | static int zstd_wrap_decompress(struct thread_context* thread_context, |
568 | | const char* input, size_t compressed_length, |
569 | 0 | char* output, size_t maxout) { |
570 | 0 | size_t code; |
571 | 0 | blosc2_context* context = thread_context->parent_context; |
572 | |
|
573 | 0 | if (thread_context->zstd_dctx == NULL) { |
574 | 0 | thread_context->zstd_dctx = ZSTD_createDCtx(); |
575 | 0 | } |
576 | |
|
577 | 0 | if (context->use_dict) { |
578 | 0 | assert(context->dict_ddict != NULL); |
579 | 0 | code = ZSTD_decompress_usingDDict( |
580 | 0 | thread_context->zstd_dctx, (void*)output, maxout, (void*)input, |
581 | 0 | compressed_length, context->dict_ddict); |
582 | 0 | } else { |
583 | 0 | code = ZSTD_decompressDCtx(thread_context->zstd_dctx, |
584 | 0 | (void*)output, maxout, (void*)input, compressed_length); |
585 | 0 | } |
586 | 0 | if (ZSTD_isError(code) != ZSTD_error_no_error) { |
587 | 0 | BLOSC_TRACE_ERROR("Error in ZSTD decompression: '%s'. Giving up.", |
588 | 0 | ZDICT_getErrorName(code)); |
589 | 0 | return 0; |
590 | 0 | } |
591 | 0 | return (int)code; |
592 | 0 | } |
593 | | #endif /* HAVE_ZSTD */ |
594 | | |
595 | | /* Compute acceleration for blosclz */ |
596 | 52.1k | static int get_accel(const blosc2_context* context) { |
597 | 52.1k | int clevel = context->clevel; |
598 | | |
599 | 52.1k | if (context->compcode == BLOSC_LZ4) { |
600 | | /* This acceleration setting based on discussions held in: |
601 | | * https://groups.google.com/forum/#!topic/lz4c/zosy90P8MQw |
602 | | */ |
603 | 0 | return (10 - clevel); |
604 | 0 | } |
605 | 52.1k | return 1; |
606 | 52.1k | } |
607 | | |
608 | | |
609 | 411k | int do_nothing(uint8_t filter, char cmode) { |
610 | 411k | if (cmode == 'c') { |
611 | 312k | return (filter == BLOSC_NOFILTER); |
612 | 312k | } else { |
613 | | // TRUNC_PREC do not have to be applied during decompression |
614 | 98.1k | return ((filter == BLOSC_NOFILTER) || (filter == BLOSC_TRUNC_PREC)); |
615 | 98.1k | } |
616 | 411k | } |
617 | | |
618 | | |
619 | 11.0k | int next_filter(const uint8_t* filters, int current_filter, char cmode) { |
620 | 11.0k | for (int i = current_filter - 1; i >= 0; i--) { |
621 | 11.0k | if (!do_nothing(filters[i], cmode)) { |
622 | 11.0k | return filters[i]; |
623 | 11.0k | } |
624 | 11.0k | } |
625 | 0 | return BLOSC_NOFILTER; |
626 | 11.0k | } |
627 | | |
628 | | |
629 | 66.6k | int last_filter(const uint8_t* filters, char cmode) { |
630 | 66.6k | int last_index = -1; |
631 | 466k | for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { |
632 | 399k | if (!do_nothing(filters[i], cmode)) { |
633 | 49.3k | last_index = i; |
634 | 49.3k | } |
635 | 399k | } |
636 | 66.6k | return last_index; |
637 | 66.6k | } |
638 | | |
639 | | |
640 | | /* Convert filter pipeline to filter flags */ |
641 | 68.7k | static uint8_t filters_to_flags(const uint8_t* filters) { |
642 | 68.7k | uint8_t flags = 0; |
643 | | |
644 | 481k | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
645 | 412k | switch (filters[i]) { |
646 | 28.3k | case BLOSC_SHUFFLE: |
647 | 28.3k | flags |= BLOSC_DOSHUFFLE; |
648 | 28.3k | break; |
649 | 22.7k | case BLOSC_BITSHUFFLE: |
650 | 22.7k | flags |= BLOSC_DOBITSHUFFLE; |
651 | 22.7k | break; |
652 | 0 | case BLOSC_DELTA: |
653 | 0 | flags |= BLOSC_DODELTA; |
654 | 0 | break; |
655 | 361k | default : |
656 | 361k | break; |
657 | 412k | } |
658 | 412k | } |
659 | 68.7k | return flags; |
660 | 68.7k | } |
661 | | |
662 | | |
663 | | /* Convert filter flags to filter pipeline */ |
664 | 136k | static void flags_to_filters(const uint8_t flags, uint8_t* filters) { |
665 | | /* Initialize the filter pipeline */ |
666 | 136k | memset(filters, 0, BLOSC2_MAX_FILTERS); |
667 | | /* Fill the filter pipeline */ |
668 | 136k | if (flags & BLOSC_DOSHUFFLE) |
669 | 136k | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; |
670 | 136k | if (flags & BLOSC_DOBITSHUFFLE) |
671 | 136k | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; |
672 | 136k | if (flags & BLOSC_DODELTA) |
673 | 0 | filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; |
674 | 136k | } |
675 | | |
676 | | |
677 | | /* Get filter flags from header flags */ |
678 | | static uint8_t get_filter_flags(const uint8_t header_flags, |
679 | 0 | const int32_t typesize) { |
680 | 0 | uint8_t flags = 0; |
681 | |
|
682 | 0 | if ((header_flags & BLOSC_DOSHUFFLE) && (typesize > 1)) { |
683 | 0 | flags |= BLOSC_DOSHUFFLE; |
684 | 0 | } |
685 | 0 | if (header_flags & BLOSC_DOBITSHUFFLE) { |
686 | 0 | flags |= BLOSC_DOBITSHUFFLE; |
687 | 0 | } |
688 | 0 | if (header_flags & BLOSC_DODELTA) { |
689 | 0 | flags |= BLOSC_DODELTA; |
690 | 0 | } |
691 | 0 | if (header_flags & BLOSC_MEMCPYED) { |
692 | 0 | flags |= BLOSC_MEMCPYED; |
693 | 0 | } |
694 | 0 | return flags; |
695 | 0 | } |
696 | | |
/* In-memory image of a Blosc chunk header (filled by read_chunk_header). */
typedef struct blosc_header_s {
  uint8_t version;     /* chunk format version */
  uint8_t versionlz;   /* format version of the codec used */
  uint8_t flags;       /* filter/codec flag bits; compressor format lives in the top 3 bits */
  uint8_t typesize;    /* size of the data type in bytes (must be nonzero) */
  int32_t nbytes;      /* uncompressed size of the chunk */
  int32_t blocksize;   /* block size in bytes */
  int32_t cbytes;      /* compressed size of the whole chunk, header included */
  // Extended Blosc2 header
  uint8_t filters[BLOSC2_MAX_FILTERS];       /* filter pipeline codes */
  uint8_t udcompcode;                        /* user-defined codec id (when flags say UDCODEC) */
  uint8_t compcode_meta;                     /* codec meta-information byte */
  uint8_t filters_meta[BLOSC2_MAX_FILTERS];  /* per-filter meta-information */
  uint8_t blosc2_flags2;                     /* extra blosc2 flags (e.g. BLOSC2_VL_BLOCKS) */
  uint8_t blosc2_flags;                      /* blosc2 flags; special-value type in bits 4+ */
} blosc_header;
713 | | |
714 | | |
/* Parse and validate a Blosc chunk header from `src` (of `srcsize` bytes)
 * into `header`.  When `extended_header` is true and the chunk carries the
 * Blosc2 extended header (signalled by DOSHUFFLE+DOBITSHUFFLE set at once),
 * the extended fields are read and validated too; otherwise the legacy
 * flag byte is expanded into a filter pipeline.
 * Returns 0 on success or a negative BLOSC2_ERROR_* code. */
int read_chunk_header(const uint8_t* src, int32_t srcsize, bool extended_header, blosc_header* header)
{
  memset(header, 0, sizeof(blosc_header));

  if (srcsize < BLOSC_MIN_HEADER_LENGTH) {
    BLOSC_TRACE_ERROR("Not enough space to read Blosc header.");
    return BLOSC2_ERROR_READ_BUFFER;
  }

  memcpy(header, src, BLOSC_MIN_HEADER_LENGTH);

  bool little_endian = is_little_endian();

  /* Multi-byte header fields are stored little-endian on disk */
  if (!little_endian) {
    header->nbytes = bswap32_(header->nbytes);
    header->blocksize = bswap32_(header->blocksize);
    header->cbytes = bswap32_(header->cbytes);
  }

  /* Basic sanity checks before trusting any size field */
  if (header->cbytes < BLOSC_MIN_HEADER_LENGTH) {
    BLOSC_TRACE_ERROR("`cbytes` is too small to read min header.");
    return BLOSC2_ERROR_INVALID_HEADER;
  }
  if (header->blocksize <= 0) {
    BLOSC_TRACE_ERROR("`blocksize` is zero");
    return BLOSC2_ERROR_INVALID_HEADER;
  }
  if (header->blocksize > BLOSC2_MAXBLOCKSIZE) {
    BLOSC_TRACE_ERROR("`blocksize` greater than maximum allowed");
    return BLOSC2_ERROR_INVALID_HEADER;
  }
  if (header->typesize == 0) {
    BLOSC_TRACE_ERROR("`typesize` is zero.");
    return BLOSC2_ERROR_INVALID_HEADER;
  }

  /* Read extended header if it is wanted */
  if ((extended_header) && (header->flags & BLOSC_DOSHUFFLE) && (header->flags & BLOSC_DOBITSHUFFLE)) {
    if (header->cbytes < BLOSC_EXTENDED_HEADER_LENGTH) {
      BLOSC_TRACE_ERROR("`cbytes` is too small to read extended header.");
      return BLOSC2_ERROR_INVALID_HEADER;
    }
    if (srcsize < BLOSC_EXTENDED_HEADER_LENGTH) {
      BLOSC_TRACE_ERROR("Not enough space to read Blosc extended header.");
      return BLOSC2_ERROR_READ_BUFFER;
    }

    /* Copy only the extended portion (min header is already in place) */
    memcpy((uint8_t *)header + BLOSC_MIN_HEADER_LENGTH, src + BLOSC_MIN_HEADER_LENGTH,
           BLOSC_EXTENDED_HEADER_LENGTH - BLOSC_MIN_HEADER_LENGTH);

    /* VL-block chunks must not carry a special-value encoding */
    if ((header->blosc2_flags2 & BLOSC2_VL_BLOCKS) && (header->blosc2_flags != 0)) {
      int32_t special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK;
      if (special_type != 0) {
        BLOSC_TRACE_ERROR("VL-block chunks cannot use special chunk encodings.");
        return BLOSC2_ERROR_INVALID_HEADER;
      }
    }

    /* Validate special chunks (runlen/value encodings) */
    int32_t special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK;
    if (special_type != 0) {
      if (special_type == BLOSC2_SPECIAL_VALUE) {
        // In this case, the actual type size must be derived from the cbytes
        int32_t typesize = header->cbytes - BLOSC_EXTENDED_HEADER_LENGTH;
        if (typesize <= 0) {
          BLOSC_TRACE_ERROR("`typesize` is zero or negative");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        if (typesize > BLOSC2_MAXTYPESIZE) {
          BLOSC_TRACE_ERROR("`typesize` is greater than maximum allowed");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        if (typesize > header->nbytes) {
          BLOSC_TRACE_ERROR("`typesize` is greater than `nbytes`");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        if (header->nbytes % typesize != 0) {
          BLOSC_TRACE_ERROR("`nbytes` is not a multiple of typesize");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
      }
      else {
        if (header->nbytes % header->typesize != 0) {
          BLOSC_TRACE_ERROR("`nbytes` is not a multiple of typesize");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
      }
    }
    // The number of filters depends on the version of the header. Blosc2 alpha series
    // did not initialize filters to zero beyond the max supported.
    if (header->version == BLOSC2_VERSION_FORMAT_ALPHA) {
      header->filters[5] = 0;
      header->filters_meta[5] = 0;
    }
  }
  else {
    /* Legacy (or non-extended) chunk: derive the pipeline from the flag bits */
    flags_to_filters(header->flags, header->filters);
  }
  if (header->version > BLOSC2_VERSION_FORMAT &&
      (header->blosc2_flags2 & (uint8_t)~BLOSC2_VL_BLOCKS) != 0) {
    /* Version from future with unsupported chunk features. */
    return BLOSC2_ERROR_VERSION_SUPPORT;
  }
  /* Clamp blocksize to nbytes for fixed-block chunks */
  if ((header->blosc2_flags2 & BLOSC2_VL_BLOCKS) == 0 &&
      header->nbytes > 0 && header->blocksize > header->nbytes) {
    header->blocksize = header->nbytes;
  }
  return 0;
}
823 | | |
824 | 68.7k | static inline void blosc2_calculate_blocks(blosc2_context* context) { |
825 | | /* Compute number of blocks in buffer */ |
826 | 68.7k | context->nblocks = context->sourcesize / context->blocksize; |
827 | 68.7k | context->leftover = context->sourcesize % context->blocksize; |
828 | 68.7k | context->nblocks = (context->leftover > 0) ? |
829 | 68.0k | (context->nblocks + 1) : context->nblocks; |
830 | 68.7k | } |
831 | | |
832 | 16.7k | static int blosc2_initialize_context_from_header(blosc2_context* context, blosc_header* header) { |
833 | 16.7k | context->header_flags = header->flags; |
834 | 16.7k | context->typesize = header->typesize; |
835 | 16.7k | context->sourcesize = header->nbytes; |
836 | 16.7k | context->header_blocksize = header->blocksize; |
837 | 16.7k | context->blocksize = header->blocksize; |
838 | 16.7k | context->blosc2_flags2 = header->blosc2_flags2; |
839 | 16.7k | context->blosc2_flags = header->blosc2_flags; |
840 | 16.7k | context->compcode = header->flags >> 5; |
841 | 16.7k | if (context->compcode == BLOSC_UDCODEC_FORMAT) { |
842 | 0 | context->compcode = header->udcompcode; |
843 | 0 | } |
844 | 16.7k | if (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) { |
845 | 0 | context->nblocks = header->blocksize; |
846 | 0 | context->leftover = 0; |
847 | 0 | context->blocksize = 0; |
848 | 0 | } |
849 | 16.7k | else { |
850 | 16.7k | blosc2_calculate_blocks(context); |
851 | 16.7k | } |
852 | | |
853 | 16.7k | bool is_lazy = false; |
854 | 16.7k | if ((context->header_flags & BLOSC_DOSHUFFLE) && |
855 | 16.7k | (context->header_flags & BLOSC_DOBITSHUFFLE)) { |
856 | | /* Extended header */ |
857 | 16.7k | context->header_overhead = BLOSC_EXTENDED_HEADER_LENGTH; |
858 | | |
859 | 16.7k | memcpy(context->filters, header->filters, BLOSC2_MAX_FILTERS); |
860 | 16.7k | memcpy(context->filters_meta, header->filters_meta, BLOSC2_MAX_FILTERS); |
861 | 16.7k | context->compcode_meta = header->compcode_meta; |
862 | | |
863 | 16.7k | context->filter_flags = filters_to_flags(header->filters); |
864 | 16.7k | context->special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK; |
865 | | |
866 | 16.7k | is_lazy = (context->blosc2_flags & 0x08u); |
867 | 16.7k | } |
868 | 0 | else { |
869 | 0 | context->header_overhead = BLOSC_MIN_HEADER_LENGTH; |
870 | 0 | context->filter_flags = get_filter_flags(context->header_flags, context->typesize); |
871 | 0 | flags_to_filters(context->header_flags, context->filters); |
872 | 0 | } |
873 | | |
874 | | // Some checks for malformed headers |
875 | 16.7k | if (!is_lazy && header->cbytes > context->srcsize) { |
876 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
877 | 0 | } |
878 | | |
879 | 16.7k | return 0; |
880 | 16.7k | } |
881 | | |
882 | | |
883 | 0 | int fill_filter(blosc2_filter *filter) { |
884 | 0 | char libpath[PATH_MAX]; |
885 | 0 | void *lib = load_lib(filter->name, libpath); |
886 | 0 | if(lib == NULL) { |
887 | 0 | BLOSC_TRACE_ERROR("Error while loading the library"); |
888 | 0 | return BLOSC2_ERROR_FAILURE; |
889 | 0 | } |
890 | | |
891 | 0 | filter_info *info = dlsym(lib, "info"); |
892 | 0 | filter->forward = dlsym(lib, info->forward); |
893 | 0 | filter->backward = dlsym(lib, info->backward); |
894 | |
|
895 | 0 | if (filter->forward == NULL || filter->backward == NULL){ |
896 | 0 | BLOSC_TRACE_ERROR("Wrong library loaded"); |
897 | 0 | dlclose(lib); |
898 | 0 | return BLOSC2_ERROR_FAILURE; |
899 | 0 | } |
900 | | |
901 | 0 | return BLOSC2_ERROR_SUCCESS; |
902 | 0 | } |
903 | | |
904 | | |
905 | 0 | int fill_codec(blosc2_codec *codec) { |
906 | 0 | char libpath[PATH_MAX]; |
907 | 0 | void *lib = load_lib(codec->compname, libpath); |
908 | 0 | if(lib == NULL) { |
909 | 0 | BLOSC_TRACE_ERROR("Error while loading the library for codec `%s`", codec->compname); |
910 | 0 | return BLOSC2_ERROR_FAILURE; |
911 | 0 | } |
912 | | |
913 | 0 | codec_info *info = dlsym(lib, "info"); |
914 | 0 | if (info == NULL) { |
915 | 0 | BLOSC_TRACE_ERROR("`info` symbol cannot be loaded from plugin `%s`", codec->compname); |
916 | 0 | dlclose(lib); |
917 | 0 | return BLOSC2_ERROR_FAILURE; |
918 | 0 | } |
919 | 0 | codec->encoder = dlsym(lib, info->encoder); |
920 | 0 | codec->decoder = dlsym(lib, info->decoder); |
921 | |
|
922 | 0 | if (codec->encoder == NULL || codec->decoder == NULL) { |
923 | 0 | BLOSC_TRACE_ERROR("encoder or decoder cannot be loaded from plugin `%s`", codec->compname); |
924 | 0 | dlclose(lib); |
925 | 0 | return BLOSC2_ERROR_FAILURE; |
926 | 0 | } |
927 | | |
928 | | /* If ever add .free function in future for codec params |
929 | | codecparams_info *info2 = dlsym(lib, "info2"); |
930 | | if (info2 != NULL) { |
931 | | // New plugin (e.g. openzl) with free function for codec_params defined |
932 | | // will be used when destroying context in blosc2_free_ctx |
933 | | codec->free = dlsym(lib, info2->free); |
934 | | } |
935 | | else{ |
936 | | codec->free = NULL; |
937 | | } |
938 | | */ |
939 | | |
940 | 0 | return BLOSC2_ERROR_SUCCESS; |
941 | 0 | } |
942 | | |
943 | | |
944 | 0 | int fill_tuner(blosc2_tuner *tuner) { |
945 | 0 | char libpath[PATH_MAX] = {0}; |
946 | 0 | void *lib = load_lib(tuner->name, libpath); |
947 | 0 | if(lib == NULL) { |
948 | 0 | BLOSC_TRACE_ERROR("Error while loading the library"); |
949 | 0 | return BLOSC2_ERROR_FAILURE; |
950 | 0 | } |
951 | | |
952 | 0 | tuner_info *info = dlsym(lib, "info"); |
953 | 0 | tuner->init = dlsym(lib, info->init); |
954 | 0 | tuner->update = dlsym(lib, info->update); |
955 | 0 | tuner->next_blocksize = dlsym(lib, info->next_blocksize); |
956 | 0 | tuner->free = dlsym(lib, info->free); |
957 | 0 | tuner->next_cparams = dlsym(lib, info->next_cparams); |
958 | |
|
959 | 0 | if (tuner->init == NULL || tuner->update == NULL || tuner->next_blocksize == NULL || tuner->free == NULL |
960 | 0 | || tuner->next_cparams == NULL){ |
961 | 0 | BLOSC_TRACE_ERROR("Wrong library loaded"); |
962 | 0 | dlclose(lib); |
963 | 0 | return BLOSC2_ERROR_FAILURE; |
964 | 0 | } |
965 | | |
966 | 0 | return BLOSC2_ERROR_SUCCESS; |
967 | 0 | } |
968 | | |
969 | | |
970 | 52.0k | static int blosc2_intialize_header_from_context(blosc2_context* context, blosc_header* header, bool extended_header) { |
971 | 52.0k | int32_t header_blocksize = (int32_t)(context->header_blocksize > 0 ? context->header_blocksize : context->blocksize); |
972 | 52.0k | if ((context->blosc2_flags2 & BLOSC2_VL_BLOCKS) == 0 && |
973 | 52.0k | context->sourcesize > 0 && header_blocksize > context->sourcesize) { |
974 | 449 | header_blocksize = (int32_t)context->sourcesize; |
975 | 449 | } |
976 | 52.0k | memset(header, 0, sizeof(blosc_header)); |
977 | | |
978 | 52.0k | header->version = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) ? |
979 | 0 | BLOSC2_VERSION_FORMAT_VL_BLOCKS : |
980 | 52.0k | BLOSC2_VERSION_FORMAT_STABLE; |
981 | 52.0k | header->versionlz = compcode_to_compversion(context->compcode); |
982 | 52.0k | header->flags = context->header_flags; |
983 | 52.0k | header->typesize = (uint8_t)context->typesize; |
984 | 52.0k | header->nbytes = (int32_t)context->sourcesize; |
985 | 52.0k | header->blocksize = header_blocksize; |
986 | | |
987 | 52.0k | int little_endian = is_little_endian(); |
988 | 52.0k | if (!little_endian) { |
989 | 0 | header->nbytes = bswap32_(header->nbytes); |
990 | 0 | header->blocksize = bswap32_(header->blocksize); |
991 | | // cbytes written after compression |
992 | 0 | } |
993 | | |
994 | 52.0k | if (extended_header) { |
995 | | /* Store filter pipeline info at the end of the header */ |
996 | 364k | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
997 | 312k | header->filters[i] = context->filters[i]; |
998 | 312k | header->filters_meta[i] = context->filters_meta[i]; |
999 | 312k | } |
1000 | 52.0k | header->udcompcode = context->compcode; |
1001 | 52.0k | header->compcode_meta = context->compcode_meta; |
1002 | 52.0k | header->blosc2_flags2 = context->blosc2_flags2; |
1003 | | |
1004 | 52.0k | if (!little_endian) { |
1005 | 0 | header->blosc2_flags |= BLOSC2_BIGENDIAN; |
1006 | 0 | } |
1007 | 52.0k | if (context->use_dict) { |
1008 | 0 | header->blosc2_flags |= BLOSC2_USEDICT; |
1009 | 0 | } |
1010 | 52.0k | if (context->blosc2_flags & BLOSC2_INSTR_CODEC) { |
1011 | 0 | header->blosc2_flags |= BLOSC2_INSTR_CODEC; |
1012 | 0 | } |
1013 | 52.0k | } |
1014 | | |
1015 | 52.0k | return 0; |
1016 | 52.0k | } |
1017 | | |
/* Rotate the three pipeline buffers: the old destination becomes the new
   source, the old scratch becomes the new destination, and the old source is
   recycled as scratch.  Lets the filter pipeline chain steps without copies. */
void _cycle_buffers(uint8_t **src, uint8_t **dest, uint8_t **tmp) {
  uint8_t *old_src = *src;
  *src = *dest;
  *dest = *tmp;
  *tmp = old_src;
}
1024 | | |
/* Run the compression-side filter pipeline (optional prefilter followed by up
   to BLOSC2_MAX_FILTERS filters) over one block of `bsize` bytes starting at
   `src + offset`.  `dest` and `tmp` are scratch buffers that are cycled
   between filter steps; the return value is whichever buffer ends up holding
   the filtered data (it may alias `src + offset` when no filter ran), or
   NULL on error. */
uint8_t* pipeline_forward(struct thread_context* thread_context, const int32_t bsize,
                          const uint8_t* src, const int32_t offset,
                          uint8_t* dest, uint8_t* tmp) {
  blosc2_context* context = thread_context->parent_context;
  uint8_t* _src = (uint8_t*)src + offset;
  uint8_t* _tmp = tmp;
  uint8_t* _dest = dest;
  int32_t typesize = context->typesize;
  uint8_t* filters = context->filters;
  uint8_t* filters_meta = context->filters_meta;
  bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED;
  bool output_is_disposable = (context->preparams != NULL) ? context->preparams->output_is_disposable : false;

  /* Prefilter function */
  if (context->prefilter != NULL) {
    // Create new prefilter parameters for this block (must be private for each thread)
    blosc2_prefilter_params preparams;
    memcpy(&preparams, context->preparams, sizeof(preparams));
    // Calculate output_size based on number of elements and output typesize
    int32_t nelems = bsize / typesize;  // number of elements in the input block
    // If output_typesize is not set (0), default to input typesize (no type conversion)
    int32_t output_typesize_actual = (preparams.output_typesize > 0) ? preparams.output_typesize : typesize;
    int32_t output_size = nelems * output_typesize_actual;  // output size in bytes
    preparams.output_typesize = output_typesize_actual;  // ensure it's set
    /* Set unwritten values to zero */
    if (!output_is_disposable) {
      memset(_dest, 0, output_size);
    }
    preparams.input = _src;
    preparams.output = _dest;
    preparams.output_size = output_size;
    preparams.output_offset = offset;
    preparams.nblock = offset / context->blocksize;
    preparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1;
    preparams.tid = thread_context->tid;
    preparams.ttmp = thread_context->tmp;
    preparams.ttmp_nbytes = thread_context->tmp_nbytes;
    preparams.ctx = context;
    preparams.output_is_disposable = output_is_disposable;

    if (context->prefilter(&preparams) != 0) {
      if (output_is_disposable) {
        // Output is going to be discarded; no more filters are required
        BLOSC_TRACE_INFO("Output is disposable");
        return _dest;
      }
      BLOSC_TRACE_ERROR("Execution of prefilter function failed");
      return NULL;
    }

    if (memcpyed) {
      // No more filters are required
      return _dest;
    }
    // Prefilter wrote into _dest; rotate so the next stage reads from it
    _cycle_buffers(&_src, &_dest, &_tmp);
  }

  /* Process the filter pipeline */
  for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) {
    int rc = BLOSC2_ERROR_SUCCESS;
    if (filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) {
      // Built-in filter: dispatch on the filter code
      switch (filters[i]) {
        case BLOSC_SHUFFLE:
          // if filters_meta is different to 0, interpret it as grouped bytes to shuffle
          blosc2_shuffle(filters_meta[i] == 0 ? typesize : filters_meta[i], bsize, _src, _dest);
          break;
        case BLOSC_BITSHUFFLE:
          if (blosc2_bitshuffle(typesize, bsize, _src, _dest) < 0) {
            return NULL;
          }
          break;
        case BLOSC_DELTA:
          // delta needs the whole (unfiltered) chunk as reference, hence `src`
          delta_encoder(src, offset, bsize, typesize, _src, _dest);
          break;
        case BLOSC_TRUNC_PREC:
          if (truncate_precision(filters_meta[i], typesize, bsize, _src, _dest) < 0) {
            return NULL;
          }
          break;
        default:
          if (filters[i] != BLOSC_NOFILTER) {
            BLOSC_TRACE_ERROR("Filter %d not handled during compression\n", filters[i]);
            return NULL;
          }
      }
    }
    else {
      // Look for the filters_meta in user filters and run it
      for (uint64_t j = 0; j < g_nfilters; ++j) {
        if (g_filters[j].id == filters[i]) {
          if (g_filters[j].forward == NULL) {
            // Dynamically load library
            if (fill_filter(&g_filters[j]) < 0) {
              BLOSC_TRACE_ERROR("Could not load filter %d\n", g_filters[j].id);
              return NULL;
            }
          }
          if (g_filters[j].forward != NULL) {
            blosc2_cparams cparams;
            blosc2_ctx_get_cparams(context, &cparams);
            rc = g_filters[j].forward(_src, _dest, bsize, filters_meta[i], &cparams, g_filters[j].id);
          } else {
            BLOSC_TRACE_ERROR("Forward function is NULL");
            return NULL;
          }
          if (rc != BLOSC2_ERROR_SUCCESS) {
            BLOSC_TRACE_ERROR("User-defined filter %d failed during compression\n", filters[i]);
            return NULL;
          }
          goto urfiltersuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined filter %d not found during compression\n", filters[i]);
      return NULL;

      urfiltersuccess:;

    }

    // Cycle buffers when required
    if (filters[i] != BLOSC_NOFILTER) {
      _cycle_buffers(&_src, &_dest, &_tmp);
    }
  }
  // After the last cycle, _src points at the buffer holding the final output
  return _src;
}
1151 | | |
1152 | | |
// Fast run detector: returns true iff every byte in [ip, ip_bound) equals the
// first byte. Compares 8 bytes at a time wherever possible.
static bool get_run(const uint8_t* ip, const uint8_t* ip_bound) {
  const uint8_t first = *ip;
  int64_t pattern, chunk;
  /* Replicate the first byte into every byte of a 64-bit word */
  memset(&pattern, first, 8);
  for (; ip < (ip_bound - 8); ip += 8) {
#if defined(BLOSC_STRICT_ALIGN)
    memcpy(&chunk, ip, 8);
#else
    chunk = *(int64_t*)ip;
#endif
    if (chunk != pattern) {
      // Found a differing word: not a run.
      return false;
    }
  }
  /* Check the final (< 8 byte) tail byte by byte */
  while ((ip < ip_bound) && (*ip == first)) ip++;
  return ip == ip_bound;
}
1177 | | |
1178 | | |
/* Shuffle & compress a single block.
   Applies the filter pipeline (via pipeline_forward) and then compresses the
   result as `nstreams` independent splits, each preceded by a little-endian
   int32 length token.  Negative tokens encode a run of a repeated byte.
   Returns the number of compressed bytes produced for the block, 0 when the
   block turns out to be non-compressible within `destsize`, or a negative
   BLOSC2_ERROR_* code on failure. */
static int blosc_c(struct thread_context* thread_context, int32_t bsize,
                   int32_t leftoverblock, int32_t ntbytes, int32_t destsize,
                   const uint8_t* src, const int32_t offset, uint8_t* dest,
                   uint8_t* tmp, uint8_t* tmp2) {
  blosc2_context* context = thread_context->parent_context;
  int dont_split = (context->header_flags & 0x10) >> 4;
  bool vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0;
  int dict_training = context->use_dict && context->dict_cdict == NULL;
  int32_t j, neblock, nstreams;
  int32_t cbytes;             /* number of compressed bytes in split */
  int32_t ctbytes = 0;        /* number of compressed bytes in block */
  int32_t maxout;
  int32_t typesize = context->typesize;
  bool output_is_disposable = (context->preparams != NULL) ? context->preparams->output_is_disposable : false;
  const char* compname;
  int accel;
  const uint8_t* _src;
  uint8_t *_tmp = tmp, *_tmp2 = tmp2;
  int last_filter_index = last_filter(context->filters, 'c');
  bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED;
  bool instr_codec = context->blosc2_flags & BLOSC2_INSTR_CODEC;
  blosc_timestamp_t last, current;
  float filter_time = 0.f;

  if (instr_codec) {
    blosc_set_timestamp(&last);
  }

  // See whether we have a run here
  if (last_filter_index >= 0 || context->prefilter != NULL) {
    /* Apply the filter pipeline just for the prefilter */
    if (memcpyed && context->prefilter != NULL) {
      // We only need the prefilter output
      _src = pipeline_forward(thread_context, bsize, src, offset, dest, _tmp2);
      if (_src == NULL) {
        return BLOSC2_ERROR_FILTER_PIPELINE;
      }
      return bsize;
    }
    /* Apply regular filter pipeline */
    _src = pipeline_forward(thread_context, bsize, src, offset, _tmp, _tmp2);
    if (_src == NULL) {
      return BLOSC2_ERROR_FILTER_PIPELINE;
    }
  } else {
    // No filters: compress straight from the source block
    _src = src + offset;
  }

  if (instr_codec) {
    blosc_set_timestamp(&current);
    filter_time = (float) blosc_elapsed_secs(last, current);
    last = current;
  }

  assert(context->clevel > 0);

  /* Calculate acceleration for different compressors */
  accel = get_accel(context);

  /* The number of compressed data streams for this block */
  if (!dont_split && !leftoverblock && !dict_training) {
    // Split mode: one stream per typesize byte position
    nstreams = (int32_t)typesize;
  }
  else {
    nstreams = 1;
  }
  neblock = bsize / nstreams;
  for (j = 0; j < nstreams; j++) {
    if (instr_codec) {
      blosc_set_timestamp(&last);
    }
    if (!dict_training) {
      // Reserve room for this split's int32 length token
      dest += sizeof(int32_t);
      ntbytes += sizeof(int32_t);
      ctbytes += sizeof(int32_t);

      if (!vlblocks && context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH && output_is_disposable) {
        // Simulate a run of 0s
        BLOSC_TRACE_INFO("Output is disposable, simulating a run of 0s");
        memset(dest - 4, 0, sizeof(int32_t));
        continue;
      }

      const uint8_t *ip = (uint8_t *) _src + j * neblock;
      const uint8_t *ipbound = (uint8_t *) _src + (j + 1) * neblock;

      if (!vlblocks && context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH && get_run(ip, ipbound)) {
        // A run
        int32_t value = _src[j * neblock];
        if (ntbytes > destsize) {
          return 0; /* Non-compressible data */
        }

        if (instr_codec) {
          // Instrumented mode: emit measurement data instead of compressed bytes
          blosc_set_timestamp(&current);
          int32_t instr_size = sizeof(blosc2_instr);
          ntbytes += instr_size;
          ctbytes += instr_size;
          if (ntbytes > destsize) {
            return 0; /* Non-compressible data */
          }
          _sw32(dest - 4, instr_size);
          blosc2_instr *desti = (blosc2_instr *)dest;
          memset(desti, 0, sizeof(blosc2_instr));
          // Special values have an overhead of about 1 int32
          int32_t ssize = value == 0 ? sizeof(int32_t) : sizeof(int32_t) + 1;
          desti->cratio = (float) neblock / (float) ssize;
          float ctime = (float) blosc_elapsed_secs(last, current);
          desti->cspeed = (float) neblock / ctime;
          desti->filter_speed = (float) neblock / filter_time;
          desti->flags[0] = 1; // mark a runlen
          dest += instr_size;
          continue;
        }

        // Encode the repeated byte in the first (LSB) byte of the length of the split.
        _sw32(dest - 4, -value);  // write the value in two's complement
        if (value > 0) {
          // Mark encoding as a run-length (== 0 is always a 0's run)
          ntbytes += 1;
          ctbytes += 1;
          if (ntbytes > destsize) {
            return 0; /* Non-compressible data */
          }
          // Set MSB bit (sign) to 1 (not really necessary here, but for demonstration purposes)
          // dest[-1] |= 0x80;
          dest[0] = 0x1; // set run-length bit (0) in token
          dest += 1;
        }
        continue;
      }
    }

    maxout = neblock;
    if (ntbytes + maxout > destsize && !instr_codec) {
      /* avoid buffer overrun */
      maxout = destsize - ntbytes;
      if (maxout <= 0) {
        return 0; /* non-compressible block */
      }
    }
    if (dict_training) {
      // We are in the build dict state, so don't compress
      // TODO: copy only a percentage for sampling
      memcpy(dest, _src + j * neblock, (unsigned int)neblock);
      cbytes = (int32_t)neblock;
    }
    else if (context->compcode == BLOSC_BLOSCLZ) {
      cbytes = blosclz_compress(context->clevel, _src + j * neblock,
                                (int)neblock, dest, maxout, context);
    }
    else if (context->compcode == BLOSC_LZ4) {
      void *hash_table = NULL;
#ifdef HAVE_IPP
      hash_table = (void*)thread_context->lz4_hash_table;
#endif
      cbytes = lz4_wrap_compress((char*)_src + j * neblock, (size_t)neblock,
                                 (char*)dest, (size_t)maxout, accel, hash_table,
                                 thread_context);
    }
    else if (context->compcode == BLOSC_LZ4HC) {
      cbytes = lz4hc_wrap_compress((char*)_src + j * neblock, (size_t)neblock,
                                   (char*)dest, (size_t)maxout, context->clevel,
                                   thread_context);
    }
#if defined(HAVE_ZLIB)
    else if (context->compcode == BLOSC_ZLIB) {
      cbytes = zlib_wrap_compress((char*)_src + j * neblock, (size_t)neblock,
                                  (char*)dest, (size_t)maxout, context->clevel);
    }
#endif /* HAVE_ZLIB */
#if defined(HAVE_ZSTD)
    else if (context->compcode == BLOSC_ZSTD) {
      cbytes = zstd_wrap_compress(thread_context,
                                  (char*)_src + j * neblock, (size_t)neblock,
                                  (char*)dest, (size_t)maxout, context->clevel);
    }
#endif /* HAVE_ZSTD */
    else if (context->compcode > BLOSC2_DEFINED_CODECS_STOP) {
      // User-defined codec: look it up in the global registry
      for (int i = 0; i < g_ncodecs; ++i) {
        if (g_codecs[i].compcode == context->compcode) {
          if (g_codecs[i].encoder == NULL) {
            // Dynamically load codec plugin
            if (fill_codec(&g_codecs[i]) < 0) {
              BLOSC_TRACE_ERROR("Could not load codec %d.", g_codecs[i].compcode);
              return BLOSC2_ERROR_CODEC_SUPPORT;
            }
          }
          blosc2_cparams cparams;
          blosc2_ctx_get_cparams(context, &cparams);
          cbytes = g_codecs[i].encoder(_src + j * neblock,
                                       neblock,
                                       dest,
                                       maxout,
                                       context->compcode_meta,
                                       &cparams,
                                       context->src);
          goto urcodecsuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined compressor codec %d not found during compression", context->compcode);
      return BLOSC2_ERROR_CODEC_SUPPORT;
    urcodecsuccess:
      ;
    } else {
      blosc2_compcode_to_compname(context->compcode, &compname);
      BLOSC_TRACE_ERROR("Blosc has not been compiled with '%s' compression support."
                        "Please use one having it.", compname);
      return BLOSC2_ERROR_CODEC_SUPPORT;
    }

    if (cbytes > maxout) {
      /* Buffer overrun caused by compression (should never happen) */
      return BLOSC2_ERROR_WRITE_BUFFER;
    }
    if (cbytes < 0) {
      /* cbytes should never be negative */
      return BLOSC2_ERROR_DATA;
    }
    if (cbytes == 0) {
      // When cbytes is 0, the compressor has not been able to compress anything
      cbytes = neblock;
    }

    if (instr_codec) {
      // Instrumented mode: replace payload with per-split measurements
      blosc_set_timestamp(&current);
      int32_t instr_size = sizeof(blosc2_instr);
      ntbytes += instr_size;
      ctbytes += instr_size;
      if (ntbytes > destsize) {
        return 0; /* Non-compressible data */
      }
      _sw32(dest - 4, vlblocks ? neblock : instr_size);
      float ctime = (float)blosc_elapsed_secs(last, current);
      blosc2_instr *desti = (blosc2_instr *)dest;
      memset(desti, 0, sizeof(blosc2_instr));
      // cratio is computed having into account 1 additional int (csize)
      desti->cratio = (float)neblock / (float)(cbytes + sizeof(int32_t));
      desti->cspeed = (float)neblock / ctime;
      desti->filter_speed = (float) neblock / filter_time;
      dest += instr_size;
      continue;
    }

    if (!dict_training) {
      if (cbytes == neblock) {
        /* The compressor has been unable to compress data at all. */
        /* Before doing the copy, check that we are not running into a
           buffer overflow. */
        if ((ntbytes + neblock) > destsize) {
          return 0; /* Non-compressible data */
        }
        memcpy(dest, _src + j * neblock, (unsigned int)neblock);
        cbytes = neblock;
      }
      // Patch the length token written before this split
      _sw32(dest - 4, vlblocks ? neblock : cbytes);
    }
    dest += cbytes;
    ntbytes += cbytes;
    ctbytes += cbytes;
  } /* Closes j < nstreams */

  return ctbytes;
}
1444 | | |
1445 | | |
/* Process the filter pipeline (decompression mode).
   Undoes the filters recorded in the context, walking the pipeline in reverse
   order starting from `last_filter_index`, then runs the optional postfilter.
   `src`/`tmp`/`tmp2` are cycled between steps; the final result lands in
   `dest + offset`.  Returns 0 on success, -1 if an unknown built-in filter
   code was encountered (processing continues), or a negative BLOSC2_ERROR_*
   code on hard failure. */
int pipeline_backward(struct thread_context* thread_context, const int32_t bsize, uint8_t* dest,
                      const int32_t offset, uint8_t* src, uint8_t* tmp,
                      uint8_t* tmp2, int last_filter_index, int32_t nblock) {
  blosc2_context* context = thread_context->parent_context;
  int32_t typesize = context->typesize;
  uint8_t* filters = context->filters;
  uint8_t* filters_meta = context->filters_meta;
  uint8_t* _src = src;
  uint8_t* _dest = tmp;
  uint8_t* _tmp = tmp2;
  int errcode = 0;

  for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) {
    // Delta filter requires the whole chunk ready
    int last_copy_filter = (last_filter_index == i) || (next_filter(filters, i, 'd') == BLOSC_DELTA);
    if (last_copy_filter && context->postfilter == NULL) {
      // Final step (and no postfilter): write straight into the caller's buffer
      _dest = dest + offset;
    }
    int rc = BLOSC2_ERROR_SUCCESS;
    if (filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) {
      // Built-in filter: dispatch on the filter code
      switch (filters[i]) {
        case BLOSC_SHUFFLE:
          // if filters_meta is not 0, interpret as number of bytes to be grouped together for shuffle
          blosc2_unshuffle(filters_meta[i] == 0 ? typesize : filters_meta[i], bsize, _src, _dest);
          break;
        case BLOSC_BITSHUFFLE:
          // chunk format version selects the bitunshuffle variant
          if (bitunshuffle(typesize, bsize, _src, _dest, context->src[BLOSC2_CHUNK_VERSION]) < 0) {
            return BLOSC2_ERROR_FILTER_PIPELINE;
          }
          break;
        case BLOSC_DELTA:
          if (context->nthreads == 1) {
            /* Serial mode */
            delta_decoder(dest, offset, bsize, typesize, _dest);
          } else {
            /* Force the thread in charge of the block 0 to go first */
            blosc2_pthread_mutex_lock(&context->delta_mutex);
            if (context->dref_not_init) {
              if (offset != 0) {
                // Wait until block 0 has decoded the delta reference
                blosc2_pthread_cond_wait(&context->delta_cv, &context->delta_mutex);
              } else {
                delta_decoder(dest, offset, bsize, typesize, _dest);
                context->dref_not_init = 0;
                blosc2_pthread_cond_broadcast(&context->delta_cv);
              }
            }
            blosc2_pthread_mutex_unlock(&context->delta_mutex);
            if (offset != 0) {
              delta_decoder(dest, offset, bsize, typesize, _dest);
            }
          }
          break;
        case BLOSC_TRUNC_PREC:
          // TRUNC_PREC filter does not need to be undone
          break;
        default:
          if (filters[i] != BLOSC_NOFILTER) {
            BLOSC_TRACE_ERROR("Filter %d not handled during decompression.",
                              filters[i]);
            errcode = -1;
          }
      }
    } else {
      // Look for the filters_meta in user filters and run it
      for (uint64_t j = 0; j < g_nfilters; ++j) {
        if (g_filters[j].id == filters[i]) {
          if (g_filters[j].backward == NULL) {
            // Dynamically load filter
            if (fill_filter(&g_filters[j]) < 0) {
              BLOSC_TRACE_ERROR("Could not load filter %d.", g_filters[j].id);
              return BLOSC2_ERROR_FILTER_PIPELINE;
            }
          }
          if (g_filters[j].backward != NULL) {
            blosc2_dparams dparams;
            blosc2_ctx_get_dparams(context, &dparams);
            rc = g_filters[j].backward(_src, _dest, bsize, filters_meta[i], &dparams, g_filters[j].id);
          } else {
            BLOSC_TRACE_ERROR("Backward function is NULL");
            return BLOSC2_ERROR_FILTER_PIPELINE;
          }
          if (rc != BLOSC2_ERROR_SUCCESS) {
            BLOSC_TRACE_ERROR("User-defined filter %d failed during decompression.", filters[i]);
            return rc;
          }
          goto urfiltersuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined filter %d not found during decompression.", filters[i]);
      return BLOSC2_ERROR_FILTER_PIPELINE;
      urfiltersuccess:;
    }

    // Cycle buffers when required
    if ((filters[i] != BLOSC_NOFILTER) && (filters[i] != BLOSC_TRUNC_PREC)) {
      _cycle_buffers(&_src, &_dest, &_tmp);
    }
    if (last_filter_index == i) {
      break;
    }
  }

  /* Postfilter function */
  if (context->postfilter != NULL) {
    // Create new postfilter parameters for this block (must be private for each thread)
    blosc2_postfilter_params postparams;
    memcpy(&postparams, context->postparams, sizeof(postparams));
    postparams.input = _src;
    postparams.output = dest + offset;
    postparams.size = bsize;
    postparams.typesize = typesize;
    postparams.offset = nblock * context->blocksize;
    postparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1;
    postparams.nblock = nblock;
    postparams.tid = thread_context->tid;
    postparams.ttmp = thread_context->tmp;
    postparams.ttmp_nbytes = thread_context->tmp_nbytes;
    postparams.ctx = context;

    if (context->postfilter(&postparams) != 0) {
      BLOSC_TRACE_ERROR("Execution of postfilter function failed");
      return BLOSC2_ERROR_POSTFILTER;
    }
  }

  return errcode;
}
1574 | | |
1575 | | |
1576 | 0 | static int32_t set_nans(int32_t typesize, uint8_t* dest, int32_t destsize) { |
1577 | 0 | if (destsize % typesize != 0) { |
1578 | 0 | BLOSC_TRACE_ERROR("destsize can only be a multiple of typesize"); |
1579 | 0 | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
1580 | 0 | } |
1581 | 0 | int32_t nitems = destsize / typesize; |
1582 | 0 | if (nitems == 0) { |
1583 | 0 | return 0; |
1584 | 0 | } |
1585 | | |
1586 | 0 | if (typesize == 4) { |
1587 | 0 | float* dest_ = (float*)dest; |
1588 | 0 | float val = nanf(""); |
1589 | 0 | for (int i = 0; i < nitems; i++) { |
1590 | 0 | dest_[i] = val; |
1591 | 0 | } |
1592 | 0 | return nitems; |
1593 | 0 | } |
1594 | 0 | else if (typesize == 8) { |
1595 | 0 | double* dest_ = (double*)dest; |
1596 | 0 | double val = nan(""); |
1597 | 0 | for (int i = 0; i < nitems; i++) { |
1598 | 0 | dest_[i] = val; |
1599 | 0 | } |
1600 | 0 | return nitems; |
1601 | 0 | } |
1602 | | |
1603 | 0 | BLOSC_TRACE_ERROR("Unsupported typesize for NaN"); |
1604 | 0 | return BLOSC2_ERROR_DATA; |
1605 | 0 | } |
1606 | | |
1607 | | |
/* Fill `dest` with copies of a single repeated value for a special
 * "all-equal" chunk.  The repeated value is read from the source chunk
 * just past the extended header (`src + BLOSC_EXTENDED_HEADER_LENGTH`).
 * `destsize` must be an exact multiple of `typesize`.  Returns the number
 * of items written, or a negative error code on a bad size. */
static int32_t set_values(int32_t typesize, const uint8_t* src, uint8_t* dest, int32_t destsize) {
#if defined(BLOSC_STRICT_ALIGN)
  /* On strict-alignment targets, always copy byte-wise via memcpy. */
  if (destsize % typesize != 0) {
    BLOSC_ERROR(BLOSC2_ERROR_FAILURE);
  }
  int32_t nitems = destsize / typesize;
  if (nitems == 0) {
    return 0;
  }
  for (int i = 0; i < nitems; i++) {
    memcpy(dest + i * typesize, src + BLOSC_EXTENDED_HEADER_LENGTH, typesize);
  }
#else
  // destsize can only be a multiple of typesize
  int64_t val8;
  int64_t* dest8;
  int32_t val4;
  int32_t* dest4;
  int16_t val2;
  int16_t* dest2;
  int8_t val1;
  int8_t* dest1;

  if (destsize % typesize != 0) {
    BLOSC_ERROR(BLOSC2_ERROR_FAILURE);
  }
  int32_t nitems = destsize / typesize;
  if (nitems == 0) {
    return 0;
  }

  /* Fast paths for the common power-of-two typesizes use direct integer
   * stores (safe here because strict alignment is not required). */
  switch (typesize) {
    case 8:
      val8 = ((int64_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0];
      dest8 = (int64_t*)dest;
      for (int i = 0; i < nitems; i++) {
        dest8[i] = val8;
      }
      break;
    case 4:
      val4 = ((int32_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0];
      dest4 = (int32_t*)dest;
      for (int i = 0; i < nitems; i++) {
        dest4[i] = val4;
      }
      break;
    case 2:
      val2 = ((int16_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0];
      dest2 = (int16_t*)dest;
      for (int i = 0; i < nitems; i++) {
        dest2[i] = val2;
      }
      break;
    case 1:
      val1 = ((int8_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0];
      dest1 = (int8_t*)dest;
      for (int i = 0; i < nitems; i++) {
        dest1[i] = val1;
      }
      break;
    default:
      /* Arbitrary typesize: fall back to per-item memcpy */
      for (int i = 0; i < nitems; i++) {
        memcpy(dest + i * typesize, src + BLOSC_EXTENDED_HEADER_LENGTH, typesize);
      }
  }
#endif

  return nitems;
}
1677 | | |
1678 | | |
/* Decompress & unshuffle a single block.
 *
 * Reads the compressed block at `src + src_offset` (length bounded by
 * `srcsize`) and writes up to `bsize` uncompressed bytes at
 * `dest + dest_offset`.  `tmp`/`tmp2` are thread-private scratch buffers
 * used when a filter pipeline or postfilter must run after decompression.
 *
 * Handles several layouts:
 *  - masked-out blocks (skipped, reported as fully decompressed);
 *  - lazy chunks, where the block is first read from disk via the I/O
 *    plugin into tmp3;
 *  - memcpyed/special chunks (repeated value, NaN, zero, uninit);
 *  - regular compressed blocks, possibly split into `typesize` streams.
 *
 * Returns the number of decompressed bytes, or a negative error code. */
static int blosc_d(
    struct thread_context* thread_context, int32_t bsize,
    int32_t leftoverblock, bool memcpyed, const uint8_t* src, int32_t srcsize, int32_t src_offset,
    int32_t nblock, uint8_t* dest, int32_t dest_offset, uint8_t* tmp, uint8_t* tmp2) {
  blosc2_context* context = thread_context->parent_context;
  uint8_t* filters = context->filters;
  uint8_t *tmp3 = thread_context->tmp4;
  bool vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0;
  int32_t compformat = (context->header_flags & (uint8_t)0xe0) >> 5u;
  int dont_split = (context->header_flags & 0x10) >> 4;
  int32_t chunk_nbytes;
  int32_t chunk_cbytes;
  int nstreams;
  int32_t neblock;
  int32_t nbytes;   /* number of decompressed bytes in split */
  int32_t cbytes;   /* number of compressed bytes in split */
  // int32_t ctbytes = 0;  /* number of compressed bytes in block */
  int32_t ntbytes = 0;  /* number of uncompressed bytes in block */
  uint8_t* _dest;
  int32_t typesize = context->typesize;
  bool instr_codec = context->blosc2_flags & BLOSC2_INSTR_CODEC;
  const char* compname;
  int rc;

  if (context->block_maskout != NULL && context->block_maskout[nblock]) {
    // Do not decompress, but act as if we successfully decompressed everything
    return bsize;
  }

  rc = blosc2_cbuffer_sizes(src, &chunk_nbytes, &chunk_cbytes, NULL);
  if (rc < 0) {
    return rc;
  }
  if (context->special_type == BLOSC2_SPECIAL_VALUE) {
    // We need the actual typesize in this case, but it cannot be encoded in the header, so derive it from cbytes
    typesize = chunk_cbytes - context->header_overhead;
  }

  // In some situations (lazychunks) the context can arrive uninitialized
  // (but BITSHUFFLE needs it for accessing the format of the chunk)
  if (context->src == NULL) {
    context->src = src;
  }

  // Chunks with special values cannot be lazy
  bool is_lazy = ((context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH) &&
                  (context->blosc2_flags & 0x08u) && !context->special_type);
  if (is_lazy) {
    // The chunk is on disk, so just lazily load the block
    if (context->schunk == NULL) {
      BLOSC_TRACE_ERROR("Lazy chunk needs an associated super-chunk.");
      return BLOSC2_ERROR_INVALID_PARAM;
    }
    if (context->schunk->frame == NULL) {
      BLOSC_TRACE_ERROR("Lazy chunk needs an associated frame.");
      return BLOSC2_ERROR_INVALID_PARAM;
    }
    blosc2_frame_s* frame = (blosc2_frame_s*)context->schunk->frame;
    char* urlpath = frame->urlpath;
    size_t trailer_offset = BLOSC_EXTENDED_HEADER_LENGTH + context->nblocks * sizeof(int32_t);
    int32_t nchunk;
    int64_t chunk_offset;
    // The nchunk and the offset of the current chunk are in the trailer
    nchunk = *(int32_t*)(src + trailer_offset);
    chunk_offset = *(int64_t*)(src + trailer_offset + sizeof(int32_t));
    // Get the csize of the nblock
    int32_t *block_csizes = (int32_t *)(src + trailer_offset + sizeof(int32_t) + sizeof(int64_t));
    int32_t block_csize = block_csizes[nblock];
    // Read the lazy block on disk
    void* fp = NULL;
    blosc2_io_cb *io_cb = blosc2_get_io_cb(context->schunk->storage->io->id);
    if (io_cb == NULL) {
      BLOSC_TRACE_ERROR("Error getting the input/output API");
      return BLOSC2_ERROR_PLUGIN_IO;
    }

    int64_t io_pos = 0;
    if (frame->sframe) {
      // The chunk is not in the frame: it lives in its own "%08X.chunk" file
      char* chunkpath = malloc(strlen(frame->urlpath) + 1 + 8 + strlen(".chunk") + 1);
      BLOSC_ERROR_NULL(chunkpath, BLOSC2_ERROR_MEMORY_ALLOC);
      sprintf(chunkpath, "%s/%08X.chunk", frame->urlpath, nchunk);
      fp = io_cb->open(chunkpath, "rb", context->schunk->storage->io->params);
      BLOSC_ERROR_NULL(fp, BLOSC2_ERROR_FILE_OPEN);
      free(chunkpath);
      // The offset of the block is src_offset
      io_pos = src_offset;
    }
    else {
      fp = io_cb->open(urlpath, "rb", context->schunk->storage->io->params);
      BLOSC_ERROR_NULL(fp, BLOSC2_ERROR_FILE_OPEN);
      // The offset of the block is src_offset
      io_pos = frame->file_offset + chunk_offset + src_offset;
    }
    // We can make use of tmp3 because it will be used after src is not needed anymore
    int64_t rbytes = io_cb->read((void**)&tmp3, 1, block_csize, io_pos, fp);
    io_cb->close(fp);
    if ((int32_t)rbytes != block_csize) {
      BLOSC_TRACE_ERROR("Cannot read the (lazy) block out of the fileframe.");
      return BLOSC2_ERROR_READ_BUFFER;
    }
    // From here on, decompress from the freshly-read buffer
    src = tmp3;
    src_offset = 0;
    srcsize = block_csize;
  }

  // If the chunk is memcpyed, we just have to copy the block to dest and return
  if (memcpyed) {
    int bsize_ = leftoverblock ? chunk_nbytes % context->blocksize : bsize;
    if (!context->special_type) {
      if (chunk_nbytes + context->header_overhead != chunk_cbytes) {
        return BLOSC2_ERROR_WRITE_BUFFER;
      }
      if (chunk_cbytes < context->header_overhead + (nblock * context->blocksize) + bsize_) {
        /* Not enough input to copy block */
        return BLOSC2_ERROR_READ_BUFFER;
      }
    }
    if (!is_lazy) {
      src += context->header_overhead + nblock * context->blocksize;
    }
    _dest = dest + dest_offset;
    if (context->postfilter != NULL) {
      // We are making use of a postfilter, so use a temp for destination
      _dest = tmp;
    }
    rc = 0;
    switch (context->special_type) {
      case BLOSC2_SPECIAL_VALUE:
        // All repeated values
        rc = set_values(typesize, context->src, _dest, bsize_);
        if (rc < 0) {
          BLOSC_TRACE_ERROR("set_values failed");
          return BLOSC2_ERROR_DATA;
        }
        break;
      case BLOSC2_SPECIAL_NAN:
        rc = set_nans(context->typesize, _dest, bsize_);
        if (rc < 0) {
          BLOSC_TRACE_ERROR("set_nans failed");
          return BLOSC2_ERROR_DATA;
        }
        break;
      case BLOSC2_SPECIAL_ZERO:
        memset(_dest, 0, bsize_);
        break;
      case BLOSC2_SPECIAL_UNINIT:
        // We do nothing here
        break;
      default:
        memcpy(_dest, src, bsize_);
    }
    if (context->postfilter != NULL) {
      // Create new postfilter parameters for this block (must be private for each thread)
      blosc2_postfilter_params postparams;
      memcpy(&postparams, context->postparams, sizeof(postparams));
      postparams.input = tmp;
      postparams.output = dest + dest_offset;
      postparams.size = bsize;
      postparams.typesize = typesize;
      postparams.offset = nblock * context->blocksize;
      postparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1;
      postparams.nblock = nblock;
      postparams.tid = thread_context->tid;
      postparams.ttmp = thread_context->tmp;
      postparams.ttmp_nbytes = thread_context->tmp_nbytes;
      postparams.ctx = context;

      // Execute the postfilter (the processed block will be copied to dest)
      if (context->postfilter(&postparams) != 0) {
        BLOSC_TRACE_ERROR("Execution of postfilter function failed");
        return BLOSC2_ERROR_POSTFILTER;
      }
    }
    thread_context->zfp_cell_nitems = 0;

    return bsize_;
  }

  // Regular (non-memcpyed) path: the offset must point inside the chunk
  if (!is_lazy && (src_offset <= 0 || src_offset >= srcsize)) {
    /* Invalid block src offset encountered */
    return BLOSC2_ERROR_DATA;
  }

  src += src_offset;
  if (vlblocks) {
    if (context->blockcbytes == NULL || nblock >= context->nblocks ||
        context->blockcbytes[nblock] <= (int32_t)sizeof(int32_t) ||
        src_offset > srcsize - context->blockcbytes[nblock]) {
      return BLOSC2_ERROR_DATA;
    }
    srcsize = context->blockcbytes[nblock];
  }
  else {
    srcsize -= src_offset;
  }

  int last_filter_index = last_filter(filters, 'd');
  if (instr_codec) {
    // If instrumented, we don't want to run the filters
    _dest = dest + dest_offset;
  }
  else if (((last_filter_index >= 0) &&
            (next_filter(filters, BLOSC2_MAX_FILTERS, 'd') != BLOSC_DELTA)) ||
           context->postfilter != NULL) {
    // We are making use of some filter, so use a temp for destination
    _dest = tmp;
  }
  else {
    // If no filters, or only DELTA in pipeline
    _dest = dest + dest_offset;
  }

  /* The number of compressed data streams for this block */
  if (vlblocks) {
    nstreams = 1;
  }
  else if (!dont_split && !leftoverblock) {
    nstreams = context->typesize;
  }
  else {
    nstreams = 1;
  }

  neblock = bsize / nstreams;
  if (neblock == 0) {
    /* Not enough space to output bytes */
    BLOSC_ERROR(BLOSC2_ERROR_WRITE_BUFFER);
  }
  for (int j = 0; j < nstreams; j++) {
    if (vlblocks) {
      if (srcsize < (signed)sizeof(int32_t)) {
        /* Not enough input to read compressed bytes */
        return BLOSC2_ERROR_READ_BUFFER;
      }
      neblock = sw32_(src);
      if (neblock != bsize) {
        return BLOSC2_ERROR_DATA;
      }
      src += sizeof(int32_t);
      cbytes = srcsize - (int32_t)sizeof(int32_t);
      srcsize = 0;
    }
    else {
      if (srcsize < (signed)sizeof(int32_t)) {
        /* Not enough input to read compressed size */
        return BLOSC2_ERROR_READ_BUFFER;
      }
      srcsize -= sizeof(int32_t);
      cbytes = sw32_(src); /* amount of compressed bytes */
      if (cbytes > 0) {
        if (srcsize < cbytes) {
          /* Not enough input to read compressed bytes */
          return BLOSC2_ERROR_READ_BUFFER;
        }
        srcsize -= cbytes;
      }
      src += sizeof(int32_t);
    }
    // ctbytes += (signed)sizeof(int32_t);

    /* Uncompress */
    if (!vlblocks && cbytes == 0) {
      // A run of 0's
      memset(_dest, 0, (unsigned int)neblock);
      nbytes = neblock;
    }
    else if (!vlblocks && cbytes < 0) {
      // A negative number means some encoding depending on the token that comes next
      uint8_t token;

      if (srcsize < (signed)sizeof(uint8_t)) {
        // Not enough input to read token
        return BLOSC2_ERROR_READ_BUFFER;
      }
      srcsize -= sizeof(uint8_t);

      token = src[0];
      src += 1;
      // ctbytes += 1;

      if (token & 0x1) {
        // A run of bytes that are different than 0
        if (cbytes < -255) {
          // Runs can only encode a byte
          return BLOSC2_ERROR_RUN_LENGTH;
        }
        uint8_t value = -cbytes;
        memset(_dest, value, (unsigned int)neblock);
      } else {
        BLOSC_TRACE_ERROR("Invalid or unsupported compressed stream token value - %d", token);
        return BLOSC2_ERROR_RUN_LENGTH;
      }
      nbytes = neblock;
      cbytes = 0;  // everything is encoded in the cbytes token
    }
    else if (cbytes == neblock) {
      // Stored (uncompressed) stream: just copy it
      memcpy(_dest, src, (unsigned int)neblock);
      nbytes = (int32_t)neblock;
    }
    else {
      // Dispatch to the codec indicated by the header's compformat bits
      if (compformat == BLOSC_BLOSCLZ_FORMAT) {
        nbytes = blosclz_decompress(src, cbytes, _dest, (int)neblock);
      }
      else if (compformat == BLOSC_LZ4_FORMAT) {
        nbytes = lz4_wrap_decompress((char*)src, (size_t)cbytes,
                                     (char*)_dest, (size_t)neblock,
                                     thread_context);
      }
#if defined(HAVE_ZLIB)
      else if (compformat == BLOSC_ZLIB_FORMAT) {
        nbytes = zlib_wrap_decompress((char*)src, (size_t)cbytes,
                                      (char*)_dest, (size_t)neblock);
      }
#endif /* HAVE_ZLIB */
#if defined(HAVE_ZSTD)
      else if (compformat == BLOSC_ZSTD_FORMAT) {
        nbytes = zstd_wrap_decompress(thread_context,
                                      (char*)src, (size_t)cbytes,
                                      (char*)_dest, (size_t)neblock);
      }
#endif /* HAVE_ZSTD */
      else if (compformat == BLOSC_UDCODEC_FORMAT) {
        bool getcell = false;

#if defined(HAVE_PLUGINS)
        if ((context->compcode == BLOSC_CODEC_ZFP_FIXED_RATE) &&
            (thread_context->zfp_cell_nitems > 0)) {
          nbytes = zfp_getcell(thread_context, src, cbytes, _dest, neblock);
          if (nbytes < 0) {
            return BLOSC2_ERROR_DATA;
          }
          if (nbytes == thread_context->zfp_cell_nitems * typesize) {
            getcell = true;
          }
        }
#endif /* HAVE_PLUGINS */
        if (!getcell) {
          thread_context->zfp_cell_nitems = 0;
          for (int i = 0; i < g_ncodecs; ++i) {
            if (g_codecs[i].compcode == context->compcode) {
              if (g_codecs[i].decoder == NULL) {
                // Dynamically load codec plugin
                if (fill_codec(&g_codecs[i]) < 0) {
                  BLOSC_TRACE_ERROR("Could not load codec %d.", g_codecs[i].compcode);
                  return BLOSC2_ERROR_CODEC_SUPPORT;
                }
              }
              blosc2_dparams dparams;
              blosc2_ctx_get_dparams(context, &dparams);
              nbytes = g_codecs[i].decoder(src,
                                           cbytes,
                                           _dest,
                                           neblock,
                                           context->compcode_meta,
                                           &dparams,
                                           context->src);
              goto urcodecsuccess;
            }
          }
          BLOSC_TRACE_ERROR("User-defined compressor codec %d not found during decompression", context->compcode);
          return BLOSC2_ERROR_CODEC_SUPPORT;
        }
      urcodecsuccess:
        ;
      }
      else {
        compname = clibcode_to_clibname(compformat);
        BLOSC_TRACE_ERROR(
            "Blosc has not been compiled with decompression "
            "support for '%s' format. "
            "Please recompile for adding this support.", compname);
        return BLOSC2_ERROR_CODEC_SUPPORT;
      }

      /* Check that decompressed bytes number is correct */
      if ((nbytes != neblock) && (thread_context->zfp_cell_nitems == 0)) {
        return BLOSC2_ERROR_DATA;
      }

    }
    src += cbytes;
    // ctbytes += cbytes;
    _dest += nbytes;
    ntbytes += nbytes;
  } /* Closes j < nstreams */

  if (!instr_codec) {
    if (last_filter_index >= 0 || context->postfilter != NULL) {
      /* Apply regular filter pipeline */
      int errcode = pipeline_backward(thread_context, bsize, dest, dest_offset, tmp, tmp2, tmp3,
                                      last_filter_index, nblock);
      if (errcode < 0)
        return errcode;
    }
  }

  /* Return the number of uncompressed bytes */
  return (int)ntbytes;
}
2080 | | |
2081 | | |
/* Serial version for compression/decompression.
 *
 * Iterates over every block of the chunk in the current thread, calling
 * blosc_c (compress) or blosc_d (decompress) per block.  Returns the
 * accumulated byte count, 0 when the data turned out incompressible
 * (cbytes == 0 from blosc_c), or a negative error code propagated from
 * blosc_c/blosc_d. */
static int serial_blosc(struct thread_context* thread_context) {
  blosc2_context* context = thread_context->parent_context;
  bool vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0;
  int32_t j, bsize, leftoverblock;
  int32_t cbytes;
  int32_t ntbytes = context->output_bytes;
  int32_t* bstarts = context->bstarts;
  uint8_t* tmp = thread_context->tmp;
  uint8_t* tmp2 = thread_context->tmp2;
  int dict_training = context->use_dict && (context->dict_cdict == NULL);
  bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED;
  if (!context->do_compress && context->special_type) {
    // Fake a runlen as if it was a memcpyed chunk
    memcpyed = true;
  }

  for (j = 0; j < context->nblocks; j++) {
    if (context->do_compress && !memcpyed && !dict_training) {
      // Record this block's start offset in the bstarts index
      _sw32(bstarts + j, ntbytes);
    }
    bsize = vlblocks ? context->blocknbytes[j] : context->blocksize;
    leftoverblock = 0;
    if (!vlblocks && (j == context->nblocks - 1) && (context->leftover > 0)) {
      // Last block may be shorter than blocksize
      bsize = context->leftover;
      leftoverblock = 1;
    }
    if (context->do_compress) {
      if (memcpyed && !context->prefilter) {
        /* We want to memcpy only */
        memcpy(context->dest + context->header_overhead + j * context->blocksize,
               context->src + j * context->blocksize, (unsigned int)bsize);
        cbytes = (int32_t)bsize;
      }
      else {
        /* Regular compression */
        cbytes = blosc_c(thread_context, bsize, leftoverblock, ntbytes,
                         context->destsize,
                         vlblocks ? context->vlblock_sources[j] : context->src,
                         vlblocks ? 0 : j * context->blocksize,
                         context->dest + ntbytes, tmp, tmp2);
        if (cbytes == 0) {
          ntbytes = 0; /* incompressible data */
          break;
        }
      }
    }
    else {
      /* Regular decompression */
      // If memcpyed we don't have a bstarts section (because it is not needed)
      int32_t src_offset = memcpyed ?
          context->header_overhead + j * context->blocksize : sw32_(bstarts + j);
      // Variable-length blocks may carry their own per-block destinations/offsets
      uint8_t *dest_block = (vlblocks && context->vlblock_dests != NULL) ? context->vlblock_dests[j] : context->dest;
      int32_t dest_offset = (vlblocks && context->vlblock_dests != NULL) ? 0 :
          (vlblocks ? context->blockoffsets[j] : j * context->blocksize);
      cbytes = blosc_d(thread_context, bsize, leftoverblock, memcpyed,
                       context->src, context->srcsize, src_offset, j,
                       dest_block, dest_offset, tmp, tmp2);
    }

    if (cbytes < 0) {
      ntbytes = cbytes; /* error in blosc_c or blosc_d */
      break;
    }
    ntbytes += cbytes;
  }

  return ntbytes;
}
2151 | | |
2152 | | static void t_blosc_do_job(void *ctxt); |
2153 | | |
/* Threaded version for compression/decompression.
 *
 * Hands the work over to the thread pool (or to a user-provided
 * threads_callback) and waits for completion.  Returns the total bytes
 * (de-)compressed, or the non-positive giveup code on failure. */
static int parallel_blosc(blosc2_context* context) {
#ifdef BLOSC_POSIX_BARRIERS
  int rc;  /* used by the WAIT_* barrier macros below */
#endif
  /* Set sentinels */
  context->thread_giveup_code = 1;
  context->thread_nblock = -1;

  if (threads_callback) {
    // Delegate thread dispatch to the user-registered callback
    threads_callback(threads_callback_data, t_blosc_do_job,
                     context->nthreads, sizeof(struct thread_context), (void*) context->thread_contexts);
  }
  else {
    /* Synchronization point for all threads (wait for initialization) */
    WAIT_INIT(-1, context);

    /* Synchronization point for all threads (wait for finalization) */
    WAIT_FINISH(-1, context);
  }

  if (context->thread_giveup_code <= 0) {
    /* Compression/decompression gave up. Return error code. */
    return context->thread_giveup_code;
  }

  /* Return the total bytes (de-)compressed in threads */
  return (int)context->output_bytes;
}
2183 | | |
/* Initialize a thread_context that has already been allocated.
 *
 * Allocates one contiguous scratch region of 4 * ebsize bytes and carves
 * it into the tmp/tmp2/tmp3/tmp4 buffers; codec-specific state (zstd,
 * lz4, zfp, IPP hash table) starts out empty/NULL and is created lazily.
 * Returns 0 on success, BLOSC2_ERROR_MEMORY_ALLOC on allocation failure. */
static int init_thread_context(struct thread_context* thread_context, blosc2_context* context, int32_t tid)
{
  int32_t ebsize;

  thread_context->parent_context = context;
  thread_context->tid = tid;

  // ebsize = blocksize plus room for one int32 per typesize byte (split headers)
  ebsize = context->blocksize + context->typesize * (signed)sizeof(int32_t);
  thread_context->tmp_nbytes = (size_t)4 * ebsize;
  thread_context->tmp = my_malloc(thread_context->tmp_nbytes);
  BLOSC_ERROR_NULL(thread_context->tmp, BLOSC2_ERROR_MEMORY_ALLOC);
  thread_context->tmp2 = thread_context->tmp + ebsize;
  thread_context->tmp3 = thread_context->tmp2 + ebsize;
  thread_context->tmp4 = thread_context->tmp3 + ebsize;
  thread_context->tmp_blocksize = context->blocksize;
  thread_context->zfp_cell_nitems = 0;
  thread_context->zfp_cell_start = 0;
#if defined(HAVE_ZSTD)
  thread_context->zstd_cctx = NULL;
  thread_context->zstd_dctx = NULL;
#endif
  thread_context->lz4_cstream = NULL;
  thread_context->lz4hc_cstream = NULL;

  /* Create the hash table for LZ4 in case we are using IPP */
#ifdef HAVE_IPP
  IppStatus status;
  int inlen = thread_context->tmp_blocksize > 0 ? thread_context->tmp_blocksize : 1 << 16;
  int hash_size = 0;
  status = ippsEncodeLZ4HashTableGetSize_8u(&hash_size);
  if (status != ippStsNoErr) {
    BLOSC_TRACE_ERROR("Error in ippsEncodeLZ4HashTableGetSize_8u.");
  }
  Ipp8u *hash_table = ippsMalloc_8u(hash_size);
  status = ippsEncodeLZ4HashTableInit_8u(hash_table, inlen);
  if (status != ippStsNoErr) {
    BLOSC_TRACE_ERROR("Error in ippsEncodeLZ4HashTableInit_8u.");
  }
  thread_context->lz4_hash_table = hash_table;
#endif
  return 0;
}
2227 | | |
2228 | | static struct thread_context* |
2229 | 5.46k | create_thread_context(blosc2_context* context, int32_t tid) { |
2230 | 5.46k | struct thread_context* thread_context; |
2231 | 5.46k | thread_context = (struct thread_context*)my_malloc(sizeof(struct thread_context)); |
2232 | 5.46k | BLOSC_ERROR_NULL(thread_context, NULL); |
2233 | 5.46k | int rc = init_thread_context(thread_context, context, tid); |
2234 | 5.46k | if (rc < 0) { |
2235 | 0 | return NULL; |
2236 | 0 | } |
2237 | 5.46k | return thread_context; |
2238 | 5.46k | } |
2239 | | |
/* Free the members of a thread_context, but not the thread_context itself
 * (the struct may be embedded in an array or freed separately by the
 * caller — see free_thread_context). */
static void destroy_thread_context(struct thread_context* thread_context) {
  my_free(thread_context->tmp);  // tmp2/tmp3/tmp4 live inside this same allocation
#if defined(HAVE_ZSTD)
  if (thread_context->zstd_cctx != NULL) {
    ZSTD_freeCCtx(thread_context->zstd_cctx);
  }
  if (thread_context->zstd_dctx != NULL) {
    ZSTD_freeDCtx(thread_context->zstd_dctx);
  }
#endif
  if (thread_context->lz4_cstream != NULL) {
    LZ4_freeStream((LZ4_stream_t*)thread_context->lz4_cstream);
  }
  if (thread_context->lz4hc_cstream != NULL) {
    LZ4_freeStreamHC((LZ4_streamHC_t*)thread_context->lz4hc_cstream);
  }
#ifdef HAVE_IPP
  if (thread_context->lz4_hash_table != NULL) {
    ippsFree(thread_context->lz4_hash_table);
  }
#endif
}
2263 | | |
/* Release a heap-allocated thread_context: free its members and then
 * the struct itself (counterpart of create_thread_context). */
void free_thread_context(struct thread_context* thread_context) {
  destroy_thread_context(thread_context);
  my_free(thread_context);
}
2268 | | |
2269 | | |
2270 | 82.5k | int check_nthreads(blosc2_context* context) { |
2271 | 82.5k | if (context->nthreads <= 0) { |
2272 | 0 | BLOSC_TRACE_ERROR("nthreads must be >= 1 and <= %d", INT16_MAX); |
2273 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2274 | 0 | } |
2275 | | |
2276 | 82.5k | if (context->new_nthreads != context->nthreads) { |
2277 | 0 | if (context->nthreads > 1) { |
2278 | 0 | release_threadpool(context); |
2279 | 0 | } |
2280 | 0 | context->nthreads = context->new_nthreads; |
2281 | 0 | } |
2282 | 82.5k | if (context->new_nthreads > 1 && context->threads_started == 0) { |
2283 | 0 | init_threadpool(context); |
2284 | 0 | } |
2285 | | |
2286 | 82.5k | return context->nthreads; |
2287 | 82.5k | } |
2288 | | |
2289 | | /* Do the compression or decompression of the buffer depending on the |
2290 | | global params. */ |
2291 | 82.5k | static int do_job(blosc2_context* context) { |
2292 | 82.5k | int32_t ntbytes; |
2293 | | |
2294 | | /* Set sentinels */ |
2295 | 82.5k | context->dref_not_init = 1; |
2296 | | |
2297 | | /* Check whether we need to restart threads */ |
2298 | 82.5k | check_nthreads(context); |
2299 | | |
2300 | | /* Run the serial version when nthreads is 1 or when the buffers are |
2301 | | not larger than blocksize */ |
2302 | 82.5k | if (context->nthreads == 1 || (context->sourcesize / context->blocksize) <= 1) { |
2303 | | /* The context for this 'thread' has no been initialized yet */ |
2304 | 82.5k | if (context->serial_context == NULL) { |
2305 | 5.01k | context->serial_context = create_thread_context(context, 0); |
2306 | 5.01k | } |
2307 | 77.5k | else if (context->blocksize != context->serial_context->tmp_blocksize) { |
2308 | 449 | free_thread_context(context->serial_context); |
2309 | 449 | context->serial_context = create_thread_context(context, 0); |
2310 | 449 | } |
2311 | 82.5k | BLOSC_ERROR_NULL(context->serial_context, BLOSC2_ERROR_THREAD_CREATE); |
2312 | 82.5k | ntbytes = serial_blosc(context->serial_context); |
2313 | 82.5k | } |
2314 | 0 | else { |
2315 | 0 | ntbytes = parallel_blosc(context); |
2316 | 0 | } |
2317 | | |
2318 | 82.5k | return ntbytes; |
2319 | 82.5k | } |
2320 | | |
2321 | | |
/* Fill a context with all the parameters needed for a compression run and
   validate them.
   Copies src/dest pointers and sizes, the filter pipeline, codec, thread
   counts and split mode into `context`, runs the configured tuner to pick
   cparams/blocksize, then checks buffer-size, clevel, dictionary and
   typesize constraints.  Returns 1 on success or a negative error code. */
static int initialize_context_compression(
    blosc2_context* context, const void* src, int32_t srcsize, void* dest,
    int32_t destsize, int clevel, uint8_t const *filters,
    uint8_t const *filters_meta, int32_t typesize, int compressor,
    int32_t blocksize, int16_t new_nthreads, int16_t nthreads,
    int32_t splitmode,
    int tuner_id, void *tuner_params,
    blosc2_schunk* schunk) {

  /* Set parameters */
  context->do_compress = 1;
  context->src = (const uint8_t*)src;
  context->srcsize = srcsize;
  context->dest = (uint8_t*)dest;
  context->output_bytes = 0;
  context->destsize = destsize;
  context->sourcesize = srcsize;
  context->typesize = typesize;
  context->filter_flags = filters_to_flags(filters);
  for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) {
    context->filters[i] = filters[i];
    context->filters_meta[i] = filters_meta[i];
  }
  context->compcode = compressor;
  context->nthreads = nthreads;
  context->new_nthreads = new_nthreads;
  context->end_threads = 0;
  context->clevel = clevel;
  context->schunk = schunk;
  context->tuner_params = tuner_params;
  context->tuner_id = tuner_id;
  context->splitmode = splitmode;
  context->header_blocksize = (int32_t)blocksize;
  context->blosc2_flags2 = 0;
  /* Drop any per-block metadata left over from a previous (VL-block)
     decompression so stale arrays are never reused for this run. */
  if (context->blocknbytes != NULL) {
    free(context->blocknbytes);
    context->blocknbytes = NULL;
  }
  if (context->blockoffsets != NULL) {
    free(context->blockoffsets);
    context->blockoffsets = NULL;
  }
  if (context->blockcbytes != NULL) {
    free(context->blockcbytes);
    context->blockcbytes = NULL;
  }
  context->vlblock_sources = NULL;
  context->vlblock_dests = NULL;
  /* tuner some compression parameters */
  context->blocksize = (int32_t)blocksize;
  int rc = 0;
  if (context->tuner_params != NULL) {
    /* Tuner params supplied: let the tuner choose full cparams. */
    if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) {
      if (blosc_stune_next_cparams(context) < 0) {
        BLOSC_TRACE_ERROR("Error in stune next_cparams func\n");
        return BLOSC2_ERROR_TUNER;
      }
    } else {
      /* User-registered tuner: locate it in the global registry, loading
         its entry points lazily on first use. */
      for (int i = 0; i < g_ntuners; ++i) {
        if (g_tuners[i].id == context->tuner_id) {
          if (g_tuners[i].next_cparams == NULL) {
            if (fill_tuner(&g_tuners[i]) < 0) {
              BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id);
              return BLOSC2_ERROR_FAILURE;
            }
          }
          if (g_tuners[i].next_cparams(context) < 0) {
            BLOSC_TRACE_ERROR("Error in tuner %d next_cparams func\n", context->tuner_id);
            return BLOSC2_ERROR_TUNER;
          }
          if (g_tuners[i].id == BLOSC_BTUNE && context->blocksize == 0) {
            // Call stune for initializing blocksize
            if (blosc_stune_next_blocksize(context) < 0) {
              BLOSC_TRACE_ERROR("Error in stune next_blocksize func\n");
              return BLOSC2_ERROR_TUNER;
            }
          }
          goto urtunersuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id);
      return BLOSC2_ERROR_INVALID_PARAM;
    }
  } else {
    /* No tuner params: only ask the tuner for a blocksize. */
    if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) {
      rc = blosc_stune_next_blocksize(context);
    } else {
      for (int i = 0; i < g_ntuners; ++i) {
        if (g_tuners[i].id == context->tuner_id) {
          if (g_tuners[i].next_blocksize == NULL) {
            if (fill_tuner(&g_tuners[i]) < 0) {
              BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id);
              return BLOSC2_ERROR_FAILURE;
            }
          }
          rc = g_tuners[i].next_blocksize(context);
          goto urtunersuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id);
      return BLOSC2_ERROR_INVALID_PARAM;
    }
  }
  urtunersuccess:;
  if (rc < 0) {
    BLOSC_TRACE_ERROR("Error in tuner next_blocksize func\n");
    return BLOSC2_ERROR_TUNER;
  }


  /* Check buffer size limits */
  if (srcsize > BLOSC2_MAX_BUFFERSIZE) {
    BLOSC_TRACE_ERROR("Input buffer size cannot exceed %d bytes.",
                      BLOSC2_MAX_BUFFERSIZE);
    return BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED;
  }

  if (destsize < BLOSC2_MAX_OVERHEAD) {
    BLOSC_TRACE_ERROR("Output buffer size should be larger than %d bytes.",
                      BLOSC2_MAX_OVERHEAD);
    return BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED;
  }

  /* Compression level */
  if (clevel < 0 || clevel > 9) {
    /* If clevel not in 0..9, print an error */
    BLOSC_TRACE_ERROR("`clevel` parameter must be between 0 and 9!.");
    return BLOSC2_ERROR_CODEC_PARAM;
  }

  /* Dictionary support is only available for ZSTD, LZ4, and LZ4HC.
   * Skip the check when src is NULL (special-value chunks): no compression
   * will actually happen, so codec compatibility is irrelevant. */
  if (src != NULL && context->use_dict && context->compcode != BLOSC_ZSTD &&
      context->compcode != BLOSC_LZ4 && context->compcode != BLOSC_LZ4HC) {
    BLOSC_TRACE_ERROR("`use_dict` is only supported for ZSTD, LZ4, and LZ4HC codecs.");
    return BLOSC2_ERROR_CODEC_PARAM;
  }

  /* Check typesize limits */
  if (context->typesize > BLOSC2_MAXTYPESIZE) {
    // If typesize is too large for Blosc2, return an error
    BLOSC_TRACE_ERROR("Typesize cannot exceed %d bytes.", BLOSC2_MAXTYPESIZE);
    return BLOSC2_ERROR_INVALID_PARAM;
  }
  /* Now, cap typesize so that blosc2 split machinery can continue to work */
  if (context->typesize > BLOSC_MAX_TYPESIZE) {
    /* If typesize is too large, treat buffer as an 1-byte stream. */
    context->typesize = 1;
  }

  blosc2_calculate_blocks(context);

  return 1;
}
2477 | | |
2478 | 25.0k | static void release_context_dict_buffer(blosc2_context* context) { |
2479 | 25.0k | if (context->dict_buffer_owned && context->dict_buffer != NULL) { |
2480 | 0 | free(context->dict_buffer); |
2481 | 0 | } |
2482 | 25.0k | context->dict_buffer = NULL; |
2483 | 25.0k | context->dict_buffer_owned = false; |
2484 | 25.0k | context->dict_size = 0; |
2485 | 25.0k | } |
2486 | | |
2487 | | |
2488 | 16.7k | static void clear_context_decompression_dict(blosc2_context* context) { |
2489 | 16.7k | context->use_dict = 0; |
2490 | 16.7k | release_context_dict_buffer(context); |
2491 | 16.7k | #if defined(HAVE_ZSTD) |
2492 | 16.7k | if (context->dict_ddict != NULL) { |
2493 | 0 | ZSTD_freeDDict(context->dict_ddict); |
2494 | 0 | context->dict_ddict = NULL; |
2495 | 0 | } |
2496 | | #else |
2497 | | context->dict_ddict = NULL; |
2498 | | #endif |
2499 | 16.7k | } |
2500 | | |
2501 | | |
/* Read `nbytes` bytes of a lazy chunk from its on-disk frame into `buffer`.
   `offset` is relative to the start of the chunk.  `open_error` and
   `read_error` are the messages traced on the respective failures.
   Requires context->schunk with a frame; the lazy trailer after the
   bstarts section supplies the chunk number and its offset in the frame.
   Returns 0 on success or a negative error code. */
static int read_lazy_chunk_bytes(blosc2_context* context, int32_t offset, uint8_t* buffer, int32_t nbytes,
                                 const char* open_error, const char* read_error) {
  if (context->schunk == NULL || context->schunk->frame == NULL) {
    BLOSC_TRACE_ERROR("Lazy chunk needs an associated super-chunk with a frame.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  blosc2_frame_s* frame = (blosc2_frame_s*)context->schunk->frame;
  blosc2_io_cb* io_cb = blosc2_get_io_cb(context->schunk->storage->io->id);
  if (io_cb == NULL) {
    BLOSC_TRACE_ERROR("Error getting the input/output API");
    return BLOSC2_ERROR_PLUGIN_IO;
  }

  /* The lazy trailer lives right after header + bstarts in the in-memory
     chunk: [int32 nchunk | int64 chunk offset in the frame]. */
  int32_t trailer_offset = BLOSC_EXTENDED_HEADER_LENGTH +
          context->nblocks * (int32_t)sizeof(int32_t);
  int32_t nchunk_lazy = *(const int32_t*)(context->src + trailer_offset);
  int64_t chunk_offset = *(const int64_t*)(context->src + trailer_offset +
          (int32_t)sizeof(int32_t));

  void* fp = NULL;
  int64_t io_pos;
  if (frame->sframe) {
    /* Sparse frame: each chunk is its own file named by chunk number. */
    char* chunkpath = malloc(strlen(frame->urlpath) + 1 + 8 + strlen(".chunk") + 1);
    BLOSC_ERROR_NULL(chunkpath, BLOSC2_ERROR_MEMORY_ALLOC);
    sprintf(chunkpath, "%s/%08X.chunk", frame->urlpath, nchunk_lazy);
    fp = io_cb->open(chunkpath, "rb", context->schunk->storage->io->params);
    free(chunkpath);
    io_pos = offset;
  }
  else {
    /* Contiguous frame: seek to the chunk inside the single frame file. */
    fp = io_cb->open(frame->urlpath, "rb", context->schunk->storage->io->params);
    io_pos = frame->file_offset + chunk_offset + offset;
  }
  if (fp == NULL) {
    BLOSC_TRACE_ERROR("%s", open_error);
    return BLOSC2_ERROR_FILE_OPEN;
  }

  /* The io callback may return its own buffer instead of filling ours;
     in that case copy it over and free the callback's buffer. */
  uint8_t* read_buffer = buffer;
  int64_t rbytes = io_cb->read((void**)&read_buffer, 1, nbytes, io_pos, fp);
  io_cb->close(fp);
  if (read_buffer != buffer) {
    memcpy(buffer, read_buffer, (size_t)nbytes);
    free(read_buffer);
  }
  if (rbytes != nbytes) {
    BLOSC_TRACE_ERROR("%s", read_error);
    return BLOSC2_ERROR_FILE_READ;
  }

  return 0;
}
2555 | | |
2556 | | |
/* Load the compression dictionary of a lazy chunk from disk.
   The dictionary section starts at `bstarts_end` inside the on-disk chunk
   and has the layout [int32 size | raw dict bytes].  On success the
   context owns a freshly-allocated dict_buffer (dict_buffer_owned=true),
   use_dict is set, and for ZSTD a digested DDict is created as well.
   Returns 0 on success or a negative error code. */
static int load_lazy_chunk_dict(blosc2_context* context, blosc_header* header, int32_t bstarts_end) {
  int32_t dict_offset = bstarts_end;
  /* The size field itself must fit within the chunk. */
  if (header->cbytes < dict_offset + (int32_t)sizeof(int32_t)) {
    BLOSC_TRACE_ERROR("Lazy chunk dictionary header exceeds chunk length.");
    return BLOSC2_ERROR_INVALID_HEADER;
  }

  uint8_t dict_size_buf[sizeof(int32_t)];
  int rc = read_lazy_chunk_bytes(context, dict_offset, dict_size_buf, (int32_t)sizeof(dict_size_buf),
                                 "Cannot open frame file for lazy chunk dictionary read.",
                                 "Cannot read lazy chunk dictionary size from disk.");
  if (rc < 0) {
    return rc;
  }

  context->dict_size = sw32_(dict_size_buf);
  if (context->dict_size <= 0 || context->dict_size > BLOSC2_MAXDICTSIZE) {
    BLOSC_TRACE_ERROR("Dictionary size is smaller than minimum or larger than maximum allowed.");
    return BLOSC2_ERROR_CODEC_DICT;
  }
  /* The declared dict bytes must also fit within the chunk. */
  if (header->cbytes < dict_offset + (int32_t)sizeof(int32_t) + context->dict_size) {
    BLOSC_TRACE_ERROR("Lazy chunk dictionary exceeds chunk length.");
    return BLOSC2_ERROR_INVALID_HEADER;
  }

  context->dict_buffer = malloc((size_t)context->dict_size);
  BLOSC_ERROR_NULL(context->dict_buffer, BLOSC2_ERROR_MEMORY_ALLOC);
  context->dict_buffer_owned = true;
  rc = read_lazy_chunk_bytes(context, dict_offset + (int32_t)sizeof(int32_t),
                             context->dict_buffer, context->dict_size,
                             "Cannot open frame file for lazy chunk dictionary read.",
                             "Cannot read lazy chunk dictionary from disk.");
  if (rc < 0) {
    /* Failed read: give back the buffer we just allocated. */
    release_context_dict_buffer(context);
    return rc;
  }

  context->use_dict = 1;
#if defined(HAVE_ZSTD)
  /* During decompression compcode holds the format code (flags >> 5). */
  if (context->compcode == BLOSC_ZSTD_FORMAT) {
    context->dict_ddict = ZSTD_createDDict(context->dict_buffer, context->dict_size);
    if (context->dict_ddict == NULL) {
      release_context_dict_buffer(context);
      BLOSC_TRACE_ERROR("Cannot create ZSTD dictionary for lazy chunk.");
      return BLOSC2_ERROR_CODEC_DICT;
    }
  }
#endif

  return 0;
}
2608 | | |
2609 | | |
/* Prepare a context for decompressing one chunk.
   Parses the already-decoded `header`, validates buffer sizes, special
   chunk types and memcpy'ed chunks, locates the bstarts section, reads an
   optional embedded or lazy dictionary, and builds the per-block metadata
   arrays for variable-length-block (VL) chunks.
   Returns 0 on success or a negative error code. */
static int initialize_context_decompression(blosc2_context* context, blosc_header* header, const void* src,
                                            int32_t srcsize, void* dest, int32_t destsize) {
  int32_t bstarts_end;
  bool vlblocks;

  context->do_compress = 0;
  context->src = (const uint8_t*)src;
  context->srcsize = srcsize;
  context->dest = (uint8_t*)dest;
  context->destsize = destsize;
  context->output_bytes = 0;
  context->end_threads = 0;
  context->vlblock_sources = NULL;
  context->vlblock_dests = NULL;
  /* Drop per-block metadata from any previous run before rebuilding it. */
  if (context->blocknbytes != NULL) {
    free(context->blocknbytes);
    context->blocknbytes = NULL;
  }
  if (context->blockoffsets != NULL) {
    free(context->blockoffsets);
    context->blockoffsets = NULL;
  }
  if (context->blockcbytes != NULL) {
    free(context->blockcbytes);
    context->blockcbytes = NULL;
  }

  int rc = blosc2_initialize_context_from_header(context, header);
  if (rc < 0) {
    return rc;
  }
  /* Start from a clean dictionary state; it is re-read below if needed. */
  clear_context_decompression_dict(context);
  vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0;
  /* Lazy chunks (flag 0x08) only exist with the extended header. */
  bool is_lazy = ((context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH) &&
          (context->blosc2_flags & 0x08u));

  /* Check that we have enough space to decompress */
  if (context->sourcesize > (int32_t)context->destsize) {
    return BLOSC2_ERROR_WRITE_BUFFER;
  }

  if (context->block_maskout != NULL && context->block_maskout_nitems != context->nblocks) {
    BLOSC_TRACE_ERROR("The number of items in block_maskout (%d) must match the number"
                      " of blocks in chunk (%d).",
                      context->block_maskout_nitems, context->nblocks);
    return BLOSC2_ERROR_DATA;
  }

  context->special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK;
  if (context->special_type > BLOSC2_SPECIAL_LASTID) {
    BLOSC_TRACE_ERROR("Unknown special values ID (%d) ",
                      context->special_type);
    return BLOSC2_ERROR_DATA;
  }

  int memcpyed = (context->header_flags & (uint8_t) BLOSC_MEMCPYED);
  /* A memcpy'ed chunk must be exactly header + uncompressed payload. */
  if (memcpyed && (header->cbytes != header->nbytes + context->header_overhead)) {
    BLOSC_TRACE_ERROR("Wrong header info for this memcpyed chunk");
    return BLOSC2_ERROR_DATA;
  }

  if ((header->nbytes == 0) && (header->cbytes == context->header_overhead) &&
      !context->special_type) {
    // A compressed buffer with only a header can only contain a zero-length buffer
    return 0;
  }

  context->bstarts = (int32_t *) (context->src + context->header_overhead);
  bstarts_end = context->header_overhead;
  if (!context->special_type && !memcpyed) {
    /* If chunk is not special or a memcpyed, we do have a bstarts section */
    bstarts_end = (int32_t)(context->header_overhead + (context->nblocks * sizeof(int32_t)));
  }

  if (srcsize < bstarts_end) {
    BLOSC_TRACE_ERROR("`bstarts` exceeds length of source buffer.");
    return BLOSC2_ERROR_READ_BUFFER;
  }
  srcsize -= bstarts_end;

  /* Read optional dictionary if flag set */
  if ((context->blosc2_flags & BLOSC2_USEDICT) && !is_lazy) {
    context->use_dict = 1;
    // The dictionary section is after the bstarts block: [int32 size | raw bytes]
    if (srcsize < (signed)sizeof(int32_t)) {
      BLOSC_TRACE_ERROR("Not enough space to read size of dictionary.");
      return BLOSC2_ERROR_READ_BUFFER;
    }
    srcsize -= sizeof(int32_t);
    // Read dictionary size
    context->dict_size = sw32_(context->src + bstarts_end);
    if (context->dict_size <= 0 || context->dict_size > BLOSC2_MAXDICTSIZE) {
      BLOSC_TRACE_ERROR("Dictionary size is smaller than minimum or larger than maximum allowed.");
      return BLOSC2_ERROR_CODEC_DICT;
    }
    if (srcsize < (int32_t)context->dict_size) {
      BLOSC_TRACE_ERROR("Not enough space to read entire dictionary.");
      return BLOSC2_ERROR_READ_BUFFER;
    }
    srcsize -= context->dict_size;
    // dict_buffer points directly into the source chunk — no copy needed
    context->dict_buffer = (void*)(context->src + bstarts_end + sizeof(int32_t));
    context->dict_buffer_owned = false;
#if defined(HAVE_ZSTD)
    // context->compcode during decompression holds the format code (flags >> 5),
    // so compare against BLOSC_ZSTD_FORMAT (not BLOSC_ZSTD).
    if (context->compcode == BLOSC_ZSTD_FORMAT) {
      context->dict_ddict = ZSTD_createDDict(context->dict_buffer, context->dict_size);
      if (context->dict_ddict == NULL) {
        BLOSC_TRACE_ERROR("Cannot create ZSTD dictionary for chunk.");
        return BLOSC2_ERROR_CODEC_DICT;
      }
    }
#endif // HAVE_ZSTD
    // For LZ4/LZ4HC: dict_buffer and dict_size are sufficient; no digested object needed.
  }
  else if ((context->blosc2_flags & BLOSC2_USEDICT) && is_lazy) {
    /* Lazy chunk: the dictionary lives on disk and must be read in. */
    rc = load_lazy_chunk_dict(context, header, bstarts_end);
    if (rc < 0) {
      return rc;
    }
  }

  if (vlblocks && !context->special_type && !memcpyed) {
    /* Variable-length blocks: build per-block size/offset tables. */
    context->blocknbytes = malloc((size_t)context->nblocks * sizeof(int32_t));
    BLOSC_ERROR_NULL(context->blocknbytes, BLOSC2_ERROR_MEMORY_ALLOC);
    context->blockoffsets = malloc((size_t)context->nblocks * sizeof(int32_t));
    BLOSC_ERROR_NULL(context->blockoffsets, BLOSC2_ERROR_MEMORY_ALLOC);
    context->blockcbytes = malloc((size_t)context->nblocks * sizeof(int32_t));
    BLOSC_ERROR_NULL(context->blockcbytes, BLOSC2_ERROR_MEMORY_ALLOC);

    if (is_lazy) {
      // Lazy VL: block data is on disk, so blocknbytes is unknown at this point.
      // Populate blockcbytes from bstarts differences; blocksize gets max(blockcbytes)
      // as a safe upper bound so tmp buffers are large enough for the lazy block read.
      int32_t max_csize = 0;
      for (int32_t i = 0; i < context->nblocks; ++i) {
        int32_t bstart = sw32_(context->bstarts + i);
        int32_t next_bstart = (i + 1 < context->nblocks) ?
                sw32_(context->bstarts + i + 1) : header->cbytes;
        if (bstart < bstarts_end || next_bstart <= bstart ||
            next_bstart > header->cbytes) {
          BLOSC_TRACE_ERROR("Invalid VL-block offsets in lazy chunk.");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        context->blocknbytes[i] = 0;  // unknown until block is read from disk
        context->blockoffsets[i] = 0;  // unknown
        context->blockcbytes[i] = next_bstart - bstart;
        if (context->blockcbytes[i] > max_csize) {
          max_csize = context->blockcbytes[i];
        }
      }
      context->blocksize = max_csize;
      context->leftover = 0;
    }
    else {
      /* In-memory VL chunk: each block stores its uncompressed size as an
         int32 at its bstart; validate monotonic offsets and that the
         uncompressed sizes sum to the chunk's nbytes. */
      int32_t max_blocksize = 0;
      int32_t total_nbytes = 0;
      int32_t prev_bstart = 0;
      for (int32_t i = 0; i < context->nblocks; ++i) {
        int32_t bstart = sw32_(context->bstarts + i);
        int32_t next_bstart = (i + 1 < context->nblocks) ?
                sw32_(context->bstarts + i + 1) : header->cbytes;
        if (bstart < bstarts_end || bstart <= prev_bstart || next_bstart <= bstart ||
            next_bstart > header->cbytes || bstart > context->srcsize - (int32_t)sizeof(int32_t)) {
          BLOSC_TRACE_ERROR("Invalid VL-block offsets in chunk.");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        context->blockoffsets[i] = total_nbytes;
        context->blocknbytes[i] = sw32_(context->src + bstart);
        context->blockcbytes[i] = next_bstart - bstart;
        if (context->blocknbytes[i] <= 0) {
          BLOSC_TRACE_ERROR("Invalid VL-block uncompressed size in chunk.");
          return BLOSC2_ERROR_INVALID_HEADER;
        }
        total_nbytes += context->blocknbytes[i];
        if (context->blocknbytes[i] > max_blocksize) {
          max_blocksize = context->blocknbytes[i];
        }
        prev_bstart = bstart;
      }
      if (total_nbytes != context->sourcesize) {
        BLOSC_TRACE_ERROR("VL-block sizes do not add up to chunk nbytes.");
        return BLOSC2_ERROR_INVALID_HEADER;
      }
      context->blocksize = max_blocksize;
      context->leftover = 0;
    }
  }

  return 0;
}
2802 | | |
2803 | 52.0k | static int write_compression_header(blosc2_context* context, bool extended_header) { |
2804 | 52.0k | blosc_header header; |
2805 | 52.0k | int dont_split; |
2806 | 52.0k | bool vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0; |
2807 | | |
2808 | 52.0k | if (context->clevel == 0) { |
2809 | | /* Compression level 0 means no compression — dicts serve no purpose here */ |
2810 | 363 | context->use_dict = 0; |
2811 | 363 | } |
2812 | | |
2813 | 52.0k | int dict_training = context->use_dict && (context->dict_cdict == NULL); |
2814 | | |
2815 | 52.0k | context->header_flags = 0; |
2816 | | |
2817 | 52.0k | if (!vlblocks && context->clevel == 0) { |
2818 | | /* Compression level 0 means buffer to be memcpy'ed */ |
2819 | 363 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2820 | 363 | } |
2821 | 52.0k | if (!vlblocks && context->sourcesize < BLOSC_MIN_BUFFERSIZE) { |
2822 | | /* Buffer is too small. Try memcpy'ing. */ |
2823 | 143 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2824 | 143 | } |
2825 | | |
2826 | 52.0k | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
2827 | 52.0k | if (extended_header) { |
2828 | | /* Indicate that we are building an extended header */ |
2829 | 52.0k | context->header_overhead = BLOSC_EXTENDED_HEADER_LENGTH; |
2830 | 52.0k | context->header_flags |= (BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE); |
2831 | | /* Store filter pipeline info at the end of the header */ |
2832 | 52.0k | if (dict_training || memcpyed) { |
2833 | 495 | context->bstarts = NULL; |
2834 | 495 | context->output_bytes = context->header_overhead; |
2835 | 51.5k | } else { |
2836 | 51.5k | context->bstarts = (int32_t*)(context->dest + context->header_overhead); |
2837 | 51.5k | context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks; |
2838 | 51.5k | } |
2839 | 52.0k | } else { |
2840 | | // Regular header |
2841 | 0 | context->header_overhead = BLOSC_MIN_HEADER_LENGTH; |
2842 | 0 | if (memcpyed) { |
2843 | 0 | context->bstarts = NULL; |
2844 | 0 | context->output_bytes = context->header_overhead; |
2845 | 0 | } else { |
2846 | 0 | context->bstarts = (int32_t *) (context->dest + context->header_overhead); |
2847 | 0 | context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks; |
2848 | 0 | } |
2849 | 0 | } |
2850 | | |
2851 | | /* If the header + block starts don't fit in destsize, fall back to memcpy */ |
2852 | 52.0k | if (!memcpyed && context->output_bytes > context->destsize) { |
2853 | 0 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2854 | 0 | memcpyed = true; |
2855 | 0 | context->bstarts = NULL; |
2856 | 0 | context->output_bytes = context->header_overhead; |
2857 | 0 | } |
2858 | | |
2859 | | // when memcpyed bit is set, there is no point in dealing with others |
2860 | 52.0k | if (!memcpyed) { |
2861 | 51.5k | if (context->filter_flags & BLOSC_DOSHUFFLE) { |
2862 | | /* Byte-shuffle is active */ |
2863 | 20.9k | context->header_flags |= BLOSC_DOSHUFFLE; |
2864 | 20.9k | } |
2865 | | |
2866 | 51.5k | if (context->filter_flags & BLOSC_DOBITSHUFFLE) { |
2867 | | /* Bit-shuffle is active */ |
2868 | 16.7k | context->header_flags |= BLOSC_DOBITSHUFFLE; |
2869 | 16.7k | } |
2870 | | |
2871 | 51.5k | if (context->filter_flags & BLOSC_DODELTA) { |
2872 | | /* Delta is active */ |
2873 | 0 | context->header_flags |= BLOSC_DODELTA; |
2874 | 0 | } |
2875 | | |
2876 | 51.5k | dont_split = vlblocks || !split_block(context, context->typesize, |
2877 | 51.5k | context->blocksize); |
2878 | | |
2879 | | /* dont_split is in bit 4 */ |
2880 | 51.5k | context->header_flags |= dont_split << 4; |
2881 | | /* codec starts at bit 5 */ |
2882 | 51.5k | uint8_t compformat = compcode_to_compformat(context->compcode); |
2883 | 51.5k | context->header_flags |= compformat << 5; |
2884 | 51.5k | } |
2885 | | |
2886 | | // Create blosc header and store to dest |
2887 | 52.0k | blosc2_intialize_header_from_context(context, &header, extended_header); |
2888 | | |
2889 | 52.0k | memcpy(context->dest, &header, (extended_header) ? |
2890 | 52.0k | BLOSC_EXTENDED_HEADER_LENGTH : BLOSC_MIN_HEADER_LENGTH); |
2891 | | |
2892 | 52.0k | return 1; |
2893 | 52.0k | } |
2894 | | |
2895 | | |
2896 | 52.0k | static int blosc_compress_context(blosc2_context* context) { |
2897 | 52.0k | int ntbytes = 0; |
2898 | 52.0k | blosc_timestamp_t last, current; |
2899 | 52.0k | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
2900 | | |
2901 | 52.0k | blosc_set_timestamp(&last); |
2902 | | |
2903 | 52.0k | if (!memcpyed) { |
2904 | | /* Do the actual compression */ |
2905 | 51.5k | ntbytes = do_job(context); |
2906 | 51.5k | if (ntbytes < 0) { |
2907 | 0 | return ntbytes; |
2908 | 0 | } |
2909 | 51.5k | if (ntbytes == 0) { |
2910 | | // Try out with a memcpy later on (last chance for fitting src buffer in dest). |
2911 | 13.7k | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2912 | 13.7k | memcpyed = true; |
2913 | 13.7k | } |
2914 | 51.5k | } |
2915 | | |
2916 | 52.0k | int dont_split = (context->header_flags & 0x10) >> 4; |
2917 | 52.0k | int nstreams = context->nblocks; |
2918 | 52.0k | if (!dont_split) { |
2919 | | // When splitting, the number of streams is computed differently |
2920 | 21.4k | if (context->leftover) { |
2921 | 292 | nstreams = (context->nblocks - 1) * context->typesize + 1; |
2922 | 292 | } |
2923 | 21.1k | else { |
2924 | 21.1k | nstreams *= context->typesize; |
2925 | 21.1k | } |
2926 | 21.4k | } |
2927 | | |
2928 | 52.0k | if (memcpyed) { |
2929 | 14.2k | if (context->sourcesize + context->header_overhead > context->destsize) { |
2930 | | /* We are exceeding maximum output size */ |
2931 | 0 | ntbytes = 0; |
2932 | 0 | } |
2933 | 14.2k | else { |
2934 | 14.2k | context->output_bytes = context->header_overhead; |
2935 | 14.2k | ntbytes = do_job(context); |
2936 | 14.2k | if (ntbytes < 0) { |
2937 | 0 | return ntbytes; |
2938 | 0 | } |
2939 | | // Success! update the memcpy bit in header |
2940 | 14.2k | context->dest[BLOSC2_CHUNK_FLAGS] = context->header_flags; |
2941 | | // and clear the memcpy bit in context (for next reuse) |
2942 | 14.2k | context->header_flags &= ~(uint8_t)BLOSC_MEMCPYED; |
2943 | 14.2k | } |
2944 | 14.2k | } |
2945 | 37.7k | else { |
2946 | | // Check whether we have a run for the whole chunk |
2947 | 37.7k | int dict_training = context->use_dict && (context->dict_cdict == NULL); |
2948 | 37.7k | int start_csizes = context->header_overhead + 4 * context->nblocks; |
2949 | 37.7k | if (!dict_training && ntbytes == (int)(start_csizes + nstreams * sizeof(int32_t))) { |
2950 | | // The streams are all zero runs (by construction). Encode it... |
2951 | 4.68k | context->dest[BLOSC2_CHUNK_BLOSC2_FLAGS] |= BLOSC2_SPECIAL_ZERO << 4; |
2952 | | // ...and assign the new chunk length |
2953 | 4.68k | ntbytes = context->header_overhead; |
2954 | 4.68k | } |
2955 | 37.7k | } |
2956 | | |
2957 | | /* Set the number of compressed bytes in header */ |
2958 | 52.0k | _sw32(context->dest + BLOSC2_CHUNK_CBYTES, ntbytes); |
2959 | 52.0k | if (context->blosc2_flags & BLOSC2_INSTR_CODEC) { |
2960 | 0 | dont_split = (context->header_flags & 0x10) >> 4; |
2961 | 0 | int32_t blocksize = dont_split ? (int32_t)sizeof(blosc2_instr) : (int32_t)sizeof(blosc2_instr) * context->typesize; |
2962 | 0 | _sw32(context->dest + BLOSC2_CHUNK_NBYTES, nstreams * (int32_t)sizeof(blosc2_instr)); |
2963 | 0 | _sw32(context->dest + BLOSC2_CHUNK_BLOCKSIZE, blocksize); |
2964 | 0 | } |
2965 | | |
2966 | | /* Set the number of bytes in dest buffer (might be useful for tuner) */ |
2967 | 52.0k | context->destsize = ntbytes; |
2968 | | |
2969 | 52.0k | if (context->tuner_params != NULL) { |
2970 | 0 | blosc_set_timestamp(¤t); |
2971 | 0 | double ctime = blosc_elapsed_secs(last, current); |
2972 | 0 | int rc; |
2973 | 0 | if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) { |
2974 | 0 | rc = blosc_stune_update(context, ctime); |
2975 | 0 | } else { |
2976 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
2977 | 0 | if (g_tuners[i].id == context->tuner_id) { |
2978 | 0 | if (g_tuners[i].update == NULL) { |
2979 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
2980 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
2981 | 0 | return BLOSC2_ERROR_FAILURE; |
2982 | 0 | } |
2983 | 0 | } |
2984 | 0 | rc = g_tuners[i].update(context, ctime); |
2985 | 0 | goto urtunersuccess; |
2986 | 0 | } |
2987 | 0 | } |
2988 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id); |
2989 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2990 | 0 | urtunersuccess:; |
2991 | 0 | } |
2992 | 0 | if (rc < 0) { |
2993 | 0 | BLOSC_TRACE_ERROR("Error in tuner update func\n"); |
2994 | 0 | return BLOSC2_ERROR_TUNER; |
2995 | 0 | } |
2996 | 0 | } |
2997 | | |
2998 | 52.0k | return ntbytes; |
2999 | 52.0k | } |
3000 | | |
3001 | | |
3002 | 0 | static int blosc_compress_context_without_dict(blosc2_context* context) { |
3003 | 0 | int saved_use_dict = context->use_dict; |
3004 | 0 | context->use_dict = 0; |
3005 | 0 | context->dest[BLOSC2_CHUNK_BLOSC2_FLAGS] &= ~(uint8_t)BLOSC2_USEDICT; |
3006 | 0 | int cbytes = blosc_compress_context(context); |
3007 | 0 | context->use_dict = saved_use_dict; |
3008 | 0 | return cbytes; |
3009 | 0 | } |
3010 | | |
3011 | | |
/* The public secure routine for compression with context.
 *
 * Compresses `srcsize` bytes from `src` into `dest` (capacity `destsize`)
 * using the parameters stored in `context`.  Returns the size of the chunk
 * written to `dest`, or a negative error code.
 *
 * Dict mode: when context->use_dict is set and no cdict exists yet
 * (dict_cdict == NULL), the first blosc_compress_context() call acts as a
 * training pass that leaves raw filtered samples in `dest`.  A dictionary is
 * then built from those samples (raw bytes for LZ4/LZ4HC; ZDICT training for
 * ZSTD), embedded into the chunk right after bstarts, and a second, real
 * compression pass is run with the dictionary loaded. */
int blosc2_compress_ctx(blosc2_context* context, const void* src, int32_t srcsize,
                        void* dest, int32_t destsize) {
  int error, cbytes;

  if (context->do_compress != 1) {
    BLOSC_TRACE_ERROR("Context is not meant for compression. Giving up.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  error = initialize_context_compression(
      context, src, srcsize, dest, destsize,
      context->clevel, context->filters, context->filters_meta,
      context->typesize, context->compcode, context->blocksize,
      context->new_nthreads, context->nthreads, context->splitmode,
      context->tuner_id, context->tuner_params, context->schunk);
  if (error <= 0) {
    return error;
  }

  /* Write the extended header */
  error = write_compression_header(context, true);
  if (error < 0) {
    return error;
  }

  /* First compression pass.  In dict mode this is the training pass that
   * only gathers raw samples into `dest`. */
  cbytes = blosc_compress_context(context);
  if (cbytes < 0) {
    return cbytes;
  }

  if (context->use_dict && context->dict_cdict == NULL) {
    /* blosc_compress_context() overwrites context->destsize with the training-pass output
     * size. Restore it so that the real compression pass has the correct output-buffer size. */
    context->destsize = destsize;

    bool is_lz4 = (context->compcode == BLOSC_LZ4 || context->compcode == BLOSC_LZ4HC);
    if (!is_lz4 && context->compcode != BLOSC_ZSTD) {
      const char* compname;
      compname = clibcode_to_clibname(context->compcode);
      BLOSC_TRACE_ERROR("Codec %s does not support dicts. Giving up.",
                        compname);
      return BLOSC2_ERROR_CODEC_DICT;
    }
    /* NOTE(review): if compcode == BLOSC_ZSTD but the library was built
     * without HAVE_ZSTD, neither dict branch below is compiled, so `cbytes`
     * would keep the training-pass size.  Presumably ZSTD is rejected
     * earlier in no-ZSTD builds — confirm. */

    // Build the dictionary out of the filters outcome and compress with it.
    // For LZ4/LZ4HC the raw samples are used directly (no training algorithm).
    // For ZSTD, ZDICT_trainFromBuffer() is used.
    int32_t dict_maxsize = BLOSC2_MAXDICTSIZE;
    // Do not make the dict more than 5% larger than uncompressed buffer
    if (dict_maxsize > srcsize / 20) {
      dict_maxsize = srcsize / 20;
    }
    /* The training pass left the raw samples right after the header. */
    void* samples_buffer = context->dest + context->header_overhead;
    unsigned nblocks = (unsigned)context->nblocks;
    int dont_split = (context->header_flags & 0x10) >> 4;
    if (!dont_split) {
      /* Split mode produces one stream per typesize byte, so there are
       * typesize times more samples. */
      nblocks = nblocks * context->typesize;
    }
    if (nblocks < 8) {
      /* Keep a minimum sample count for the training below. */
      nblocks = 8;
    }

    /* Only a fraction (1/16) of each stream is used as a training sample. */
    unsigned sample_fraction = 16;
    size_t sample_size = context->sourcesize / nblocks / sample_fraction;

    // When the data is too small to produce useful dict samples,
    // fall back to plain compression without a dict.
    if (dict_maxsize < BLOSC2_MINUSEFULDICT || sample_size == 0) {
      BLOSC_TRACE_WARNING("Data too small for dict training (dict_maxsize=%d, sample_size=%zu)."
                          " Falling back to plain compression.", dict_maxsize, sample_size);
      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks;
      cbytes = blosc_compress_context_without_dict(context);
    }
    else if (is_lz4) {
      // LZ4/LZ4HC: use raw sample data directly as the dictionary (no training step).
      int32_t dict_actual_size = (int32_t)(nblocks * sample_size);
      if (dict_actual_size > dict_maxsize) {
        dict_actual_size = dict_maxsize;
      }

      // Reset bstarts and embed dict in the output buffer.
      // Layout after header: [bstarts | dict_size(int32) | dict_data | blocks]
      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks;
      /* Write dict size */
      _sw32(context->dest + context->output_bytes, dict_actual_size);
      context->output_bytes += (int32_t)sizeof(int32_t);
      /* Copy dict bytes */
      context->dict_buffer = context->dest + context->output_bytes;
      memcpy(context->dict_buffer, samples_buffer, (size_t)dict_actual_size);
      /* Build the stream used as cdict (pre-loaded with the dict bytes) */
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_streamHC_t* lz4hc_cdict = LZ4_createStreamHC();
        LZ4_loadDictHC(lz4hc_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4hc_cdict;
      } else {
        LZ4_stream_t* lz4_cdict = LZ4_createStream();
        LZ4_loadDict(lz4_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4_cdict;
      }
      context->output_bytes += dict_actual_size;
      context->dict_size = dict_actual_size;

      /* Compress with dict */
      cbytes = blosc_compress_context(context);

      // Invalidate the dictionary so the context can be reused for the next chunk
      context->dict_buffer = NULL;
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_freeStreamHC((LZ4_streamHC_t*)context->dict_cdict);
      } else {
        LZ4_freeStream((LZ4_stream_t*)context->dict_cdict);
      }
      context->dict_cdict = NULL;
    }
#ifdef HAVE_ZSTD
    else {
      // Populate the samples sizes for training the dictionary
      size_t* samples_sizes = malloc(nblocks * sizeof(size_t));
      BLOSC_ERROR_NULL(samples_sizes, BLOSC2_ERROR_MEMORY_ALLOC);
      for (size_t i = 0; i < nblocks; i++) {
        samples_sizes[i] = sample_size;
      }

      // Train from samples
      void* dict_buffer = malloc(dict_maxsize);
      if (dict_buffer == NULL) {
        free(samples_sizes);
        BLOSC_ERROR_NULL(dict_buffer, BLOSC2_ERROR_MEMORY_ALLOC);
      }
      int32_t dict_actual_size = (int32_t)ZDICT_trainFromBuffer(
          dict_buffer, dict_maxsize,
          samples_buffer, samples_sizes, nblocks);

      // TODO: experiment with parameters of low-level fast cover algorithm
      // Note that this API is still unstable. See: https://github.com/facebook/zstd/issues/1599
      // ZDICT_fastCover_params_t fast_cover_params;
      // memset(&fast_cover_params, 0, sizeof(fast_cover_params));
      // fast_cover_params.d = nblocks;
      // fast_cover_params.steps = 4;
      // fast_cover_params.zParams.compressionLevel = context->clevel;
      // size_t dict_actual_size = ZDICT_optimizeTrainFromBuffer_fastCover(
      //   dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks,
      //   &fast_cover_params);

      free(samples_sizes);
      if (ZDICT_isError(dict_actual_size) != ZSTD_error_no_error) {
        /* Training can legitimately fail on small/uniform data; fall back
         * rather than erroring out. */
        BLOSC_TRACE_WARNING("ZDICT_trainFromBuffer() failed: '%s'."
                            " Falling back to plain compression.",
                            ZDICT_getErrorName(dict_actual_size));
        free(dict_buffer);
        context->bstarts = (int32_t*)(context->dest + context->header_overhead);
        context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks;
        cbytes = blosc_compress_context_without_dict(context);
      }
      else {
        assert(dict_actual_size > 0);

        // Update bytes counter and pointers to bstarts for the new compressed buffer
        context->bstarts = (int32_t*)(context->dest + context->header_overhead);
        context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks;
        /* Write the size of trained dict at the end of bstarts */
        _sw32(context->dest + context->output_bytes, (int32_t)dict_actual_size);
        context->output_bytes += sizeof(int32_t);
        /* Write the trained dict afterwards */
        context->dict_buffer = context->dest + context->output_bytes;
        memcpy(context->dict_buffer, dict_buffer, (unsigned int)dict_actual_size);
        context->dict_cdict = ZSTD_createCDict(dict_buffer, dict_actual_size, 1);  // TODO: use get_accel()
        free(dict_buffer);  // the dictionary is copied in the header now
        context->output_bytes += (int32_t)dict_actual_size;
        context->dict_size = dict_actual_size;

        /* Compress with dict */
        cbytes = blosc_compress_context(context);

        // Invalidate the dictionary for compressing other chunks using the same context
        context->dict_buffer = NULL;
        ZSTD_freeCDict(context->dict_cdict);
        context->dict_cdict = NULL;
      }  // ZDICT_isError
    }  // ZSTD else branch
#endif  // HAVE_ZSTD
  }

  return cbytes;
}
3199 | | |
/* Helper for reorder_vl_blocks_output: sort (bstart, logical_index) pairs by bstart */
typedef struct { int32_t bstart; int32_t idx; } vl_bstart_entry_t;
/* qsort comparator: orders entries by ascending bstart.  Uses explicit
 * comparisons (never subtraction) so there is no risk of int32 overflow. */
static int cmp_vl_bstart(const void *a, const void *b) {
  const vl_bstart_entry_t *ea = a;
  const vl_bstart_entry_t *eb = b;
  if (ea->bstart < eb->bstart) {
    return -1;
  }
  if (ea->bstart > eb->bstart) {
    return 1;
  }
  return 0;
}
3207 | | |
/* Ensure that VL-block compressed data is stored in block-index order.
 *
 * The decompressor derives each block's compressed span as
 * bstarts[i+1] - bstarts[i], which requires bstarts to be monotonically
 * increasing. Multi-threaded compression writes blocks in finish order
 * (non-deterministic), so bstarts may be out of order. This function
 * rearranges the compressed block data in the output buffer and fixes
 * bstarts so that block 0 comes first, block 1 second, and so on.
 *
 * Returns 0 on success (including the nothing-to-do fast paths) or
 * BLOSC2_ERROR_MEMORY_ALLOC if the scratch buffers cannot be allocated.
 */
static int reorder_vl_blocks_output(blosc2_context *context) {
  int32_t nblocks = context->nblocks;
  if (nblocks <= 1) {
    return 0;  /* a single block is trivially ordered */
  }

  int32_t output_bytes = context->output_bytes;
  int32_t *bstarts = context->bstarts;
  uint8_t *dest = context->dest;

  /* Fast path: blocks are already in index order (serial or lucky MT) */
  bool ordered = true;
  for (int32_t i = 1; i < nblocks; i++) {
    if (sw32_(bstarts + i) < sw32_(bstarts + i - 1)) {
      ordered = false;
      break;
    }
  }
  if (ordered) {
    return 0;
  }

  /* When a dict is embedded, block data starts after [bstarts | dict_size | dict_data].
   * Use the minimum bstart to locate the actual start of the compressed block region;
   * this avoids touching (and corrupting) the dict bytes. */
  int32_t data_start = sw32_(bstarts);
  for (int32_t i = 1; i < nblocks; i++) {
    int32_t bs = sw32_(bstarts + i);
    if (bs < data_start) {
      data_start = bs;
    }
  }

  int32_t data_size = output_bytes - data_start;
  vl_bstart_entry_t *entries = malloc((size_t)nblocks * sizeof(vl_bstart_entry_t));
  int32_t *block_cbytes = malloc((size_t)nblocks * sizeof(int32_t));
  uint8_t *temp = malloc((size_t)data_size);
  if (entries == NULL || block_cbytes == NULL || temp == NULL) {
    /* free(NULL) is a no-op, so a partial allocation failure is safe here */
    free(entries);
    free(block_cbytes);
    free(temp);
    return BLOSC2_ERROR_MEMORY_ALLOC;
  }

  for (int32_t i = 0; i < nblocks; i++) {
    entries[i].bstart = sw32_(bstarts + i);
    entries[i].idx = i;
  }

  /* Sort entries by physical position to compute each block's compressed size */
  qsort(entries, (size_t)nblocks, sizeof(vl_bstart_entry_t), cmp_vl_bstart);

  /* A block's compressed size is the gap to the next physical block
   * (or to the end of the output for the physically-last block). */
  for (int32_t j = 0; j < nblocks; j++) {
    int32_t next = (j + 1 < nblocks) ? entries[j + 1].bstart : output_bytes;
    block_cbytes[entries[j].idx] = next - entries[j].bstart;
  }

  /* Snapshot compressed block data (not the dict) so we can safely overwrite dest */
  memcpy(temp, dest + data_start, (size_t)data_size);

  /* Write blocks to dest in logical index order and update bstarts */
  int32_t cur_pos = data_start;
  for (int32_t i = 0; i < nblocks; i++) {
    int32_t old_pos = sw32_(bstarts + i);
    _sw32(bstarts + i, cur_pos);
    memcpy(dest + cur_pos, temp + (old_pos - data_start), (size_t)block_cbytes[i]);
    cur_pos += block_cbytes[i];
  }

  free(entries);
  free(block_cbytes);
  free(temp);
  return 0;
}
3291 | | |
3292 | | |
3293 | 0 | int blosc2_vlchunk_get_nblocks(const void* src, int32_t srcsize, int32_t* nblocks) { |
3294 | 0 | if (src == NULL || nblocks == NULL) { |
3295 | 0 | BLOSC_TRACE_ERROR("src and nblocks must not be NULL."); |
3296 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3297 | 0 | } |
3298 | 0 | blosc_header header; |
3299 | 0 | int result = read_chunk_header((const uint8_t*)src, srcsize, true, &header); |
3300 | 0 | if (result < 0) { |
3301 | 0 | return result; |
3302 | 0 | } |
3303 | 0 | if ((header.blosc2_flags2 & BLOSC2_VL_BLOCKS) == 0) { |
3304 | 0 | BLOSC_TRACE_ERROR("Chunk does not use VL blocks."); |
3305 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3306 | 0 | } |
3307 | | /* For VL-block chunks the blocksize field in the header stores nblocks. */ |
3308 | 0 | *nblocks = header.blocksize; |
3309 | 0 | return 0; |
3310 | 0 | } |
3311 | | |
3312 | | |
/* Compress `nblocks` independent variable-length (VL) blocks into one chunk.
 *
 * Parameters:
 *   context  - compression context (must be a compression context).
 *   srcs     - array of `nblocks` pointers to the input blocks (non-NULL).
 *   srcsizes - array of `nblocks` block sizes (each > 0).
 *   nblocks  - number of VL blocks (> 0).
 *   dest     - output buffer; destsize is its capacity.
 *
 * Returns the chunk size written to `dest`, or a negative error code.
 *
 * In dict mode the first blosc_compress_context() call is a training pass
 * that concatenates the raw VL blocks after the header; the dictionary is
 * built from those samples and a second, real compression pass follows.
 * Finally the output is reordered so bstarts is monotonically increasing,
 * as the decompressor requires.
 */
int blosc2_vlcompress_ctx(blosc2_context* context, const void* const* srcs, const int32_t* srcsizes,
                          int32_t nblocks, void* dest, int32_t destsize) {
  int error, cbytes;
  int32_t max_blocksize = 0;
  int64_t srcsize = 0;

  if (context->do_compress != 1) {
    BLOSC_TRACE_ERROR("Context is not meant for compression. Giving up.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }
  if (srcs == NULL || srcsizes == NULL || nblocks <= 0) {
    BLOSC_TRACE_ERROR("Invalid sources for VL-block compression.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  /* Validate every block and accumulate the total input size (64-bit to
   * detect overflow of the 32-bit limit as we go). */
  for (int32_t i = 0; i < nblocks; ++i) {
    if (srcs[i] == NULL || srcsizes[i] <= 0) {
      BLOSC_TRACE_ERROR("Invalid VL block at index %d.", i);
      return BLOSC2_ERROR_INVALID_PARAM;
    }
    srcsize += srcsizes[i];
    if (srcsize > BLOSC2_MAX_BUFFERSIZE) {
      BLOSC_TRACE_ERROR("Input buffer size cannot exceed %d bytes.", BLOSC2_MAX_BUFFERSIZE);
      return BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED;
    }
    if (srcsizes[i] > max_blocksize) {
      max_blocksize = srcsizes[i];
    }
  }

  /* src is NULL here: VL mode feeds input through context->vlblock_sources. */
  error = initialize_context_compression(
      context, NULL, (int32_t)srcsize, dest, destsize,
      context->clevel, context->filters, context->filters_meta,
      context->typesize, context->compcode, max_blocksize,
      context->new_nthreads, context->nthreads, context->splitmode,
      context->tuner_id, context->tuner_params, context->schunk);
  if (error <= 0) {
    return error;
  }

  context->blosc2_flags2 = BLOSC2_VL_BLOCKS;
  /* For VL chunks the header blocksize field stores nblocks instead. */
  context->header_blocksize = nblocks;
  context->nblocks = nblocks;
  context->leftover = 0;
  context->blocksize = max_blocksize;
  context->vlblock_sources = (const uint8_t**)srcs;
  /* NOTE(review): blocknbytes is (re)allocated on every call; presumably
   * freed when the context is released — confirm the context teardown frees
   * any previous allocation to avoid a leak on context reuse. */
  context->blocknbytes = malloc((size_t)nblocks * sizeof(int32_t));
  BLOSC_ERROR_NULL(context->blocknbytes, BLOSC2_ERROR_MEMORY_ALLOC);
  memcpy(context->blocknbytes, srcsizes, (size_t)nblocks * sizeof(int32_t));

  error = write_compression_header(context, true);
  if (error < 0) {
    return error;
  }

  /* First pass: real compression, or (in dict mode) the training pass that
   * stores the raw VL blocks after the header. */
  cbytes = blosc_compress_context(context);
  if (cbytes < 0) {
    context->vlblock_sources = NULL;
    return cbytes;
  }

  /* blosc_compress_context() overwrites context->destsize with the training-pass output
   * size (which is tiny — just the raw sample data). Restore it so that the real
   * compression pass below has the correct output-buffer size. */
  context->destsize = destsize;

#ifdef HAVE_ZSTD
  if (context->use_dict && context->dict_cdict == NULL) {
    bool is_lz4 = (context->compcode == BLOSC_LZ4 || context->compcode == BLOSC_LZ4HC);
    // The first blosc_compress_context() above was a dict-training pass that stored
    // raw (uncompressed) VL block data at dest+header_overhead as samples. Now build
    // the dictionary from those samples and do the real compression pass.
    int32_t dict_maxsize = BLOSC2_MAXDICTSIZE;
    // Do not make the dict more than 5% of the uncompressed size
    if (dict_maxsize > (int32_t)srcsize / 20) {
      dict_maxsize = (int32_t)srcsize / 20;
    }
    // Mirror the sample_size guard from blosc2_compress_ctx: if the average
    // per-block sample is too small for useful dict training, fall back.
    size_t vl_sample_size = (nblocks > 0) ? ((size_t)srcsize / (size_t)nblocks / 16) : 0;
    if (dict_maxsize < BLOSC2_MINUSEFULDICT || nblocks < 8 || vl_sample_size == 0) {
      // Data is too small or too few VL blocks to build a useful dictionary;
      // fall back to plain compression without a dict.
      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
      context->vlblock_sources = (const uint8_t**)srcs;
      cbytes = blosc_compress_context_without_dict(context);
      context->vlblock_sources = NULL;
    }
    else if (is_lz4) {
      // LZ4/LZ4HC: use the concatenated raw VL block data directly as the dictionary.
      void* samples_buffer = context->dest + context->header_overhead;
      int32_t total_raw = 0;
      for (int32_t i = 0; i < nblocks; i++) {
        total_raw += context->blocknbytes[i];
      }
      int32_t dict_actual_size = total_raw < dict_maxsize ? total_raw : dict_maxsize;

      /* Layout after header: [bstarts | dict_size(int32) | dict_data | blocks] */
      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
      _sw32(context->dest + context->output_bytes, dict_actual_size);
      context->output_bytes += (int32_t)sizeof(int32_t);
      context->dict_buffer = context->dest + context->output_bytes;
      memcpy(context->dict_buffer, samples_buffer, (size_t)dict_actual_size);
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_streamHC_t* lz4hc_cdict = LZ4_createStreamHC();
        LZ4_loadDictHC(lz4hc_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4hc_cdict;
      } else {
        LZ4_stream_t* lz4_cdict = LZ4_createStream();
        LZ4_loadDict(lz4_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4_cdict;
      }
      context->output_bytes += dict_actual_size;
      context->dict_size = dict_actual_size;

      /* Real compression pass with the dict loaded */
      context->vlblock_sources = (const uint8_t**)srcs;
      cbytes = blosc_compress_context(context);
      context->vlblock_sources = NULL;

      /* Invalidate the dictionary so the context can be reused */
      context->dict_buffer = NULL;
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_freeStreamHC((LZ4_streamHC_t*)context->dict_cdict);
      } else {
        LZ4_freeStream((LZ4_stream_t*)context->dict_cdict);
      }
      context->dict_cdict = NULL;
    }
    else {
      // ZSTD: use ZDICT_trainFromBuffer for dictionary training.
      // The training pass left all VL blocks concatenated at dest+header_overhead.
      // Use the actual per-block sizes as ZDICT sample sizes so that each VL block
      // is treated as a complete, independent training example.
      void* samples_buffer = context->dest + context->header_overhead;
      size_t* samples_sizes = malloc((size_t)nblocks * sizeof(size_t));
      if (samples_sizes == NULL) {
        context->vlblock_sources = NULL;
        return BLOSC2_ERROR_MEMORY_ALLOC;
      }
      for (int32_t i = 0; i < nblocks; i++) {
        samples_sizes[i] = (size_t)context->blocknbytes[i];
      }
      void* dict_buffer = malloc((size_t)dict_maxsize);
      if (dict_buffer == NULL) {
        free(samples_sizes);
        context->vlblock_sources = NULL;
        return BLOSC2_ERROR_MEMORY_ALLOC;
      }
      int32_t dict_actual_size = (int32_t)ZDICT_trainFromBuffer(
          dict_buffer, (size_t)dict_maxsize, samples_buffer, samples_sizes, (unsigned)nblocks);
      free(samples_sizes);
      if (ZDICT_isError(dict_actual_size) != ZSTD_error_no_error) {
        // Training failed (e.g. data is too small for a useful ZSTD dict).
        // Fall back to plain compression rather than returning an error.
        BLOSC_TRACE_WARNING("ZDICT_trainFromBuffer() failed ('%s'); falling back to plain compression.",
                            ZDICT_getErrorName(dict_actual_size));
        free(dict_buffer);
        context->bstarts = (int32_t*)(context->dest + context->header_overhead);
        context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
        context->vlblock_sources = (const uint8_t**)srcs;
        cbytes = blosc_compress_context_without_dict(context);
        context->vlblock_sources = NULL;
      }
      else {
        // Set up bstarts and embed the trained dictionary in the output buffer.
        // Layout after header: [bstarts | dict_size(int32) | dict_data | compressed blocks]
        context->bstarts = (int32_t*)(context->dest + context->header_overhead);
        context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
        _sw32(context->dest + context->output_bytes, dict_actual_size);
        context->output_bytes += (int32_t)sizeof(int32_t);
        context->dict_buffer = context->dest + context->output_bytes;
        memcpy(context->dict_buffer, dict_buffer, (size_t)dict_actual_size);
        context->dict_cdict = ZSTD_createCDict(dict_buffer, (size_t)dict_actual_size, 1);
        free(dict_buffer);
        context->output_bytes += dict_actual_size;
        context->dict_size = dict_actual_size;

        /* Actual compression pass using the trained dictionary */
        context->vlblock_sources = (const uint8_t**)srcs;
        cbytes = blosc_compress_context(context);
        context->vlblock_sources = NULL;

        /* Invalidate the dictionary so the context can be reused for the next chunk */
        context->dict_buffer = NULL;
        ZSTD_freeCDict(context->dict_cdict);
        context->dict_cdict = NULL;
      }
    }
    if (cbytes < 0) {
      return cbytes;
    }
  }
#else
  /* No-ZSTD build: only the LZ4/LZ4HC raw-dict path is available; anything
   * else falls back to plain compression. */
  if (context->use_dict && context->dict_cdict == NULL) {
    bool is_lz4 = (context->compcode == BLOSC_LZ4 || context->compcode == BLOSC_LZ4HC);
    int32_t dict_maxsize = BLOSC2_MAXDICTSIZE;
    if (dict_maxsize > (int32_t)srcsize / 20) {
      dict_maxsize = (int32_t)srcsize / 20;
    }
    size_t vl_sample_size_lz4 = (nblocks > 0) ? ((size_t)srcsize / (size_t)nblocks / 16) : 0;
    if (!is_lz4 || dict_maxsize < BLOSC2_MINUSEFULDICT || nblocks < 8 || vl_sample_size_lz4 == 0) {
      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
      context->vlblock_sources = (const uint8_t**)srcs;
      cbytes = blosc_compress_context_without_dict(context);
      context->vlblock_sources = NULL;
    }
    else {
      void* samples_buffer = context->dest + context->header_overhead;
      int32_t total_raw = 0;
      for (int32_t i = 0; i < nblocks; i++) {
        total_raw += context->blocknbytes[i];
      }
      int32_t dict_actual_size = total_raw < dict_maxsize ? total_raw : dict_maxsize;

      context->bstarts = (int32_t*)(context->dest + context->header_overhead);
      context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * nblocks;
      _sw32(context->dest + context->output_bytes, dict_actual_size);
      context->output_bytes += (int32_t)sizeof(int32_t);
      context->dict_buffer = context->dest + context->output_bytes;
      memcpy(context->dict_buffer, samples_buffer, (size_t)dict_actual_size);
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_streamHC_t* lz4hc_cdict = LZ4_createStreamHC();
        LZ4_loadDictHC(lz4hc_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4hc_cdict;
      } else {
        LZ4_stream_t* lz4_cdict = LZ4_createStream();
        LZ4_loadDict(lz4_cdict, (const char*)context->dict_buffer, dict_actual_size);
        context->dict_cdict = lz4_cdict;
      }
      context->output_bytes += dict_actual_size;
      context->dict_size = dict_actual_size;

      context->vlblock_sources = (const uint8_t**)srcs;
      cbytes = blosc_compress_context(context);
      context->vlblock_sources = NULL;

      context->dict_buffer = NULL;
      if (context->compcode == BLOSC_LZ4HC) {
        LZ4_freeStreamHC((LZ4_streamHC_t*)context->dict_cdict);
      } else {
        LZ4_freeStream((LZ4_stream_t*)context->dict_cdict);
      }
      context->dict_cdict = NULL;
    }
    if (cbytes < 0) {
      return cbytes;
    }
  }
#endif // HAVE_ZSTD

  context->vlblock_sources = NULL;

  /* Multi-threaded compression may have written blocks in non-index order.
   * Rearrange the output so that bstarts is monotonically increasing, as
   * required by the decompressor. */
  error = reorder_vl_blocks_output(context);
  if (error < 0) {
    return error;
  }

  return cbytes;
}
3576 | | |
3577 | | |
3578 | | void build_filters(const int doshuffle, const int delta, |
3579 | 2.74k | const int32_t typesize, uint8_t* filters) { |
3580 | | |
3581 | | /* Fill the end part of the filter pipeline */ |
3582 | 2.74k | if ((doshuffle == BLOSC_SHUFFLE) && (typesize > 1)) |
3583 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; |
3584 | 2.74k | if (doshuffle == BLOSC_BITSHUFFLE) |
3585 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; |
3586 | 2.74k | if (doshuffle == BLOSC_NOSHUFFLE) |
3587 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_NOSHUFFLE; |
3588 | 2.74k | if (delta) |
3589 | 0 | filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; |
3590 | 2.74k | } |
3591 | | |
3592 | | /* The public secure routine for compression. */ |
3593 | | int blosc2_compress(int clevel, int doshuffle, int32_t typesize, |
3594 | 0 | const void* src, int32_t srcsize, void* dest, int32_t destsize) { |
3595 | 0 | int error; |
3596 | 0 | int result; |
3597 | 0 | char* envvar; |
3598 | | |
3599 | | /* Check whether the library should be initialized */ |
3600 | 0 | if (!g_initlib) blosc2_init(); |
3601 | | |
3602 | | /* Check for a BLOSC_CLEVEL environment variable */ |
3603 | 0 | envvar = getenv("BLOSC_CLEVEL"); |
3604 | 0 | if (envvar != NULL) { |
3605 | 0 | long value; |
3606 | 0 | errno = 0; /* To distinguish success/failure after call */ |
3607 | 0 | value = strtol(envvar, NULL, 10); |
3608 | 0 | if ((errno != EINVAL) && (value >= 0)) { |
3609 | 0 | clevel = (int)value; |
3610 | 0 | } |
3611 | 0 | else { |
3612 | 0 | BLOSC_TRACE_WARNING("BLOSC_CLEVEL environment variable '%s' not recognized\n", envvar); |
3613 | 0 | } |
3614 | 0 | } |
3615 | | |
3616 | | /* Check for a BLOSC_SHUFFLE environment variable */ |
3617 | 0 | envvar = getenv("BLOSC_SHUFFLE"); |
3618 | 0 | if (envvar != NULL) { |
3619 | 0 | if (strcmp(envvar, "NOSHUFFLE") == 0) { |
3620 | 0 | doshuffle = BLOSC_NOSHUFFLE; |
3621 | 0 | } |
3622 | 0 | else if (strcmp(envvar, "SHUFFLE") == 0) { |
3623 | 0 | doshuffle = BLOSC_SHUFFLE; |
3624 | 0 | } |
3625 | 0 | else if (strcmp(envvar, "BITSHUFFLE") == 0) { |
3626 | 0 | doshuffle = BLOSC_BITSHUFFLE; |
3627 | 0 | } |
3628 | 0 | else { |
3629 | 0 | BLOSC_TRACE_WARNING("BLOSC_SHUFFLE environment variable '%s' not recognized\n", envvar); |
3630 | 0 | } |
3631 | 0 | } |
3632 | | |
3633 | | /* Check for a BLOSC_DELTA environment variable */ |
3634 | 0 | envvar = getenv("BLOSC_DELTA"); |
3635 | 0 | if (envvar != NULL) { |
3636 | 0 | if (strcmp(envvar, "1") == 0) { |
3637 | 0 | blosc2_set_delta(1); |
3638 | 0 | } else if (strcmp(envvar, "0") == 0) { |
3639 | 0 | blosc2_set_delta(0); |
3640 | 0 | } |
3641 | 0 | else { |
3642 | 0 | BLOSC_TRACE_WARNING("BLOSC_DELTA environment variable '%s' not recognized\n", envvar); |
3643 | 0 | } |
3644 | 0 | } |
3645 | | |
3646 | | /* Check for a BLOSC_TYPESIZE environment variable */ |
3647 | 0 | envvar = getenv("BLOSC_TYPESIZE"); |
3648 | 0 | if (envvar != NULL) { |
3649 | 0 | long value; |
3650 | 0 | errno = 0; /* To distinguish success/failure after call */ |
3651 | 0 | value = strtol(envvar, NULL, 10); |
3652 | 0 | if ((errno != EINVAL) && (value > 0)) { |
3653 | 0 | typesize = (int32_t)value; |
3654 | 0 | } |
3655 | 0 | else { |
3656 | 0 | BLOSC_TRACE_WARNING("BLOSC_TYPESIZE environment variable '%s' not recognized\n", envvar); |
3657 | 0 | } |
3658 | 0 | } |
3659 | | |
3660 | | /* Check for a BLOSC_COMPRESSOR environment variable */ |
3661 | 0 | envvar = getenv("BLOSC_COMPRESSOR"); |
3662 | 0 | if (envvar != NULL) { |
3663 | 0 | result = blosc1_set_compressor(envvar); |
3664 | 0 | if (result < 0) { |
3665 | 0 | BLOSC_TRACE_WARNING("BLOSC_COMPRESSOR environment variable '%s' not recognized\n", envvar); |
3666 | 0 | } |
3667 | 0 | } |
3668 | | |
3669 | | /* Check for a BLOSC_BLOCKSIZE environment variable */ |
3670 | 0 | envvar = getenv("BLOSC_BLOCKSIZE"); |
3671 | 0 | if (envvar != NULL) { |
3672 | 0 | long blocksize; |
3673 | 0 | errno = 0; /* To distinguish success/failure after call */ |
3674 | 0 | blocksize = strtol(envvar, NULL, 10); |
3675 | 0 | if ((errno != EINVAL) && (blocksize > 0)) { |
3676 | 0 | blosc1_set_blocksize((size_t) blocksize); |
3677 | 0 | } |
3678 | 0 | else { |
3679 | 0 | BLOSC_TRACE_WARNING("BLOSC_BLOCKSIZE environment variable '%s' not recognized\n", envvar); |
3680 | 0 | } |
3681 | 0 | } |
3682 | | |
3683 | | /* Check for a BLOSC_NTHREADS environment variable */ |
3684 | 0 | envvar = getenv("BLOSC_NTHREADS"); |
3685 | 0 | if (envvar != NULL) { |
3686 | 0 | long nthreads; |
3687 | 0 | errno = 0; /* To distinguish success/failure after call */ |
3688 | 0 | nthreads = strtol(envvar, NULL, 10); |
3689 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
3690 | 0 | result = blosc2_set_nthreads((int16_t) nthreads); |
3691 | 0 | if (result < 0) { |
3692 | 0 | BLOSC_TRACE_WARNING("BLOSC_NTHREADS environment variable '%s' not recognized\n", envvar); |
3693 | 0 | } |
3694 | 0 | } |
3695 | 0 | } |
3696 | | |
3697 | | /* Check for a BLOSC_SPLITMODE environment variable */ |
3698 | 0 | envvar = getenv("BLOSC_SPLITMODE"); |
3699 | 0 | if (envvar != NULL) { |
3700 | 0 | int32_t splitmode = -1; |
3701 | 0 | if (strcmp(envvar, "ALWAYS") == 0) { |
3702 | 0 | splitmode = BLOSC_ALWAYS_SPLIT; |
3703 | 0 | } |
3704 | 0 | else if (strcmp(envvar, "NEVER") == 0) { |
3705 | 0 | splitmode = BLOSC_NEVER_SPLIT; |
3706 | 0 | } |
3707 | 0 | else if (strcmp(envvar, "AUTO") == 0) { |
3708 | 0 | splitmode = BLOSC_AUTO_SPLIT; |
3709 | 0 | } |
3710 | 0 | else if (strcmp(envvar, "FORWARD_COMPAT") == 0) { |
3711 | 0 | splitmode = BLOSC_FORWARD_COMPAT_SPLIT; |
3712 | 0 | } |
3713 | 0 | else { |
3714 | 0 | BLOSC_TRACE_WARNING("BLOSC_SPLITMODE environment variable '%s' not recognized\n", envvar); |
3715 | 0 | } |
3716 | |
|
3717 | 0 | if (splitmode >= 0) { |
3718 | 0 | blosc1_set_splitmode(splitmode); |
3719 | 0 | } |
3720 | 0 | } |
3721 | | |
3722 | | /* Check for a BLOSC_NOLOCK environment variable. It is important |
3723 | | that this should be the last env var so that it can take the |
3724 | | previous ones into account */ |
3725 | 0 | envvar = getenv("BLOSC_NOLOCK"); |
3726 | 0 | if (envvar != NULL) { |
3727 | | // TODO: here is the only place that returns an extended header from |
3728 | | // a blosc1_compress() call. This should probably be fixed. |
3729 | 0 | const char *compname; |
3730 | 0 | blosc2_context *cctx; |
3731 | 0 | blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; |
3732 | |
|
3733 | 0 | blosc2_compcode_to_compname(g_compressor, &compname); |
3734 | | /* Create a context for compression */ |
3735 | 0 | build_filters(doshuffle, g_delta, typesize, cparams.filters); |
3736 | | // TODO: cparams can be shared in a multithreaded environment. do a copy! |
3737 | 0 | cparams.typesize = (uint8_t)typesize; |
3738 | 0 | cparams.compcode = (uint8_t)g_compressor; |
3739 | 0 | cparams.clevel = (uint8_t)clevel; |
3740 | 0 | cparams.nthreads = g_nthreads; |
3741 | 0 | cparams.splitmode = g_splitmode; |
3742 | 0 | cctx = blosc2_create_cctx(cparams); |
3743 | 0 | if (cctx == NULL) { |
3744 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
3745 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
3746 | 0 | } |
3747 | | /* Do the actual compression */ |
3748 | 0 | result = blosc2_compress_ctx(cctx, src, srcsize, dest, destsize); |
3749 | | /* Release context resources */ |
3750 | 0 | blosc2_free_ctx(cctx); |
3751 | 0 | return result; |
3752 | 0 | } |
3753 | | |
3754 | 0 | blosc2_pthread_mutex_lock(&global_comp_mutex); |
3755 | | |
3756 | | /* Initialize a context compression */ |
3757 | 0 | uint8_t* filters = calloc(1, BLOSC2_MAX_FILTERS); |
3758 | 0 | BLOSC_ERROR_NULL(filters, BLOSC2_ERROR_MEMORY_ALLOC); |
3759 | 0 | uint8_t* filters_meta = calloc(1, BLOSC2_MAX_FILTERS); |
3760 | 0 | BLOSC_ERROR_NULL(filters_meta, BLOSC2_ERROR_MEMORY_ALLOC); |
3761 | 0 | build_filters(doshuffle, g_delta, typesize, filters); |
3762 | 0 | error = initialize_context_compression( |
3763 | 0 | g_global_context, src, srcsize, dest, destsize, clevel, filters, |
3764 | 0 | filters_meta, (int32_t)typesize, g_compressor, g_force_blocksize, g_nthreads, g_nthreads, |
3765 | 0 | g_splitmode, g_tuner, NULL, g_schunk); |
3766 | 0 | free(filters); |
3767 | 0 | free(filters_meta); |
3768 | 0 | if (error <= 0) { |
3769 | 0 | blosc2_pthread_mutex_unlock(&global_comp_mutex); |
3770 | 0 | return error; |
3771 | 0 | } |
3772 | | |
3773 | 0 | envvar = getenv("BLOSC_BLOSC1_COMPAT"); |
3774 | 0 | if (envvar != NULL) { |
3775 | | /* Write chunk header without extended header (Blosc1 compatibility mode) */ |
3776 | 0 | error = write_compression_header(g_global_context, false); |
3777 | 0 | } |
3778 | 0 | else { |
3779 | 0 | error = write_compression_header(g_global_context, true); |
3780 | 0 | } |
3781 | 0 | if (error < 0) { |
3782 | 0 | blosc2_pthread_mutex_unlock(&global_comp_mutex); |
3783 | 0 | return error; |
3784 | 0 | } |
3785 | | |
3786 | 0 | result = blosc_compress_context(g_global_context); |
3787 | |
|
3788 | 0 | blosc2_pthread_mutex_unlock(&global_comp_mutex); |
3789 | |
|
3790 | 0 | return result; |
3791 | 0 | } |
3792 | | |
3793 | | |
/* The public routine for compression (Blosc1 API).
 *
 * Thin compatibility shim: narrows the size_t arguments of the legacy API
 * and forwards everything to the Blosc2 entry point. */
int blosc1_compress(int clevel, int doshuffle, size_t typesize, size_t nbytes,
                    const void* src, void* dest, size_t destsize) {
  const int32_t typesize32 = (int32_t)typesize;
  const int32_t nbytes32 = (int32_t)nbytes;
  const int32_t destsize32 = (int32_t)destsize;
  return blosc2_compress(clevel, doshuffle, typesize32, src, nbytes32, dest, destsize32);
}
3799 | | |
3800 | | |
3801 | | |
3802 | | static int blosc_run_decompression_with_context(blosc2_context* context, const void* src, int32_t srcsize, |
3803 | 16.7k | void* dest, int32_t destsize) { |
3804 | 16.7k | blosc_header header; |
3805 | 16.7k | int32_t ntbytes; |
3806 | 16.7k | int rc; |
3807 | | |
3808 | 16.7k | rc = read_chunk_header(src, srcsize, true, &header); |
3809 | 16.7k | if (rc < 0) { |
3810 | 0 | return rc; |
3811 | 0 | } |
3812 | | |
3813 | 16.7k | if (header.nbytes > destsize) { |
3814 | | // Not enough space for writing into the destination |
3815 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
3816 | 0 | } |
3817 | | |
3818 | 16.7k | rc = initialize_context_decompression(context, &header, src, srcsize, dest, destsize); |
3819 | 16.7k | if (rc < 0) { |
3820 | 0 | return rc; |
3821 | 0 | } |
3822 | | |
3823 | | /* Do the actual decompression */ |
3824 | 16.7k | ntbytes = do_job(context); |
3825 | 16.7k | if (ntbytes < 0) { |
3826 | 0 | return ntbytes; |
3827 | 0 | } |
3828 | | |
3829 | 16.7k | assert(ntbytes <= (int32_t)destsize); |
3830 | 16.7k | return ntbytes; |
3831 | 16.7k | } |
3832 | | |
3833 | | |
3834 | | /* The public secure routine for decompression with context. */ |
3835 | | int blosc2_decompress_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
3836 | 16.7k | void* dest, int32_t destsize) { |
3837 | 16.7k | int result; |
3838 | | |
3839 | 16.7k | if (context->do_compress != 0) { |
3840 | 0 | BLOSC_TRACE_ERROR("Context is not meant for decompression. Giving up."); |
3841 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3842 | 0 | } |
3843 | | |
3844 | 16.7k | result = blosc_run_decompression_with_context(context, src, srcsize, dest, destsize); |
3845 | | |
3846 | | // Reset a possible block_maskout |
3847 | 16.7k | if (context->block_maskout != NULL) { |
3848 | 0 | free(context->block_maskout); |
3849 | 0 | context->block_maskout = NULL; |
3850 | 0 | } |
3851 | 16.7k | context->block_maskout_nitems = 0; |
3852 | | |
3853 | 16.7k | return result; |
3854 | 16.7k | } |
3855 | | |
3856 | | int blosc2_vldecompress_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
3857 | 0 | void** dests, int32_t* destsizes, int32_t maxblocks) { |
3858 | 0 | int result; |
3859 | 0 | blosc_header header; |
3860 | |
|
3861 | 0 | if (context->do_compress != 0) { |
3862 | 0 | BLOSC_TRACE_ERROR("Context is not meant for decompression. Giving up."); |
3863 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3864 | 0 | } |
3865 | 0 | if (dests == NULL || destsizes == NULL || maxblocks <= 0) { |
3866 | 0 | BLOSC_TRACE_ERROR("Invalid destinations for VL-block decompression."); |
3867 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3868 | 0 | } |
3869 | | |
3870 | 0 | result = read_chunk_header(src, srcsize, true, &header); |
3871 | 0 | if (result < 0) { |
3872 | 0 | return result; |
3873 | 0 | } |
3874 | 0 | if ((header.blosc2_flags2 & BLOSC2_VL_BLOCKS) == 0) { |
3875 | 0 | BLOSC_TRACE_ERROR("Chunk does not use VL blocks."); |
3876 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3877 | 0 | } |
3878 | | |
3879 | 0 | result = initialize_context_decompression(context, &header, src, srcsize, NULL, header.nbytes); |
3880 | 0 | if (result < 0) { |
3881 | 0 | return result; |
3882 | 0 | } |
3883 | 0 | if (context->nblocks > maxblocks) { |
3884 | 0 | BLOSC_TRACE_ERROR("Not enough output entries for VL-block decompression."); |
3885 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3886 | 0 | } |
3887 | | |
3888 | 0 | for (int32_t i = 0; i < context->nblocks; ++i) { |
3889 | 0 | destsizes[i] = context->blocknbytes[i]; |
3890 | 0 | dests[i] = malloc((size_t)destsizes[i]); |
3891 | 0 | BLOSC_ERROR_NULL(dests[i], BLOSC2_ERROR_MEMORY_ALLOC); |
3892 | 0 | } |
3893 | | |
3894 | 0 | context->vlblock_dests = (uint8_t**)dests; |
3895 | 0 | result = do_job(context); |
3896 | 0 | context->vlblock_dests = NULL; |
3897 | 0 | if (result < 0) { |
3898 | 0 | for (int32_t i = 0; i < context->nblocks; ++i) { |
3899 | 0 | free(dests[i]); |
3900 | 0 | dests[i] = NULL; |
3901 | 0 | } |
3902 | 0 | return result; |
3903 | 0 | } |
3904 | | |
3905 | 0 | return context->nblocks; |
3906 | 0 | } |
3907 | | |
3908 | | |
/* Decompress a single VL block from an already-initialised decompression
 * context. The context must have been prepared by initialize_context_decompression()
 * on a VL-block chunk so that blocknbytes[], blockoffsets[], blockcbytes[], and
 * bstarts[] are all valid.
 *
 * On success, *dest points to a newly allocated buffer of *destsize bytes that
 * the caller must free(). Returns *destsize, or a negative error code. */
static int decompress_single_vlblock(blosc2_context* context, int32_t nblock,
                                     uint8_t** dest, int32_t* destsize) {
  /* Guard against out-of-range block indices before touching any arrays. */
  if (nblock < 0 || nblock >= context->nblocks) {
    BLOSC_TRACE_ERROR("nblock (%d) out of range [0, %d).", nblock, context->nblocks);
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  // For lazy VL chunks blocknbytes[nblock] == 0 because the uncompressed size is
  // stored as the first 4 bytes of the block span on disk, not in the in-memory
  // header. Peek at the file to resolve it before we can allocate the output buffer.
  if (context->blocknbytes[nblock] == 0) {
    if (context->schunk == NULL || context->schunk->frame == NULL) {
      BLOSC_TRACE_ERROR("Lazy VL block needs an associated super-chunk with a frame.");
      return BLOSC2_ERROR_INVALID_PARAM;
    }
    blosc2_frame_s* frame = (blosc2_frame_s*)context->schunk->frame;
    blosc2_io_cb *io_cb = blosc2_get_io_cb(context->schunk->storage->io->id);
    if (io_cb == NULL) {
      BLOSC_TRACE_ERROR("Error getting the input/output API");
      return BLOSC2_ERROR_PLUGIN_IO;
    }

    // Lazy chunk trailer: [nchunk int32 | chunk_offset int64 | block_csizes int32*N]
    // NOTE(review): this hard-codes the lazy-chunk trailer layout; confirm it
    // stays in sync with the code that builds lazy chunks in frame.c.
    int32_t trailer_offset = BLOSC_EXTENDED_HEADER_LENGTH +
                             context->nblocks * (int32_t)sizeof(int32_t);
    int32_t nchunk_lazy = *(const int32_t*)(context->src + trailer_offset);
    int64_t chunk_offset = *(const int64_t*)(context->src + trailer_offset +
                                             (int32_t)sizeof(int32_t));
    /* Offset of this block's span within the chunk (sw32_ decodes an int32). */
    int32_t bstart = sw32_(context->bstarts + nblock);

    void* fp = NULL;
    int64_t io_pos;
    if (frame->sframe) {
      /* Sparse frame: each chunk lives in its own "<urlpath>/XXXXXXXX.chunk"
         file, so the block offset is relative to that file's start. */
      char* chunkpath = malloc(strlen(frame->urlpath) + 1 + 8 + strlen(".chunk") + 1);
      BLOSC_ERROR_NULL(chunkpath, BLOSC2_ERROR_MEMORY_ALLOC);
      sprintf(chunkpath, "%s/%08X.chunk", frame->urlpath, nchunk_lazy);
      fp = io_cb->open(chunkpath, "rb", context->schunk->storage->io->params);
      free(chunkpath);
      io_pos = bstart;
    }
    else {
      /* Contiguous frame: seek to frame start + chunk offset + block offset. */
      fp = io_cb->open(frame->urlpath, "rb", context->schunk->storage->io->params);
      io_pos = frame->file_offset + chunk_offset + bstart;
    }
    if (fp == NULL) {
      BLOSC_TRACE_ERROR("Cannot open frame file for lazy VL block size peek.");
      return BLOSC2_ERROR_FILE_OPEN;
    }

    // Read only the 4-byte uncompressed-size prefix of the block span.
    uint8_t nbuf[sizeof(int32_t)];
    uint8_t* nbufp = nbuf;
    int64_t rbytes = io_cb->read((void**)&nbufp, 1, sizeof(int32_t), io_pos, fp);
    io_cb->close(fp);
    if (nbufp != nbuf) {
      // io_cb allocated new memory; copy the result and free.
      memcpy(nbuf, nbufp, sizeof(int32_t));
      free(nbufp);
    }
    if (rbytes != (int64_t)sizeof(int32_t)) {
      BLOSC_TRACE_ERROR("Cannot read VL-block uncompressed-size prefix from disk.");
      return BLOSC2_ERROR_FILE_READ;
    }
    int32_t neblock = sw32_(nbuf);
    if (neblock <= 0) {
      BLOSC_TRACE_ERROR("Invalid VL-block uncompressed size read from disk.");
      return BLOSC2_ERROR_INVALID_HEADER;
    }
    /* Cache the resolved size so a second call skips the disk peek. */
    context->blocknbytes[nblock] = neblock;
    // Keep blocksize as an upper bound for tmp buffer allocation.
    if (neblock > context->blocksize) {
      context->blocksize = neblock;
    }
  }

  /* Allocate the output buffer now that the uncompressed size is known. */
  int32_t bsize = context->blocknbytes[nblock];
  uint8_t* buf = malloc((size_t)bsize);
  if (buf == NULL) {
    return BLOSC2_ERROR_MEMORY_ALLOC;
  }

  /* Ensure we have a serial thread context sized for this blocksize. */
  if (context->serial_context == NULL) {
    context->serial_context = create_thread_context(context, 0);
  }
  else if (context->blocksize != context->serial_context->tmp_blocksize) {
    /* Existing scratch buffers are the wrong size; rebuild them. */
    free_thread_context(context->serial_context);
    context->serial_context = create_thread_context(context, 0);
  }
  if (context->serial_context == NULL) {
    free(buf);
    return BLOSC2_ERROR_THREAD_CREATE;
  }

  /* Decompress this single block serially into `buf` via blosc_d. */
  bool memcpyed = (context->header_flags & (uint8_t)BLOSC_MEMCPYED) != 0;
  int32_t src_offset = sw32_(context->bstarts + nblock);
  int cbytes = blosc_d(context->serial_context, bsize, 0, memcpyed,
                       context->src, context->srcsize, src_offset, nblock,
                       buf, 0,
                       context->serial_context->tmp, context->serial_context->tmp2);
  if (cbytes < 0) {
    free(buf);
    return cbytes;
  }

  /* Ownership of `buf` transfers to the caller. */
  *dest = buf;
  *destsize = bsize;
  return bsize;
}
4025 | | |
4026 | | |
4027 | | int blosc2_vldecompress_block_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
4028 | 0 | int32_t nblock, uint8_t** dest, int32_t* destsize) { |
4029 | 0 | if (context->do_compress != 0) { |
4030 | 0 | BLOSC_TRACE_ERROR("Context is not meant for decompression. Giving up."); |
4031 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
4032 | 0 | } |
4033 | 0 | if (dest == NULL || destsize == NULL) { |
4034 | 0 | BLOSC_TRACE_ERROR("dest and destsize must not be NULL."); |
4035 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
4036 | 0 | } |
4037 | | |
4038 | 0 | blosc_header header; |
4039 | 0 | int result = read_chunk_header((const uint8_t*)src, srcsize, true, &header); |
4040 | 0 | if (result < 0) { |
4041 | 0 | return result; |
4042 | 0 | } |
4043 | 0 | if ((header.blosc2_flags2 & BLOSC2_VL_BLOCKS) == 0) { |
4044 | 0 | BLOSC_TRACE_ERROR("Chunk does not use VL blocks."); |
4045 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
4046 | 0 | } |
4047 | | |
4048 | | /* Pass header.nbytes as destsize so the size-check inside |
4049 | | * initialize_context_decompression passes; context->dest is set to NULL and |
4050 | | * is never written by blosc_d (which uses its own dest argument). */ |
4051 | 0 | result = initialize_context_decompression(context, &header, src, srcsize, NULL, header.nbytes); |
4052 | 0 | if (result < 0) { |
4053 | 0 | return result; |
4054 | 0 | } |
4055 | | |
4056 | 0 | return decompress_single_vlblock(context, nblock, dest, destsize); |
4057 | 0 | } |
4058 | | |
4059 | | |
4060 | | /* The public secure routine for decompression. */ |
4061 | 0 | int blosc2_decompress(const void* src, int32_t srcsize, void* dest, int32_t destsize) { |
4062 | 0 | int result; |
4063 | 0 | char* envvar; |
4064 | 0 | long nthreads; |
4065 | 0 | blosc2_context *dctx; |
4066 | 0 | blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; |
4067 | | |
4068 | | /* Check whether the library should be initialized */ |
4069 | 0 | if (!g_initlib) blosc2_init(); |
4070 | | |
4071 | | /* Check for a BLOSC_NTHREADS environment variable */ |
4072 | 0 | envvar = getenv("BLOSC_NTHREADS"); |
4073 | 0 | if (envvar != NULL) { |
4074 | 0 | errno = 0; /* To distinguish success/failure after call */ |
4075 | 0 | nthreads = strtol(envvar, NULL, 10); |
4076 | 0 | if ((errno != EINVAL)) { |
4077 | 0 | if ((nthreads <= 0) || (nthreads > INT16_MAX)) { |
4078 | 0 | BLOSC_TRACE_ERROR("nthreads must be >= 1 and <= %d", INT16_MAX); |
4079 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
4080 | 0 | } |
4081 | 0 | result = blosc2_set_nthreads((int16_t) nthreads); |
4082 | 0 | if (result < 0) { |
4083 | 0 | return result; |
4084 | 0 | } |
4085 | 0 | } |
4086 | 0 | } |
4087 | | |
4088 | | /* Check for a BLOSC_NOLOCK environment variable. It is important |
4089 | | that this should be the last env var so that it can take the |
4090 | | previous ones into account */ |
4091 | 0 | envvar = getenv("BLOSC_NOLOCK"); |
4092 | 0 | if (envvar != NULL) { |
4093 | 0 | dparams.nthreads = g_nthreads; |
4094 | 0 | dctx = blosc2_create_dctx(dparams); |
4095 | 0 | if (dctx == NULL) { |
4096 | 0 | BLOSC_TRACE_ERROR("Error while creating the decompression context"); |
4097 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
4098 | 0 | } |
4099 | 0 | result = blosc2_decompress_ctx(dctx, src, srcsize, dest, destsize); |
4100 | 0 | blosc2_free_ctx(dctx); |
4101 | 0 | return result; |
4102 | 0 | } |
4103 | | |
4104 | 0 | blosc2_pthread_mutex_lock(&global_comp_mutex); |
4105 | |
|
4106 | 0 | result = blosc_run_decompression_with_context( |
4107 | 0 | g_global_context, src, srcsize, dest, destsize); |
4108 | |
|
4109 | 0 | blosc2_pthread_mutex_unlock(&global_comp_mutex); |
4110 | |
|
4111 | 0 | return result; |
4112 | 0 | } |
4113 | | |
4114 | | |
/* The public routine for decompression (Blosc1 API).
 *
 * Blosc1 carries no source-buffer size, so the maximum representable size is
 * passed and the chunk header's own length fields are trusted instead. */
int blosc1_decompress(const void* src, void* dest, size_t destsize) {
  return blosc2_decompress(src, INT32_MAX, dest, (int32_t)destsize);
}
4119 | | |
4120 | | |
/* Specific routine optimized for decompression a small number of
   items out of a compressed chunk. This does not use threads because
   it would affect negatively to performance.

   Decompresses items [start, start+nitems) (in units of header->typesize)
   from the chunk in `src` into `dest`.  Returns the number of bytes written
   to `dest`, or a negative error code. */
int _blosc_getitem(blosc2_context* context, blosc_header* header, const void* src, int32_t srcsize,
                   int start, int nitems, void* dest, int32_t destsize) {
  uint8_t* _src = (uint8_t*)(src);  /* current pos for source buffer */
  uint8_t* _dest = (uint8_t*)(dest);
  int32_t ntbytes = 0;              /* the number of uncompressed bytes */
  int32_t bsize, bsize2, ebsize, leftoverblock;
  int32_t startb, stopb;            /* byte range of the request inside one block */
  int32_t stop = start + nitems;    /* exclusive end item */
  int j, rc;

  if (nitems == 0) {
    // We have nothing to do
    return 0;
  }
  /* The requested items must fit into the destination buffer. */
  if (nitems * header->typesize > destsize) {
    BLOSC_TRACE_ERROR("`nitems`*`typesize` out of dest bounds.");
    return BLOSC2_ERROR_WRITE_BUFFER;
  }

  /* Table of per-block start offsets, right after the chunk header. */
  int32_t* bstarts = (int32_t*)(_src + context->header_overhead);

  /* Check region boundaries */
  if ((start < 0) || (start * header->typesize > header->nbytes)) {
    BLOSC_TRACE_ERROR("`start` out of bounds.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  if ((stop < 0) || (stop * header->typesize > header->nbytes)) {
    BLOSC_TRACE_ERROR("`start`+`nitems` out of bounds.");
    return BLOSC2_ERROR_INVALID_PARAM;
  }

  /* Memcpyed and special chunks carry no bstarts table, so only validate it
     for regular compressed chunks. */
  int chunk_memcpy = header->flags & 0x1;
  if (!context->special_type && !chunk_memcpy &&
      ((uint8_t *)(_src + srcsize) < (uint8_t *)(bstarts + context->nblocks))) {
    BLOSC_TRACE_ERROR("`bstarts` out of bounds.");
    return BLOSC2_ERROR_READ_BUFFER;
  }

  bool memcpyed = header->flags & (uint8_t)BLOSC_MEMCPYED;
  if (context->special_type) {
    // Fake a runlen as if its a memcpyed chunk
    memcpyed = true;
  }

  /* Lazy chunks (bit 0x08 in blosc2_flags) keep their payload on disk, so the
     memcpy short-circuit below cannot be taken for them. */
  bool is_lazy = ((context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH) &&
                  (context->blosc2_flags & 0x08u) && !context->special_type);
  if (memcpyed && !is_lazy && !context->postfilter) {
    // Short-circuit for (non-lazy) memcpyed or special values
    ntbytes = nitems * header->typesize;
    switch (context->special_type) {
      case BLOSC2_SPECIAL_VALUE:
        // All repeated values
        rc = set_values(context->typesize, _src, _dest, ntbytes);
        if (rc < 0) {
          BLOSC_TRACE_ERROR("set_values failed");
          return BLOSC2_ERROR_DATA;
        }
        break;
      case BLOSC2_SPECIAL_NAN:
        rc = set_nans(context->typesize, _dest, ntbytes);
        if (rc < 0) {
          BLOSC_TRACE_ERROR("set_nans failed");
          return BLOSC2_ERROR_DATA;
        }
        break;
      case BLOSC2_SPECIAL_ZERO:
        memset(_dest, 0, ntbytes);
        break;
      case BLOSC2_SPECIAL_UNINIT:
        // We do nothing here
        break;
      case BLOSC2_NO_SPECIAL:
        /* Plain memcpyed chunk: copy the requested slice directly. */
        _src += context->header_overhead + start * context->typesize;
        memcpy(_dest, _src, ntbytes);
        break;
      default:
        BLOSC_TRACE_ERROR("Unhandled special value case");
        BLOSC_ERROR(BLOSC2_ERROR_SCHUNK_SPECIAL);
    }
    return ntbytes;
  }

  /* Expanded block size: extra room for filters that may grow the data. */
  ebsize = header->blocksize + header->typesize * (signed)sizeof(int32_t);
  struct thread_context* scontext = context->serial_context;
  /* Resize the temporaries in serial context if needed */
  if (header->blocksize > scontext->tmp_blocksize) {
    my_free(scontext->tmp);
    scontext->tmp_nbytes = (size_t)4 * ebsize;
    scontext->tmp = my_malloc(scontext->tmp_nbytes);
    BLOSC_ERROR_NULL(scontext->tmp, BLOSC2_ERROR_MEMORY_ALLOC);
    scontext->tmp2 = scontext->tmp + ebsize;
    scontext->tmp3 = scontext->tmp2 + ebsize;
    scontext->tmp4 = scontext->tmp3 + ebsize;
    scontext->tmp_blocksize = (int32_t)header->blocksize;
  }

  /* Walk the blocks that intersect [start, stop), decompressing each one
     serially and copying out just the overlapping byte range. */
  for (j = 0; j < context->nblocks; j++) {
    bsize = header->blocksize;
    leftoverblock = 0;
    if ((j == context->nblocks - 1) && (context->leftover > 0)) {
      bsize = context->leftover;
      leftoverblock = 1;
    }

    /* Compute start & stop for each block */
    startb = start * header->typesize - j * header->blocksize;
    stopb = stop * header->typesize - j * header->blocksize;
    if (stopb <= 0) {
      // We can exit as soon as this block is beyond stop
      break;
    }
    if (startb >= header->blocksize) {
      continue;
    }
    if (startb < 0) {
      startb = 0;
    }
    if (stopb > header->blocksize) {
      stopb = header->blocksize;
    }
    bsize2 = stopb - startb;

#if defined(HAVE_PLUGINS)
    /* Tell the ZFP fixed-rate codec which cells are actually needed so it can
       decode a partial block. */
    if (context->compcode == BLOSC_CODEC_ZFP_FIXED_RATE) {
      scontext->zfp_cell_start = startb / context->typesize;
      scontext->zfp_cell_nitems = nitems;
    }
#endif /* HAVE_PLUGINS */

    /* Do the actual data copy */
    // Regular decompression. Put results in tmp2.
    // If the block is aligned and the worst case fits in destination, let's avoid a copy
    bool get_single_block = ((startb == 0) && (bsize == nitems * header->typesize));
    uint8_t* tmp2 = get_single_block ? dest : scontext->tmp2;

    // If memcpyed we don't have a bstarts section (because it is not needed)
    int32_t src_offset = memcpyed ?
                         context->header_overhead + j * header->blocksize : sw32_(bstarts + j);

    int32_t cbytes = blosc_d(context->serial_context, bsize, leftoverblock, memcpyed,
                             src, srcsize, src_offset, j,
                             tmp2, 0, scontext->tmp, scontext->tmp3);
    if (cbytes < 0) {
      ntbytes = cbytes;
      break;
    }
    if (scontext->zfp_cell_nitems > 0) {
      /* ZFP partial decode: the codec may return either exactly the requested
         bytes or the whole block; handle both layouts. */
      if (cbytes == bsize2) {
        memcpy((uint8_t *) dest, tmp2, (unsigned int) bsize2);
      } else if (cbytes == context->blocksize) {
        memcpy((uint8_t *) dest, tmp2 + scontext->zfp_cell_start * context->typesize, (unsigned int) bsize2);
        cbytes = bsize2;
      }
    } else if (!get_single_block) {
      /* Copy to destination */
      memcpy((uint8_t *) dest + ntbytes, tmp2 + startb, (unsigned int) bsize2);
    }
    ntbytes += bsize2;
  }

  /* Reset the ZFP partial-decode state for the next call. */
  scontext->zfp_cell_nitems = 0;

  return ntbytes;
}
4289 | | |
4290 | 0 | int blosc2_getitem(const void* src, int32_t srcsize, int start, int nitems, void* dest, int32_t destsize) { |
4291 | 0 | blosc2_context context; |
4292 | 0 | int result; |
4293 | | |
4294 | | /* Minimally populate the context */ |
4295 | 0 | memset(&context, 0, sizeof(blosc2_context)); |
4296 | |
|
4297 | 0 | context.schunk = g_schunk; |
4298 | 0 | context.nthreads = 1; // force a serial decompression; fixes #95 |
4299 | | |
4300 | | /* Call the actual getitem function */ |
4301 | 0 | result = blosc2_getitem_ctx(&context, src, srcsize, start, nitems, dest, destsize); |
4302 | | |
4303 | | /* Release resources */ |
4304 | 0 | if (context.serial_context != NULL) { |
4305 | 0 | free_thread_context(context.serial_context); |
4306 | 0 | } |
4307 | 0 | return result; |
4308 | 0 | } |
4309 | | |
/* Specific routine optimized for decompression a small number of
   items out of a compressed chunk. Public non-contextual API. */
int blosc1_getitem(const void* src, int start, int nitems, void* dest) {
  /* Blosc1 provides no buffer sizes; pass the maximum representable so the
     header-driven bounds checks take over. */
  return blosc2_getitem(src, INT32_MAX, start, nitems, dest, INT32_MAX);
}
4315 | | |
4316 | | int blosc2_getitem_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
4317 | 0 | int start, int nitems, void* dest, int32_t destsize) { |
4318 | 0 | blosc_header header; |
4319 | 0 | int result; |
4320 | | |
4321 | | /* Minimally populate the context */ |
4322 | 0 | result = read_chunk_header((uint8_t *) src, srcsize, true, &header); |
4323 | 0 | if (result < 0) { |
4324 | 0 | return result; |
4325 | 0 | } |
4326 | 0 | if (header.blosc2_flags2 & BLOSC2_VL_BLOCKS) { |
4327 | 0 | BLOSC_TRACE_ERROR("getitem is not supported for VL-block chunks."); |
4328 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
4329 | 0 | } |
4330 | | |
4331 | 0 | context->src = src; |
4332 | 0 | context->srcsize = srcsize; |
4333 | 0 | context->dest = dest; |
4334 | 0 | context->destsize = destsize; |
4335 | |
|
4336 | 0 | result = blosc2_initialize_context_from_header(context, &header); |
4337 | 0 | if (result < 0) { |
4338 | 0 | return result; |
4339 | 0 | } |
4340 | | |
4341 | 0 | if (context->serial_context == NULL) { |
4342 | 0 | context->serial_context = create_thread_context(context, 0); |
4343 | 0 | } |
4344 | 0 | BLOSC_ERROR_NULL(context->serial_context, BLOSC2_ERROR_THREAD_CREATE); |
4345 | | /* Call the actual getitem function */ |
4346 | 0 | result = _blosc_getitem(context, &header, src, srcsize, start, nitems, dest, destsize); |
4347 | |
|
4348 | 0 | return result; |
4349 | 0 | } |
4350 | | |
/* Execute a single compression/decompression job for one thread_context.
 *
 * Called by each pool worker (see t_blosc()) once per wake-up.  All job
 * parameters are read from thcontext->parent_context; results and progress
 * counters are written back under context->count_mutex.  Errors are
 * propagated by storing a negative value in context->thread_giveup_code,
 * which also tells the sibling threads to stop early. */
static void t_blosc_do_job(void *ctxt)
{
  struct thread_context* thcontext = (struct thread_context*)ctxt;
  blosc2_context* context = thcontext->parent_context;
  int32_t cbytes;          /* result of blosc_c()/blosc_d() for one block */
  int32_t ntdest;          /* snapshot of output_bytes (write offset in dest) */
  int32_t tblocks;         /* number of blocks per thread */
  int32_t tblock;          /* limit block on a thread */
  int32_t nblock_;         /* private copy of nblock */
  int32_t bsize;
  int32_t leftoverblock;
  /* Parameters for threads */
  int32_t blocksize;
  int32_t ebsize;          /* expanded block size (blocksize + scratch room) */
  int32_t srcsize;
  bool compress = context->do_compress != 0;
  int32_t maxbytes;
  int32_t nblocks;
  int32_t leftover;
  int32_t leftover2;
  int32_t* bstarts;
  const uint8_t* src;
  uint8_t* dest;
  uint8_t* tmp;
  uint8_t* tmp2;
  uint8_t* tmp3;

  /* Get parameters for this thread before entering the main loop */
  blocksize = context->blocksize;
  ebsize = blocksize + context->typesize * (int32_t)sizeof(int32_t);
  maxbytes = context->destsize;
  nblocks = context->nblocks;
  leftover = context->leftover;
  bstarts = context->bstarts;
  src = context->src;
  srcsize = context->srcsize;
  dest = context->dest;

  /* Resize the temporaries if needed (4 scratch regions carved out of one
     allocation: tmp, tmp2, tmp3, tmp4, each ebsize bytes apart). */
  if (blocksize > thcontext->tmp_blocksize) {
    my_free(thcontext->tmp);
    thcontext->tmp_nbytes = (size_t) 4 * ebsize;
    thcontext->tmp = my_malloc(thcontext->tmp_nbytes);
    thcontext->tmp2 = thcontext->tmp + ebsize;
    thcontext->tmp3 = thcontext->tmp2 + ebsize;
    thcontext->tmp4 = thcontext->tmp3 + ebsize;
    thcontext->tmp_blocksize = blocksize;
  }

  tmp = thcontext->tmp;
  tmp2 = thcontext->tmp2;
  tmp3 = thcontext->tmp3;

  // Determine whether we can do a static distribution of workload among different threads
  bool vlblocks = (context->blosc2_flags2 & BLOSC2_VL_BLOCKS) != 0;
  bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED;
  if (!context->do_compress && context->special_type) {
    // Fake a runlen as if its a memcpyed chunk
    memcpyed = true;
  }

  /* Static schedule: each thread owns a fixed, contiguous range of blocks.
     Only safe when per-block output sizes are known up front (decompression
     or plain memcpy) and no block maskout filtering is active. */
  bool static_schedule = (!compress || memcpyed) && context->block_maskout == NULL;
  if (static_schedule) {
    /* Blocks per thread */
    tblocks = nblocks / context->nthreads;
    leftover2 = nblocks % context->nthreads;
    tblocks = (leftover2 > 0) ? tblocks + 1 : tblocks;
    nblock_ = thcontext->tid * tblocks;
    tblock = nblock_ + tblocks;
    if (tblock > nblocks) {
      tblock = nblocks;
    }
  }
  else {
    // Use dynamic schedule via a queue. Get the next block.
    blosc2_pthread_mutex_lock(&context->count_mutex);
    context->thread_nblock++;
    nblock_ = context->thread_nblock;
    blosc2_pthread_mutex_unlock(&context->count_mutex);
    tblock = nblocks;
  }

  /* Loop over blocks; a non-positive thread_giveup_code set by any thread
     stops the whole pool. */
  leftoverblock = 0;
  while ((nblock_ < tblock) && (context->thread_giveup_code > 0)) {
    bsize = vlblocks ? context->blocknbytes[nblock_] : blocksize;
    if (!vlblocks && nblock_ == (nblocks - 1) && (leftover > 0)) {
      bsize = leftover;
      leftoverblock = 1;
    }
    if (compress) {
      if (memcpyed) {
        if (!context->prefilter) {
          /* We want to memcpy only */
          memcpy(dest + context->header_overhead + nblock_ * blocksize,
                 src + nblock_ * blocksize, (unsigned int) bsize);
          cbytes = (int32_t) bsize;
        }
        else {
          /* Only the prefilter has to be executed, and this is done in blosc_c().
           * However, no further actions are needed, so we can put the result
           * directly in dest. */
          cbytes = blosc_c(thcontext, bsize, leftoverblock, 0,
                           ebsize,
                           vlblocks ? context->vlblock_sources[nblock_] : src,
                           vlblocks ? 0 : nblock_ * blocksize,
                           dest + context->header_overhead + nblock_ * blocksize,
                           tmp, tmp3);
        }
      }
      else {
        /* Regular compression: compress into tmp2 first; the block is copied
           into dest later, inside the critical section bookkeeping below. */
        cbytes = blosc_c(thcontext, bsize, leftoverblock, 0,
                         ebsize,
                         vlblocks ? context->vlblock_sources[nblock_] : src,
                         vlblocks ? 0 : nblock_ * blocksize,
                         tmp2, tmp, tmp3);
      }
    }
    else {
      /* Regular decompression */
      if (context->special_type == BLOSC2_NO_SPECIAL && !memcpyed &&
          (srcsize < (int32_t)(context->header_overhead + (sizeof(int32_t) * nblocks)))) {
        /* Not enough input to read all `bstarts` */
        cbytes = -1;
      }
      else {
        // If memcpyed we don't have a bstarts section (because it is not needed)
        int32_t src_offset = memcpyed ?
            context->header_overhead + nblock_ * blocksize : sw32_(bstarts + nblock_);
        uint8_t *dest_block = (vlblocks && context->vlblock_dests != NULL) ? context->vlblock_dests[nblock_] : dest;
        int32_t dest_offset = (vlblocks && context->vlblock_dests != NULL) ? 0 :
            (vlblocks ? context->blockoffsets[nblock_] : nblock_ * blocksize);
        cbytes = blosc_d(thcontext, bsize, leftoverblock, memcpyed,
                         src, srcsize, src_offset, nblock_,
                         dest_block, dest_offset, tmp, tmp2);
      }
    }

    /* Check whether current thread has to giveup */
    if (context->thread_giveup_code <= 0) {
      break;
    }

    /* Check results for the compressed/decompressed block */
    if (cbytes < 0) {            /* compr/decompr failure */
      /* Set giveup_code error */
      blosc2_pthread_mutex_lock(&context->count_mutex);
      context->thread_giveup_code = cbytes;
      blosc2_pthread_mutex_unlock(&context->count_mutex);
      break;
    }

    if (compress && !memcpyed) {
      /* Start critical section: reserve our slice of the output buffer and
         record the block start offset before any other thread can claim it. */
      blosc2_pthread_mutex_lock(&context->count_mutex);
      ntdest = context->output_bytes;
      // Note: do not use a typical local dict_training variable here
      // because it is probably cached from previous calls if the number of
      // threads does not change (the usual thing).
      if (!(context->use_dict && context->dict_cdict == NULL)) {
        _sw32(bstarts + nblock_, (int32_t) ntdest);
      }

      if ((cbytes == 0) || (ntdest + cbytes > maxbytes)) {
        context->thread_giveup_code = 0;  /* incompressible buf */
        blosc2_pthread_mutex_unlock(&context->count_mutex);
        break;
      }
      context->thread_nblock++;
      nblock_ = context->thread_nblock;
      context->output_bytes += cbytes;
      blosc2_pthread_mutex_unlock(&context->count_mutex);
      /* End of critical section */

      /* Copy the compressed buffer to destination (safe outside the lock:
         the [ntdest, ntdest + cbytes) range was reserved above). */
      memcpy(dest + ntdest, tmp2, (unsigned int) cbytes);
    }
    else if (static_schedule) {
      nblock_++;
    }
    else {
      /* Dynamic schedule: fetch the next block index under the lock. */
      blosc2_pthread_mutex_lock(&context->count_mutex);
      context->thread_nblock++;
      nblock_ = context->thread_nblock;
      context->output_bytes += cbytes;
      blosc2_pthread_mutex_unlock(&context->count_mutex);
    }

  } /* closes while (nblock_) */

  /* With a static schedule the total output size is known a priori, so set
     it directly instead of accumulating per-block counts. */
  if (static_schedule) {
    blosc2_pthread_mutex_lock(&context->count_mutex);
    context->output_bytes = context->sourcesize;
    if (compress) {
      context->output_bytes += context->header_overhead;
    }
    blosc2_pthread_mutex_unlock(&context->count_mutex);
  }

}
4553 | | |
/* Decompress & unshuffle several blocks in a single thread.
 *
 * Pool worker entry point (started by init_threadpool()).  The worker parks
 * on the init barrier until the main thread publishes a job, runs the job,
 * then meets the other threads at the finish barrier.  Setting
 * context->end_threads before releasing the init barrier makes it exit. */
static void* t_blosc(void* ctxt) {
  struct thread_context* thcontext = (struct thread_context*)ctxt;
  blosc2_context* context = thcontext->parent_context;
#ifdef BLOSC_POSIX_BARRIERS
  /* NOTE(review): `rc` looks unused here, but it is presumably referenced
     inside the WAIT_INIT/WAIT_FINISH macro expansions — confirm before
     removing it. */
  int rc;
#endif

  while (1) {
    /* Synchronization point for all threads (wait for initialization) */
    WAIT_INIT(NULL, context);

    /* Shutdown request from release_threadpool() */
    if (context->end_threads) {
      break;
    }

    t_blosc_do_job(ctxt);

    /* Meeting point for all threads (wait for finalization) */
    WAIT_FINISH(NULL, context);
  }

  /* Cleanup our working space and context */
  free_thread_context(thcontext);

  return (NULL);
}
4581 | | |
4582 | | |
/* Create the pool of worker threads (or per-thread contexts when a user
 * threads_callback drives execution) for `context`.
 *
 * Initializes the synchronization primitives (mutexes, condition variables,
 * and barriers sized for nthreads workers plus the main thread), then either
 * allocates thread contexts for callback-managed threads or spawns
 * `context->nthreads` joinable t_blosc() workers.
 *
 * Returns 0 on success, or a negative BLOSC2_ERROR_* code on allocation or
 * thread-creation failure. */
int init_threadpool(blosc2_context *context) {
  int32_t tid;
  int rc2;

  /* Initialize mutex and condition variable objects */
  blosc2_pthread_mutex_init(&context->count_mutex, NULL);
  blosc2_pthread_mutex_init(&context->delta_mutex, NULL);
  blosc2_pthread_mutex_init(&context->nchunk_mutex, NULL);
  blosc2_pthread_cond_init(&context->delta_cv, NULL);

  /* Set context thread sentinels */
  context->thread_giveup_code = 1;
  context->thread_nblock = -1;

  /* Barrier initialization: nthreads workers + the coordinating thread. */
#ifdef BLOSC_POSIX_BARRIERS
  pthread_barrier_init(&context->barr_init, NULL, context->nthreads + 1);
  pthread_barrier_init(&context->barr_finish, NULL, context->nthreads + 1);
#else
  blosc2_pthread_mutex_init(&context->count_threads_mutex, NULL);
  blosc2_pthread_cond_init(&context->count_threads_cv, NULL);
  context->count_threads = 0;      /* Reset threads counter */
#endif

  if (threads_callback) {
    /* Create thread contexts to store data for callback threads */
    context->thread_contexts = (struct thread_context *)my_malloc(
            context->nthreads * sizeof(struct thread_context));
    BLOSC_ERROR_NULL(context->thread_contexts, BLOSC2_ERROR_MEMORY_ALLOC);
    for (tid = 0; tid < context->nthreads; tid++)
      init_thread_context(context->thread_contexts + tid, context, tid);
  }
  else {
#if !defined(_WIN32)
    /* Initialize and set thread detached attribute */
    pthread_attr_init(&context->ct_attr);
    pthread_attr_setdetachstate(&context->ct_attr, PTHREAD_CREATE_JOINABLE);
#endif

    /* Make space for thread handlers */
    context->threads = (blosc2_pthread_t*)my_malloc(
            context->nthreads * sizeof(blosc2_pthread_t));
    BLOSC_ERROR_NULL(context->threads, BLOSC2_ERROR_MEMORY_ALLOC);
    /* Finally, create the threads */
    for (tid = 0; tid < context->nthreads; tid++) {
      /* Create a thread context (will destroy when finished) */
      struct thread_context *thread_context = create_thread_context(context, tid);
      BLOSC_ERROR_NULL(thread_context, BLOSC2_ERROR_THREAD_CREATE);
#if !defined(_WIN32)
      rc2 = blosc2_pthread_create(&context->threads[tid], &context->ct_attr, t_blosc,
                                  (void*)thread_context);
#else
      rc2 = blosc2_pthread_create(&context->threads[tid], NULL, t_blosc,
                                  (void *)thread_context);
#endif
      if (rc2) {
        BLOSC_TRACE_ERROR("Return code from blosc2_pthread_create() is %d.\n"
                          "\tError detail: %s\n", rc2, strerror(rc2));
        return BLOSC2_ERROR_THREAD_CREATE;
      }
    }
  }

  /* We have now started/initialized the threads */
  context->threads_started = context->nthreads;
  context->new_nthreads = context->nthreads;

  return 0;
}
4652 | | |
4653 | | int16_t blosc2_get_nthreads(void) |
4654 | 0 | { |
4655 | 0 | return g_nthreads; |
4656 | 0 | } |
4657 | | |
4658 | 0 | int16_t blosc2_set_nthreads(int16_t nthreads) { |
4659 | 0 | int16_t ret = g_nthreads; /* the previous number of threads */ |
4660 | | |
4661 | | /* Check whether the library should be initialized */ |
4662 | 0 | if (!g_initlib) blosc2_init(); |
4663 | |
|
4664 | 0 | if (nthreads != ret) { |
4665 | 0 | g_nthreads = nthreads; |
4666 | 0 | g_global_context->new_nthreads = nthreads; |
4667 | 0 | int16_t ret2 = check_nthreads(g_global_context); |
4668 | 0 | if (ret2 < 0) { |
4669 | 0 | return ret2; |
4670 | 0 | } |
4671 | 0 | } |
4672 | | |
4673 | 0 | return ret; |
4674 | 0 | } |
4675 | | |
4676 | | |
4677 | | const char* blosc1_get_compressor(void) |
4678 | 0 | { |
4679 | 0 | const char* compname; |
4680 | 0 | blosc2_compcode_to_compname(g_compressor, &compname); |
4681 | |
|
4682 | 0 | return compname; |
4683 | 0 | } |
4684 | | |
4685 | 2.74k | int blosc1_set_compressor(const char* compname) { |
4686 | 2.74k | int code = blosc2_compname_to_compcode(compname); |
4687 | 2.74k | if (code >= BLOSC_LAST_CODEC) { |
4688 | 0 | BLOSC_TRACE_ERROR("User defined codecs cannot be set here. Use Blosc2 mechanism instead."); |
4689 | 0 | BLOSC_ERROR(BLOSC2_ERROR_CODEC_SUPPORT); |
4690 | 0 | } |
4691 | 2.74k | g_compressor = code; |
4692 | | |
4693 | | /* Check whether the library should be initialized */ |
4694 | 2.74k | if (!g_initlib) blosc2_init(); |
4695 | | |
4696 | 2.74k | return code; |
4697 | 2.74k | } |
4698 | | |
4699 | 0 | void blosc2_set_delta(int dodelta) { |
4700 | |
|
4701 | 0 | g_delta = dodelta; |
4702 | | |
4703 | | /* Check whether the library should be initialized */ |
4704 | 0 | if (!g_initlib) blosc2_init(); |
4705 | |
|
4706 | 0 | } |
4707 | | |
4708 | 0 | const char* blosc2_list_compressors(void) { |
4709 | 0 | static int compressors_list_done = 0; |
4710 | 0 | static char ret[256]; |
4711 | |
|
4712 | 0 | if (compressors_list_done) return ret; |
4713 | 0 | ret[0] = '\0'; |
4714 | 0 | strcat(ret, BLOSC_BLOSCLZ_COMPNAME); |
4715 | 0 | strcat(ret, ","); |
4716 | 0 | strcat(ret, BLOSC_LZ4_COMPNAME); |
4717 | 0 | strcat(ret, ","); |
4718 | 0 | strcat(ret, BLOSC_LZ4HC_COMPNAME); |
4719 | 0 | #if defined(HAVE_ZLIB) |
4720 | 0 | strcat(ret, ","); |
4721 | 0 | strcat(ret, BLOSC_ZLIB_COMPNAME); |
4722 | 0 | #endif /* HAVE_ZLIB */ |
4723 | 0 | #if defined(HAVE_ZSTD) |
4724 | 0 | strcat(ret, ","); |
4725 | 0 | strcat(ret, BLOSC_ZSTD_COMPNAME); |
4726 | 0 | #endif /* HAVE_ZSTD */ |
4727 | 0 | compressors_list_done = 1; |
4728 | 0 | return ret; |
4729 | 0 | } |
4730 | | |
4731 | | |
4732 | 0 | const char* blosc2_get_version_string(void) { |
4733 | 0 | return BLOSC2_VERSION_STRING; |
4734 | 0 | } |
4735 | | |
4736 | | |
4737 | 0 | int blosc2_get_complib_info(const char* compname, char** complib, char** version) { |
4738 | 0 | int clibcode; |
4739 | 0 | const char* clibname; |
4740 | 0 | const char* clibversion = "unknown"; |
4741 | 0 | char sbuffer[256]; |
4742 | |
|
4743 | 0 | clibcode = compname_to_clibcode(compname); |
4744 | 0 | clibname = clibcode_to_clibname(clibcode); |
4745 | | |
4746 | | /* complib version */ |
4747 | 0 | if (clibcode == BLOSC_BLOSCLZ_LIB) { |
4748 | 0 | clibversion = BLOSCLZ_VERSION_STRING; |
4749 | 0 | } |
4750 | 0 | else if (clibcode == BLOSC_LZ4_LIB) { |
4751 | 0 | sprintf(sbuffer, "%d.%d.%d", |
4752 | 0 | LZ4_VERSION_MAJOR, LZ4_VERSION_MINOR, LZ4_VERSION_RELEASE); |
4753 | 0 | clibversion = sbuffer; |
4754 | 0 | } |
4755 | 0 | #if defined(HAVE_ZLIB) |
4756 | 0 | else if (clibcode == BLOSC_ZLIB_LIB) { |
4757 | 0 | #ifdef ZLIB_COMPAT |
4758 | 0 | clibversion = ZLIB_VERSION; |
4759 | | #elif defined(HAVE_ZLIB_NG) |
4760 | | clibversion = ZLIBNG_VERSION; |
4761 | | #else |
4762 | | clibversion = ZLIB_VERSION; |
4763 | | #endif |
4764 | 0 | } |
4765 | 0 | #endif /* HAVE_ZLIB */ |
4766 | 0 | #if defined(HAVE_ZSTD) |
4767 | 0 | else if (clibcode == BLOSC_ZSTD_LIB) { |
4768 | 0 | sprintf(sbuffer, "%d.%d.%d", |
4769 | 0 | ZSTD_VERSION_MAJOR, ZSTD_VERSION_MINOR, ZSTD_VERSION_RELEASE); |
4770 | 0 | clibversion = sbuffer; |
4771 | 0 | } |
4772 | 0 | #endif /* HAVE_ZSTD */ |
4773 | |
|
4774 | | #ifdef _MSC_VER |
4775 | | *complib = _strdup(clibname); |
4776 | | *version = _strdup(clibversion); |
4777 | | #else |
4778 | 0 | *complib = strdup(clibname); |
4779 | 0 | *version = strdup(clibversion); |
4780 | 0 | #endif |
4781 | 0 | return clibcode; |
4782 | 0 | } |
4783 | | |
/* Return `nbytes`, `cbytes` and `blocksize` from a compressed buffer
   (blosc1-compat wrapper around blosc2_cbuffer_sizes()). */
void blosc1_cbuffer_sizes(const void* cbuffer, size_t* nbytes, size_t* cbytes, size_t* blocksize) {
  int32_t nbytes_, cbytes_, blocksize_;
  blosc2_cbuffer_sizes(cbuffer, &nbytes_, &cbytes_, &blocksize_);
  /* Widen the 32-bit header fields to the size_t out parameters. */
  *nbytes = (size_t)nbytes_;
  *cbytes = (size_t)cbytes_;
  *blocksize = (size_t)blocksize_;
}
4792 | | |
4793 | 136k | int blosc2_cbuffer_sizes(const void* cbuffer, int32_t* nbytes, int32_t* cbytes, int32_t* blocksize) { |
4794 | 136k | blosc_header header; |
4795 | 136k | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
4796 | 136k | if (rc < 0) { |
4797 | | /* Return zeros if error reading header */ |
4798 | 0 | memset(&header, 0, sizeof(header)); |
4799 | 0 | } |
4800 | | |
4801 | | /* Read the interesting values */ |
4802 | 136k | if (nbytes != NULL) |
4803 | 136k | *nbytes = header.nbytes; |
4804 | 136k | if (cbytes != NULL) |
4805 | 86.7k | *cbytes = header.cbytes; |
4806 | 136k | if (blocksize != NULL) |
4807 | 0 | *blocksize = header.blocksize; |
4808 | 136k | return rc; |
4809 | 136k | } |
4810 | | |
4811 | 0 | int blosc1_cbuffer_validate(const void* cbuffer, size_t cbytes, size_t* nbytes) { |
4812 | 0 | int32_t header_cbytes; |
4813 | 0 | int32_t header_nbytes; |
4814 | 0 | if (cbytes < BLOSC_MIN_HEADER_LENGTH) { |
4815 | | /* Compressed data should contain enough space for header */ |
4816 | 0 | *nbytes = 0; |
4817 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
4818 | 0 | } |
4819 | 0 | int rc = blosc2_cbuffer_sizes(cbuffer, &header_nbytes, &header_cbytes, NULL); |
4820 | 0 | if (rc < 0) { |
4821 | 0 | *nbytes = 0; |
4822 | 0 | return rc; |
4823 | 0 | } |
4824 | 0 | *nbytes = header_nbytes; |
4825 | 0 | if (header_cbytes != (int32_t)cbytes) { |
4826 | | /* Compressed size from header does not match `cbytes` */ |
4827 | 0 | *nbytes = 0; |
4828 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
4829 | 0 | } |
4830 | 0 | if (*nbytes > BLOSC2_MAX_BUFFERSIZE) { |
4831 | | /* Uncompressed size is larger than allowed */ |
4832 | 0 | *nbytes = 0; |
4833 | 0 | return BLOSC2_ERROR_MEMORY_ALLOC; |
4834 | 0 | } |
4835 | 0 | return 0; |
4836 | 0 | } |
4837 | | |
4838 | | /* Return `typesize` and `flags` from a compressed buffer. */ |
4839 | 0 | void blosc1_cbuffer_metainfo(const void* cbuffer, size_t* typesize, int* flags) { |
4840 | 0 | blosc_header header; |
4841 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
4842 | 0 | if (rc < 0) { |
4843 | 0 | *typesize = *flags = 0; |
4844 | 0 | return; |
4845 | 0 | } |
4846 | | |
4847 | | /* Read the interesting values */ |
4848 | 0 | *flags = header.flags; |
4849 | 0 | *typesize = header.typesize; |
4850 | 0 | } |
4851 | | |
4852 | | |
4853 | | /* Return version information from a compressed buffer. */ |
4854 | 0 | void blosc2_cbuffer_versions(const void* cbuffer, int* version, int* versionlz) { |
4855 | 0 | blosc_header header; |
4856 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
4857 | 0 | if (rc < 0) { |
4858 | 0 | *version = *versionlz = 0; |
4859 | 0 | return; |
4860 | 0 | } |
4861 | | |
4862 | | /* Read the version info */ |
4863 | 0 | *version = header.version; |
4864 | 0 | *versionlz = header.versionlz; |
4865 | 0 | } |
4866 | | |
4867 | | |
4868 | | /* Return the compressor library/format used in a compressed buffer. */ |
4869 | 0 | const char* blosc2_cbuffer_complib(const void* cbuffer) { |
4870 | 0 | blosc_header header; |
4871 | 0 | int clibcode; |
4872 | 0 | const char* complib; |
4873 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
4874 | 0 | if (rc < 0) { |
4875 | 0 | return NULL; |
4876 | 0 | } |
4877 | | |
4878 | | /* Read the compressor format/library info */ |
4879 | 0 | clibcode = (header.flags & 0xe0) >> 5; |
4880 | 0 | complib = clibcode_to_clibname(clibcode); |
4881 | 0 | return complib; |
4882 | 0 | } |
4883 | | |
4884 | | |
4885 | | /* Get the internal blocksize to be used during compression. 0 means |
4886 | | that an automatic blocksize is computed internally. */ |
4887 | | int blosc1_get_blocksize(void) |
4888 | 0 | { |
4889 | 0 | return (int)g_force_blocksize; |
4890 | 0 | } |
4891 | | |
4892 | | |
4893 | | /* Force the use of a specific blocksize. If 0, an automatic |
4894 | | blocksize will be used (the default). */ |
4895 | 0 | void blosc1_set_blocksize(size_t blocksize) { |
4896 | 0 | g_force_blocksize = (int32_t)blocksize; |
4897 | 0 | } |
4898 | | |
4899 | | |
4900 | | /* Force the use of a specific split mode. */ |
4901 | | void blosc1_set_splitmode(int mode) |
4902 | 0 | { |
4903 | 0 | g_splitmode = mode; |
4904 | 0 | } |
4905 | | |
4906 | | |
4907 | | /* Set pointer to super-chunk. If NULL, no super-chunk will be |
4908 | | reachable (the default). */ |
4909 | 0 | void blosc_set_schunk(blosc2_schunk* schunk) { |
4910 | 0 | g_schunk = schunk; |
4911 | 0 | g_global_context->schunk = schunk; |
4912 | 0 | } |
4913 | | |
4914 | | blosc2_io *blosc2_io_global = NULL; |
4915 | | blosc2_io_cb BLOSC2_IO_CB_DEFAULTS; |
4916 | | blosc2_io_cb BLOSC2_IO_CB_MMAP; |
4917 | | |
4918 | | int _blosc2_register_io_cb(const blosc2_io_cb *io); |
4919 | | |
/* Initialize the Blosc2 library: register the built-in I/O backends,
 * register plugin codecs/filters/tuners (when built with HAVE_PLUGINS),
 * and create the global context used by the blosc1-compatibility API.
 * Idempotent: returns immediately when already initialized. */
void blosc2_init(void) {
  /* Return if Blosc is already initialized */
  if (g_initlib) return;

  /* Default filesystem-backed I/O callbacks. */
  BLOSC2_IO_CB_DEFAULTS.id = BLOSC2_IO_FILESYSTEM;
  BLOSC2_IO_CB_DEFAULTS.name = "filesystem";
  BLOSC2_IO_CB_DEFAULTS.is_allocation_necessary = true;
  BLOSC2_IO_CB_DEFAULTS.open = (blosc2_open_cb) blosc2_stdio_open;
  BLOSC2_IO_CB_DEFAULTS.close = (blosc2_close_cb) blosc2_stdio_close;
  BLOSC2_IO_CB_DEFAULTS.size = (blosc2_size_cb) blosc2_stdio_size;
  BLOSC2_IO_CB_DEFAULTS.write = (blosc2_write_cb) blosc2_stdio_write;
  BLOSC2_IO_CB_DEFAULTS.read = (blosc2_read_cb) blosc2_stdio_read;
  BLOSC2_IO_CB_DEFAULTS.truncate = (blosc2_truncate_cb) blosc2_stdio_truncate;
  BLOSC2_IO_CB_DEFAULTS.destroy = (blosc2_destroy_cb) blosc2_stdio_destroy;

  _blosc2_register_io_cb(&BLOSC2_IO_CB_DEFAULTS);

  /* Memory-mapped I/O callbacks (reads need no extra allocation). */
  BLOSC2_IO_CB_MMAP.id = BLOSC2_IO_FILESYSTEM_MMAP;
  BLOSC2_IO_CB_MMAP.name = "filesystem_mmap";
  BLOSC2_IO_CB_MMAP.is_allocation_necessary = false;
  BLOSC2_IO_CB_MMAP.open = (blosc2_open_cb) blosc2_stdio_mmap_open;
  BLOSC2_IO_CB_MMAP.close = (blosc2_close_cb) blosc2_stdio_mmap_close;
  BLOSC2_IO_CB_MMAP.read = (blosc2_read_cb) blosc2_stdio_mmap_read;
  BLOSC2_IO_CB_MMAP.size = (blosc2_size_cb) blosc2_stdio_mmap_size;
  BLOSC2_IO_CB_MMAP.write = (blosc2_write_cb) blosc2_stdio_mmap_write;
  BLOSC2_IO_CB_MMAP.truncate = (blosc2_truncate_cb) blosc2_stdio_mmap_truncate;
  BLOSC2_IO_CB_MMAP.destroy = (blosc2_destroy_cb) blosc2_stdio_mmap_destroy;

  _blosc2_register_io_cb(&BLOSC2_IO_CB_MMAP);

  /* Reset plugin registries before (re)registration. */
  g_ncodecs = 0;
  g_nfilters = 0;
  g_ntuners = 0;

#if defined(HAVE_PLUGINS)
#include "blosc2/blosc2-common.h"
#include "blosc2/blosc2-stdio.h"
  register_codecs();
  register_filters();
  register_tuners();
#endif
  blosc2_pthread_mutex_init(&global_comp_mutex, NULL);
  /* Create a global context (zero-initialized; thread pool starts lazily). */
  g_global_context = (blosc2_context*)my_malloc(sizeof(blosc2_context));
  memset(g_global_context, 0, sizeof(blosc2_context));
  g_global_context->nthreads = g_nthreads;
  g_global_context->new_nthreads = g_nthreads;
  g_initlib = 1;
}
4969 | | |
4970 | | |
4971 | 2.74k | int blosc2_free_resources(void) { |
4972 | | /* Return if Blosc is not initialized */ |
4973 | 2.74k | if (!g_initlib) return BLOSC2_ERROR_FAILURE; |
4974 | | |
4975 | 2.74k | return release_threadpool(g_global_context); |
4976 | 2.74k | } |
4977 | | |
4978 | | |
4979 | 2.74k | void blosc2_destroy(void) { |
4980 | | /* Return if Blosc is not initialized */ |
4981 | 2.74k | if (!g_initlib) return; |
4982 | | |
4983 | 2.74k | blosc2_free_resources(); |
4984 | 2.74k | g_initlib = 0; |
4985 | 2.74k | blosc2_free_ctx(g_global_context); |
4986 | | |
4987 | 2.74k | blosc2_pthread_mutex_destroy(&global_comp_mutex); |
4988 | | |
4989 | 2.74k | } |
4990 | | |
4991 | | |
/* Shut down and free the thread pool owned by `context`, if one was started.
 *
 * For callback-managed threads, only the per-thread contexts are destroyed.
 * Otherwise the workers are signalled to exit via context->end_threads plus
 * a final barrier release, then joined.  All synchronization primitives are
 * destroyed and the pool counters reset so init_threadpool() can be called
 * again.  Always returns 0. */
int release_threadpool(blosc2_context *context) {
  int32_t t;
  void* status;
  int rc;

  if (context->threads_started > 0) {
    if (threads_callback) {
      /* free context data for user-managed threads */
      for (t=0; t<context->threads_started; t++)
        destroy_thread_context(context->thread_contexts + t);
      my_free(context->thread_contexts);
    }
    else {
      /* Tell all existing threads to finish */
      context->end_threads = 1;
      /* Release the init barrier so workers observe end_threads and exit. */
      WAIT_INIT(-1, context);

      /* Join exiting threads */
      for (t = 0; t < context->threads_started; t++) {
        rc = blosc2_pthread_join(context->threads[t], &status);
        if (rc) {
          /* Join failures are logged but do not abort the teardown. */
          BLOSC_TRACE_ERROR("Return code from blosc2_pthread_join() is %d\n"
                            "\tError detail: %s.", rc, strerror(rc));
        }
      }

      /* Thread attributes */
#if !defined(_WIN32)
      pthread_attr_destroy(&context->ct_attr);
#endif

      /* Release thread handlers */
      my_free(context->threads);
    }

    /* Release mutex and condition variable objects */
    blosc2_pthread_mutex_destroy(&context->count_mutex);
    blosc2_pthread_mutex_destroy(&context->delta_mutex);
    blosc2_pthread_mutex_destroy(&context->nchunk_mutex);
    blosc2_pthread_cond_destroy(&context->delta_cv);

    /* Barriers */
#ifdef BLOSC_POSIX_BARRIERS
    pthread_barrier_destroy(&context->barr_init);
    pthread_barrier_destroy(&context->barr_finish);
#else
    blosc2_pthread_mutex_destroy(&context->count_threads_mutex);
    blosc2_pthread_cond_destroy(&context->count_threads_cv);
    context->count_threads = 0;      /* Reset threads counter */
#endif

    /* Reset flags and counters */
    context->end_threads = 0;
    context->threads_started = 0;
  }


  return 0;
}
5051 | | |
5052 | | |
5053 | | /* Contexts */ |
5054 | | |
5055 | | /* Create a context for compression */ |
5056 | 2.74k | blosc2_context* blosc2_create_cctx(blosc2_cparams cparams) { |
5057 | 2.74k | blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); |
5058 | 2.74k | BLOSC_ERROR_NULL(context, NULL); |
5059 | | |
5060 | | /* Populate the context, using zeros as default values */ |
5061 | 2.74k | memset(context, 0, sizeof(blosc2_context)); |
5062 | 2.74k | context->do_compress = 1; /* meant for compression */ |
5063 | 2.74k | context->use_dict = cparams.use_dict; |
5064 | 2.74k | if (cparams.instr_codec) { |
5065 | 0 | context->blosc2_flags = BLOSC2_INSTR_CODEC; |
5066 | 0 | } |
5067 | | |
5068 | 19.2k | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
5069 | 16.4k | context->filters[i] = cparams.filters[i]; |
5070 | 16.4k | context->filters_meta[i] = cparams.filters_meta[i]; |
5071 | | |
5072 | 16.4k | if (context->filters[i] >= BLOSC_LAST_FILTER && context->filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) { |
5073 | 0 | BLOSC_TRACE_ERROR("filter (%d) is not yet defined", |
5074 | 0 | context->filters[i]); |
5075 | 0 | free(context); |
5076 | 0 | return NULL; |
5077 | 0 | } |
5078 | 16.4k | if (context->filters[i] > BLOSC_LAST_REGISTERED_FILTER && context->filters[i] <= BLOSC2_GLOBAL_REGISTERED_FILTERS_STOP) { |
5079 | 0 | BLOSC_TRACE_ERROR("filter (%d) is not yet defined", |
5080 | 0 | context->filters[i]); |
5081 | 0 | free(context); |
5082 | 0 | return NULL; |
5083 | 0 | } |
5084 | 16.4k | } |
5085 | | |
5086 | 2.74k | #if defined(HAVE_PLUGINS) |
5087 | 2.74k | #include "blosc2/codecs-registry.h" |
5088 | 2.74k | if ((context->compcode >= BLOSC_CODEC_ZFP_FIXED_ACCURACY) && (context->compcode <= BLOSC_CODEC_ZFP_FIXED_RATE)) { |
5089 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; ++i) { |
5090 | 0 | if ((context->filters[i] == BLOSC_SHUFFLE) || (context->filters[i] == BLOSC_BITSHUFFLE)) { |
5091 | 0 | BLOSC_TRACE_ERROR("ZFP cannot be run in presence of SHUFFLE / BITSHUFFLE"); |
5092 | 0 | return NULL; |
5093 | 0 | } |
5094 | 0 | } |
5095 | 0 | } |
5096 | 2.74k | #endif /* HAVE_PLUGINS */ |
5097 | | |
5098 | | /* Check for a BLOSC_SHUFFLE environment variable */ |
5099 | 2.74k | int doshuffle = -1; |
5100 | 2.74k | char* envvar = getenv("BLOSC_SHUFFLE"); |
5101 | 2.74k | if (envvar != NULL) { |
5102 | 0 | if (strcmp(envvar, "NOSHUFFLE") == 0) { |
5103 | 0 | doshuffle = BLOSC_NOSHUFFLE; |
5104 | 0 | } |
5105 | 0 | else if (strcmp(envvar, "SHUFFLE") == 0) { |
5106 | 0 | doshuffle = BLOSC_SHUFFLE; |
5107 | 0 | } |
5108 | 0 | else if (strcmp(envvar, "BITSHUFFLE") == 0) { |
5109 | 0 | doshuffle = BLOSC_BITSHUFFLE; |
5110 | 0 | } |
5111 | 0 | else { |
5112 | 0 | BLOSC_TRACE_WARNING("BLOSC_SHUFFLE environment variable '%s' not recognized\n", envvar); |
5113 | 0 | } |
5114 | 0 | } |
5115 | | /* Check for a BLOSC_DELTA environment variable */ |
5116 | 2.74k | int dodelta = BLOSC_NOFILTER; |
5117 | 2.74k | envvar = getenv("BLOSC_DELTA"); |
5118 | 2.74k | if (envvar != NULL) { |
5119 | 0 | if (strcmp(envvar, "1") == 0) { |
5120 | 0 | dodelta = BLOSC_DELTA; |
5121 | 0 | } else if (strcmp(envvar, "0") == 0){ |
5122 | 0 | dodelta = BLOSC_NOFILTER; |
5123 | 0 | } |
5124 | 0 | else { |
5125 | 0 | BLOSC_TRACE_WARNING("BLOSC_DELTA environment variable '%s' not recognized\n", envvar); |
5126 | 0 | } |
5127 | 0 | } |
5128 | | /* Check for a BLOSC_TYPESIZE environment variable */ |
5129 | 2.74k | context->typesize = cparams.typesize; |
5130 | 2.74k | envvar = getenv("BLOSC_TYPESIZE"); |
5131 | 2.74k | if (envvar != NULL) { |
5132 | 0 | int32_t value; |
5133 | 0 | errno = 0; /* To distinguish success/failure after call */ |
5134 | 0 | value = (int32_t) strtol(envvar, NULL, 10); |
5135 | 0 | if ((errno != EINVAL) && (value > 0)) { |
5136 | 0 | context->typesize = value; |
5137 | 0 | } |
5138 | 0 | else { |
5139 | 0 | BLOSC_TRACE_WARNING("BLOSC_TYPESIZE environment variable '%s' not recognized\n", envvar); |
5140 | 0 | } |
5141 | 0 | } |
5142 | 2.74k | build_filters(doshuffle, dodelta, context->typesize, context->filters); |
5143 | | |
5144 | 2.74k | context->clevel = cparams.clevel; |
5145 | | /* Check for a BLOSC_CLEVEL environment variable */ |
5146 | 2.74k | envvar = getenv("BLOSC_CLEVEL"); |
5147 | 2.74k | if (envvar != NULL) { |
5148 | 0 | int value; |
5149 | 0 | errno = 0; /* To distinguish success/failure after call */ |
5150 | 0 | value = (int)strtol(envvar, NULL, 10); |
5151 | 0 | if ((errno != EINVAL) && (value >= 0)) { |
5152 | 0 | context->clevel = value; |
5153 | 0 | } |
5154 | 0 | else { |
5155 | 0 | BLOSC_TRACE_WARNING("BLOSC_CLEVEL environment variable '%s' not recognized\n", envvar); |
5156 | 0 | } |
5157 | 0 | } |
5158 | | |
5159 | 2.74k | context->compcode = cparams.compcode; |
5160 | | /* Check for a BLOSC_COMPRESSOR environment variable */ |
5161 | 2.74k | envvar = getenv("BLOSC_COMPRESSOR"); |
5162 | 2.74k | if (envvar != NULL) { |
5163 | 0 | int codec = blosc2_compname_to_compcode(envvar); |
5164 | 0 | if (codec >= BLOSC_LAST_CODEC) { |
5165 | 0 | BLOSC_TRACE_ERROR("User defined codecs cannot be set here. Use Blosc2 mechanism instead."); |
5166 | 0 | return NULL; |
5167 | 0 | } |
5168 | 0 | context->compcode = codec; |
5169 | 0 | } |
5170 | 2.74k | context->compcode_meta = cparams.compcode_meta; |
5171 | | |
5172 | 2.74k | context->blocksize = cparams.blocksize; |
5173 | | /* Check for a BLOSC_BLOCKSIZE environment variable */ |
5174 | 2.74k | envvar = getenv("BLOSC_BLOCKSIZE"); |
5175 | 2.74k | if (envvar != NULL) { |
5176 | 0 | int32_t blocksize; |
5177 | 0 | errno = 0; /* To distinguish success/failure after call */ |
5178 | 0 | blocksize = (int32_t) strtol(envvar, NULL, 10); |
5179 | 0 | if ((errno != EINVAL) && (blocksize > 0)) { |
5180 | 0 | context->blocksize = blocksize; |
5181 | 0 | } |
5182 | 0 | else { |
5183 | 0 | BLOSC_TRACE_WARNING("BLOSC_BLOCKSIZE environment variable '%s' not recognized\n", envvar); |
5184 | 0 | } |
5185 | 0 | } |
5186 | | |
5187 | 2.74k | context->nthreads = cparams.nthreads; |
5188 | | /* Check for a BLOSC_NTHREADS environment variable */ |
5189 | 2.74k | envvar = getenv("BLOSC_NTHREADS"); |
5190 | 2.74k | if (envvar != NULL) { |
5191 | 0 | errno = 0; /* To distinguish success/failure after call */ |
5192 | 0 | int16_t nthreads = (int16_t) strtol(envvar, NULL, 10); |
5193 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
5194 | 0 | context->nthreads = nthreads; |
5195 | 0 | } |
5196 | 0 | else { |
5197 | 0 | BLOSC_TRACE_WARNING("BLOSC_NTHREADS environment variable '%s' not recognized\n", envvar); |
5198 | 0 | } |
5199 | 0 | } |
5200 | 2.74k | context->new_nthreads = context->nthreads; |
5201 | | |
5202 | 2.74k | context->splitmode = cparams.splitmode; |
5203 | | /* Check for a BLOSC_SPLITMODE environment variable */ |
5204 | 2.74k | envvar = getenv("BLOSC_SPLITMODE"); |
5205 | 2.74k | if (envvar != NULL) { |
5206 | 0 | int32_t splitmode = -1; |
5207 | 0 | if (strcmp(envvar, "ALWAYS") == 0) { |
5208 | 0 | splitmode = BLOSC_ALWAYS_SPLIT; |
5209 | 0 | } |
5210 | 0 | else if (strcmp(envvar, "NEVER") == 0) { |
5211 | 0 | splitmode = BLOSC_NEVER_SPLIT; |
5212 | 0 | } |
5213 | 0 | else if (strcmp(envvar, "AUTO") == 0) { |
5214 | 0 | splitmode = BLOSC_AUTO_SPLIT; |
5215 | 0 | } |
5216 | 0 | else if (strcmp(envvar, "FORWARD_COMPAT") == 0) { |
5217 | 0 | splitmode = BLOSC_FORWARD_COMPAT_SPLIT; |
5218 | 0 | } |
5219 | 0 | else { |
5220 | 0 | BLOSC_TRACE_WARNING("BLOSC_SPLITMODE environment variable '%s' not recognized\n", envvar); |
5221 | 0 | } |
5222 | 0 | if (splitmode >= 0) { |
5223 | 0 | context->splitmode = splitmode; |
5224 | 0 | } |
5225 | 0 | } |
5226 | | |
5227 | 2.74k | context->threads_started = 0; |
5228 | 2.74k | context->schunk = cparams.schunk; |
5229 | | |
5230 | 2.74k | if (cparams.prefilter != NULL) { |
5231 | 0 | context->prefilter = cparams.prefilter; |
5232 | 0 | context->preparams = (blosc2_prefilter_params*)my_malloc(sizeof(blosc2_prefilter_params)); |
5233 | 0 | BLOSC_ERROR_NULL(context->preparams, NULL); |
5234 | 0 | memcpy(context->preparams, cparams.preparams, sizeof(blosc2_prefilter_params)); |
5235 | 0 | } |
5236 | | |
5237 | 2.74k | if (cparams.tuner_id <= 0) { |
5238 | 2.74k | cparams.tuner_id = g_tuner; |
5239 | 2.74k | } else { |
5240 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
5241 | 0 | if (g_tuners[i].id == cparams.tuner_id) { |
5242 | 0 | if (g_tuners[i].init == NULL) { |
5243 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
5244 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
5245 | 0 | return NULL; |
5246 | 0 | } |
5247 | 0 | } |
5248 | 0 | if (g_tuners[i].init(cparams.tuner_params, context, NULL) < 0) { |
5249 | 0 | BLOSC_TRACE_ERROR("Error in user-defined tuner %d init function\n", cparams.tuner_id); |
5250 | 0 | return NULL; |
5251 | 0 | } |
5252 | 0 | goto urtunersuccess; |
5253 | 0 | } |
5254 | 0 | } |
5255 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", cparams.tuner_id); |
5256 | 0 | return NULL; |
5257 | 0 | } |
5258 | 2.74k | urtunersuccess:; |
5259 | | |
5260 | 2.74k | context->tuner_id = cparams.tuner_id; |
5261 | | |
5262 | 2.74k | context->codec_params = cparams.codec_params; |
5263 | 2.74k | memcpy(context->filter_params, cparams.filter_params, BLOSC2_MAX_FILTERS * sizeof(void*)); |
5264 | | |
5265 | 2.74k | return context; |
5266 | 2.74k | } |
5267 | | |
5268 | | /* Create a context for decompression */ |
5269 | 2.74k | blosc2_context* blosc2_create_dctx(blosc2_dparams dparams) { |
5270 | 2.74k | blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); |
5271 | 2.74k | BLOSC_ERROR_NULL(context, NULL); |
5272 | | |
5273 | | /* Populate the context, using zeros as default values */ |
5274 | 2.74k | memset(context, 0, sizeof(blosc2_context)); |
5275 | 2.74k | context->do_compress = 0; /* Meant for decompression */ |
5276 | | |
5277 | 2.74k | context->nthreads = dparams.nthreads; |
5278 | 2.74k | char* envvar = getenv("BLOSC_NTHREADS"); |
5279 | 2.74k | if (envvar != NULL) { |
5280 | 0 | errno = 0; /* To distinguish success/failure after call */ |
5281 | 0 | long nthreads = strtol(envvar, NULL, 10); |
5282 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
5283 | 0 | context->nthreads = (int16_t) nthreads; |
5284 | 0 | } |
5285 | 0 | } |
5286 | 2.74k | context->new_nthreads = context->nthreads; |
5287 | | |
5288 | 2.74k | context->threads_started = 0; |
5289 | 2.74k | context->block_maskout = NULL; |
5290 | 2.74k | context->block_maskout_nitems = 0; |
5291 | 2.74k | context->schunk = dparams.schunk; |
5292 | | |
5293 | 2.74k | if (dparams.postfilter != NULL) { |
5294 | 0 | context->postfilter = dparams.postfilter; |
5295 | 0 | context->postparams = (blosc2_postfilter_params*)my_malloc(sizeof(blosc2_postfilter_params)); |
5296 | 0 | BLOSC_ERROR_NULL(context->postparams, NULL); |
5297 | 0 | memcpy(context->postparams, dparams.postparams, sizeof(blosc2_postfilter_params)); |
5298 | 0 | } |
5299 | | |
5300 | 2.74k | return context; |
5301 | 2.74k | } |
5302 | | |
5303 | | |
/* Release a (compression or decompression) context and everything it owns:
 * the thread pool, the serial thread context, codec dictionaries, tuner
 * state, pre/postfilter params and the block maskout/offset arrays.
 * NOTE(review): the early `return`s in the tuner-cleanup section below skip
 * the trailing my_free(context) and so leak the context itself — confirm
 * whether that is intentional. */
void blosc2_free_ctx(blosc2_context* context) {
  release_threadpool(context);
  if (context->serial_context != NULL) {
    free_thread_context(context->serial_context);
  }
  release_context_dict_buffer(context);
  // The compression dictionary type depends on the codec, so dispatch on
  // compcode to call the matching destructor.
  if (context->dict_cdict != NULL) {
    if (context->compcode == BLOSC_LZ4) {
      LZ4_freeStream((LZ4_stream_t*)context->dict_cdict);
    } else if (context->compcode == BLOSC_LZ4HC) {
      LZ4_freeStreamHC((LZ4_streamHC_t*)context->dict_cdict);
    }
#ifdef HAVE_ZSTD
    else if (context->compcode == BLOSC_ZSTD) {
      ZSTD_freeCDict(context->dict_cdict);
    }
#endif
  }
  // Decompression dictionaries only exist for Zstd builds.
  if (context->dict_ddict != NULL) {
#ifdef HAVE_ZSTD
    ZSTD_freeDDict(context->dict_ddict);
#endif
  }
  // Let the active tuner release its private state.  For user-registered
  // tuners the plugin may need to be loaded lazily before its free() entry
  // point can be called.
  if (context->tuner_params != NULL) {
    int rc;
    if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) {
      rc = blosc_stune_free(context);
    } else {
      for (int i = 0; i < g_ntuners; ++i) {
        if (g_tuners[i].id == context->tuner_id) {
          if (g_tuners[i].free == NULL) {
            if (fill_tuner(&g_tuners[i]) < 0) {
              BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id);
              return;
            }
          }
          rc = g_tuners[i].free(context);
          goto urtunersuccess;
        }
      }
      BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id);
      return;
      urtunersuccess:;
    }
    if (rc < 0) {
      BLOSC_TRACE_ERROR("Error in user-defined tuner free function\n");
      return;
    }
  }
  /* May be needed if codec_params ever contains nested objects
  if (context->codec_params != NULL) {
    int rc;
    for (int i = 0; i < g_ncodecs; ++i) {
      if (g_codecs[i].compcode == context->compcode) {
        if (g_codecs[i].free == NULL) {
          // Dynamically load codec plugin
          if (fill_codec(&g_codecs[i]) < 0) {
            BLOSC_TRACE_ERROR("Could not load codec %d.", g_codecs[i].compcode);
            return BLOSC2_ERROR_CODEC_SUPPORT;
          }
        }
        if (g_codecs[i].free == NULL){
          // no free func, codec_params is simple
          my_free(context->codec_params);
        }
        else{ // has free function for codec_params (e.g. openzl)
          rc = g_codecs[i].free(context->codec_params);
          goto urcodecsuccess;
        }
      }
    }
    BLOSC_TRACE_ERROR("User-defined compressor codec %d not found", context->compcode);
    return BLOSC2_ERROR_CODEC_SUPPORT;
    urcodecsuccess:;
    if (rc < 0) {
      BLOSC_TRACE_ERROR("Error in user-defined codec free function\n");
      return;
    }
  }
  */
  // preparams/postparams were copied with my_malloc in the ctx constructors.
  if (context->prefilter != NULL) {
    my_free(context->preparams);
  }
  if (context->postfilter != NULL) {
    my_free(context->postparams);
  }

  // These arrays are allocated with plain malloc elsewhere, hence free().
  if (context->block_maskout != NULL) {
    free(context->block_maskout);
  }
  if (context->blocknbytes != NULL) {
    free(context->blocknbytes);
  }
  if (context->blockoffsets != NULL) {
    free(context->blockoffsets);
  }
  if (context->blockcbytes != NULL) {
    free(context->blockcbytes);
  }
  my_free(context);
}
5405 | | |
5406 | | |
5407 | 0 | int blosc2_ctx_get_cparams(blosc2_context *ctx, blosc2_cparams *cparams) { |
5408 | 0 | cparams->compcode = ctx->compcode; |
5409 | 0 | cparams->compcode_meta = ctx->compcode_meta; |
5410 | 0 | cparams->clevel = ctx->clevel; |
5411 | 0 | cparams->use_dict = ctx->use_dict; |
5412 | 0 | cparams->instr_codec = ctx->blosc2_flags & BLOSC2_INSTR_CODEC; |
5413 | 0 | cparams->typesize = ctx->typesize; |
5414 | 0 | cparams->nthreads = ctx->nthreads; |
5415 | 0 | cparams->blocksize = ctx->blocksize; |
5416 | 0 | cparams->splitmode = ctx->splitmode; |
5417 | 0 | cparams->schunk = ctx->schunk; |
5418 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; ++i) { |
5419 | 0 | cparams->filters[i] = ctx->filters[i]; |
5420 | 0 | cparams->filters_meta[i] = ctx->filters_meta[i]; |
5421 | 0 | } |
5422 | 0 | cparams->prefilter = ctx->prefilter; |
5423 | 0 | cparams->preparams = ctx->preparams; |
5424 | 0 | cparams->tuner_id = ctx->tuner_id; |
5425 | 0 | cparams->codec_params = ctx->codec_params; |
5426 | |
|
5427 | 0 | return BLOSC2_ERROR_SUCCESS; |
5428 | 0 | } |
5429 | | |
5430 | | |
5431 | 0 | int blosc2_ctx_get_dparams(blosc2_context *ctx, blosc2_dparams *dparams) { |
5432 | 0 | dparams->nthreads = ctx->nthreads; |
5433 | 0 | dparams->schunk = ctx->schunk; |
5434 | 0 | dparams->postfilter = ctx->postfilter; |
5435 | 0 | dparams->postparams = ctx->postparams; |
5436 | 0 | dparams->typesize = ctx->typesize; |
5437 | |
|
5438 | 0 | return BLOSC2_ERROR_SUCCESS; |
5439 | 0 | } |
5440 | | |
5441 | | |
5442 | | /* Set a maskout in decompression context */ |
5443 | 0 | int blosc2_set_maskout(blosc2_context *ctx, bool *maskout, int nblocks) { |
5444 | |
|
5445 | 0 | if (ctx->block_maskout != NULL) { |
5446 | | // Get rid of a possible mask here |
5447 | 0 | free(ctx->block_maskout); |
5448 | 0 | } |
5449 | |
|
5450 | 0 | bool *maskout_ = malloc(nblocks); |
5451 | 0 | BLOSC_ERROR_NULL(maskout_, BLOSC2_ERROR_MEMORY_ALLOC); |
5452 | 0 | memcpy(maskout_, maskout, nblocks); |
5453 | 0 | ctx->block_maskout = maskout_; |
5454 | 0 | ctx->block_maskout_nitems = nblocks; |
5455 | |
|
5456 | 0 | return 0; |
5457 | 0 | } |
5458 | | |
5459 | | |
5460 | | /* Create a chunk made of zeros */ |
5461 | 0 | int blosc2_chunk_zeros(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
5462 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
5463 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
5464 | 0 | return BLOSC2_ERROR_DATA; |
5465 | 0 | } |
5466 | | |
5467 | 0 | if ((nbytes > 0) && (nbytes % cparams.typesize)) { |
5468 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
5469 | 0 | return BLOSC2_ERROR_DATA; |
5470 | 0 | } |
5471 | | |
5472 | 0 | blosc_header header; |
5473 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
5474 | 0 | if (context == NULL) { |
5475 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
5476 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
5477 | 0 | } |
5478 | | |
5479 | 0 | int error = initialize_context_compression( |
5480 | 0 | context, NULL, nbytes, dest, destsize, |
5481 | 0 | context->clevel, context->filters, context->filters_meta, |
5482 | 0 | context->typesize, context->compcode, context->blocksize, |
5483 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
5484 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
5485 | 0 | if (error <= 0) { |
5486 | 0 | blosc2_free_ctx(context); |
5487 | 0 | return error; |
5488 | 0 | } |
5489 | | |
5490 | 0 | memset(&header, 0, sizeof(header)); |
5491 | 0 | header.version = BLOSC2_VERSION_FORMAT_STABLE; |
5492 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
5493 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
5494 | 0 | header.typesize = context->typesize; |
5495 | 0 | header.nbytes = (int32_t)nbytes; |
5496 | 0 | header.blocksize = context->blocksize; |
5497 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
5498 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_ZERO << 4; // mark chunk as all zeros |
5499 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
5500 | |
|
5501 | 0 | blosc2_free_ctx(context); |
5502 | |
|
5503 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH; |
5504 | 0 | } |
5505 | | |
5506 | | |
5507 | | /* Create a chunk made of uninitialized values */ |
5508 | 0 | int blosc2_chunk_uninit(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
5509 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
5510 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
5511 | 0 | return BLOSC2_ERROR_DATA; |
5512 | 0 | } |
5513 | | |
5514 | 0 | if (nbytes % cparams.typesize) { |
5515 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
5516 | 0 | return BLOSC2_ERROR_DATA; |
5517 | 0 | } |
5518 | | |
5519 | 0 | blosc_header header; |
5520 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
5521 | 0 | if (context == NULL) { |
5522 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
5523 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
5524 | 0 | } |
5525 | 0 | int error = initialize_context_compression( |
5526 | 0 | context, NULL, nbytes, dest, destsize, |
5527 | 0 | context->clevel, context->filters, context->filters_meta, |
5528 | 0 | context->typesize, context->compcode, context->blocksize, |
5529 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
5530 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
5531 | 0 | if (error <= 0) { |
5532 | 0 | blosc2_free_ctx(context); |
5533 | 0 | return error; |
5534 | 0 | } |
5535 | | |
5536 | 0 | memset(&header, 0, sizeof(header)); |
5537 | 0 | header.version = BLOSC2_VERSION_FORMAT_STABLE; |
5538 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
5539 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
5540 | 0 | header.typesize = context->typesize; |
5541 | 0 | header.nbytes = (int32_t)nbytes; |
5542 | 0 | header.blocksize = context->blocksize; |
5543 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
5544 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_UNINIT << 4; // mark chunk as uninitialized |
5545 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
5546 | |
|
5547 | 0 | blosc2_free_ctx(context); |
5548 | |
|
5549 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH; |
5550 | 0 | } |
5551 | | |
5552 | | |
5553 | | /* Create a chunk made of nans */ |
5554 | 0 | int blosc2_chunk_nans(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
5555 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
5556 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
5557 | 0 | return BLOSC2_ERROR_DATA; |
5558 | 0 | } |
5559 | | |
5560 | 0 | if (nbytes % cparams.typesize) { |
5561 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
5562 | 0 | return BLOSC2_ERROR_DATA; |
5563 | 0 | } |
5564 | | |
5565 | 0 | blosc_header header; |
5566 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
5567 | 0 | if (context == NULL) { |
5568 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
5569 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
5570 | 0 | } |
5571 | | |
5572 | 0 | int error = initialize_context_compression( |
5573 | 0 | context, NULL, nbytes, dest, destsize, |
5574 | 0 | context->clevel, context->filters, context->filters_meta, |
5575 | 0 | context->typesize, context->compcode, context->blocksize, |
5576 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
5577 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
5578 | 0 | if (error <= 0) { |
5579 | 0 | blosc2_free_ctx(context); |
5580 | 0 | return error; |
5581 | 0 | } |
5582 | | |
5583 | 0 | memset(&header, 0, sizeof(header)); |
5584 | 0 | header.version = BLOSC2_VERSION_FORMAT_STABLE; |
5585 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
5586 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
5587 | 0 | header.typesize = context->typesize; |
5588 | 0 | header.nbytes = (int32_t)nbytes; |
5589 | 0 | header.blocksize = context->blocksize; |
5590 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
5591 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_NAN << 4; // mark chunk as all NaNs |
5592 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
5593 | |
|
5594 | 0 | blosc2_free_ctx(context); |
5595 | |
|
5596 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH; |
5597 | 0 | } |
5598 | | |
5599 | | |
5600 | | /* Create a chunk made of repeated values */ |
5601 | | int blosc2_chunk_repeatval(blosc2_cparams cparams, const int32_t nbytes, |
5602 | 0 | void* dest, int32_t destsize, const void* repeatval) { |
5603 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize) { |
5604 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
5605 | 0 | return BLOSC2_ERROR_DATA; |
5606 | 0 | } |
5607 | | |
5608 | 0 | if (nbytes % cparams.typesize) { |
5609 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
5610 | 0 | return BLOSC2_ERROR_DATA; |
5611 | 0 | } |
5612 | | |
5613 | 0 | blosc_header header; |
5614 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
5615 | 0 | if (context == NULL) { |
5616 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
5617 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
5618 | 0 | } |
5619 | | |
5620 | 0 | int error = initialize_context_compression( |
5621 | 0 | context, NULL, nbytes, dest, destsize, |
5622 | 0 | context->clevel, context->filters, context->filters_meta, |
5623 | 0 | context->typesize, context->compcode, context->blocksize, |
5624 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
5625 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
5626 | 0 | if (error <= 0) { |
5627 | 0 | blosc2_free_ctx(context); |
5628 | 0 | return error; |
5629 | 0 | } |
5630 | | |
5631 | 0 | memset(&header, 0, sizeof(header)); |
5632 | 0 | header.version = BLOSC2_VERSION_FORMAT_STABLE; |
5633 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
5634 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
5635 | 0 | header.typesize = context->typesize; |
5636 | 0 | header.nbytes = (int32_t)nbytes; |
5637 | 0 | header.blocksize = context->blocksize; |
5638 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize; |
5639 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_VALUE << 4; // mark chunk as all repeated value |
5640 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
5641 | 0 | memcpy((uint8_t *)dest + sizeof(header), repeatval, cparams.typesize); |
5642 | |
|
5643 | 0 | blosc2_free_ctx(context); |
5644 | |
|
5645 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize; |
5646 | 0 | } |
5647 | | |
5648 | | |
5649 | | /* Register filters */ |
5650 | | |
5651 | 13.7k | int register_filter_private(blosc2_filter *filter) { |
5652 | 13.7k | BLOSC_ERROR_NULL(filter, BLOSC2_ERROR_INVALID_PARAM); |
5653 | 13.7k | if (g_nfilters == UINT8_MAX) { |
5654 | 0 | BLOSC_TRACE_ERROR("Can not register more filters"); |
5655 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
5656 | 0 | } |
5657 | 13.7k | if (filter->id < BLOSC2_GLOBAL_REGISTERED_FILTERS_START) { |
5658 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_FILTERS_START); |
5659 | 0 | return BLOSC2_ERROR_FAILURE; |
5660 | 0 | } |
5661 | | /* This condition can never be fulfilled |
5662 | | if (filter->id > BLOSC2_USER_REGISTERED_FILTERS_STOP) { |
5663 | | BLOSC_TRACE_ERROR("The id must be less than or equal to %d", BLOSC2_USER_REGISTERED_FILTERS_STOP); |
5664 | | return BLOSC2_ERROR_FAILURE; |
5665 | | } |
5666 | | */ |
5667 | | |
5668 | 41.1k | for (uint64_t i = 0; i < g_nfilters; ++i) { |
5669 | 27.4k | if (g_filters[i].id == filter->id) { |
5670 | 0 | if (strcmp(g_filters[i].name, filter->name) != 0) { |
5671 | 0 | BLOSC_TRACE_ERROR("The filter (ID: %d) plugin is already registered with name: %s." |
5672 | 0 | " Choose another one !", filter->id, g_filters[i].name); |
5673 | 0 | return BLOSC2_ERROR_FAILURE; |
5674 | 0 | } |
5675 | 0 | else { |
5676 | | // Already registered, so no more actions needed |
5677 | 0 | return BLOSC2_ERROR_SUCCESS; |
5678 | 0 | } |
5679 | 0 | } |
5680 | 27.4k | } |
5681 | | |
5682 | 13.7k | blosc2_filter *filter_new = &g_filters[g_nfilters++]; |
5683 | 13.7k | memcpy(filter_new, filter, sizeof(blosc2_filter)); |
5684 | | |
5685 | 13.7k | return BLOSC2_ERROR_SUCCESS; |
5686 | 13.7k | } |
5687 | | |
5688 | | |
5689 | 0 | int blosc2_register_filter(blosc2_filter *filter) { |
5690 | 0 | if (filter->id < BLOSC2_USER_REGISTERED_FILTERS_START) { |
5691 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal to %d", BLOSC2_USER_REGISTERED_FILTERS_START); |
5692 | 0 | return BLOSC2_ERROR_FAILURE; |
5693 | 0 | } |
5694 | | |
5695 | 0 | return register_filter_private(filter); |
5696 | 0 | } |
5697 | | |
5698 | | |
5699 | | /* Register codecs */ |
5700 | | |
5701 | 19.2k | int register_codec_private(blosc2_codec *codec) { |
5702 | 19.2k | BLOSC_ERROR_NULL(codec, BLOSC2_ERROR_INVALID_PARAM); |
5703 | 19.2k | if (g_ncodecs == UINT8_MAX) { |
5704 | 0 | BLOSC_TRACE_ERROR("Can not register more codecs"); |
5705 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
5706 | 0 | } |
5707 | 19.2k | if (codec->compcode < BLOSC2_GLOBAL_REGISTERED_CODECS_START) { |
5708 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_CODECS_START); |
5709 | 0 | return BLOSC2_ERROR_FAILURE; |
5710 | 0 | } |
5711 | | /* This condition can never be fulfilled |
5712 | | if (codec->compcode > BLOSC2_USER_REGISTERED_CODECS_STOP) { |
5713 | | BLOSC_TRACE_ERROR("The id must be less or equal to %d", BLOSC2_USER_REGISTERED_CODECS_STOP); |
5714 | | return BLOSC2_ERROR_FAILURE; |
5715 | | } |
5716 | | */ |
5717 | | |
5718 | 76.8k | for (int i = 0; i < g_ncodecs; ++i) { |
5719 | 57.6k | if (g_codecs[i].compcode == codec->compcode) { |
5720 | 0 | if (strcmp(g_codecs[i].compname, codec->compname) != 0) { |
5721 | 0 | BLOSC_TRACE_ERROR("The codec (ID: %d) plugin is already registered with name: %s." |
5722 | 0 | " Choose another one !", codec->compcode, codec->compname); |
5723 | 0 | return BLOSC2_ERROR_CODEC_PARAM; |
5724 | 0 | } |
5725 | 0 | else { |
5726 | | // Already registered, so no more actions needed |
5727 | 0 | return BLOSC2_ERROR_SUCCESS; |
5728 | 0 | } |
5729 | 0 | } |
5730 | 57.6k | } |
5731 | | |
5732 | 19.2k | blosc2_codec *codec_new = &g_codecs[g_ncodecs++]; |
5733 | 19.2k | memcpy(codec_new, codec, sizeof(blosc2_codec)); |
5734 | | |
5735 | 19.2k | return BLOSC2_ERROR_SUCCESS; |
5736 | 19.2k | } |
5737 | | |
5738 | | |
5739 | 0 | int blosc2_register_codec(blosc2_codec *codec) { |
5740 | 0 | if (codec->compcode < BLOSC2_USER_REGISTERED_CODECS_START) { |
5741 | 0 | BLOSC_TRACE_ERROR("The compcode must be greater or equal than %d", BLOSC2_USER_REGISTERED_CODECS_START); |
5742 | 0 | return BLOSC2_ERROR_CODEC_PARAM; |
5743 | 0 | } |
5744 | | |
5745 | 0 | return register_codec_private(codec); |
5746 | 0 | } |
5747 | | |
5748 | | |
5749 | | /* Register tuners */ |
5750 | | |
5751 | 2.74k | int register_tuner_private(blosc2_tuner *tuner) { |
5752 | 2.74k | BLOSC_ERROR_NULL(tuner, BLOSC2_ERROR_INVALID_PARAM); |
5753 | 2.74k | if (g_ntuners == UINT8_MAX) { |
5754 | 0 | BLOSC_TRACE_ERROR("Can not register more tuners"); |
5755 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
5756 | 0 | } |
5757 | 2.74k | if (tuner->id < BLOSC2_GLOBAL_REGISTERED_TUNER_START) { |
5758 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_TUNER_START); |
5759 | 0 | return BLOSC2_ERROR_FAILURE; |
5760 | 0 | } |
5761 | | |
5762 | 2.74k | for (int i = 0; i < g_ntuners; ++i) { |
5763 | 0 | if (g_tuners[i].id == tuner->id) { |
5764 | 0 | if (strcmp(g_tuners[i].name, tuner->name) != 0) { |
5765 | 0 | BLOSC_TRACE_ERROR("The tuner (ID: %d) plugin is already registered with name: %s." |
5766 | 0 | " Choose another one !", tuner->id, g_tuners[i].name); |
5767 | 0 | return BLOSC2_ERROR_FAILURE; |
5768 | 0 | } |
5769 | 0 | else { |
5770 | | // Already registered, so no more actions needed |
5771 | 0 | return BLOSC2_ERROR_SUCCESS; |
5772 | 0 | } |
5773 | 0 | } |
5774 | 0 | } |
5775 | | |
5776 | 2.74k | blosc2_tuner *tuner_new = &g_tuners[g_ntuners++]; |
5777 | 2.74k | memcpy(tuner_new, tuner, sizeof(blosc2_tuner)); |
5778 | | |
5779 | 2.74k | return BLOSC2_ERROR_SUCCESS; |
5780 | 2.74k | } |
5781 | | |
5782 | | |
5783 | 0 | int blosc2_register_tuner(blosc2_tuner *tuner) { |
5784 | 0 | if (tuner->id < BLOSC2_USER_REGISTERED_TUNER_START) { |
5785 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal to %d", BLOSC2_USER_REGISTERED_TUNER_START); |
5786 | 0 | return BLOSC2_ERROR_FAILURE; |
5787 | 0 | } |
5788 | | |
5789 | 0 | return register_tuner_private(tuner); |
5790 | 0 | } |
5791 | | |
5792 | | |
5793 | 5.49k | int _blosc2_register_io_cb(const blosc2_io_cb *io) { |
5794 | | |
5795 | 8.23k | for (uint64_t i = 0; i < g_nio; ++i) { |
5796 | 8.23k | if (g_ios[i].id == io->id) { |
5797 | 5.49k | if (strcmp(g_ios[i].name, io->name) != 0) { |
5798 | 0 | BLOSC_TRACE_ERROR("The IO (ID: %d) plugin is already registered with name: %s." |
5799 | 0 | " Choose another one !", io->id, g_ios[i].name); |
5800 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
5801 | 0 | } |
5802 | 5.49k | else { |
5803 | | // Already registered, so no more actions needed |
5804 | 5.49k | return BLOSC2_ERROR_SUCCESS; |
5805 | 5.49k | } |
5806 | 5.49k | } |
5807 | 8.23k | } |
5808 | | |
5809 | 2 | blosc2_io_cb *io_new = &g_ios[g_nio++]; |
5810 | 2 | memcpy(io_new, io, sizeof(blosc2_io_cb)); |
5811 | | |
5812 | 2 | return BLOSC2_ERROR_SUCCESS; |
5813 | 5.49k | } |
5814 | | |
5815 | 0 | int blosc2_register_io_cb(const blosc2_io_cb *io) { |
5816 | 0 | BLOSC_ERROR_NULL(io, BLOSC2_ERROR_INVALID_PARAM); |
5817 | 0 | if (g_nio == UINT8_MAX) { |
5818 | 0 | BLOSC_TRACE_ERROR("Can not register more codecs"); |
5819 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
5820 | 0 | } |
5821 | | |
5822 | 0 | if (io->id < BLOSC2_IO_REGISTERED) { |
5823 | 0 | BLOSC_TRACE_ERROR("The compcode must be greater or equal than %d", BLOSC2_IO_REGISTERED); |
5824 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
5825 | 0 | } |
5826 | | |
5827 | 0 | return _blosc2_register_io_cb(io); |
5828 | 0 | } |
5829 | | |
5830 | 2.74k | blosc2_io_cb *blosc2_get_io_cb(uint8_t id) { |
5831 | | // If g_initlib is not set by blosc2_init() this function will try to read |
5832 | | // uninitialized memory. We should therefore always return NULL in that case |
5833 | 2.74k | if (!g_initlib) { |
5834 | 0 | return NULL; |
5835 | 0 | } |
5836 | 2.74k | for (uint64_t i = 0; i < g_nio; ++i) { |
5837 | 2.74k | if (g_ios[i].id == id) { |
5838 | 2.74k | return &g_ios[i]; |
5839 | 2.74k | } |
5840 | 2.74k | } |
5841 | 0 | if (id == BLOSC2_IO_FILESYSTEM) { |
5842 | 0 | if (_blosc2_register_io_cb(&BLOSC2_IO_CB_DEFAULTS) < 0) { |
5843 | 0 | BLOSC_TRACE_ERROR("Error registering the default IO API"); |
5844 | 0 | return NULL; |
5845 | 0 | } |
5846 | 0 | return blosc2_get_io_cb(id); |
5847 | 0 | } |
5848 | 0 | else if (id == BLOSC2_IO_FILESYSTEM_MMAP) { |
5849 | 0 | if (_blosc2_register_io_cb(&BLOSC2_IO_CB_MMAP) < 0) { |
5850 | 0 | BLOSC_TRACE_ERROR("Error registering the mmap IO API"); |
5851 | 0 | return NULL; |
5852 | 0 | } |
5853 | 0 | return blosc2_get_io_cb(id); |
5854 | 0 | } |
5855 | 0 | return NULL; |
5856 | 0 | } |
5857 | | |
5858 | 0 | void blosc2_unidim_to_multidim(uint8_t ndim, int64_t *shape, int64_t i, int64_t *index) { |
5859 | 0 | if (ndim == 0) { |
5860 | 0 | return; |
5861 | 0 | } |
5862 | 0 | assert(ndim <= B2ND_MAX_DIM); |
5863 | 0 | int64_t strides[B2ND_MAX_DIM]; |
5864 | |
|
5865 | 0 | strides[ndim - 1] = 1; |
5866 | 0 | for (int j = ndim - 2; j >= 0; --j) { |
5867 | 0 | strides[j] = shape[j + 1] * strides[j + 1]; |
5868 | 0 | } |
5869 | |
|
5870 | 0 | index[0] = i / strides[0]; |
5871 | 0 | for (int j = 1; j < ndim; ++j) { |
5872 | 0 | index[j] = (i % strides[j - 1]) / strides[j]; |
5873 | 0 | } |
5874 | 0 | } |
5875 | | |
/* Convert a multidimensional `index` into the flat offset `*i`, as the dot
 * product of the index with the per-dimension `strides`. */
void blosc2_multidim_to_unidim(const int64_t *index, int8_t ndim, const int64_t *strides, int64_t *i) {
  int64_t acc = 0;
  for (int d = 0; d < ndim; ++d) {
    acc += index[d] * strides[d];
  }
  *i = acc;
}
5882 | | |
5883 | 0 | int blosc2_get_slice_nchunks(blosc2_schunk* schunk, int64_t *start, int64_t *stop, int64_t **chunks_idx) { |
5884 | 0 | BLOSC_ERROR_NULL(schunk, BLOSC2_ERROR_NULL_POINTER); |
5885 | 0 | if (blosc2_meta_exists(schunk, "b2nd") < 0) { |
5886 | | // Try with a caterva metalayer; we are meant to be backward compatible with it |
5887 | 0 | if (blosc2_meta_exists(schunk, "caterva") < 0) { |
5888 | 0 | return schunk_get_slice_nchunks(schunk, *start, *stop, chunks_idx); |
5889 | 0 | } |
5890 | 0 | } |
5891 | | |
5892 | 0 | b2nd_array_t *array; |
5893 | 0 | int rc = b2nd_from_schunk(schunk, &array); |
5894 | 0 | if (rc < 0) { |
5895 | 0 | BLOSC_TRACE_ERROR("Could not get b2nd array from schunk."); |
5896 | 0 | return rc; |
5897 | 0 | } |
5898 | 0 | rc = b2nd_get_slice_nchunks(array, start, stop, chunks_idx); |
5899 | 0 | array->sc = NULL; // Free only array struct |
5900 | 0 | b2nd_free(array); |
5901 | |
|
5902 | 0 | return rc; |
5903 | 0 | } |
5904 | | |
5905 | 0 | blosc2_cparams blosc2_get_blosc2_cparams_defaults(void) { |
5906 | 0 | return BLOSC2_CPARAMS_DEFAULTS; |
5907 | 0 | }; |
5908 | | |
5909 | 0 | blosc2_dparams blosc2_get_blosc2_dparams_defaults(void) { |
5910 | 0 | return BLOSC2_DPARAMS_DEFAULTS; |
5911 | 0 | }; |
5912 | | |
5913 | 0 | blosc2_storage blosc2_get_blosc2_storage_defaults(void) { |
5914 | 0 | return BLOSC2_STORAGE_DEFAULTS; |
5915 | 0 | }; |
5916 | | |
5917 | 0 | blosc2_io blosc2_get_blosc2_io_defaults(void) { |
5918 | 0 | return BLOSC2_IO_DEFAULTS; |
5919 | 0 | }; |
5920 | | |
5921 | 0 | blosc2_stdio_mmap blosc2_get_blosc2_stdio_mmap_defaults(void) { |
5922 | 0 | return BLOSC2_STDIO_MMAP_DEFAULTS; |
5923 | 0 | }; |
5924 | | |
5925 | 0 | const char *blosc2_error_string(int error_code) { |
5926 | 0 | switch (error_code) { |
5927 | 0 | case BLOSC2_ERROR_FAILURE: |
5928 | 0 | return "Generic failure"; |
5929 | 0 | case BLOSC2_ERROR_STREAM: |
5930 | 0 | return "Bad stream"; |
5931 | 0 | case BLOSC2_ERROR_DATA: |
5932 | 0 | return "Invalid data"; |
5933 | 0 | case BLOSC2_ERROR_MEMORY_ALLOC: |
5934 | 0 | return "Memory alloc/realloc failure"; |
5935 | 0 | case BLOSC2_ERROR_READ_BUFFER: |
5936 | 0 | return "Not enough space to read"; |
5937 | 0 | case BLOSC2_ERROR_WRITE_BUFFER: |
5938 | 0 | return "Not enough space to write"; |
5939 | 0 | case BLOSC2_ERROR_CODEC_SUPPORT: |
5940 | 0 | return "Codec not supported"; |
5941 | 0 | case BLOSC2_ERROR_CODEC_PARAM: |
5942 | 0 | return "Invalid parameter supplied to codec"; |
5943 | 0 | case BLOSC2_ERROR_CODEC_DICT: |
5944 | 0 | return "Codec dictionary error"; |
5945 | 0 | case BLOSC2_ERROR_VERSION_SUPPORT: |
5946 | 0 | return "Version not supported"; |
5947 | 0 | case BLOSC2_ERROR_INVALID_HEADER: |
5948 | 0 | return "Invalid value in header"; |
5949 | 0 | case BLOSC2_ERROR_INVALID_PARAM: |
5950 | 0 | return "Invalid parameter supplied to function"; |
5951 | 0 | case BLOSC2_ERROR_FILE_READ: |
5952 | 0 | return "File read failure"; |
5953 | 0 | case BLOSC2_ERROR_FILE_WRITE: |
5954 | 0 | return "File write failure"; |
5955 | 0 | case BLOSC2_ERROR_FILE_OPEN: |
5956 | 0 | return "File open failure"; |
5957 | 0 | case BLOSC2_ERROR_NOT_FOUND: |
5958 | 0 | return "Not found"; |
5959 | 0 | case BLOSC2_ERROR_RUN_LENGTH: |
5960 | 0 | return "Bad run length encoding"; |
5961 | 0 | case BLOSC2_ERROR_FILTER_PIPELINE: |
5962 | 0 | return "Filter pipeline error"; |
5963 | 0 | case BLOSC2_ERROR_CHUNK_INSERT: |
5964 | 0 | return "Chunk insert failure"; |
5965 | 0 | case BLOSC2_ERROR_CHUNK_APPEND: |
5966 | 0 | return "Chunk append failure"; |
5967 | 0 | case BLOSC2_ERROR_CHUNK_UPDATE: |
5968 | 0 | return "Chunk update failure"; |
5969 | 0 | case BLOSC2_ERROR_2GB_LIMIT: |
5970 | 0 | return "Sizes larger than 2gb not supported"; |
5971 | 0 | case BLOSC2_ERROR_SCHUNK_COPY: |
5972 | 0 | return "Super-chunk copy failure"; |
5973 | 0 | case BLOSC2_ERROR_FRAME_TYPE: |
5974 | 0 | return "Wrong type for frame"; |
5975 | 0 | case BLOSC2_ERROR_FILE_TRUNCATE: |
5976 | 0 | return "File truncate failure"; |
5977 | 0 | case BLOSC2_ERROR_THREAD_CREATE: |
5978 | 0 | return "Thread or thread context creation failure"; |
5979 | 0 | case BLOSC2_ERROR_POSTFILTER: |
5980 | 0 | return "Postfilter failure"; |
5981 | 0 | case BLOSC2_ERROR_FRAME_SPECIAL: |
5982 | 0 | return "Special frame failure"; |
5983 | 0 | case BLOSC2_ERROR_SCHUNK_SPECIAL: |
5984 | 0 | return "Special super-chunk failure"; |
5985 | 0 | case BLOSC2_ERROR_PLUGIN_IO: |
5986 | 0 | return "IO plugin error"; |
5987 | 0 | case BLOSC2_ERROR_FILE_REMOVE: |
5988 | 0 | return "Remove file failure"; |
5989 | 0 | case BLOSC2_ERROR_NULL_POINTER: |
5990 | 0 | return "Pointer is null"; |
5991 | 0 | case BLOSC2_ERROR_INVALID_INDEX: |
5992 | 0 | return "Invalid index"; |
5993 | 0 | case BLOSC2_ERROR_METALAYER_NOT_FOUND: |
5994 | 0 | return "Metalayer has not been found"; |
5995 | 0 | case BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED: |
5996 | 0 | return "Maximum buffersize exceeded"; |
5997 | 0 | case BLOSC2_ERROR_TUNER: |
5998 | 0 | return "Tuner failure"; |
5999 | 0 | default: |
6000 | 0 | return "Unknown error"; |
6001 | 0 | } |
6002 | 0 | } |