/src/c-blosc2/blosc/blosc2.c
Line | Count | Source (jump to first uncovered line) |
1 | | /********************************************************************* |
2 | | Blosc - Blocked Shuffling and Compression Library |
3 | | |
4 | | Copyright (c) 2021 Blosc Development Team <blosc@blosc.org> |
5 | | https://blosc.org |
6 | | License: BSD 3-Clause (see LICENSE.txt) |
7 | | |
8 | | See LICENSE.txt for details about copyright and rights to use. |
9 | | **********************************************************************/ |
10 | | |
11 | | |
12 | | #include "blosc2.h" |
13 | | #include "blosc-private.h" |
14 | | #include "../plugins/codecs/zfp/blosc2-zfp.h" |
15 | | #include "frame.h" |
16 | | #include "b2nd-private.h" |
17 | | #include "schunk-private.h" |
18 | | |
19 | | #if defined(USING_CMAKE) |
20 | | #include "config.h" |
21 | | #endif /* USING_CMAKE */ |
22 | | #include "context.h" |
23 | | |
24 | | #include "shuffle.h" |
25 | | #include "delta.h" |
26 | | #include "trunc-prec.h" |
27 | | #include "blosclz.h" |
28 | | #include "stune.h" |
29 | | #include "blosc2/codecs-registry.h" |
30 | | #include "blosc2/filters-registry.h" |
31 | | #include "blosc2/tuners-registry.h" |
32 | | |
33 | | #include "lz4.h" |
34 | | #include "lz4hc.h" |
35 | | #ifdef HAVE_IPP |
36 | | #include <ipps.h> |
37 | | #include <ippdc.h> |
38 | | #endif |
39 | | #if defined(HAVE_ZLIB_NG) |
40 | | #ifdef ZLIB_COMPAT |
41 | | #include "zlib.h" |
42 | | #else |
43 | | #include "zlib-ng.h" |
44 | | #endif |
45 | | #elif defined(HAVE_ZLIB) |
46 | | #include "zlib.h" |
47 | | #endif /* HAVE_MINIZ */ |
48 | | #if defined(HAVE_ZSTD) |
49 | | #include "zstd.h" |
50 | | #include "zstd_errors.h" |
51 | | // #include "cover.h" // for experimenting with fast cover training for building dicts |
52 | | #include "zdict.h" |
53 | | #endif /* HAVE_ZSTD */ |
54 | | |
55 | | #if defined(_WIN32) && !defined(__MINGW32__) |
56 | | #include <windows.h> |
57 | | #include <malloc.h> |
58 | | #include <process.h> |
59 | | #define getpid _getpid |
60 | | #endif /* _WIN32 */ |
61 | | |
62 | | #if defined(_WIN32) && !defined(__GNUC__) |
63 | | #include "win32/pthread.c" |
64 | | #endif |
65 | | |
66 | | #include <stdio.h> |
67 | | #include <stdlib.h> |
68 | | #include <errno.h> |
69 | | #include <string.h> |
70 | | #include <sys/types.h> |
71 | | #include <assert.h> |
72 | | #include <math.h> |
73 | | |
74 | | |
75 | | /* Synchronization variables */ |
76 | | |
77 | | /* Global context for non-contextual API */ |
78 | | static blosc2_context* g_global_context; |
79 | | static pthread_mutex_t global_comp_mutex; |
80 | | static int g_compressor = BLOSC_BLOSCLZ; |
81 | | static int g_delta = 0; |
82 | | /* The default splitmode */ |
83 | | static int32_t g_splitmode = BLOSC_FORWARD_COMPAT_SPLIT; |
84 | | /* the compressor to use by default */ |
85 | | static int16_t g_nthreads = 1; |
86 | | static int32_t g_force_blocksize = 0; |
87 | | static int g_initlib = 0; |
88 | | static blosc2_schunk* g_schunk = NULL; /* the pointer to super-chunk */ |
89 | | |
90 | | blosc2_codec g_codecs[256] = {0}; |
91 | | uint8_t g_ncodecs = 0; |
92 | | |
93 | | static blosc2_filter g_filters[256] = {0}; |
94 | | static uint64_t g_nfilters = 0; |
95 | | |
96 | | static blosc2_io_cb g_ios[256] = {0}; |
97 | | static uint64_t g_nio = 0; |
98 | | |
99 | | blosc2_tuner g_tuners[256] = {0}; |
100 | | int g_ntuners = 0; |
101 | | |
102 | | static int g_tuner = BLOSC_STUNE; |
103 | | |
104 | | // Forward declarations |
105 | | int init_threadpool(blosc2_context *context); |
106 | | int release_threadpool(blosc2_context *context); |
107 | | |
108 | | /* Macros for synchronization */ |
109 | | |
110 | | /* Wait until all threads are initialized */ |
111 | | #ifdef BLOSC_POSIX_BARRIERS |
112 | | #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ |
113 | 0 | do { \ |
114 | 0 | rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_init); \ |
115 | 0 | if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ |
116 | 0 | BLOSC_TRACE_ERROR("Could not wait on barrier (init): %d", rc); \ |
117 | 0 | return((RET_VAL)); \ |
118 | 0 | } \ |
119 | 0 | } while (0) |
120 | | #else |
121 | | #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ |
122 | | do { \ |
123 | | pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ |
124 | | if ((CONTEXT_PTR)->count_threads < (CONTEXT_PTR)->nthreads) { \ |
125 | | (CONTEXT_PTR)->count_threads++; \ |
126 | | pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ |
127 | | &(CONTEXT_PTR)->count_threads_mutex); \ |
128 | | } \ |
129 | | else { \ |
130 | | pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ |
131 | | } \ |
132 | | pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); \ |
133 | | } while (0) |
134 | | #endif |
135 | | |
136 | | /* Wait for all threads to finish */ |
137 | | #ifdef BLOSC_POSIX_BARRIERS |
138 | | #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ |
139 | 0 | do { \ |
140 | 0 | rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_finish); \ |
141 | 0 | if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ |
142 | 0 | BLOSC_TRACE_ERROR("Could not wait on barrier (finish)"); \ |
143 | 0 | return((RET_VAL)); \ |
144 | 0 | } \ |
145 | 0 | } while (0) |
146 | | #else |
147 | | #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ |
148 | | do { \ |
149 | | pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ |
150 | | if ((CONTEXT_PTR)->count_threads > 0) { \ |
151 | | (CONTEXT_PTR)->count_threads--; \ |
152 | | pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ |
153 | | &(CONTEXT_PTR)->count_threads_mutex); \ |
154 | | } \ |
155 | | else { \ |
156 | | pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ |
157 | | } \ |
158 | | pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); \ |
159 | | } while (0) |
160 | | #endif |
161 | | |
162 | | |
163 | | /* global variable to change threading backend from Blosc-managed to caller-managed */ |
164 | | static blosc_threads_callback threads_callback = 0; |
165 | | static void *threads_callback_data = 0; |
166 | | |
167 | | |
168 | | /* non-threadsafe function should be called before any other Blosc function in |
169 | | order to change how threads are managed */ |
170 | | void blosc2_set_threads_callback(blosc_threads_callback callback, void *callback_data) |
171 | 0 | { |
172 | 0 | threads_callback = callback; |
173 | 0 | threads_callback_data = callback_data; |
174 | 0 | } |
175 | | |
176 | | |
177 | | /* A function for aligned malloc that is portable */ |
178 | 552 | static uint8_t* my_malloc(size_t size) { |
179 | 552 | void* block = NULL; |
180 | 552 | int res = 0; |
181 | | |
182 | | /* Do an alignment to 32 bytes because AVX2 is supported */ |
183 | | #if defined(_WIN32) |
184 | | /* A (void *) cast needed for avoiding a warning with MINGW :-/ */ |
185 | | block = (void *)_aligned_malloc(size, 32); |
186 | | #elif _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 |
187 | | /* Platform does have an implementation of posix_memalign */ |
188 | 552 | res = posix_memalign(&block, 32, size); |
189 | | #else |
190 | | block = malloc(size); |
191 | | #endif /* _WIN32 */ |
192 | | |
193 | 552 | if (block == NULL || res != 0) { |
194 | 0 | BLOSC_TRACE_ERROR("Error allocating memory!"); |
195 | 0 | return NULL; |
196 | 0 | } |
197 | | |
198 | 552 | return (uint8_t*)block; |
199 | 552 | } |
200 | | |
201 | | |
202 | | /* Release memory booked by my_malloc */ |
203 | 552 | static void my_free(void* block) { |
204 | | #if defined(_WIN32) |
205 | | _aligned_free(block); |
206 | | #else |
207 | 552 | free(block); |
208 | 552 | #endif /* _WIN32 */ |
209 | 552 | } |
210 | | |
211 | | |
212 | | /* |
213 | | * Conversion routines between compressor and compression libraries |
214 | | */ |
215 | | |
216 | | /* Return the library code associated with the compressor name */ |
217 | 0 | static int compname_to_clibcode(const char* compname) { |
218 | 0 | if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) |
219 | 0 | return BLOSC_BLOSCLZ_LIB; |
220 | 0 | if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) |
221 | 0 | return BLOSC_LZ4_LIB; |
222 | 0 | if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) |
223 | 0 | return BLOSC_LZ4_LIB; |
224 | 0 | if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) |
225 | 0 | return BLOSC_ZLIB_LIB; |
226 | 0 | if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) |
227 | 0 | return BLOSC_ZSTD_LIB; |
228 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
229 | 0 | if (strcmp(compname, g_codecs[i].compname) == 0) |
230 | 0 | return g_codecs[i].complib; |
231 | 0 | } |
232 | 0 | return BLOSC2_ERROR_NOT_FOUND; |
233 | 0 | } |
234 | | |
235 | | /* Return the library name associated with the compressor code */ |
236 | 0 | static const char* clibcode_to_clibname(int clibcode) { |
237 | 0 | if (clibcode == BLOSC_BLOSCLZ_LIB) return BLOSC_BLOSCLZ_LIBNAME; |
238 | 0 | if (clibcode == BLOSC_LZ4_LIB) return BLOSC_LZ4_LIBNAME; |
239 | 0 | if (clibcode == BLOSC_ZLIB_LIB) return BLOSC_ZLIB_LIBNAME; |
240 | 0 | if (clibcode == BLOSC_ZSTD_LIB) return BLOSC_ZSTD_LIBNAME; |
241 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
242 | 0 | if (clibcode == g_codecs[i].complib) |
243 | 0 | return g_codecs[i].compname; |
244 | 0 | } |
245 | 0 | return NULL; /* should never happen */ |
246 | 0 | } |
247 | | |
248 | | |
249 | | /* |
250 | | * Conversion routines between compressor names and compressor codes |
251 | | */ |
252 | | |
253 | | /* Get the compressor name associated with the compressor code */ |
254 | 0 | int blosc2_compcode_to_compname(int compcode, const char** compname) { |
255 | 0 | int code = -1; /* -1 means non-existent compressor code */ |
256 | 0 | const char* name = NULL; |
257 | | |
258 | | /* Map the compressor code */ |
259 | 0 | if (compcode == BLOSC_BLOSCLZ) |
260 | 0 | name = BLOSC_BLOSCLZ_COMPNAME; |
261 | 0 | else if (compcode == BLOSC_LZ4) |
262 | 0 | name = BLOSC_LZ4_COMPNAME; |
263 | 0 | else if (compcode == BLOSC_LZ4HC) |
264 | 0 | name = BLOSC_LZ4HC_COMPNAME; |
265 | 0 | else if (compcode == BLOSC_ZLIB) |
266 | 0 | name = BLOSC_ZLIB_COMPNAME; |
267 | 0 | else if (compcode == BLOSC_ZSTD) |
268 | 0 | name = BLOSC_ZSTD_COMPNAME; |
269 | 0 | else { |
270 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
271 | 0 | if (compcode == g_codecs[i].compcode) { |
272 | 0 | name = g_codecs[i].compname; |
273 | 0 | break; |
274 | 0 | } |
275 | 0 | } |
276 | 0 | } |
277 | |
|
278 | 0 | *compname = name; |
279 | | |
280 | | /* Guess if there is support for this code */ |
281 | 0 | if (compcode == BLOSC_BLOSCLZ) |
282 | 0 | code = BLOSC_BLOSCLZ; |
283 | 0 | else if (compcode == BLOSC_LZ4) |
284 | 0 | code = BLOSC_LZ4; |
285 | 0 | else if (compcode == BLOSC_LZ4HC) |
286 | 0 | code = BLOSC_LZ4HC; |
287 | 0 | #if defined(HAVE_ZLIB) |
288 | 0 | else if (compcode == BLOSC_ZLIB) |
289 | 0 | code = BLOSC_ZLIB; |
290 | 0 | #endif /* HAVE_ZLIB */ |
291 | 0 | #if defined(HAVE_ZSTD) |
292 | 0 | else if (compcode == BLOSC_ZSTD) |
293 | 0 | code = BLOSC_ZSTD; |
294 | 0 | #endif /* HAVE_ZSTD */ |
295 | 0 | else if (compcode >= BLOSC_LAST_CODEC) |
296 | 0 | code = compcode; |
297 | 0 | return code; |
298 | 0 | } |
299 | | |
300 | | /* Get the compressor code for the compressor name. -1 if it is not available */ |
301 | 0 | int blosc2_compname_to_compcode(const char* compname) { |
302 | 0 | int code = -1; /* -1 means non-existent compressor code */ |
303 | |
|
304 | 0 | if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) { |
305 | 0 | code = BLOSC_BLOSCLZ; |
306 | 0 | } |
307 | 0 | else if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) { |
308 | 0 | code = BLOSC_LZ4; |
309 | 0 | } |
310 | 0 | else if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) { |
311 | 0 | code = BLOSC_LZ4HC; |
312 | 0 | } |
313 | 0 | #if defined(HAVE_ZLIB) |
314 | 0 | else if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) { |
315 | 0 | code = BLOSC_ZLIB; |
316 | 0 | } |
317 | 0 | #endif /* HAVE_ZLIB */ |
318 | 0 | #if defined(HAVE_ZSTD) |
319 | 0 | else if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) { |
320 | 0 | code = BLOSC_ZSTD; |
321 | 0 | } |
322 | 0 | #endif /* HAVE_ZSTD */ |
323 | 0 | else{ |
324 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
325 | 0 | if (strcmp(compname, g_codecs[i].compname) == 0) { |
326 | 0 | code = g_codecs[i].compcode; |
327 | 0 | break; |
328 | 0 | } |
329 | 0 | } |
330 | 0 | } |
331 | 0 | return code; |
332 | 0 | } |
333 | | |
334 | | |
335 | | /* Convert compressor code to blosc compressor format code */ |
336 | 0 | static int compcode_to_compformat(int compcode) { |
337 | 0 | switch (compcode) { |
338 | 0 | case BLOSC_BLOSCLZ: return BLOSC_BLOSCLZ_FORMAT; |
339 | 0 | case BLOSC_LZ4: return BLOSC_LZ4_FORMAT; |
340 | 0 | case BLOSC_LZ4HC: return BLOSC_LZ4HC_FORMAT; |
341 | | |
342 | 0 | #if defined(HAVE_ZLIB) |
343 | 0 | case BLOSC_ZLIB: return BLOSC_ZLIB_FORMAT; |
344 | 0 | #endif /* HAVE_ZLIB */ |
345 | | |
346 | 0 | #if defined(HAVE_ZSTD) |
347 | 0 | case BLOSC_ZSTD: return BLOSC_ZSTD_FORMAT; |
348 | 0 | break; |
349 | 0 | #endif /* HAVE_ZSTD */ |
350 | 0 | default: |
351 | 0 | return BLOSC_UDCODEC_FORMAT; |
352 | 0 | } |
353 | 0 | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
354 | 0 | } |
355 | | |
356 | | |
357 | | /* Convert compressor code to blosc compressor format version */ |
358 | 0 | static int compcode_to_compversion(int compcode) { |
359 | | /* Write compressor format */ |
360 | 0 | switch (compcode) { |
361 | 0 | case BLOSC_BLOSCLZ: |
362 | 0 | return BLOSC_BLOSCLZ_VERSION_FORMAT; |
363 | 0 | case BLOSC_LZ4: |
364 | 0 | return BLOSC_LZ4_VERSION_FORMAT; |
365 | 0 | case BLOSC_LZ4HC: |
366 | 0 | return BLOSC_LZ4HC_VERSION_FORMAT; |
367 | | |
368 | 0 | #if defined(HAVE_ZLIB) |
369 | 0 | case BLOSC_ZLIB: |
370 | 0 | return BLOSC_ZLIB_VERSION_FORMAT; |
371 | 0 | break; |
372 | 0 | #endif /* HAVE_ZLIB */ |
373 | | |
374 | 0 | #if defined(HAVE_ZSTD) |
375 | 0 | case BLOSC_ZSTD: |
376 | 0 | return BLOSC_ZSTD_VERSION_FORMAT; |
377 | 0 | break; |
378 | 0 | #endif /* HAVE_ZSTD */ |
379 | 0 | default: |
380 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
381 | 0 | if (compcode == g_codecs[i].compcode) { |
382 | 0 | return g_codecs[i].version; |
383 | 0 | } |
384 | 0 | } |
385 | 0 | } |
386 | 0 | return BLOSC2_ERROR_FAILURE; |
387 | 0 | } |
388 | | |
389 | | |
390 | | static int lz4_wrap_compress(const char* input, size_t input_length, |
391 | 0 | char* output, size_t maxout, int accel, void* hash_table) { |
392 | 0 | BLOSC_UNUSED_PARAM(accel); |
393 | 0 | int cbytes; |
394 | | #ifdef HAVE_IPP |
395 | | if (hash_table == NULL) { |
396 | | return BLOSC2_ERROR_INVALID_PARAM; // the hash table should always be initialized |
397 | | } |
398 | | int outlen = (int)maxout; |
399 | | int inlen = (int)input_length; |
400 | | // I have not found any function that uses `accel` like in `LZ4_compress_fast`, but |
401 | | // the IPP LZ4Safe call does a pretty good job on compressing well, so let's use it |
402 | | IppStatus status = ippsEncodeLZ4Safe_8u((const Ipp8u*)input, &inlen, |
403 | | (Ipp8u*)output, &outlen, (Ipp8u*)hash_table); |
404 | | if (status == ippStsDstSizeLessExpected) { |
405 | | return 0; // we cannot compress in required outlen |
406 | | } |
407 | | else if (status != ippStsNoErr) { |
408 | | return BLOSC2_ERROR_FAILURE; // an unexpected error happened |
409 | | } |
410 | | cbytes = outlen; |
411 | | #else |
412 | 0 | BLOSC_UNUSED_PARAM(hash_table); |
413 | 0 | accel = 1; // deactivate acceleration to match IPP behaviour |
414 | 0 | cbytes = LZ4_compress_fast(input, output, (int)input_length, (int)maxout, accel); |
415 | 0 | #endif |
416 | 0 | return cbytes; |
417 | 0 | } |
418 | | |
419 | | |
420 | | static int lz4hc_wrap_compress(const char* input, size_t input_length, |
421 | 0 | char* output, size_t maxout, int clevel) { |
422 | 0 | int cbytes; |
423 | 0 | if (input_length > (size_t)(UINT32_C(2) << 30)) |
424 | 0 | return BLOSC2_ERROR_2GB_LIMIT; |
425 | | /* clevel for lz4hc goes up to 12, at least in LZ4 1.7.5 |
426 | | * but levels larger than 9 do not buy much compression. */ |
427 | 0 | cbytes = LZ4_compress_HC(input, output, (int)input_length, (int)maxout, |
428 | 0 | clevel); |
429 | 0 | return cbytes; |
430 | 0 | } |
431 | | |
432 | | |
433 | | static int lz4_wrap_decompress(const char* input, size_t compressed_length, |
434 | 129 | char* output, size_t maxout) { |
435 | 129 | int nbytes; |
436 | | #ifdef HAVE_IPP |
437 | | int outlen = (int)maxout; |
438 | | int inlen = (int)compressed_length; |
439 | | IppStatus status; |
440 | | status = ippsDecodeLZ4_8u((const Ipp8u*)input, inlen, (Ipp8u*)output, &outlen); |
441 | | //status = ippsDecodeLZ4Dict_8u((const Ipp8u*)input, &inlen, (Ipp8u*)output, 0, &outlen, NULL, 1 << 16); |
442 | | nbytes = (status == ippStsNoErr) ? outlen : -outlen; |
443 | | #else |
444 | 129 | nbytes = LZ4_decompress_safe(input, output, (int)compressed_length, (int)maxout); |
445 | 129 | #endif |
446 | 129 | if (nbytes != (int)maxout) { |
447 | 26 | return 0; |
448 | 26 | } |
449 | 103 | return (int)maxout; |
450 | 129 | } |
451 | | |
452 | | #if defined(HAVE_ZLIB) |
453 | | /* zlib is not very respectful with sharing name space with others. |
454 | | Fortunately, its names do not collide with those already in blosc. */ |
455 | | static int zlib_wrap_compress(const char* input, size_t input_length, |
456 | 0 | char* output, size_t maxout, int clevel) { |
457 | 0 | int status; |
458 | | #if defined(HAVE_ZLIB_NG) && ! defined(ZLIB_COMPAT) |
459 | | size_t cl = maxout; |
460 | | status = zng_compress2( |
461 | | (uint8_t*)output, &cl, (uint8_t*)input, input_length, clevel); |
462 | | #else |
463 | 0 | uLongf cl = (uLongf)maxout; |
464 | 0 | status = compress2( |
465 | 0 | (Bytef*)output, &cl, (Bytef*)input, (uLong)input_length, clevel); |
466 | 0 | #endif |
467 | 0 | if (status != Z_OK) { |
468 | 0 | return 0; |
469 | 0 | } |
470 | 0 | return (int)cl; |
471 | 0 | } |
472 | | |
473 | | static int zlib_wrap_decompress(const char* input, size_t compressed_length, |
474 | 1 | char* output, size_t maxout) { |
475 | 1 | int status; |
476 | | #if defined(HAVE_ZLIB_NG) && ! defined(ZLIB_COMPAT) |
477 | | size_t ul = maxout; |
478 | | status = zng_uncompress( |
479 | | (uint8_t*)output, &ul, (uint8_t*)input, compressed_length); |
480 | | #else |
481 | 1 | uLongf ul = (uLongf)maxout; |
482 | 1 | status = uncompress( |
483 | 1 | (Bytef*)output, &ul, (Bytef*)input, (uLong)compressed_length); |
484 | 1 | #endif |
485 | 1 | if (status != Z_OK) { |
486 | 1 | return 0; |
487 | 1 | } |
488 | 0 | return (int)ul; |
489 | 1 | } |
490 | | #endif /* HAVE_ZLIB */ |
491 | | |
492 | | |
493 | | #if defined(HAVE_ZSTD) |
494 | | static int zstd_wrap_compress(struct thread_context* thread_context, |
495 | | const char* input, size_t input_length, |
496 | 0 | char* output, size_t maxout, int clevel) { |
497 | 0 | size_t code; |
498 | 0 | blosc2_context* context = thread_context->parent_context; |
499 | |
|
500 | 0 | clevel = (clevel < 9) ? clevel * 2 - 1 : ZSTD_maxCLevel(); |
501 | | /* Make the level 8 close enough to maxCLevel */ |
502 | 0 | if (clevel == 8) clevel = ZSTD_maxCLevel() - 2; |
503 | |
|
504 | 0 | if (thread_context->zstd_cctx == NULL) { |
505 | 0 | thread_context->zstd_cctx = ZSTD_createCCtx(); |
506 | 0 | } |
507 | |
|
508 | 0 | if (context->use_dict) { |
509 | 0 | assert(context->dict_cdict != NULL); |
510 | 0 | code = ZSTD_compress_usingCDict( |
511 | 0 | thread_context->zstd_cctx, (void*)output, maxout, (void*)input, |
512 | 0 | input_length, context->dict_cdict); |
513 | 0 | } else { |
514 | 0 | code = ZSTD_compressCCtx(thread_context->zstd_cctx, |
515 | 0 | (void*)output, maxout, (void*)input, input_length, clevel); |
516 | 0 | } |
517 | 0 | if (ZSTD_isError(code) != ZSTD_error_no_error) { |
518 | | // Blosc will just memcpy this buffer |
519 | 0 | return 0; |
520 | 0 | } |
521 | 0 | return (int)code; |
522 | 0 | } |
523 | | |
524 | | static int zstd_wrap_decompress(struct thread_context* thread_context, |
525 | | const char* input, size_t compressed_length, |
526 | 0 | char* output, size_t maxout) { |
527 | 0 | size_t code; |
528 | 0 | blosc2_context* context = thread_context->parent_context; |
529 | |
|
530 | 0 | if (thread_context->zstd_dctx == NULL) { |
531 | 0 | thread_context->zstd_dctx = ZSTD_createDCtx(); |
532 | 0 | } |
533 | |
|
534 | 0 | if (context->use_dict) { |
535 | 0 | assert(context->dict_ddict != NULL); |
536 | 0 | code = ZSTD_decompress_usingDDict( |
537 | 0 | thread_context->zstd_dctx, (void*)output, maxout, (void*)input, |
538 | 0 | compressed_length, context->dict_ddict); |
539 | 0 | } else { |
540 | 0 | code = ZSTD_decompressDCtx(thread_context->zstd_dctx, |
541 | 0 | (void*)output, maxout, (void*)input, compressed_length); |
542 | 0 | } |
543 | 0 | if (ZSTD_isError(code) != ZSTD_error_no_error) { |
544 | 0 | BLOSC_TRACE_ERROR("Error in ZSTD decompression: '%s'. Giving up.", |
545 | 0 | ZDICT_getErrorName(code)); |
546 | 0 | return 0; |
547 | 0 | } |
548 | 0 | return (int)code; |
549 | 0 | } |
550 | | #endif /* HAVE_ZSTD */ |
551 | | |
552 | | /* Compute acceleration for blosclz */ |
553 | 0 | static int get_accel(const blosc2_context* context) { |
554 | 0 | int clevel = context->clevel; |
555 | |
|
556 | 0 | if (context->compcode == BLOSC_LZ4) { |
557 | | /* This acceleration setting based on discussions held in: |
558 | | * https://groups.google.com/forum/#!topic/lz4c/zosy90P8MQw |
559 | | */ |
560 | 0 | return (10 - clevel); |
561 | 0 | } |
562 | 0 | return 1; |
563 | 0 | } |
564 | | |
565 | | |
566 | 1.46k | int do_nothing(uint8_t filter, char cmode) { |
567 | 1.46k | if (cmode == 'c') { |
568 | 0 | return (filter == BLOSC_NOFILTER); |
569 | 1.46k | } else { |
570 | | // TRUNC_PREC do not have to be applied during decompression |
571 | 1.46k | return ((filter == BLOSC_NOFILTER) || (filter == BLOSC_TRUNC_PREC)); |
572 | 1.46k | } |
573 | 1.46k | } |
574 | | |
575 | | |
576 | 304 | int next_filter(const uint8_t* filters, int current_filter, char cmode) { |
577 | 477 | for (int i = current_filter - 1; i >= 0; i--) { |
578 | 477 | if (!do_nothing(filters[i], cmode)) { |
579 | 304 | return filters[i]; |
580 | 304 | } |
581 | 477 | } |
582 | 0 | return BLOSC_NOFILTER; |
583 | 304 | } |
584 | | |
585 | | |
586 | 165 | int last_filter(const uint8_t* filters, char cmode) { |
587 | 165 | int last_index = -1; |
588 | 1.15k | for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { |
589 | 990 | if (!do_nothing(filters[i], cmode)) { |
590 | 240 | last_index = i; |
591 | 240 | } |
592 | 990 | } |
593 | 165 | return last_index; |
594 | 165 | } |
595 | | |
596 | | |
597 | | /* Convert filter pipeline to filter flags */ |
598 | 295 | static uint8_t filters_to_flags(const uint8_t* filters) { |
599 | 295 | uint8_t flags = 0; |
600 | | |
601 | 2.06k | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
602 | 1.77k | switch (filters[i]) { |
603 | 229 | case BLOSC_SHUFFLE: |
604 | 229 | flags |= BLOSC_DOSHUFFLE; |
605 | 229 | break; |
606 | 0 | case BLOSC_BITSHUFFLE: |
607 | 0 | flags |= BLOSC_DOBITSHUFFLE; |
608 | 0 | break; |
609 | 41 | case BLOSC_DELTA: |
610 | 41 | flags |= BLOSC_DODELTA; |
611 | 41 | break; |
612 | 1.50k | default : |
613 | 1.50k | break; |
614 | 1.77k | } |
615 | 1.77k | } |
616 | 295 | return flags; |
617 | 295 | } |
618 | | |
619 | | |
620 | | /* Convert filter flags to filter pipeline */ |
621 | 200k | static void flags_to_filters(const uint8_t flags, uint8_t* filters) { |
622 | | /* Initialize the filter pipeline */ |
623 | 200k | memset(filters, 0, BLOSC2_MAX_FILTERS); |
624 | | /* Fill the filter pipeline */ |
625 | 200k | if (flags & BLOSC_DOSHUFFLE) |
626 | 200k | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; |
627 | 200k | if (flags & BLOSC_DOBITSHUFFLE) |
628 | 200k | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; |
629 | 200k | if (flags & BLOSC_DODELTA) |
630 | 5 | filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; |
631 | 200k | } |
632 | | |
633 | | |
634 | | /* Get filter flags from header flags */ |
635 | | static uint8_t get_filter_flags(const uint8_t header_flags, |
636 | 3 | const int32_t typesize) { |
637 | 3 | uint8_t flags = 0; |
638 | | |
639 | 3 | if ((header_flags & BLOSC_DOSHUFFLE) && (typesize > 1)) { |
640 | 2 | flags |= BLOSC_DOSHUFFLE; |
641 | 2 | } |
642 | 3 | if (header_flags & BLOSC_DOBITSHUFFLE) { |
643 | 1 | flags |= BLOSC_DOBITSHUFFLE; |
644 | 1 | } |
645 | 3 | if (header_flags & BLOSC_DODELTA) { |
646 | 0 | flags |= BLOSC_DODELTA; |
647 | 0 | } |
648 | 3 | if (header_flags & BLOSC_MEMCPYED) { |
649 | 0 | flags |= BLOSC_MEMCPYED; |
650 | 0 | } |
651 | 3 | return flags; |
652 | 3 | } |
653 | | |
654 | | typedef struct blosc_header_s { |
655 | | uint8_t version; |
656 | | uint8_t versionlz; |
657 | | uint8_t flags; |
658 | | uint8_t typesize; |
659 | | int32_t nbytes; |
660 | | int32_t blocksize; |
661 | | int32_t cbytes; |
662 | | // Extended Blosc2 header |
663 | | uint8_t filters[BLOSC2_MAX_FILTERS]; |
664 | | uint8_t udcompcode; |
665 | | uint8_t compcode_meta; |
666 | | uint8_t filters_meta[BLOSC2_MAX_FILTERS]; |
667 | | uint8_t reserved2; |
668 | | uint8_t blosc2_flags; |
669 | | } blosc_header; |
670 | | |
671 | | |
672 | | int read_chunk_header(const uint8_t* src, int32_t srcsize, bool extended_header, blosc_header* header) |
673 | 200k | { |
674 | 200k | memset(header, 0, sizeof(blosc_header)); |
675 | | |
676 | 200k | if (srcsize < BLOSC_MIN_HEADER_LENGTH) { |
677 | 0 | BLOSC_TRACE_ERROR("Not enough space to read Blosc header."); |
678 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
679 | 0 | } |
680 | | |
681 | 200k | memcpy(header, src, BLOSC_MIN_HEADER_LENGTH); |
682 | | |
683 | 200k | bool little_endian = is_little_endian(); |
684 | | |
685 | 200k | if (!little_endian) { |
686 | 0 | header->nbytes = bswap32_(header->nbytes); |
687 | 0 | header->blocksize = bswap32_(header->blocksize); |
688 | 0 | header->cbytes = bswap32_(header->cbytes); |
689 | 0 | } |
690 | | |
691 | 200k | if (header->version > BLOSC2_VERSION_FORMAT) { |
692 | | /* Version from future */ |
693 | 0 | return BLOSC2_ERROR_VERSION_SUPPORT; |
694 | 0 | } |
695 | 200k | if (header->cbytes < BLOSC_MIN_HEADER_LENGTH) { |
696 | 0 | BLOSC_TRACE_ERROR("`cbytes` is too small to read min header."); |
697 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
698 | 0 | } |
699 | 200k | if (header->blocksize <= 0 || (header->nbytes > 0 && (header->blocksize > header->nbytes))) { |
700 | 0 | BLOSC_TRACE_ERROR("`blocksize` is zero or greater than uncompressed size"); |
701 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
702 | 0 | } |
703 | 200k | if (header->blocksize > BLOSC2_MAXBLOCKSIZE) { |
704 | 0 | BLOSC_TRACE_ERROR("`blocksize` greater than maximum allowed"); |
705 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
706 | 0 | } |
707 | 200k | if (header->typesize == 0) { |
708 | 0 | BLOSC_TRACE_ERROR("`typesize` is zero."); |
709 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
710 | 0 | } |
711 | | |
712 | | /* Read extended header if it is wanted */ |
713 | 200k | if ((extended_header) && (header->flags & BLOSC_DOSHUFFLE) && (header->flags & BLOSC_DOBITSHUFFLE)) { |
714 | 263 | if (header->cbytes < BLOSC_EXTENDED_HEADER_LENGTH) { |
715 | 0 | BLOSC_TRACE_ERROR("`cbytes` is too small to read extended header."); |
716 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
717 | 0 | } |
718 | 263 | if (srcsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
719 | 0 | BLOSC_TRACE_ERROR("Not enough space to read Blosc extended header."); |
720 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
721 | 0 | } |
722 | | |
723 | 263 | memcpy((uint8_t *)header + BLOSC_MIN_HEADER_LENGTH, src + BLOSC_MIN_HEADER_LENGTH, |
724 | 263 | BLOSC_EXTENDED_HEADER_LENGTH - BLOSC_MIN_HEADER_LENGTH); |
725 | | |
726 | 263 | int32_t special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK; |
727 | 263 | if (special_type != 0) { |
728 | 55 | if (special_type == BLOSC2_SPECIAL_VALUE) { |
729 | | // In this case, the actual type size must be derived from the cbytes |
730 | 1 | int32_t typesize = header->cbytes - BLOSC_EXTENDED_HEADER_LENGTH; |
731 | 1 | if (typesize <= 0) { |
732 | 0 | BLOSC_TRACE_ERROR("`typesize` is zero or negative"); |
733 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
734 | 0 | } |
735 | 1 | if (typesize > BLOSC2_MAXTYPESIZE) { |
736 | 0 | BLOSC_TRACE_ERROR("`typesize` is greater than maximum allowed"); |
737 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
738 | 0 | } |
739 | 1 | if (typesize > header->nbytes) { |
740 | 1 | BLOSC_TRACE_ERROR("`typesize` is greater than `nbytes`"); |
741 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
742 | 1 | } |
743 | 0 | if (header->nbytes % typesize != 0) { |
744 | 0 | BLOSC_TRACE_ERROR("`nbytes` is not a multiple of typesize"); |
745 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
746 | 0 | } |
747 | 0 | } |
748 | 54 | else { |
749 | 54 | if (header->nbytes % header->typesize != 0) { |
750 | 0 | BLOSC_TRACE_ERROR("`nbytes` is not a multiple of typesize"); |
751 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
752 | 0 | } |
753 | 54 | } |
754 | 55 | } |
755 | | // The number of filters depends on the version of the header. Blosc2 alpha series |
756 | | // did not initialize filters to zero beyond the max supported. |
757 | 262 | if (header->version == BLOSC2_VERSION_FORMAT_ALPHA) { |
758 | 12 | header->filters[5] = 0; |
759 | 12 | header->filters_meta[5] = 0; |
760 | 12 | } |
761 | 262 | } |
762 | 200k | else { |
763 | 200k | flags_to_filters(header->flags, header->filters); |
764 | 200k | } |
765 | 200k | return 0; |
766 | 200k | } |
767 | | |
768 | 298 | static inline void blosc2_calculate_blocks(blosc2_context* context) { |
769 | | /* Compute number of blocks in buffer */ |
770 | 298 | context->nblocks = context->sourcesize / context->blocksize; |
771 | 298 | context->leftover = context->sourcesize % context->blocksize; |
772 | 298 | context->nblocks = (context->leftover > 0) ? |
773 | 200 | (context->nblocks + 1) : context->nblocks; |
774 | 298 | } |
775 | | |
776 | 265 | static int blosc2_initialize_context_from_header(blosc2_context* context, blosc_header* header) { |
777 | 265 | context->header_flags = header->flags; |
778 | 265 | context->typesize = header->typesize; |
779 | 265 | context->sourcesize = header->nbytes; |
780 | 265 | context->blocksize = header->blocksize; |
781 | 265 | context->blosc2_flags = header->blosc2_flags; |
782 | 265 | context->compcode = header->flags >> 5; |
783 | 265 | if (context->compcode == BLOSC_UDCODEC_FORMAT) { |
784 | 0 | context->compcode = header->udcompcode; |
785 | 0 | } |
786 | 265 | blosc2_calculate_blocks(context); |
787 | | |
788 | 265 | bool is_lazy = false; |
789 | 265 | if ((context->header_flags & BLOSC_DOSHUFFLE) && |
790 | 265 | (context->header_flags & BLOSC_DOBITSHUFFLE)) { |
791 | | /* Extended header */ |
792 | 262 | context->header_overhead = BLOSC_EXTENDED_HEADER_LENGTH; |
793 | | |
794 | 262 | memcpy(context->filters, header->filters, BLOSC2_MAX_FILTERS); |
795 | 262 | memcpy(context->filters_meta, header->filters_meta, BLOSC2_MAX_FILTERS); |
796 | 262 | context->compcode_meta = header->compcode_meta; |
797 | | |
798 | 262 | context->filter_flags = filters_to_flags(header->filters); |
799 | 262 | context->special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK; |
800 | | |
801 | 262 | is_lazy = (context->blosc2_flags & 0x08u); |
802 | 262 | } |
803 | 3 | else { |
804 | 3 | context->header_overhead = BLOSC_MIN_HEADER_LENGTH; |
805 | 3 | context->filter_flags = get_filter_flags(context->header_flags, context->typesize); |
806 | 3 | flags_to_filters(context->header_flags, context->filters); |
807 | 3 | } |
808 | | |
809 | | // Some checks for malformed headers |
810 | 265 | if (!is_lazy && header->cbytes > context->srcsize) { |
811 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
812 | 0 | } |
813 | | |
814 | 265 | return 0; |
815 | 265 | } |
816 | | |
817 | | |
818 | 0 | int fill_filter(blosc2_filter *filter) { |
819 | 0 | char libpath[PATH_MAX]; |
820 | 0 | void *lib = load_lib(filter->name, libpath); |
821 | 0 | if(lib == NULL) { |
822 | 0 | BLOSC_TRACE_ERROR("Error while loading the library"); |
823 | 0 | return BLOSC2_ERROR_FAILURE; |
824 | 0 | } |
825 | | |
826 | 0 | filter_info *info = dlsym(lib, "info"); |
827 | 0 | filter->forward = dlsym(lib, info->forward); |
828 | 0 | filter->backward = dlsym(lib, info->backward); |
829 | |
|
830 | 0 | if (filter->forward == NULL || filter->backward == NULL){ |
831 | 0 | BLOSC_TRACE_ERROR("Wrong library loaded"); |
832 | 0 | dlclose(lib); |
833 | 0 | return BLOSC2_ERROR_FAILURE; |
834 | 0 | } |
835 | | |
836 | 0 | return BLOSC2_ERROR_SUCCESS; |
837 | 0 | } |
838 | | |
839 | | |
840 | 0 | int fill_codec(blosc2_codec *codec) { |
841 | 0 | char libpath[PATH_MAX]; |
842 | 0 | void *lib = load_lib(codec->compname, libpath); |
843 | 0 | if(lib == NULL) { |
844 | 0 | BLOSC_TRACE_ERROR("Error while loading the library for codec `%s`", codec->compname); |
845 | 0 | return BLOSC2_ERROR_FAILURE; |
846 | 0 | } |
847 | | |
848 | 0 | codec_info *info = dlsym(lib, "info"); |
849 | 0 | if (info == NULL) { |
850 | 0 | BLOSC_TRACE_ERROR("`info` symbol cannot be loaded from plugin `%s`", codec->compname); |
851 | 0 | dlclose(lib); |
852 | 0 | return BLOSC2_ERROR_FAILURE; |
853 | 0 | } |
854 | | |
855 | 0 | codec->encoder = dlsym(lib, info->encoder); |
856 | 0 | codec->decoder = dlsym(lib, info->decoder); |
857 | 0 | if (codec->encoder == NULL || codec->decoder == NULL) { |
858 | 0 | BLOSC_TRACE_ERROR("encoder or decoder cannot be loaded from plugin `%s`", codec->compname); |
859 | 0 | dlclose(lib); |
860 | 0 | return BLOSC2_ERROR_FAILURE; |
861 | 0 | } |
862 | | |
863 | 0 | return BLOSC2_ERROR_SUCCESS; |
864 | 0 | } |
865 | | |
866 | | |
867 | 0 | int fill_tuner(blosc2_tuner *tuner) { |
868 | 0 | char libpath[PATH_MAX] = {0}; |
869 | 0 | void *lib = load_lib(tuner->name, libpath); |
870 | 0 | if(lib == NULL) { |
871 | 0 | BLOSC_TRACE_ERROR("Error while loading the library"); |
872 | 0 | return BLOSC2_ERROR_FAILURE; |
873 | 0 | } |
874 | | |
875 | 0 | tuner_info *info = dlsym(lib, "info"); |
876 | 0 | tuner->init = dlsym(lib, info->init); |
877 | 0 | tuner->update = dlsym(lib, info->update); |
878 | 0 | tuner->next_blocksize = dlsym(lib, info->next_blocksize); |
879 | 0 | tuner->free = dlsym(lib, info->free); |
880 | 0 | tuner->next_cparams = dlsym(lib, info->next_cparams); |
881 | |
|
882 | 0 | if (tuner->init == NULL || tuner->update == NULL || tuner->next_blocksize == NULL || tuner->free == NULL |
883 | 0 | || tuner->next_cparams == NULL){ |
884 | 0 | BLOSC_TRACE_ERROR("Wrong library loaded"); |
885 | 0 | dlclose(lib); |
886 | 0 | return BLOSC2_ERROR_FAILURE; |
887 | 0 | } |
888 | | |
889 | 0 | return BLOSC2_ERROR_SUCCESS; |
890 | 0 | } |
891 | | |
892 | | |
893 | 0 | static int blosc2_intialize_header_from_context(blosc2_context* context, blosc_header* header, bool extended_header) { |
894 | 0 | memset(header, 0, sizeof(blosc_header)); |
895 | |
|
896 | 0 | header->version = BLOSC2_VERSION_FORMAT; |
897 | 0 | header->versionlz = compcode_to_compversion(context->compcode); |
898 | 0 | header->flags = context->header_flags; |
899 | 0 | header->typesize = (uint8_t)context->typesize; |
900 | 0 | header->nbytes = (int32_t)context->sourcesize; |
901 | 0 | header->blocksize = (int32_t)context->blocksize; |
902 | |
|
903 | 0 | int little_endian = is_little_endian(); |
904 | 0 | if (!little_endian) { |
905 | 0 | header->nbytes = bswap32_(header->nbytes); |
906 | 0 | header->blocksize = bswap32_(header->blocksize); |
907 | | // cbytes written after compression |
908 | 0 | } |
909 | |
|
910 | 0 | if (extended_header) { |
911 | | /* Store filter pipeline info at the end of the header */ |
912 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
913 | 0 | header->filters[i] = context->filters[i]; |
914 | 0 | header->filters_meta[i] = context->filters_meta[i]; |
915 | 0 | } |
916 | 0 | header->udcompcode = context->compcode; |
917 | 0 | header->compcode_meta = context->compcode_meta; |
918 | |
|
919 | 0 | if (!little_endian) { |
920 | 0 | header->blosc2_flags |= BLOSC2_BIGENDIAN; |
921 | 0 | } |
922 | 0 | if (context->use_dict) { |
923 | 0 | header->blosc2_flags |= BLOSC2_USEDICT; |
924 | 0 | } |
925 | 0 | if (context->blosc2_flags & BLOSC2_INSTR_CODEC) { |
926 | 0 | header->blosc2_flags |= BLOSC2_INSTR_CODEC; |
927 | 0 | } |
928 | 0 | } |
929 | |
|
930 | 0 | return 0; |
931 | 0 | } |
932 | | |
933 | 191 | void _cycle_buffers(uint8_t **src, uint8_t **dest, uint8_t **tmp) { |
934 | 191 | uint8_t *tmp2 = *src; |
935 | 191 | *src = *dest; |
936 | 191 | *dest = *tmp; |
937 | 191 | *tmp = tmp2; |
938 | 191 | } |
939 | | |
940 | | uint8_t* pipeline_forward(struct thread_context* thread_context, const int32_t bsize, |
941 | | const uint8_t* src, const int32_t offset, |
942 | 0 | uint8_t* dest, uint8_t* tmp) { |
943 | 0 | blosc2_context* context = thread_context->parent_context; |
944 | 0 | uint8_t* _src = (uint8_t*)src + offset; |
945 | 0 | uint8_t* _tmp = tmp; |
946 | 0 | uint8_t* _dest = dest; |
947 | 0 | int32_t typesize = context->typesize; |
948 | 0 | uint8_t* filters = context->filters; |
949 | 0 | uint8_t* filters_meta = context->filters_meta; |
950 | 0 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
951 | | |
952 | | /* Prefilter function */ |
953 | 0 | if (context->prefilter != NULL) { |
954 | | /* Set unwritten values to zero */ |
955 | 0 | memset(_dest, 0, bsize); |
956 | | // Create new prefilter parameters for this block (must be private for each thread) |
957 | 0 | blosc2_prefilter_params preparams; |
958 | 0 | memcpy(&preparams, context->preparams, sizeof(preparams)); |
959 | 0 | preparams.input = _src; |
960 | 0 | preparams.output = _dest; |
961 | 0 | preparams.output_size = bsize; |
962 | 0 | preparams.output_typesize = typesize; |
963 | 0 | preparams.output_offset = offset; |
964 | 0 | preparams.nblock = offset / context->blocksize; |
965 | 0 | preparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1; |
966 | 0 | preparams.tid = thread_context->tid; |
967 | 0 | preparams.ttmp = thread_context->tmp; |
968 | 0 | preparams.ttmp_nbytes = thread_context->tmp_nbytes; |
969 | 0 | preparams.ctx = context; |
970 | |
|
971 | 0 | if (context->prefilter(&preparams) != 0) { |
972 | 0 | BLOSC_TRACE_ERROR("Execution of prefilter function failed"); |
973 | 0 | return NULL; |
974 | 0 | } |
975 | | |
976 | 0 | if (memcpyed) { |
977 | | // No more filters are required |
978 | 0 | return _dest; |
979 | 0 | } |
980 | 0 | _cycle_buffers(&_src, &_dest, &_tmp); |
981 | 0 | } |
982 | | |
983 | | /* Process the filter pipeline */ |
984 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
985 | 0 | int rc = BLOSC2_ERROR_SUCCESS; |
986 | 0 | if (filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) { |
987 | 0 | switch (filters[i]) { |
988 | 0 | case BLOSC_SHUFFLE: |
989 | 0 | shuffle(typesize, bsize, _src, _dest); |
990 | 0 | break; |
991 | 0 | case BLOSC_BITSHUFFLE: |
992 | 0 | if (bitshuffle(typesize, bsize, _src, _dest) < 0) { |
993 | 0 | return NULL; |
994 | 0 | } |
995 | 0 | break; |
996 | 0 | case BLOSC_DELTA: |
997 | 0 | delta_encoder(src, offset, bsize, typesize, _src, _dest); |
998 | 0 | break; |
999 | 0 | case BLOSC_TRUNC_PREC: |
1000 | 0 | if (truncate_precision(filters_meta[i], typesize, bsize, _src, _dest) < 0) { |
1001 | 0 | return NULL; |
1002 | 0 | } |
1003 | 0 | break; |
1004 | 0 | default: |
1005 | 0 | if (filters[i] != BLOSC_NOFILTER) { |
1006 | 0 | BLOSC_TRACE_ERROR("Filter %d not handled during compression\n", filters[i]); |
1007 | 0 | return NULL; |
1008 | 0 | } |
1009 | 0 | } |
1010 | 0 | } |
1011 | 0 | else { |
1012 | | // Look for the filters_meta in user filters and run it |
1013 | 0 | for (uint64_t j = 0; j < g_nfilters; ++j) { |
1014 | 0 | if (g_filters[j].id == filters[i]) { |
1015 | 0 | if (g_filters[j].forward == NULL) { |
1016 | | // Dynamically load library |
1017 | 0 | if (fill_filter(&g_filters[j]) < 0) { |
1018 | 0 | BLOSC_TRACE_ERROR("Could not load filter %d\n", g_filters[j].id); |
1019 | 0 | return NULL; |
1020 | 0 | } |
1021 | 0 | } |
1022 | 0 | if (g_filters[j].forward != NULL) { |
1023 | 0 | blosc2_cparams cparams; |
1024 | 0 | blosc2_ctx_get_cparams(context, &cparams); |
1025 | 0 | rc = g_filters[j].forward(_src, _dest, bsize, filters_meta[i], &cparams, g_filters[j].id); |
1026 | 0 | } else { |
1027 | 0 | BLOSC_TRACE_ERROR("Forward function is NULL"); |
1028 | 0 | return NULL; |
1029 | 0 | } |
1030 | 0 | if (rc != BLOSC2_ERROR_SUCCESS) { |
1031 | 0 | BLOSC_TRACE_ERROR("User-defined filter %d failed during compression\n", filters[i]); |
1032 | 0 | return NULL; |
1033 | 0 | } |
1034 | 0 | goto urfiltersuccess; |
1035 | 0 | } |
1036 | 0 | } |
1037 | 0 | BLOSC_TRACE_ERROR("User-defined filter %d not found during compression\n", filters[i]); |
1038 | 0 | return NULL; |
1039 | | |
1040 | 0 | urfiltersuccess:; |
1041 | |
|
1042 | 0 | } |
1043 | | |
1044 | | // Cycle buffers when required |
1045 | 0 | if (filters[i] != BLOSC_NOFILTER) { |
1046 | 0 | _cycle_buffers(&_src, &_dest, &_tmp); |
1047 | 0 | } |
1048 | 0 | } |
1049 | 0 | return _src; |
1050 | 0 | } |
1051 | | |
1052 | | |
1053 | | // Optimized version for detecting runs. It compares 8 bytes values wherever possible. |
1054 | 0 | static bool get_run(const uint8_t* ip, const uint8_t* ip_bound) { |
1055 | 0 | uint8_t x = *ip; |
1056 | 0 | int64_t value, value2; |
1057 | | /* Broadcast the value for every byte in a 64-bit register */ |
1058 | 0 | memset(&value, x, 8); |
1059 | 0 | while (ip < (ip_bound - 8)) { |
1060 | | #if defined(BLOSC_STRICT_ALIGN) |
1061 | | memcpy(&value2, ip, 8); |
1062 | | #else |
1063 | 0 | value2 = *(int64_t*)ip; |
1064 | 0 | #endif |
1065 | 0 | if (value != value2) { |
1066 | | // Values differ. We don't have a run. |
1067 | 0 | return false; |
1068 | 0 | } |
1069 | 0 | else { |
1070 | 0 | ip += 8; |
1071 | 0 | } |
1072 | 0 | } |
1073 | | /* Look into the remainder */ |
1074 | 0 | while ((ip < ip_bound) && (*ip == x)) ip++; |
1075 | 0 | return ip == ip_bound ? true : false; |
1076 | 0 | } |
1077 | | |
1078 | | |
1079 | | /* Shuffle & compress a single block */ |
1080 | | static int blosc_c(struct thread_context* thread_context, int32_t bsize, |
1081 | | int32_t leftoverblock, int32_t ntbytes, int32_t destsize, |
1082 | | const uint8_t* src, const int32_t offset, uint8_t* dest, |
1083 | 0 | uint8_t* tmp, uint8_t* tmp2) { |
1084 | 0 | blosc2_context* context = thread_context->parent_context; |
1085 | 0 | int dont_split = (context->header_flags & 0x10) >> 4; |
1086 | 0 | int dict_training = context->use_dict && context->dict_cdict == NULL; |
1087 | 0 | int32_t j, neblock, nstreams; |
1088 | 0 | int32_t cbytes; /* number of compressed bytes in split */ |
1089 | 0 | int32_t ctbytes = 0; /* number of compressed bytes in block */ |
1090 | 0 | int32_t maxout; |
1091 | 0 | int32_t typesize = context->typesize; |
1092 | 0 | const char* compname; |
1093 | 0 | int accel; |
1094 | 0 | const uint8_t* _src; |
1095 | 0 | uint8_t *_tmp = tmp, *_tmp2 = tmp2; |
1096 | 0 | int last_filter_index = last_filter(context->filters, 'c'); |
1097 | 0 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
1098 | 0 | bool instr_codec = context->blosc2_flags & BLOSC2_INSTR_CODEC; |
1099 | 0 | blosc_timestamp_t last, current; |
1100 | 0 | float filter_time = 0.f; |
1101 | |
|
1102 | 0 | if (instr_codec) { |
1103 | 0 | blosc_set_timestamp(&last); |
1104 | 0 | } |
1105 | | |
1106 | | // See whether we have a run here |
1107 | 0 | if (last_filter_index >= 0 || context->prefilter != NULL) { |
1108 | | /* Apply the filter pipeline just for the prefilter */ |
1109 | 0 | if (memcpyed && context->prefilter != NULL) { |
1110 | | // We only need the prefilter output |
1111 | 0 | _src = pipeline_forward(thread_context, bsize, src, offset, dest, _tmp2); |
1112 | 0 | if (_src == NULL) { |
1113 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1114 | 0 | } |
1115 | 0 | return bsize; |
1116 | 0 | } |
1117 | | /* Apply regular filter pipeline */ |
1118 | 0 | _src = pipeline_forward(thread_context, bsize, src, offset, _tmp, _tmp2); |
1119 | 0 | if (_src == NULL) { |
1120 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1121 | 0 | } |
1122 | 0 | } else { |
1123 | 0 | _src = src + offset; |
1124 | 0 | } |
1125 | | |
1126 | 0 | if (instr_codec) { |
1127 | 0 | blosc_set_timestamp(¤t); |
1128 | 0 | filter_time = (float) blosc_elapsed_secs(last, current); |
1129 | 0 | last = current; |
1130 | 0 | } |
1131 | |
|
1132 | 0 | assert(context->clevel > 0); |
1133 | | |
1134 | | /* Calculate acceleration for different compressors */ |
1135 | 0 | accel = get_accel(context); |
1136 | | |
1137 | | /* The number of compressed data streams for this block */ |
1138 | 0 | if (!dont_split && !leftoverblock && !dict_training) { |
1139 | 0 | nstreams = (int32_t)typesize; |
1140 | 0 | } |
1141 | 0 | else { |
1142 | 0 | nstreams = 1; |
1143 | 0 | } |
1144 | 0 | neblock = bsize / nstreams; |
1145 | 0 | for (j = 0; j < nstreams; j++) { |
1146 | 0 | if (instr_codec) { |
1147 | 0 | blosc_set_timestamp(&last); |
1148 | 0 | } |
1149 | 0 | if (!dict_training) { |
1150 | 0 | dest += sizeof(int32_t); |
1151 | 0 | ntbytes += sizeof(int32_t); |
1152 | 0 | ctbytes += sizeof(int32_t); |
1153 | |
|
1154 | 0 | const uint8_t *ip = (uint8_t *) _src + j * neblock; |
1155 | 0 | const uint8_t *ipbound = (uint8_t *) _src + (j + 1) * neblock; |
1156 | |
|
1157 | 0 | if (context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH && get_run(ip, ipbound)) { |
1158 | | // A run |
1159 | 0 | int32_t value = _src[j * neblock]; |
1160 | 0 | if (ntbytes > destsize) { |
1161 | 0 | return 0; /* Non-compressible data */ |
1162 | 0 | } |
1163 | | |
1164 | 0 | if (instr_codec) { |
1165 | 0 | blosc_set_timestamp(¤t); |
1166 | 0 | int32_t instr_size = sizeof(blosc2_instr); |
1167 | 0 | ntbytes += instr_size; |
1168 | 0 | ctbytes += instr_size; |
1169 | 0 | if (ntbytes > destsize) { |
1170 | 0 | return 0; /* Non-compressible data */ |
1171 | 0 | } |
1172 | 0 | _sw32(dest - 4, instr_size); |
1173 | 0 | blosc2_instr *desti = (blosc2_instr *)dest; |
1174 | 0 | memset(desti, 0, sizeof(blosc2_instr)); |
1175 | | // Special values have an overhead of about 1 int32 |
1176 | 0 | int32_t ssize = value == 0 ? sizeof(int32_t) : sizeof(int32_t) + 1; |
1177 | 0 | desti->cratio = (float) neblock / (float) ssize; |
1178 | 0 | float ctime = (float) blosc_elapsed_secs(last, current); |
1179 | 0 | desti->cspeed = (float) neblock / ctime; |
1180 | 0 | desti->filter_speed = (float) neblock / filter_time; |
1181 | 0 | desti->flags[0] = 1; // mark a runlen |
1182 | 0 | dest += instr_size; |
1183 | 0 | continue; |
1184 | 0 | } |
1185 | | |
1186 | | // Encode the repeated byte in the first (LSB) byte of the length of the split. |
1187 | 0 | _sw32(dest - 4, -value); // write the value in two's complement |
1188 | 0 | if (value > 0) { |
1189 | | // Mark encoding as a run-length (== 0 is always a 0's run) |
1190 | 0 | ntbytes += 1; |
1191 | 0 | ctbytes += 1; |
1192 | 0 | if (ntbytes > destsize) { |
1193 | 0 | return 0; /* Non-compressible data */ |
1194 | 0 | } |
1195 | | // Set MSB bit (sign) to 1 (not really necessary here, but for demonstration purposes) |
1196 | | // dest[-1] |= 0x80; |
1197 | 0 | dest[0] = 0x1; // set run-length bit (0) in token |
1198 | 0 | dest += 1; |
1199 | 0 | } |
1200 | 0 | continue; |
1201 | 0 | } |
1202 | 0 | } |
1203 | | |
1204 | 0 | maxout = neblock; |
1205 | 0 | if (ntbytes + maxout > destsize && !instr_codec) { |
1206 | | /* avoid buffer * overrun */ |
1207 | 0 | maxout = destsize - ntbytes; |
1208 | 0 | if (maxout <= 0) { |
1209 | 0 | return 0; /* non-compressible block */ |
1210 | 0 | } |
1211 | 0 | } |
1212 | 0 | if (dict_training) { |
1213 | | // We are in the build dict state, so don't compress |
1214 | | // TODO: copy only a percentage for sampling |
1215 | 0 | memcpy(dest, _src + j * neblock, (unsigned int)neblock); |
1216 | 0 | cbytes = (int32_t)neblock; |
1217 | 0 | } |
1218 | 0 | else if (context->compcode == BLOSC_BLOSCLZ) { |
1219 | 0 | cbytes = blosclz_compress(context->clevel, _src + j * neblock, |
1220 | 0 | (int)neblock, dest, maxout, context); |
1221 | 0 | } |
1222 | 0 | else if (context->compcode == BLOSC_LZ4) { |
1223 | 0 | void *hash_table = NULL; |
1224 | | #ifdef HAVE_IPP |
1225 | | hash_table = (void*)thread_context->lz4_hash_table; |
1226 | | #endif |
1227 | 0 | cbytes = lz4_wrap_compress((char*)_src + j * neblock, (size_t)neblock, |
1228 | 0 | (char*)dest, (size_t)maxout, accel, hash_table); |
1229 | 0 | } |
1230 | 0 | else if (context->compcode == BLOSC_LZ4HC) { |
1231 | 0 | cbytes = lz4hc_wrap_compress((char*)_src + j * neblock, (size_t)neblock, |
1232 | 0 | (char*)dest, (size_t)maxout, context->clevel); |
1233 | 0 | } |
1234 | 0 | #if defined(HAVE_ZLIB) |
1235 | 0 | else if (context->compcode == BLOSC_ZLIB) { |
1236 | 0 | cbytes = zlib_wrap_compress((char*)_src + j * neblock, (size_t)neblock, |
1237 | 0 | (char*)dest, (size_t)maxout, context->clevel); |
1238 | 0 | } |
1239 | 0 | #endif /* HAVE_ZLIB */ |
1240 | 0 | #if defined(HAVE_ZSTD) |
1241 | 0 | else if (context->compcode == BLOSC_ZSTD) { |
1242 | 0 | cbytes = zstd_wrap_compress(thread_context, |
1243 | 0 | (char*)_src + j * neblock, (size_t)neblock, |
1244 | 0 | (char*)dest, (size_t)maxout, context->clevel); |
1245 | 0 | } |
1246 | 0 | #endif /* HAVE_ZSTD */ |
1247 | 0 | else if (context->compcode > BLOSC2_DEFINED_CODECS_STOP) { |
1248 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
1249 | 0 | if (g_codecs[i].compcode == context->compcode) { |
1250 | 0 | if (g_codecs[i].encoder == NULL) { |
1251 | | // Dynamically load codec plugin |
1252 | 0 | if (fill_codec(&g_codecs[i]) < 0) { |
1253 | 0 | BLOSC_TRACE_ERROR("Could not load codec %d.", g_codecs[i].compcode); |
1254 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1255 | 0 | } |
1256 | 0 | } |
1257 | 0 | blosc2_cparams cparams; |
1258 | 0 | blosc2_ctx_get_cparams(context, &cparams); |
1259 | 0 | cbytes = g_codecs[i].encoder(_src + j * neblock, |
1260 | 0 | neblock, |
1261 | 0 | dest, |
1262 | 0 | maxout, |
1263 | 0 | context->compcode_meta, |
1264 | 0 | &cparams, |
1265 | 0 | context->src); |
1266 | 0 | goto urcodecsuccess; |
1267 | 0 | } |
1268 | 0 | } |
1269 | 0 | BLOSC_TRACE_ERROR("User-defined compressor codec %d not found during compression", context->compcode); |
1270 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1271 | 0 | urcodecsuccess: |
1272 | 0 | ; |
1273 | 0 | } else { |
1274 | 0 | blosc2_compcode_to_compname(context->compcode, &compname); |
1275 | 0 | BLOSC_TRACE_ERROR("Blosc has not been compiled with '%s' compression support." |
1276 | 0 | "Please use one having it.", compname); |
1277 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1278 | 0 | } |
1279 | | |
1280 | 0 | if (cbytes > maxout) { |
1281 | | /* Buffer overrun caused by compression (should never happen) */ |
1282 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
1283 | 0 | } |
1284 | 0 | if (cbytes < 0) { |
1285 | | /* cbytes should never be negative */ |
1286 | 0 | return BLOSC2_ERROR_DATA; |
1287 | 0 | } |
1288 | 0 | if (cbytes == 0) { |
1289 | | // When cbytes is 0, the compressor has not been able to compress anything |
1290 | 0 | cbytes = neblock; |
1291 | 0 | } |
1292 | |
|
1293 | 0 | if (instr_codec) { |
1294 | 0 | blosc_set_timestamp(¤t); |
1295 | 0 | int32_t instr_size = sizeof(blosc2_instr); |
1296 | 0 | ntbytes += instr_size; |
1297 | 0 | ctbytes += instr_size; |
1298 | 0 | if (ntbytes > destsize) { |
1299 | 0 | return 0; /* Non-compressible data */ |
1300 | 0 | } |
1301 | 0 | _sw32(dest - 4, instr_size); |
1302 | 0 | float ctime = (float)blosc_elapsed_secs(last, current); |
1303 | 0 | blosc2_instr *desti = (blosc2_instr *)dest; |
1304 | 0 | memset(desti, 0, sizeof(blosc2_instr)); |
1305 | | // cratio is computed having into account 1 additional int (csize) |
1306 | 0 | desti->cratio = (float)neblock / (float)(cbytes + sizeof(int32_t)); |
1307 | 0 | desti->cspeed = (float)neblock / ctime; |
1308 | 0 | desti->filter_speed = (float) neblock / filter_time; |
1309 | 0 | dest += instr_size; |
1310 | 0 | continue; |
1311 | 0 | } |
1312 | | |
1313 | 0 | if (!dict_training) { |
1314 | 0 | if (cbytes == neblock) { |
1315 | | /* The compressor has been unable to compress data at all. */ |
1316 | | /* Before doing the copy, check that we are not running into a |
1317 | | buffer overflow. */ |
1318 | 0 | if ((ntbytes + neblock) > destsize) { |
1319 | 0 | return 0; /* Non-compressible data */ |
1320 | 0 | } |
1321 | 0 | memcpy(dest, _src + j * neblock, (unsigned int)neblock); |
1322 | 0 | cbytes = neblock; |
1323 | 0 | } |
1324 | 0 | _sw32(dest - 4, cbytes); |
1325 | 0 | } |
1326 | 0 | dest += cbytes; |
1327 | 0 | ntbytes += cbytes; |
1328 | 0 | ctbytes += cbytes; |
1329 | 0 | } /* Closes j < nstreams */ |
1330 | | |
1331 | 0 | return ctbytes; |
1332 | 0 | } |
1333 | | |
1334 | | |
1335 | | /* Process the filter pipeline (decompression mode) */ |
1336 | | int pipeline_backward(struct thread_context* thread_context, const int32_t bsize, uint8_t* dest, |
1337 | | const int32_t offset, uint8_t* src, uint8_t* tmp, |
1338 | 112 | uint8_t* tmp2, int last_filter_index, int32_t nblock) { |
1339 | 112 | blosc2_context* context = thread_context->parent_context; |
1340 | 112 | int32_t typesize = context->typesize; |
1341 | 112 | uint8_t* filters = context->filters; |
1342 | 112 | uint8_t* filters_meta = context->filters_meta; |
1343 | 112 | uint8_t* _src = src; |
1344 | 112 | uint8_t* _dest = tmp; |
1345 | 112 | uint8_t* _tmp = tmp2; |
1346 | 112 | int errcode = 0; |
1347 | | |
1348 | 282 | for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { |
1349 | | // Delta filter requires the whole chunk ready |
1350 | 282 | int last_copy_filter = (last_filter_index == i) || (next_filter(filters, i, 'd') == BLOSC_DELTA); |
1351 | 282 | if (last_copy_filter && context->postfilter == NULL) { |
1352 | 203 | _dest = dest + offset; |
1353 | 203 | } |
1354 | 282 | int rc = BLOSC2_ERROR_SUCCESS; |
1355 | 282 | if (filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) { |
1356 | 281 | switch (filters[i]) { |
1357 | 128 | case BLOSC_SHUFFLE: |
1358 | 128 | unshuffle(typesize, bsize, _src, _dest); |
1359 | 128 | break; |
1360 | 2 | case BLOSC_BITSHUFFLE: |
1361 | 2 | if (bitunshuffle(typesize, bsize, _src, _dest, context->src[BLOSC2_CHUNK_VERSION]) < 0) { |
1362 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1363 | 0 | } |
1364 | 2 | break; |
1365 | 61 | case BLOSC_DELTA: |
1366 | 61 | if (context->nthreads == 1) { |
1367 | | /* Serial mode */ |
1368 | 61 | delta_decoder(dest, offset, bsize, typesize, _dest); |
1369 | 61 | } else { |
1370 | | /* Force the thread in charge of the block 0 to go first */ |
1371 | 0 | pthread_mutex_lock(&context->delta_mutex); |
1372 | 0 | if (context->dref_not_init) { |
1373 | 0 | if (offset != 0) { |
1374 | 0 | pthread_cond_wait(&context->delta_cv, &context->delta_mutex); |
1375 | 0 | } else { |
1376 | 0 | delta_decoder(dest, offset, bsize, typesize, _dest); |
1377 | 0 | context->dref_not_init = 0; |
1378 | 0 | pthread_cond_broadcast(&context->delta_cv); |
1379 | 0 | } |
1380 | 0 | } |
1381 | 0 | pthread_mutex_unlock(&context->delta_mutex); |
1382 | 0 | if (offset != 0) { |
1383 | 0 | delta_decoder(dest, offset, bsize, typesize, _dest); |
1384 | 0 | } |
1385 | 0 | } |
1386 | 61 | break; |
1387 | 1 | case BLOSC_TRUNC_PREC: |
1388 | | // TRUNC_PREC filter does not need to be undone |
1389 | 1 | break; |
1390 | 89 | default: |
1391 | 89 | if (filters[i] != BLOSC_NOFILTER) { |
1392 | 0 | BLOSC_TRACE_ERROR("Filter %d not handled during decompression.", |
1393 | 0 | filters[i]); |
1394 | 0 | errcode = -1; |
1395 | 0 | } |
1396 | 281 | } |
1397 | 281 | } else { |
1398 | | // Look for the filters_meta in user filters and run it |
1399 | 6 | for (uint64_t j = 0; j < g_nfilters; ++j) { |
1400 | 5 | if (g_filters[j].id == filters[i]) { |
1401 | 0 | if (g_filters[j].backward == NULL) { |
1402 | | // Dynamically load filter |
1403 | 0 | if (fill_filter(&g_filters[j]) < 0) { |
1404 | 0 | BLOSC_TRACE_ERROR("Could not load filter %d.", g_filters[j].id); |
1405 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1406 | 0 | } |
1407 | 0 | } |
1408 | 0 | if (g_filters[j].backward != NULL) { |
1409 | 0 | blosc2_dparams dparams; |
1410 | 0 | blosc2_ctx_get_dparams(context, &dparams); |
1411 | 0 | rc = g_filters[j].backward(_src, _dest, bsize, filters_meta[i], &dparams, g_filters[j].id); |
1412 | 0 | } else { |
1413 | 0 | BLOSC_TRACE_ERROR("Backward function is NULL"); |
1414 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1415 | 0 | } |
1416 | 0 | if (rc != BLOSC2_ERROR_SUCCESS) { |
1417 | 0 | BLOSC_TRACE_ERROR("User-defined filter %d failed during decompression.", filters[i]); |
1418 | 0 | return rc; |
1419 | 0 | } |
1420 | 0 | goto urfiltersuccess; |
1421 | 0 | } |
1422 | 5 | } |
1423 | 1 | BLOSC_TRACE_ERROR("User-defined filter %d not found during decompression.", filters[i]); |
1424 | 0 | return BLOSC2_ERROR_FILTER_PIPELINE; |
1425 | 0 | urfiltersuccess:; |
1426 | 0 | } |
1427 | | |
1428 | | // Cycle buffers when required |
1429 | 281 | if ((filters[i] != BLOSC_NOFILTER) && (filters[i] != BLOSC_TRUNC_PREC)) { |
1430 | 191 | _cycle_buffers(&_src, &_dest, &_tmp); |
1431 | 191 | } |
1432 | 281 | if (last_filter_index == i) { |
1433 | 111 | break; |
1434 | 111 | } |
1435 | 281 | } |
1436 | | |
1437 | | /* Postfilter function */ |
1438 | 111 | if (context->postfilter != NULL) { |
1439 | | // Create new postfilter parameters for this block (must be private for each thread) |
1440 | 0 | blosc2_postfilter_params postparams; |
1441 | 0 | memcpy(&postparams, context->postparams, sizeof(postparams)); |
1442 | 0 | postparams.input = _src; |
1443 | 0 | postparams.output = dest + offset; |
1444 | 0 | postparams.size = bsize; |
1445 | 0 | postparams.typesize = typesize; |
1446 | 0 | postparams.offset = nblock * context->blocksize; |
1447 | 0 | postparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1; |
1448 | 0 | postparams.nblock = nblock; |
1449 | 0 | postparams.tid = thread_context->tid; |
1450 | 0 | postparams.ttmp = thread_context->tmp; |
1451 | 0 | postparams.ttmp_nbytes = thread_context->tmp_nbytes; |
1452 | 0 | postparams.ctx = context; |
1453 | |
|
1454 | 0 | if (context->postfilter(&postparams) != 0) { |
1455 | 0 | BLOSC_TRACE_ERROR("Execution of postfilter function failed"); |
1456 | 0 | return BLOSC2_ERROR_POSTFILTER; |
1457 | 0 | } |
1458 | 0 | } |
1459 | | |
1460 | 111 | return errcode; |
1461 | 111 | } |
1462 | | |
1463 | | |
1464 | 0 | static int32_t set_nans(int32_t typesize, uint8_t* dest, int32_t destsize) { |
1465 | 0 | if (destsize % typesize != 0) { |
1466 | 0 | BLOSC_TRACE_ERROR("destsize can only be a multiple of typesize"); |
1467 | 0 | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
1468 | 0 | } |
1469 | 0 | int32_t nitems = destsize / typesize; |
1470 | 0 | if (nitems == 0) { |
1471 | 0 | return 0; |
1472 | 0 | } |
1473 | | |
1474 | 0 | if (typesize == 4) { |
1475 | 0 | float* dest_ = (float*)dest; |
1476 | 0 | float val = nanf(""); |
1477 | 0 | for (int i = 0; i < nitems; i++) { |
1478 | 0 | dest_[i] = val; |
1479 | 0 | } |
1480 | 0 | return nitems; |
1481 | 0 | } |
1482 | 0 | else if (typesize == 8) { |
1483 | 0 | double* dest_ = (double*)dest; |
1484 | 0 | double val = nan(""); |
1485 | 0 | for (int i = 0; i < nitems; i++) { |
1486 | 0 | dest_[i] = val; |
1487 | 0 | } |
1488 | 0 | return nitems; |
1489 | 0 | } |
1490 | | |
1491 | 0 | BLOSC_TRACE_ERROR("Unsupported typesize for NaN"); |
1492 | 0 | return BLOSC2_ERROR_DATA; |
1493 | 0 | } |
1494 | | |
1495 | | |
1496 | 0 | static int32_t set_values(int32_t typesize, const uint8_t* src, uint8_t* dest, int32_t destsize) { |
1497 | | #if defined(BLOSC_STRICT_ALIGN) |
1498 | | if (destsize % typesize != 0) { |
1499 | | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
1500 | | } |
1501 | | int32_t nitems = destsize / typesize; |
1502 | | if (nitems == 0) { |
1503 | | return 0; |
1504 | | } |
1505 | | for (int i = 0; i < nitems; i++) { |
1506 | | memcpy(dest + i * typesize, src + BLOSC_EXTENDED_HEADER_LENGTH, typesize); |
1507 | | } |
1508 | | #else |
1509 | | // destsize can only be a multiple of typesize |
1510 | 0 | int64_t val8; |
1511 | 0 | int64_t* dest8; |
1512 | 0 | int32_t val4; |
1513 | 0 | int32_t* dest4; |
1514 | 0 | int16_t val2; |
1515 | 0 | int16_t* dest2; |
1516 | 0 | int8_t val1; |
1517 | 0 | int8_t* dest1; |
1518 | |
|
1519 | 0 | if (destsize % typesize != 0) { |
1520 | 0 | BLOSC_ERROR(BLOSC2_ERROR_FAILURE); |
1521 | 0 | } |
1522 | 0 | int32_t nitems = destsize / typesize; |
1523 | 0 | if (nitems == 0) { |
1524 | 0 | return 0; |
1525 | 0 | } |
1526 | | |
1527 | 0 | switch (typesize) { |
1528 | 0 | case 8: |
1529 | 0 | val8 = ((int64_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0]; |
1530 | 0 | dest8 = (int64_t*)dest; |
1531 | 0 | for (int i = 0; i < nitems; i++) { |
1532 | 0 | dest8[i] = val8; |
1533 | 0 | } |
1534 | 0 | break; |
1535 | 0 | case 4: |
1536 | 0 | val4 = ((int32_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0]; |
1537 | 0 | dest4 = (int32_t*)dest; |
1538 | 0 | for (int i = 0; i < nitems; i++) { |
1539 | 0 | dest4[i] = val4; |
1540 | 0 | } |
1541 | 0 | break; |
1542 | 0 | case 2: |
1543 | 0 | val2 = ((int16_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0]; |
1544 | 0 | dest2 = (int16_t*)dest; |
1545 | 0 | for (int i = 0; i < nitems; i++) { |
1546 | 0 | dest2[i] = val2; |
1547 | 0 | } |
1548 | 0 | break; |
1549 | 0 | case 1: |
1550 | 0 | val1 = ((int8_t*)(src + BLOSC_EXTENDED_HEADER_LENGTH))[0]; |
1551 | 0 | dest1 = (int8_t*)dest; |
1552 | 0 | for (int i = 0; i < nitems; i++) { |
1553 | 0 | dest1[i] = val1; |
1554 | 0 | } |
1555 | 0 | break; |
1556 | 0 | default: |
1557 | 0 | for (int i = 0; i < nitems; i++) { |
1558 | 0 | memcpy(dest + i * typesize, src + BLOSC_EXTENDED_HEADER_LENGTH, typesize); |
1559 | 0 | } |
1560 | 0 | } |
1561 | 0 | #endif |
1562 | | |
1563 | 0 | return nitems; |
1564 | 0 | } |
1565 | | |
1566 | | |
1567 | | /* Decompress & unshuffle a single block */ |
1568 | | static int blosc_d( |
1569 | | struct thread_context* thread_context, int32_t bsize, |
1570 | | int32_t leftoverblock, bool memcpyed, const uint8_t* src, int32_t srcsize, int32_t src_offset, |
1571 | 200k | int32_t nblock, uint8_t* dest, int32_t dest_offset, uint8_t* tmp, uint8_t* tmp2) { |
1572 | 200k | blosc2_context* context = thread_context->parent_context; |
1573 | 200k | uint8_t* filters = context->filters; |
1574 | 200k | uint8_t *tmp3 = thread_context->tmp4; |
1575 | 200k | int32_t compformat = (context->header_flags & (uint8_t)0xe0) >> 5u; |
1576 | 200k | int dont_split = (context->header_flags & 0x10) >> 4; |
1577 | 200k | int32_t chunk_nbytes; |
1578 | 200k | int32_t chunk_cbytes; |
1579 | 200k | int nstreams; |
1580 | 200k | int32_t neblock; |
1581 | 200k | int32_t nbytes; /* number of decompressed bytes in split */ |
1582 | 200k | int32_t cbytes; /* number of compressed bytes in split */ |
1583 | | // int32_t ctbytes = 0; /* number of compressed bytes in block */ |
1584 | 200k | int32_t ntbytes = 0; /* number of uncompressed bytes in block */ |
1585 | 200k | uint8_t* _dest; |
1586 | 200k | int32_t typesize = context->typesize; |
1587 | 200k | bool instr_codec = context->blosc2_flags & BLOSC2_INSTR_CODEC; |
1588 | 200k | const char* compname; |
1589 | 200k | int rc; |
1590 | | |
1591 | 200k | if (context->block_maskout != NULL && context->block_maskout[nblock]) { |
1592 | | // Do not decompress, but act as if we successfully decompressed everything |
1593 | 0 | return bsize; |
1594 | 0 | } |
1595 | | |
1596 | 200k | rc = blosc2_cbuffer_sizes(src, &chunk_nbytes, &chunk_cbytes, NULL); |
1597 | 200k | if (rc < 0) { |
1598 | 0 | return rc; |
1599 | 0 | } |
1600 | 200k | if (context->special_type == BLOSC2_SPECIAL_VALUE) { |
1601 | | // We need the actual typesize in this case, but it cannot be encoded in the header, so derive it from cbytes |
1602 | 0 | typesize = chunk_cbytes - context->header_overhead; |
1603 | 0 | } |
1604 | | |
1605 | | // In some situations (lazychunks) the context can arrive uninitialized |
1606 | | // (but BITSHUFFLE needs it for accessing the format of the chunk) |
1607 | 200k | if (context->src == NULL) { |
1608 | 0 | context->src = src; |
1609 | 0 | } |
1610 | | |
1611 | | // Chunks with special values cannot be lazy |
1612 | 200k | bool is_lazy = ((context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH) && |
1613 | 200k | (context->blosc2_flags & 0x08u) && !context->special_type); |
1614 | 200k | if (is_lazy) { |
1615 | | // The chunk is on disk, so just lazily load the block |
1616 | 0 | if (context->schunk == NULL) { |
1617 | 0 | BLOSC_TRACE_ERROR("Lazy chunk needs an associated super-chunk."); |
1618 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
1619 | 0 | } |
1620 | 0 | if (context->schunk->frame == NULL) { |
1621 | 0 | BLOSC_TRACE_ERROR("Lazy chunk needs an associated frame."); |
1622 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
1623 | 0 | } |
1624 | 0 | blosc2_frame_s* frame = (blosc2_frame_s*)context->schunk->frame; |
1625 | 0 | char* urlpath = frame->urlpath; |
1626 | 0 | size_t trailer_offset = BLOSC_EXTENDED_HEADER_LENGTH + context->nblocks * sizeof(int32_t); |
1627 | 0 | int32_t nchunk; |
1628 | 0 | int64_t chunk_offset; |
1629 | | // The nchunk and the offset of the current chunk are in the trailer |
1630 | 0 | nchunk = *(int32_t*)(src + trailer_offset); |
1631 | 0 | chunk_offset = *(int64_t*)(src + trailer_offset + sizeof(int32_t)); |
1632 | | // Get the csize of the nblock |
1633 | 0 | int32_t *block_csizes = (int32_t *)(src + trailer_offset + sizeof(int32_t) + sizeof(int64_t)); |
1634 | 0 | int32_t block_csize = block_csizes[nblock]; |
1635 | | // Read the lazy block on disk |
1636 | 0 | void* fp = NULL; |
1637 | 0 | blosc2_io_cb *io_cb = blosc2_get_io_cb(context->schunk->storage->io->id); |
1638 | 0 | if (io_cb == NULL) { |
1639 | 0 | BLOSC_TRACE_ERROR("Error getting the input/output API"); |
1640 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
1641 | 0 | } |
1642 | | |
1643 | 0 | int64_t io_pos = 0; |
1644 | 0 | if (frame->sframe) { |
1645 | | // The chunk is not in the frame |
1646 | 0 | char* chunkpath = malloc(strlen(frame->urlpath) + 1 + 8 + strlen(".chunk") + 1); |
1647 | 0 | BLOSC_ERROR_NULL(chunkpath, BLOSC2_ERROR_MEMORY_ALLOC); |
1648 | 0 | sprintf(chunkpath, "%s/%08X.chunk", frame->urlpath, nchunk); |
1649 | 0 | fp = io_cb->open(chunkpath, "rb", context->schunk->storage->io->params); |
1650 | 0 | BLOSC_ERROR_NULL(fp, BLOSC2_ERROR_FILE_OPEN); |
1651 | 0 | free(chunkpath); |
1652 | | // The offset of the block is src_offset |
1653 | 0 | io_pos = src_offset; |
1654 | 0 | } |
1655 | 0 | else { |
1656 | 0 | fp = io_cb->open(urlpath, "rb", context->schunk->storage->io->params); |
1657 | 0 | BLOSC_ERROR_NULL(fp, BLOSC2_ERROR_FILE_OPEN); |
1658 | | // The offset of the block is src_offset |
1659 | 0 | io_pos = frame->file_offset + chunk_offset + src_offset; |
1660 | 0 | } |
1661 | | // We can make use of tmp3 because it will be used after src is not needed anymore |
1662 | 0 | int64_t rbytes = io_cb->read((void**)&tmp3, 1, block_csize, io_pos, fp); |
1663 | 0 | io_cb->close(fp); |
1664 | 0 | if ((int32_t)rbytes != block_csize) { |
1665 | 0 | BLOSC_TRACE_ERROR("Cannot read the (lazy) block out of the fileframe."); |
1666 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
1667 | 0 | } |
1668 | 0 | src = tmp3; |
1669 | 0 | src_offset = 0; |
1670 | 0 | srcsize = block_csize; |
1671 | 0 | } |
1672 | | |
1673 | | // If the chunk is memcpyed, we just have to copy the block to dest and return |
1674 | 200k | if (memcpyed) { |
1675 | 200k | int bsize_ = leftoverblock ? chunk_nbytes % context->blocksize : bsize; |
1676 | 200k | if (!context->special_type) { |
1677 | 0 | if (chunk_nbytes + context->header_overhead != chunk_cbytes) { |
1678 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
1679 | 0 | } |
1680 | 0 | if (chunk_cbytes < context->header_overhead + (nblock * context->blocksize) + bsize_) { |
1681 | | /* Not enough input to copy block */ |
1682 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
1683 | 0 | } |
1684 | 0 | } |
1685 | 200k | if (!is_lazy) { |
1686 | 200k | src += context->header_overhead + nblock * context->blocksize; |
1687 | 200k | } |
1688 | 200k | _dest = dest + dest_offset; |
1689 | 200k | if (context->postfilter != NULL) { |
1690 | | // We are making use of a postfilter, so use a temp for destination |
1691 | 0 | _dest = tmp; |
1692 | 0 | } |
1693 | 200k | rc = 0; |
1694 | 200k | switch (context->special_type) { |
1695 | 0 | case BLOSC2_SPECIAL_VALUE: |
1696 | | // All repeated values |
1697 | 0 | rc = set_values(typesize, context->src, _dest, bsize_); |
1698 | 0 | if (rc < 0) { |
1699 | 0 | BLOSC_TRACE_ERROR("set_values failed"); |
1700 | 0 | return BLOSC2_ERROR_DATA; |
1701 | 0 | } |
1702 | 0 | break; |
1703 | 0 | case BLOSC2_SPECIAL_NAN: |
1704 | 0 | rc = set_nans(context->typesize, _dest, bsize_); |
1705 | 0 | if (rc < 0) { |
1706 | 0 | BLOSC_TRACE_ERROR("set_nans failed"); |
1707 | 0 | return BLOSC2_ERROR_DATA; |
1708 | 0 | } |
1709 | 0 | break; |
1710 | 200k | case BLOSC2_SPECIAL_ZERO: |
1711 | 200k | memset(_dest, 0, bsize_); |
1712 | 200k | break; |
1713 | 0 | case BLOSC2_SPECIAL_UNINIT: |
1714 | | // We do nothing here |
1715 | 0 | break; |
1716 | 0 | default: |
1717 | 0 | memcpy(_dest, src, bsize_); |
1718 | 200k | } |
1719 | 200k | if (context->postfilter != NULL) { |
1720 | | // Create new postfilter parameters for this block (must be private for each thread) |
1721 | 0 | blosc2_postfilter_params postparams; |
1722 | 0 | memcpy(&postparams, context->postparams, sizeof(postparams)); |
1723 | 0 | postparams.input = tmp; |
1724 | 0 | postparams.output = dest + dest_offset; |
1725 | 0 | postparams.size = bsize; |
1726 | 0 | postparams.typesize = typesize; |
1727 | 0 | postparams.offset = nblock * context->blocksize; |
1728 | 0 | postparams.nchunk = context->schunk != NULL ? context->schunk->current_nchunk : -1; |
1729 | 0 | postparams.nblock = nblock; |
1730 | 0 | postparams.tid = thread_context->tid; |
1731 | 0 | postparams.ttmp = thread_context->tmp; |
1732 | 0 | postparams.ttmp_nbytes = thread_context->tmp_nbytes; |
1733 | 0 | postparams.ctx = context; |
1734 | | |
1735 | | // Execute the postfilter (the processed block will be copied to dest) |
1736 | 0 | if (context->postfilter(&postparams) != 0) { |
1737 | 0 | BLOSC_TRACE_ERROR("Execution of postfilter function failed"); |
1738 | 0 | return BLOSC2_ERROR_POSTFILTER; |
1739 | 0 | } |
1740 | 0 | } |
1741 | 200k | thread_context->zfp_cell_nitems = 0; |
1742 | | |
1743 | 200k | return bsize_; |
1744 | 200k | } |
1745 | | |
1746 | 166 | if (!is_lazy && (src_offset <= 0 || src_offset >= srcsize)) { |
1747 | | /* Invalid block src offset encountered */ |
1748 | 1 | return BLOSC2_ERROR_DATA; |
1749 | 1 | } |
1750 | | |
1751 | 165 | src += src_offset; |
1752 | 165 | srcsize -= src_offset; |
1753 | | |
1754 | 165 | int last_filter_index = last_filter(filters, 'd'); |
1755 | 165 | if (instr_codec) { |
1756 | | // If instrumented, we don't want to run the filters |
1757 | 0 | _dest = dest + dest_offset; |
1758 | 0 | } |
1759 | 165 | else if (((last_filter_index >= 0) && |
1760 | 165 | (next_filter(filters, BLOSC2_MAX_FILTERS, 'd') != BLOSC_DELTA)) || |
1761 | 165 | context->postfilter != NULL) { |
1762 | | // We are making use of some filter, so use a temp for destination |
1763 | 100 | _dest = tmp; |
1764 | 100 | } |
1765 | 65 | else { |
1766 | | // If no filters, or only DELTA in pipeline |
1767 | 65 | _dest = dest + dest_offset; |
1768 | 65 | } |
1769 | | |
1770 | | /* The number of compressed data streams for this block */ |
1771 | 165 | if (!dont_split && !leftoverblock) { |
1772 | 1 | nstreams = context->typesize; |
1773 | 1 | } |
1774 | 164 | else { |
1775 | 164 | nstreams = 1; |
1776 | 164 | } |
1777 | | |
1778 | 165 | neblock = bsize / nstreams; |
1779 | 165 | if (neblock == 0) { |
1780 | | /* Not enough space to output bytes */ |
1781 | 0 | BLOSC_ERROR(BLOSC2_ERROR_WRITE_BUFFER); |
1782 | 0 | } |
1783 | 301 | for (int j = 0; j < nstreams; j++) { |
1784 | 165 | if (srcsize < (signed)sizeof(int32_t)) { |
1785 | | /* Not enough input to read compressed size */ |
1786 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
1787 | 0 | } |
1788 | 165 | srcsize -= sizeof(int32_t); |
1789 | 165 | cbytes = sw32_(src); /* amount of compressed bytes */ |
1790 | 165 | if (cbytes > 0) { |
1791 | 131 | if (srcsize < cbytes) { |
1792 | | /* Not enough input to read compressed bytes */ |
1793 | 1 | return BLOSC2_ERROR_READ_BUFFER; |
1794 | 1 | } |
1795 | 130 | srcsize -= cbytes; |
1796 | 130 | } |
1797 | 164 | src += sizeof(int32_t); |
1798 | | // ctbytes += (signed)sizeof(int32_t); |
1799 | | |
1800 | | /* Uncompress */ |
1801 | 164 | if (cbytes == 0) { |
1802 | | // A run of 0's |
1803 | 0 | memset(_dest, 0, (unsigned int)neblock); |
1804 | 0 | nbytes = neblock; |
1805 | 0 | } |
1806 | 164 | else if (cbytes < 0) { |
1807 | | // A negative number means some encoding depending on the token that comes next |
1808 | 34 | uint8_t token; |
1809 | | |
1810 | 34 | if (srcsize < (signed)sizeof(uint8_t)) { |
1811 | | // Not enough input to read token */ |
1812 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
1813 | 0 | } |
1814 | 34 | srcsize -= sizeof(uint8_t); |
1815 | | |
1816 | 34 | token = src[0]; |
1817 | 34 | src += 1; |
1818 | | // ctbytes += 1; |
1819 | | |
1820 | 34 | if (token & 0x1) { |
1821 | | // A run of bytes that are different than 0 |
1822 | 34 | if (cbytes < -255) { |
1823 | | // Runs can only encode a byte |
1824 | 1 | return BLOSC2_ERROR_RUN_LENGTH; |
1825 | 1 | } |
1826 | 33 | uint8_t value = -cbytes; |
1827 | 33 | memset(_dest, value, (unsigned int)neblock); |
1828 | 33 | } else { |
1829 | 0 | BLOSC_TRACE_ERROR("Invalid or unsupported compressed stream token value - %d", token); |
1830 | 0 | return BLOSC2_ERROR_RUN_LENGTH; |
1831 | 0 | } |
1832 | 33 | nbytes = neblock; |
1833 | 33 | cbytes = 0; // everything is encoded in the cbytes token |
1834 | 33 | } |
1835 | 130 | else if (cbytes == neblock) { |
1836 | 0 | memcpy(_dest, src, (unsigned int)neblock); |
1837 | 0 | nbytes = (int32_t)neblock; |
1838 | 0 | } |
1839 | 130 | else { |
1840 | 130 | if (compformat == BLOSC_BLOSCLZ_FORMAT) { |
1841 | 0 | nbytes = blosclz_decompress(src, cbytes, _dest, (int)neblock); |
1842 | 0 | } |
1843 | 130 | else if (compformat == BLOSC_LZ4_FORMAT) { |
1844 | 129 | nbytes = lz4_wrap_decompress((char*)src, (size_t)cbytes, |
1845 | 129 | (char*)_dest, (size_t)neblock); |
1846 | 129 | } |
1847 | 1 | #if defined(HAVE_ZLIB) |
1848 | 1 | else if (compformat == BLOSC_ZLIB_FORMAT) { |
1849 | 1 | nbytes = zlib_wrap_decompress((char*)src, (size_t)cbytes, |
1850 | 1 | (char*)_dest, (size_t)neblock); |
1851 | 1 | } |
1852 | 0 | #endif /* HAVE_ZLIB */ |
1853 | 0 | #if defined(HAVE_ZSTD) |
1854 | 0 | else if (compformat == BLOSC_ZSTD_FORMAT) { |
1855 | 0 | nbytes = zstd_wrap_decompress(thread_context, |
1856 | 0 | (char*)src, (size_t)cbytes, |
1857 | 0 | (char*)_dest, (size_t)neblock); |
1858 | 0 | } |
1859 | 0 | #endif /* HAVE_ZSTD */ |
1860 | 0 | else if (compformat == BLOSC_UDCODEC_FORMAT) { |
1861 | 0 | bool getcell = false; |
1862 | |
|
1863 | 0 | #if defined(HAVE_PLUGINS) |
1864 | 0 | if ((context->compcode == BLOSC_CODEC_ZFP_FIXED_RATE) && |
1865 | 0 | (thread_context->zfp_cell_nitems > 0)) { |
1866 | 0 | nbytes = zfp_getcell(thread_context, src, cbytes, _dest, neblock); |
1867 | 0 | if (nbytes < 0) { |
1868 | 0 | return BLOSC2_ERROR_DATA; |
1869 | 0 | } |
1870 | 0 | if (nbytes == thread_context->zfp_cell_nitems * typesize) { |
1871 | 0 | getcell = true; |
1872 | 0 | } |
1873 | 0 | } |
1874 | 0 | #endif /* HAVE_PLUGINS */ |
1875 | 0 | if (!getcell) { |
1876 | 0 | thread_context->zfp_cell_nitems = 0; |
1877 | 0 | for (int i = 0; i < g_ncodecs; ++i) { |
1878 | 0 | if (g_codecs[i].compcode == context->compcode) { |
1879 | 0 | if (g_codecs[i].decoder == NULL) { |
1880 | | // Dynamically load codec plugin |
1881 | 0 | if (fill_codec(&g_codecs[i]) < 0) { |
1882 | 0 | BLOSC_TRACE_ERROR("Could not load codec %d.", g_codecs[i].compcode); |
1883 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1884 | 0 | } |
1885 | 0 | } |
1886 | 0 | blosc2_dparams dparams; |
1887 | 0 | blosc2_ctx_get_dparams(context, &dparams); |
1888 | 0 | nbytes = g_codecs[i].decoder(src, |
1889 | 0 | cbytes, |
1890 | 0 | _dest, |
1891 | 0 | neblock, |
1892 | 0 | context->compcode_meta, |
1893 | 0 | &dparams, |
1894 | 0 | context->src); |
1895 | 0 | goto urcodecsuccess; |
1896 | 0 | } |
1897 | 0 | } |
1898 | 0 | BLOSC_TRACE_ERROR("User-defined compressor codec %d not found during decompression", context->compcode); |
1899 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1900 | 0 | } |
1901 | 0 | urcodecsuccess: |
1902 | 0 | ; |
1903 | 0 | } |
1904 | 0 | else { |
1905 | 0 | compname = clibcode_to_clibname(compformat); |
1906 | 0 | BLOSC_TRACE_ERROR( |
1907 | 0 | "Blosc has not been compiled with decompression " |
1908 | 0 | "support for '%s' format. " |
1909 | 0 | "Please recompile for adding this support.", compname); |
1910 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
1911 | 0 | } |
1912 | | |
1913 | | /* Check that decompressed bytes number is correct */ |
1914 | 130 | if ((nbytes != neblock) && (thread_context->zfp_cell_nitems == 0)) { |
1915 | 27 | return BLOSC2_ERROR_DATA; |
1916 | 27 | } |
1917 | | |
1918 | 130 | } |
1919 | 136 | src += cbytes; |
1920 | | // ctbytes += cbytes; |
1921 | 136 | _dest += nbytes; |
1922 | 136 | ntbytes += nbytes; |
1923 | 136 | } /* Closes j < nstreams */ |
1924 | | |
1925 | 136 | if (!instr_codec) { |
1926 | 136 | if (last_filter_index >= 0 || context->postfilter != NULL) { |
1927 | | /* Apply regular filter pipeline */ |
1928 | 112 | int errcode = pipeline_backward(thread_context, bsize, dest, dest_offset, tmp, tmp2, tmp3, |
1929 | 112 | last_filter_index, nblock); |
1930 | 112 | if (errcode < 0) |
1931 | 1 | return errcode; |
1932 | 112 | } |
1933 | 136 | } |
1934 | | |
1935 | | /* Return the number of uncompressed bytes */ |
1936 | 135 | return (int)ntbytes; |
1937 | 136 | } |
1938 | | |
1939 | | |
1940 | | /* Serial version for compression/decompression */ |
1941 | 130 | static int serial_blosc(struct thread_context* thread_context) { |
1942 | 130 | blosc2_context* context = thread_context->parent_context; |
1943 | 130 | int32_t j, bsize, leftoverblock; |
1944 | 130 | int32_t cbytes; |
1945 | 130 | int32_t ntbytes = context->output_bytes; |
1946 | 130 | int32_t* bstarts = context->bstarts; |
1947 | 130 | uint8_t* tmp = thread_context->tmp; |
1948 | 130 | uint8_t* tmp2 = thread_context->tmp2; |
1949 | 130 | int dict_training = context->use_dict && (context->dict_cdict == NULL); |
1950 | 130 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
1951 | 130 | if (!context->do_compress && context->special_type) { |
1952 | | // Fake a runlen as if it was a memcpyed chunk |
1953 | 33 | memcpyed = true; |
1954 | 33 | } |
1955 | | |
1956 | 200k | for (j = 0; j < context->nblocks; j++) { |
1957 | 200k | if (context->do_compress && !memcpyed && !dict_training) { |
1958 | 0 | _sw32(bstarts + j, ntbytes); |
1959 | 0 | } |
1960 | 200k | bsize = context->blocksize; |
1961 | 200k | leftoverblock = 0; |
1962 | 200k | if ((j == context->nblocks - 1) && (context->leftover > 0)) { |
1963 | 69 | bsize = context->leftover; |
1964 | 69 | leftoverblock = 1; |
1965 | 69 | } |
1966 | 200k | if (context->do_compress) { |
1967 | 0 | if (memcpyed && !context->prefilter) { |
1968 | | /* We want to memcpy only */ |
1969 | 0 | memcpy(context->dest + context->header_overhead + j * context->blocksize, |
1970 | 0 | context->src + j * context->blocksize, (unsigned int)bsize); |
1971 | 0 | cbytes = (int32_t)bsize; |
1972 | 0 | } |
1973 | 0 | else { |
1974 | | /* Regular compression */ |
1975 | 0 | cbytes = blosc_c(thread_context, bsize, leftoverblock, ntbytes, |
1976 | 0 | context->destsize, context->src, j * context->blocksize, |
1977 | 0 | context->dest + ntbytes, tmp, tmp2); |
1978 | 0 | if (cbytes == 0) { |
1979 | 0 | ntbytes = 0; /* incompressible data */ |
1980 | 0 | break; |
1981 | 0 | } |
1982 | 0 | } |
1983 | 0 | } |
1984 | 200k | else { |
1985 | | /* Regular decompression */ |
1986 | | // If memcpyed we don't have a bstarts section (because it is not needed) |
1987 | 200k | int32_t src_offset = memcpyed ? |
1988 | 200k | context->header_overhead + j * context->blocksize : sw32_(bstarts + j); |
1989 | 200k | cbytes = blosc_d(thread_context, bsize, leftoverblock, memcpyed, |
1990 | 200k | context->src, context->srcsize, src_offset, j, |
1991 | 200k | context->dest, j * context->blocksize, tmp, tmp2); |
1992 | 200k | } |
1993 | | |
1994 | 200k | if (cbytes < 0) { |
1995 | 31 | ntbytes = cbytes; /* error in blosc_c or blosc_d */ |
1996 | 31 | break; |
1997 | 31 | } |
1998 | 200k | ntbytes += cbytes; |
1999 | 200k | } |
2000 | | |
2001 | 130 | return ntbytes; |
2002 | 130 | } |
2003 | | |
2004 | | static void t_blosc_do_job(void *ctxt); |
2005 | | |
2006 | | /* Threaded version for compression/decompression */ |
2007 | 0 | static int parallel_blosc(blosc2_context* context) { |
2008 | 0 | #ifdef BLOSC_POSIX_BARRIERS |
2009 | 0 | int rc; |
2010 | 0 | #endif |
2011 | | /* Set sentinels */ |
2012 | 0 | context->thread_giveup_code = 1; |
2013 | 0 | context->thread_nblock = -1; |
2014 | |
|
2015 | 0 | if (threads_callback) { |
2016 | 0 | threads_callback(threads_callback_data, t_blosc_do_job, |
2017 | 0 | context->nthreads, sizeof(struct thread_context), (void*) context->thread_contexts); |
2018 | 0 | } |
2019 | 0 | else { |
2020 | | /* Synchronization point for all threads (wait for initialization) */ |
2021 | 0 | WAIT_INIT(-1, context); |
2022 | | |
2023 | | /* Synchronization point for all threads (wait for finalization) */ |
2024 | 0 | WAIT_FINISH(-1, context); |
2025 | 0 | } |
2026 | | |
2027 | 0 | if (context->thread_giveup_code <= 0) { |
2028 | | /* Compression/decompression gave up. Return error code. */ |
2029 | 0 | return context->thread_giveup_code; |
2030 | 0 | } |
2031 | | |
2032 | | /* Return the total bytes (de-)compressed in threads */ |
2033 | 0 | return (int)context->output_bytes; |
2034 | 0 | } |
2035 | | |
2036 | | /* initialize a thread_context that has already been allocated */ |
2037 | | static int init_thread_context(struct thread_context* thread_context, blosc2_context* context, int32_t tid) |
2038 | 201 | { |
2039 | 201 | int32_t ebsize; |
2040 | | |
2041 | 201 | thread_context->parent_context = context; |
2042 | 201 | thread_context->tid = tid; |
2043 | | |
2044 | 201 | ebsize = context->blocksize + context->typesize * (signed)sizeof(int32_t); |
2045 | 201 | thread_context->tmp_nbytes = (size_t)4 * ebsize; |
2046 | 201 | thread_context->tmp = my_malloc(thread_context->tmp_nbytes); |
2047 | 201 | BLOSC_ERROR_NULL(thread_context->tmp, BLOSC2_ERROR_MEMORY_ALLOC); |
2048 | 201 | thread_context->tmp2 = thread_context->tmp + ebsize; |
2049 | 201 | thread_context->tmp3 = thread_context->tmp2 + ebsize; |
2050 | 201 | thread_context->tmp4 = thread_context->tmp3 + ebsize; |
2051 | 201 | thread_context->tmp_blocksize = context->blocksize; |
2052 | 201 | thread_context->zfp_cell_nitems = 0; |
2053 | 201 | thread_context->zfp_cell_start = 0; |
2054 | 201 | #if defined(HAVE_ZSTD) |
2055 | 201 | thread_context->zstd_cctx = NULL; |
2056 | 201 | thread_context->zstd_dctx = NULL; |
2057 | 201 | #endif |
2058 | | |
2059 | | /* Create the hash table for LZ4 in case we are using IPP */ |
2060 | | #ifdef HAVE_IPP |
2061 | | IppStatus status; |
2062 | | int inlen = thread_context->tmp_blocksize > 0 ? thread_context->tmp_blocksize : 1 << 16; |
2063 | | int hash_size = 0; |
2064 | | status = ippsEncodeLZ4HashTableGetSize_8u(&hash_size); |
2065 | | if (status != ippStsNoErr) { |
2066 | | BLOSC_TRACE_ERROR("Error in ippsEncodeLZ4HashTableGetSize_8u."); |
2067 | | } |
2068 | | Ipp8u *hash_table = ippsMalloc_8u(hash_size); |
2069 | | status = ippsEncodeLZ4HashTableInit_8u(hash_table, inlen); |
2070 | | if (status != ippStsNoErr) { |
2071 | | BLOSC_TRACE_ERROR("Error in ippsEncodeLZ4HashTableInit_8u."); |
2072 | | } |
2073 | | thread_context->lz4_hash_table = hash_table; |
2074 | | #endif |
2075 | 201 | return 0; |
2076 | 201 | } |
2077 | | |
2078 | | static struct thread_context* |
2079 | 201 | create_thread_context(blosc2_context* context, int32_t tid) { |
2080 | 201 | struct thread_context* thread_context; |
2081 | 201 | thread_context = (struct thread_context*)my_malloc(sizeof(struct thread_context)); |
2082 | 201 | BLOSC_ERROR_NULL(thread_context, NULL); |
2083 | 201 | int rc = init_thread_context(thread_context, context, tid); |
2084 | 201 | if (rc < 0) { |
2085 | 0 | return NULL; |
2086 | 0 | } |
2087 | 201 | return thread_context; |
2088 | 201 | } |
2089 | | |
2090 | | /* free members of thread_context, but not thread_context itself */ |
2091 | 201 | static void destroy_thread_context(struct thread_context* thread_context) { |
2092 | 201 | my_free(thread_context->tmp); |
2093 | 201 | #if defined(HAVE_ZSTD) |
2094 | 201 | if (thread_context->zstd_cctx != NULL) { |
2095 | 0 | ZSTD_freeCCtx(thread_context->zstd_cctx); |
2096 | 0 | } |
2097 | 201 | if (thread_context->zstd_dctx != NULL) { |
2098 | 0 | ZSTD_freeDCtx(thread_context->zstd_dctx); |
2099 | 0 | } |
2100 | 201 | #endif |
2101 | | #ifdef HAVE_IPP |
2102 | | if (thread_context->lz4_hash_table != NULL) { |
2103 | | ippsFree(thread_context->lz4_hash_table); |
2104 | | } |
2105 | | #endif |
2106 | 201 | } |
2107 | | |
2108 | 201 | void free_thread_context(struct thread_context* thread_context) { |
2109 | 201 | destroy_thread_context(thread_context); |
2110 | 201 | my_free(thread_context); |
2111 | 201 | } |
2112 | | |
2113 | | |
2114 | 130 | int check_nthreads(blosc2_context* context) { |
2115 | 130 | if (context->nthreads <= 0) { |
2116 | 0 | BLOSC_TRACE_ERROR("nthreads must be >= 1 and <= %d", INT16_MAX); |
2117 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2118 | 0 | } |
2119 | | |
2120 | 130 | if (context->new_nthreads != context->nthreads) { |
2121 | 0 | if (context->nthreads > 1) { |
2122 | 0 | release_threadpool(context); |
2123 | 0 | } |
2124 | 0 | context->nthreads = context->new_nthreads; |
2125 | 0 | } |
2126 | 130 | if (context->new_nthreads > 1 && context->threads_started == 0) { |
2127 | 0 | init_threadpool(context); |
2128 | 0 | } |
2129 | | |
2130 | 130 | return context->nthreads; |
2131 | 130 | } |
2132 | | |
2133 | | /* Do the compression or decompression of the buffer depending on the |
2134 | | global params. */ |
2135 | 130 | static int do_job(blosc2_context* context) { |
2136 | 130 | int32_t ntbytes; |
2137 | | |
2138 | | /* Set sentinels */ |
2139 | 130 | context->dref_not_init = 1; |
2140 | | |
2141 | | /* Check whether we need to restart threads */ |
2142 | 130 | check_nthreads(context); |
2143 | | |
2144 | | /* Run the serial version when nthreads is 1 or when the buffers are |
2145 | | not larger than blocksize */ |
2146 | 130 | if (context->nthreads == 1 || (context->sourcesize / context->blocksize) <= 1) { |
2147 | | /* The context for this 'thread' has no been initialized yet */ |
2148 | 130 | if (context->serial_context == NULL) { |
2149 | 35 | context->serial_context = create_thread_context(context, 0); |
2150 | 35 | } |
2151 | 95 | else if (context->blocksize != context->serial_context->tmp_blocksize) { |
2152 | 32 | free_thread_context(context->serial_context); |
2153 | 32 | context->serial_context = create_thread_context(context, 0); |
2154 | 32 | } |
2155 | 130 | BLOSC_ERROR_NULL(context->serial_context, BLOSC2_ERROR_THREAD_CREATE); |
2156 | 130 | ntbytes = serial_blosc(context->serial_context); |
2157 | 130 | } |
2158 | 0 | else { |
2159 | 0 | ntbytes = parallel_blosc(context); |
2160 | 0 | } |
2161 | | |
2162 | 130 | return ntbytes; |
2163 | 130 | } |
2164 | | |
2165 | | |
2166 | | static int initialize_context_compression( |
2167 | | blosc2_context* context, const void* src, int32_t srcsize, void* dest, |
2168 | | int32_t destsize, int clevel, uint8_t const *filters, |
2169 | | uint8_t const *filters_meta, int32_t typesize, int compressor, |
2170 | | int32_t blocksize, int16_t new_nthreads, int16_t nthreads, |
2171 | | int32_t splitmode, |
2172 | | int tuner_id, void *tuner_params, |
2173 | 33 | blosc2_schunk* schunk) { |
2174 | | |
2175 | | /* Set parameters */ |
2176 | 33 | context->do_compress = 1; |
2177 | 33 | context->src = (const uint8_t*)src; |
2178 | 33 | context->srcsize = srcsize; |
2179 | 33 | context->dest = (uint8_t*)dest; |
2180 | 33 | context->output_bytes = 0; |
2181 | 33 | context->destsize = destsize; |
2182 | 33 | context->sourcesize = srcsize; |
2183 | 33 | context->typesize = typesize; |
2184 | 33 | context->filter_flags = filters_to_flags(filters); |
2185 | 231 | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
2186 | 198 | context->filters[i] = filters[i]; |
2187 | 198 | context->filters_meta[i] = filters_meta[i]; |
2188 | 198 | } |
2189 | 33 | context->compcode = compressor; |
2190 | 33 | context->nthreads = nthreads; |
2191 | 33 | context->new_nthreads = new_nthreads; |
2192 | 33 | context->end_threads = 0; |
2193 | 33 | context->clevel = clevel; |
2194 | 33 | context->schunk = schunk; |
2195 | 33 | context->tuner_params = tuner_params; |
2196 | 33 | context->tuner_id = tuner_id; |
2197 | 33 | context->splitmode = splitmode; |
2198 | | /* tuner some compression parameters */ |
2199 | 33 | context->blocksize = (int32_t)blocksize; |
2200 | 33 | int rc = 0; |
2201 | 33 | if (context->tuner_params != NULL) { |
2202 | 0 | if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) { |
2203 | 0 | if (blosc_stune_next_cparams(context) < 0) { |
2204 | 0 | BLOSC_TRACE_ERROR("Error in stune next_cparams func\n"); |
2205 | 0 | return BLOSC2_ERROR_TUNER; |
2206 | 0 | } |
2207 | 0 | } else { |
2208 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
2209 | 0 | if (g_tuners[i].id == context->tuner_id) { |
2210 | 0 | if (g_tuners[i].next_cparams == NULL) { |
2211 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
2212 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
2213 | 0 | return BLOSC2_ERROR_FAILURE; |
2214 | 0 | } |
2215 | 0 | } |
2216 | 0 | if (g_tuners[i].next_cparams(context) < 0) { |
2217 | 0 | BLOSC_TRACE_ERROR("Error in tuner %d next_cparams func\n", context->tuner_id); |
2218 | 0 | return BLOSC2_ERROR_TUNER; |
2219 | 0 | } |
2220 | 0 | if (g_tuners[i].id == BLOSC_BTUNE && context->blocksize == 0) { |
2221 | | // Call stune for initializing blocksize |
2222 | 0 | if (blosc_stune_next_blocksize(context) < 0) { |
2223 | 0 | BLOSC_TRACE_ERROR("Error in stune next_blocksize func\n"); |
2224 | 0 | return BLOSC2_ERROR_TUNER; |
2225 | 0 | } |
2226 | 0 | } |
2227 | 0 | goto urtunersuccess; |
2228 | 0 | } |
2229 | 0 | } |
2230 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id); |
2231 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2232 | 0 | } |
2233 | 33 | } else { |
2234 | 33 | if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) { |
2235 | 33 | rc = blosc_stune_next_blocksize(context); |
2236 | 33 | } else { |
2237 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
2238 | 0 | if (g_tuners[i].id == context->tuner_id) { |
2239 | 0 | if (g_tuners[i].next_blocksize == NULL) { |
2240 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
2241 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
2242 | 0 | return BLOSC2_ERROR_FAILURE; |
2243 | 0 | } |
2244 | 0 | } |
2245 | 0 | rc = g_tuners[i].next_blocksize(context); |
2246 | 0 | goto urtunersuccess; |
2247 | 0 | } |
2248 | 0 | } |
2249 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id); |
2250 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2251 | 0 | } |
2252 | 33 | } |
2253 | 33 | urtunersuccess:; |
2254 | 33 | if (rc < 0) { |
2255 | 0 | BLOSC_TRACE_ERROR("Error in tuner next_blocksize func\n"); |
2256 | 0 | return BLOSC2_ERROR_TUNER; |
2257 | 0 | } |
2258 | | |
2259 | | |
2260 | | /* Check buffer size limits */ |
2261 | 33 | if (srcsize > BLOSC2_MAX_BUFFERSIZE) { |
2262 | 0 | BLOSC_TRACE_ERROR("Input buffer size cannot exceed %d bytes.", |
2263 | 0 | BLOSC2_MAX_BUFFERSIZE); |
2264 | 0 | return BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED; |
2265 | 0 | } |
2266 | | |
2267 | 33 | if (destsize < BLOSC2_MAX_OVERHEAD) { |
2268 | 0 | BLOSC_TRACE_ERROR("Output buffer size should be larger than %d bytes.", |
2269 | 0 | BLOSC2_MAX_OVERHEAD); |
2270 | 0 | return BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED; |
2271 | 0 | } |
2272 | | |
2273 | | /* Compression level */ |
2274 | 33 | if (clevel < 0 || clevel > 9) { |
2275 | | /* If clevel not in 0..9, print an error */ |
2276 | 0 | BLOSC_TRACE_ERROR("`clevel` parameter must be between 0 and 9!."); |
2277 | 0 | return BLOSC2_ERROR_CODEC_PARAM; |
2278 | 0 | } |
2279 | | |
2280 | | /* Check typesize limits */ |
2281 | 33 | if (context->typesize > BLOSC2_MAXTYPESIZE) { |
2282 | | // If typesize is too large for Blosc2, return an error |
2283 | 0 | BLOSC_TRACE_ERROR("Typesize cannot exceed %d bytes.", BLOSC2_MAXTYPESIZE); |
2284 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2285 | 0 | } |
2286 | | /* Now, cap typesize so that blosc2 split machinery can continue to work */ |
2287 | 33 | if (context->typesize > BLOSC_MAX_TYPESIZE) { |
2288 | | /* If typesize is too large, treat buffer as an 1-byte stream. */ |
2289 | 0 | context->typesize = 1; |
2290 | 0 | } |
2291 | | |
2292 | 33 | blosc2_calculate_blocks(context); |
2293 | | |
2294 | 33 | return 1; |
2295 | 33 | } |
2296 | | |
2297 | | |
2298 | | static int initialize_context_decompression(blosc2_context* context, blosc_header* header, const void* src, |
2299 | 131 | int32_t srcsize, void* dest, int32_t destsize) { |
2300 | 131 | int32_t bstarts_end; |
2301 | | |
2302 | 131 | context->do_compress = 0; |
2303 | 131 | context->src = (const uint8_t*)src; |
2304 | 131 | context->srcsize = srcsize; |
2305 | 131 | context->dest = (uint8_t*)dest; |
2306 | 131 | context->destsize = destsize; |
2307 | 131 | context->output_bytes = 0; |
2308 | 131 | context->end_threads = 0; |
2309 | | |
2310 | 131 | int rc = blosc2_initialize_context_from_header(context, header); |
2311 | 131 | if (rc < 0) { |
2312 | 0 | return rc; |
2313 | 0 | } |
2314 | | |
2315 | | /* Check that we have enough space to decompress */ |
2316 | 131 | if (context->sourcesize > (int32_t)context->destsize) { |
2317 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
2318 | 0 | } |
2319 | | |
2320 | 131 | if (context->block_maskout != NULL && context->block_maskout_nitems != context->nblocks) { |
2321 | 0 | BLOSC_TRACE_ERROR("The number of items in block_maskout (%d) must match the number" |
2322 | 0 | " of blocks in chunk (%d).", |
2323 | 0 | context->block_maskout_nitems, context->nblocks); |
2324 | 0 | return BLOSC2_ERROR_DATA; |
2325 | 0 | } |
2326 | | |
2327 | 131 | context->special_type = (header->blosc2_flags >> 4) & BLOSC2_SPECIAL_MASK; |
2328 | 131 | if (context->special_type > BLOSC2_SPECIAL_LASTID) { |
2329 | 0 | BLOSC_TRACE_ERROR("Unknown special values ID (%d) ", |
2330 | 0 | context->special_type); |
2331 | 0 | return BLOSC2_ERROR_DATA; |
2332 | 0 | } |
2333 | | |
2334 | 131 | int memcpyed = (context->header_flags & (uint8_t) BLOSC_MEMCPYED); |
2335 | 131 | if (memcpyed && (header->cbytes != header->nbytes + context->header_overhead)) { |
2336 | 0 | BLOSC_TRACE_ERROR("Wrong header info for this memcpyed chunk"); |
2337 | 0 | return BLOSC2_ERROR_DATA; |
2338 | 0 | } |
2339 | | |
2340 | 131 | if ((header->nbytes == 0) && (header->cbytes == context->header_overhead) && |
2341 | 131 | !context->special_type) { |
2342 | | // A compressed buffer with only a header can only contain a zero-length buffer |
2343 | 0 | return 0; |
2344 | 0 | } |
2345 | | |
2346 | 131 | context->bstarts = (int32_t *) (context->src + context->header_overhead); |
2347 | 131 | bstarts_end = context->header_overhead; |
2348 | 131 | if (!context->special_type && !memcpyed) { |
2349 | | /* If chunk is not special or a memcpyed, we do have a bstarts section */ |
2350 | 97 | bstarts_end = (int32_t)(context->header_overhead + (context->nblocks * sizeof(int32_t))); |
2351 | 97 | } |
2352 | | |
2353 | 131 | if (srcsize < bstarts_end) { |
2354 | 0 | BLOSC_TRACE_ERROR("`bstarts` exceeds length of source buffer."); |
2355 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
2356 | 0 | } |
2357 | 131 | srcsize -= bstarts_end; |
2358 | | |
2359 | | /* Read optional dictionary if flag set */ |
2360 | 131 | if (context->blosc2_flags & BLOSC2_USEDICT) { |
2361 | 11 | #if defined(HAVE_ZSTD) |
2362 | 11 | context->use_dict = 1; |
2363 | 11 | if (context->dict_ddict != NULL) { |
2364 | | // Free the existing dictionary (probably from another chunk) |
2365 | 1 | ZSTD_freeDDict(context->dict_ddict); |
2366 | 1 | } |
2367 | | // The trained dictionary is after the bstarts block |
2368 | 11 | if (srcsize < (signed)sizeof(int32_t)) { |
2369 | 0 | BLOSC_TRACE_ERROR("Not enough space to read size of dictionary."); |
2370 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
2371 | 0 | } |
2372 | 11 | srcsize -= sizeof(int32_t); |
2373 | | // Read dictionary size |
2374 | 11 | context->dict_size = sw32_(context->src + bstarts_end); |
2375 | 11 | if (context->dict_size <= 0 || context->dict_size > BLOSC2_MAXDICTSIZE) { |
2376 | 1 | BLOSC_TRACE_ERROR("Dictionary size is smaller than minimum or larger than maximum allowed."); |
2377 | 0 | return BLOSC2_ERROR_CODEC_DICT; |
2378 | 1 | } |
2379 | 10 | if (srcsize < (int32_t)context->dict_size) { |
2380 | 0 | BLOSC_TRACE_ERROR("Not enough space to read entire dictionary."); |
2381 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
2382 | 0 | } |
2383 | 10 | srcsize -= context->dict_size; |
2384 | | // Read dictionary |
2385 | 10 | context->dict_buffer = (void*)(context->src + bstarts_end + sizeof(int32_t)); |
2386 | 10 | context->dict_ddict = ZSTD_createDDict(context->dict_buffer, context->dict_size); |
2387 | 10 | #endif // HAVE_ZSTD |
2388 | 10 | } |
2389 | | |
2390 | 130 | return 0; |
2391 | 131 | } |
2392 | | |
2393 | 0 | static int write_compression_header(blosc2_context* context, bool extended_header) { |
2394 | 0 | blosc_header header; |
2395 | 0 | int dont_split; |
2396 | 0 | int dict_training = context->use_dict && (context->dict_cdict == NULL); |
2397 | |
|
2398 | 0 | context->header_flags = 0; |
2399 | |
|
2400 | 0 | if (context->clevel == 0) { |
2401 | | /* Compression level 0 means buffer to be memcpy'ed */ |
2402 | 0 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2403 | 0 | } |
2404 | 0 | if (context->sourcesize < BLOSC_MIN_BUFFERSIZE) { |
2405 | | /* Buffer is too small. Try memcpy'ing. */ |
2406 | 0 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2407 | 0 | } |
2408 | |
|
2409 | 0 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
2410 | 0 | if (extended_header) { |
2411 | | /* Indicate that we are building an extended header */ |
2412 | 0 | context->header_overhead = BLOSC_EXTENDED_HEADER_LENGTH; |
2413 | 0 | context->header_flags |= (BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE); |
2414 | | /* Store filter pipeline info at the end of the header */ |
2415 | 0 | if (dict_training || memcpyed) { |
2416 | 0 | context->bstarts = NULL; |
2417 | 0 | context->output_bytes = context->header_overhead; |
2418 | 0 | } else { |
2419 | 0 | context->bstarts = (int32_t*)(context->dest + context->header_overhead); |
2420 | 0 | context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks; |
2421 | 0 | } |
2422 | 0 | } else { |
2423 | | // Regular header |
2424 | 0 | context->header_overhead = BLOSC_MIN_HEADER_LENGTH; |
2425 | 0 | if (memcpyed) { |
2426 | 0 | context->bstarts = NULL; |
2427 | 0 | context->output_bytes = context->header_overhead; |
2428 | 0 | } else { |
2429 | 0 | context->bstarts = (int32_t *) (context->dest + context->header_overhead); |
2430 | 0 | context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks; |
2431 | 0 | } |
2432 | 0 | } |
2433 | | |
2434 | | // when memcpyed bit is set, there is no point in dealing with others |
2435 | 0 | if (!memcpyed) { |
2436 | 0 | if (context->filter_flags & BLOSC_DOSHUFFLE) { |
2437 | | /* Byte-shuffle is active */ |
2438 | 0 | context->header_flags |= BLOSC_DOSHUFFLE; |
2439 | 0 | } |
2440 | |
|
2441 | 0 | if (context->filter_flags & BLOSC_DOBITSHUFFLE) { |
2442 | | /* Bit-shuffle is active */ |
2443 | 0 | context->header_flags |= BLOSC_DOBITSHUFFLE; |
2444 | 0 | } |
2445 | |
|
2446 | 0 | if (context->filter_flags & BLOSC_DODELTA) { |
2447 | | /* Delta is active */ |
2448 | 0 | context->header_flags |= BLOSC_DODELTA; |
2449 | 0 | } |
2450 | |
|
2451 | 0 | dont_split = !split_block(context, context->typesize, |
2452 | 0 | context->blocksize); |
2453 | | |
2454 | | /* dont_split is in bit 4 */ |
2455 | 0 | context->header_flags |= dont_split << 4; |
2456 | | /* codec starts at bit 5 */ |
2457 | 0 | uint8_t compformat = compcode_to_compformat(context->compcode); |
2458 | 0 | context->header_flags |= compformat << 5; |
2459 | 0 | } |
2460 | | |
2461 | | // Create blosc header and store to dest |
2462 | 0 | blosc2_intialize_header_from_context(context, &header, extended_header); |
2463 | |
|
2464 | 0 | memcpy(context->dest, &header, (extended_header) ? |
2465 | 0 | BLOSC_EXTENDED_HEADER_LENGTH : BLOSC_MIN_HEADER_LENGTH); |
2466 | |
|
2467 | 0 | return 1; |
2468 | 0 | } |
2469 | | |
2470 | | |
2471 | 0 | static int blosc_compress_context(blosc2_context* context) { |
2472 | 0 | int ntbytes = 0; |
2473 | 0 | blosc_timestamp_t last, current; |
2474 | 0 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
2475 | |
|
2476 | 0 | blosc_set_timestamp(&last); |
2477 | |
|
2478 | 0 | if (!memcpyed) { |
2479 | | /* Do the actual compression */ |
2480 | 0 | ntbytes = do_job(context); |
2481 | 0 | if (ntbytes < 0) { |
2482 | 0 | return ntbytes; |
2483 | 0 | } |
2484 | 0 | if (ntbytes == 0) { |
2485 | | // Try out with a memcpy later on (last chance for fitting src buffer in dest). |
2486 | 0 | context->header_flags |= (uint8_t)BLOSC_MEMCPYED; |
2487 | 0 | memcpyed = true; |
2488 | 0 | } |
2489 | 0 | } |
2490 | | |
2491 | 0 | int dont_split = (context->header_flags & 0x10) >> 4; |
2492 | 0 | int nstreams = context->nblocks; |
2493 | 0 | if (!dont_split) { |
2494 | | // When splitting, the number of streams is computed differently |
2495 | 0 | if (context->leftover) { |
2496 | 0 | nstreams = (context->nblocks - 1) * context->typesize + 1; |
2497 | 0 | } |
2498 | 0 | else { |
2499 | 0 | nstreams *= context->typesize; |
2500 | 0 | } |
2501 | 0 | } |
2502 | |
|
2503 | 0 | if (memcpyed) { |
2504 | 0 | if (context->sourcesize + context->header_overhead > context->destsize) { |
2505 | | /* We are exceeding maximum output size */ |
2506 | 0 | ntbytes = 0; |
2507 | 0 | } |
2508 | 0 | else { |
2509 | 0 | context->output_bytes = context->header_overhead; |
2510 | 0 | ntbytes = do_job(context); |
2511 | 0 | if (ntbytes < 0) { |
2512 | 0 | return ntbytes; |
2513 | 0 | } |
2514 | | // Success! update the memcpy bit in header |
2515 | 0 | context->dest[BLOSC2_CHUNK_FLAGS] = context->header_flags; |
2516 | | // and clear the memcpy bit in context (for next reuse) |
2517 | 0 | context->header_flags &= ~(uint8_t)BLOSC_MEMCPYED; |
2518 | 0 | } |
2519 | 0 | } |
2520 | 0 | else { |
2521 | | // Check whether we have a run for the whole chunk |
2522 | 0 | int start_csizes = context->header_overhead + 4 * context->nblocks; |
2523 | 0 | if (ntbytes == (int)(start_csizes + nstreams * sizeof(int32_t))) { |
2524 | | // The streams are all zero runs (by construction). Encode it... |
2525 | 0 | context->dest[BLOSC2_CHUNK_BLOSC2_FLAGS] |= BLOSC2_SPECIAL_ZERO << 4; |
2526 | | // ...and assign the new chunk length |
2527 | 0 | ntbytes = context->header_overhead; |
2528 | 0 | } |
2529 | 0 | } |
2530 | | |
2531 | | /* Set the number of compressed bytes in header */ |
2532 | 0 | _sw32(context->dest + BLOSC2_CHUNK_CBYTES, ntbytes); |
2533 | 0 | if (context->blosc2_flags & BLOSC2_INSTR_CODEC) { |
2534 | 0 | dont_split = (context->header_flags & 0x10) >> 4; |
2535 | 0 | int32_t blocksize = dont_split ? (int32_t)sizeof(blosc2_instr) : (int32_t)sizeof(blosc2_instr) * context->typesize; |
2536 | 0 | _sw32(context->dest + BLOSC2_CHUNK_NBYTES, nstreams * (int32_t)sizeof(blosc2_instr)); |
2537 | 0 | _sw32(context->dest + BLOSC2_CHUNK_BLOCKSIZE, blocksize); |
2538 | 0 | } |
2539 | | |
2540 | | /* Set the number of bytes in dest buffer (might be useful for tuner) */ |
2541 | 0 | context->destsize = ntbytes; |
2542 | |
|
2543 | 0 | if (context->tuner_params != NULL) { |
2544 | 0 | blosc_set_timestamp(¤t); |
2545 | 0 | double ctime = blosc_elapsed_secs(last, current); |
2546 | 0 | int rc; |
2547 | 0 | if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) { |
2548 | 0 | rc = blosc_stune_update(context, ctime); |
2549 | 0 | } else { |
2550 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
2551 | 0 | if (g_tuners[i].id == context->tuner_id) { |
2552 | 0 | if (g_tuners[i].update == NULL) { |
2553 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
2554 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
2555 | 0 | return BLOSC2_ERROR_FAILURE; |
2556 | 0 | } |
2557 | 0 | } |
2558 | 0 | rc = g_tuners[i].update(context, ctime); |
2559 | 0 | goto urtunersuccess; |
2560 | 0 | } |
2561 | 0 | } |
2562 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id); |
2563 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2564 | 0 | urtunersuccess:; |
2565 | 0 | } |
2566 | 0 | if (rc < 0) { |
2567 | 0 | BLOSC_TRACE_ERROR("Error in tuner update func\n"); |
2568 | 0 | return BLOSC2_ERROR_TUNER; |
2569 | 0 | } |
2570 | 0 | } |
2571 | | |
2572 | 0 | return ntbytes; |
2573 | 0 | } |
2574 | | |
2575 | | |
2576 | | /* The public secure routine for compression with context. */ |
2577 | | int blosc2_compress_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
2578 | 0 | void* dest, int32_t destsize) { |
2579 | 0 | int error, cbytes; |
2580 | |
|
2581 | 0 | if (context->do_compress != 1) { |
2582 | 0 | BLOSC_TRACE_ERROR("Context is not meant for compression. Giving up."); |
2583 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2584 | 0 | } |
2585 | | |
2586 | 0 | error = initialize_context_compression( |
2587 | 0 | context, src, srcsize, dest, destsize, |
2588 | 0 | context->clevel, context->filters, context->filters_meta, |
2589 | 0 | context->typesize, context->compcode, context->blocksize, |
2590 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
2591 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
2592 | 0 | if (error <= 0) { |
2593 | 0 | return error; |
2594 | 0 | } |
2595 | | |
2596 | | /* Write the extended header */ |
2597 | 0 | error = write_compression_header(context, true); |
2598 | 0 | if (error < 0) { |
2599 | 0 | return error; |
2600 | 0 | } |
2601 | | |
2602 | 0 | cbytes = blosc_compress_context(context); |
2603 | 0 | if (cbytes < 0) { |
2604 | 0 | return cbytes; |
2605 | 0 | } |
2606 | | |
2607 | 0 | if (context->use_dict && context->dict_cdict == NULL) { |
2608 | |
|
2609 | 0 | if (context->compcode != BLOSC_ZSTD) { |
2610 | 0 | const char* compname; |
2611 | 0 | compname = clibcode_to_clibname(context->compcode); |
2612 | 0 | BLOSC_TRACE_ERROR("Codec %s does not support dicts. Giving up.", |
2613 | 0 | compname); |
2614 | 0 | return BLOSC2_ERROR_CODEC_DICT; |
2615 | 0 | } |
2616 | | |
2617 | 0 | #ifdef HAVE_ZSTD |
2618 | | // Build the dictionary out of the filters outcome and compress with it |
2619 | 0 | int32_t dict_maxsize = BLOSC2_MAXDICTSIZE; |
2620 | | // Do not make the dict more than 5% larger than uncompressed buffer |
2621 | 0 | if (dict_maxsize > srcsize / 20) { |
2622 | 0 | dict_maxsize = srcsize / 20; |
2623 | 0 | } |
2624 | 0 | void* samples_buffer = context->dest + context->header_overhead; |
2625 | 0 | unsigned nblocks = (unsigned)context->nblocks; |
2626 | 0 | int dont_split = (context->header_flags & 0x10) >> 4; |
2627 | 0 | if (!dont_split) { |
2628 | 0 | nblocks = nblocks * context->typesize; |
2629 | 0 | } |
2630 | 0 | if (nblocks < 8) { |
2631 | 0 | nblocks = 8; // the minimum that accepts zstd as of 1.4.0 |
2632 | 0 | } |
2633 | | |
2634 | | // 1 allows to use most of the chunk for training, but it is slower, |
2635 | | // and it does not always seem to improve compression ratio. |
2636 | | // Let's use 16, which is faster and still gives good results |
2637 | | // on test_dict_schunk.c, but this seems very dependent on the data. |
2638 | 0 | unsigned sample_fraction = 16; |
2639 | 0 | size_t sample_size = context->sourcesize / nblocks / sample_fraction; |
2640 | | |
2641 | | // Populate the samples sizes for training the dictionary |
2642 | 0 | size_t* samples_sizes = malloc(nblocks * sizeof(void*)); |
2643 | 0 | BLOSC_ERROR_NULL(samples_sizes, BLOSC2_ERROR_MEMORY_ALLOC); |
2644 | 0 | for (size_t i = 0; i < nblocks; i++) { |
2645 | 0 | samples_sizes[i] = sample_size; |
2646 | 0 | } |
2647 | | |
2648 | | // Train from samples |
2649 | 0 | void* dict_buffer = malloc(dict_maxsize); |
2650 | 0 | BLOSC_ERROR_NULL(dict_buffer, BLOSC2_ERROR_MEMORY_ALLOC); |
2651 | 0 | int32_t dict_actual_size = (int32_t)ZDICT_trainFromBuffer( |
2652 | 0 | dict_buffer, dict_maxsize, |
2653 | 0 | samples_buffer, samples_sizes, nblocks); |
2654 | | |
2655 | | // TODO: experiment with parameters of low-level fast cover algorithm |
2656 | | // Note that this API is still unstable. See: https://github.com/facebook/zstd/issues/1599 |
2657 | | // ZDICT_fastCover_params_t fast_cover_params; |
2658 | | // memset(&fast_cover_params, 0, sizeof(fast_cover_params)); |
2659 | | // fast_cover_params.d = nblocks; |
2660 | | // fast_cover_params.steps = 4; |
2661 | | // fast_cover_params.zParams.compressionLevel = context->clevel; |
2662 | | // size_t dict_actual_size = ZDICT_optimizeTrainFromBuffer_fastCover( |
2663 | | // dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks, |
2664 | | // &fast_cover_params); |
2665 | |
|
2666 | 0 | if (ZDICT_isError(dict_actual_size) != ZSTD_error_no_error) { |
2667 | 0 | BLOSC_TRACE_ERROR("Error in ZDICT_trainFromBuffer(): '%s'." |
2668 | 0 | " Giving up.", ZDICT_getErrorName(dict_actual_size)); |
2669 | 0 | return BLOSC2_ERROR_CODEC_DICT; |
2670 | 0 | } |
2671 | 0 | assert(dict_actual_size > 0); |
2672 | 0 | free(samples_sizes); |
2673 | | |
2674 | | // Update bytes counter and pointers to bstarts for the new compressed buffer |
2675 | 0 | context->bstarts = (int32_t*)(context->dest + context->header_overhead); |
2676 | 0 | context->output_bytes = context->header_overhead + (int32_t)sizeof(int32_t) * context->nblocks; |
2677 | | /* Write the size of trained dict at the end of bstarts */ |
2678 | 0 | _sw32(context->dest + context->output_bytes, (int32_t)dict_actual_size); |
2679 | 0 | context->output_bytes += sizeof(int32_t); |
2680 | | /* Write the trained dict afterwards */ |
2681 | 0 | context->dict_buffer = context->dest + context->output_bytes; |
2682 | 0 | memcpy(context->dict_buffer, dict_buffer, (unsigned int)dict_actual_size); |
2683 | 0 | context->dict_cdict = ZSTD_createCDict(dict_buffer, dict_actual_size, 1); // TODO: use get_accel() |
2684 | 0 | free(dict_buffer); // the dictionary is copied in the header now |
2685 | 0 | context->output_bytes += (int32_t)dict_actual_size; |
2686 | 0 | context->dict_size = dict_actual_size; |
2687 | | |
2688 | | /* Compress with dict */ |
2689 | 0 | cbytes = blosc_compress_context(context); |
2690 | | |
2691 | | // Invalidate the dictionary for compressing other chunks using the same context |
2692 | 0 | context->dict_buffer = NULL; |
2693 | 0 | ZSTD_freeCDict(context->dict_cdict); |
2694 | 0 | context->dict_cdict = NULL; |
2695 | 0 | #endif // HAVE_ZSTD |
2696 | 0 | } |
2697 | | |
2698 | 0 | return cbytes; |
2699 | 0 | } |
2700 | | |
2701 | | |
2702 | | void build_filters(const int doshuffle, const int delta, |
2703 | 70 | const int32_t typesize, uint8_t* filters) { |
2704 | | |
2705 | | /* Fill the end part of the filter pipeline */ |
2706 | 70 | if ((doshuffle == BLOSC_SHUFFLE) && (typesize > 1)) |
2707 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; |
2708 | 70 | if (doshuffle == BLOSC_BITSHUFFLE) |
2709 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; |
2710 | 70 | if (doshuffle == BLOSC_NOSHUFFLE) |
2711 | 0 | filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_NOSHUFFLE; |
2712 | 70 | if (delta) |
2713 | 0 | filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; |
2714 | 70 | } |
2715 | | |
2716 | | /* The public secure routine for compression. */ |
2717 | | int blosc2_compress(int clevel, int doshuffle, int32_t typesize, |
2718 | 0 | const void* src, int32_t srcsize, void* dest, int32_t destsize) { |
2719 | 0 | int error; |
2720 | 0 | int result; |
2721 | 0 | char* envvar; |
2722 | | |
2723 | | /* Check whether the library should be initialized */ |
2724 | 0 | if (!g_initlib) blosc2_init(); |
2725 | | |
2726 | | /* Check for a BLOSC_CLEVEL environment variable */ |
2727 | 0 | envvar = getenv("BLOSC_CLEVEL"); |
2728 | 0 | if (envvar != NULL) { |
2729 | 0 | long value; |
2730 | 0 | value = strtol(envvar, NULL, 10); |
2731 | 0 | if ((errno != EINVAL) && (value >= 0)) { |
2732 | 0 | clevel = (int)value; |
2733 | 0 | } |
2734 | 0 | else { |
2735 | 0 | BLOSC_TRACE_WARNING("BLOSC_CLEVEL environment variable '%s' not recognized\n", envvar); |
2736 | 0 | } |
2737 | 0 | } |
2738 | | |
2739 | | /* Check for a BLOSC_SHUFFLE environment variable */ |
2740 | 0 | envvar = getenv("BLOSC_SHUFFLE"); |
2741 | 0 | if (envvar != NULL) { |
2742 | 0 | if (strcmp(envvar, "NOSHUFFLE") == 0) { |
2743 | 0 | doshuffle = BLOSC_NOSHUFFLE; |
2744 | 0 | } |
2745 | 0 | else if (strcmp(envvar, "SHUFFLE") == 0) { |
2746 | 0 | doshuffle = BLOSC_SHUFFLE; |
2747 | 0 | } |
2748 | 0 | else if (strcmp(envvar, "BITSHUFFLE") == 0) { |
2749 | 0 | doshuffle = BLOSC_BITSHUFFLE; |
2750 | 0 | } |
2751 | 0 | else { |
2752 | 0 | BLOSC_TRACE_WARNING("BLOSC_SHUFFLE environment variable '%s' not recognized\n", envvar); |
2753 | 0 | } |
2754 | 0 | } |
2755 | | |
2756 | | /* Check for a BLOSC_DELTA environment variable */ |
2757 | 0 | envvar = getenv("BLOSC_DELTA"); |
2758 | 0 | if (envvar != NULL) { |
2759 | 0 | if (strcmp(envvar, "1") == 0) { |
2760 | 0 | blosc2_set_delta(1); |
2761 | 0 | } else if (strcmp(envvar, "0") == 0) { |
2762 | 0 | blosc2_set_delta(0); |
2763 | 0 | } |
2764 | 0 | else { |
2765 | 0 | BLOSC_TRACE_WARNING("BLOSC_DELTA environment variable '%s' not recognized\n", envvar); |
2766 | 0 | } |
2767 | 0 | } |
2768 | | |
2769 | | /* Check for a BLOSC_TYPESIZE environment variable */ |
2770 | 0 | envvar = getenv("BLOSC_TYPESIZE"); |
2771 | 0 | if (envvar != NULL) { |
2772 | 0 | long value; |
2773 | 0 | value = strtol(envvar, NULL, 10); |
2774 | 0 | if ((errno != EINVAL) && (value > 0)) { |
2775 | 0 | typesize = (int32_t)value; |
2776 | 0 | } |
2777 | 0 | else { |
2778 | 0 | BLOSC_TRACE_WARNING("BLOSC_TYPESIZE environment variable '%s' not recognized\n", envvar); |
2779 | 0 | } |
2780 | 0 | } |
2781 | | |
2782 | | /* Check for a BLOSC_COMPRESSOR environment variable */ |
2783 | 0 | envvar = getenv("BLOSC_COMPRESSOR"); |
2784 | 0 | if (envvar != NULL) { |
2785 | 0 | result = blosc1_set_compressor(envvar); |
2786 | 0 | if (result < 0) { |
2787 | 0 | BLOSC_TRACE_WARNING("BLOSC_COMPRESSOR environment variable '%s' not recognized\n", envvar); |
2788 | 0 | } |
2789 | 0 | } |
2790 | | |
2791 | | /* Check for a BLOSC_BLOCKSIZE environment variable */ |
2792 | 0 | envvar = getenv("BLOSC_BLOCKSIZE"); |
2793 | 0 | if (envvar != NULL) { |
2794 | 0 | long blocksize; |
2795 | 0 | blocksize = strtol(envvar, NULL, 10); |
2796 | 0 | if ((errno != EINVAL) && (blocksize > 0)) { |
2797 | 0 | blosc1_set_blocksize((size_t) blocksize); |
2798 | 0 | } |
2799 | 0 | else { |
2800 | 0 | BLOSC_TRACE_WARNING("BLOSC_BLOCKSIZE environment variable '%s' not recognized\n", envvar); |
2801 | 0 | } |
2802 | 0 | } |
2803 | | |
2804 | | /* Check for a BLOSC_NTHREADS environment variable */ |
2805 | 0 | envvar = getenv("BLOSC_NTHREADS"); |
2806 | 0 | if (envvar != NULL) { |
2807 | 0 | long nthreads; |
2808 | 0 | nthreads = strtol(envvar, NULL, 10); |
2809 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
2810 | 0 | result = blosc2_set_nthreads((int16_t) nthreads); |
2811 | 0 | if (result < 0) { |
2812 | 0 | BLOSC_TRACE_WARNING("BLOSC_NTHREADS environment variable '%s' not recognized\n", envvar); |
2813 | 0 | } |
2814 | 0 | } |
2815 | 0 | } |
2816 | | |
2817 | | /* Check for a BLOSC_SPLITMODE environment variable */ |
2818 | 0 | envvar = getenv("BLOSC_SPLITMODE"); |
2819 | 0 | if (envvar != NULL) { |
2820 | 0 | int32_t splitmode = -1; |
2821 | 0 | if (strcmp(envvar, "ALWAYS") == 0) { |
2822 | 0 | splitmode = BLOSC_ALWAYS_SPLIT; |
2823 | 0 | } |
2824 | 0 | else if (strcmp(envvar, "NEVER") == 0) { |
2825 | 0 | splitmode = BLOSC_NEVER_SPLIT; |
2826 | 0 | } |
2827 | 0 | else if (strcmp(envvar, "AUTO") == 0) { |
2828 | 0 | splitmode = BLOSC_AUTO_SPLIT; |
2829 | 0 | } |
2830 | 0 | else if (strcmp(envvar, "FORWARD_COMPAT") == 0) { |
2831 | 0 | splitmode = BLOSC_FORWARD_COMPAT_SPLIT; |
2832 | 0 | } |
2833 | 0 | else { |
2834 | 0 | BLOSC_TRACE_WARNING("BLOSC_SPLITMODE environment variable '%s' not recognized\n", envvar); |
2835 | 0 | } |
2836 | | |
2837 | 0 | if (splitmode >= 0) { |
2838 | 0 | blosc1_set_splitmode(splitmode); |
2839 | 0 | } |
2840 | 0 | } |
2841 | | |
2842 | | /* Check for a BLOSC_NOLOCK environment variable. It is important |
2843 | | that this should be the last env var so that it can take the |
2844 | | previous ones into account */ |
2845 | 0 | envvar = getenv("BLOSC_NOLOCK"); |
2846 | 0 | if (envvar != NULL) { |
2847 | | // TODO: here is the only place that returns an extended header from |
2848 | | // a blosc1_compress() call. This should probably be fixed. |
2849 | 0 | const char *compname; |
2850 | 0 | blosc2_context *cctx; |
2851 | 0 | blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; |
2852 | |
|
2853 | 0 | blosc2_compcode_to_compname(g_compressor, &compname); |
2854 | | /* Create a context for compression */ |
2855 | 0 | build_filters(doshuffle, g_delta, typesize, cparams.filters); |
2856 | | // TODO: cparams can be shared in a multithreaded environment. do a copy! |
2857 | 0 | cparams.typesize = (uint8_t)typesize; |
2858 | 0 | cparams.compcode = (uint8_t)g_compressor; |
2859 | 0 | cparams.clevel = (uint8_t)clevel; |
2860 | 0 | cparams.nthreads = g_nthreads; |
2861 | 0 | cparams.splitmode = g_splitmode; |
2862 | 0 | cctx = blosc2_create_cctx(cparams); |
2863 | 0 | if (cctx == NULL) { |
2864 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
2865 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
2866 | 0 | } |
2867 | | /* Do the actual compression */ |
2868 | 0 | result = blosc2_compress_ctx(cctx, src, srcsize, dest, destsize); |
2869 | | /* Release context resources */ |
2870 | 0 | blosc2_free_ctx(cctx); |
2871 | 0 | return result; |
2872 | 0 | } |
2873 | | |
2874 | 0 | pthread_mutex_lock(&global_comp_mutex); |
2875 | | |
2876 | | /* Initialize a context compression */ |
2877 | 0 | uint8_t* filters = calloc(1, BLOSC2_MAX_FILTERS); |
2878 | 0 | BLOSC_ERROR_NULL(filters, BLOSC2_ERROR_MEMORY_ALLOC); |
2879 | 0 | uint8_t* filters_meta = calloc(1, BLOSC2_MAX_FILTERS); |
2880 | 0 | BLOSC_ERROR_NULL(filters_meta, BLOSC2_ERROR_MEMORY_ALLOC); |
2881 | 0 | build_filters(doshuffle, g_delta, typesize, filters); |
2882 | 0 | error = initialize_context_compression( |
2883 | 0 | g_global_context, src, srcsize, dest, destsize, clevel, filters, |
2884 | 0 | filters_meta, (int32_t)typesize, g_compressor, g_force_blocksize, g_nthreads, g_nthreads, |
2885 | 0 | g_splitmode, g_tuner, NULL, g_schunk); |
2886 | 0 | free(filters); |
2887 | 0 | free(filters_meta); |
2888 | 0 | if (error <= 0) { |
2889 | 0 | pthread_mutex_unlock(&global_comp_mutex); |
2890 | 0 | return error; |
2891 | 0 | } |
2892 | | |
2893 | 0 | envvar = getenv("BLOSC_BLOSC1_COMPAT"); |
2894 | 0 | if (envvar != NULL) { |
2895 | | /* Write chunk header without extended header (Blosc1 compatibility mode) */ |
2896 | 0 | error = write_compression_header(g_global_context, false); |
2897 | 0 | } |
2898 | 0 | else { |
2899 | 0 | error = write_compression_header(g_global_context, true); |
2900 | 0 | } |
2901 | 0 | if (error < 0) { |
2902 | 0 | pthread_mutex_unlock(&global_comp_mutex); |
2903 | 0 | return error; |
2904 | 0 | } |
2905 | | |
2906 | 0 | result = blosc_compress_context(g_global_context); |
2907 | |
|
2908 | 0 | pthread_mutex_unlock(&global_comp_mutex); |
2909 | |
|
2910 | 0 | return result; |
2911 | 0 | } |
2912 | | |
2913 | | |
2914 | | /* The public routine for compression. */ |
2915 | | int blosc1_compress(int clevel, int doshuffle, size_t typesize, size_t nbytes, |
2916 | 0 | const void* src, void* dest, size_t destsize) { |
2917 | 0 | return blosc2_compress(clevel, doshuffle, (int32_t)typesize, src, (int32_t)nbytes, dest, (int32_t)destsize); |
2918 | 0 | } |
2919 | | |
2920 | | |
2921 | | |
2922 | | static int blosc_run_decompression_with_context(blosc2_context* context, const void* src, int32_t srcsize, |
2923 | 132 | void* dest, int32_t destsize) { |
2924 | 132 | blosc_header header; |
2925 | 132 | int32_t ntbytes; |
2926 | 132 | int rc; |
2927 | | |
2928 | 132 | rc = read_chunk_header(src, srcsize, true, &header); |
2929 | 132 | if (rc < 0) { |
2930 | 1 | return rc; |
2931 | 1 | } |
2932 | | |
2933 | 131 | if (header.nbytes > destsize) { |
2934 | | // Not enough space for writing into the destination |
2935 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
2936 | 0 | } |
2937 | | |
2938 | 131 | rc = initialize_context_decompression(context, &header, src, srcsize, dest, destsize); |
2939 | 131 | if (rc < 0) { |
2940 | 1 | return rc; |
2941 | 1 | } |
2942 | | |
2943 | | /* Do the actual decompression */ |
2944 | 130 | ntbytes = do_job(context); |
2945 | 130 | if (ntbytes < 0) { |
2946 | 31 | return ntbytes; |
2947 | 31 | } |
2948 | | |
2949 | 99 | assert(ntbytes <= (int32_t)destsize); |
2950 | 99 | return ntbytes; |
2951 | 130 | } |
2952 | | |
2953 | | |
2954 | | /* The public secure routine for decompression with context. */ |
2955 | | int blosc2_decompress_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
2956 | 132 | void* dest, int32_t destsize) { |
2957 | 132 | int result; |
2958 | | |
2959 | 132 | if (context->do_compress != 0) { |
2960 | 0 | BLOSC_TRACE_ERROR("Context is not meant for decompression. Giving up."); |
2961 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2962 | 0 | } |
2963 | | |
2964 | 132 | result = blosc_run_decompression_with_context(context, src, srcsize, dest, destsize); |
2965 | | |
2966 | | // Reset a possible block_maskout |
2967 | 132 | if (context->block_maskout != NULL) { |
2968 | 0 | free(context->block_maskout); |
2969 | 0 | context->block_maskout = NULL; |
2970 | 0 | } |
2971 | 132 | context->block_maskout_nitems = 0; |
2972 | | |
2973 | 132 | return result; |
2974 | 132 | } |
2975 | | |
2976 | | |
2977 | | /* The public secure routine for decompression. */ |
2978 | 0 | int blosc2_decompress(const void* src, int32_t srcsize, void* dest, int32_t destsize) { |
2979 | 0 | int result; |
2980 | 0 | char* envvar; |
2981 | 0 | long nthreads; |
2982 | 0 | blosc2_context *dctx; |
2983 | 0 | blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; |
2984 | | |
2985 | | /* Check whether the library should be initialized */ |
2986 | 0 | if (!g_initlib) blosc2_init(); |
2987 | | |
2988 | | /* Check for a BLOSC_NTHREADS environment variable */ |
2989 | 0 | envvar = getenv("BLOSC_NTHREADS"); |
2990 | 0 | if (envvar != NULL) { |
2991 | 0 | nthreads = strtol(envvar, NULL, 10); |
2992 | 0 | if ((errno != EINVAL)) { |
2993 | 0 | if ((nthreads <= 0) || (nthreads > INT16_MAX)) { |
2994 | 0 | BLOSC_TRACE_ERROR("nthreads must be >= 1 and <= %d", INT16_MAX); |
2995 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
2996 | 0 | } |
2997 | 0 | result = blosc2_set_nthreads((int16_t) nthreads); |
2998 | 0 | if (result < 0) { |
2999 | 0 | return result; |
3000 | 0 | } |
3001 | 0 | } |
3002 | 0 | } |
3003 | | |
3004 | | /* Check for a BLOSC_NOLOCK environment variable. It is important |
3005 | | that this should be the last env var so that it can take the |
3006 | | previous ones into account */ |
3007 | 0 | envvar = getenv("BLOSC_NOLOCK"); |
3008 | 0 | if (envvar != NULL) { |
3009 | 0 | dparams.nthreads = g_nthreads; |
3010 | 0 | dctx = blosc2_create_dctx(dparams); |
3011 | 0 | if (dctx == NULL) { |
3012 | 0 | BLOSC_TRACE_ERROR("Error while creating the decompression context"); |
3013 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
3014 | 0 | } |
3015 | 0 | result = blosc2_decompress_ctx(dctx, src, srcsize, dest, destsize); |
3016 | 0 | blosc2_free_ctx(dctx); |
3017 | 0 | return result; |
3018 | 0 | } |
3019 | | |
3020 | 0 | pthread_mutex_lock(&global_comp_mutex); |
3021 | |
|
3022 | 0 | result = blosc_run_decompression_with_context( |
3023 | 0 | g_global_context, src, srcsize, dest, destsize); |
3024 | |
|
3025 | 0 | pthread_mutex_unlock(&global_comp_mutex); |
3026 | |
|
3027 | 0 | return result; |
3028 | 0 | } |
3029 | | |
3030 | | |
3031 | | /* The public routine for decompression. */ |
3032 | 0 | int blosc1_decompress(const void* src, void* dest, size_t destsize) { |
3033 | 0 | return blosc2_decompress(src, INT32_MAX, dest, (int32_t)destsize); |
3034 | 0 | } |
3035 | | |
3036 | | |
3037 | | /* Specific routine optimized for decompression a small number of |
3038 | | items out of a compressed chunk. This does not use threads because |
3039 | | it would affect negatively to performance. */ |
3040 | | int _blosc_getitem(blosc2_context* context, blosc_header* header, const void* src, int32_t srcsize, |
3041 | 134 | int start, int nitems, void* dest, int32_t destsize) { |
3042 | 134 | uint8_t* _src = (uint8_t*)(src); /* current pos for source buffer */ |
3043 | 134 | uint8_t* _dest = (uint8_t*)(dest); |
3044 | 134 | int32_t ntbytes = 0; /* the number of uncompressed bytes */ |
3045 | 134 | int32_t bsize, bsize2, ebsize, leftoverblock; |
3046 | 134 | int32_t startb, stopb; |
3047 | 134 | int32_t stop = start + nitems; |
3048 | 134 | int j, rc; |
3049 | | |
3050 | 134 | if (nitems == 0) { |
3051 | | // We have nothing to do |
3052 | 0 | return 0; |
3053 | 0 | } |
3054 | 134 | if (nitems * header->typesize > destsize) { |
3055 | 0 | BLOSC_TRACE_ERROR("`nitems`*`typesize` out of dest bounds."); |
3056 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
3057 | 0 | } |
3058 | | |
3059 | 134 | context->bstarts = (int32_t*)(_src + context->header_overhead); |
3060 | | |
3061 | | /* Check region boundaries */ |
3062 | 134 | if ((start < 0) || (start * header->typesize > header->nbytes)) { |
3063 | 0 | BLOSC_TRACE_ERROR("`start` out of bounds."); |
3064 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3065 | 0 | } |
3066 | | |
3067 | 134 | if ((stop < 0) || (stop * header->typesize > header->nbytes)) { |
3068 | 0 | BLOSC_TRACE_ERROR("`start`+`nitems` out of bounds."); |
3069 | 0 | return BLOSC2_ERROR_INVALID_PARAM; |
3070 | 0 | } |
3071 | | |
3072 | 134 | int chunk_memcpy = header->flags & 0x1; |
3073 | 134 | if (!context->special_type && !chunk_memcpy && |
3074 | 134 | ((uint8_t *)(_src + srcsize) < (uint8_t *)(context->bstarts + context->nblocks))) { |
3075 | 0 | BLOSC_TRACE_ERROR("`bstarts` out of bounds."); |
3076 | 0 | return BLOSC2_ERROR_READ_BUFFER; |
3077 | 0 | } |
3078 | | |
3079 | 134 | bool memcpyed = header->flags & (uint8_t)BLOSC_MEMCPYED; |
3080 | 134 | if (context->special_type) { |
3081 | | // Fake a runlen as if its a memcpyed chunk |
3082 | 20 | memcpyed = true; |
3083 | 20 | } |
3084 | | |
3085 | 134 | bool is_lazy = ((context->header_overhead == BLOSC_EXTENDED_HEADER_LENGTH) && |
3086 | 134 | (context->blosc2_flags & 0x08u) && !context->special_type); |
3087 | 134 | if (memcpyed && !is_lazy && !context->postfilter) { |
3088 | | // Short-circuit for (non-lazy) memcpyed or special values |
3089 | 134 | ntbytes = nitems * header->typesize; |
3090 | 134 | switch (context->special_type) { |
3091 | 0 | case BLOSC2_SPECIAL_VALUE: |
3092 | | // All repeated values |
3093 | 0 | rc = set_values(context->typesize, _src, _dest, ntbytes); |
3094 | 0 | if (rc < 0) { |
3095 | 0 | BLOSC_TRACE_ERROR("set_values failed"); |
3096 | 0 | return BLOSC2_ERROR_DATA; |
3097 | 0 | } |
3098 | 0 | break; |
3099 | 0 | case BLOSC2_SPECIAL_NAN: |
3100 | 0 | rc = set_nans(context->typesize, _dest, ntbytes); |
3101 | 0 | if (rc < 0) { |
3102 | 0 | BLOSC_TRACE_ERROR("set_nans failed"); |
3103 | 0 | return BLOSC2_ERROR_DATA; |
3104 | 0 | } |
3105 | 0 | break; |
3106 | 18 | case BLOSC2_SPECIAL_ZERO: |
3107 | 18 | memset(_dest, 0, ntbytes); |
3108 | 18 | break; |
3109 | 2 | case BLOSC2_SPECIAL_UNINIT: |
3110 | | // We do nothing here |
3111 | 2 | break; |
3112 | 114 | case BLOSC2_NO_SPECIAL: |
3113 | 114 | _src += context->header_overhead + start * context->typesize; |
3114 | 114 | memcpy(_dest, _src, ntbytes); |
3115 | 114 | break; |
3116 | 0 | default: |
3117 | 0 | BLOSC_TRACE_ERROR("Unhandled special value case"); |
3118 | 0 | BLOSC_ERROR(BLOSC2_ERROR_SCHUNK_SPECIAL); |
3119 | 134 | } |
3120 | 134 | return ntbytes; |
3121 | 134 | } |
3122 | | |
3123 | 0 | ebsize = header->blocksize + header->typesize * (signed)sizeof(int32_t); |
3124 | 0 | struct thread_context* scontext = context->serial_context; |
3125 | | /* Resize the temporaries in serial context if needed */ |
3126 | 0 | if (header->blocksize > scontext->tmp_blocksize) { |
3127 | 0 | my_free(scontext->tmp); |
3128 | 0 | scontext->tmp_nbytes = (size_t)4 * ebsize; |
3129 | 0 | scontext->tmp = my_malloc(scontext->tmp_nbytes); |
3130 | 0 | BLOSC_ERROR_NULL(scontext->tmp, BLOSC2_ERROR_MEMORY_ALLOC); |
3131 | 0 | scontext->tmp2 = scontext->tmp + ebsize; |
3132 | 0 | scontext->tmp3 = scontext->tmp2 + ebsize; |
3133 | 0 | scontext->tmp4 = scontext->tmp3 + ebsize; |
3134 | 0 | scontext->tmp_blocksize = (int32_t)header->blocksize; |
3135 | 0 | } |
3136 | | |
3137 | 0 | for (j = 0; j < context->nblocks; j++) { |
3138 | 0 | bsize = header->blocksize; |
3139 | 0 | leftoverblock = 0; |
3140 | 0 | if ((j == context->nblocks - 1) && (context->leftover > 0)) { |
3141 | 0 | bsize = context->leftover; |
3142 | 0 | leftoverblock = 1; |
3143 | 0 | } |
3144 | | |
3145 | | /* Compute start & stop for each block */ |
3146 | 0 | startb = start * header->typesize - j * header->blocksize; |
3147 | 0 | stopb = stop * header->typesize - j * header->blocksize; |
3148 | 0 | if (stopb <= 0) { |
3149 | | // We can exit as soon as this block is beyond stop |
3150 | 0 | break; |
3151 | 0 | } |
3152 | 0 | if (startb >= header->blocksize) { |
3153 | 0 | continue; |
3154 | 0 | } |
3155 | 0 | if (startb < 0) { |
3156 | 0 | startb = 0; |
3157 | 0 | } |
3158 | 0 | if (stopb > header->blocksize) { |
3159 | 0 | stopb = header->blocksize; |
3160 | 0 | } |
3161 | 0 | bsize2 = stopb - startb; |
3162 | |
|
3163 | 0 | #if defined(HAVE_PLUGINS) |
3164 | 0 | if (context->compcode == BLOSC_CODEC_ZFP_FIXED_RATE) { |
3165 | 0 | scontext->zfp_cell_start = startb / context->typesize; |
3166 | 0 | scontext->zfp_cell_nitems = nitems; |
3167 | 0 | } |
3168 | 0 | #endif /* HAVE_PLUGINS */ |
3169 | | |
3170 | | /* Do the actual data copy */ |
3171 | | // Regular decompression. Put results in tmp2. |
3172 | | // If the block is aligned and the worst case fits in destination, let's avoid a copy |
3173 | 0 | bool get_single_block = ((startb == 0) && (bsize == nitems * header->typesize)); |
3174 | 0 | uint8_t* tmp2 = get_single_block ? dest : scontext->tmp2; |
3175 | | |
3176 | | // If memcpyed we don't have a bstarts section (because it is not needed) |
3177 | 0 | int32_t src_offset = memcpyed ? |
3178 | 0 | context->header_overhead + j * header->blocksize : sw32_(context->bstarts + j); |
3179 | |
|
3180 | 0 | int32_t cbytes = blosc_d(context->serial_context, bsize, leftoverblock, memcpyed, |
3181 | 0 | src, srcsize, src_offset, j, |
3182 | 0 | tmp2, 0, scontext->tmp, scontext->tmp3); |
3183 | 0 | if (cbytes < 0) { |
3184 | 0 | ntbytes = cbytes; |
3185 | 0 | break; |
3186 | 0 | } |
3187 | 0 | if (scontext->zfp_cell_nitems > 0) { |
3188 | 0 | if (cbytes == bsize2) { |
3189 | 0 | memcpy((uint8_t *) dest, tmp2, (unsigned int) bsize2); |
3190 | 0 | } else if (cbytes == context->blocksize) { |
3191 | 0 | memcpy((uint8_t *) dest, tmp2 + scontext->zfp_cell_start * context->typesize, (unsigned int) bsize2); |
3192 | 0 | cbytes = bsize2; |
3193 | 0 | } |
3194 | 0 | } else if (!get_single_block) { |
3195 | | /* Copy to destination */ |
3196 | 0 | memcpy((uint8_t *) dest + ntbytes, tmp2 + startb, (unsigned int) bsize2); |
3197 | 0 | } |
3198 | 0 | ntbytes += bsize2; |
3199 | 0 | } |
3200 | |
|
3201 | 0 | scontext->zfp_cell_nitems = 0; |
3202 | |
|
3203 | 0 | return ntbytes; |
3204 | 0 | } |
3205 | | |
3206 | 134 | int blosc2_getitem(const void* src, int32_t srcsize, int start, int nitems, void* dest, int32_t destsize) { |
3207 | 134 | blosc2_context context; |
3208 | 134 | int result; |
3209 | | |
3210 | | /* Minimally populate the context */ |
3211 | 134 | memset(&context, 0, sizeof(blosc2_context)); |
3212 | | |
3213 | 134 | context.schunk = g_schunk; |
3214 | 134 | context.nthreads = 1; // force a serial decompression; fixes #95 |
3215 | | |
3216 | | /* Call the actual getitem function */ |
3217 | 134 | result = blosc2_getitem_ctx(&context, src, srcsize, start, nitems, dest, destsize); |
3218 | | |
3219 | | /* Release resources */ |
3220 | 134 | if (context.serial_context != NULL) { |
3221 | 134 | free_thread_context(context.serial_context); |
3222 | 134 | } |
3223 | 134 | return result; |
3224 | 134 | } |
3225 | | |
3226 | | /* Specific routine optimized for decompression a small number of |
3227 | | items out of a compressed chunk. Public non-contextual API. */ |
3228 | 0 | int blosc1_getitem(const void* src, int start, int nitems, void* dest) { |
3229 | 0 | return blosc2_getitem(src, INT32_MAX, start, nitems, dest, INT32_MAX); |
3230 | 0 | } |
3231 | | |
3232 | | int blosc2_getitem_ctx(blosc2_context* context, const void* src, int32_t srcsize, |
3233 | 134 | int start, int nitems, void* dest, int32_t destsize) { |
3234 | 134 | blosc_header header; |
3235 | 134 | int result; |
3236 | | |
3237 | | /* Minimally populate the context */ |
3238 | 134 | result = read_chunk_header((uint8_t *) src, srcsize, true, &header); |
3239 | 134 | if (result < 0) { |
3240 | 0 | return result; |
3241 | 0 | } |
3242 | | |
3243 | 134 | context->src = src; |
3244 | 134 | context->srcsize = srcsize; |
3245 | 134 | context->dest = dest; |
3246 | 134 | context->destsize = destsize; |
3247 | | |
3248 | 134 | result = blosc2_initialize_context_from_header(context, &header); |
3249 | 134 | if (result < 0) { |
3250 | 0 | return result; |
3251 | 0 | } |
3252 | | |
3253 | 134 | if (context->serial_context == NULL) { |
3254 | 134 | context->serial_context = create_thread_context(context, 0); |
3255 | 134 | } |
3256 | 134 | BLOSC_ERROR_NULL(context->serial_context, BLOSC2_ERROR_THREAD_CREATE); |
3257 | | /* Call the actual getitem function */ |
3258 | 134 | result = _blosc_getitem(context, &header, src, srcsize, start, nitems, dest, destsize); |
3259 | | |
3260 | 134 | return result; |
3261 | 134 | } |
3262 | | |
3263 | | /* execute single compression/decompression job for a single thread_context */ |
3264 | | static void t_blosc_do_job(void *ctxt) |
3265 | 0 | { |
3266 | 0 | struct thread_context* thcontext = (struct thread_context*)ctxt; |
3267 | 0 | blosc2_context* context = thcontext->parent_context; |
3268 | 0 | int32_t cbytes; |
3269 | 0 | int32_t ntdest; |
3270 | 0 | int32_t tblocks; /* number of blocks per thread */ |
3271 | 0 | int32_t tblock; /* limit block on a thread */ |
3272 | 0 | int32_t nblock_; /* private copy of nblock */ |
3273 | 0 | int32_t bsize; |
3274 | 0 | int32_t leftoverblock; |
3275 | | /* Parameters for threads */ |
3276 | 0 | int32_t blocksize; |
3277 | 0 | int32_t ebsize; |
3278 | 0 | int32_t srcsize; |
3279 | 0 | bool compress = context->do_compress != 0; |
3280 | 0 | int32_t maxbytes; |
3281 | 0 | int32_t nblocks; |
3282 | 0 | int32_t leftover; |
3283 | 0 | int32_t leftover2; |
3284 | 0 | int32_t* bstarts; |
3285 | 0 | const uint8_t* src; |
3286 | 0 | uint8_t* dest; |
3287 | 0 | uint8_t* tmp; |
3288 | 0 | uint8_t* tmp2; |
3289 | 0 | uint8_t* tmp3; |
3290 | | |
3291 | | /* Get parameters for this thread before entering the main loop */ |
3292 | 0 | blocksize = context->blocksize; |
3293 | 0 | ebsize = blocksize + context->typesize * (int32_t)sizeof(int32_t); |
3294 | 0 | maxbytes = context->destsize; |
3295 | 0 | nblocks = context->nblocks; |
3296 | 0 | leftover = context->leftover; |
3297 | 0 | bstarts = context->bstarts; |
3298 | 0 | src = context->src; |
3299 | 0 | srcsize = context->srcsize; |
3300 | 0 | dest = context->dest; |
3301 | | |
3302 | | /* Resize the temporaries if needed */ |
3303 | 0 | if (blocksize > thcontext->tmp_blocksize) { |
3304 | 0 | my_free(thcontext->tmp); |
3305 | 0 | thcontext->tmp_nbytes = (size_t) 4 * ebsize; |
3306 | 0 | thcontext->tmp = my_malloc(thcontext->tmp_nbytes); |
3307 | 0 | thcontext->tmp2 = thcontext->tmp + ebsize; |
3308 | 0 | thcontext->tmp3 = thcontext->tmp2 + ebsize; |
3309 | 0 | thcontext->tmp4 = thcontext->tmp3 + ebsize; |
3310 | 0 | thcontext->tmp_blocksize = blocksize; |
3311 | 0 | } |
3312 | |
|
3313 | 0 | tmp = thcontext->tmp; |
3314 | 0 | tmp2 = thcontext->tmp2; |
3315 | 0 | tmp3 = thcontext->tmp3; |
3316 | | |
3317 | | // Determine whether we can do a static distribution of workload among different threads |
3318 | 0 | bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; |
3319 | 0 | if (!context->do_compress && context->special_type) { |
3320 | | // Fake a runlen as if its a memcpyed chunk |
3321 | 0 | memcpyed = true; |
3322 | 0 | } |
3323 | |
|
3324 | 0 | bool static_schedule = (!compress || memcpyed) && context->block_maskout == NULL; |
3325 | 0 | if (static_schedule) { |
3326 | | /* Blocks per thread */ |
3327 | 0 | tblocks = nblocks / context->nthreads; |
3328 | 0 | leftover2 = nblocks % context->nthreads; |
3329 | 0 | tblocks = (leftover2 > 0) ? tblocks + 1 : tblocks; |
3330 | 0 | nblock_ = thcontext->tid * tblocks; |
3331 | 0 | tblock = nblock_ + tblocks; |
3332 | 0 | if (tblock > nblocks) { |
3333 | 0 | tblock = nblocks; |
3334 | 0 | } |
3335 | 0 | } |
3336 | 0 | else { |
3337 | | // Use dynamic schedule via a queue. Get the next block. |
3338 | 0 | pthread_mutex_lock(&context->count_mutex); |
3339 | 0 | context->thread_nblock++; |
3340 | 0 | nblock_ = context->thread_nblock; |
3341 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3342 | 0 | tblock = nblocks; |
3343 | 0 | } |
3344 | | |
3345 | | /* Loop over blocks */ |
3346 | 0 | leftoverblock = 0; |
3347 | 0 | while ((nblock_ < tblock) && (context->thread_giveup_code > 0)) { |
3348 | 0 | bsize = blocksize; |
3349 | 0 | if (nblock_ == (nblocks - 1) && (leftover > 0)) { |
3350 | 0 | bsize = leftover; |
3351 | 0 | leftoverblock = 1; |
3352 | 0 | } |
3353 | 0 | if (compress) { |
3354 | 0 | if (memcpyed) { |
3355 | 0 | if (!context->prefilter) { |
3356 | | /* We want to memcpy only */ |
3357 | 0 | memcpy(dest + context->header_overhead + nblock_ * blocksize, |
3358 | 0 | src + nblock_ * blocksize, (unsigned int) bsize); |
3359 | 0 | cbytes = (int32_t) bsize; |
3360 | 0 | } |
3361 | 0 | else { |
3362 | | /* Only the prefilter has to be executed, and this is done in blosc_c(). |
3363 | | * However, no further actions are needed, so we can put the result |
3364 | | * directly in dest. */ |
3365 | 0 | cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, |
3366 | 0 | ebsize, src, nblock_ * blocksize, |
3367 | 0 | dest + context->header_overhead + nblock_ * blocksize, |
3368 | 0 | tmp, tmp3); |
3369 | 0 | } |
3370 | 0 | } |
3371 | 0 | else { |
3372 | | /* Regular compression */ |
3373 | 0 | cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, |
3374 | 0 | ebsize, src, nblock_ * blocksize, tmp2, tmp, tmp3); |
3375 | 0 | } |
3376 | 0 | } |
3377 | 0 | else { |
3378 | | /* Regular decompression */ |
3379 | 0 | if (context->special_type == BLOSC2_NO_SPECIAL && !memcpyed && |
3380 | 0 | (srcsize < (int32_t)(context->header_overhead + (sizeof(int32_t) * nblocks)))) { |
3381 | | /* Not enough input to read all `bstarts` */ |
3382 | 0 | cbytes = -1; |
3383 | 0 | } |
3384 | 0 | else { |
3385 | | // If memcpyed we don't have a bstarts section (because it is not needed) |
3386 | 0 | int32_t src_offset = memcpyed ? |
3387 | 0 | context->header_overhead + nblock_ * blocksize : sw32_(bstarts + nblock_); |
3388 | 0 | cbytes = blosc_d(thcontext, bsize, leftoverblock, memcpyed, |
3389 | 0 | src, srcsize, src_offset, nblock_, |
3390 | 0 | dest, nblock_ * blocksize, tmp, tmp2); |
3391 | 0 | } |
3392 | 0 | } |
3393 | | |
3394 | | /* Check whether current thread has to giveup */ |
3395 | 0 | if (context->thread_giveup_code <= 0) { |
3396 | 0 | break; |
3397 | 0 | } |
3398 | | |
3399 | | /* Check results for the compressed/decompressed block */ |
3400 | 0 | if (cbytes < 0) { /* compr/decompr failure */ |
3401 | | /* Set giveup_code error */ |
3402 | 0 | pthread_mutex_lock(&context->count_mutex); |
3403 | 0 | context->thread_giveup_code = cbytes; |
3404 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3405 | 0 | break; |
3406 | 0 | } |
3407 | | |
3408 | 0 | if (compress && !memcpyed) { |
3409 | | /* Start critical section */ |
3410 | 0 | pthread_mutex_lock(&context->count_mutex); |
3411 | 0 | ntdest = context->output_bytes; |
3412 | | // Note: do not use a typical local dict_training variable here |
3413 | | // because it is probably cached from previous calls if the number of |
3414 | | // threads does not change (the usual thing). |
3415 | 0 | if (!(context->use_dict && context->dict_cdict == NULL)) { |
3416 | 0 | _sw32(bstarts + nblock_, (int32_t) ntdest); |
3417 | 0 | } |
3418 | |
|
3419 | 0 | if ((cbytes == 0) || (ntdest + cbytes > maxbytes)) { |
3420 | 0 | context->thread_giveup_code = 0; /* incompressible buf */ |
3421 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3422 | 0 | break; |
3423 | 0 | } |
3424 | 0 | context->thread_nblock++; |
3425 | 0 | nblock_ = context->thread_nblock; |
3426 | 0 | context->output_bytes += cbytes; |
3427 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3428 | | /* End of critical section */ |
3429 | | |
3430 | | /* Copy the compressed buffer to destination */ |
3431 | 0 | memcpy(dest + ntdest, tmp2, (unsigned int) cbytes); |
3432 | 0 | } |
3433 | 0 | else if (static_schedule) { |
3434 | 0 | nblock_++; |
3435 | 0 | } |
3436 | 0 | else { |
3437 | 0 | pthread_mutex_lock(&context->count_mutex); |
3438 | 0 | context->thread_nblock++; |
3439 | 0 | nblock_ = context->thread_nblock; |
3440 | 0 | context->output_bytes += cbytes; |
3441 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3442 | 0 | } |
3443 | |
|
3444 | 0 | } /* closes while (nblock_) */ |
3445 | |
|
3446 | 0 | if (static_schedule) { |
3447 | 0 | pthread_mutex_lock(&context->count_mutex); |
3448 | 0 | context->output_bytes = context->sourcesize; |
3449 | 0 | if (compress) { |
3450 | 0 | context->output_bytes += context->header_overhead; |
3451 | 0 | } |
3452 | 0 | pthread_mutex_unlock(&context->count_mutex); |
3453 | 0 | } |
3454 | |
|
3455 | 0 | } |
3456 | | |
3457 | | /* Decompress & unshuffle several blocks in a single thread */ |
3458 | 0 | static void* t_blosc(void* ctxt) { |
3459 | 0 | struct thread_context* thcontext = (struct thread_context*)ctxt; |
3460 | 0 | blosc2_context* context = thcontext->parent_context; |
3461 | 0 | #ifdef BLOSC_POSIX_BARRIERS |
3462 | 0 | int rc; |
3463 | 0 | #endif |
3464 | |
|
3465 | 0 | while (1) { |
3466 | | /* Synchronization point for all threads (wait for initialization) */ |
3467 | 0 | WAIT_INIT(NULL, context); |
3468 | | |
3469 | 0 | if (context->end_threads) { |
3470 | 0 | break; |
3471 | 0 | } |
3472 | | |
3473 | 0 | t_blosc_do_job(ctxt); |
3474 | | |
3475 | | /* Meeting point for all threads (wait for finalization) */ |
3476 | 0 | WAIT_FINISH(NULL, context); |
3477 | 0 | } |
3478 | | |
3479 | | /* Cleanup our working space and context */ |
3480 | 0 | free_thread_context(thcontext); |
3481 | |
|
3482 | 0 | return (NULL); |
3483 | 0 | } |
3484 | | |
3485 | | |
3486 | 0 | int init_threadpool(blosc2_context *context) { |
3487 | 0 | int32_t tid; |
3488 | 0 | int rc2; |
3489 | | |
3490 | | /* Initialize mutex and condition variable objects */ |
3491 | 0 | pthread_mutex_init(&context->count_mutex, NULL); |
3492 | 0 | pthread_mutex_init(&context->delta_mutex, NULL); |
3493 | 0 | pthread_mutex_init(&context->nchunk_mutex, NULL); |
3494 | 0 | pthread_cond_init(&context->delta_cv, NULL); |
3495 | | |
3496 | | /* Set context thread sentinels */ |
3497 | 0 | context->thread_giveup_code = 1; |
3498 | 0 | context->thread_nblock = -1; |
3499 | | |
3500 | | /* Barrier initialization */ |
3501 | 0 | #ifdef BLOSC_POSIX_BARRIERS |
3502 | 0 | pthread_barrier_init(&context->barr_init, NULL, context->nthreads + 1); |
3503 | 0 | pthread_barrier_init(&context->barr_finish, NULL, context->nthreads + 1); |
3504 | | #else |
3505 | | pthread_mutex_init(&context->count_threads_mutex, NULL); |
3506 | | pthread_cond_init(&context->count_threads_cv, NULL); |
3507 | | context->count_threads = 0; /* Reset threads counter */ |
3508 | | #endif |
3509 | |
|
3510 | 0 | if (threads_callback) { |
3511 | | /* Create thread contexts to store data for callback threads */ |
3512 | 0 | context->thread_contexts = (struct thread_context *)my_malloc( |
3513 | 0 | context->nthreads * sizeof(struct thread_context)); |
3514 | 0 | BLOSC_ERROR_NULL(context->thread_contexts, BLOSC2_ERROR_MEMORY_ALLOC); |
3515 | 0 | for (tid = 0; tid < context->nthreads; tid++) |
3516 | 0 | init_thread_context(context->thread_contexts + tid, context, tid); |
3517 | 0 | } |
3518 | 0 | else { |
3519 | 0 | #if !defined(_WIN32) |
3520 | | /* Initialize and set thread detached attribute */ |
3521 | 0 | pthread_attr_init(&context->ct_attr); |
3522 | 0 | pthread_attr_setdetachstate(&context->ct_attr, PTHREAD_CREATE_JOINABLE); |
3523 | 0 | #endif |
3524 | | |
3525 | | /* Make space for thread handlers */ |
3526 | 0 | context->threads = (pthread_t*)my_malloc( |
3527 | 0 | context->nthreads * sizeof(pthread_t)); |
3528 | 0 | BLOSC_ERROR_NULL(context->threads, BLOSC2_ERROR_MEMORY_ALLOC); |
3529 | | /* Finally, create the threads */ |
3530 | 0 | for (tid = 0; tid < context->nthreads; tid++) { |
3531 | | /* Create a thread context (will destroy when finished) */ |
3532 | 0 | struct thread_context *thread_context = create_thread_context(context, tid); |
3533 | 0 | BLOSC_ERROR_NULL(thread_context, BLOSC2_ERROR_THREAD_CREATE); |
3534 | 0 | #if !defined(_WIN32) |
3535 | 0 | rc2 = pthread_create(&context->threads[tid], &context->ct_attr, t_blosc, |
3536 | 0 | (void*)thread_context); |
3537 | | #else |
3538 | | rc2 = pthread_create(&context->threads[tid], NULL, t_blosc, |
3539 | | (void *)thread_context); |
3540 | | #endif |
3541 | 0 | if (rc2) { |
3542 | 0 | BLOSC_TRACE_ERROR("Return code from pthread_create() is %d.\n" |
3543 | 0 | "\tError detail: %s\n", rc2, strerror(rc2)); |
3544 | 0 | return BLOSC2_ERROR_THREAD_CREATE; |
3545 | 0 | } |
3546 | 0 | } |
3547 | 0 | } |
3548 | | |
3549 | | /* We have now started/initialized the threads */ |
3550 | 0 | context->threads_started = context->nthreads; |
3551 | 0 | context->new_nthreads = context->nthreads; |
3552 | |
|
3553 | 0 | return 0; |
3554 | 0 | } |
3555 | | |
3556 | | int16_t blosc2_get_nthreads(void) |
3557 | 74 | { |
3558 | 74 | return g_nthreads; |
3559 | 74 | } |
3560 | | |
3561 | 43 | int16_t blosc2_set_nthreads(int16_t nthreads) { |
3562 | 43 | int16_t ret = g_nthreads; /* the previous number of threads */ |
3563 | | |
3564 | | /* Check whether the library should be initialized */ |
3565 | 43 | if (!g_initlib) blosc2_init(); |
3566 | | |
3567 | 43 | if (nthreads != ret) { |
3568 | 0 | g_nthreads = nthreads; |
3569 | 0 | g_global_context->new_nthreads = nthreads; |
3570 | 0 | int16_t ret2 = check_nthreads(g_global_context); |
3571 | 0 | if (ret2 < 0) { |
3572 | 0 | return ret2; |
3573 | 0 | } |
3574 | 0 | } |
3575 | | |
3576 | 43 | return ret; |
3577 | 43 | } |
3578 | | |
3579 | | |
3580 | | const char* blosc1_get_compressor(void) |
3581 | 0 | { |
3582 | 0 | const char* compname; |
3583 | 0 | blosc2_compcode_to_compname(g_compressor, &compname); |
3584 | |
|
3585 | 0 | return compname; |
3586 | 0 | } |
3587 | | |
3588 | 0 | int blosc1_set_compressor(const char* compname) { |
3589 | 0 | int code = blosc2_compname_to_compcode(compname); |
3590 | 0 | if (code >= BLOSC_LAST_CODEC) { |
3591 | 0 | BLOSC_TRACE_ERROR("User defined codecs cannot be set here. Use Blosc2 mechanism instead."); |
3592 | 0 | BLOSC_ERROR(BLOSC2_ERROR_CODEC_SUPPORT); |
3593 | 0 | } |
3594 | 0 | g_compressor = code; |
3595 | | |
3596 | | /* Check whether the library should be initialized */ |
3597 | 0 | if (!g_initlib) blosc2_init(); |
3598 | |
|
3599 | 0 | return code; |
3600 | 0 | } |
3601 | | |
3602 | 0 | void blosc2_set_delta(int dodelta) { |
3603 | |
|
3604 | 0 | g_delta = dodelta; |
3605 | | |
3606 | | /* Check whether the library should be initialized */ |
3607 | 0 | if (!g_initlib) blosc2_init(); |
3608 | |
|
3609 | 0 | } |
3610 | | |
3611 | 0 | const char* blosc2_list_compressors(void) { |
3612 | 0 | static int compressors_list_done = 0; |
3613 | 0 | static char ret[256]; |
3614 | |
|
3615 | 0 | if (compressors_list_done) return ret; |
3616 | 0 | ret[0] = '\0'; |
3617 | 0 | strcat(ret, BLOSC_BLOSCLZ_COMPNAME); |
3618 | 0 | strcat(ret, ","); |
3619 | 0 | strcat(ret, BLOSC_LZ4_COMPNAME); |
3620 | 0 | strcat(ret, ","); |
3621 | 0 | strcat(ret, BLOSC_LZ4HC_COMPNAME); |
3622 | 0 | #if defined(HAVE_ZLIB) |
3623 | 0 | strcat(ret, ","); |
3624 | 0 | strcat(ret, BLOSC_ZLIB_COMPNAME); |
3625 | 0 | #endif /* HAVE_ZLIB */ |
3626 | 0 | #if defined(HAVE_ZSTD) |
3627 | 0 | strcat(ret, ","); |
3628 | 0 | strcat(ret, BLOSC_ZSTD_COMPNAME); |
3629 | 0 | #endif /* HAVE_ZSTD */ |
3630 | 0 | compressors_list_done = 1; |
3631 | 0 | return ret; |
3632 | 0 | } |
3633 | | |
3634 | | |
3635 | 0 | const char* blosc2_get_version_string(void) { |
3636 | 0 | return BLOSC2_VERSION_STRING; |
3637 | 0 | } |
3638 | | |
3639 | | |
3640 | 0 | int blosc2_get_complib_info(const char* compname, char** complib, char** version) { |
3641 | 0 | int clibcode; |
3642 | 0 | const char* clibname; |
3643 | 0 | const char* clibversion = "unknown"; |
3644 | 0 | char sbuffer[256]; |
3645 | |
|
3646 | 0 | clibcode = compname_to_clibcode(compname); |
3647 | 0 | clibname = clibcode_to_clibname(clibcode); |
3648 | | |
3649 | | /* complib version */ |
3650 | 0 | if (clibcode == BLOSC_BLOSCLZ_LIB) { |
3651 | 0 | clibversion = BLOSCLZ_VERSION_STRING; |
3652 | 0 | } |
3653 | 0 | else if (clibcode == BLOSC_LZ4_LIB) { |
3654 | 0 | sprintf(sbuffer, "%d.%d.%d", |
3655 | 0 | LZ4_VERSION_MAJOR, LZ4_VERSION_MINOR, LZ4_VERSION_RELEASE); |
3656 | 0 | clibversion = sbuffer; |
3657 | 0 | } |
3658 | 0 | #if defined(HAVE_ZLIB) |
3659 | 0 | else if (clibcode == BLOSC_ZLIB_LIB) { |
3660 | 0 | #ifdef ZLIB_COMPAT |
3661 | 0 | clibversion = ZLIB_VERSION; |
3662 | | #elif defined(HAVE_ZLIB_NG) |
3663 | | clibversion = ZLIBNG_VERSION; |
3664 | | #else |
3665 | | clibversion = ZLIB_VERSION; |
3666 | | #endif |
3667 | 0 | } |
3668 | 0 | #endif /* HAVE_ZLIB */ |
3669 | 0 | #if defined(HAVE_ZSTD) |
3670 | 0 | else if (clibcode == BLOSC_ZSTD_LIB) { |
3671 | 0 | sprintf(sbuffer, "%d.%d.%d", |
3672 | 0 | ZSTD_VERSION_MAJOR, ZSTD_VERSION_MINOR, ZSTD_VERSION_RELEASE); |
3673 | 0 | clibversion = sbuffer; |
3674 | 0 | } |
3675 | 0 | #endif /* HAVE_ZSTD */ |
3676 | |
|
3677 | | #ifdef _MSC_VER |
3678 | | *complib = _strdup(clibname); |
3679 | | *version = _strdup(clibversion); |
3680 | | #else |
3681 | 0 | *complib = strdup(clibname); |
3682 | 0 | *version = strdup(clibversion); |
3683 | 0 | #endif |
3684 | 0 | return clibcode; |
3685 | 0 | } |
3686 | | |
3687 | | /* Return `nbytes`, `cbytes` and `blocksize` from a compressed buffer. */ |
3688 | 0 | void blosc1_cbuffer_sizes(const void* cbuffer, size_t* nbytes, size_t* cbytes, size_t* blocksize) { |
3689 | 0 | int32_t nbytes32, cbytes32, blocksize32; |
3690 | 0 | blosc2_cbuffer_sizes(cbuffer, &nbytes32, &cbytes32, &blocksize32); |
3691 | 0 | *nbytes = nbytes32; |
3692 | 0 | *cbytes = cbytes32; |
3693 | 0 | *blocksize = blocksize32; |
3694 | 0 | } |
3695 | | |
3696 | 200k | int blosc2_cbuffer_sizes(const void* cbuffer, int32_t* nbytes, int32_t* cbytes, int32_t* blocksize) { |
3697 | 200k | blosc_header header; |
3698 | 200k | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
3699 | 200k | if (rc < 0) { |
3700 | | /* Return zeros if error reading header */ |
3701 | 0 | memset(&header, 0, sizeof(header)); |
3702 | 0 | } |
3703 | | |
3704 | | /* Read the interesting values */ |
3705 | 200k | if (nbytes != NULL) |
3706 | 200k | *nbytes = header.nbytes; |
3707 | 200k | if (cbytes != NULL) |
3708 | 200k | *cbytes = header.cbytes; |
3709 | 200k | if (blocksize != NULL) |
3710 | 134 | *blocksize = header.blocksize; |
3711 | 200k | return rc; |
3712 | 200k | } |
3713 | | |
3714 | 0 | int blosc1_cbuffer_validate(const void* cbuffer, size_t cbytes, size_t* nbytes) { |
3715 | 0 | int32_t header_cbytes; |
3716 | 0 | int32_t header_nbytes; |
3717 | 0 | if (cbytes < BLOSC_MIN_HEADER_LENGTH) { |
3718 | | /* Compressed data should contain enough space for header */ |
3719 | 0 | *nbytes = 0; |
3720 | 0 | return BLOSC2_ERROR_WRITE_BUFFER; |
3721 | 0 | } |
3722 | 0 | int rc = blosc2_cbuffer_sizes(cbuffer, &header_nbytes, &header_cbytes, NULL); |
3723 | 0 | if (rc < 0) { |
3724 | 0 | *nbytes = 0; |
3725 | 0 | return rc; |
3726 | 0 | } |
3727 | 0 | *nbytes = header_nbytes; |
3728 | 0 | if (header_cbytes != (int32_t)cbytes) { |
3729 | | /* Compressed size from header does not match `cbytes` */ |
3730 | 0 | *nbytes = 0; |
3731 | 0 | return BLOSC2_ERROR_INVALID_HEADER; |
3732 | 0 | } |
3733 | 0 | if (*nbytes > BLOSC2_MAX_BUFFERSIZE) { |
3734 | | /* Uncompressed size is larger than allowed */ |
3735 | 0 | *nbytes = 0; |
3736 | 0 | return BLOSC2_ERROR_MEMORY_ALLOC; |
3737 | 0 | } |
3738 | 0 | return 0; |
3739 | 0 | } |
3740 | | |
3741 | | /* Return `typesize` and `flags` from a compressed buffer. */ |
3742 | 0 | void blosc1_cbuffer_metainfo(const void* cbuffer, size_t* typesize, int* flags) { |
3743 | 0 | blosc_header header; |
3744 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
3745 | 0 | if (rc < 0) { |
3746 | 0 | *typesize = *flags = 0; |
3747 | 0 | return; |
3748 | 0 | } |
3749 | | |
3750 | | /* Read the interesting values */ |
3751 | 0 | *flags = header.flags; |
3752 | 0 | *typesize = header.typesize; |
3753 | 0 | } |
3754 | | |
3755 | | |
3756 | | /* Return version information from a compressed buffer. */ |
3757 | 0 | void blosc2_cbuffer_versions(const void* cbuffer, int* version, int* versionlz) { |
3758 | 0 | blosc_header header; |
3759 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
3760 | 0 | if (rc < 0) { |
3761 | 0 | *version = *versionlz = 0; |
3762 | 0 | return; |
3763 | 0 | } |
3764 | | |
3765 | | /* Read the version info */ |
3766 | 0 | *version = header.version; |
3767 | 0 | *versionlz = header.versionlz; |
3768 | 0 | } |
3769 | | |
3770 | | |
3771 | | /* Return the compressor library/format used in a compressed buffer. */ |
3772 | 0 | const char* blosc2_cbuffer_complib(const void* cbuffer) { |
3773 | 0 | blosc_header header; |
3774 | 0 | int clibcode; |
3775 | 0 | const char* complib; |
3776 | 0 | int rc = read_chunk_header((uint8_t *) cbuffer, BLOSC_MIN_HEADER_LENGTH, false, &header); |
3777 | 0 | if (rc < 0) { |
3778 | 0 | return NULL; |
3779 | 0 | } |
3780 | | |
3781 | | /* Read the compressor format/library info */ |
3782 | 0 | clibcode = (header.flags & 0xe0) >> 5; |
3783 | 0 | complib = clibcode_to_clibname(clibcode); |
3784 | 0 | return complib; |
3785 | 0 | } |
3786 | | |
3787 | | |
3788 | | /* Get the internal blocksize to be used during compression. 0 means |
3789 | | that an automatic blocksize is computed internally. */ |
3790 | | int blosc1_get_blocksize(void) |
3791 | 0 | { |
3792 | 0 | return (int)g_force_blocksize; |
3793 | 0 | } |
3794 | | |
3795 | | |
3796 | | /* Force the use of a specific blocksize. If 0, an automatic |
3797 | | blocksize will be used (the default). */ |
3798 | 0 | void blosc1_set_blocksize(size_t blocksize) { |
3799 | 0 | g_force_blocksize = (int32_t)blocksize; |
3800 | 0 | } |
3801 | | |
3802 | | |
3803 | | /* Force the use of a specific split mode. */ |
3804 | | void blosc1_set_splitmode(int mode) |
3805 | 0 | { |
3806 | 0 | g_splitmode = mode; |
3807 | 0 | } |
3808 | | |
3809 | | |
3810 | | /* Set pointer to super-chunk. If NULL, no super-chunk will be |
3811 | | reachable (the default). */ |
3812 | 0 | void blosc_set_schunk(blosc2_schunk* schunk) { |
3813 | 0 | g_schunk = schunk; |
3814 | 0 | g_global_context->schunk = schunk; |
3815 | 0 | } |
3816 | | |
3817 | | blosc2_io *blosc2_io_global = NULL; |
3818 | | blosc2_io_cb BLOSC2_IO_CB_DEFAULTS; |
3819 | | blosc2_io_cb BLOSC2_IO_CB_MMAP; |
3820 | | |
3821 | | int _blosc2_register_io_cb(const blosc2_io_cb *io); |
3822 | | |
3823 | 43 | void blosc2_init(void) { |
3824 | | /* Return if Blosc is already initialized */ |
3825 | 43 | if (g_initlib) return; |
3826 | | |
3827 | 43 | BLOSC2_IO_CB_DEFAULTS.id = BLOSC2_IO_FILESYSTEM; |
3828 | 43 | BLOSC2_IO_CB_DEFAULTS.name = "filesystem"; |
3829 | 43 | BLOSC2_IO_CB_DEFAULTS.is_allocation_necessary = true; |
3830 | 43 | BLOSC2_IO_CB_DEFAULTS.open = (blosc2_open_cb) blosc2_stdio_open; |
3831 | 43 | BLOSC2_IO_CB_DEFAULTS.close = (blosc2_close_cb) blosc2_stdio_close; |
3832 | 43 | BLOSC2_IO_CB_DEFAULTS.size = (blosc2_size_cb) blosc2_stdio_size; |
3833 | 43 | BLOSC2_IO_CB_DEFAULTS.write = (blosc2_write_cb) blosc2_stdio_write; |
3834 | 43 | BLOSC2_IO_CB_DEFAULTS.read = (blosc2_read_cb) blosc2_stdio_read; |
3835 | 43 | BLOSC2_IO_CB_DEFAULTS.truncate = (blosc2_truncate_cb) blosc2_stdio_truncate; |
3836 | 43 | BLOSC2_IO_CB_DEFAULTS.destroy = (blosc2_destroy_cb) blosc2_stdio_destroy; |
3837 | | |
3838 | 43 | _blosc2_register_io_cb(&BLOSC2_IO_CB_DEFAULTS); |
3839 | | |
3840 | 43 | BLOSC2_IO_CB_MMAP.id = BLOSC2_IO_FILESYSTEM_MMAP; |
3841 | 43 | BLOSC2_IO_CB_MMAP.name = "filesystem_mmap"; |
3842 | 43 | BLOSC2_IO_CB_MMAP.is_allocation_necessary = false; |
3843 | 43 | BLOSC2_IO_CB_MMAP.open = (blosc2_open_cb) blosc2_stdio_mmap_open; |
3844 | 43 | BLOSC2_IO_CB_MMAP.close = (blosc2_close_cb) blosc2_stdio_mmap_close; |
3845 | 43 | BLOSC2_IO_CB_MMAP.read = (blosc2_read_cb) blosc2_stdio_mmap_read; |
3846 | 43 | BLOSC2_IO_CB_MMAP.size = (blosc2_size_cb) blosc2_stdio_mmap_size; |
3847 | 43 | BLOSC2_IO_CB_MMAP.write = (blosc2_write_cb) blosc2_stdio_mmap_write; |
3848 | 43 | BLOSC2_IO_CB_MMAP.truncate = (blosc2_truncate_cb) blosc2_stdio_mmap_truncate; |
3849 | 43 | BLOSC2_IO_CB_MMAP.destroy = (blosc2_destroy_cb) blosc2_stdio_mmap_destroy; |
3850 | | |
3851 | 43 | _blosc2_register_io_cb(&BLOSC2_IO_CB_MMAP); |
3852 | | |
3853 | 43 | g_ncodecs = 0; |
3854 | 43 | g_nfilters = 0; |
3855 | 43 | g_ntuners = 0; |
3856 | | |
3857 | 43 | #if defined(HAVE_PLUGINS) |
3858 | 43 | #include "blosc2/blosc2-common.h" |
3859 | 43 | #include "blosc2/blosc2-stdio.h" |
3860 | 43 | register_codecs(); |
3861 | 43 | register_filters(); |
3862 | 43 | register_tuners(); |
3863 | 43 | #endif |
3864 | 43 | pthread_mutex_init(&global_comp_mutex, NULL); |
3865 | | /* Create a global context */ |
3866 | 43 | g_global_context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); |
3867 | 43 | memset(g_global_context, 0, sizeof(blosc2_context)); |
3868 | 43 | g_global_context->nthreads = g_nthreads; |
3869 | 43 | g_global_context->new_nthreads = g_nthreads; |
3870 | 43 | g_initlib = 1; |
3871 | 43 | } |
3872 | | |
3873 | | |
3874 | 43 | int blosc2_free_resources(void) { |
3875 | | /* Return if Blosc is not initialized */ |
3876 | 43 | if (!g_initlib) return BLOSC2_ERROR_FAILURE; |
3877 | | |
3878 | 43 | return release_threadpool(g_global_context); |
3879 | 43 | } |
3880 | | |
3881 | | |
3882 | 43 | void blosc2_destroy(void) { |
3883 | | /* Return if Blosc is not initialized */ |
3884 | 43 | if (!g_initlib) return; |
3885 | | |
3886 | 43 | blosc2_free_resources(); |
3887 | 43 | g_initlib = 0; |
3888 | 43 | blosc2_free_ctx(g_global_context); |
3889 | | |
3890 | 43 | pthread_mutex_destroy(&global_comp_mutex); |
3891 | | |
3892 | 43 | } |
3893 | | |
3894 | | |
3895 | 193 | int release_threadpool(blosc2_context *context) { |
3896 | 193 | int32_t t; |
3897 | 193 | void* status; |
3898 | 193 | int rc; |
3899 | | |
3900 | 193 | if (context->threads_started > 0) { |
3901 | 0 | if (threads_callback) { |
3902 | | /* free context data for user-managed threads */ |
3903 | 0 | for (t=0; t<context->threads_started; t++) |
3904 | 0 | destroy_thread_context(context->thread_contexts + t); |
3905 | 0 | my_free(context->thread_contexts); |
3906 | 0 | } |
3907 | 0 | else { |
3908 | | /* Tell all existing threads to finish */ |
3909 | 0 | context->end_threads = 1; |
3910 | 0 | WAIT_INIT(-1, context); |
3911 | | |
3912 | | /* Join exiting threads */ |
3913 | 0 | for (t = 0; t < context->threads_started; t++) { |
3914 | 0 | rc = pthread_join(context->threads[t], &status); |
3915 | 0 | if (rc) { |
3916 | 0 | BLOSC_TRACE_ERROR("Return code from pthread_join() is %d\n" |
3917 | 0 | "\tError detail: %s.", rc, strerror(rc)); |
3918 | 0 | } |
3919 | 0 | } |
3920 | | |
3921 | | /* Thread attributes */ |
3922 | 0 | #if !defined(_WIN32) |
3923 | 0 | pthread_attr_destroy(&context->ct_attr); |
3924 | 0 | #endif |
3925 | | |
3926 | | /* Release thread handlers */ |
3927 | 0 | my_free(context->threads); |
3928 | 0 | } |
3929 | | |
3930 | | /* Release mutex and condition variable objects */ |
3931 | 0 | pthread_mutex_destroy(&context->count_mutex); |
3932 | 0 | pthread_mutex_destroy(&context->delta_mutex); |
3933 | 0 | pthread_mutex_destroy(&context->nchunk_mutex); |
3934 | 0 | pthread_cond_destroy(&context->delta_cv); |
3935 | | |
3936 | | /* Barriers */ |
3937 | 0 | #ifdef BLOSC_POSIX_BARRIERS |
3938 | 0 | pthread_barrier_destroy(&context->barr_init); |
3939 | 0 | pthread_barrier_destroy(&context->barr_finish); |
3940 | | #else |
3941 | | pthread_mutex_destroy(&context->count_threads_mutex); |
3942 | | pthread_cond_destroy(&context->count_threads_cv); |
3943 | | context->count_threads = 0; /* Reset threads counter */ |
3944 | | #endif |
3945 | | |
3946 | | /* Reset flags and counters */ |
3947 | 0 | context->end_threads = 0; |
3948 | 0 | context->threads_started = 0; |
3949 | 0 | } |
3950 | | |
3951 | | |
3952 | 193 | return 0; |
3953 | 193 | } |
3954 | | |
3955 | | |
3956 | | /* Contexts */ |
3957 | | |
3958 | | /* Create a context for compression */ |
3959 | 70 | blosc2_context* blosc2_create_cctx(blosc2_cparams cparams) { |
3960 | 70 | blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); |
3961 | 70 | BLOSC_ERROR_NULL(context, NULL); |
3962 | | |
3963 | | /* Populate the context, using zeros as default values */ |
3964 | 70 | memset(context, 0, sizeof(blosc2_context)); |
3965 | 70 | context->do_compress = 1; /* meant for compression */ |
3966 | 70 | context->use_dict = cparams.use_dict; |
3967 | 70 | if (cparams.instr_codec) { |
3968 | 0 | context->blosc2_flags = BLOSC2_INSTR_CODEC; |
3969 | 0 | } |
3970 | | |
3971 | 490 | for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { |
3972 | 420 | context->filters[i] = cparams.filters[i]; |
3973 | 420 | context->filters_meta[i] = cparams.filters_meta[i]; |
3974 | | |
3975 | 420 | if (context->filters[i] >= BLOSC_LAST_FILTER && context->filters[i] <= BLOSC2_DEFINED_FILTERS_STOP) { |
3976 | 0 | BLOSC_TRACE_ERROR("filter (%d) is not yet defined", |
3977 | 0 | context->filters[i]); |
3978 | 0 | free(context); |
3979 | 0 | return NULL; |
3980 | 0 | } |
3981 | 420 | if (context->filters[i] > BLOSC_LAST_REGISTERED_FILTER && context->filters[i] <= BLOSC2_GLOBAL_REGISTERED_FILTERS_STOP) { |
3982 | 0 | BLOSC_TRACE_ERROR("filter (%d) is not yet defined", |
3983 | 0 | context->filters[i]); |
3984 | 0 | free(context); |
3985 | 0 | return NULL; |
3986 | 0 | } |
3987 | 420 | } |
3988 | | |
3989 | 70 | #if defined(HAVE_PLUGINS) |
3990 | 70 | #include "blosc2/codecs-registry.h" |
3991 | 70 | if ((context->compcode >= BLOSC_CODEC_ZFP_FIXED_ACCURACY) && (context->compcode <= BLOSC_CODEC_ZFP_FIXED_RATE)) { |
3992 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; ++i) { |
3993 | 0 | if ((context->filters[i] == BLOSC_SHUFFLE) || (context->filters[i] == BLOSC_BITSHUFFLE)) { |
3994 | 0 | BLOSC_TRACE_ERROR("ZFP cannot be run in presence of SHUFFLE / BITSHUFFLE"); |
3995 | 0 | return NULL; |
3996 | 0 | } |
3997 | 0 | } |
3998 | 0 | } |
3999 | 70 | #endif /* HAVE_PLUGINS */ |
4000 | | |
4001 | | /* Check for a BLOSC_SHUFFLE environment variable */ |
4002 | 70 | int doshuffle = -1; |
4003 | 70 | char* envvar = getenv("BLOSC_SHUFFLE"); |
4004 | 70 | if (envvar != NULL) { |
4005 | 0 | if (strcmp(envvar, "NOSHUFFLE") == 0) { |
4006 | 0 | doshuffle = BLOSC_NOSHUFFLE; |
4007 | 0 | } |
4008 | 0 | else if (strcmp(envvar, "SHUFFLE") == 0) { |
4009 | 0 | doshuffle = BLOSC_SHUFFLE; |
4010 | 0 | } |
4011 | 0 | else if (strcmp(envvar, "BITSHUFFLE") == 0) { |
4012 | 0 | doshuffle = BLOSC_BITSHUFFLE; |
4013 | 0 | } |
4014 | 0 | else { |
4015 | 0 | BLOSC_TRACE_WARNING("BLOSC_SHUFFLE environment variable '%s' not recognized\n", envvar); |
4016 | 0 | } |
4017 | 0 | } |
4018 | | /* Check for a BLOSC_DELTA environment variable */ |
4019 | 0 | int dodelta = BLOSC_NOFILTER; |
4020 | 70 | envvar = getenv("BLOSC_DELTA"); |
4021 | 70 | if (envvar != NULL) { |
4022 | 0 | if (strcmp(envvar, "1") == 0) { |
4023 | 0 | dodelta = BLOSC_DELTA; |
4024 | 0 | } else if (strcmp(envvar, "0") == 0){ |
4025 | 0 | dodelta = BLOSC_NOFILTER; |
4026 | 0 | } |
4027 | 0 | else { |
4028 | 0 | BLOSC_TRACE_WARNING("BLOSC_DELTA environment variable '%s' not recognized\n", envvar); |
4029 | 0 | } |
4030 | 0 | } |
4031 | | /* Check for a BLOSC_TYPESIZE environment variable */ |
4032 | 0 | context->typesize = cparams.typesize; |
4033 | 70 | envvar = getenv("BLOSC_TYPESIZE"); |
4034 | 70 | if (envvar != NULL) { |
4035 | 0 | int32_t value; |
4036 | 0 | value = (int32_t) strtol(envvar, NULL, 10); |
4037 | 0 | if ((errno != EINVAL) && (value > 0)) { |
4038 | 0 | context->typesize = value; |
4039 | 0 | } |
4040 | 0 | else { |
4041 | 0 | BLOSC_TRACE_WARNING("BLOSC_TYPESIZE environment variable '%s' not recognized\n", envvar); |
4042 | 0 | } |
4043 | 0 | } |
4044 | 0 | build_filters(doshuffle, dodelta, context->typesize, context->filters); |
4045 | | |
4046 | 70 | context->clevel = cparams.clevel; |
4047 | | /* Check for a BLOSC_CLEVEL environment variable */ |
4048 | 70 | envvar = getenv("BLOSC_CLEVEL"); |
4049 | 70 | if (envvar != NULL) { |
4050 | 0 | int value; |
4051 | 0 | value = (int)strtol(envvar, NULL, 10); |
4052 | 0 | if ((errno != EINVAL) && (value >= 0)) { |
4053 | 0 | context->clevel = value; |
4054 | 0 | } |
4055 | 0 | else { |
4056 | 0 | BLOSC_TRACE_WARNING("BLOSC_CLEVEL environment variable '%s' not recognized\n", envvar); |
4057 | 0 | } |
4058 | 0 | } |
4059 | | |
4060 | 0 | context->compcode = cparams.compcode; |
4061 | | /* Check for a BLOSC_COMPRESSOR environment variable */ |
4062 | 70 | envvar = getenv("BLOSC_COMPRESSOR"); |
4063 | 70 | if (envvar != NULL) { |
4064 | 0 | int codec = blosc2_compname_to_compcode(envvar); |
4065 | 0 | if (codec >= BLOSC_LAST_CODEC) { |
4066 | 0 | BLOSC_TRACE_ERROR("User defined codecs cannot be set here. Use Blosc2 mechanism instead."); |
4067 | 0 | return NULL; |
4068 | 0 | } |
4069 | 0 | context->compcode = codec; |
4070 | 0 | } |
4071 | 70 | context->compcode_meta = cparams.compcode_meta; |
4072 | | |
4073 | 70 | context->blocksize = cparams.blocksize; |
4074 | | /* Check for a BLOSC_BLOCKSIZE environment variable */ |
4075 | 70 | envvar = getenv("BLOSC_BLOCKSIZE"); |
4076 | 70 | if (envvar != NULL) { |
4077 | 0 | int32_t blocksize; |
4078 | 0 | blocksize = (int32_t) strtol(envvar, NULL, 10); |
4079 | 0 | if ((errno != EINVAL) && (blocksize > 0)) { |
4080 | 0 | context->blocksize = blocksize; |
4081 | 0 | } |
4082 | 0 | else { |
4083 | 0 | BLOSC_TRACE_WARNING("BLOSC_BLOCKSIZE environment variable '%s' not recognized\n", envvar); |
4084 | 0 | } |
4085 | 0 | } |
4086 | | |
4087 | 0 | context->nthreads = cparams.nthreads; |
4088 | | /* Check for a BLOSC_NTHREADS environment variable */ |
4089 | 70 | envvar = getenv("BLOSC_NTHREADS"); |
4090 | 70 | if (envvar != NULL) { |
4091 | 0 | int16_t nthreads = (int16_t) strtol(envvar, NULL, 10); |
4092 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
4093 | 0 | context->nthreads = nthreads; |
4094 | 0 | } |
4095 | 0 | else { |
4096 | 0 | BLOSC_TRACE_WARNING("BLOSC_NTHREADS environment variable '%s' not recognized\n", envvar); |
4097 | 0 | } |
4098 | 0 | } |
4099 | 0 | context->new_nthreads = context->nthreads; |
4100 | | |
4101 | 70 | context->splitmode = cparams.splitmode; |
4102 | | /* Check for a BLOSC_SPLITMODE environment variable */ |
4103 | 70 | envvar = getenv("BLOSC_SPLITMODE"); |
4104 | 70 | if (envvar != NULL) { |
4105 | 0 | int32_t splitmode = -1; |
4106 | 0 | if (strcmp(envvar, "ALWAYS") == 0) { |
4107 | 0 | splitmode = BLOSC_ALWAYS_SPLIT; |
4108 | 0 | } |
4109 | 0 | else if (strcmp(envvar, "NEVER") == 0) { |
4110 | 0 | splitmode = BLOSC_NEVER_SPLIT; |
4111 | 0 | } |
4112 | 0 | else if (strcmp(envvar, "AUTO") == 0) { |
4113 | 0 | splitmode = BLOSC_AUTO_SPLIT; |
4114 | 0 | } |
4115 | 0 | else if (strcmp(envvar, "FORWARD_COMPAT") == 0) { |
4116 | 0 | splitmode = BLOSC_FORWARD_COMPAT_SPLIT; |
4117 | 0 | } |
4118 | 0 | else { |
4119 | 0 | BLOSC_TRACE_WARNING("BLOSC_SPLITMODE environment variable '%s' not recognized\n", envvar); |
4120 | 0 | } |
4121 | 0 | if (splitmode >= 0) { |
4122 | 0 | context->splitmode = splitmode; |
4123 | 0 | } |
4124 | 0 | } |
4125 | | |
4126 | 0 | context->threads_started = 0; |
4127 | 70 | context->schunk = cparams.schunk; |
4128 | | |
4129 | 70 | if (cparams.prefilter != NULL) { |
4130 | 0 | context->prefilter = cparams.prefilter; |
4131 | 0 | context->preparams = (blosc2_prefilter_params*)my_malloc(sizeof(blosc2_prefilter_params)); |
4132 | 0 | BLOSC_ERROR_NULL(context->preparams, NULL); |
4133 | 0 | memcpy(context->preparams, cparams.preparams, sizeof(blosc2_prefilter_params)); |
4134 | 0 | } |
4135 | | |
4136 | 70 | if (cparams.tuner_id <= 0) { |
4137 | 70 | cparams.tuner_id = g_tuner; |
4138 | 70 | } else { |
4139 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
4140 | 0 | if (g_tuners[i].id == cparams.tuner_id) { |
4141 | 0 | if (g_tuners[i].init == NULL) { |
4142 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
4143 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
4144 | 0 | return NULL; |
4145 | 0 | } |
4146 | 0 | } |
4147 | 0 | if (g_tuners[i].init(cparams.tuner_params, context, NULL) < 0) { |
4148 | 0 | BLOSC_TRACE_ERROR("Error in user-defined tuner %d init function\n", cparams.tuner_id); |
4149 | 0 | return NULL; |
4150 | 0 | } |
4151 | 0 | goto urtunersuccess; |
4152 | 0 | } |
4153 | 0 | } |
4154 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", cparams.tuner_id); |
4155 | 0 | return NULL; |
4156 | 0 | } |
4157 | 70 | urtunersuccess:; |
4158 | | |
4159 | 70 | context->tuner_id = cparams.tuner_id; |
4160 | | |
4161 | 70 | context->codec_params = cparams.codec_params; |
4162 | 70 | memcpy(context->filter_params, cparams.filter_params, BLOSC2_MAX_FILTERS * sizeof(void*)); |
4163 | | |
4164 | 70 | return context; |
4165 | 70 | } |
4166 | | |
4167 | | /* Create a context for decompression */ |
4168 | 37 | blosc2_context* blosc2_create_dctx(blosc2_dparams dparams) { |
4169 | 37 | blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); |
4170 | 37 | BLOSC_ERROR_NULL(context, NULL); |
4171 | | |
4172 | | /* Populate the context, using zeros as default values */ |
4173 | 37 | memset(context, 0, sizeof(blosc2_context)); |
4174 | 37 | context->do_compress = 0; /* Meant for decompression */ |
4175 | | |
4176 | 37 | context->nthreads = dparams.nthreads; |
4177 | 37 | char* envvar = getenv("BLOSC_NTHREADS"); |
4178 | 37 | if (envvar != NULL) { |
4179 | 0 | long nthreads = strtol(envvar, NULL, 10); |
4180 | 0 | if ((errno != EINVAL) && (nthreads > 0)) { |
4181 | 0 | context->nthreads = (int16_t) nthreads; |
4182 | 0 | } |
4183 | 0 | } |
4184 | 37 | context->new_nthreads = context->nthreads; |
4185 | | |
4186 | 37 | context->threads_started = 0; |
4187 | 37 | context->block_maskout = NULL; |
4188 | 37 | context->block_maskout_nitems = 0; |
4189 | 37 | context->schunk = dparams.schunk; |
4190 | | |
4191 | 37 | if (dparams.postfilter != NULL) { |
4192 | 0 | context->postfilter = dparams.postfilter; |
4193 | 0 | context->postparams = (blosc2_postfilter_params*)my_malloc(sizeof(blosc2_postfilter_params)); |
4194 | 0 | BLOSC_ERROR_NULL(context->postparams, NULL); |
4195 | 0 | memcpy(context->postparams, dparams.postparams, sizeof(blosc2_postfilter_params)); |
4196 | 0 | } |
4197 | | |
4198 | 37 | return context; |
4199 | 37 | } |
4200 | | |
4201 | | |
4202 | 150 | void blosc2_free_ctx(blosc2_context* context) { |
4203 | 150 | release_threadpool(context); |
4204 | 150 | if (context->serial_context != NULL) { |
4205 | 35 | free_thread_context(context->serial_context); |
4206 | 35 | } |
4207 | 150 | if (context->dict_cdict != NULL) { |
4208 | 0 | #ifdef HAVE_ZSTD |
4209 | 0 | ZSTD_freeCDict(context->dict_cdict); |
4210 | 0 | #endif |
4211 | 0 | } |
4212 | 150 | if (context->dict_ddict != NULL) { |
4213 | 1 | #ifdef HAVE_ZSTD |
4214 | 1 | ZSTD_freeDDict(context->dict_ddict); |
4215 | 1 | #endif |
4216 | 1 | } |
4217 | 150 | if (context->tuner_params != NULL) { |
4218 | 0 | int rc; |
4219 | 0 | if (context->tuner_id < BLOSC_LAST_TUNER && context->tuner_id == BLOSC_STUNE) { |
4220 | 0 | rc = blosc_stune_free(context); |
4221 | 0 | } else { |
4222 | 0 | for (int i = 0; i < g_ntuners; ++i) { |
4223 | 0 | if (g_tuners[i].id == context->tuner_id) { |
4224 | 0 | if (g_tuners[i].free == NULL) { |
4225 | 0 | if (fill_tuner(&g_tuners[i]) < 0) { |
4226 | 0 | BLOSC_TRACE_ERROR("Could not load tuner %d.", g_tuners[i].id); |
4227 | 0 | return; |
4228 | 0 | } |
4229 | 0 | } |
4230 | 0 | rc = g_tuners[i].free(context); |
4231 | 0 | goto urtunersuccess; |
4232 | 0 | } |
4233 | 0 | } |
4234 | 0 | BLOSC_TRACE_ERROR("User-defined tuner %d not found\n", context->tuner_id); |
4235 | 0 | return; |
4236 | 0 | urtunersuccess:; |
4237 | 0 | } |
4238 | 0 | if (rc < 0) { |
4239 | 0 | BLOSC_TRACE_ERROR("Error in user-defined tuner free function\n"); |
4240 | 0 | return; |
4241 | 0 | } |
4242 | 0 | } |
4243 | 150 | if (context->prefilter != NULL) { |
4244 | 0 | my_free(context->preparams); |
4245 | 0 | } |
4246 | 150 | if (context->postfilter != NULL) { |
4247 | 0 | my_free(context->postparams); |
4248 | 0 | } |
4249 | | |
4250 | 150 | if (context->block_maskout != NULL) { |
4251 | 0 | free(context->block_maskout); |
4252 | 0 | } |
4253 | 150 | my_free(context); |
4254 | 150 | } |
4255 | | |
4256 | | |
4257 | 0 | int blosc2_ctx_get_cparams(blosc2_context *ctx, blosc2_cparams *cparams) { |
4258 | 0 | cparams->compcode = ctx->compcode; |
4259 | 0 | cparams->compcode_meta = ctx->compcode_meta; |
4260 | 0 | cparams->clevel = ctx->clevel; |
4261 | 0 | cparams->use_dict = ctx->use_dict; |
4262 | 0 | cparams->instr_codec = ctx->blosc2_flags & BLOSC2_INSTR_CODEC; |
4263 | 0 | cparams->typesize = ctx->typesize; |
4264 | 0 | cparams->nthreads = ctx->nthreads; |
4265 | 0 | cparams->blocksize = ctx->blocksize; |
4266 | 0 | cparams->splitmode = ctx->splitmode; |
4267 | 0 | cparams->schunk = ctx->schunk; |
4268 | 0 | for (int i = 0; i < BLOSC2_MAX_FILTERS; ++i) { |
4269 | 0 | cparams->filters[i] = ctx->filters[i]; |
4270 | 0 | cparams->filters_meta[i] = ctx->filters_meta[i]; |
4271 | 0 | } |
4272 | 0 | cparams->prefilter = ctx->prefilter; |
4273 | 0 | cparams->preparams = ctx->preparams; |
4274 | 0 | cparams->tuner_id = ctx->tuner_id; |
4275 | 0 | cparams->codec_params = ctx->codec_params; |
4276 | |
|
4277 | 0 | return BLOSC2_ERROR_SUCCESS; |
4278 | 0 | } |
4279 | | |
4280 | | |
4281 | 0 | int blosc2_ctx_get_dparams(blosc2_context *ctx, blosc2_dparams *dparams) { |
4282 | 0 | dparams->nthreads = ctx->nthreads; |
4283 | 0 | dparams->schunk = ctx->schunk; |
4284 | 0 | dparams->postfilter = ctx->postfilter; |
4285 | 0 | dparams->postparams = ctx->postparams; |
4286 | |
|
4287 | 0 | return BLOSC2_ERROR_SUCCESS; |
4288 | 0 | } |
4289 | | |
4290 | | |
4291 | | /* Set a maskout in decompression context */ |
4292 | 0 | int blosc2_set_maskout(blosc2_context *ctx, bool *maskout, int nblocks) { |
4293 | |
|
4294 | 0 | if (ctx->block_maskout != NULL) { |
4295 | | // Get rid of a possible mask here |
4296 | 0 | free(ctx->block_maskout); |
4297 | 0 | } |
4298 | |
|
4299 | 0 | bool *maskout_ = malloc(nblocks); |
4300 | 0 | BLOSC_ERROR_NULL(maskout_, BLOSC2_ERROR_MEMORY_ALLOC); |
4301 | 0 | memcpy(maskout_, maskout, nblocks); |
4302 | 0 | ctx->block_maskout = maskout_; |
4303 | 0 | ctx->block_maskout_nitems = nblocks; |
4304 | |
|
4305 | 0 | return 0; |
4306 | 0 | } |
4307 | | |
4308 | | |
4309 | | /* Create a chunk made of zeros */ |
4310 | 33 | int blosc2_chunk_zeros(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
4311 | 33 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
4312 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
4313 | 0 | return BLOSC2_ERROR_DATA; |
4314 | 0 | } |
4315 | | |
4316 | 33 | if ((nbytes > 0) && (nbytes % cparams.typesize)) { |
4317 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
4318 | 0 | return BLOSC2_ERROR_DATA; |
4319 | 0 | } |
4320 | | |
4321 | 33 | blosc_header header; |
4322 | 33 | blosc2_context* context = blosc2_create_cctx(cparams); |
4323 | 33 | if (context == NULL) { |
4324 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
4325 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
4326 | 0 | } |
4327 | | |
4328 | 33 | int error = initialize_context_compression( |
4329 | 33 | context, NULL, nbytes, dest, destsize, |
4330 | 33 | context->clevel, context->filters, context->filters_meta, |
4331 | 33 | context->typesize, context->compcode, context->blocksize, |
4332 | 33 | context->new_nthreads, context->nthreads, context->splitmode, |
4333 | 33 | context->tuner_id, context->tuner_params, context->schunk); |
4334 | 33 | if (error <= 0) { |
4335 | 0 | blosc2_free_ctx(context); |
4336 | 0 | return error; |
4337 | 0 | } |
4338 | | |
4339 | 33 | memset(&header, 0, sizeof(header)); |
4340 | 33 | header.version = BLOSC2_VERSION_FORMAT; |
4341 | 33 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
4342 | 33 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
4343 | 33 | header.typesize = context->typesize; |
4344 | 33 | header.nbytes = (int32_t)nbytes; |
4345 | 33 | header.blocksize = context->blocksize; |
4346 | 33 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
4347 | 33 | header.blosc2_flags = BLOSC2_SPECIAL_ZERO << 4; // mark chunk as all zeros |
4348 | 33 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
4349 | | |
4350 | 33 | blosc2_free_ctx(context); |
4351 | | |
4352 | 33 | return BLOSC_EXTENDED_HEADER_LENGTH; |
4353 | 33 | } |
4354 | | |
4355 | | |
4356 | | /* Create a chunk made of uninitialized values */ |
4357 | 0 | int blosc2_chunk_uninit(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
4358 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
4359 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
4360 | 0 | return BLOSC2_ERROR_DATA; |
4361 | 0 | } |
4362 | | |
4363 | 0 | if (nbytes % cparams.typesize) { |
4364 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
4365 | 0 | return BLOSC2_ERROR_DATA; |
4366 | 0 | } |
4367 | | |
4368 | 0 | blosc_header header; |
4369 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
4370 | 0 | if (context == NULL) { |
4371 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
4372 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
4373 | 0 | } |
4374 | 0 | int error = initialize_context_compression( |
4375 | 0 | context, NULL, nbytes, dest, destsize, |
4376 | 0 | context->clevel, context->filters, context->filters_meta, |
4377 | 0 | context->typesize, context->compcode, context->blocksize, |
4378 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
4379 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
4380 | 0 | if (error <= 0) { |
4381 | 0 | blosc2_free_ctx(context); |
4382 | 0 | return error; |
4383 | 0 | } |
4384 | | |
4385 | 0 | memset(&header, 0, sizeof(header)); |
4386 | 0 | header.version = BLOSC2_VERSION_FORMAT; |
4387 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
4388 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
4389 | 0 | header.typesize = context->typesize; |
4390 | 0 | header.nbytes = (int32_t)nbytes; |
4391 | 0 | header.blocksize = context->blocksize; |
4392 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
4393 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_UNINIT << 4; // mark chunk as uninitialized |
4394 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
4395 | |
|
4396 | 0 | blosc2_free_ctx(context); |
4397 | |
|
4398 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH; |
4399 | 0 | } |
4400 | | |
4401 | | |
4402 | | /* Create a chunk made of nans */ |
4403 | 0 | int blosc2_chunk_nans(blosc2_cparams cparams, const int32_t nbytes, void* dest, int32_t destsize) { |
4404 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH) { |
4405 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
4406 | 0 | return BLOSC2_ERROR_DATA; |
4407 | 0 | } |
4408 | | |
4409 | 0 | if (nbytes % cparams.typesize) { |
4410 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
4411 | 0 | return BLOSC2_ERROR_DATA; |
4412 | 0 | } |
4413 | | |
4414 | 0 | blosc_header header; |
4415 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
4416 | 0 | if (context == NULL) { |
4417 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
4418 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
4419 | 0 | } |
4420 | | |
4421 | 0 | int error = initialize_context_compression( |
4422 | 0 | context, NULL, nbytes, dest, destsize, |
4423 | 0 | context->clevel, context->filters, context->filters_meta, |
4424 | 0 | context->typesize, context->compcode, context->blocksize, |
4425 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
4426 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
4427 | 0 | if (error <= 0) { |
4428 | 0 | blosc2_free_ctx(context); |
4429 | 0 | return error; |
4430 | 0 | } |
4431 | | |
4432 | 0 | memset(&header, 0, sizeof(header)); |
4433 | 0 | header.version = BLOSC2_VERSION_FORMAT; |
4434 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
4435 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
4436 | 0 | header.typesize = context->typesize; |
4437 | 0 | header.nbytes = (int32_t)nbytes; |
4438 | 0 | header.blocksize = context->blocksize; |
4439 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH; |
4440 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_NAN << 4; // mark chunk as all NaNs |
4441 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
4442 | |
|
4443 | 0 | blosc2_free_ctx(context); |
4444 | |
|
4445 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH; |
4446 | 0 | } |
4447 | | |
4448 | | |
4449 | | /* Create a chunk made of repeated values */ |
4450 | | int blosc2_chunk_repeatval(blosc2_cparams cparams, const int32_t nbytes, |
4451 | 0 | void* dest, int32_t destsize, const void* repeatval) { |
4452 | 0 | if (destsize < BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize) { |
4453 | 0 | BLOSC_TRACE_ERROR("dest buffer is not long enough"); |
4454 | 0 | return BLOSC2_ERROR_DATA; |
4455 | 0 | } |
4456 | | |
4457 | 0 | if (nbytes % cparams.typesize) { |
4458 | 0 | BLOSC_TRACE_ERROR("nbytes must be a multiple of typesize"); |
4459 | 0 | return BLOSC2_ERROR_DATA; |
4460 | 0 | } |
4461 | | |
4462 | 0 | blosc_header header; |
4463 | 0 | blosc2_context* context = blosc2_create_cctx(cparams); |
4464 | 0 | if (context == NULL) { |
4465 | 0 | BLOSC_TRACE_ERROR("Error while creating the compression context"); |
4466 | 0 | return BLOSC2_ERROR_NULL_POINTER; |
4467 | 0 | } |
4468 | | |
4469 | 0 | int error = initialize_context_compression( |
4470 | 0 | context, NULL, nbytes, dest, destsize, |
4471 | 0 | context->clevel, context->filters, context->filters_meta, |
4472 | 0 | context->typesize, context->compcode, context->blocksize, |
4473 | 0 | context->new_nthreads, context->nthreads, context->splitmode, |
4474 | 0 | context->tuner_id, context->tuner_params, context->schunk); |
4475 | 0 | if (error <= 0) { |
4476 | 0 | blosc2_free_ctx(context); |
4477 | 0 | return error; |
4478 | 0 | } |
4479 | | |
4480 | 0 | memset(&header, 0, sizeof(header)); |
4481 | 0 | header.version = BLOSC2_VERSION_FORMAT; |
4482 | 0 | header.versionlz = BLOSC_BLOSCLZ_VERSION_FORMAT; |
4483 | 0 | header.flags = BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE; // extended header |
4484 | 0 | header.typesize = context->typesize; |
4485 | 0 | header.nbytes = (int32_t)nbytes; |
4486 | 0 | header.blocksize = context->blocksize; |
4487 | 0 | header.cbytes = BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize; |
4488 | 0 | header.blosc2_flags = BLOSC2_SPECIAL_VALUE << 4; // mark chunk as all repeated value |
4489 | 0 | memcpy((uint8_t *)dest, &header, sizeof(header)); |
4490 | 0 | memcpy((uint8_t *)dest + sizeof(header), repeatval, cparams.typesize); |
4491 | |
|
4492 | 0 | blosc2_free_ctx(context); |
4493 | |
|
4494 | 0 | return BLOSC_EXTENDED_HEADER_LENGTH + cparams.typesize; |
4495 | 0 | } |
4496 | | |
4497 | | |
4498 | | /* Register filters */ |
4499 | | |
4500 | 215 | int register_filter_private(blosc2_filter *filter) { |
4501 | 215 | BLOSC_ERROR_NULL(filter, BLOSC2_ERROR_INVALID_PARAM); |
4502 | 215 | if (g_nfilters == UINT8_MAX) { |
4503 | 0 | BLOSC_TRACE_ERROR("Can not register more filters"); |
4504 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
4505 | 0 | } |
4506 | 215 | if (filter->id < BLOSC2_GLOBAL_REGISTERED_FILTERS_START) { |
4507 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_FILTERS_START); |
4508 | 0 | return BLOSC2_ERROR_FAILURE; |
4509 | 0 | } |
4510 | | /* This condition can never be fulfilled |
4511 | | if (filter->id > BLOSC2_USER_REGISTERED_FILTERS_STOP) { |
4512 | | BLOSC_TRACE_ERROR("The id must be less than or equal to %d", BLOSC2_USER_REGISTERED_FILTERS_STOP); |
4513 | | return BLOSC2_ERROR_FAILURE; |
4514 | | } |
4515 | | */ |
4516 | | |
4517 | 645 | for (uint64_t i = 0; i < g_nfilters; ++i) { |
4518 | 430 | if (g_filters[i].id == filter->id) { |
4519 | 0 | if (strcmp(g_filters[i].name, filter->name) != 0) { |
4520 | 0 | BLOSC_TRACE_ERROR("The filter (ID: %d) plugin is already registered with name: %s." |
4521 | 0 | " Choose another one !", filter->id, g_filters[i].name); |
4522 | 0 | return BLOSC2_ERROR_FAILURE; |
4523 | 0 | } |
4524 | 0 | else { |
4525 | | // Already registered, so no more actions needed |
4526 | 0 | return BLOSC2_ERROR_SUCCESS; |
4527 | 0 | } |
4528 | 0 | } |
4529 | 430 | } |
4530 | | |
4531 | 215 | blosc2_filter *filter_new = &g_filters[g_nfilters++]; |
4532 | 215 | memcpy(filter_new, filter, sizeof(blosc2_filter)); |
4533 | | |
4534 | 215 | return BLOSC2_ERROR_SUCCESS; |
4535 | 215 | } |
4536 | | |
4537 | | |
4538 | 0 | int blosc2_register_filter(blosc2_filter *filter) { |
4539 | 0 | if (filter->id < BLOSC2_USER_REGISTERED_FILTERS_START) { |
4540 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal to %d", BLOSC2_USER_REGISTERED_FILTERS_START); |
4541 | 0 | return BLOSC2_ERROR_FAILURE; |
4542 | 0 | } |
4543 | | |
4544 | 0 | return register_filter_private(filter); |
4545 | 0 | } |
4546 | | |
4547 | | |
4548 | | /* Register codecs */ |
4549 | | |
4550 | 258 | int register_codec_private(blosc2_codec *codec) { |
4551 | 258 | BLOSC_ERROR_NULL(codec, BLOSC2_ERROR_INVALID_PARAM); |
4552 | 258 | if (g_ncodecs == UINT8_MAX) { |
4553 | 0 | BLOSC_TRACE_ERROR("Can not register more codecs"); |
4554 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
4555 | 0 | } |
4556 | 258 | if (codec->compcode < BLOSC2_GLOBAL_REGISTERED_CODECS_START) { |
4557 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_CODECS_START); |
4558 | 0 | return BLOSC2_ERROR_FAILURE; |
4559 | 0 | } |
4560 | | /* This condition can never be fulfilled |
4561 | | if (codec->compcode > BLOSC2_USER_REGISTERED_CODECS_STOP) { |
4562 | | BLOSC_TRACE_ERROR("The id must be less or equal to %d", BLOSC2_USER_REGISTERED_CODECS_STOP); |
4563 | | return BLOSC2_ERROR_FAILURE; |
4564 | | } |
4565 | | */ |
4566 | | |
4567 | 903 | for (int i = 0; i < g_ncodecs; ++i) { |
4568 | 645 | if (g_codecs[i].compcode == codec->compcode) { |
4569 | 0 | if (strcmp(g_codecs[i].compname, codec->compname) != 0) { |
4570 | 0 | BLOSC_TRACE_ERROR("The codec (ID: %d) plugin is already registered with name: %s." |
4571 | 0 | " Choose another one !", codec->compcode, codec->compname); |
4572 | 0 | return BLOSC2_ERROR_CODEC_PARAM; |
4573 | 0 | } |
4574 | 0 | else { |
4575 | | // Already registered, so no more actions needed |
4576 | 0 | return BLOSC2_ERROR_SUCCESS; |
4577 | 0 | } |
4578 | 0 | } |
4579 | 645 | } |
4580 | | |
4581 | 258 | blosc2_codec *codec_new = &g_codecs[g_ncodecs++]; |
4582 | 258 | memcpy(codec_new, codec, sizeof(blosc2_codec)); |
4583 | | |
4584 | 258 | return BLOSC2_ERROR_SUCCESS; |
4585 | 258 | } |
4586 | | |
4587 | | |
4588 | 0 | int blosc2_register_codec(blosc2_codec *codec) { |
4589 | 0 | if (codec->compcode < BLOSC2_USER_REGISTERED_CODECS_START) { |
4590 | 0 | BLOSC_TRACE_ERROR("The compcode must be greater or equal than %d", BLOSC2_USER_REGISTERED_CODECS_START); |
4591 | 0 | return BLOSC2_ERROR_CODEC_PARAM; |
4592 | 0 | } |
4593 | | |
4594 | 0 | return register_codec_private(codec); |
4595 | 0 | } |
4596 | | |
4597 | | |
4598 | | /* Register tuners */ |
4599 | | |
4600 | 43 | int register_tuner_private(blosc2_tuner *tuner) { |
4601 | 43 | BLOSC_ERROR_NULL(tuner, BLOSC2_ERROR_INVALID_PARAM); |
4602 | 43 | if (g_ntuners == UINT8_MAX) { |
4603 | 0 | BLOSC_TRACE_ERROR("Can not register more tuners"); |
4604 | 0 | return BLOSC2_ERROR_CODEC_SUPPORT; |
4605 | 0 | } |
4606 | 43 | if (tuner->id < BLOSC2_GLOBAL_REGISTERED_TUNER_START) { |
4607 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal than %d", BLOSC2_GLOBAL_REGISTERED_TUNER_START); |
4608 | 0 | return BLOSC2_ERROR_FAILURE; |
4609 | 0 | } |
4610 | | |
4611 | 43 | for (int i = 0; i < g_ntuners; ++i) { |
4612 | 0 | if (g_tuners[i].id == tuner->id) { |
4613 | 0 | if (strcmp(g_tuners[i].name, tuner->name) != 0) { |
4614 | 0 | BLOSC_TRACE_ERROR("The tuner (ID: %d) plugin is already registered with name: %s." |
4615 | 0 | " Choose another one !", tuner->id, g_tuners[i].name); |
4616 | 0 | return BLOSC2_ERROR_FAILURE; |
4617 | 0 | } |
4618 | 0 | else { |
4619 | | // Already registered, so no more actions needed |
4620 | 0 | return BLOSC2_ERROR_SUCCESS; |
4621 | 0 | } |
4622 | 0 | } |
4623 | 0 | } |
4624 | | |
4625 | 43 | blosc2_tuner *tuner_new = &g_tuners[g_ntuners++]; |
4626 | 43 | memcpy(tuner_new, tuner, sizeof(blosc2_tuner)); |
4627 | | |
4628 | 43 | return BLOSC2_ERROR_SUCCESS; |
4629 | 43 | } |
4630 | | |
4631 | | |
4632 | 0 | int blosc2_register_tuner(blosc2_tuner *tuner) { |
4633 | 0 | if (tuner->id < BLOSC2_USER_REGISTERED_TUNER_START) { |
4634 | 0 | BLOSC_TRACE_ERROR("The id must be greater or equal to %d", BLOSC2_USER_REGISTERED_TUNER_START); |
4635 | 0 | return BLOSC2_ERROR_FAILURE; |
4636 | 0 | } |
4637 | | |
4638 | 0 | return register_tuner_private(tuner); |
4639 | 0 | } |
4640 | | |
4641 | | |
4642 | 86 | int _blosc2_register_io_cb(const blosc2_io_cb *io) { |
4643 | | |
4644 | 129 | for (uint64_t i = 0; i < g_nio; ++i) { |
4645 | 127 | if (g_ios[i].id == io->id) { |
4646 | 84 | if (strcmp(g_ios[i].name, io->name) != 0) { |
4647 | 0 | BLOSC_TRACE_ERROR("The IO (ID: %d) plugin is already registered with name: %s." |
4648 | 0 | " Choose another one !", io->id, g_ios[i].name); |
4649 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
4650 | 0 | } |
4651 | 84 | else { |
4652 | | // Already registered, so no more actions needed |
4653 | 84 | return BLOSC2_ERROR_SUCCESS; |
4654 | 84 | } |
4655 | 84 | } |
4656 | 127 | } |
4657 | | |
4658 | 2 | blosc2_io_cb *io_new = &g_ios[g_nio++]; |
4659 | 2 | memcpy(io_new, io, sizeof(blosc2_io_cb)); |
4660 | | |
4661 | 2 | return BLOSC2_ERROR_SUCCESS; |
4662 | 86 | } |
4663 | | |
4664 | 0 | int blosc2_register_io_cb(const blosc2_io_cb *io) { |
4665 | 0 | BLOSC_ERROR_NULL(io, BLOSC2_ERROR_INVALID_PARAM); |
4666 | 0 | if (g_nio == UINT8_MAX) { |
4667 | 0 | BLOSC_TRACE_ERROR("Can not register more codecs"); |
4668 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
4669 | 0 | } |
4670 | | |
4671 | 0 | if (io->id < BLOSC2_IO_REGISTERED) { |
4672 | 0 | BLOSC_TRACE_ERROR("The compcode must be greater or equal than %d", BLOSC2_IO_REGISTERED); |
4673 | 0 | return BLOSC2_ERROR_PLUGIN_IO; |
4674 | 0 | } |
4675 | | |
4676 | 0 | return _blosc2_register_io_cb(io); |
4677 | 0 | } |
4678 | | |
4679 | 381 | blosc2_io_cb *blosc2_get_io_cb(uint8_t id) { |
4680 | | // If g_initlib is not set by blosc2_init() this function will try to read |
4681 | | // uninitialized memory. We should therefore always return NULL in that case |
4682 | 381 | if (!g_initlib) { |
4683 | 0 | return NULL; |
4684 | 0 | } |
4685 | 381 | for (uint64_t i = 0; i < g_nio; ++i) { |
4686 | 381 | if (g_ios[i].id == id) { |
4687 | 381 | return &g_ios[i]; |
4688 | 381 | } |
4689 | 381 | } |
4690 | 0 | if (id == BLOSC2_IO_FILESYSTEM) { |
4691 | 0 | if (_blosc2_register_io_cb(&BLOSC2_IO_CB_DEFAULTS) < 0) { |
4692 | 0 | BLOSC_TRACE_ERROR("Error registering the default IO API"); |
4693 | 0 | return NULL; |
4694 | 0 | } |
4695 | 0 | return blosc2_get_io_cb(id); |
4696 | 0 | } |
4697 | 0 | else if (id == BLOSC2_IO_FILESYSTEM_MMAP) { |
4698 | 0 | if (_blosc2_register_io_cb(&BLOSC2_IO_CB_MMAP) < 0) { |
4699 | 0 | BLOSC_TRACE_ERROR("Error registering the mmap IO API"); |
4700 | 0 | return NULL; |
4701 | 0 | } |
4702 | 0 | return blosc2_get_io_cb(id); |
4703 | 0 | } |
4704 | 0 | return NULL; |
4705 | 0 | } |
4706 | | |
4707 | 0 | void blosc2_unidim_to_multidim(uint8_t ndim, int64_t *shape, int64_t i, int64_t *index) { |
4708 | 0 | if (ndim == 0) { |
4709 | 0 | return; |
4710 | 0 | } |
4711 | 0 | int64_t *strides = malloc(ndim * sizeof(int64_t)); |
4712 | 0 | strides[ndim - 1] = 1; |
4713 | 0 | for (int j = ndim - 2; j >= 0; --j) { |
4714 | 0 | strides[j] = shape[j + 1] * strides[j + 1]; |
4715 | 0 | } |
4716 | |
|
4717 | 0 | index[0] = i / strides[0]; |
4718 | 0 | for (int j = 1; j < ndim; ++j) { |
4719 | 0 | index[j] = (i % strides[j - 1]) / strides[j]; |
4720 | 0 | } |
4721 | 0 | free(strides); |
4722 | 0 | } |
4723 | | |
4724 | 0 | void blosc2_multidim_to_unidim(const int64_t *index, int8_t ndim, const int64_t *strides, int64_t *i) { |
4725 | 0 | *i = 0; |
4726 | 0 | for (int j = 0; j < ndim; ++j) { |
4727 | 0 | *i += index[j] * strides[j]; |
4728 | 0 | } |
4729 | 0 | } |
4730 | | |
4731 | 0 | int blosc2_get_slice_nchunks(blosc2_schunk* schunk, int64_t *start, int64_t *stop, int64_t **chunks_idx) { |
4732 | 0 | BLOSC_ERROR_NULL(schunk, BLOSC2_ERROR_NULL_POINTER); |
4733 | 0 | if (blosc2_meta_exists(schunk, "b2nd") < 0) { |
4734 | | // Try with a caterva metalayer; we are meant to be backward compatible with it |
4735 | 0 | if (blosc2_meta_exists(schunk, "caterva") < 0) { |
4736 | 0 | return schunk_get_slice_nchunks(schunk, *start, *stop, chunks_idx); |
4737 | 0 | } |
4738 | 0 | } |
4739 | | |
4740 | 0 | b2nd_array_t *array; |
4741 | 0 | int rc = b2nd_from_schunk(schunk, &array); |
4742 | 0 | if (rc < 0) { |
4743 | 0 | BLOSC_TRACE_ERROR("Could not get b2nd array from schunk."); |
4744 | 0 | return rc; |
4745 | 0 | } |
4746 | 0 | rc = b2nd_get_slice_nchunks(array, start, stop, chunks_idx); |
4747 | 0 | array->sc = NULL; // Free only array struct |
4748 | 0 | b2nd_free(array); |
4749 | |
|
4750 | 0 | return rc; |
4751 | 0 | } |
4752 | | |
4753 | 0 | blosc2_cparams blosc2_get_blosc2_cparams_defaults(void) { |
4754 | 0 | return BLOSC2_CPARAMS_DEFAULTS; |
4755 | 0 | }; |
4756 | | |
4757 | 0 | blosc2_dparams blosc2_get_blosc2_dparams_defaults(void) { |
4758 | 0 | return BLOSC2_DPARAMS_DEFAULTS; |
4759 | 0 | }; |
4760 | | |
4761 | 0 | blosc2_storage blosc2_get_blosc2_storage_defaults(void) { |
4762 | 0 | return BLOSC2_STORAGE_DEFAULTS; |
4763 | 0 | }; |
4764 | | |
4765 | 0 | blosc2_io blosc2_get_blosc2_io_defaults(void) { |
4766 | 0 | return BLOSC2_IO_DEFAULTS; |
4767 | 0 | }; |
4768 | | |
4769 | 0 | blosc2_stdio_mmap blosc2_get_blosc2_stdio_mmap_defaults(void) { |
4770 | 0 | return BLOSC2_STDIO_MMAP_DEFAULTS; |
4771 | 0 | }; |
4772 | | |
4773 | 0 | const char *blosc2_error_string(int error_code) { |
4774 | 0 | switch (error_code) { |
4775 | 0 | case BLOSC2_ERROR_FAILURE: |
4776 | 0 | return "Generic failure"; |
4777 | 0 | case BLOSC2_ERROR_STREAM: |
4778 | 0 | return "Bad stream"; |
4779 | 0 | case BLOSC2_ERROR_DATA: |
4780 | 0 | return "Invalid data"; |
4781 | 0 | case BLOSC2_ERROR_MEMORY_ALLOC: |
4782 | 0 | return "Memory alloc/realloc failure"; |
4783 | 0 | case BLOSC2_ERROR_READ_BUFFER: |
4784 | 0 | return "Not enough space to read"; |
4785 | 0 | case BLOSC2_ERROR_WRITE_BUFFER: |
4786 | 0 | return "Not enough space to write"; |
4787 | 0 | case BLOSC2_ERROR_CODEC_SUPPORT: |
4788 | 0 | return "Codec not supported"; |
4789 | 0 | case BLOSC2_ERROR_CODEC_PARAM: |
4790 | 0 | return "Invalid parameter supplied to codec"; |
4791 | 0 | case BLOSC2_ERROR_CODEC_DICT: |
4792 | 0 | return "Codec dictionary error"; |
4793 | 0 | case BLOSC2_ERROR_VERSION_SUPPORT: |
4794 | 0 | return "Version not supported"; |
4795 | 0 | case BLOSC2_ERROR_INVALID_HEADER: |
4796 | 0 | return "Invalid value in header"; |
4797 | 0 | case BLOSC2_ERROR_INVALID_PARAM: |
4798 | 0 | return "Invalid parameter supplied to function"; |
4799 | 0 | case BLOSC2_ERROR_FILE_READ: |
4800 | 0 | return "File read failure"; |
4801 | 0 | case BLOSC2_ERROR_FILE_WRITE: |
4802 | 0 | return "File write failure"; |
4803 | 0 | case BLOSC2_ERROR_FILE_OPEN: |
4804 | 0 | return "File open failure"; |
4805 | 0 | case BLOSC2_ERROR_NOT_FOUND: |
4806 | 0 | return "Not found"; |
4807 | 0 | case BLOSC2_ERROR_RUN_LENGTH: |
4808 | 0 | return "Bad run length encoding"; |
4809 | 0 | case BLOSC2_ERROR_FILTER_PIPELINE: |
4810 | 0 | return "Filter pipeline error"; |
4811 | 0 | case BLOSC2_ERROR_CHUNK_INSERT: |
4812 | 0 | return "Chunk insert failure"; |
4813 | 0 | case BLOSC2_ERROR_CHUNK_APPEND: |
4814 | 0 | return "Chunk append failure"; |
4815 | 0 | case BLOSC2_ERROR_CHUNK_UPDATE: |
4816 | 0 | return "Chunk update failure"; |
4817 | 0 | case BLOSC2_ERROR_2GB_LIMIT: |
4818 | 0 | return "Sizes larger than 2gb not supported"; |
4819 | 0 | case BLOSC2_ERROR_SCHUNK_COPY: |
4820 | 0 | return "Super-chunk copy failure"; |
4821 | 0 | case BLOSC2_ERROR_FRAME_TYPE: |
4822 | 0 | return "Wrong type for frame"; |
4823 | 0 | case BLOSC2_ERROR_FILE_TRUNCATE: |
4824 | 0 | return "File truncate failure"; |
4825 | 0 | case BLOSC2_ERROR_THREAD_CREATE: |
4826 | 0 | return "Thread or thread context creation failure"; |
4827 | 0 | case BLOSC2_ERROR_POSTFILTER: |
4828 | 0 | return "Postfilter failure"; |
4829 | 0 | case BLOSC2_ERROR_FRAME_SPECIAL: |
4830 | 0 | return "Special frame failure"; |
4831 | 0 | case BLOSC2_ERROR_SCHUNK_SPECIAL: |
4832 | 0 | return "Special super-chunk failure"; |
4833 | 0 | case BLOSC2_ERROR_PLUGIN_IO: |
4834 | 0 | return "IO plugin error"; |
4835 | 0 | case BLOSC2_ERROR_FILE_REMOVE: |
4836 | 0 | return "Remove file failure"; |
4837 | 0 | case BLOSC2_ERROR_NULL_POINTER: |
4838 | 0 | return "Pointer is null"; |
4839 | 0 | case BLOSC2_ERROR_INVALID_INDEX: |
4840 | 0 | return "Invalid index"; |
4841 | 0 | case BLOSC2_ERROR_METALAYER_NOT_FOUND: |
4842 | 0 | return "Metalayer has not been found"; |
4843 | 0 | case BLOSC2_ERROR_MAX_BUFSIZE_EXCEEDED: |
4844 | 0 | return "Maximum buffersize exceeded"; |
4845 | 0 | case BLOSC2_ERROR_TUNER: |
4846 | 0 | return "Tuner failure"; |
4847 | 0 | default: |
4848 | 0 | return "Unknown error"; |
4849 | 0 | } |
4850 | 0 | } |