Line | Count | Source |
1 | | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * |
2 | | * Copyright by The HDF Group. * |
3 | | * All rights reserved. * |
4 | | * * |
5 | | * This file is part of HDF5. The full HDF5 copyright notice, including * |
6 | | * terms governing use, modification, and redistribution, is contained in * |
7 | | * the COPYING file, which can be found at the root of the source code * |
8 | | * distribution tree, or in https://www.hdfgroup.org/licenses. * |
9 | | * If you do not have access to either file, you may request a copy from * |
10 | | * help@hdfgroup.org. * |
11 | | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
12 | | |
13 | | /*------------------------------------------------------------------------- |
14 | | * |
15 | | * Created: H5C.c |
16 | | * |
17 | | * Purpose: Functions in this file implement a generic cache for |
18 | | * things which exist on disk, and which may be |
19 | | * unambiguously referenced by their disk addresses. |
20 | | * |
21 | | * For a detailed overview of the cache, please see the |
22 | | * header comment for H5C_t in H5Cpkg.h. |
23 | | * |
24 | | *------------------------------------------------------------------------- |
25 | | */ |
26 | | |
27 | | /************************************************************************** |
28 | | * |
29 | | * To Do: |
30 | | * |
31 | | * Code Changes: |
32 | | * |
33 | | * - Change protect/unprotect to lock/unlock. |
34 | | * |
35 | | * - Flush entries in increasing address order in |
36 | | * H5C__make_space_in_cache(). |
37 | | * |
38 | | * - Also in H5C__make_space_in_cache(), use high and low water marks |
39 | | * to reduce the number of I/O calls. |
40 | | * |
41 | | * - When flushing, attempt to combine contiguous entries to reduce |
42 | | * I/O overhead. Can't do this just yet as some entries are not |
43 | | * contiguous. Do this in parallel only or in serial as well? |
44 | | * |
45 | | * - Fix nodes in memory to point directly to the skip list node from |
46 | | * the LRU list, eliminating skip list lookups when evicting objects |
47 | | * from the cache. |
48 | | * |
49 | | **************************************************************************/ |
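The to-do item about flushing in increasing address order is easy to picture with a standalone sketch. The snippet below is not HDF5 code; entry_t, cmp_addr, and the addresses are made up for illustration. It just sorts a set of flush candidates by disk address with qsort before "writing" them, which is the ordering the to-do item asks H5C__make_space_in_cache() to adopt.

    /* Standalone sketch (not HDF5 internals): sort flush candidates by disk
     * address so writes are issued in increasing-address order.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    typedef struct {
        uint64_t addr; /* disk address of the entry */
        size_t   size; /* length of the entry on disk */
    } entry_t;

    static int cmp_addr(const void *a, const void *b)
    {
        const entry_t *ea = (const entry_t *)a;
        const entry_t *eb = (const entry_t *)b;

        return (ea->addr > eb->addr) - (ea->addr < eb->addr);
    }

    int main(void)
    {
        entry_t dirty[] = {{4096, 512}, {1024, 256}, {8192, 128}, {2048, 64}};
        size_t  n       = sizeof(dirty) / sizeof(dirty[0]);

        qsort(dirty, n, sizeof(entry_t), cmp_addr);

        /* a real cache would issue the writes here; we just print the order */
        for (size_t i = 0; i < n; i++)
            printf("flush entry at addr %llu (%zu bytes)\n",
                   (unsigned long long)dirty[i].addr, dirty[i].size);
        return 0;
    }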
50 | | |
51 | | /****************/ |
52 | | /* Module Setup */ |
53 | | /****************/ |
54 | | |
55 | | #include "H5Cmodule.h" /* This source code file is part of the H5C module */ |
56 | | #define H5F_FRIEND /* suppress error about including H5Fpkg */ |
57 | | |
58 | | /***********/ |
59 | | /* Headers */ |
60 | | /***********/ |
61 | | #include "H5private.h" /* Generic Functions */ |
62 | | #include "H5ACprivate.h" /* Metadata cache */ |
63 | | #include "H5Cpkg.h" /* Cache */ |
64 | | #include "H5Eprivate.h" /* Error handling */ |
65 | | #include "H5Fpkg.h" /* Files */ |
66 | | #include "H5FLprivate.h" /* Free Lists */ |
67 | | #include "H5MFprivate.h" /* File memory management */ |
68 | | #include "H5MMprivate.h" /* Memory management */ |
69 | | #include "H5SLprivate.h" /* Skip Lists */ |
70 | | |
71 | | /****************/ |
72 | | /* Local Macros */ |
73 | | /****************/ |
74 | | |
75 | | /******************/ |
76 | | /* Local Typedefs */ |
77 | | /******************/ |
78 | | |
79 | | /********************/ |
80 | | /* Local Prototypes */ |
81 | | /********************/ |
82 | | |
83 | | /*********************/ |
84 | | /* Package Variables */ |
85 | | /*********************/ |
86 | | |
87 | | /* Declare a free list to manage the tag info struct */ |
88 | | H5FL_DEFINE(H5C_tag_info_t); |
89 | | |
90 | | /*****************************/ |
91 | | /* Library Private Variables */ |
92 | | /*****************************/ |
93 | | |
94 | | /*******************/ |
95 | | /* Local Variables */ |
96 | | /*******************/ |
97 | | |
98 | | /* Declare a free list to manage the H5C_t struct */ |
99 | | H5FL_DEFINE_STATIC(H5C_t); |
100 | | |
101 | | /*------------------------------------------------------------------------- |
102 | | * Function: H5C_create |
103 | | * |
104 | | * Purpose: Allocate, initialize, and return the address of a new |
105 | | * instance of H5C_t. |
106 | | * |
107 | | * In general, the max_cache_size parameter must be positive, |
108 | | * and the min_clean_size parameter must lie in the closed |
109 | | * interval [0, max_cache_size]. |
110 | | * |
111 | | * The check_write_permitted parameter must either be NULL, |
112 | | * or point to a function of type H5C_write_permitted_func_t. |
113 | | * If it is NULL, the cache will use the write_permitted |
114 | | * flag to determine whether writes are permitted. |
115 | | * |
116 | | * Return: Success: Pointer to the new instance. |
117 | | * Failure: NULL |
118 | | * |
119 | | *------------------------------------------------------------------------- |
120 | | */ |
121 | | H5C_t * |
122 | | H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, |
123 | | const H5C_class_t *const *class_table_ptr, H5C_write_permitted_func_t check_write_permitted, |
124 | | bool write_permitted, H5C_log_flush_func_t log_flush, void *aux_ptr) |
125 | 10 | { |
126 | 10 | int i; |
127 | 10 | H5C_t *cache_ptr = NULL; |
128 | 10 | H5C_t *ret_value = NULL; /* Return value */ |
129 | | |
130 | 10 | FUNC_ENTER_NOAPI(NULL) |
131 | | |
132 | 10 | assert(max_cache_size >= H5C__MIN_MAX_CACHE_SIZE); |
133 | 10 | assert(max_cache_size <= H5C__MAX_MAX_CACHE_SIZE); |
134 | 10 | assert(min_clean_size <= max_cache_size); |
135 | | |
136 | 10 | assert(max_type_id >= 0); |
137 | 10 | assert(max_type_id < H5C__MAX_NUM_TYPE_IDS); |
138 | 10 | assert(class_table_ptr); |
139 | | |
140 | 310 | for (i = 0; i <= max_type_id; i++) { |
141 | 300 | assert((class_table_ptr)[i]); |
142 | 300 | assert(strlen((class_table_ptr)[i]->name) > 0); |
143 | 300 | } /* end for */ |
144 | | |
145 | 10 | if (NULL == (cache_ptr = H5FL_CALLOC(H5C_t))) |
146 | 0 | HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); |
147 | | |
148 | 10 | if (NULL == (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL))) |
149 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list"); |
150 | | |
151 | 10 | cache_ptr->tag_list = NULL; |
152 | | |
153 | | /* If we get this far, we should succeed. Go ahead and initialize all |
154 | | * the fields. |
155 | | */ |
156 | | |
157 | 10 | cache_ptr->flush_in_progress = false; |
158 | | |
159 | 10 | if (NULL == (cache_ptr->log_info = (H5C_log_info_t *)H5MM_calloc(sizeof(H5C_log_info_t)))) |
160 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed"); |
161 | | |
162 | 10 | cache_ptr->aux_ptr = aux_ptr; |
163 | | |
164 | 10 | cache_ptr->max_type_id = max_type_id; |
165 | | |
166 | 10 | cache_ptr->class_table_ptr = class_table_ptr; |
167 | | |
168 | 10 | cache_ptr->max_cache_size = max_cache_size; |
169 | 10 | cache_ptr->min_clean_size = min_clean_size; |
170 | | |
171 | 10 | cache_ptr->check_write_permitted = check_write_permitted; |
172 | 10 | cache_ptr->write_permitted = write_permitted; |
173 | | |
174 | 10 | cache_ptr->log_flush = log_flush; |
175 | | |
176 | 10 | cache_ptr->evictions_enabled = true; |
177 | 10 | cache_ptr->close_warning_received = false; |
178 | | |
179 | 10 | cache_ptr->index_len = 0; |
180 | 10 | cache_ptr->index_size = (size_t)0; |
181 | 10 | cache_ptr->clean_index_size = (size_t)0; |
182 | 10 | cache_ptr->dirty_index_size = (size_t)0; |
183 | | |
184 | 70 | for (i = 0; i < H5C_RING_NTYPES; i++) { |
185 | 60 | cache_ptr->index_ring_len[i] = 0; |
186 | 60 | cache_ptr->index_ring_size[i] = (size_t)0; |
187 | 60 | cache_ptr->clean_index_ring_size[i] = (size_t)0; |
188 | 60 | cache_ptr->dirty_index_ring_size[i] = (size_t)0; |
189 | | |
190 | 60 | cache_ptr->slist_ring_len[i] = 0; |
191 | 60 | cache_ptr->slist_ring_size[i] = (size_t)0; |
192 | 60 | } /* end for */ |
193 | | |
194 | 655k | for (i = 0; i < H5C__HASH_TABLE_LEN; i++) |
195 | 655k | (cache_ptr->index)[i] = NULL; |
196 | | |
197 | 10 | cache_ptr->il_len = 0; |
198 | 10 | cache_ptr->il_size = (size_t)0; |
199 | 10 | cache_ptr->il_head = NULL; |
200 | 10 | cache_ptr->il_tail = NULL; |
201 | | |
202 | | /* Tagging Field Initializations */ |
203 | 10 | cache_ptr->ignore_tags = false; |
204 | 10 | cache_ptr->num_objs_corked = 0; |
205 | | |
206 | | /* slist field initializations */ |
207 | 10 | cache_ptr->slist_enabled = false; |
208 | 10 | cache_ptr->slist_changed = false; |
209 | 10 | cache_ptr->slist_len = 0; |
210 | 10 | cache_ptr->slist_size = (size_t)0; |
211 | | |
212 | | /* slist_ring_len, slist_ring_size, and |
213 | | * slist_ptr initialized above. |
214 | | */ |
215 | | |
216 | | #ifdef H5C_DO_SANITY_CHECKS |
217 | | cache_ptr->slist_len_increase = 0; |
218 | | cache_ptr->slist_size_increase = 0; |
219 | | #endif /* H5C_DO_SANITY_CHECKS */ |
220 | | |
221 | 10 | cache_ptr->entries_removed_counter = 0; |
222 | 10 | cache_ptr->last_entry_removed_ptr = NULL; |
223 | 10 | cache_ptr->entry_watched_for_removal = NULL; |
224 | | |
225 | 10 | cache_ptr->pl_len = 0; |
226 | 10 | cache_ptr->pl_size = (size_t)0; |
227 | 10 | cache_ptr->pl_head_ptr = NULL; |
228 | 10 | cache_ptr->pl_tail_ptr = NULL; |
229 | | |
230 | 10 | cache_ptr->pel_len = 0; |
231 | 10 | cache_ptr->pel_size = (size_t)0; |
232 | 10 | cache_ptr->pel_head_ptr = NULL; |
233 | 10 | cache_ptr->pel_tail_ptr = NULL; |
234 | | |
235 | 10 | cache_ptr->LRU_list_len = 0; |
236 | 10 | cache_ptr->LRU_list_size = (size_t)0; |
237 | 10 | cache_ptr->LRU_head_ptr = NULL; |
238 | 10 | cache_ptr->LRU_tail_ptr = NULL; |
239 | | |
240 | | #ifdef H5_HAVE_PARALLEL |
241 | | cache_ptr->coll_list_len = 0; |
242 | | cache_ptr->coll_list_size = (size_t)0; |
243 | | cache_ptr->coll_head_ptr = NULL; |
244 | | cache_ptr->coll_tail_ptr = NULL; |
245 | | cache_ptr->coll_write_list = NULL; |
246 | | #endif /* H5_HAVE_PARALLEL */ |
247 | | |
248 | | #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS |
249 | | cache_ptr->cLRU_list_len = 0; |
250 | | cache_ptr->cLRU_list_size = (size_t)0; |
251 | | cache_ptr->cLRU_head_ptr = NULL; |
252 | | cache_ptr->cLRU_tail_ptr = NULL; |
253 | | |
254 | | cache_ptr->dLRU_list_len = 0; |
255 | | cache_ptr->dLRU_list_size = (size_t)0; |
256 | | cache_ptr->dLRU_head_ptr = NULL; |
257 | | cache_ptr->dLRU_tail_ptr = NULL; |
258 | | #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ |
259 | | |
260 | 10 | cache_ptr->size_increase_possible = false; |
261 | 10 | cache_ptr->flash_size_increase_possible = false; |
262 | 10 | cache_ptr->flash_size_increase_threshold = 0; |
263 | 10 | cache_ptr->size_decrease_possible = false; |
264 | 10 | cache_ptr->resize_enabled = false; |
265 | 10 | cache_ptr->cache_full = false; |
266 | 10 | cache_ptr->size_decreased = false; |
267 | 10 | cache_ptr->resize_in_progress = false; |
268 | 10 | cache_ptr->msic_in_progress = false; |
269 | | |
270 | 10 | cache_ptr->resize_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; |
271 | 10 | cache_ptr->resize_ctl.rpt_fcn = NULL; |
272 | 10 | cache_ptr->resize_ctl.set_initial_size = false; |
273 | 10 | cache_ptr->resize_ctl.initial_size = H5C__DEF_AR_INIT_SIZE; |
274 | 10 | cache_ptr->resize_ctl.min_clean_fraction = H5C__DEF_AR_MIN_CLEAN_FRAC; |
275 | 10 | cache_ptr->resize_ctl.max_size = H5C__DEF_AR_MAX_SIZE; |
276 | 10 | cache_ptr->resize_ctl.min_size = H5C__DEF_AR_MIN_SIZE; |
277 | 10 | cache_ptr->resize_ctl.epoch_length = H5C__DEF_AR_EPOCH_LENGTH; |
278 | | |
279 | 10 | cache_ptr->resize_ctl.incr_mode = H5C_incr__off; |
280 | 10 | cache_ptr->resize_ctl.lower_hr_threshold = H5C__DEF_AR_LOWER_THRESHHOLD; |
281 | 10 | cache_ptr->resize_ctl.increment = H5C__DEF_AR_INCREMENT; |
282 | 10 | cache_ptr->resize_ctl.apply_max_increment = true; |
283 | 10 | cache_ptr->resize_ctl.max_increment = H5C__DEF_AR_MAX_INCREMENT; |
284 | | |
285 | 10 | cache_ptr->resize_ctl.flash_incr_mode = H5C_flash_incr__off; |
286 | 10 | cache_ptr->resize_ctl.flash_multiple = 1.0; |
287 | 10 | cache_ptr->resize_ctl.flash_threshold = 0.25; |
288 | | |
289 | 10 | cache_ptr->resize_ctl.decr_mode = H5C_decr__off; |
290 | 10 | cache_ptr->resize_ctl.upper_hr_threshold = H5C__DEF_AR_UPPER_THRESHHOLD; |
291 | 10 | cache_ptr->resize_ctl.decrement = H5C__DEF_AR_DECREMENT; |
292 | 10 | cache_ptr->resize_ctl.apply_max_decrement = true; |
293 | 10 | cache_ptr->resize_ctl.max_decrement = H5C__DEF_AR_MAX_DECREMENT; |
294 | 10 | cache_ptr->resize_ctl.epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT; |
295 | 10 | cache_ptr->resize_ctl.apply_empty_reserve = true; |
296 | 10 | cache_ptr->resize_ctl.empty_reserve = H5C__DEF_AR_EMPTY_RESERVE; |
297 | | |
298 | 10 | cache_ptr->epoch_markers_active = 0; |
299 | | |
300 | | /* no need to initialize the ring buffer itself */ |
301 | 10 | cache_ptr->epoch_marker_ringbuf_first = 1; |
302 | 10 | cache_ptr->epoch_marker_ringbuf_last = 0; |
303 | 10 | cache_ptr->epoch_marker_ringbuf_size = 0; |
304 | | |
305 | | /* Initialize all epoch marker entries' fields to zero/false/NULL */ |
306 | 10 | memset(cache_ptr->epoch_markers, 0, sizeof(cache_ptr->epoch_markers)); |
307 | | |
308 | | /* Set non-zero/false/NULL fields for epoch markers */ |
309 | 110 | for (i = 0; i < H5C__MAX_EPOCH_MARKERS; i++) { |
310 | 100 | ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; |
311 | 100 | ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER; |
312 | 100 | } |
313 | | |
314 | | /* Initialize cache image generation on file close related fields. |
315 | | * Initial value of image_ctl must match H5C__DEFAULT_CACHE_IMAGE_CTL |
316 | | * in H5Cprivate.h. |
317 | | */ |
318 | 10 | cache_ptr->image_ctl.version = H5C__CURR_CACHE_IMAGE_CTL_VER; |
319 | 10 | cache_ptr->image_ctl.generate_image = false; |
320 | 10 | cache_ptr->image_ctl.save_resize_status = false; |
321 | 10 | cache_ptr->image_ctl.entry_ageout = -1; |
322 | 10 | cache_ptr->image_ctl.flags = H5C_CI__ALL_FLAGS; |
323 | | |
324 | 10 | cache_ptr->serialization_in_progress = false; |
325 | 10 | cache_ptr->load_image = false; |
326 | 10 | cache_ptr->image_loaded = false; |
327 | 10 | cache_ptr->delete_image = false; |
328 | 10 | cache_ptr->image_addr = HADDR_UNDEF; |
329 | 10 | cache_ptr->image_len = 0; |
330 | 10 | cache_ptr->image_data_len = 0; |
331 | | |
332 | 10 | cache_ptr->entries_loaded_counter = 0; |
333 | 10 | cache_ptr->entries_inserted_counter = 0; |
334 | 10 | cache_ptr->entries_relocated_counter = 0; |
335 | 10 | cache_ptr->entry_fd_height_change_counter = 0; |
336 | | |
337 | 10 | cache_ptr->num_entries_in_image = 0; |
338 | 10 | cache_ptr->image_entries = NULL; |
339 | 10 | cache_ptr->image_buffer = NULL; |
340 | | |
341 | | /* initialize free space manager related fields: */ |
342 | 10 | cache_ptr->rdfsm_settled = false; |
343 | 10 | cache_ptr->mdfsm_settled = false; |
344 | | |
345 | 10 | if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) |
346 | | /* this should be impossible... */ |
347 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "H5C_reset_cache_hit_rate_stats failed"); |
348 | | |
349 | 10 | H5C_stats__reset(cache_ptr); |
350 | | |
351 | 10 | cache_ptr->prefix[0] = '\0'; /* empty string */ |
352 | | |
353 | | #ifndef NDEBUG |
354 | | cache_ptr->get_entry_ptr_from_addr_counter = 0; |
355 | | #endif |
356 | | |
357 | | /* Set return value */ |
358 | 10 | ret_value = cache_ptr; |
359 | | |
360 | 10 | done: |
361 | 10 | if (NULL == ret_value) { |
362 | 0 | if (cache_ptr != NULL) { |
363 | 0 | if (cache_ptr->slist_ptr != NULL) |
364 | 0 | H5SL_close(cache_ptr->slist_ptr); |
365 | |
366 | 0 | HASH_CLEAR(hh, cache_ptr->tag_list); |
367 | 0 | cache_ptr->tag_list = NULL; |
368 | |
369 | 0 | if (cache_ptr->log_info != NULL) |
370 | 0 | H5MM_xfree(cache_ptr->log_info); |
371 | |
372 | 0 | cache_ptr = H5FL_FREE(H5C_t, cache_ptr); |
373 | 0 | } |
374 | 0 | } |
375 | | |
376 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
377 | 10 | } /* H5C_create() */ |
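H5C_create() mostly wires up the data structures whose fields it initializes above: a hash-table index keyed on disk address (index[]) plus an LRU replacement list (LRU_head_ptr/LRU_tail_ptr), with every entry living in both. The following is a minimal, self-contained sketch of that shape using hypothetical types (mini_cache_t, cache_entry_t); it is not the real H5C_t, which carries many more lists, rings, and statistics.

    /* Standalone sketch (hypothetical types, not H5C_t): an address-keyed hash
     * index whose entries are also linked into an LRU list.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define TABLE_LEN 1024u /* a power of two, in the spirit of H5C__HASH_TABLE_LEN */

    typedef struct cache_entry {
        uint64_t            addr;    /* key: disk address */
        struct cache_entry *ht_next; /* hash-bucket chain */
        struct cache_entry *lru_prev;
        struct cache_entry *lru_next;
    } cache_entry_t;

    typedef struct {
        cache_entry_t *index[TABLE_LEN];
        cache_entry_t *lru_head; /* most recently used */
        cache_entry_t *lru_tail; /* least recently used, next eviction victim */
    } mini_cache_t;

    static unsigned hash_addr(uint64_t addr) { return (unsigned)(addr % TABLE_LEN); }

    static void cache_init(mini_cache_t *c) { memset(c, 0, sizeof(*c)); }

    static void cache_insert(mini_cache_t *c, cache_entry_t *e)
    {
        unsigned bucket = hash_addr(e->addr);

        /* prepend to the hash bucket */
        e->ht_next       = c->index[bucket];
        c->index[bucket] = e;

        /* prepend to the LRU list (new entries are most recently used) */
        e->lru_prev = NULL;
        e->lru_next = c->lru_head;
        if (c->lru_head)
            c->lru_head->lru_prev = e;
        c->lru_head = e;
        if (!c->lru_tail)
            c->lru_tail = e;
    }

    int main(void)
    {
        mini_cache_t  c;
        cache_entry_t e = {.addr = 4096};

        cache_init(&c);
        cache_insert(&c, &e);
        printf("entry stored in bucket %u, LRU head addr = %llu\n",
               hash_addr(e.addr), (unsigned long long)c.lru_head->addr);
        return 0;
    }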
378 | | |
379 | | /*------------------------------------------------------------------------- |
380 | | * Function: H5C_prep_for_file_close |
381 | | * |
382 | | * Purpose: This function should be called just prior to the cache |
383 | | * flushes at file close. There should be no protected |
384 | | * entries in the cache at this point. |
385 | | * |
386 | | * Return: Non-negative on success/Negative on failure |
387 | | * |
388 | | *------------------------------------------------------------------------- |
389 | | */ |
390 | | herr_t |
391 | | H5C_prep_for_file_close(H5F_t *f) |
392 | 10 | { |
393 | 10 | H5C_t *cache_ptr; |
394 | 10 | bool image_generated = false; /* Whether a cache image was generated */ |
395 | 10 | herr_t ret_value = SUCCEED; /* Return value */ |
396 | | |
397 | 10 | FUNC_ENTER_NOAPI(FAIL) |
398 | | |
399 | | /* Sanity checks */ |
400 | 10 | assert(f); |
401 | 10 | assert(f->shared); |
402 | 10 | assert(f->shared->cache); |
403 | 10 | cache_ptr = f->shared->cache; |
404 | 10 | assert(cache_ptr); |
405 | | |
406 | | /* It is possible to receive the close warning more than once */ |
407 | 10 | if (cache_ptr->close_warning_received) |
408 | 0 | HGOTO_DONE(SUCCEED); |
409 | 10 | cache_ptr->close_warning_received = true; |
410 | | |
411 | | /* Make certain there aren't any protected entries */ |
412 | 10 | assert(cache_ptr->pl_len == 0); |
413 | | |
414 | | /* Prepare cache image */ |
415 | 10 | if (H5C__prep_image_for_file_close(f, &image_generated) < 0) |
416 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image"); |
417 | | |
418 | | #ifdef H5_HAVE_PARALLEL |
419 | | if ((H5F_INTENT(f) & H5F_ACC_RDWR) && !image_generated && cache_ptr->aux_ptr != NULL && |
420 | | f->shared->fs_persist) { |
421 | | /* If persistent free space managers are enabled, flushing the |
422 | | * metadata cache may result in the deletion, insertion, and/or |
423 | | * dirtying of entries. |
424 | | * |
425 | | * This is a problem in PHDF5, as it breaks two invariants of |
426 | | * our management of the metadata cache across all processes: |
427 | | * |
428 | | * 1) Entries will not be dirtied, deleted, inserted, or moved |
429 | | * during flush in the parallel case. |
430 | | * |
431 | | * 2) All processes contain the same set of dirty metadata |
432 | | * entries on entry to a sync point. |
433 | | * |
434 | | * To solve this problem for the persistent free space managers, |
435 | | * serialize the metadata cache on all processes prior to the |
436 | | * first sync point on file shutdown. The shutdown warning is |
437 | | * a convenient location for this call. |
438 | | * |
439 | | * This is sufficient since: |
440 | | * |
441 | | * 1) FSM settle routines are only invoked on file close. Since |
442 | | * serialization makes the same settle calls as flush on file |
443 | | * close, and since the close warning is issued after all |
444 | | * non FSM related space allocations and just before the |
445 | | * first sync point on close, this call will leave the caches |
446 | | * in a consistent state across the processes if they were |
447 | | * consistent before. |
448 | | * |
449 | | * 2) Since the FSM settle routines are only invoked once during |
450 | | * file close, invoking them now will prevent their invocation |
451 | | * during a flush, and thus avoid any resulting entry dirties, |
452 | | * deletions, insertions, or moves during the flush. |
453 | | */ |
454 | | if (H5C__serialize_cache(f) < 0) |
455 | | HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed"); |
456 | | } /* end if */ |
457 | | #endif /* H5_HAVE_PARALLEL */ |
458 | | |
459 | 10 | done: |
460 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
461 | 10 | } /* H5C_prep_for_file_close() */ |
462 | | |
463 | | /*------------------------------------------------------------------------- |
464 | | * Function: H5C_dest |
465 | | * |
466 | | * Purpose: Flush all data to disk and destroy the cache. |
467 | | * |
468 | | * This function fails if any objects are protected since the |
469 | | * resulting file might not be consistent. |
470 | | * |
471 | | * Note: *cache_ptr has been freed upon successful return. |
472 | | * |
473 | | * Return: Non-negative on success/Negative on failure |
474 | | * |
475 | | *------------------------------------------------------------------------- |
476 | | */ |
477 | | herr_t |
478 | | H5C_dest(H5F_t *f) |
479 | 10 | { |
480 | 10 | H5C_t *cache_ptr = f->shared->cache; |
481 | 10 | H5C_tag_info_t *item = NULL; |
482 | 10 | H5C_tag_info_t *tmp = NULL; |
483 | 10 | herr_t ret_value = SUCCEED; /* Return value */ |
484 | | |
485 | 10 | FUNC_ENTER_NOAPI(FAIL) |
486 | | |
487 | | /* Sanity check */ |
488 | 10 | assert(cache_ptr); |
489 | 10 | assert(cache_ptr->close_warning_received); |
490 | | |
491 | | #if H5AC_DUMP_IMAGE_STATS_ON_CLOSE |
492 | | if (H5C__image_stats(cache_ptr, true) < 0) |
493 | | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats"); |
494 | | #endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */ |
495 | | |
496 | | /* Enable the slist, as it is needed in the flush */ |
497 | 10 | if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0) |
498 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed"); |
499 | | |
500 | | /* Flush and invalidate all cache entries */ |
501 | 10 | if (H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0) |
502 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache"); |
503 | | |
504 | | /* Generate & write cache image if requested */ |
505 | 10 | if (cache_ptr->image_ctl.generate_image) |
506 | 0 | if (H5C__generate_cache_image(f, cache_ptr) < 0) |
507 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image"); |
508 | | |
509 | | /* Question: Is it possible for cache_ptr->slist_ptr to be non-null at this |
510 | | * point? If no, shouldn't this if statement be an assert? |
511 | | */ |
512 | 10 | if (cache_ptr->slist_ptr != NULL) { |
513 | 10 | assert(cache_ptr->slist_len == 0); |
514 | 10 | assert(cache_ptr->slist_size == 0); |
515 | | |
516 | 10 | H5SL_close(cache_ptr->slist_ptr); |
517 | 10 | cache_ptr->slist_ptr = NULL; |
518 | 10 | } |
519 | | |
520 | 10 | HASH_ITER(hh, cache_ptr->tag_list, item, tmp) |
521 | 0 | { |
522 | 0 | HASH_DELETE(hh, cache_ptr->tag_list, item); |
523 | 0 | item = H5FL_FREE(H5C_tag_info_t, item); |
524 | 0 | } |
525 | | |
526 | 10 | if (cache_ptr->log_info != NULL) |
527 | 10 | H5MM_xfree(cache_ptr->log_info); |
528 | | |
529 | | #ifndef NDEBUG |
530 | | if (cache_ptr->get_entry_ptr_from_addr_counter > 0) |
531 | | fprintf(stdout, "*** %" PRId64 " calls to H5C_get_entry_ptr_from_addr(). ***\n", |
532 | | cache_ptr->get_entry_ptr_from_addr_counter); |
533 | | #endif /* NDEBUG */ |
534 | | |
535 | 10 | cache_ptr = H5FL_FREE(H5C_t, cache_ptr); |
536 | | |
537 | 10 | done: |
538 | 10 | if (ret_value < 0 && cache_ptr && cache_ptr->slist_ptr) |
539 | | /* Arguably, it shouldn't be necessary to re-enable the slist after |
540 | | * the call to H5C__flush_invalidate_cache(), as the metadata cache |
541 | | * should be discarded. However, in the test code, we make multiple |
542 | | * calls to H5C_dest(). Thus we re-enable the slist on failure if it |
543 | | * and the cache still exist. JRM -- 5/15/20 |
544 | | */ |
545 | 0 | if (H5C_set_slist_enabled(f->shared->cache, false, false) < 0) |
546 | 0 | HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed"); |
547 | | |
548 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
549 | 10 | } /* H5C_dest() */ |
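The tag_list teardown above uses the uthash macros HASH_ITER and HASH_DELETE. Below is a minimal standalone version of that pattern, assuming the single-header uthash.h library (the same one HDF5 relies on for its tag list) is on the include path; tag_info_t here is a stand-in, not the real H5C_tag_info_t.

    /* Standalone sketch of the tag-list teardown pattern (assumes uthash.h). */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include "uthash.h"

    typedef struct {
        uint64_t       tag; /* key: base address of the tagged object */
        UT_hash_handle hh;  /* makes this struct hashable by uthash */
    } tag_info_t;

    int main(void)
    {
        tag_info_t *tag_list = NULL;
        tag_info_t *item, *tmp;

        /* populate a few entries */
        for (uint64_t t = 1; t <= 3; t++) {
            tag_info_t *ti = calloc(1, sizeof(*ti));
            if (ti == NULL)
                return 1;
            ti->tag = t * 4096;
            HASH_ADD(hh, tag_list, tag, sizeof(uint64_t), ti);
        }

        /* teardown: HASH_ITER is safe against deleting the current item */
        HASH_ITER(hh, tag_list, item, tmp)
        {
            HASH_DELETE(hh, tag_list, item);
            free(item);
        }

        printf("remaining entries: %u\n", HASH_COUNT(tag_list)); /* prints 0 */
        return 0;
    }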
550 | | |
551 | | /*------------------------------------------------------------------------- |
552 | | * Function: H5C_evict |
553 | | * |
554 | | * Purpose: Evict all except pinned entries in the cache |
555 | | * |
556 | | * Return: Non-negative on success/Negative on failure |
557 | | * |
558 | | *------------------------------------------------------------------------- |
559 | | */ |
560 | | herr_t |
561 | | H5C_evict(H5F_t *f) |
562 | 0 | { |
563 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
564 | 0 |
565 | 0 | FUNC_ENTER_NOAPI(FAIL) |
566 | | |
567 | | /* Sanity check */ |
568 | 0 | assert(f); |
569 | | |
570 | | /* Enable the slist, as it is needed in the flush */ |
571 | 0 | if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0) |
572 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed"); |
573 | | |
574 | | /* Flush and invalidate all cache entries except the pinned entries */ |
575 | 0 | if (H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0) |
576 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache"); |
577 | | |
578 | | /* Disable the slist */ |
579 | 0 | if (H5C_set_slist_enabled(f->shared->cache, false, false) < 0) |
580 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed"); |
581 | | |
582 | 0 | done: |
583 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
584 | 0 | } /* H5C_evict() */ |
585 | | |
586 | | /*------------------------------------------------------------------------- |
587 | | * Function: H5C_flush_cache |
588 | | * |
589 | | * Purpose: Flush (and possibly destroy) the entries contained in the |
590 | | * specified cache. |
591 | | * |
592 | | * If the cache contains protected entries, the function will |
593 | | * fail, as protected entries cannot be flushed. However |
594 | | * fail, as protected entries cannot be flushed. However, |
595 | | * function returns failure. |
596 | | * |
597 | | * Return: Non-negative on success/Negative on failure or if there was |
598 | | * a request to flush all items and an entry was protected. |
599 | | * |
600 | | *------------------------------------------------------------------------- |
601 | | */ |
602 | | herr_t |
603 | | H5C_flush_cache(H5F_t *f, unsigned flags) |
604 | 0 | { |
605 | | #ifdef H5C_DO_SANITY_CHECKS |
606 | | int i; |
607 | | uint32_t index_len = 0; |
608 | | size_t index_size = (size_t)0; |
609 | | size_t clean_index_size = (size_t)0; |
610 | | size_t dirty_index_size = (size_t)0; |
611 | | size_t slist_size = (size_t)0; |
612 | | uint32_t slist_len = 0; |
613 | | #endif /* H5C_DO_SANITY_CHECKS */ |
614 | 0 | H5C_ring_t ring; |
615 | 0 | H5C_t *cache_ptr; |
616 | 0 | bool destroy; |
617 | 0 | herr_t ret_value = SUCCEED; |
618 | 0 |
619 | 0 | FUNC_ENTER_NOAPI(FAIL) |
620 | 0 |
621 | 0 | assert(f); |
622 | 0 | assert(f->shared); |
623 | 0 | cache_ptr = f->shared->cache; |
624 | 0 | assert(cache_ptr); |
625 | 0 | assert(cache_ptr->slist_ptr); |
626 | 0 |
627 | | #ifdef H5C_DO_SANITY_CHECKS |
628 | | assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); |
629 | | assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); |
630 | | assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); |
631 | | assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); |
632 | | assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); |
633 | | assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); |
634 | | |
635 | | for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { |
636 | | index_len += cache_ptr->index_ring_len[i]; |
637 | | index_size += cache_ptr->index_ring_size[i]; |
638 | | clean_index_size += cache_ptr->clean_index_ring_size[i]; |
639 | | dirty_index_size += cache_ptr->dirty_index_ring_size[i]; |
640 | | |
641 | | slist_len += cache_ptr->slist_ring_len[i]; |
642 | | slist_size += cache_ptr->slist_ring_size[i]; |
643 | | } /* end for */ |
644 | | |
645 | | assert(cache_ptr->index_len == index_len); |
646 | | assert(cache_ptr->index_size == index_size); |
647 | | assert(cache_ptr->clean_index_size == clean_index_size); |
648 | | assert(cache_ptr->dirty_index_size == dirty_index_size); |
649 | | assert(cache_ptr->slist_len == slist_len); |
650 | | assert(cache_ptr->slist_size == slist_size); |
651 | | #endif /* H5C_DO_SANITY_CHECKS */ |
652 | |
653 | | #ifdef H5C_DO_EXTREME_SANITY_CHECKS |
654 | | if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || |
655 | | H5C__validate_lru_list(cache_ptr) < 0) |
656 | | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry"); |
657 | | #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ |
658 | |
659 | 0 | destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); |
660 | 0 | assert(!(destroy && ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0))); |
661 | 0 | assert(!(cache_ptr->flush_in_progress)); |
662 | 0 |
663 | 0 | cache_ptr->flush_in_progress = true; |
664 | 0 |
665 | 0 | if (destroy) { |
666 | 0 | if (H5C__flush_invalidate_cache(f, flags) < 0) |
667 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed"); |
668 | 0 | } /* end if */ |
669 | 0 | else { |
670 | | /* flush each ring, starting from the outermost ring and |
671 | | * working inward. |
672 | | */ |
673 | 0 | ring = H5C_RING_USER; |
674 | 0 | while (ring < H5C_RING_NTYPES) { |
675 | | /* Only call the free space manager settle routines when close |
676 | | * warning has been received. |
677 | | */ |
678 | 0 | if (cache_ptr->close_warning_received) { |
679 | 0 | switch (ring) { |
680 | 0 | case H5C_RING_USER: |
681 | 0 | break; |
682 | | |
683 | 0 | case H5C_RING_RDFSM: |
684 | | /* Settle raw data FSM */ |
685 | 0 | if (!cache_ptr->rdfsm_settled) |
686 | 0 | if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0) |
687 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed"); |
688 | 0 | break; |
689 | | |
690 | 0 | case H5C_RING_MDFSM: |
691 | | /* Settle metadata FSM */ |
692 | 0 | if (!cache_ptr->mdfsm_settled) |
693 | 0 | if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0) |
694 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed"); |
695 | 0 | break; |
696 | | |
697 | 0 | case H5C_RING_SBE: |
698 | 0 | case H5C_RING_SB: |
699 | 0 | break; |
700 | | |
701 | 0 | default: |
702 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!"); |
703 | 0 | break; |
704 | 0 | } /* end switch */ |
705 | 0 | } /* end if */ |
706 | | |
707 | 0 | if (H5C__flush_ring(f, ring, flags) < 0) |
708 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed"); |
709 | 0 | ring++; |
710 | 0 | } /* end while */ |
711 | 0 | } /* end else */ |
712 | | |
713 | 0 | done: |
714 | 0 | cache_ptr->flush_in_progress = false; |
715 | 0 |
716 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
717 | 0 | } /* H5C_flush_cache() */ |
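The else-branch above flushes ring by ring, from the outermost ring (ordinary user metadata) inward to the superblock, by incrementing `ring` from H5C_RING_USER until it reaches H5C_RING_NTYPES. Here is a standalone sketch of that iteration order, with a hypothetical ring_t enum and a stubbed flush_ring(); the names and ordering mirror the H5C ring values but nothing else of the real flush logic.

    /* Standalone sketch: flush "rings" from outermost to innermost. */
    #include <stdio.h>

    typedef enum {
        RING_UNDEFINED = 0, /* not used for real entries */
        RING_USER,          /* outermost: ordinary metadata */
        RING_RDFSM,         /* raw-data free space manager metadata */
        RING_MDFSM,         /* metadata free space manager metadata */
        RING_SBE,           /* superblock extension */
        RING_SB,            /* innermost: superblock */
        RING_NTYPES
    } ring_t;

    static int flush_ring(ring_t ring)
    {
        printf("flushing ring %d\n", (int)ring);
        return 0; /* 0 == success */
    }

    int main(void)
    {
        /* outermost ring first, superblock last */
        for (int ring = RING_USER; ring < RING_NTYPES; ring++)
            if (flush_ring((ring_t)ring) < 0)
                return 1;
        return 0;
    }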
718 | | |
719 | | /*------------------------------------------------------------------------- |
720 | | * Function: H5C_flush_to_min_clean |
721 | | * |
722 | | * Purpose: Flush dirty entries until the cache's min clean size is |
723 | | * attained. |
724 | | * |
725 | | * This function is used in the implementation of the |
726 | | * metadata cache in PHDF5. To avoid "messages from the |
727 | | * future", the cache on process 0 can't be allowed to |
728 | | * flush entries until the other processes have reached |
729 | | * the same point in the calculation. If this constraint |
730 | | * is not met, it is possible that the other processes will |
731 | | * read metadata generated at a future point in the |
732 | | * computation. |
733 | | * |
734 | | * |
735 | | * Return: Non-negative on success/Negative on failure or if |
736 | | * write is not permitted. |
737 | | * |
738 | | *------------------------------------------------------------------------- |
739 | | */ |
740 | | herr_t |
741 | | H5C_flush_to_min_clean(H5F_t *f) |
742 | 0 | { |
743 | 0 | H5C_t *cache_ptr; |
744 | 0 | bool write_permitted; |
745 | 0 | herr_t ret_value = SUCCEED; |
746 | 0 |
747 | 0 | FUNC_ENTER_NOAPI(FAIL) |
748 | 0 |
749 | 0 | assert(f); |
750 | 0 | assert(f->shared); |
751 | 0 | cache_ptr = f->shared->cache; |
752 | 0 | assert(cache_ptr); |
753 | 0 |
754 | 0 | if (cache_ptr->check_write_permitted != NULL) { |
755 | 0 | if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) |
756 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get write_permitted"); |
757 | 0 | } /* end if */ |
758 | 0 | else |
759 | 0 | write_permitted = cache_ptr->write_permitted; |
760 | | |
761 | 0 | if (!write_permitted) |
762 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache write is not permitted!?!"); |
763 | | |
764 | 0 | if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0) |
765 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__make_space_in_cache failed"); |
766 | | |
767 | 0 | done: |
768 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
769 | 0 | } /* H5C_flush_to_min_clean() */ |
770 | | |
771 | | /*------------------------------------------------------------------------- |
772 | | * Function: H5C_reset_cache_hit_rate_stats() |
773 | | * |
774 | | * Purpose: Reset the cache hit rate computation fields. |
775 | | * |
776 | | * Return: SUCCEED on success, and FAIL on failure. |
777 | | * |
778 | | *------------------------------------------------------------------------- |
779 | | */ |
780 | | herr_t |
781 | | H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) |
782 | 20 | { |
783 | 20 | herr_t ret_value = SUCCEED; /* Return value */ |
784 | | |
785 | 20 | FUNC_ENTER_NOAPI(FAIL) |
786 | | |
787 | 20 | if (cache_ptr == NULL) |
788 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry"); |
789 | | |
790 | 20 | cache_ptr->cache_hits = 0; |
791 | 20 | cache_ptr->cache_accesses = 0; |
792 | | |
793 | 20 | done: |
794 | 20 | FUNC_LEAVE_NOAPI(ret_value) |
795 | 20 | } /* H5C_reset_cache_hit_rate_stats() */ |
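The two counters reset here, cache_hits and cache_accesses, are what the auto-resize code later turns into a hit rate. A minimal sketch of that derived quantity (hypothetical helper, with a guard for the zero-access case):

    /* Standalone sketch: hit rate from the two counters the reset zeroes. */
    #include <stdio.h>
    #include <stdint.h>

    static double hit_rate(int64_t hits, int64_t accesses)
    {
        /* immediately after a reset, accesses == 0, so report 0.0 */
        return (accesses > 0) ? (double)hits / (double)accesses : 0.0;
    }

    int main(void)
    {
        printf("hit rate = %.3f\n", hit_rate(900, 1000)); /* prints 0.900 */
        return 0;
    }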
796 | | |
797 | | /*------------------------------------------------------------------------- |
798 | | * Function: H5C_set_cache_auto_resize_config |
799 | | * |
800 | | * Purpose: Set the cache automatic resize configuration to the |
801 | | * provided values if they are in range, and fail if they |
802 | | * are not. |
803 | | * |
804 | | * If the new configuration enables automatic cache resizing, |
805 | | * coerce the cache max size and min clean size into agreement |
806 | | * with the new policy and re-set the full cache hit rate |
807 | | * stats. |
808 | | * |
809 | | * Return: SUCCEED on success, and FAIL on failure. |
810 | | * |
811 | | *------------------------------------------------------------------------- |
812 | | */ |
813 | | herr_t |
814 | | H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr) |
815 | 10 | { |
816 | 10 | size_t new_max_cache_size; |
817 | 10 | size_t new_min_clean_size; |
818 | 10 | herr_t ret_value = SUCCEED; /* Return value */ |
819 | | |
820 | 10 | FUNC_ENTER_NOAPI(FAIL) |
821 | | |
822 | 10 | if (cache_ptr == NULL) |
823 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry"); |
824 | 10 | if (config_ptr == NULL) |
825 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry"); |
826 | 10 | if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) |
827 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version"); |
828 | | |
829 | | /* check general configuration section of the config: */ |
830 | 10 | if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0) |
831 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config"); |
832 | | |
833 | | /* check size increase control fields of the config: */ |
834 | 10 | if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0) |
835 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config"); |
836 | | |
837 | | /* check size decrease control fields of the config: */ |
838 | 10 | if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0) |
839 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config"); |
840 | | |
841 | | /* check for conflicts between size increase and size decrease controls: */ |
842 | 10 | if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0) |
843 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config"); |
844 | | |
845 | | /* will set the increase possible fields to false later if needed */ |
846 | 10 | cache_ptr->size_increase_possible = true; |
847 | 10 | cache_ptr->flash_size_increase_possible = true; |
848 | 10 | cache_ptr->size_decrease_possible = true; |
849 | | |
850 | 10 | switch (config_ptr->incr_mode) { |
851 | 0 | case H5C_incr__off: |
852 | 0 | cache_ptr->size_increase_possible = false; |
853 | 0 | break; |
854 | | |
855 | 10 | case H5C_incr__threshold: |
856 | 10 | if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) || |
857 | 10 | ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0))) |
858 | 0 | cache_ptr->size_increase_possible = false; |
859 | 10 | break; |
860 | | |
861 | 0 | default: /* should be unreachable */ |
862 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?"); |
863 | 10 | } /* end switch */ |
864 | | |
865 | | /* logically, this is where configuration for flash cache size increases |
866 | | * should go. However, this configuration depends on max_cache_size, so |
867 | | * we wait until the end of the function, when this field is set. |
868 | | */ |
869 | | |
870 | 10 | switch (config_ptr->decr_mode) { |
871 | 0 | case H5C_decr__off: |
872 | 0 | cache_ptr->size_decrease_possible = false; |
873 | 0 | break; |
874 | | |
875 | 0 | case H5C_decr__threshold: |
876 | 0 | if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || |
877 | 0 | (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) |
878 | 0 | cache_ptr->size_decrease_possible = false; |
879 | 0 | break; |
880 | | |
881 | 0 | case H5C_decr__age_out: |
882 | 0 | if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || |
883 | 0 | (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) |
884 | 0 | cache_ptr->size_decrease_possible = false; |
885 | 0 | break; |
886 | | |
887 | 10 | case H5C_decr__age_out_with_threshold: |
888 | 10 | if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || |
889 | 10 | (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || |
890 | 10 | config_ptr->upper_hr_threshold >= 1.0) |
891 | 0 | cache_ptr->size_decrease_possible = false; |
892 | 10 | break; |
893 | | |
894 | 0 | default: /* should be unreachable */ |
895 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?"); |
896 | 10 | } /* end switch */ |
897 | | |
898 | 10 | if (config_ptr->max_size == config_ptr->min_size) { |
899 | 0 | cache_ptr->size_increase_possible = false; |
900 | 0 | cache_ptr->flash_size_increase_possible = false; |
901 | 0 | cache_ptr->size_decrease_possible = false; |
902 | 0 | } /* end if */ |
903 | | |
904 | | /* flash_size_increase_possible is intentionally omitted from the |
905 | | * following: |
906 | | */ |
907 | 10 | cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; |
908 | 10 | cache_ptr->resize_ctl = *config_ptr; |
909 | | |
910 | | /* Resize the cache to the supplied initial value if requested, or as |
911 | | * necessary to force it within the bounds of the current automatic |
912 | | * cache resizing configuration. |
913 | | * |
914 | | * Note that the min_clean_fraction may have changed, so we |
915 | | * go through the exercise even if the current size is within |
916 | | * range and an initial size has not been provided. |
917 | | */ |
918 | 10 | if (cache_ptr->resize_ctl.set_initial_size) |
919 | 10 | new_max_cache_size = cache_ptr->resize_ctl.initial_size; |
920 | 0 | else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size) |
921 | 0 | new_max_cache_size = cache_ptr->resize_ctl.max_size; |
922 | 0 | else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size) |
923 | 0 | new_max_cache_size = cache_ptr->resize_ctl.min_size; |
924 | 0 | else |
925 | 0 | new_max_cache_size = cache_ptr->max_cache_size; |
926 | | |
927 | 10 | new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); |
928 | | |
929 | | /* since new_min_clean_size is of type size_t, we have |
930 | | * |
931 | | * ( 0 <= new_min_clean_size ) |
932 | | * |
933 | | * by definition. |
934 | | */ |
935 | 10 | assert(new_min_clean_size <= new_max_cache_size); |
936 | 10 | assert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); |
937 | 10 | assert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); |
938 | | |
939 | 10 | if (new_max_cache_size < cache_ptr->max_cache_size) |
940 | 10 | cache_ptr->size_decreased = true; |
941 | | |
942 | 10 | cache_ptr->max_cache_size = new_max_cache_size; |
943 | 10 | cache_ptr->min_clean_size = new_min_clean_size; |
944 | | |
945 | 10 | if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) |
946 | | /* this should be impossible... */ |
947 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed"); |
948 | | |
949 | | /* remove excess epoch markers if any */ |
950 | 10 | if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) || |
951 | 10 | (config_ptr->decr_mode == H5C_decr__age_out)) { |
952 | 10 | if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) |
953 | 0 | if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) |
954 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers"); |
955 | 10 | } /* end if */ |
956 | 0 | else if (cache_ptr->epoch_markers_active > 0) { |
957 | 0 | if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) |
958 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers"); |
959 | 0 | } |
960 | | |
961 | | /* configure flash size increase facility. We wait until the |
962 | | * end of the function, as we need the max_cache_size set before |
963 | | * we start to keep things simple. |
964 | | * |
965 | | * If we haven't already ruled out flash cache size increases above, |
966 | | * go ahead and configure it. |
967 | | */ |
968 | 10 | if (cache_ptr->flash_size_increase_possible) { |
969 | 10 | switch (config_ptr->flash_incr_mode) { |
970 | 0 | case H5C_flash_incr__off: |
971 | 0 | cache_ptr->flash_size_increase_possible = false; |
972 | 0 | break; |
973 | | |
974 | 10 | case H5C_flash_incr__add_space: |
975 | 10 | cache_ptr->flash_size_increase_possible = true; |
976 | 10 | cache_ptr->flash_size_increase_threshold = |
977 | 10 | (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); |
978 | 10 | break; |
979 | | |
980 | 0 | default: /* should be unreachable */ |
981 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?"); |
982 | 0 | break; |
983 | 10 | } /* end switch */ |
984 | 10 | } /* end if */ |
985 | | |
986 | 10 | done: |
987 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
988 | 10 | } /* H5C_set_cache_auto_resize_config() */ |
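The sizing arithmetic near the end of this function is just clamping plus two fraction multiplies. A standalone sketch with made-up numbers (1 MiB / 32 MiB bounds, 0.3 min_clean_fraction, 0.25 flash_threshold) showing how the new max cache size, the min clean size, and the flash increase threshold relate:

    /* Standalone sketch of the resize arithmetic (hypothetical values). */
    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t min_size           = 1 * 1024 * 1024;  /* 1 MiB  */
        size_t max_size           = 32 * 1024 * 1024; /* 32 MiB */
        size_t requested          = 4 * 1024 * 1024;  /* initial_size */
        double min_clean_fraction = 0.3;
        double flash_threshold    = 0.25;

        /* clamp the requested size into [min_size, max_size] */
        size_t new_max = requested;
        if (new_max > max_size)
            new_max = max_size;
        else if (new_max < min_size)
            new_max = min_size;

        /* derive the two dependent sizes from the configured fractions */
        size_t new_min_clean = (size_t)((double)new_max * min_clean_fraction);
        size_t flash_trigger = (size_t)((double)new_max * flash_threshold);

        printf("max = %zu, min_clean = %zu, flash threshold = %zu\n",
               new_max, new_min_clean, flash_trigger);
        return 0;
    }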
989 | | |
990 | | /*------------------------------------------------------------------------- |
991 | | * Function: H5C_set_evictions_enabled() |
992 | | * |
993 | | * Purpose: Set cache_ptr->evictions_enabled to the value of the |
994 | | * evictions enabled parameter. |
995 | | * |
996 | | * Return: SUCCEED on success, and FAIL on failure. |
997 | | * |
998 | | *------------------------------------------------------------------------- |
999 | | */ |
1000 | | herr_t |
1001 | | H5C_set_evictions_enabled(H5C_t *cache_ptr, bool evictions_enabled) |
1002 | 10 | { |
1003 | 10 | herr_t ret_value = SUCCEED; /* Return value */ |
1004 | | |
1005 | 10 | FUNC_ENTER_NOAPI(FAIL) |
1006 | | |
1007 | 10 | if (cache_ptr == NULL) |
1008 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry"); |
1009 | | |
1010 | | /* There is no fundamental reason why we should not permit |
1011 | | * evictions to be disabled while automatic resize is enabled. |
1012 | | * However, allowing it would greatly complicate testing |
1013 | | * the feature. Hence the following: |
1014 | | */ |
1015 | 10 | if ((evictions_enabled != true) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || |
1016 | 0 | (cache_ptr->resize_ctl.decr_mode != H5C_decr__off))) |
1017 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled"); |
1018 | | |
1019 | 10 | cache_ptr->evictions_enabled = evictions_enabled; |
1020 | | |
1021 | 10 | done: |
1022 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
1023 | 10 | } /* H5C_set_evictions_enabled() */ |
1024 | | |
1025 | | /*------------------------------------------------------------------------- |
1026 | | * Function: H5C_set_slist_enabled() |
1027 | | * |
1028 | | * Purpose: Enable or disable the slist as directed. |
1029 | | * |
1030 | | * The slist (skip list) is an address ordered list of |
1031 | | * dirty entries in the metadata cache. However, this |
1032 | | * list is only needed during flush and close, where we |
1033 | | * use it to write entries in more or less increasing |
1034 | | * address order. |
1035 | | * |
1036 | | * This function sets up and enables further operations |
1037 | | * on the slist, or disables the slist. This in turn |
1038 | | * allows us to avoid the overhead of maintaining the |
1039 | | * slist when it is not needed. |
1040 | | * |
1041 | | * |
1042 | | * If the slist_enabled parameter is true, the function |
1043 | | * |
1044 | | * 1) Verifies that the slist is empty. |
1045 | | * |
1046 | | * 2) If the populate_slist parameter is true, scans the |
1047 | | * index list, and inserts all dirty entries into the |
1048 | | * slist. |
1049 | | * |
1050 | | * 3) Sets cache_ptr->slist_enabled = true. |
1051 | | * |
1052 | | * |
1053 | | * If the slist_enabled parameter is false, the function |
1054 | | * shuts down the slist: |
1055 | | * |
1056 | | * 1) Tests to see if the slist is empty. If it is, proceeds |
1057 | | * to step 3. |
1058 | | * |
1059 | | * 2) Removes all entries from the slist. |
1060 | | * |
1061 | | * 3) Sets cache_ptr->slist_enabled = false. |
1062 | | * |
1063 | | * Note that the populate_slist parameter is ignored if |
1064 | | * the slist_enabled parameter is false. |
1065 | | * |
1066 | | * Return: SUCCEED on success, and FAIL on failure. |
1067 | | * |
1068 | | *------------------------------------------------------------------------- |
1069 | | */ |
1070 | | herr_t |
1071 | | H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool populate_slist) |
1072 | 10 | { |
1073 | 10 | H5C_cache_entry_t *entry_ptr; |
1074 | 10 | herr_t ret_value = SUCCEED; /* Return value */ |
1075 | | |
1076 | 10 | FUNC_ENTER_NOAPI(FAIL) |
1077 | | |
1078 | 10 | if (cache_ptr == NULL) |
1079 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry"); |
1080 | | |
1081 | 10 | if (slist_enabled) { |
1082 | 10 | if (cache_ptr->slist_enabled) |
1083 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?"); |
1084 | 10 | if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) |
1085 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?"); |
1086 | | |
1087 | | /* set cache_ptr->slist_enabled to true so that the slist |
1088 | | * maintenance macros will be enabled. |
1089 | | */ |
1090 | 10 | cache_ptr->slist_enabled = true; |
1091 | | |
1092 | 10 | if (populate_slist) { |
1093 | | /* scan the index list and insert all dirty entries in the slist */ |
1094 | 10 | entry_ptr = cache_ptr->il_head; |
1095 | 123 | while (entry_ptr != NULL) { |
1096 | 113 | if (entry_ptr->is_dirty) |
1097 | 0 | H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); |
1098 | 113 | entry_ptr = entry_ptr->il_next; |
1099 | 113 | } |
1100 | | |
1101 | | /* we don't maintain a dirty index len, so we can't do a cross |
1102 | | * check against it. Note that there is no point in cross checking |
1103 | | * against the dirty LRU size, as the dirty LRU may not be maintained, |
1104 | | * and in any case, there is no requirement that all dirty entries |
1105 | | * will reside on the dirty LRU. |
1106 | | */ |
1107 | 10 | assert(cache_ptr->dirty_index_size == cache_ptr->slist_size); |
1108 | 10 | } |
1109 | 10 | } |
1110 | 0 | else { /* take down the skip list */ |
1111 | 0 | if (!cache_ptr->slist_enabled) |
1112 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?"); |
1113 | | |
1114 | 0 | if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) |
1115 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?"); |
1116 | | |
1117 | 0 | cache_ptr->slist_enabled = false; |
1118 | 0 |
1119 | 0 | assert(0 == cache_ptr->slist_len); |
1120 | 0 | assert(0 == cache_ptr->slist_size); |
1121 | 0 | } |
1122 | | |
1123 | 10 | done: |
1124 | 10 | FUNC_LEAVE_NOAPI(ret_value) |
1125 | 10 | } /* H5C_set_slist_enabled() */ |
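When the slist is enabled with populate_slist set, the code walks the index list and inserts every dirty entry into the address-ordered skip list. The sketch below mirrors that step with hypothetical types and a plain sorted linked list standing in for the skip list:

    /* Standalone sketch: collect dirty entries in increasing-address order. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct node {
        uint64_t     addr;
        int          is_dirty;
        struct node *next; /* link in the sorted "slist" */
    } node_t;

    /* insert while keeping the list sorted by addr */
    static void slist_insert(node_t **head, node_t *n)
    {
        while (*head && (*head)->addr < n->addr)
            head = &(*head)->next;
        n->next = *head;
        *head   = n;
    }

    int main(void)
    {
        node_t  entries[] = {{8192, 1, NULL}, {1024, 0, NULL}, {4096, 1, NULL}};
        node_t *slist     = NULL;

        /* scan the "index list" and insert only the dirty entries */
        for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            if (entries[i].is_dirty)
                slist_insert(&slist, &entries[i]);

        for (node_t *n = slist; n != NULL; n = n->next)
            printf("dirty entry at addr %llu\n", (unsigned long long)n->addr);
        return 0;
    }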
1126 | | |
1127 | | /*------------------------------------------------------------------------- |
1128 | | * Function: H5C_unsettle_ring() |
1129 | | * |
1130 | | * Purpose: Advise the metadata cache that the specified free space |
1131 | | * manager ring is no longer settled (if it was on entry). |
1132 | | * |
1133 | | * If the target free space manager ring is already |
1134 | | * unsettled, do nothing, and return SUCCEED. |
1135 | | * |
1136 | | * If the target free space manager ring is settled, and |
1137 | | * we are not in the process of a file shutdown, mark |
1138 | | * the ring as unsettled, and return SUCCEED. |
1139 | | * |
1140 | | * If the target free space manager is settled, and we |
1141 | | * are in the process of a file shutdown, post an error |
1142 | | * message, and return FAIL. |
1143 | | * |
1144 | | * Return: Non-negative on success/Negative on failure |
1145 | | * |
1146 | | *------------------------------------------------------------------------- |
1147 | | */ |
1148 | | herr_t |
1149 | | H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring) |
1150 | 0 | { |
1151 | 0 | H5C_t *cache_ptr; |
1152 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
1153 | 0 |
1154 | 0 | FUNC_ENTER_NOAPI(FAIL) |
1155 | | |
1156 | | /* Sanity checks */ |
1157 | 0 | assert(f); |
1158 | 0 | assert(f->shared); |
1159 | 0 | assert(f->shared->cache); |
1160 | 0 | assert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring)); |
1161 | 0 | cache_ptr = f->shared->cache; |
1162 | 0 |
1163 | 0 | switch (ring) { |
1164 | 0 | case H5C_RING_RDFSM: |
1165 | 0 | if (cache_ptr->rdfsm_settled) { |
1166 | 0 | if (cache_ptr->close_warning_received) |
1167 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle"); |
1168 | 0 | cache_ptr->rdfsm_settled = false; |
1169 | 0 | } /* end if */ |
1170 | 0 | break; |
1171 | | |
1172 | 0 | case H5C_RING_MDFSM: |
1173 | 0 | if (cache_ptr->mdfsm_settled) { |
1174 | 0 | if (cache_ptr->close_warning_received) |
1175 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle"); |
1176 | 0 | cache_ptr->mdfsm_settled = false; |
1177 | 0 | } /* end if */ |
1178 | 0 | break; |
1179 | | |
1180 | 0 | default: |
1181 | 0 | assert(false); /* this should be un-reachable */ |
1182 | 0 | break; |
1183 | 0 | } /* end switch */ |
1184 | | |
1185 | 0 | done: |
1186 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1187 | 0 | } /* H5C_unsettle_ring() */ |
1188 | | |
1189 | | /*------------------------------------------------------------------------- |
1190 | | * Function: H5C_validate_resize_config() |
1191 | | * |
1192 | | * Purpose: Run a sanity check on the specified sections of the |
1193 | | * provided instance of struct H5C_auto_size_ctl_t. |
1194 | | * |
1195 | | * Do nothing and return SUCCEED if no errors are detected, |
1196 | | * and flag an error and return FAIL otherwise. |
1197 | | * |
1198 | | * Return: Non-negative on success/Negative on failure |
1199 | | * |
1200 | | *------------------------------------------------------------------------- |
1201 | | */ |
1202 | | herr_t |
1203 | | H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) |
1204 | 60 | { |
1205 | 60 | herr_t ret_value = SUCCEED; /* Return value */ |
1206 | | |
1207 | 60 | FUNC_ENTER_NOAPI(FAIL) |
1208 | | |
1209 | 60 | if (config_ptr == NULL) |
1210 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry"); |
1211 | | |
1212 | 60 | if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) |
1213 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version"); |
1214 | | |
1215 | 60 | if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) { |
1216 | 30 | if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE) |
1217 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big"); |
1218 | 30 | if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE) |
1219 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small"); |
1220 | 30 | if (config_ptr->min_size > config_ptr->max_size) |
1221 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size"); |
1222 | 30 | if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) || |
1223 | 30 | (config_ptr->initial_size > config_ptr->max_size))) |
1224 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1225 | 30 | "initial_size must be in the interval [min_size, max_size]"); |
1226 | 30 | if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0)) |
1227 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1228 | 30 | "min_clean_fraction must be in the interval [0.0, 1.0]"); |
1229 | 30 | if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH) |
1230 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small"); |
1231 | 30 | if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH) |
1232 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big"); |
1233 | 30 | } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */ |
1234 | | |
1235 | 60 | if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) { |
1236 | 30 | if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold)) |
1237 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode"); |
1238 | | |
1239 | 30 | if (config_ptr->incr_mode == H5C_incr__threshold) { |
1240 | 30 | if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0)) |
1241 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1242 | 30 | "lower_hr_threshold must be in the range [0.0, 1.0]"); |
1243 | 30 | if (config_ptr->increment < 1.0) |
1244 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0"); |
1245 | | |
1246 | | /* no need to check max_increment, as it is a size_t, |
1247 | | * and thus must be non-negative. |
1248 | | */ |
1249 | 30 | } /* H5C_incr__threshold */ |
1250 | | |
1251 | 30 | switch (config_ptr->flash_incr_mode) { |
1252 | 0 | case H5C_flash_incr__off: |
1253 | | /* nothing to do here */ |
1254 | 0 | break; |
1255 | | |
1256 | 30 | case H5C_flash_incr__add_space: |
1257 | 30 | if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0)) |
1258 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1259 | 30 | "flash_multiple must be in the range [0.1, 10.0]"); |
1260 | 30 | if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0)) |
1261 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1262 | 30 | "flash_threshold must be in the range [0.1, 1.0]"); |
1263 | 30 | break; |
1264 | | |
1265 | 30 | default: |
1266 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode"); |
1267 | 0 | break; |
1268 | 30 | } /* end switch */ |
1269 | 30 | } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ |
1270 | | |
1271 | 60 | if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { |
1272 | 30 | if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && |
1273 | 30 | (config_ptr->decr_mode != H5C_decr__age_out) && |
1274 | 30 | (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) |
1275 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode"); |
1276 | | |
1277 | 30 | if (config_ptr->decr_mode == H5C_decr__threshold) { |
1278 | 0 | if (config_ptr->upper_hr_threshold > 1.0) |
1279 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0"); |
1280 | 0 | if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) |
1281 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]"); |
1282 | | |
1283 | | /* no need to check max_decrement as it is a size_t |
1284 | | * and thus must be non-negative. |
1285 | | */ |
1286 | 0 | } /* H5C_decr__threshold */ |
1287 | | |
1288 | 30 | if ((config_ptr->decr_mode == H5C_decr__age_out) || |
1289 | 30 | (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { |
1290 | 30 | if (config_ptr->epochs_before_eviction < 1) |
1291 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive"); |
1292 | 30 | if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) |
1293 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big"); |
1294 | 30 | if (config_ptr->apply_empty_reserve && |
1295 | 30 | (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) |
1296 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]"); |
1297 | | |
1298 | | /* no need to check max_decrement as it is a size_t |
1299 | | * and thus must be non-negative. |
1300 | | */ |
1301 | 30 | } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ |
1302 | | |
1303 | 30 | if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) |
1304 | 30 | if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) |
1305 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, |
1306 | 30 | "upper_hr_threshold must be in the interval [0.0, 1.0]"); |
1307 | 30 | } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ |
1308 | | |
1309 | 60 | if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { |
1310 | 30 | if ((config_ptr->incr_mode == H5C_incr__threshold) && |
1311 | 30 | ((config_ptr->decr_mode == H5C_decr__threshold) || |
1312 | 30 | (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) && |
1313 | 30 | (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold)) |
1314 | 0 | HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config"); |
1315 | 30 | } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */ |
1316 | | |
1317 | 60 | done: |
1318 | 60 | FUNC_LEAVE_NOAPI(ret_value) |
1319 | 60 | } /* H5C_validate_resize_config() */ |
1320 | | |
1321 | | /*------------------------------------------------------------------------- |
1322 | | * Function: H5C_cork |
1323 | | * |
1324 | | * Purpose: To cork/uncork/get cork status of an object depending on "action": |
1325 | | * H5C__SET_CORK: |
1326 | | * To cork the object |
1327 | | * Return error if the object is already corked |
1328 | | * H5C__UNCORK: |
1329 | | * To uncork the object |
1330 | | * Return error if the object is not corked |
1331 | | * H5C__GET_CORKED: |
1332 | | * To retrieve the cork status of an object in |
1333 | | * the parameter "corked" |
1334 | | * |
1335 | | * Return: Success: Non-negative |
1336 | | * Failure: Negative |
1337 | | * |
1338 | | *------------------------------------------------------------------------- |
1339 | | */ |
1340 | | herr_t |
1341 | | H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, bool *corked) |
1342 | 0 | { |
1343 | 0 | H5C_tag_info_t *tag_info = NULL; |
1344 | 0 | herr_t ret_value = SUCCEED; |
1345 | 0 |
1346 | 0 | FUNC_ENTER_NOAPI_NOINIT |
1347 | | |
1348 | | /* Assertions */ |
1349 | 0 | assert(cache_ptr != NULL); |
1350 | 0 | assert(H5_addr_defined(obj_addr)); |
1351 | 0 | assert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED); |
1352 | | |
1353 | | /* Search the list of corked object addresses in the cache */ |
1354 | 0 | HASH_FIND(hh, cache_ptr->tag_list, &obj_addr, sizeof(haddr_t), tag_info); |
1355 | 0 |
1356 | 0 | if (H5C__GET_CORKED == action) { |
1357 | 0 | assert(corked); |
1358 | 0 | if (tag_info != NULL && tag_info->corked) |
1359 | 0 | *corked = true; |
1360 | 0 | else |
1361 | 0 | *corked = false; |
1362 | 0 | } |
1363 | 0 | else { |
1364 | | /* Sanity check */ |
1365 | 0 | assert(H5C__SET_CORK == action || H5C__UNCORK == action); |
1366 | | |
1367 | | /* Perform appropriate action */ |
1368 | 0 | if (H5C__SET_CORK == action) { |
1369 | | /* Check if this is the first entry for this tagged object */ |
1370 | 0 | if (NULL == tag_info) { |
1371 | | /* Allocate new tag info struct */ |
1372 | 0 | if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t))) |
1373 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry"); |
1374 | | |
1375 | | /* Set the tag for all entries */ |
1376 | 0 | tag_info->tag = obj_addr; |
1377 | | |
1378 | | /* Insert tag info into hash table */ |
1379 | 0 | HASH_ADD(hh, cache_ptr->tag_list, tag, sizeof(haddr_t), tag_info); |
1380 | 0 | } |
1381 | 0 | else { |
1382 | | /* Check for object already corked */ |
1383 | 0 | if (tag_info->corked) |
1384 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked"); |
1385 | 0 | assert(tag_info->entry_cnt > 0 && tag_info->head); |
1386 | 0 | } |
1387 | | |
1388 | | /* Set the corked status for the entire object */ |
1389 | 0 | tag_info->corked = true; |
1390 | 0 | cache_ptr->num_objs_corked++; |
1391 | 0 | } |
1392 | 0 | else { |
1393 | | /* Sanity check */ |
1394 | 0 | if (NULL == tag_info) |
1395 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "tag info pointer is NULL"); |
1396 | | |
1397 | | /* Check for already uncorked */ |
1398 | 0 | if (!tag_info->corked) |
1399 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked"); |
1400 | | |
1401 | | /* Set the corked status for the entire object */ |
1402 | 0 | tag_info->corked = false; |
1403 | 0 | cache_ptr->num_objs_corked--; |
1404 | | |
1405 | | /* Remove the tag info from the tag list, if there are no more entries with this tag */ |
1406 | 0 | if (0 == tag_info->entry_cnt) { |
1407 | | /* Sanity check */ |
1408 | 0 | assert(NULL == tag_info->head); |
1409 | 0 |
1410 | 0 | HASH_DELETE(hh, cache_ptr->tag_list, tag_info); |
1411 | | |
1412 | | /* Release the tag info */ |
1413 | 0 | tag_info = H5FL_FREE(H5C_tag_info_t, tag_info); |
1414 | 0 | } |
1415 | 0 | else |
1416 | 0 | assert(NULL != tag_info->head); |
1417 | 0 | } |
1418 | 0 | } |
1419 | | |
1420 | 0 | done: |
1421 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1422 | 0 | } /* H5C_cork() */ |
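H5C_cork() keeps one uthash-hashed tag_info record per tagged object and toggles its corked flag. The following standalone sketch (hypothetical set_cork() helper, not the H5C API) reproduces the cork/uncork state checks, again assuming uthash.h is available:

    /* Standalone sketch of cork bookkeeping (assumes uthash.h). */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include "uthash.h"

    typedef struct {
        uint64_t       tag;    /* key: object base address */
        int            corked; /* nonzero while evictions of this object are barred */
        UT_hash_handle hh;
    } tag_info_t;

    static tag_info_t *tag_list = NULL;

    /* return 0 on success, -1 if the request conflicts with the current state */
    static int set_cork(uint64_t obj_addr, int cork)
    {
        tag_info_t *ti = NULL;

        HASH_FIND(hh, tag_list, &obj_addr, sizeof(uint64_t), ti);

        if (cork) {
            if (ti == NULL) { /* first reference to this object: create its record */
                ti = calloc(1, sizeof(*ti));
                if (ti == NULL)
                    return -1;
                ti->tag = obj_addr;
                HASH_ADD(hh, tag_list, tag, sizeof(uint64_t), ti);
            }
            else if (ti->corked)
                return -1; /* already corked */
            ti->corked = 1;
        }
        else {
            if (ti == NULL || !ti->corked)
                return -1; /* not corked */
            ti->corked = 0;
        }
        return 0;
    }

    int main(void)
    {
        printf("cork:   %d\n", set_cork(4096, 1)); /* 0  */
        printf("recork: %d\n", set_cork(4096, 1)); /* -1 */
        printf("uncork: %d\n", set_cork(4096, 0)); /* 0  */
        return 0;
    }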