Line | Count | Source |
1 | | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * |
2 | | * Copyright by The HDF Group. * |
3 | | * All rights reserved. * |
4 | | * * |
5 | | * This file is part of HDF5. The full HDF5 copyright notice, including * |
6 | | * terms governing use, modification, and redistribution, is contained in * |
7 | | * the LICENSE file, which can be found at the root of the source code * |
8 | | * distribution tree, or in https://www.hdfgroup.org/licenses. * |
9 | | * If you do not have access to either file, you may request a copy from * |
10 | | * help@hdfgroup.org. * |
11 | | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
12 | | |
13 | | /*------------------------------------------------------------------------- |
14 | | * |
15 | | * Created: H5Cimage.c |
16 | | * |
17 | | * Purpose: Functions in this file are specific to the implementation |
18 | | * of the metadata cache image feature. |
19 | | * |
20 | | *------------------------------------------------------------------------- |
21 | | */ |
22 | | |
23 | | /****************/ |
24 | | /* Module Setup */ |
25 | | /****************/ |
26 | | |
27 | | #include "H5Cmodule.h" /* This source code file is part of the H5C module */ |
28 | | #define H5F_FRIEND /*suppress error about including H5Fpkg */ |
29 | | |
30 | | /***********/ |
31 | | /* Headers */ |
32 | | /***********/ |
33 | | #include "H5private.h" /* Generic Functions */ |
34 | | #ifdef H5_HAVE_PARALLEL |
35 | | #define H5AC_FRIEND /*suppress error about including H5ACpkg */ |
36 | | #include "H5ACpkg.h" /* Metadata cache */ |
37 | | #endif /* H5_HAVE_PARALLEL */ |
38 | | #include "H5Cpkg.h" /* Cache */ |
39 | | #include "H5Eprivate.h" /* Error handling */ |
40 | | #include "H5Fpkg.h" /* Files */ |
41 | | #include "H5FDprivate.h" /* File drivers */ |
42 | | #include "H5FLprivate.h" /* Free Lists */ |
43 | | #include "H5MMprivate.h" /* Memory management */ |
44 | | |
45 | | /****************/ |
46 | | /* Local Macros */ |
47 | | /****************/ |
#if H5C_DO_MEMORY_SANITY_CHECKS
/* Number of guard bytes appended to image allocations, and the known
 * pattern written into them, when memory sanity checking is enabled.
 */
#define H5C_IMAGE_EXTRA_SPACE 8
#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
#else /* H5C_DO_MEMORY_SANITY_CHECKS */
#define H5C_IMAGE_EXTRA_SPACE 0
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */

/* Cache image buffer components, on disk */
/* 4-byte magic number identifying a metadata cache image block, plus the
 * (only) on-disk version of the block format.
 */
#define H5C__MDCI_BLOCK_SIGNATURE "MDCI"
#define H5C__MDCI_BLOCK_SIGNATURE_LEN 4
#define H5C__MDCI_BLOCK_VERSION_0 0

/* Metadata cache image header flags -- max 8 bits */
#define H5C__MDCI_HEADER_HAVE_RESIZE_STATUS 0x01

/* Metadata cache image entry flags -- max 8 bits */
#define H5C__MDCI_ENTRY_DIRTY_FLAG 0x01
#define H5C__MDCI_ENTRY_IN_LRU_FLAG 0x02
#define H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG 0x04
#define H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG 0x08

/* Limits on flush dependency values, stored in 16-bit values on disk */
#define H5C__MDCI_MAX_FD_CHILDREN USHRT_MAX
#define H5C__MDCI_MAX_FD_PARENTS USHRT_MAX

/* Maximum ring allowed in image */
#define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM
75 | | |
76 | | /*********************************************************************** |
77 | | * |
78 | | * Stats collection macros |
79 | | * |
80 | | * The following macros must handle stats collection when collection |
81 | | * is enabled, and evaluate to the empty string when it is not. |
82 | | * |
83 | | ***********************************************************************/ |
#if H5C_COLLECT_CACHE_STATS
/* Bump the count of cache images created */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)                                                  \
    do {                                                                                                     \
        (cache_ptr)->images_created++;                                                                       \
    } while (0)
/* Bump the count of cache image blocks read from the file */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)                                                    \
    do {                                                                                                     \
        /* make sure image len is still good */                                                              \
        assert((cache_ptr)->image_len > 0);                                                                  \
        (cache_ptr)->images_read++;                                                                          \
    } while (0)
/* Bump the count of cache images loaded into the cache, and remember the
 * size of the most recently loaded image (image_len is reset after load)
 */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)                                                    \
    do {                                                                                                     \
        /* make sure image len is still good */                                                              \
        assert((cache_ptr)->image_len > 0);                                                                  \
        (cache_ptr)->images_loaded++;                                                                        \
        (cache_ptr)->last_image_size = (cache_ptr)->image_len;                                               \
    } while (0)
#else /* H5C_COLLECT_CACHE_STATS */
/* Stats collection disabled -- all three macros expand to nothing */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
107 | | |
108 | | /******************/ |
109 | | /* Local Typedefs */ |
110 | | /******************/ |
111 | | |
112 | | /**************************************************************************** |
113 | | * |
114 | | * structure H5C_recon_entry_t |
115 | | * |
116 | | * This structure provides a temporary uthash table to detect duplicate |
117 | | * addresses. Its fields are as follows: |
118 | | * |
119 | | * addr: file offset of a metadata entry. Entries are added to this |
 *              list when they are decoded. If an entry already exists
 *              in the table, an error will occur.
122 | | * |
123 | | * entry_ptr: pointer to the cache entry, for expunging in failure cleanup. |
124 | | * |
125 | | * hh: uthash hash table handle |
126 | | * |
127 | | ****************************************************************************/ |
typedef struct H5C_recon_entry_t {
    haddr_t addr; /* The file address as key */
    H5C_cache_entry_t *entry_ptr; /* Cache entry decoded at 'addr'; kept so failure cleanup can expunge it */
    UT_hash_handle hh; /* Hash table handle */
} H5C_recon_entry_t;
133 | | |
134 | | /********************/ |
135 | | /* Local Prototypes */ |
136 | | /********************/ |
137 | | |
138 | | /* Helper routines */ |
139 | | static size_t H5C__cache_image_block_entry_header_size(const H5F_t *f); |
140 | | static size_t H5C__cache_image_block_header_size(const H5F_t *f); |
141 | | static herr_t H5C__check_for_duplicates(H5C_cache_entry_t *pf_entry_ptr, H5C_recon_entry_t **recon_table_ptr); |
142 | | static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, |
143 | | size_t buf_size); |
144 | | #ifndef NDEBUG /* only used in assertions */ |
145 | | static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, |
146 | | unsigned entry_num); |
147 | | #endif |
148 | | static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); |
149 | | static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num); |
150 | | static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr); |
151 | | static void H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, |
152 | | uint32_t fd_height); |
153 | | static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr); |
154 | | static herr_t H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr); |
155 | | static herr_t H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr); |
156 | | static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, |
157 | | const uint8_t **buf); |
158 | | static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, bool create); |
159 | | static herr_t H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr); |
160 | | static herr_t H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr); |
161 | | static herr_t H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr); |
162 | | static herr_t H5C__free_image_entries_array(H5C_t *cache_ptr); |
163 | | |
164 | | /*********************/ |
165 | | /* Package Variables */ |
166 | | /*********************/ |
167 | | |
168 | | /* Declare a free list to manage H5C_cache_entry_t objects */ |
169 | | H5FL_DEFINE(H5C_cache_entry_t); |
170 | | |
171 | | /*****************************/ |
172 | | /* Library Private Variables */ |
173 | | /*****************************/ |
174 | | |
175 | | /*******************/ |
176 | | /* Local Variables */ |
177 | | /*******************/ |
178 | | |
179 | | /*------------------------------------------------------------------------- |
180 | | * Function: H5C_cache_image_pending() |
181 | | * |
 * Purpose:     Tests to see if the load of a metadata cache image
 *              is pending (i.e. will be executed on the next
 *              protect or insert)
185 | | * |
186 | | * Returns true if a cache image load is pending, and false |
187 | | * if not. Throws an assertion failure on error. |
188 | | * |
189 | | * Return: true if a cache image load is pending, and false otherwise. |
190 | | * |
191 | | *------------------------------------------------------------------------- |
192 | | */ |
193 | | bool |
194 | | H5C_cache_image_pending(const H5C_t *cache_ptr) |
195 | 0 | { |
196 | 0 | bool ret_value = true; /* Return value */ |
197 | |
|
198 | 0 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
199 | | |
200 | | /* Sanity checks */ |
201 | 0 | assert(cache_ptr); |
202 | |
|
203 | 0 | ret_value = (cache_ptr->load_image && !cache_ptr->image_loaded); |
204 | |
|
205 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
206 | 0 | } /* H5C_cache_image_pending() */ |
207 | | |
208 | | /*------------------------------------------------------------------------- |
209 | | * Function: H5C_cache_image_status() |
210 | | * |
211 | | * Purpose: Examine the metadata cache associated with the supplied |
212 | | * instance of H5F_t to determine whether the load of a |
213 | | * cache image has either been queued or executed, and if |
214 | | * construction of a cache image has been requested. |
215 | | * |
 *              This done, it sets *load_ci_ptr to true if a cache image
217 | | * has either been loaded or a load has been requested, and |
218 | | * to false otherwise. |
219 | | * |
220 | | * Similarly, set *write_ci_ptr to true if construction of |
221 | | * a cache image has been requested, and to false otherwise. |
222 | | * |
223 | | * Return: SUCCEED on success, and FAIL on failure. |
224 | | * |
225 | | *------------------------------------------------------------------------- |
226 | | */ |
227 | | herr_t |
228 | | H5C_cache_image_status(H5F_t *f, bool *load_ci_ptr, bool *write_ci_ptr) |
229 | 516 | { |
230 | 516 | H5C_t *cache_ptr; |
231 | | |
232 | 516 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
233 | | |
234 | | /* Sanity checks */ |
235 | 516 | assert(f); |
236 | 516 | assert(f->shared); |
237 | 516 | cache_ptr = f->shared->cache; |
238 | 516 | assert(cache_ptr); |
239 | 516 | assert(load_ci_ptr); |
240 | 516 | assert(write_ci_ptr); |
241 | | |
242 | 516 | *load_ci_ptr = cache_ptr->load_image || cache_ptr->image_loaded; |
243 | 516 | *write_ci_ptr = cache_ptr->image_ctl.generate_image; |
244 | | |
245 | 516 | FUNC_LEAVE_NOAPI(SUCCEED) |
246 | 516 | } /* H5C_cache_image_status() */ |
247 | | |
248 | | /*------------------------------------------------------------------------- |
249 | | * Function: H5C__construct_cache_image_buffer() |
250 | | * |
251 | | * Purpose: Allocate a buffer of size cache_ptr->image_len, and |
252 | | * load it with an image of the metadata cache image block. |
253 | | * |
254 | | * Note that by the time this function is called, the cache |
255 | | * should have removed all entries from its data structures. |
256 | | * |
257 | | * Return: SUCCEED on success, and FAIL on failure. |
258 | | * |
259 | | *------------------------------------------------------------------------- |
260 | | */ |
static herr_t
H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr)
{
    uint8_t *p; /* Pointer into image buffer */
    uint32_t chksum;
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks -- by this point the cache must be empty (index_len == 0),
     * the image entries array must be populated, and the image sizes computed.
     */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->image_ctl.generate_image);
    assert(cache_ptr->num_entries_in_image > 0);
    assert(cache_ptr->index_len == 0);
    assert(cache_ptr->image_data_len > 0);
    assert(cache_ptr->image_data_len <= cache_ptr->image_len);

    /* Allocate the buffer in which to construct the cache image block.
     * Note the buffer is one byte larger than image_len -- the same
     * image_len + 1 size is passed as buf_size to the decode sanity
     * check below.
     */
    if (NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer");

    /* Construct the cache image block header image */
    p = (uint8_t *)cache_ptr->image_buffer;
    if (H5C__encode_cache_image_header(f, cache_ptr, &p) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "header image construction failed");
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);

    /* Construct the cache entry images, in image-entries-array order */
    for (u = 0; u < cache_ptr->num_entries_in_image; u++)
        if (H5C__encode_cache_image_entry(f, cache_ptr, &p, u) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "entry image construction failed");
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);

    /* Construct the adaptive resize status image -- not yet */

    /* Compute the checksum and encode.  The checksum covers everything in
     * the image before the checksum field itself (image_data_len minus
     * the H5F_SIZEOF_CHKSUM bytes the checksum occupies).
     */
    chksum = H5_checksum_metadata(cache_ptr->image_buffer,
                                  (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
    UINT32ENCODE(p, chksum);
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) == cache_ptr->image_data_len);
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) <= cache_ptr->image_len);

#ifndef NDEBUG
    /* validate the metadata cache image we just constructed by decoding it
     * and comparing the result with the original data.
     *
     * All memory allocated in this debug block (the fake cache, its image
     * entries array, and each decoded entry's buffers) is freed before the
     * block exits, so no allocations leak into release paths.
     */
    {
        uint32_t old_chksum;
        const uint8_t *q;
        H5C_t *fake_cache_ptr = NULL;
        unsigned v;
        herr_t status; /* Status from decoding */

        fake_cache_ptr = (H5C_t *)H5MM_malloc(sizeof(H5C_t));
        assert(fake_cache_ptr);

        /* needed for sanity checks */
        fake_cache_ptr->image_len = cache_ptr->image_len;
        q = (const uint8_t *)cache_ptr->image_buffer;
        status = H5C__decode_cache_image_header(f, fake_cache_ptr, &q, cache_ptr->image_len + 1);
        assert(status >= 0);

        assert(NULL != p);
        assert(fake_cache_ptr->num_entries_in_image == cache_ptr->num_entries_in_image);

        /* +1 entry of slack in the array -- presumably defensive; only
         * num_entries_in_image slots are used below -- TODO confirm.
         */
        fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_malloc(
            sizeof(H5C_image_entry_t) * (size_t)(fake_cache_ptr->num_entries_in_image + 1));
        assert(fake_cache_ptr->image_entries);

        for (u = 0; u < fake_cache_ptr->num_entries_in_image; u++) {
            fake_cache_ptr->image_entries[u].image_ptr = NULL;

            /* touch up f->shared->cache to satisfy sanity checks... */
            f->shared->cache = fake_cache_ptr;
            status = H5C__decode_cache_image_entry(f, fake_cache_ptr, &q, u);
            assert(status >= 0);

            /* ...and then return f->shared->cache to its correct value */
            f->shared->cache = cache_ptr;

            /* verify expected contents */
            assert(cache_ptr->image_entries[u].addr == fake_cache_ptr->image_entries[u].addr);
            assert(cache_ptr->image_entries[u].size == fake_cache_ptr->image_entries[u].size);
            assert(cache_ptr->image_entries[u].type_id == fake_cache_ptr->image_entries[u].type_id);
            assert(cache_ptr->image_entries[u].lru_rank == fake_cache_ptr->image_entries[u].lru_rank);
            assert(cache_ptr->image_entries[u].is_dirty == fake_cache_ptr->image_entries[u].is_dirty);
            /* don't check image_fd_height as it is not stored in
             * the metadata cache image block.
             */
            assert(cache_ptr->image_entries[u].fd_child_count ==
                   fake_cache_ptr->image_entries[u].fd_child_count);
            assert(cache_ptr->image_entries[u].fd_dirty_child_count ==
                   fake_cache_ptr->image_entries[u].fd_dirty_child_count);
            assert(cache_ptr->image_entries[u].fd_parent_count ==
                   fake_cache_ptr->image_entries[u].fd_parent_count);

            for (v = 0; v < cache_ptr->image_entries[u].fd_parent_count; v++)
                assert(cache_ptr->image_entries[u].fd_parent_addrs[v] ==
                       fake_cache_ptr->image_entries[u].fd_parent_addrs[v]);

            /* free the fd_parent_addrs array if it exists */
            if (fake_cache_ptr->image_entries[u].fd_parent_addrs) {
                assert(fake_cache_ptr->image_entries[u].fd_parent_count > 0);
                fake_cache_ptr->image_entries[u].fd_parent_addrs =
                    (haddr_t *)H5MM_xfree(fake_cache_ptr->image_entries[u].fd_parent_addrs);
                fake_cache_ptr->image_entries[u].fd_parent_count = 0;
            } /* end if */
            else
                assert(fake_cache_ptr->image_entries[u].fd_parent_count == 0);

            /* the decoded entry image must match the encoded one byte-for-byte */
            assert(cache_ptr->image_entries[u].image_ptr);
            assert(fake_cache_ptr->image_entries[u].image_ptr);
            assert(!memcmp(cache_ptr->image_entries[u].image_ptr, fake_cache_ptr->image_entries[u].image_ptr,
                           cache_ptr->image_entries[u].size));

            fake_cache_ptr->image_entries[u].image_ptr =
                H5MM_xfree(fake_cache_ptr->image_entries[u].image_ptr);
        } /* end for */

        /* after decoding all entries, q must sit exactly at the checksum field */
        assert((size_t)(q - (const uint8_t *)cache_ptr->image_buffer) ==
               cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM);

        /* compute the checksum */
        old_chksum = chksum;
        chksum = H5_checksum_metadata(cache_ptr->image_buffer,
                                      (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
        assert(chksum == old_chksum);

        fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries);
        fake_cache_ptr = (H5C_t *)H5MM_xfree(fake_cache_ptr);
    } /* end block */
#endif

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__construct_cache_image_buffer() */
402 | | |
403 | | /*------------------------------------------------------------------------- |
404 | | * Function: H5C__generate_cache_image() |
405 | | * |
406 | | * Purpose: Generate the cache image and write it to the file, if |
407 | | * directed. |
408 | | * |
409 | | * Return: SUCCEED on success, and FAIL on failure. |
410 | | * |
411 | | *------------------------------------------------------------------------- |
412 | | */ |
413 | | herr_t |
414 | | H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr) |
415 | 0 | { |
416 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
417 | |
|
418 | 0 | FUNC_ENTER_PACKAGE |
419 | | |
420 | | /* Sanity checks */ |
421 | 0 | assert(f); |
422 | 0 | assert(f->shared); |
423 | 0 | assert(cache_ptr == f->shared->cache); |
424 | 0 | assert(cache_ptr); |
425 | | |
426 | | /* Construct cache image */ |
427 | 0 | if (H5C__construct_cache_image_buffer(f, cache_ptr) < 0) |
428 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't create metadata cache image"); |
429 | | |
430 | | /* Free image entries array */ |
431 | 0 | if (H5C__free_image_entries_array(cache_ptr) < 0) |
432 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't free image entries array"); |
433 | | |
434 | | /* Write cache image block if so configured */ |
435 | 0 | if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK) { |
436 | 0 | if (H5C__write_cache_image(f, cache_ptr) < 0) |
437 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write metadata cache image block to file"); |
438 | | |
439 | 0 | H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr); |
440 | 0 | } /* end if */ |
441 | | |
442 | | /* Free cache image buffer */ |
443 | 0 | assert(cache_ptr->image_buffer); |
444 | 0 | cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer); |
445 | |
|
446 | 0 | done: |
447 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
448 | 0 | } /* H5C__generate_cache_image() */ |
449 | | |
450 | | /*------------------------------------------------------------------------- |
451 | | * Function: H5C__free_image_entries_array |
452 | | * |
453 | | * Purpose: If the image entries array exists, free the image |
454 | | * associated with each entry, and then free the image |
455 | | * entries array proper. |
456 | | * |
457 | | * Note that by the time this function is called, the cache |
458 | | * should have removed all entries from its data structures. |
459 | | * |
460 | | * Return: SUCCEED on success, and FAIL on failure. |
461 | | * |
462 | | *------------------------------------------------------------------------- |
463 | | */ |
464 | | static herr_t |
465 | | H5C__free_image_entries_array(H5C_t *cache_ptr) |
466 | 0 | { |
467 | 0 | FUNC_ENTER_PACKAGE_NOERR |
468 | | |
469 | | /* Sanity checks */ |
470 | 0 | assert(cache_ptr); |
471 | 0 | assert(cache_ptr->close_warning_received); |
472 | 0 | assert(cache_ptr->image_ctl.generate_image); |
473 | 0 | assert(cache_ptr->index_len == 0); |
474 | | |
475 | | /* Check for entries to free */ |
476 | 0 | if (cache_ptr->image_entries != NULL) { |
477 | 0 | unsigned u; /* Local index variable */ |
478 | |
|
479 | 0 | for (u = 0; u < cache_ptr->num_entries_in_image; u++) { |
480 | 0 | H5C_image_entry_t *ie_ptr; /* Image entry to release */ |
481 | | |
482 | | /* Get pointer to image entry */ |
483 | 0 | ie_ptr = &(cache_ptr->image_entries[u]); |
484 | | |
485 | | /* Sanity checks */ |
486 | 0 | assert(ie_ptr); |
487 | 0 | assert(ie_ptr->image_ptr); |
488 | | |
489 | | /* Free the parent addrs array if appropriate */ |
490 | 0 | if (ie_ptr->fd_parent_addrs) { |
491 | 0 | assert(ie_ptr->fd_parent_count > 0); |
492 | |
|
493 | 0 | ie_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(ie_ptr->fd_parent_addrs); |
494 | 0 | } /* end if */ |
495 | 0 | else |
496 | 0 | assert(ie_ptr->fd_parent_count == 0); |
497 | | |
498 | | /* Free the image */ |
499 | 0 | ie_ptr->image_ptr = H5MM_xfree(ie_ptr->image_ptr); |
500 | 0 | } /* end for */ |
501 | | |
502 | | /* Free the image entries array */ |
503 | 0 | cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(cache_ptr->image_entries); |
504 | 0 | } /* end if */ |
505 | |
|
506 | 0 | FUNC_LEAVE_NOAPI(SUCCEED) |
507 | 0 | } /* H5C__free_image_entries_array() */ |
508 | | |
509 | | /*------------------------------------------------------------------------- |
510 | | * Function: H5C__get_cache_image_config |
511 | | * |
512 | | * Purpose: Copy the current configuration for cache image generation |
513 | | * on file close into the instance of H5C_cache_image_ctl_t |
514 | | * pointed to by config_ptr. |
515 | | * |
516 | | * Return: SUCCEED on success, and FAIL on failure. |
517 | | * |
518 | | *------------------------------------------------------------------------- |
519 | | */ |
520 | | herr_t |
521 | | H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) |
522 | 0 | { |
523 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
524 | |
|
525 | 0 | FUNC_ENTER_PACKAGE |
526 | |
|
527 | 0 | if (cache_ptr == NULL) |
528 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry"); |
529 | 0 | if (config_ptr == NULL) |
530 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad config_ptr on entry"); |
531 | | |
532 | 0 | *config_ptr = cache_ptr->image_ctl; |
533 | |
|
534 | 0 | done: |
535 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
536 | 0 | } /* H5C__get_cache_image_config() */ |
537 | | |
538 | | /*------------------------------------------------------------------------- |
539 | | * Function: H5C__read_cache_image |
540 | | * |
541 | | * Purpose: Load the metadata cache image from the specified location |
542 | | * in the file, and return it in the supplied buffer. |
543 | | * |
544 | | * Return: Non-negative on success/Negative on failure |
545 | | * |
546 | | *------------------------------------------------------------------------- |
547 | | */ |
static herr_t
H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks -- the image address/length must be known and the
     * destination buffer already allocated by the caller.
     */
    assert(f);
    assert(cache_ptr);
    assert(H5_addr_defined(cache_ptr->image_addr));
    assert(cache_ptr->image_len > 0);
    assert(cache_ptr->image_buffer);

#ifdef H5_HAVE_PARALLEL
    /* In parallel, only rank 0 performs the file read; the image is then
     * distributed to all other ranks via MPI_Bcast.  If aux_ptr is NULL
     * the file is being accessed serially even in a parallel build.
     */
    {
        H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
        int mpi_result;

        if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) {
#endif /* H5_HAVE_PARALLEL */

            /* Read the buffer (if serial access, or rank 0 of parallel access) */
            /* NOTE: if this block read is being performed on rank 0 only, throwing
             * an error here will cause other ranks to hang in the following MPI_Bcast.
             */
            if (H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len,
                               cache_ptr->image_buffer) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block");

            H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr);

#ifdef H5_HAVE_PARALLEL
            if (aux_ptr) {
                /* Broadcast cache image */
                if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len,
                                                           MPI_BYTE, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
            } /* end if */
        } /* end if */
        else if (aux_ptr) {
            /* Retrieve the contents of the metadata cache image from process 0
             * (this matching MPI_Bcast is the receive side of the broadcast above)
             */
            if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len,
                                                       MPI_BYTE, 0, aux_ptr->mpi_comm)))
                HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", mpi_result)
        } /* end else-if */
    } /* end block */
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__read_cache_image() */
600 | | |
601 | | /*------------------------------------------------------------------------- |
602 | | * Function: H5C__load_cache_image |
603 | | * |
604 | | * Purpose: Read the cache image superblock extension message and |
605 | | * delete it if so directed. |
606 | | * |
607 | | * Then load the cache image block at the specified location, |
608 | | * decode it, and insert its contents into the metadata |
609 | | * cache. |
610 | | * |
611 | | * Return: Non-negative on success/Negative on failure |
612 | | * |
613 | | *------------------------------------------------------------------------- |
614 | | */ |
herr_t
H5C__load_cache_image(H5F_t *f)
{
    H5C_t *cache_ptr;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    cache_ptr = f->shared->cache;
    assert(cache_ptr);

    /* If the image address is defined, load the image, decode it,
     * and insert its contents into the metadata cache.
     *
     * Note that under normal operating conditions, it is an error if the
     * image address is HADDR_UNDEF. However, to facilitate testing,
     * we allow this special value of the image address which means that
     * no image exists, and that the load operation should be skipped
     * silently.
     */
    if (H5_addr_defined(cache_ptr->image_addr)) {
        /* Sanity checks -- length must be set and no buffer may already exist */
        assert(cache_ptr->image_len > 0);
        assert(cache_ptr->image_buffer == NULL);

        /* Allocate space for the image.  One extra byte beyond image_len
         * is allocated -- matching the image_len + 1 buf_size used by the
         * decode sanity checking in H5C__construct_cache_image_buffer().
         */
        if (NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer");

        /* Load the image from file (rank 0 reads + broadcast when parallel) */
        if (H5C__read_cache_image(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block");

        /* Reconstruct cache contents, from image */
        if (H5C__reconstruct_cache_contents(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "Can't reconstruct cache contents from image block");

        /* Free the image buffer */
        cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);

        /* Update stats -- must do this now, as we are about
         * to discard the size of the cache image.
         */
        H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr);

        /* Mark the load complete so it is not attempted again */
        cache_ptr->image_loaded = true;
    } /* end if */

    /* If directed, free the on disk metadata cache image */
    if (cache_ptr->delete_image) {
        if (H5F__super_ext_remove_msg(f, H5O_MDCI_MSG_ID) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                        "can't remove metadata cache image message from superblock extension");

        /* Reset image block values */
        cache_ptr->image_len = 0;
        cache_ptr->image_data_len = 0;
        cache_ptr->image_addr = HADDR_UNDEF;
    } /* end if */

done:
    /* On failure, release the image buffer if one was allocated above.
     * (cache_ptr is always set before any failure path can be taken;
     * H5MM_xfree of a NULL pointer is a no-op.)
     */
    if (ret_value < 0) {
        if (H5_addr_defined(cache_ptr->image_addr))
            cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);
    }

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__load_cache_image() */
686 | | |
687 | | /*------------------------------------------------------------------------- |
688 | | * Function: H5C_load_cache_image_on_next_protect() |
689 | | * |
690 | | * Purpose: Note the fact that a metadata cache image superblock |
691 | | * extension message exists, along with the base address |
692 | | * and length of the metadata cache image block. |
693 | | * |
694 | | * Once this notification is received the metadata cache |
695 | | * image block must be read, decoded, and loaded into the |
696 | | * cache on the next call to H5C_protect(). |
697 | | * |
698 | | * Further, if the file is opened R/W, the metadata cache |
699 | | * image superblock extension message must be deleted from |
700 | | * the superblock extension and the image block freed |
701 | | * |
 *              Conversely, if the file is opened R/O, the metadata
703 | | * cache image superblock extension message and image block |
704 | | * must be left as is. Further, any dirty entries in the |
705 | | * cache image block must be marked as clean to avoid |
706 | | * attempts to write them on file close. |
707 | | * |
708 | | * Return: SUCCEED |
709 | | * |
710 | | *------------------------------------------------------------------------- |
711 | | */ |
712 | | herr_t |
713 | | H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, bool rw) |
714 | 0 | { |
715 | 0 | H5C_t *cache_ptr; |
716 | |
|
717 | 0 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
718 | | |
719 | | /* Sanity checks */ |
720 | 0 | assert(f); |
721 | 0 | assert(f->shared); |
722 | 0 | cache_ptr = f->shared->cache; |
723 | 0 | assert(cache_ptr); |
724 | | |
725 | | /* Set information needed to load cache image */ |
726 | 0 | cache_ptr->image_addr = addr; |
727 | 0 | cache_ptr->image_len = len; |
728 | 0 | cache_ptr->load_image = true; |
729 | 0 | cache_ptr->delete_image = rw; |
730 | |
|
731 | 0 | FUNC_LEAVE_NOAPI(SUCCEED) |
732 | 0 | } /* H5C_load_cache_image_on_next_protect() */ |
733 | | |
734 | | /*------------------------------------------------------------------------- |
735 | | * Function: H5C__image_entry_cmp |
736 | | * |
737 | | * Purpose: Comparison callback for qsort(3) on image entries. |
738 | | * Entries are sorted first by flush dependency height, |
739 | | * and then by LRU rank. |
740 | | * |
741 | | * Note: Entries with a _greater_ flush dependency height should |
742 | | * be sorted earlier than entries with lower heights, since |
743 | | * leafs in the flush dependency graph are at height 0, and their |
744 | | * parents need to be earlier in the image, so that they can |
745 | | * construct their flush dependencies when decoded. |
746 | | * |
747 | | * Return: An integer less than, equal to, or greater than zero if the |
748 | | * first entry is considered to be respectively less than, |
749 | | * equal to, or greater than the second. |
750 | | * |
751 | | *------------------------------------------------------------------------- |
752 | | */ |
753 | | static int |
754 | | H5C__image_entry_cmp(const void *_entry1, const void *_entry2) |
755 | 0 | { |
756 | 0 | const H5C_image_entry_t *entry1 = |
757 | 0 | (const H5C_image_entry_t *)_entry1; /* Pointer to first image entry to compare */ |
758 | 0 | const H5C_image_entry_t *entry2 = |
759 | 0 | (const H5C_image_entry_t *)_entry2; /* Pointer to second image entry to compare */ |
760 | 0 | int ret_value = 0; /* Return value */ |
761 | |
|
762 | 0 | FUNC_ENTER_PACKAGE_NOERR |
763 | | |
764 | | /* Sanity checks */ |
765 | 0 | assert(entry1); |
766 | 0 | assert(entry2); |
767 | |
|
768 | 0 | if (entry1->image_fd_height > entry2->image_fd_height) |
769 | 0 | ret_value = -1; |
770 | 0 | else if (entry1->image_fd_height < entry2->image_fd_height) |
771 | 0 | ret_value = 1; |
772 | 0 | else { |
773 | | /* Sanity check */ |
774 | 0 | assert(entry1->lru_rank >= -1); |
775 | 0 | assert(entry2->lru_rank >= -1); |
776 | |
|
777 | 0 | if (entry1->lru_rank < entry2->lru_rank) |
778 | 0 | ret_value = -1; |
779 | 0 | else if (entry1->lru_rank > entry2->lru_rank) |
780 | 0 | ret_value = 1; |
781 | 0 | } /* end else */ |
782 | |
|
783 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
784 | 0 | } /* H5C__image_entry_cmp() */ |
785 | | |
786 | | /*------------------------------------------------------------------------- |
787 | | * Function: H5C__prep_image_for_file_close |
788 | | * |
789 | | * Purpose: The objective of the call is to allow the metadata cache |
790 | | * to do any preparatory work prior to generation of a |
791 | | * cache image. |
792 | | * |
793 | | * In particular, the cache must |
794 | | * |
795 | | * 1) serialize all its entries, |
796 | | * |
797 | | * 2) compute the size of the metadata cache image, |
798 | | * |
799 | | * 3) allocate space for the metadata cache image, and |
800 | | * |
801 | | * 4) setup the metadata cache image superblock extension |
802 | | * message with the address and size of the metadata |
803 | | * cache image. |
804 | | * |
805 | | * The parallel case is complicated by the fact that |
806 | | * while all metadata caches must contain the same set of |
807 | | * dirty entries, there is no such requirement for clean |
808 | | * entries or the order that entries appear in the LRU. |
809 | | * |
810 | | * Thus, there is no requirement that different processes |
811 | | * will construct cache images of the same size. |
812 | | * |
813 | | * This is not a major issue as long as all processes include |
814 | | * the same set of dirty entries in the cache -- as they |
815 | | * currently do (note that this will change when we implement |
816 | | * the ageout feature). Since only the process zero cache |
817 | | * writes the cache image, all that is necessary is to |
818 | | * broadcast the process zero cache size for use in the |
819 | | * superblock extension messages and cache image block |
820 | | * allocations. |
821 | | * |
822 | | * Note: At present, cache image is disabled in the |
823 | | * parallel case as the new collective metadata write |
824 | | * code must be modified to support cache image. |
825 | | * |
826 | | * Return: Non-negative on success/Negative on failure |
827 | | * |
828 | | *------------------------------------------------------------------------- |
829 | | */ |
herr_t
H5C__prep_image_for_file_close(H5F_t *f, bool *image_generated)
{
    H5C_t  *cache_ptr     = NULL;        /* File's metadata cache */
    haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of any fragment left by H5FD_alloc() */
    hsize_t eoa_frag_size = 0;           /* Size of that fragment (expected 0 unless alignment > 1) */
    herr_t  ret_value     = SUCCEED;     /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(f->shared->cache);
    cache_ptr = f->shared->cache;
    assert(cache_ptr);
    assert(image_generated);

    /* If the file is opened and closed without any access to
     * any group or data set, it is possible that the cache image (if
     * it exists) has not been read yet.  Do this now if required.
     */
    if (cache_ptr->load_image) {
        cache_ptr->load_image = false;
        if (H5C__load_cache_image(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image");
    } /* end if */

    /* Before we start to generate the cache image (if requested), verify
     * that the superblock supports superblock extension messages, and
     * silently cancel any request for a cache image if it does not.
     *
     * Ideally, we would do this when the cache image is requested,
     * but the necessary information is not necessary available at that
     * time -- hence this last minute check.
     *
     * Note that under some error conditions, the superblock will be
     * undefined in this case as well -- if so, assume that the
     * superblock does not support superblock extension messages.
     * Also verify that the file's high_bound is at least release
     * 1.10.x, otherwise cancel the request for a cache image
     */
    if ((NULL == f->shared->sblock) || (f->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) ||
        (f->shared->high_bound < H5F_LIBVER_V110)) {
        /* Reset to the default config, which has generate_image == false */
        H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

        cache_ptr->image_ctl = default_image_ctl;
        assert(!(cache_ptr->image_ctl.generate_image));
    } /* end if */

    /* Generate the cache image, if requested */
    if (cache_ptr->image_ctl.generate_image) {
        /* Create the cache image super block extension message.
         *
         * Note that the base address and length of the metadata cache
         * image are undefined at this point, and thus will have to be
         * updated later.
         *
         * Create the super block extension message now so that space
         * is allocated for it (if necessary) before we allocate space
         * for the cache image block.
         *
         * To simplify testing, do this only if the
         * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
         * cache_ptr->image_ctl.flags.
         */
        if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
            if (H5C__write_cache_image_superblock_msg(f, true) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.");

        /* Serialize the cache */
        if (H5C__serialize_cache(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed");

        /* Scan the cache and record data needed to construct the
         * cache image.  In particular, for each entry we must record:
         *
         * 1) rank in LRU (if entry is in LRU)
         *
         * 2) Whether the entry is dirty prior to flush of
         *    cache just prior to close.
         *
         * 3) Addresses of flush dependency parents (if any).
         *
         * 4) Number of flush dependency children (if any).
         *
         * In passing, also compute the size of the metadata cache
         * image.  With the recent modifications of the free space
         * manager code, this size should be correct.
         */
        if (H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed");
        /* Image block space must not have been allocated yet */
        assert(HADDR_UNDEF == cache_ptr->image_addr);

#ifdef H5_HAVE_PARALLEL
        /* In the parallel case, overwrite the image_len with the
         * value computed by process 0.
         */
        if (cache_ptr->aux_ptr) { /* we have multiple processes */
            int         mpi_result;
            unsigned    p0_image_len;
            H5AC_aux_t *aux_ptr;

            aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
            if (aux_ptr->mpi_rank == 0) {
                /* Rank 0 broadcasts its image length to all other ranks */
                aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
                p0_image_len          = aux_ptr->p0_image_len;

                if (MPI_SUCCESS !=
                    (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)

                assert(p0_image_len == aux_ptr->p0_image_len);
            } /* end if */
            else {
                /* All other ranks receive rank 0's image length */
                if (MPI_SUCCESS !=
                    (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)

                aux_ptr->p0_image_len = p0_image_len;
            } /* end else */

            /* Allocate space for a cache image of size equal to that
             * computed by the process 0.  This may be different from
             * cache_ptr->image_data_len if mpi_rank != 0.  However, since
             * cache image write is suppressed on all processes other than
             * process 0, this doesn't matter.
             *
             * Note that we allocate the cache image directly from the file
             * driver so as to avoid unsettling the free space managers.
             */
            if (HADDR_UNDEF ==
                (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f, (hsize_t)p0_image_len,
                                                    &eoa_frag_addr, &eoa_frag_size)))
                HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL,
                            "can't allocate file space for metadata cache image");
        } /* end if */
        else
#endif /* H5_HAVE_PARALLEL */
            /* Allocate the cache image block.  Note that we allocate this
             * this space directly from the file driver so as to avoid
             * unsettling the free space managers.
             */
            if (HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f,
                                                                   (hsize_t)(cache_ptr->image_data_len),
                                                                   &eoa_frag_addr, &eoa_frag_size)))
                HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL,
                            "can't allocate file space for metadata cache image");

        /* Make note of the eoa after allocation of the cache image
         * block.  This value is used for sanity checking when we
         * shutdown the self referential free space managers after
         * we destroy the metadata cache.
         */
        assert(HADDR_UNDEF == f->shared->eoa_post_mdci_fsalloc);
        if (HADDR_UNDEF == (f->shared->eoa_post_mdci_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size");

        /* For now, drop any fragment left over from the allocation of the
         * image block on the ground.  A fragment should only be returned
         * if the underlying file alignment is greater than 1.
         *
         * Clean this up eventually by extending the size of the cache
         * image block to the next alignment boundary, and then setting
         * the image_data_len to the actual size of the cache_image.
         *
         * On the off chance that there is some other way to get a
         * a fragment on a cache image allocation, leave the following
         * assertion in the code so we will find out.
         */
        assert((eoa_frag_size == 0) || (f->shared->alignment != 1));

        /* Eventually it will be possible for the length of the cache image
         * block on file to be greater than the size of the data it
         * contains.  However, for now they must be the same.  Set
         * cache_ptr->image_len accordingly.
         */
        cache_ptr->image_len = cache_ptr->image_data_len;

        /* update the metadata cache image superblock extension
         * message with the new cache image block base address and
         * length.
         *
         * to simplify testing, do this only if the
         * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
         * cache_ptr->image_ctl.flags.
         */
        if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
            if (H5C__write_cache_image_superblock_msg(f, false) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed");

        /* At this point:
         *
         *   1) space in the file for the metadata cache image
         *      is allocated,
         *
         *   2) the metadata cache image superblock extension
         *      message exists and (if so configured) contains
         *      the correct data,
         *
         *   3) All entries in the cache that will appear in the
         *      cache image are serialized with up to date images.
         *
         *      Since we just updated the cache image message,
         *      the super block extension message is dirty.  However,
         *      since the superblock and the superblock extension
         *      can't be included in the cache image, this is a non-
         *      issue.
         *
         *   4) All entries in the cache that will be include in
         *      the cache are marked as such, and we have a count
         *      of same.
         *
         *   5) Flush dependency heights are calculated for all
         *      entries that will be included in the cache image.
         *
         * If there are any entries to be included in the metadata cache
         * image, allocate, populate, and sort the image_entries array.
         *
         * If the metadata cache image will be empty, delete the
         * metadata cache image superblock extension message, set
         * cache_ptr->image_ctl.generate_image to false.  This will
         * allow the file close to continue normally without the
         * unnecessary generation of the metadata cache image.
         */
        if (cache_ptr->num_entries_in_image > 0) {
            if (H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.");

            /* Sort the entries (by flush dependency height, then LRU rank
             * -- see H5C__image_entry_cmp())
             */
            qsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
                  sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
        } /* end if */
        else { /* cancel creation of metadata cache image */
            assert(cache_ptr->image_entries == NULL);

            /* To avoid breaking the control flow tests, only delete
             * the mdci superblock extension message if the
             * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
             * cache_ptr->image_ctl.flags.
             */
            if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
                if (H5F__super_ext_remove_msg(f, H5O_MDCI_MSG_ID) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                                "can't remove MDC image msg from superblock ext");

            cache_ptr->image_ctl.generate_image = false;
        } /* end else */

        /* Indicate that a cache image was generated.  NOTE(review): this
         * is set even on the empty-image cancellation path above --
         * presumably intentional, since the caller only uses it to know
         * that image prep ran; confirm against callers.
         */
        *image_generated = true;
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_image_for_file_close() */
1086 | | |
1087 | | /*------------------------------------------------------------------------- |
1088 | | * Function: H5C_set_cache_image_config |
1089 | | * |
1090 | | * Purpose: If *config_ptr contains valid data, copy it into the |
1091 | | * image_ctl field of *cache_ptr. Make adjustments for |
1092 | | * changes in configuration as required. |
1093 | | * |
1094 | | * If the file is open read only, silently |
1095 | | * force the cache image configuration to its default |
1096 | | * (which disables construction of a cache image). |
1097 | | * |
1098 | | * Note that in addition to being inapplicable in the |
1099 | | * read only case, cache image is also inapplicable if |
1100 | | * the superblock does not support superblock extension |
1101 | | * messages. Unfortunately, this information need not |
1102 | | * be available at this point. Thus we check for this |
1103 | | * later, in H5C_prep_for_file_close() and cancel the |
1104 | | * cache image request if appropriate. |
1105 | | * |
1106 | | * Fail if the new configuration is invalid. |
1107 | | * |
1108 | | * Return: SUCCEED on success, and FAIL on failure. |
1109 | | * |
1110 | | *------------------------------------------------------------------------- |
1111 | | */ |
herr_t
H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    assert(f);

    /* Check arguments -- cache_ptr is checked with an error (not an
     * assert) since this is a public-facing entry point.
     */
    if (cache_ptr == NULL)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry");

    /* Validate the config: rejects NULL, bad version, unsupported
     * save_resize_status / entry_ageout values, and unknown flags.
     */
    if (H5C_validate_cache_image_config(config_ptr) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration");

#ifdef H5_HAVE_PARALLEL
    /* The collective metadata write code is not currently compatible
     * with cache image.  Until this is fixed, suppress cache image silently
     * if there is more than one process.
     * (aux_ptr is only non-NULL when the parallel cache is active.)
     */
    if (cache_ptr->aux_ptr) {
        H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

        cache_ptr->image_ctl = default_image_ctl;
        assert(!(cache_ptr->image_ctl.generate_image));
    }
    else {
#endif /* H5_HAVE_PARALLEL */
        /* A cache image can only be generated if the file is opened read / write
         * and the superblock supports superblock extension messages.
         *
         * However, the superblock version is not available at this point --
         * hence we can only check the former requirement now.  Do the latter
         * check just before we construct the image..
         *
         * If the file is opened read / write, apply the supplied configuration.
         *
         * If it is not, set the image configuration to the default, which has
         * the effect of silently disabling the cache image if it was requested.
         */
        if (H5F_INTENT(f) & H5F_ACC_RDWR)
            cache_ptr->image_ctl = *config_ptr;
        else {
            H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

            cache_ptr->image_ctl = default_image_ctl;
            assert(!(cache_ptr->image_ctl.generate_image));
        }
#ifdef H5_HAVE_PARALLEL
    }
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_cache_image_config() */
1170 | | |
1171 | | /*------------------------------------------------------------------------- |
1172 | | * Function: H5C_validate_cache_image_config() |
1173 | | * |
1174 | | * Purpose: Run a sanity check on the provided instance of struct |
1175 | | * H5AC_cache_image_config_t. |
1176 | | * |
1177 | | * Do nothing and return SUCCEED if no errors are detected, |
1178 | | * and flag an error and return FAIL otherwise. |
1179 | | * |
1180 | | * Return: Non-negative on success/Negative on failure |
1181 | | * |
1182 | | *------------------------------------------------------------------------- |
1183 | | */ |
1184 | | herr_t |
1185 | | H5C_validate_cache_image_config(H5C_cache_image_ctl_t *ctl_ptr) |
1186 | 1.03k | { |
1187 | 1.03k | herr_t ret_value = SUCCEED; /* Return value */ |
1188 | | |
1189 | 1.03k | FUNC_ENTER_NOAPI(FAIL) |
1190 | | |
1191 | 1.03k | if (ctl_ptr == NULL) |
1192 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL ctl_ptr on entry"); |
1193 | 1.03k | if (ctl_ptr->version != H5C__CURR_CACHE_IMAGE_CTL_VER) |
1194 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown cache image control version"); |
1195 | | |
1196 | | /* At present, we do not support inclusion of the adaptive resize |
1197 | | * configuration in the cache image. Thus the save_resize_status |
1198 | | * field must be false. |
1199 | | */ |
1200 | 1.03k | if (ctl_ptr->save_resize_status != false) |
1201 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in save_resize_status field"); |
1202 | | |
1203 | | /* At present, we do not support prefetched entry ageouts. Thus |
1204 | | * the entry_ageout field must be set to |
1205 | | * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. |
1206 | | */ |
1207 | 1.03k | if (ctl_ptr->entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) |
1208 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in entry_ageout field"); |
1209 | | |
1210 | 1.03k | if ((ctl_ptr->flags & ~H5C_CI__ALL_FLAGS) != 0) |
1211 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown flag set"); |
1212 | | |
1213 | 1.03k | done: |
1214 | 1.03k | FUNC_LEAVE_NOAPI(ret_value) |
1215 | 1.03k | } /* H5C_validate_cache_image_config() */ |
1216 | | |
1217 | | /*************************************************************************/ |
1218 | | /**************************** Private Functions: *************************/ |
1219 | | /*************************************************************************/ |
1220 | | |
1221 | | /*------------------------------------------------------------------------- |
1222 | | * Function: H5C__cache_image_block_entry_header_size |
1223 | | * |
1224 | | * Purpose: Compute the size of the header of the metadata cache |
1225 | | * image block, and return the value. |
1226 | | * |
1227 | | * Return: Size of the header section of the metadata cache image |
1228 | | * block in bytes. |
1229 | | * |
1230 | | *------------------------------------------------------------------------- |
1231 | | */ |
1232 | | static size_t |
1233 | | H5C__cache_image_block_entry_header_size(const H5F_t *f) |
1234 | 0 | { |
1235 | 0 | size_t ret_value = 0; /* Return value */ |
1236 | |
|
1237 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1238 | | |
1239 | | /* Set return value */ |
1240 | 0 | ret_value = (size_t)(1 + /* type */ |
1241 | 0 | 1 + /* flags */ |
1242 | 0 | 1 + /* ring */ |
1243 | 0 | 1 + /* age */ |
1244 | 0 | 2 + /* dependency child count */ |
1245 | 0 | 2 + /* dirty dep child count */ |
1246 | 0 | 2 + /* dependency parent count */ |
1247 | 0 | 4 + /* index in LRU */ |
1248 | 0 | H5F_SIZEOF_ADDR(f) + /* entry offset */ |
1249 | 0 | H5F_SIZEOF_SIZE(f)); /* entry length */ |
1250 | |
|
1251 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1252 | 0 | } /* H5C__cache_image_block_entry_header_size() */ |
1253 | | |
1254 | | /*------------------------------------------------------------------------- |
1255 | | * Function: H5C__cache_image_block_header_size |
1256 | | * |
1257 | | * Purpose: Compute the size of the header of the metadata cache |
1258 | | * image block, and return the value. |
1259 | | * |
1260 | | * Return: Size of the header section of the metadata cache image |
1261 | | * block in bytes. |
1262 | | * |
1263 | | *------------------------------------------------------------------------- |
1264 | | */ |
1265 | | static size_t |
1266 | | H5C__cache_image_block_header_size(const H5F_t *f) |
1267 | 0 | { |
1268 | 0 | size_t ret_value = 0; /* Return value */ |
1269 | |
|
1270 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1271 | | |
1272 | | /* Set return value */ |
1273 | 0 | ret_value = (size_t)(4 + /* signature */ |
1274 | 0 | 1 + /* version */ |
1275 | 0 | 1 + /* flags */ |
1276 | 0 | H5F_SIZEOF_SIZE(f) + /* image data length */ |
1277 | 0 | 4); /* num_entries */ |
1278 | |
|
1279 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1280 | 0 | } /* H5C__cache_image_block_header_size() */ |
1281 | | |
1282 | | /*------------------------------------------------------------------------- |
1283 | | * Function: H5C__decode_cache_image_header() |
1284 | | * |
1285 | | * Purpose: Decode the metadata cache image buffer header from the |
1286 | | * supplied buffer and load the data into the supplied instance |
1287 | | * of H5C_t. Advances the buffer pointer to the first byte |
1288 | | * after the header image, or unchanged on failure. |
1289 | | * |
1290 | | * Return: Non-negative on success/Negative on failure |
1291 | | * |
1292 | | *------------------------------------------------------------------------- |
1293 | | */ |
static herr_t
H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, size_t buf_size)
{
    uint8_t        version;                    /* On-disk header version */
    uint8_t        flags;                      /* On-disk header flags */
    bool           have_resize_status = false; /* True iff resize status bit set (unsupported) */
    size_t         actual_header_len;          /* Bytes actually consumed by this decode */
    size_t         expected_header_len;        /* Bytes the header should occupy */
    const uint8_t *p;                          /* Decode cursor */
    const uint8_t *p_end = *buf + buf_size - 1; /* End of the p buffer (last valid byte) */
    herr_t ret_value = SUCCEED;                 /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(cache_ptr);
    assert(buf);
    assert(*buf);

    /* Point to buffer to decode */
    p = *buf;

    /* Ensure buffer has enough data for signature comparison */
    if (H5_IS_BUFFER_OVERFLOW(p, H5C__MDCI_BLOCK_SIGNATURE_LEN, p_end))
        HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "Insufficient buffer size for signature");

    /* Check signature */
    if (memcmp(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN) != 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image header signature");
    p += H5C__MDCI_BLOCK_SIGNATURE_LEN;

    /* Check version (only version 0 is defined) */
    if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end))
        HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding");
    version = *p++;
    if (version != (uint8_t)H5C__MDCI_BLOCK_VERSION_0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image version");

    /* Decode flags -- a set resize-status bit is rejected below since
     * saving resize status in the image is not yet supported
     */
    if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end))
        HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding");
    flags = *p++;
    if (flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
        have_resize_status = true;
    if (have_resize_status)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "MDC resize status not yet supported");

    /* Read image data length (width depends on the file's size-of-size;
     * H5F_DECODE_LENGTH advances p)
     */
    if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end))
        HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding");
    H5F_DECODE_LENGTH(f, p, cache_ptr->image_data_len);

    /* For now -- will become <= eventually */
    if (cache_ptr->image_data_len != cache_ptr->image_len)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image data length");

    /* Read num entries -- a zero count is invalid, as an empty image is
     * never written (see H5C__prep_image_for_file_close())
     */
    if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end))
        HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding");
    UINT32DECODE(p, cache_ptr->num_entries_in_image);
    if (cache_ptr->num_entries_in_image == 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count");

    /* Verify expected length of header -- cross-checks the decode against
     * H5C__cache_image_block_header_size()
     */
    actual_header_len   = (size_t)(p - *buf);
    expected_header_len = H5C__cache_image_block_header_size(f);
    if (actual_header_len != expected_header_len)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len");

    /* Update buffer pointer to first byte after the header (only on
     * success -- *buf is left unchanged on failure)
     */
    *buf = p;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__decode_cache_image_header() */
1369 | | |
1370 | | #ifndef NDEBUG |
1371 | | |
1372 | | /*------------------------------------------------------------------------- |
1373 | | * Function: H5C__decode_cache_image_entry() |
1374 | | * |
1375 | | * Purpose: Decode the metadata cache image entry from the supplied |
1376 | | * buffer into the supplied instance of H5C_image_entry_t. |
1377 | | * This includes allocating a buffer for the entry image, |
1378 | | * loading it, and setting ie_ptr->image_ptr to point to |
1379 | | * the buffer. |
1380 | | * |
1381 | | * Advances the buffer pointer to the first byte |
1382 | | * after the entry, or unchanged on failure. |
1383 | | * |
1384 | | * Return: Non-negative on success/Negative on failure |
1385 | | * |
1386 | | *------------------------------------------------------------------------- |
1387 | | */ |
1388 | | static herr_t |
1389 | | H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num) |
1390 | | { |
1391 | | bool is_dirty = false; |
1392 | | bool in_lru = false; /* Only used in assertions */ |
1393 | | bool is_fd_parent = false; /* Only used in assertions */ |
1394 | | bool is_fd_child = false; /* Only used in assertions */ |
1395 | | haddr_t addr; |
1396 | | hsize_t size = 0; |
1397 | | void *image_ptr; |
1398 | | uint8_t flags = 0; |
1399 | | uint8_t type_id; |
1400 | | uint8_t ring; |
1401 | | uint8_t age; |
1402 | | uint16_t fd_child_count; |
1403 | | uint16_t fd_dirty_child_count; |
1404 | | uint16_t fd_parent_count; |
1405 | | haddr_t *fd_parent_addrs = NULL; |
1406 | | int32_t lru_rank; |
1407 | | H5C_image_entry_t *ie_ptr = NULL; |
1408 | | const uint8_t *p; |
1409 | | herr_t ret_value = SUCCEED; /* Return value */ |
1410 | | |
1411 | | FUNC_ENTER_PACKAGE |
1412 | | |
1413 | | /* Sanity checks */ |
1414 | | assert(f); |
1415 | | assert(f->shared); |
1416 | | assert(cache_ptr == f->shared->cache); |
1417 | | assert(cache_ptr); |
1418 | | assert(buf); |
1419 | | assert(*buf); |
1420 | | assert(entry_num < cache_ptr->num_entries_in_image); |
1421 | | ie_ptr = &(cache_ptr->image_entries[entry_num]); |
1422 | | assert(ie_ptr); |
1423 | | |
1424 | | /* Get pointer to buffer */ |
1425 | | p = *buf; |
1426 | | |
1427 | | /* Decode type id */ |
1428 | | type_id = *p++; |
1429 | | |
1430 | | /* Decode flags */ |
1431 | | flags = *p++; |
1432 | | if (flags & H5C__MDCI_ENTRY_DIRTY_FLAG) |
1433 | | is_dirty = true; |
1434 | | if (flags & H5C__MDCI_ENTRY_IN_LRU_FLAG) |
1435 | | in_lru = true; |
1436 | | if (flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG) |
1437 | | is_fd_parent = true; |
1438 | | if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) |
1439 | | is_fd_child = true; |
1440 | | |
1441 | | /* Decode ring */ |
1442 | | ring = *p++; |
1443 | | assert(ring > (uint8_t)(H5C_RING_UNDEFINED)); |
1444 | | assert(ring < (uint8_t)(H5C_RING_NTYPES)); |
1445 | | |
1446 | | /* Decode age */ |
1447 | | age = *p++; |
1448 | | |
1449 | | /* Decode dependency child count */ |
1450 | | UINT16DECODE(p, fd_child_count); |
1451 | | assert((is_fd_parent && fd_child_count > 0) || (!is_fd_parent && fd_child_count == 0)); |
1452 | | |
1453 | | /* Decode dirty dependency child count */ |
1454 | | UINT16DECODE(p, fd_dirty_child_count); |
1455 | | if (fd_dirty_child_count > fd_child_count) |
1456 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid dirty flush dependency child count"); |
1457 | | |
1458 | | /* Decode dependency parent count */ |
1459 | | UINT16DECODE(p, fd_parent_count); |
1460 | | assert((is_fd_child && fd_parent_count > 0) || (!is_fd_child && fd_parent_count == 0)); |
1461 | | |
1462 | | /* Decode index in LRU */ |
1463 | | INT32DECODE(p, lru_rank); |
1464 | | assert((in_lru && lru_rank >= 0) || (!in_lru && lru_rank == -1)); |
1465 | | |
1466 | | /* Decode entry offset */ |
1467 | | H5F_addr_decode(f, &p, &addr); |
1468 | | if (!H5_addr_defined(addr)) |
1469 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry offset"); |
1470 | | |
1471 | | /* Decode entry length */ |
1472 | | H5F_DECODE_LENGTH(f, p, size); |
1473 | | if (size == 0) |
1474 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry size"); |
1475 | | |
1476 | | /* Verify expected length of entry image */ |
1477 | | if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) |
1478 | | HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad entry image len"); |
1479 | | |
1480 | | /* If parent count greater than zero, allocate array for parent |
1481 | | * addresses, and decode addresses into the array. |
1482 | | */ |
1483 | | if (fd_parent_count > 0) { |
1484 | | int i; /* Local index variable */ |
1485 | | |
1486 | | if (NULL == (fd_parent_addrs = (haddr_t *)H5MM_malloc((size_t)(fd_parent_count)*H5F_SIZEOF_ADDR(f)))) |
1487 | | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, |
1488 | | "memory allocation failed for fd parent addrs buffer"); |
1489 | | |
1490 | | for (i = 0; i < fd_parent_count; i++) { |
1491 | | H5F_addr_decode(f, &p, &(fd_parent_addrs[i])); |
1492 | | if (!H5_addr_defined(fd_parent_addrs[i])) |
1493 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid flush dependency parent offset"); |
1494 | | } /* end for */ |
1495 | | } /* end if */ |
1496 | | |
1497 | | /* Allocate buffer for entry image */ |
1498 | | if (NULL == (image_ptr = H5MM_malloc(size + H5C_IMAGE_EXTRA_SPACE))) |
1499 | | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer"); |
1500 | | |
1501 | | #if H5C_DO_MEMORY_SANITY_CHECKS |
1502 | | H5MM_memcpy(((uint8_t *)image_ptr) + size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); |
1503 | | #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ |
1504 | | |
1505 | | /* Copy the entry image from the cache image block */ |
1506 | | H5MM_memcpy(image_ptr, p, size); |
1507 | | p += size; |
1508 | | |
1509 | | /* Copy data into target */ |
1510 | | ie_ptr->addr = addr; |
1511 | | ie_ptr->size = size; |
1512 | | ie_ptr->ring = (H5C_ring_t)ring; |
1513 | | ie_ptr->age = (int32_t)age; |
1514 | | ie_ptr->type_id = (int32_t)type_id; |
1515 | | ie_ptr->lru_rank = lru_rank; |
1516 | | ie_ptr->is_dirty = is_dirty; |
1517 | | ie_ptr->fd_child_count = (uint64_t)fd_child_count; |
1518 | | ie_ptr->fd_dirty_child_count = (uint64_t)fd_dirty_child_count; |
1519 | | ie_ptr->fd_parent_count = (uint64_t)fd_parent_count; |
1520 | | ie_ptr->fd_parent_addrs = fd_parent_addrs; |
1521 | | ie_ptr->image_ptr = image_ptr; |
1522 | | |
1523 | | /* Update buffer pointer */ |
1524 | | *buf = p; |
1525 | | |
1526 | | done: |
1527 | | FUNC_LEAVE_NOAPI(ret_value) |
1528 | | } /* H5C__decode_cache_image_entry() */ |
1529 | | #endif |
1530 | | |
1531 | | /*------------------------------------------------------------------------- |
1532 | | * Function: H5C__encode_cache_image_header() |
1533 | | * |
1534 | | * Purpose: Encode the metadata cache image buffer header in the |
1535 | | * supplied buffer. Updates buffer pointer to the first byte |
1536 | | * after the header image in the buffer, or unchanged on failure. |
1537 | | * |
1538 | | * Return: Non-negative on success/Negative on failure |
1539 | | * |
1540 | | *------------------------------------------------------------------------- |
1541 | | */ |
1542 | | static herr_t |
1543 | | H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf) |
1544 | 0 | { |
1545 | 0 | size_t actual_header_len; |
1546 | 0 | size_t expected_header_len; |
1547 | 0 | uint8_t flags = 0; |
1548 | 0 | uint8_t *p; /* Pointer into cache image buffer */ |
1549 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
1550 | |
|
1551 | 0 | FUNC_ENTER_PACKAGE |
1552 | | |
1553 | | /* Sanity checks */ |
1554 | 0 | assert(cache_ptr); |
1555 | 0 | assert(cache_ptr->close_warning_received); |
1556 | 0 | assert(cache_ptr->image_ctl.generate_image); |
1557 | 0 | assert(cache_ptr->index_len == 0); |
1558 | 0 | assert(cache_ptr->image_data_len > 0); |
1559 | 0 | assert(cache_ptr->image_data_len <= cache_ptr->image_len); |
1560 | 0 | assert(buf); |
1561 | 0 | assert(*buf); |
1562 | | |
1563 | | /* Set pointer into buffer */ |
1564 | 0 | p = *buf; |
1565 | | |
1566 | | /* write signature */ |
1567 | 0 | H5MM_memcpy(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN); |
1568 | 0 | p += H5C__MDCI_BLOCK_SIGNATURE_LEN; |
1569 | | |
1570 | | /* write version */ |
1571 | 0 | *p++ = (uint8_t)H5C__MDCI_BLOCK_VERSION_0; |
1572 | | |
1573 | | /* setup and write flags */ |
1574 | | |
1575 | | /* at present we don't support saving resize status */ |
1576 | 0 | assert(!cache_ptr->image_ctl.save_resize_status); |
1577 | 0 | if (cache_ptr->image_ctl.save_resize_status) |
1578 | 0 | flags |= H5C__MDCI_HEADER_HAVE_RESIZE_STATUS; |
1579 | |
|
1580 | 0 | *p++ = flags; |
1581 | | |
1582 | | /* Encode image data length */ |
1583 | | /* this must be true at present */ |
1584 | 0 | assert(cache_ptr->image_len == cache_ptr->image_data_len); |
1585 | 0 | H5F_ENCODE_LENGTH(f, p, cache_ptr->image_data_len); |
1586 | | |
1587 | | /* write num entries */ |
1588 | 0 | UINT32ENCODE(p, cache_ptr->num_entries_in_image); |
1589 | | |
1590 | | /* verify expected length of header */ |
1591 | 0 | actual_header_len = (size_t)(p - *buf); |
1592 | 0 | expected_header_len = H5C__cache_image_block_header_size(f); |
1593 | 0 | if (actual_header_len != expected_header_len) |
1594 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len"); |
1595 | | |
1596 | | /* Update buffer pointer */ |
1597 | 0 | *buf = p; |
1598 | |
|
1599 | 0 | done: |
1600 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1601 | 0 | } /* H5C__encode_cache_image_header() */ |
1602 | | |
1603 | | /*------------------------------------------------------------------------- |
1604 | | * Function: H5C__encode_cache_image_entry() |
1605 | | * |
 * Purpose:     Encode the metadata cache image entry in the
 *              supplied buffer. Updates buffer pointer to the first byte
1608 | | * after the entry in the buffer, or unchanged on failure. |
1609 | | * |
1610 | | * Return: Non-negative on success/Negative on failure |
1611 | | * |
1612 | | *------------------------------------------------------------------------- |
1613 | | */ |
1614 | | static herr_t |
1615 | | H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num) |
1616 | 0 | { |
1617 | 0 | H5C_image_entry_t *ie_ptr; /* Pointer to entry to encode */ |
1618 | 0 | uint8_t flags = 0; /* Flags for entry */ |
1619 | 0 | uint8_t *p; /* Pointer into cache image buffer */ |
1620 | 0 | unsigned u; /* Local index value */ |
1621 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
1622 | |
|
1623 | 0 | FUNC_ENTER_PACKAGE |
1624 | | |
1625 | | /* Sanity checks */ |
1626 | 0 | assert(f); |
1627 | 0 | assert(f->shared); |
1628 | 0 | assert(cache_ptr == f->shared->cache); |
1629 | 0 | assert(cache_ptr); |
1630 | 0 | assert(cache_ptr->close_warning_received); |
1631 | 0 | assert(cache_ptr->image_ctl.generate_image); |
1632 | 0 | assert(cache_ptr->index_len == 0); |
1633 | 0 | assert(buf); |
1634 | 0 | assert(*buf); |
1635 | 0 | assert(entry_num < cache_ptr->num_entries_in_image); |
1636 | 0 | ie_ptr = &(cache_ptr->image_entries[entry_num]); |
1637 | | |
1638 | | /* Get pointer to buffer to encode into */ |
1639 | 0 | p = *buf; |
1640 | | |
1641 | | /* Encode type */ |
1642 | 0 | if ((ie_ptr->type_id < 0) || (ie_ptr->type_id > 255)) |
1643 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "type_id out of range."); |
1644 | 0 | *p++ = (uint8_t)(ie_ptr->type_id); |
1645 | | |
1646 | | /* Compose and encode flags */ |
1647 | 0 | if (ie_ptr->is_dirty) |
1648 | 0 | flags |= H5C__MDCI_ENTRY_DIRTY_FLAG; |
1649 | 0 | if (ie_ptr->lru_rank > 0) |
1650 | 0 | flags |= H5C__MDCI_ENTRY_IN_LRU_FLAG; |
1651 | 0 | if (ie_ptr->fd_child_count > 0) |
1652 | 0 | flags |= H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG; |
1653 | 0 | if (ie_ptr->fd_parent_count > 0) |
1654 | 0 | flags |= H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG; |
1655 | 0 | *p++ = flags; |
1656 | | |
1657 | | /* Encode ring */ |
1658 | 0 | *p++ = (uint8_t)(ie_ptr->ring); |
1659 | | |
1660 | | /* Encode age */ |
1661 | 0 | *p++ = (uint8_t)(ie_ptr->age); |
1662 | | |
1663 | | /* Validate and encode dependency child count */ |
1664 | 0 | if (ie_ptr->fd_child_count > H5C__MDCI_MAX_FD_CHILDREN) |
1665 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_child_count out of range"); |
1666 | 0 | UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_child_count)); |
1667 | | |
1668 | | /* Validate and encode dirty dependency child count */ |
1669 | 0 | if (ie_ptr->fd_dirty_child_count > H5C__MDCI_MAX_FD_CHILDREN) |
1670 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_dirty_child_count out of range"); |
1671 | 0 | UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_dirty_child_count)); |
1672 | | |
1673 | | /* Validate and encode dependency parent count */ |
1674 | 0 | if (ie_ptr->fd_parent_count > H5C__MDCI_MAX_FD_PARENTS) |
1675 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_parent_count out of range"); |
1676 | 0 | UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_parent_count)); |
1677 | | |
1678 | | /* Encode index in LRU */ |
1679 | 0 | INT32ENCODE(p, ie_ptr->lru_rank); |
1680 | | |
1681 | | /* Encode entry offset */ |
1682 | 0 | H5F_addr_encode(f, &p, ie_ptr->addr); |
1683 | | |
1684 | | /* Encode entry length */ |
1685 | 0 | H5F_ENCODE_LENGTH(f, p, ie_ptr->size); |
1686 | | |
1687 | | /* Verify expected length of entry image */ |
1688 | 0 | if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) |
1689 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad entry image len"); |
1690 | | |
1691 | | /* Encode dependency parent offsets -- if any */ |
1692 | 0 | for (u = 0; u < ie_ptr->fd_parent_count; u++) |
1693 | 0 | H5F_addr_encode(f, &p, ie_ptr->fd_parent_addrs[u]); |
1694 | | |
1695 | | /* Copy entry image */ |
1696 | 0 | H5MM_memcpy(p, ie_ptr->image_ptr, ie_ptr->size); |
1697 | 0 | p += ie_ptr->size; |
1698 | | |
1699 | | /* Update buffer pointer */ |
1700 | 0 | *buf = p; |
1701 | |
|
1702 | 0 | done: |
1703 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1704 | 0 | } /* H5C__encode_cache_image_entry() */ |
1705 | | |
1706 | | /*------------------------------------------------------------------------- |
1707 | | * Function: H5C__prep_for_file_close__compute_fd_heights |
1708 | | * |
1709 | | * Purpose: The purpose of this function is to compute the flush |
1710 | | * dependency height of all entries that appear in the cache |
1711 | | * image. |
1712 | | * |
1713 | | * At present, entries are included or excluded from the |
1714 | | * cache image depending upon the ring in which they reside. |
1715 | | * Thus there is no chance that one side of a flush dependency |
1716 | | * will be in the cache image, and the other side not. |
1717 | | * |
1718 | | * However, once we start placing a limit on the size of the |
1719 | | * cache image, or start excluding prefetched entries from |
1720 | | * the cache image if they haven't been accessed in some |
1721 | | * number of file close / open cycles, this will no longer |
1722 | | * be the case. |
1723 | | * |
1724 | | * In particular, if a flush dependency child is dirty, and |
1725 | | * one of its flush dependency parents is dirty and not in |
1726 | | * the cache image, then the flush dependency child cannot |
1727 | | * be in the cache image without violating flush ordering. |
1728 | | * |
1729 | | * Observe that a clean flush dependency child can be either |
1730 | | * in or out of the cache image without effect on flush |
1731 | | * dependencies. |
1732 | | * |
1733 | | * Similarly, a flush dependency parent can always be part |
1734 | | * of a cache image, regardless of whether it is clean or |
1735 | | * dirty -- but remember that a flush dependency parent can |
1736 | | * also be a flush dependency child. |
1737 | | * |
1738 | | * Finally, note that for purposes of the cache image, flush |
1739 | | * dependency height ends when a flush dependency relation |
1740 | | * passes off the cache image. |
1741 | | * |
1742 | | * On exit, the flush dependency height of each entry in the |
1743 | | * cache image should be calculated and stored in the cache |
1744 | | * entry. Entries will be removed from the cache image if |
1745 | | * necessary to maintain flush ordering. |
1746 | | * |
1747 | | * Return: Non-negative on success/Negative on failure |
1748 | | * |
1749 | | *------------------------------------------------------------------------- |
1750 | | */ |
1751 | | static herr_t |
1752 | | H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) |
1753 | 0 | { |
1754 | 0 | H5C_cache_entry_t *entry_ptr; |
1755 | 0 | H5C_cache_entry_t *parent_ptr; |
1756 | | #ifndef NDEBUG |
1757 | | unsigned entries_removed_from_image = 0; |
1758 | | unsigned external_parent_fd_refs_removed = 0; |
1759 | | unsigned external_child_fd_refs_removed = 0; |
1760 | | #endif |
1761 | 0 | bool done = false; |
1762 | 0 | unsigned u; /* Local index variable */ |
1763 | 0 | herr_t ret_value = SUCCEED; |
1764 | |
|
1765 | 0 | FUNC_ENTER_PACKAGE |
1766 | | |
1767 | | /* sanity checks */ |
1768 | 0 | assert(cache_ptr); |
1769 | | |
1770 | | /* Remove from the cache image all dirty entries that are |
1771 | | * flush dependency children of dirty entries that are not in the |
1772 | | * cache image. Must do this, as if we fail to do so, the parent |
1773 | | * will be written to file before the child. Since it is possible |
1774 | | * that the child will have dirty children of its own, this may take |
1775 | | * multiple passes through the index list. |
1776 | | */ |
1777 | 0 | done = false; |
1778 | 0 | while (!done) { |
1779 | 0 | done = true; |
1780 | 0 | entry_ptr = cache_ptr->il_head; |
1781 | 0 | while (entry_ptr != NULL) { |
1782 | | /* Should this entry be in the image */ |
1783 | 0 | if (entry_ptr->image_dirty && entry_ptr->include_in_image && (entry_ptr->fd_parent_count > 0)) { |
1784 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1785 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1786 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1787 | | |
1788 | | /* Sanity check parent */ |
1789 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1790 | |
|
1791 | 0 | if (parent_ptr->is_dirty && !parent_ptr->include_in_image && |
1792 | 0 | entry_ptr->include_in_image) { |
1793 | | |
1794 | | /* Must remove child from image -- only do this once */ |
1795 | | #ifndef NDEBUG |
1796 | | entries_removed_from_image++; |
1797 | | #endif |
1798 | 0 | entry_ptr->include_in_image = false; |
1799 | 0 | } /* end if */ |
1800 | 0 | } /* for */ |
1801 | 0 | } /* end if */ |
1802 | |
|
1803 | 0 | entry_ptr = entry_ptr->il_next; |
1804 | 0 | } /* while ( entry_ptr != NULL ) */ |
1805 | 0 | } /* while ( ! done ) */ |
1806 | | |
1807 | | /* at present, entries are included in the cache image if they reside |
1808 | | * in a specified set of rings. Thus it should be impossible for |
1809 | | * entries_removed_from_image to be positive. Assert that this is |
1810 | | * so. Note that this will change when we start aging entries out |
1811 | | * of the cache image. |
1812 | | */ |
1813 | 0 | assert(entries_removed_from_image == 0); |
1814 | | |
1815 | | /* Next, remove from entries in the cache image, references to |
1816 | | * flush dependency parents or children that are not in the cache image. |
1817 | | */ |
1818 | 0 | entry_ptr = cache_ptr->il_head; |
1819 | 0 | while (entry_ptr != NULL) { |
1820 | 0 | if (!entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) { |
1821 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1822 | |
|
1823 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1824 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1825 | | |
1826 | | /* Sanity check parent */ |
1827 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1828 | |
|
1829 | 0 | if (parent_ptr->include_in_image) { |
1830 | | /* Must remove reference to child */ |
1831 | 0 | assert(parent_ptr->fd_child_count > 0); |
1832 | 0 | parent_ptr->fd_child_count--; |
1833 | |
|
1834 | 0 | if (entry_ptr->is_dirty) { |
1835 | 0 | assert(parent_ptr->fd_dirty_child_count > 0); |
1836 | 0 | parent_ptr->fd_dirty_child_count--; |
1837 | 0 | } /* end if */ |
1838 | |
|
1839 | | #ifndef NDEBUG |
1840 | | external_child_fd_refs_removed++; |
1841 | | #endif |
1842 | 0 | } /* end if */ |
1843 | 0 | } /* for */ |
1844 | 0 | } /* end if */ |
1845 | 0 | else if (entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) { |
1846 | | /* Sanity checks */ |
1847 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1848 | 0 | assert(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count); |
1849 | 0 | assert(entry_ptr->fd_parent_addrs); |
1850 | |
|
1851 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1852 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1853 | | |
1854 | | /* Sanity check parent */ |
1855 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1856 | |
|
1857 | 0 | if (!parent_ptr->include_in_image) { |
1858 | | /* Must remove reference to parent */ |
1859 | 0 | assert(entry_ptr->fd_parent_count > 0); |
1860 | 0 | parent_ptr->fd_child_count--; |
1861 | |
|
1862 | 0 | assert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]); |
1863 | |
|
1864 | 0 | entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF; |
1865 | | #ifndef NDEBUG |
1866 | | external_parent_fd_refs_removed++; |
1867 | | #endif |
1868 | 0 | } /* end if */ |
1869 | 0 | } /* for */ |
1870 | | |
1871 | | /* Touch up fd_parent_addrs array if necessary */ |
1872 | 0 | if (entry_ptr->fd_parent_count == 0) { |
1873 | 0 | H5MM_xfree(entry_ptr->fd_parent_addrs); |
1874 | 0 | entry_ptr->fd_parent_addrs = NULL; |
1875 | 0 | } /* end if */ |
1876 | 0 | else if (entry_ptr->flush_dep_nparents > entry_ptr->fd_parent_count) { |
1877 | 0 | haddr_t *old_fd_parent_addrs = entry_ptr->fd_parent_addrs; |
1878 | 0 | unsigned v; |
1879 | |
|
1880 | 0 | if (NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_calloc( |
1881 | 0 | sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_addrs)))) |
1882 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, |
1883 | 0 | "memory allocation failed for fd parent addr array"); |
1884 | | |
1885 | 0 | v = 0; |
1886 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1887 | 0 | if (old_fd_parent_addrs[u] != HADDR_UNDEF) { |
1888 | 0 | entry_ptr->fd_parent_addrs[v] = old_fd_parent_addrs[u]; |
1889 | 0 | v++; |
1890 | 0 | } /* end if */ |
1891 | 0 | } /* end for */ |
1892 | |
|
1893 | 0 | assert(v == entry_ptr->fd_parent_count); |
1894 | 0 | } /* end else-if */ |
1895 | 0 | } /* end else-if */ |
1896 | | |
1897 | 0 | entry_ptr = entry_ptr->il_next; |
1898 | 0 | } /* while (entry_ptr != NULL) */ |
1899 | | |
1900 | | /* At present, no external parent or child flush dependency links |
1901 | | * should exist -- hence the following assertions. This will change |
1902 | | * if we support ageout of entries in the cache image. |
1903 | | */ |
1904 | 0 | assert(external_child_fd_refs_removed == 0); |
1905 | 0 | assert(external_parent_fd_refs_removed == 0); |
1906 | | |
1907 | | /* At this point we should have removed all flush dependencies that |
1908 | | * cross cache image boundaries. Now compute the flush dependency |
1909 | | * heights for all entries in the image. |
1910 | | * |
1911 | | * Until I can think of a better way, do this via a depth first |
1912 | | * search implemented via a recursive function call. |
1913 | | * |
1914 | | * Note that entry_ptr->image_fd_height has already been initialized to 0 |
1915 | | * for all entries that may appear in the cache image. |
1916 | | */ |
1917 | 0 | entry_ptr = cache_ptr->il_head; |
1918 | 0 | while (entry_ptr != NULL) { |
1919 | 0 | if (entry_ptr->include_in_image && entry_ptr->fd_child_count == 0 && entry_ptr->fd_parent_count > 0) { |
1920 | 0 | for (u = 0; u < entry_ptr->fd_parent_count; u++) { |
1921 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1922 | |
|
1923 | 0 | if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) |
1924 | 0 | H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1); |
1925 | 0 | } /* end for */ |
1926 | 0 | } /* end if */ |
1927 | |
|
1928 | 0 | entry_ptr = entry_ptr->il_next; |
1929 | 0 | } /* while (entry_ptr != NULL) */ |
1930 | |
|
1931 | 0 | done: |
1932 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1933 | 0 | } /* H5C__prep_for_file_close__compute_fd_heights() */ |
1934 | | |
1935 | | /*------------------------------------------------------------------------- |
1936 | | * Function: H5C__prep_for_file_close__compute_fd_heights_real |
1937 | | * |
1938 | | * Purpose: H5C__prep_for_file_close__compute_fd_heights() prepares |
1939 | | * for the computation of flush dependency heights of all |
1940 | | * entries in the cache image, this function actually does |
1941 | | * it. |
1942 | | * |
1943 | | * The basic observation behind this function is as follows: |
1944 | | * |
1945 | | * Suppose you have an entry E with a flush dependency |
1946 | | * height of X. Then the parents of E must all have |
1947 | | * flush dependency X + 1 or greater. |
1948 | | * |
1949 | | * Use this observation to compute flush dependency height |
1950 | | * of all entries in the cache image via the following |
1951 | | * recursive algorithm: |
1952 | | * |
1953 | | * 1) On entry, set the flush dependency height of the |
1954 | | * supplied cache entry to the supplied value. |
1955 | | * |
1956 | | * 2) Examine all the flush dependency parents of the |
1957 | | * supplied entry. |
1958 | | * |
1959 | | * If the parent is in the cache image, and has flush |
1960 | | * dependency height less than or equal to the flush |
1961 | | * dependency height of the current entry, call the |
1962 | | * recursive routine on the parent with flush dependency |
1963 | | * height equal to the flush dependency height of the |
1964 | | * child plus 1. |
1965 | | * |
1966 | | * Otherwise do nothing. |
1967 | | * |
1968 | | * Observe that if the flush dependency height of all entries |
1969 | | * in the image is initialized to zero, and if this recursive |
1970 | | * function is called with flush dependency height 0 on all |
1971 | | * entries in the cache image with FD parents in the image, |
1972 | | * but without FD children in the image, the correct flush |
1973 | | * dependency height should be set for all entries in the |
1974 | | * cache image. |
1975 | | * |
1976 | | * Return: void |
1977 | | * |
1978 | | *------------------------------------------------------------------------- |
1979 | | */ |
1980 | | static void |
1981 | | H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, uint32_t fd_height) |
1982 | 0 | { |
1983 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1984 | | |
1985 | | /* Sanity checks */ |
1986 | 0 | assert(entry_ptr); |
1987 | 0 | assert(entry_ptr->include_in_image); |
1988 | 0 | assert((entry_ptr->image_fd_height == 0) || (entry_ptr->image_fd_height < fd_height)); |
1989 | 0 | assert(((fd_height == 0) && (entry_ptr->fd_child_count == 0)) || |
1990 | 0 | ((fd_height > 0) && (entry_ptr->fd_child_count > 0))); |
1991 | |
|
1992 | 0 | entry_ptr->image_fd_height = fd_height; |
1993 | 0 | if (entry_ptr->flush_dep_nparents > 0) { |
1994 | 0 | unsigned u; |
1995 | |
|
1996 | 0 | assert(entry_ptr->flush_dep_parent); |
1997 | 0 | for (u = 0; u < entry_ptr->fd_parent_count; u++) { |
1998 | 0 | H5C_cache_entry_t *parent_ptr; |
1999 | |
|
2000 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
2001 | |
|
2002 | 0 | if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= fd_height) |
2003 | 0 | H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, fd_height + 1); |
2004 | 0 | } /* end for */ |
2005 | 0 | } /* end if */ |
2006 | |
|
2007 | 0 | FUNC_LEAVE_NOAPI_VOID |
2008 | 0 | } /* H5C__prep_for_file_close__compute_fd_heights_real() */ |
2009 | | |
2010 | | /*------------------------------------------------------------------------- |
2011 | | * Function: H5C__prep_for_file_close__setup_image_entries_array |
2012 | | * |
2013 | | * Purpose: Allocate space for the image_entries array, and load |
2014 | | * each instance of H5C_image_entry_t in the array with |
2015 | | * the data necessary to construct the metadata cache image. |
2016 | | * |
2017 | | * Return: Non-negative on success/Negative on failure |
2018 | | * |
2019 | | *------------------------------------------------------------------------- |
2020 | | */ |
static herr_t
H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
{
    H5C_cache_entry_t *entry_ptr;
    H5C_image_entry_t *image_entries = NULL;
#ifndef NDEBUG
    uint32_t entries_visited = 0;
#endif
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->pl_len == 0);
    assert(cache_ptr->num_entries_in_image > 0);
    assert(cache_ptr->image_entries == NULL);

    /* Allocate and initialize image_entries array.  Note the extra slot:
     * calloc zeroes it, leaving a sentinel entry with NULL image_ptr and
     * NULL fd_parent_addrs at the end of the array (asserted below).
     */
    if (NULL == (image_entries = (H5C_image_entry_t *)H5MM_calloc(
                     sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for image_entries");

    /* Initialize (non-zero/NULL/false) fields -- "<=" so the sentinel
     * entry gets these defaults as well.
     */
    for (u = 0; u <= cache_ptr->num_entries_in_image; u++) {
        image_entries[u].addr    = HADDR_UNDEF;
        image_entries[u].ring    = H5C_RING_UNDEFINED;
        image_entries[u].type_id = -1;
    } /* end for */

    /* Scan each entry on the index list and populate the image_entries array */
    u         = 0;
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        if (entry_ptr->include_in_image) {
            /* Since we have already serialized the cache, the following
             * should hold.
             */
            assert(entry_ptr->image_up_to_date);
            assert(entry_ptr->image_ptr);
            assert(entry_ptr->type);

            image_entries[u].addr = entry_ptr->addr;
            image_entries[u].size = entry_ptr->size;
            image_entries[u].ring = entry_ptr->ring;

            /* When a prefetched entry is included in the image, store
             * its underlying type id in the image entry, not
             * H5AC_PREFETCHED_ENTRY_ID.  In passing, also increment
             * the age (up to H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX).
             */
            if (entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID) {
                image_entries[u].type_id = entry_ptr->prefetch_type_id;

                /* Age is clamped at the ageout maximum */
                if (entry_ptr->age >= H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX)
                    image_entries[u].age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX;
                else
                    image_entries[u].age = entry_ptr->age + 1;
            } /* end if */
            else {
                /* Regular (non-prefetched) entry: real type id, age 0 */
                image_entries[u].type_id = entry_ptr->type->id;
                image_entries[u].age     = 0;
            } /* end else */

            image_entries[u].lru_rank             = entry_ptr->lru_rank;
            image_entries[u].is_dirty             = entry_ptr->is_dirty;
            image_entries[u].image_fd_height      = entry_ptr->image_fd_height;
            image_entries[u].fd_parent_count      = entry_ptr->fd_parent_count;
            image_entries[u].fd_parent_addrs      = entry_ptr->fd_parent_addrs;
            image_entries[u].fd_child_count       = entry_ptr->fd_child_count;
            image_entries[u].fd_dirty_child_count = entry_ptr->fd_dirty_child_count;
            image_entries[u].image_ptr            = entry_ptr->image_ptr;

            /* Null out entry_ptr->fd_parent_addrs and set
             * entry_ptr->fd_parent_count to zero so that ownership of the
             * flush dependency parents address array is transferred to the
             * image entry.
             */
            entry_ptr->fd_parent_count = 0;
            entry_ptr->fd_parent_addrs = NULL;

            u++;

            assert(u <= cache_ptr->num_entries_in_image);
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif

        entry_ptr = entry_ptr->il_next;
    } /* end while */

    /* Sanity checks: every index-list entry was visited, and exactly
     * num_entries_in_image of them were copied into the array.
     */
    assert(entries_visited == cache_ptr->index_len);
    assert(u == cache_ptr->num_entries_in_image);

    /* The sentinel entry past the last real entry must remain untouched */
    assert(image_entries[u].fd_parent_addrs == NULL);
    assert(image_entries[u].image_ptr == NULL);

    /* Hand the populated array to the cache */
    cache_ptr->image_entries = image_entries;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_for_file_close__setup_image_entries_array() */
2128 | | |
2129 | | /*------------------------------------------------------------------------- |
2130 | | * Function: H5C__prep_for_file_close__scan_entries |
2131 | | * |
2132 | | * Purpose: Scan all entries in the metadata cache, and store all |
2133 | | * entry specific data required for construction of the |
2134 | | * metadata cache image block and likely to be discarded |
2135 | | * or modified during the cache flush on file close. |
2136 | | * |
2137 | | * In particular, make note of: |
2138 | | * entry rank in LRU |
2139 | | * whether the entry is dirty |
2140 | | * base address of entry flush dependency parent, |
2141 | | * if it exists. |
2142 | | * number of flush dependency children, if any. |
2143 | | * |
2144 | | * Also, determine which entries are to be included in the |
2145 | | * metadata cache image. At present, all entries other than |
2146 | | * the superblock, the superblock extension object header and |
2147 | | * its associated chunks (if any) are included. |
2148 | | * |
2149 | | * Finally, compute the size of the metadata cache image |
2150 | | * block. |
2151 | | * |
2152 | | * Return: Non-negative on success/Negative on failure |
2153 | | * |
2154 | | *------------------------------------------------------------------------- |
2155 | | */ |
static herr_t
H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
{
    H5C_cache_entry_t *entry_ptr;
    bool               include_in_image;
    int                lru_rank = 1; /* LRU ranks are 1-based; -1 means "not on the LRU" */
#ifndef NDEBUG
    unsigned entries_visited                  = 0;
    uint32_t num_entries_tentatively_in_image = 0;
#endif
    uint32_t num_entries_in_image = 0;
    size_t   image_len;
    size_t   entry_header_len;
    size_t   fd_parents_list_len;
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(f->shared->sblock);
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->pl_len == 0);

    /* Initialize image len to the size of the metadata cache image block
     * header.
     */
    image_len        = H5C__cache_image_block_header_size(f);
    entry_header_len = H5C__cache_image_block_entry_header_size(f);

    /* Scan each entry on the index list */
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        /* Since we have already serialized the cache, the following
         * should hold.
         */
        assert(entry_ptr->image_up_to_date);
        assert(entry_ptr->image_ptr);

        /* Initially, we mark all entries in the rings included
         * in the cache image as being included in the image.
         * Depending on circumstances, we may exclude some of
         * these entries later.
         */
        if (entry_ptr->ring > H5C_MAX_RING_IN_IMAGE)
            include_in_image = false;
        else
            include_in_image = true;
        entry_ptr->include_in_image = include_in_image;

        if (include_in_image) {
            entry_ptr->lru_rank        = -1;
            entry_ptr->image_dirty     = entry_ptr->is_dirty;
            entry_ptr->image_fd_height = 0; /* will compute this later */

            /* Initially, include all flush dependency parents in the
             * list of flush dependencies to be stored in the
             * image.  We may remove some or all of these later.
             */
            if (entry_ptr->flush_dep_nparents > 0) {
                /* The parents addresses array may already exist -- reallocate
                 * as needed.
                 */
                if (entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count) {
                    /* parent addresses array should already be allocated
                     * and of the correct size.
                     */
                    assert(entry_ptr->fd_parent_addrs);
                } /* end if */
                else if (entry_ptr->fd_parent_count > 0) {
                    /* Existing array is the wrong size -- discard it so
                     * a correctly sized one is allocated below.
                     */
                    assert(entry_ptr->fd_parent_addrs);
                    entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
                } /* end else-if */
                else {
                    assert(entry_ptr->fd_parent_count == 0);
                    assert(entry_ptr->fd_parent_addrs == NULL);
                } /* end else */

                entry_ptr->fd_parent_count = entry_ptr->flush_dep_nparents;
                if (NULL == entry_ptr->fd_parent_addrs)
                    if (NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc(
                                     sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_count))))
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
                                    "memory allocation failed for fd parent addrs buffer");

                /* Snapshot the base addresses of all flush dependency parents */
                for (int i = 0; i < (int)(entry_ptr->fd_parent_count); i++) {
                    entry_ptr->fd_parent_addrs[i] = entry_ptr->flush_dep_parent[i]->addr;
                    assert(H5_addr_defined(entry_ptr->fd_parent_addrs[i]));
                } /* end for */
            }     /* end if */
            else if (entry_ptr->fd_parent_count > 0) {
                /* Entry currently has no parents -- discard any stale
                 * parent addresses array left from a previous scan.
                 */
                assert(entry_ptr->fd_parent_addrs);
                entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
            } /* end else-if */
            else
                assert(entry_ptr->fd_parent_addrs == NULL);

            /* Initially, all flush dependency children are included in
             * the count of flush dependency child relationships to be
             * represented in the cache image.  Some or all of these
             * may be dropped from the image later.
             */
            if (entry_ptr->flush_dep_nchildren > 0) {
                /* A flush dependency parent must be pinned */
                if (!entry_ptr->is_pinned)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "encountered unpinned fd parent?!?");

                entry_ptr->fd_child_count       = entry_ptr->flush_dep_nchildren;
                entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children;
            } /* end if */

#ifndef NDEBUG
            num_entries_tentatively_in_image++;
#endif
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->il_next;
    } /* end while */
    assert(entries_visited == cache_ptr->index_len);

    /* Now compute the flush dependency heights of all flush dependency
     * relationships to be represented in the image.
     *
     * If all entries in the target rings are included in the
     * image, the flush dependency heights are simply the heights
     * of all flush dependencies in the target rings.
     *
     * However, if we restrict appearance in the cache image either
     * by number of entries in the image, restrictions on the number
     * of times a prefetched entry can appear in an image, or image
     * size, it is possible that flush dependency parents or children
     * of entries that are in the image may not be included in the
     * image.  In this case, we must prune all flush dependency
     * relationships that cross the image boundary, and exclude
     * from the image all dirty flush dependency children that have
     * a dirty flush dependency parent that is not in the image.
     * This is necessary to preserve the required flush ordering.
     *
     * These details are tended to by the following call to
     * H5C__prep_for_file_close__compute_fd_heights().  Because the
     * exact contents of the image cannot be known until after this
     * call, computation of the image size is delayed.
     */
    if (H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?");

    /* At this point, all entries that will appear in the cache
     * image should be marked correctly.  Compute the size of the
     * cache image.
     */
#ifndef NDEBUG
    entries_visited = 0;
#endif
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        if (entry_ptr->include_in_image) {
            /* Each image entry carries a fixed-size header, an optional
             * list of parent addresses, and the entry's on-disk image.
             */
            if (entry_ptr->fd_parent_count > 0)
                fd_parents_list_len = (size_t)(H5F_SIZEOF_ADDR(f) * entry_ptr->fd_parent_count);
            else
                fd_parents_list_len = (size_t)0;

            image_len += entry_header_len + fd_parents_list_len + entry_ptr->size;
            num_entries_in_image++;
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->il_next;
    } /* end while */
    assert(entries_visited == cache_ptr->index_len);
    assert(num_entries_in_image <= num_entries_tentatively_in_image);

#ifndef NDEBUG
    {
        /* Verify that the entries excluded from the image are exactly
         * those in the rings beyond H5C_MAX_RING_IN_IMAGE.
         */
        unsigned j = 0;
        for (int i = H5C_MAX_RING_IN_IMAGE + 1; i <= H5C_RING_SB; i++)
            j += cache_ptr->index_ring_len[i];

        /* This will change */
        assert(entries_visited == (num_entries_tentatively_in_image + j));
    }
#endif

    cache_ptr->num_entries_in_image = num_entries_in_image;
#ifndef NDEBUG
    entries_visited = 0;
#endif

    /* Now scan the LRU list to set the lru_rank fields of all entries
     * on the LRU.
     *
     * Note that we start with rank 1, and increment by 1 with each
     * entry on the LRU.
     *
     * Note that manually pinned entries will have lru_rank -1,
     * and no flush dependency.  Putting these entries at the head of
     * the reconstructed LRU should be appropriate.
     */
    entry_ptr = cache_ptr->LRU_head_ptr;
    while (entry_ptr != NULL) {
        assert(entry_ptr->type != NULL);

        /* to avoid confusion, don't set lru_rank on epoch markers.
         * Note that we still increment the lru_rank, so that the holes
         * in the sequence of entries on the LRU will indicate the
         * locations of epoch markers (if any) when we reconstruct
         * the LRU.
         *
         * Do not set lru_rank or increment lru_rank for entries
         * that will not be included in the cache image.
         */
        if (entry_ptr->type->id == H5AC_EPOCH_MARKER_ID)
            lru_rank++;
        else if (entry_ptr->include_in_image) {
            entry_ptr->lru_rank = lru_rank;
            lru_rank++;
        } /* end else-if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->next;
    } /* end while */
    assert(entries_visited == cache_ptr->LRU_list_len);

    /* Account for the trailing checksum of the image block */
    image_len += H5F_SIZEOF_CHKSUM;
    cache_ptr->image_data_len = image_len;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_for_file_close__scan_entries() */
2392 | | |
2393 | | /*------------------------------------------------------------------------- |
2394 | | * Function: H5C__check_for_duplicates() |
2395 | | * |
 * Purpose:     Detects two entries with the same address. When a
 *              duplicate is found, fail; the caller is responsible for
 *              cleaning up the half-processed entry and expunging any
 *              entries already reconstructed, as with other failures.
2399 | | * |
2400 | | * Return: SUCCEED on success, and FAIL on failure. |
2401 | | * |
2402 | | *------------------------------------------------------------------------- |
2403 | | */ |
2404 | | static herr_t |
2405 | | H5C__check_for_duplicates(H5C_cache_entry_t *pf_entry_ptr, H5C_recon_entry_t **recon_table_ptr) |
2406 | 0 | { |
2407 | 0 | haddr_t addr = pf_entry_ptr->addr; |
2408 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
2409 | 0 | H5C_recon_entry_t *recon_entry = NULL; /* Points to an entry in the temp table */ |
2410 | |
|
2411 | 0 | FUNC_ENTER_PACKAGE |
2412 | | |
2413 | | /* Check whether the address is duplicated */ |
2414 | 0 | HASH_FIND(hh, *recon_table_ptr, &addr, sizeof(haddr_t), recon_entry); |
2415 | | |
2416 | | /* Duplicate found, remove the duplicated entry */ |
2417 | 0 | if (recon_entry) |
2418 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "duplicate addresses found"); |
2419 | 0 | else { |
2420 | | /* Insert address into the hash table for checking against later */ |
2421 | 0 | if (NULL == (recon_entry = (H5C_recon_entry_t *)H5MM_malloc(sizeof(H5C_recon_entry_t)))) |
2422 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for address entry"); |
2423 | 0 | recon_entry->addr = addr; |
2424 | 0 | recon_entry->entry_ptr = pf_entry_ptr; |
2425 | 0 | HASH_ADD(hh, *recon_table_ptr, addr, sizeof(haddr_t), recon_entry); |
2426 | 0 | } |
2427 | | |
2428 | 0 | done: |
2429 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
2430 | 0 | } /* H5C__check_for_duplicates() */ |
2431 | | |
2432 | | /*------------------------------------------------------------------------- |
2433 | | * Function: H5C__reconstruct_cache_contents() |
2434 | | * |
2435 | | * Purpose: Scan the image buffer, and create a prefetched |
2436 | | * cache entry for every entry in the buffer. Insert the |
2437 | | * prefetched entries in the index and the LRU, and |
2438 | | * reconstruct any flush dependencies. Order the entries |
2439 | | * in the LRU as indicated by the stored lru_ranks. |
2440 | | * |
2441 | | * Return: SUCCEED on success, and FAIL on failure. |
2442 | | * |
2443 | | *------------------------------------------------------------------------- |
2444 | | */ |
static herr_t
H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
{
    H5C_cache_entry_t *pf_entry_ptr = NULL; /* Pointer to prefetched entry */
    H5C_cache_entry_t *parent_ptr;          /* Pointer to parent of prefetched entry */
    hsize_t            image_len;           /* Image length */
    const uint8_t     *p;                   /* Pointer into image buffer */
    unsigned           u, v;                /* Local index variable */
    herr_t             ret_value = SUCCEED; /* Return value */

    /* Declare a uthash table to detect duplicate addresses.  It will be destroyed
       after decoding the cache contents */
    H5C_recon_entry_t *recon_table = NULL; /* Hash table head */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);
    assert(cache_ptr->image_buffer);
    assert(cache_ptr->image_len > 0);

    /* Decode metadata cache image header */
    p = (uint8_t *)cache_ptr->image_buffer;
    if (H5C__decode_cache_image_header(f, cache_ptr, &p, cache_ptr->image_len + 1) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "cache image header decode failed");
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_len);

    /* The image_data_len and # of entries should be defined now */
    assert(cache_ptr->image_data_len > 0);
    assert(cache_ptr->image_data_len <= cache_ptr->image_len);
    assert(cache_ptr->num_entries_in_image > 0);

    /* Reconstruct entries in image */
    image_len = cache_ptr->image_len;
    for (u = 0; u < cache_ptr->num_entries_in_image; u++) {

        /* Create the prefetched entry described by the ith
         * entry in cache_ptr->image_entries.
         */
        if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &image_len, &p)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed");

        /* Make sure different entries don't have the same address.
         * On failure, the new entry is not yet in the index or recon
         * table, so free its buffers here before taking the error path.
         */
        if (H5C__check_for_duplicates(pf_entry_ptr, &recon_table) < 0) {
            /* Free the half-processed entry */
            if (pf_entry_ptr->image_ptr)
                H5MM_xfree(pf_entry_ptr->image_ptr);
            if (pf_entry_ptr->fd_parent_count > 0 && pf_entry_ptr->fd_parent_addrs)
                H5MM_xfree(pf_entry_ptr->fd_parent_addrs);
            pf_entry_ptr = H5FL_FREE(H5C_cache_entry_t, pf_entry_ptr);
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "duplicate addresses in cache");
        }

        /* Note that we make no checks on available cache space before
         * inserting the reconstructed entry into the metadata cache.
         *
         * This is OK since the cache must be almost empty at the beginning
         * of the process, and since we check cache size at the end of the
         * reconstruction process.
         */

        /* Insert the prefetched entry in the index */
        H5C__INSERT_IN_INDEX(cache_ptr, pf_entry_ptr, FAIL);

        /* If dirty, insert the entry into the slist. */
        if (pf_entry_ptr->is_dirty)
            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, pf_entry_ptr, FAIL);

        /* Append the entry to the LRU */
        H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, pf_entry_ptr, FAIL);

        H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty);

        /* If the prefetched entry is the child in one or more flush
         * dependency relationships, recreate those flush dependencies.
         */
        for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
            /* Sanity checks */
            assert(pf_entry_ptr->fd_parent_addrs);
            assert(H5_addr_defined(pf_entry_ptr->fd_parent_addrs[v]));

            /* Find the parent entry */
            parent_ptr = NULL;
            H5C__SEARCH_INDEX(cache_ptr, pf_entry_ptr->fd_parent_addrs[v], parent_ptr, FAIL);
            if (parent_ptr == NULL)
                HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "fd parent not in cache?!?");

            /* Sanity checks */
            assert(parent_ptr->addr == pf_entry_ptr->fd_parent_addrs[v]);
            assert(parent_ptr->lru_rank == -1);

            /* Must protect parent entry to set up a flush dependency.
             * Do this now, and then unprotect when done.
             */
            H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL);
            parent_ptr->is_protected = true;

            /* Setup the flush dependency */
            if (H5C_create_flush_dependency(parent_ptr, pf_entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency");

            /* And now unprotect */
            H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, parent_ptr, FAIL);
            parent_ptr->is_protected = false;
        } /* end for */
    }     /* end for */

#ifndef NDEBUG
    /* Scan the cache entries, and verify that each entry has
     * the expected flush dependency status.
     */
    pf_entry_ptr = cache_ptr->il_head;
    while (pf_entry_ptr != NULL) {
        assert((pf_entry_ptr->prefetched && pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) ||
               (!pf_entry_ptr->prefetched && pf_entry_ptr->type != H5AC_PREFETCHED_ENTRY));
        if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY)
            assert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);

        for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
            parent_ptr = pf_entry_ptr->flush_dep_parent[v];
            assert(parent_ptr);
            assert(pf_entry_ptr->fd_parent_addrs);
            assert(pf_entry_ptr->fd_parent_addrs[v] == parent_ptr->addr);
            assert(parent_ptr->flush_dep_nchildren > 0);
        } /* end for */

        if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) {
            assert(pf_entry_ptr->fd_child_count == pf_entry_ptr->flush_dep_nchildren);
            assert(pf_entry_ptr->fd_dirty_child_count == pf_entry_ptr->flush_dep_ndirty_children);
        } /* end if */

        pf_entry_ptr = pf_entry_ptr->il_next;
    } /* end while */

    /* Scan the LRU, and verify the expected ordering of the
     * prefetched entries.
     */
    {
        int                lru_rank_holes = 0;
        H5C_cache_entry_t *entry_ptr;
        int                i; /* Local index variable */

        i         = -1;
        entry_ptr = cache_ptr->LRU_head_ptr;
        while (entry_ptr != NULL) {
            assert(entry_ptr->type != NULL);

            if (entry_ptr->prefetched) {
                assert(entry_ptr->lru_rank != 0);
                assert((entry_ptr->lru_rank == -1) || (entry_ptr->lru_rank > i));

                if ((entry_ptr->lru_rank > 1) && (entry_ptr->lru_rank > i + 1))
                    lru_rank_holes += entry_ptr->lru_rank - (i + 1);
                i = entry_ptr->lru_rank;
            } /* end if */

            entry_ptr = entry_ptr->next;
        } /* end while */

        /* Holes in the sequences of LRU ranks can appear due to epoch
         * markers.  They are left in to allow re-insertion of the
         * epoch markers on reconstruction of the cache -- thus
         * the following sanity check will have to be revised when
         * we add code to store and restore adaptive resize status.
         */
        assert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS);
    } /* end block */
#endif

    /* Check to see if the cache is oversize, and evict entries as
     * necessary to remain within limits.
     */
    if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
        /* cache is oversized -- call H5C__make_space_in_cache() with zero
         * space needed to repair the situation if possible.
         */
        bool write_permitted = false;

        if (cache_ptr->check_write_permitted && (cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "Can't get write_permitted");
        else
            write_permitted = cache_ptr->write_permitted;

        if (H5C__make_space_in_cache(f, 0, write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "H5C__make_space_in_cache failed");
    } /* end if */

done:
    if (FAIL == ret_value) {

        /* If we failed during reconstruction, remove reconstructed entries */
        H5C_recon_entry_t *recon_entry, *tmp;

        HASH_ITER(hh, recon_table, recon_entry, tmp)
        {
            H5C_cache_entry_t *entry_ptr = recon_entry->entry_ptr;
            haddr_t            addr      = entry_ptr->addr;

            /* If the entry is protected, unprotect it */
            if (entry_ptr->is_protected)
                if (H5C_unprotect(f, addr, (void *)entry_ptr, H5C__DELETED_FLAG) < 0)
                    HDONE_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry");

            /* If the entry is pinned, unpin it */
            if (entry_ptr->is_pinned)
                if (H5C_unpin_entry((void *)entry_ptr) < 0)
                    HDONE_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry");

            /* Remove the unpinned and unprotected entry */
            if (H5AC_expunge_entry(f, H5AC_PREFETCHED_ENTRY, addr, H5AC__NO_FLAGS_SET) < 0) {
                /* Expunge failed -- free the entry's buffers manually */
                if (entry_ptr->image_ptr)
                    H5MM_xfree(entry_ptr->image_ptr);
                if (entry_ptr->fd_parent_count > 0 && entry_ptr->fd_parent_addrs)
                    H5MM_xfree(entry_ptr->fd_parent_addrs);
                entry_ptr = H5FL_FREE(H5C_cache_entry_t, entry_ptr);
                /* NOTE(review): this message looks copy-pasted from superblock
                 * code -- presumably it should say "cache image entry", not
                 * "driver info block"; confirm before changing the string.
                 */
                HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block");
            }

            HASH_DEL(recon_table, recon_entry);
            H5MM_xfree(recon_entry);
        }
        /* The temporary hash table should be empty */
        assert(recon_table == NULL);
    }
    /* No failure, only cleanup the temporary hash table */
    else if (recon_table) {
        /* Free the temporary hash table */
        H5C_recon_entry_t *cur, *tmp;
        HASH_ITER(hh, recon_table, cur, tmp)
        {
            HASH_DEL(recon_table, cur);
            H5MM_xfree(cur);
        }
    }

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__reconstruct_cache_contents() */
2685 | | |
2686 | | /*------------------------------------------------------------------------- |
2687 | | * Function: H5C__reconstruct_cache_entry() |
2688 | | * |
2689 | | * Purpose: Allocate a prefetched metadata cache entry and initialize |
2690 | | * it from image buffer. |
2691 | | * |
2692 | | * Return a pointer to the newly allocated cache entry, |
2693 | | * or NULL on failure. |
2694 | | * |
2695 | | * Return: Pointer to the new instance of H5C_cache_entry on success, |
2696 | | * or NULL on failure. |
2697 | | * |
2698 | | *------------------------------------------------------------------------- |
2699 | | */ |
2700 | | static H5C_cache_entry_t * |
2701 | | H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, const uint8_t **buf) |
2702 | 0 | { |
2703 | 0 | H5C_cache_entry_t *pf_entry_ptr = NULL; /* Reconstructed cache entry */ |
2704 | 0 | uint8_t flags = 0; |
2705 | 0 | bool is_dirty = false; |
2706 | 0 | haddr_t eoa; |
2707 | 0 | bool is_fd_parent = false; |
2708 | | #ifndef NDEBUG /* only used in assertions */ |
2709 | | bool in_lru = false; |
2710 | | bool is_fd_child = false; |
2711 | | #endif |
2712 | 0 | bool file_is_rw; |
2713 | 0 | const uint8_t *p; |
2714 | 0 | const uint8_t *p_end = *buf + *buf_size - 1; /* Pointer to last valid byte in buffer */ |
2715 | 0 | H5C_cache_entry_t *ret_value = NULL; /* Return value */ |
2716 | |
|
2717 | 0 | FUNC_ENTER_PACKAGE |
2718 | | |
2719 | | /* Sanity checks */ |
2720 | 0 | assert(cache_ptr); |
2721 | 0 | assert(cache_ptr->num_entries_in_image > 0); |
2722 | 0 | assert(buf && *buf); |
2723 | | |
2724 | | /* Key R/W access off of whether the image will be deleted */ |
2725 | 0 | file_is_rw = cache_ptr->delete_image; |
2726 | | |
2727 | | /* Allocate space for the prefetched cache entry */ |
2728 | 0 | if (NULL == (pf_entry_ptr = H5FL_CALLOC(H5C_cache_entry_t))) |
2729 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for prefetched cache entry"); |
2730 | | |
2731 | | /* Get pointer to buffer */ |
2732 | 0 | p = *buf; |
2733 | | |
2734 | | /* Decode type id */ |
2735 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) |
2736 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2737 | 0 | pf_entry_ptr->prefetch_type_id = *p++; |
2738 | 0 | if (pf_entry_ptr->prefetch_type_id < H5AC_BT_ID || pf_entry_ptr->prefetch_type_id >= H5AC_NTYPES) |
2739 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "type id is out of valid range"); |
2740 | | |
2741 | | /* Decode flags */ |
2742 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) |
2743 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2744 | 0 | flags = *p++; |
2745 | 0 | if (flags & H5C__MDCI_ENTRY_DIRTY_FLAG) |
2746 | 0 | is_dirty = true; |
2747 | | #ifndef NDEBUG /* only used in assertions */ |
2748 | | if (flags & H5C__MDCI_ENTRY_IN_LRU_FLAG) |
2749 | | in_lru = true; |
2750 | | if (flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG) |
2751 | | is_fd_parent = true; |
2752 | | if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) |
2753 | | is_fd_child = true; |
2754 | | #endif |
2755 | | |
2756 | | /* Force dirty entries to clean if the file read only -- must do |
2757 | | * this as otherwise the cache will attempt to write them on file |
2758 | | * close. Since the file is R/O, the metadata cache image superblock |
2759 | | * extension message and the cache image block will not be removed. |
2760 | | * Hence no danger in this for subsequent opens. |
2761 | | * |
2762 | | * However, if the dirty entry (marked clean for purposes of the R/O |
2763 | | * file open) is evicted and then referred to, the cache will read |
2764 | | * either invalid or obsolete data from the file. Handle this by |
2765 | | * setting the prefetched_dirty field, and hiding such entries from |
2766 | | * the eviction candidate selection algorithm. |
2767 | | */ |
2768 | 0 | pf_entry_ptr->is_dirty = (is_dirty && file_is_rw); |
2769 | | |
2770 | | /* Decode ring */ |
2771 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) |
2772 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2773 | 0 | pf_entry_ptr->ring = *p++; |
2774 | 0 | if (pf_entry_ptr->ring >= (uint8_t)(H5C_RING_NTYPES)) |
2775 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "ring is out of valid range"); |
2776 | | |
2777 | | /* Decode age */ |
2778 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) |
2779 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2780 | 0 | pf_entry_ptr->age = *p++; |
2781 | 0 | if (pf_entry_ptr->age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX) |
2782 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "entry age is out of policy range"); |
2783 | | |
2784 | | /* Decode dependency child count */ |
2785 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) |
2786 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2787 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_child_count); |
2788 | 0 | if (is_fd_parent && pf_entry_ptr->fd_child_count <= 0) |
2789 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "parent entry has no children"); |
2790 | 0 | else if (!is_fd_parent && pf_entry_ptr->fd_child_count != 0) |
2791 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "non-parent entry has children"); |
2792 | | |
2793 | | /* Decode dirty dependency child count */ |
2794 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) |
2795 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2796 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count); |
2797 | 0 | if (!file_is_rw) |
2798 | 0 | pf_entry_ptr->fd_dirty_child_count = 0; |
2799 | 0 | if (pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count) |
2800 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count"); |
2801 | | |
2802 | | /* Decode dependency parent count */ |
2803 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) |
2804 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2805 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_parent_count); |
2806 | 0 | assert((is_fd_child && pf_entry_ptr->fd_parent_count > 0) || |
2807 | 0 | (!is_fd_child && pf_entry_ptr->fd_parent_count == 0)); |
2808 | | |
2809 | | /* Decode index in LRU */ |
2810 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) |
2811 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2812 | 0 | INT32DECODE(p, pf_entry_ptr->lru_rank); |
2813 | 0 | assert((in_lru && pf_entry_ptr->lru_rank >= 0) || (!in_lru && pf_entry_ptr->lru_rank == -1)); |
2814 | | |
2815 | | /* Decode entry offset */ |
2816 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) |
2817 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2818 | 0 | H5F_addr_decode(f, &p, &pf_entry_ptr->addr); |
2819 | | |
2820 | | /* Validate address range */ |
2821 | 0 | eoa = H5F_get_eoa(f, H5FD_MEM_DEFAULT); |
2822 | 0 | if (!H5_addr_defined(pf_entry_ptr->addr) || H5_addr_overflow(pf_entry_ptr->addr, pf_entry_ptr->size) || |
2823 | 0 | H5_addr_ge(pf_entry_ptr->addr + pf_entry_ptr->size, eoa)) |
2824 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry address range"); |
2825 | | |
2826 | | /* Decode entry length */ |
2827 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f), p_end)) |
2828 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2829 | 0 | H5F_DECODE_LENGTH(f, p, pf_entry_ptr->size); |
2830 | 0 | if (pf_entry_ptr->size == 0) |
2831 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry size"); |
2832 | | |
2833 | | /* Verify expected length of entry image */ |
2834 | 0 | if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) |
2835 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, NULL, "Bad entry image len"); |
2836 | | |
2837 | | /* If parent count greater than zero, allocate array for parent |
2838 | | * addresses, and decode addresses into the array. |
2839 | | */ |
2840 | 0 | if (pf_entry_ptr->fd_parent_count > 0) { |
2841 | 0 | unsigned u; /* Local index variable */ |
2842 | |
|
2843 | 0 | if (NULL == (pf_entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc( |
2844 | 0 | (size_t)(pf_entry_ptr->fd_parent_count) * H5F_SIZEOF_ADDR(f)))) |
2845 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, |
2846 | 0 | "memory allocation failed for fd parent addrs buffer"); |
2847 | | |
2848 | 0 | for (u = 0; u < pf_entry_ptr->fd_parent_count; u++) { |
2849 | |
|
2850 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) |
2851 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2852 | 0 | H5F_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u])); |
2853 | 0 | if (!H5_addr_defined(pf_entry_ptr->fd_parent_addrs[u])) |
2854 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid flush dependency parent offset"); |
2855 | 0 | } /* end for */ |
2856 | 0 | } /* end if */ |
2857 | | |
2858 | | /* Allocate buffer for entry image */ |
2859 | 0 | if (NULL == (pf_entry_ptr->image_ptr = H5MM_malloc(pf_entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) |
2860 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer"); |
2861 | | #if H5C_DO_MEMORY_SANITY_CHECKS |
2862 | | H5MM_memcpy(((uint8_t *)pf_entry_ptr->image_ptr) + pf_entry_ptr->size, H5C_IMAGE_SANITY_VALUE, |
2863 | | H5C_IMAGE_EXTRA_SPACE); |
2864 | | #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ |
2865 | | |
2866 | | /* Copy the entry image from the cache image block */ |
2867 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, pf_entry_ptr->size, p_end)) |
2868 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); |
2869 | 0 | H5MM_memcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size); |
2870 | 0 | p += pf_entry_ptr->size; |
2871 | | |
2872 | | /* Initialize the rest of the fields in the prefetched entry */ |
2873 | | /* (Only need to set non-zero/NULL/false fields, due to calloc() above) */ |
2874 | 0 | pf_entry_ptr->cache_ptr = cache_ptr; |
2875 | 0 | pf_entry_ptr->image_up_to_date = true; |
2876 | 0 | pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY; |
2877 | 0 | pf_entry_ptr->prefetched = true; |
2878 | 0 | pf_entry_ptr->prefetched_dirty = is_dirty && (!file_is_rw); |
2879 | | |
2880 | | /* Sanity checks */ |
2881 | 0 | assert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE); |
2882 | | |
2883 | | /* Update buffer pointer and buffer len */ |
2884 | 0 | *buf_size -= (hsize_t)(p - *buf); |
2885 | 0 | *buf = p; |
2886 | |
|
2887 | 0 | ret_value = pf_entry_ptr; |
2888 | |
|
2889 | 0 | done: |
2890 | 0 | if (NULL == ret_value && pf_entry_ptr) { |
2891 | 0 | if (pf_entry_ptr->image_ptr) |
2892 | 0 | H5MM_xfree(pf_entry_ptr->image_ptr); |
2893 | 0 | if (pf_entry_ptr->fd_parent_count > 0 && pf_entry_ptr->fd_parent_addrs) |
2894 | 0 | H5MM_xfree(pf_entry_ptr->fd_parent_addrs); |
2895 | 0 | pf_entry_ptr = H5FL_FREE(H5C_cache_entry_t, pf_entry_ptr); |
2896 | 0 | } |
2897 | |
|
2898 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
2899 | 0 | } /* H5C__reconstruct_cache_entry() */ |
2900 | | |
2901 | | /*------------------------------------------------------------------------- |
2902 | | * Function: H5C__write_cache_image_superblock_msg |
2903 | | * |
2904 | | * Purpose: Write the cache image superblock extension message, |
2905 | | * creating if specified. |
2906 | | * |
2907 | | * In general, the size and location of the cache image block |
2908 | | * will be unknown at the time that the cache image superblock |
2909 | | * message is created. A subsequent call to this routine will |
2910 | | * be used to write the correct data. |
2911 | | * |
2912 | | * Return: Non-negative on success/Negative on failure. |
2913 | | * |
2914 | | *------------------------------------------------------------------------- |
2915 | | */ |
2916 | | static herr_t |
2917 | | H5C__write_cache_image_superblock_msg(H5F_t *f, bool create) |
2918 | 0 | { |
2919 | 0 | H5C_t *cache_ptr; |
2920 | 0 | H5O_mdci_t mdci_msg; /* metadata cache image message */ |
2921 | | /* to insert in the superblock */ |
2922 | | /* extension. */ |
2923 | 0 | unsigned mesg_flags = H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS; |
2924 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
2925 | |
|
2926 | 0 | FUNC_ENTER_PACKAGE |
2927 | | |
2928 | | /* Sanity checks */ |
2929 | 0 | assert(f); |
2930 | 0 | assert(f->shared); |
2931 | 0 | assert(f->shared->cache); |
2932 | 0 | cache_ptr = f->shared->cache; |
2933 | 0 | assert(cache_ptr); |
2934 | 0 | assert(cache_ptr->close_warning_received); |
2935 | | |
2936 | | /* Write data into the metadata cache image superblock extension message. |
2937 | | * Note that this data will be bogus when we first create the message. |
2938 | | * We will overwrite this data later in a second call to this function. |
2939 | | */ |
2940 | 0 | mdci_msg.addr = cache_ptr->image_addr; |
2941 | | #ifdef H5_HAVE_PARALLEL |
2942 | | if (cache_ptr->aux_ptr) { /* we have multiple processes */ |
2943 | | H5AC_aux_t *aux_ptr; |
2944 | | |
2945 | | aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; |
2946 | | mdci_msg.size = aux_ptr->p0_image_len; |
2947 | | } /* end if */ |
2948 | | else |
2949 | | #endif /* H5_HAVE_PARALLEL */ |
2950 | 0 | mdci_msg.size = cache_ptr->image_len; |
2951 | | |
2952 | | /* Write metadata cache image message to superblock extension */ |
2953 | 0 | if (H5F__super_ext_write_msg(f, H5O_MDCI_MSG_ID, &mdci_msg, create, mesg_flags) < 0) |
2954 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, |
2955 | 0 | "can't write metadata cache image message to superblock extension"); |
2956 | | |
2957 | 0 | done: |
2958 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
2959 | 0 | } /* H5C__write_cache_image_superblock_msg() */ |
2960 | | |
2961 | | /*------------------------------------------------------------------------- |
2962 | | * Function: H5C__write_cache_image |
2963 | | * |
2964 | | * Purpose: Write the supplied metadata cache image to the specified |
2965 | | * location in file. |
2966 | | * |
2967 | | * Return: Non-negative on success/Negative on failure |
2968 | | * |
2969 | | *------------------------------------------------------------------------- |
2970 | | */ |
2971 | | static herr_t |
2972 | | H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr) |
2973 | 0 | { |
2974 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
2975 | |
|
2976 | 0 | FUNC_ENTER_PACKAGE |
2977 | | |
2978 | | /* Sanity checks */ |
2979 | 0 | assert(f); |
2980 | 0 | assert(cache_ptr); |
2981 | 0 | assert(H5_addr_defined(cache_ptr->image_addr)); |
2982 | 0 | assert(cache_ptr->image_len > 0); |
2983 | 0 | assert(cache_ptr->image_buffer); |
2984 | |
|
2985 | | #ifdef H5_HAVE_PARALLEL |
2986 | | { |
2987 | | H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; |
2988 | | |
2989 | | if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) { |
2990 | | #endif /* H5_HAVE_PARALLEL */ |
2991 | | |
2992 | | /* Write the buffer (if serial access, or rank 0 for parallel access) */ |
2993 | 0 | if (H5F_block_write(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len, |
2994 | 0 | cache_ptr->image_buffer) < 0) |
2995 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't write metadata cache image block to file"); |
2996 | | #ifdef H5_HAVE_PARALLEL |
2997 | | } /* end if */ |
2998 | | } /* end block */ |
2999 | | #endif /* H5_HAVE_PARALLEL */ |
3000 | | |
3001 | 0 | done: |
3002 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
3003 | 0 | } /* H5C__write_cache_image() */ |