Line | Count | Source (jump to first uncovered line) |
1 | | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * |
2 | | * Copyright by The HDF Group. * |
3 | | * All rights reserved. * |
4 | | * * |
5 | | * This file is part of HDF5. The full HDF5 copyright notice, including * |
6 | | * terms governing use, modification, and redistribution, is contained in * |
7 | | * the LICENSE file, which can be found at the root of the source code * |
8 | | * distribution tree, or in https://www.hdfgroup.org/licenses. * |
9 | | * If you do not have access to either file, you may request a copy from * |
10 | | * help@hdfgroup.org. * |
11 | | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
12 | | |
13 | | /*------------------------------------------------------------------------- |
14 | | * |
15 | | * Created: H5Cimage.c |
16 | | * |
17 | | * Purpose: Functions in this file are specific to the implementation |
18 | | * of the metadata cache image feature. |
19 | | * |
20 | | *------------------------------------------------------------------------- |
21 | | */ |
22 | | |
23 | | /****************/ |
24 | | /* Module Setup */ |
25 | | /****************/ |
26 | | |
27 | | #include "H5Cmodule.h" /* This source code file is part of the H5C module */ |
28 | | #define H5F_FRIEND /*suppress error about including H5Fpkg */ |
29 | | |
30 | | /***********/ |
31 | | /* Headers */ |
32 | | /***********/ |
33 | | #include "H5private.h" /* Generic Functions */ |
34 | | #ifdef H5_HAVE_PARALLEL |
35 | | #define H5AC_FRIEND /*suppress error about including H5ACpkg */ |
36 | | #include "H5ACpkg.h" /* Metadata cache */ |
37 | | #endif /* H5_HAVE_PARALLEL */ |
38 | | #include "H5Cpkg.h" /* Cache */ |
39 | | #include "H5Eprivate.h" /* Error handling */ |
40 | | #include "H5Fpkg.h" /* Files */ |
41 | | #include "H5FDprivate.h" /* File drivers */ |
42 | | #include "H5FLprivate.h" /* Free Lists */ |
43 | | #include "H5MMprivate.h" /* Memory management */ |
44 | | |
45 | | /****************/ |
46 | | /* Local Macros */ |
47 | | /****************/ |
48 | | #if H5C_DO_MEMORY_SANITY_CHECKS |
49 | | #define H5C_IMAGE_EXTRA_SPACE 8 |
50 | | #define H5C_IMAGE_SANITY_VALUE "DeadBeef" |
51 | | #else /* H5C_DO_MEMORY_SANITY_CHECKS */ |
52 | | #define H5C_IMAGE_EXTRA_SPACE 0 |
53 | | #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ |
54 | | |
55 | | /* Cache image buffer components, on disk */ |
56 | 0 | #define H5C__MDCI_BLOCK_SIGNATURE "MDCI" |
57 | 0 | #define H5C__MDCI_BLOCK_SIGNATURE_LEN 4 |
58 | 0 | #define H5C__MDCI_BLOCK_VERSION_0 0 |
59 | | |
60 | | /* Metadata cache image header flags -- max 8 bits */ |
61 | 0 | #define H5C__MDCI_HEADER_HAVE_RESIZE_STATUS 0x01 |
62 | | |
63 | | /* Metadata cache image entry flags -- max 8 bits */ |
64 | 0 | #define H5C__MDCI_ENTRY_DIRTY_FLAG 0x01 |
65 | 0 | #define H5C__MDCI_ENTRY_IN_LRU_FLAG 0x02 |
66 | 0 | #define H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG 0x04 |
67 | 0 | #define H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG 0x08 |
68 | | |
69 | | /* Limits on flush dependency values, stored in 16-bit values on disk */ |
70 | 0 | #define H5C__MDCI_MAX_FD_CHILDREN USHRT_MAX |
71 | 0 | #define H5C__MDCI_MAX_FD_PARENTS USHRT_MAX |
72 | | |
73 | | /* Maximum ring allowed in image */ |
74 | 0 | #define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM |
75 | | |
76 | | /*********************************************************************** |
77 | | * |
78 | | * Stats collection macros |
79 | | * |
80 | | * The following macros must handle stats collection when collection |
81 | | * is enabled, and evaluate to the empty string when it is not. |
82 | | * |
83 | | ***********************************************************************/ |
84 | | #if H5C_COLLECT_CACHE_STATS |
85 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ |
86 | | do { \ |
87 | | (cache_ptr)->images_created++; \ |
88 | | } while (0) |
89 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ |
90 | | do { \ |
91 | | /* make sure image len is still good */ \ |
92 | | assert((cache_ptr)->image_len > 0); \ |
93 | | (cache_ptr)->images_read++; \ |
94 | | } while (0) |
95 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ |
96 | | do { \ |
97 | | /* make sure image len is still good */ \ |
98 | | assert((cache_ptr)->image_len > 0); \ |
99 | | (cache_ptr)->images_loaded++; \ |
100 | | (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ |
101 | | } while (0) |
102 | | #else /* H5C_COLLECT_CACHE_STATS */ |
103 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) |
104 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) |
105 | | #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) |
106 | | #endif /* H5C_COLLECT_CACHE_STATS */ |
107 | | |
108 | | /******************/ |
109 | | /* Local Typedefs */ |
110 | | /******************/ |
111 | | |
112 | | /********************/ |
113 | | /* Local Prototypes */ |
114 | | /********************/ |
115 | | |
116 | | /* Helper routines */ |
117 | | static size_t H5C__cache_image_block_entry_header_size(const H5F_t *f); |
118 | | static size_t H5C__cache_image_block_header_size(const H5F_t *f); |
119 | | static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, |
120 | | size_t buf_size); |
121 | | #ifndef NDEBUG /* only used in assertions */ |
122 | | static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, |
123 | | unsigned entry_num); |
124 | | #endif |
125 | | static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); |
126 | | static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num); |
127 | | static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr); |
128 | | static void H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, |
129 | | uint32_t fd_height); |
130 | | static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr); |
131 | | static herr_t H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr); |
132 | | static herr_t H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr); |
133 | | static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf); |
134 | | static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, bool create); |
135 | | static herr_t H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr); |
136 | | static herr_t H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr); |
137 | | static herr_t H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr); |
138 | | static herr_t H5C__free_image_entries_array(H5C_t *cache_ptr); |
139 | | |
140 | | /*********************/ |
141 | | /* Package Variables */ |
142 | | /*********************/ |
143 | | |
144 | | /* Declare a free list to manage H5C_cache_entry_t objects */ |
145 | | H5FL_DEFINE(H5C_cache_entry_t); |
146 | | |
147 | | /*****************************/ |
148 | | /* Library Private Variables */ |
149 | | /*****************************/ |
150 | | |
151 | | /*******************/ |
152 | | /* Local Variables */ |
153 | | /*******************/ |
154 | | |
155 | | /*------------------------------------------------------------------------- |
156 | | * Function: H5C_cache_image_pending() |
157 | | * |
158 | | * Purpose: Tests to see if the load of a metadata cache image |
159 | | * load is pending (i.e. will be executed on the next |
160 | | * protect or insert) |
161 | | * |
162 | | * Returns true if a cache image load is pending, and false |
163 | | * if not. Throws an assertion failure on error. |
164 | | * |
165 | | * Return: true if a cache image load is pending, and false otherwise. |
166 | | * |
167 | | *------------------------------------------------------------------------- |
168 | | */ |
169 | | bool |
170 | | H5C_cache_image_pending(const H5C_t *cache_ptr) |
171 | 0 | { |
172 | 0 | bool ret_value = true; /* Return value */ |
173 | |
|
174 | 0 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
175 | | |
176 | | /* Sanity checks */ |
177 | 0 | assert(cache_ptr); |
178 | |
|
179 | 0 | ret_value = (cache_ptr->load_image && !cache_ptr->image_loaded); |
180 | |
|
181 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
182 | 0 | } /* H5C_cache_image_pending() */ |
183 | | |
184 | | /*------------------------------------------------------------------------- |
185 | | * Function: H5C_cache_image_status() |
186 | | * |
187 | | * Purpose: Examine the metadata cache associated with the supplied |
188 | | * instance of H5F_t to determine whether the load of a |
189 | | * cache image has either been queued or executed, and if |
190 | | * construction of a cache image has been requested. |
191 | | * |
192 | | * This done, it set *load_ci_ptr to true if a cache image |
193 | | * has either been loaded or a load has been requested, and |
194 | | * to false otherwise. |
195 | | * |
196 | | * Similarly, set *write_ci_ptr to true if construction of |
197 | | * a cache image has been requested, and to false otherwise. |
198 | | * |
199 | | * Return: SUCCEED on success, and FAIL on failure. |
200 | | * |
201 | | *------------------------------------------------------------------------- |
202 | | */ |
203 | | herr_t |
204 | | H5C_cache_image_status(H5F_t *f, bool *load_ci_ptr, bool *write_ci_ptr) |
205 | 23 | { |
206 | 23 | H5C_t *cache_ptr; |
207 | | |
208 | 23 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
209 | | |
210 | | /* Sanity checks */ |
211 | 23 | assert(f); |
212 | 23 | assert(f->shared); |
213 | 23 | cache_ptr = f->shared->cache; |
214 | 23 | assert(cache_ptr); |
215 | 23 | assert(load_ci_ptr); |
216 | 23 | assert(write_ci_ptr); |
217 | | |
218 | 23 | *load_ci_ptr = cache_ptr->load_image || cache_ptr->image_loaded; |
219 | 23 | *write_ci_ptr = cache_ptr->image_ctl.generate_image; |
220 | | |
221 | 23 | FUNC_LEAVE_NOAPI(SUCCEED) |
222 | 23 | } /* H5C_cache_image_status() */ |
223 | | |
224 | | /*------------------------------------------------------------------------- |
225 | | * Function: H5C__construct_cache_image_buffer() |
226 | | * |
227 | | * Purpose: Allocate a buffer of size cache_ptr->image_len, and |
228 | | * load it with an image of the metadata cache image block. |
229 | | * |
230 | | * Note that by the time this function is called, the cache |
231 | | * should have removed all entries from its data structures. |
232 | | * |
233 | | * Return: SUCCEED on success, and FAIL on failure. |
234 | | * |
235 | | *------------------------------------------------------------------------- |
236 | | */ |
static herr_t
H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr)
{
    uint8_t *p;                   /* Pointer into image buffer */
    uint32_t chksum;              /* Metadata checksum over the image */
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks -- by this point all entries must have been evicted
     * from the cache proper (index_len == 0) and the image size computed.
     */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->image_ctl.generate_image);
    assert(cache_ptr->num_entries_in_image > 0);
    assert(cache_ptr->index_len == 0);
    assert(cache_ptr->image_data_len > 0);
    assert(cache_ptr->image_data_len <= cache_ptr->image_len);

    /* Allocate the buffer in which to construct the cache image block.
     * NOTE(review): allocation is image_len + 1 -- presumably one extra
     * byte for the debug decode below; confirm against the decode routines.
     */
    if (NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer");

    /* Construct the cache image block header image; p advances past it */
    p = (uint8_t *)cache_ptr->image_buffer;
    if (H5C__encode_cache_image_header(f, cache_ptr, &p) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "header image construction failed");
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);

    /* Construct the cache entry images, one per entry in the image list */
    for (u = 0; u < cache_ptr->num_entries_in_image; u++)
        if (H5C__encode_cache_image_entry(f, cache_ptr, &p, u) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "entry image construction failed");
    /* Strictly less than: the trailing checksum has not been written yet */
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);

    /* Construct the adaptive resize status image -- not yet */

    /* Compute the checksum over everything before the checksum field,
     * and encode it as the final field of the image.
     */
    chksum = H5_checksum_metadata(cache_ptr->image_buffer,
                                  (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
    UINT32ENCODE(p, chksum);
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) == cache_ptr->image_data_len);
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) <= cache_ptr->image_len);

#ifndef NDEBUG
    /* validate the metadata cache image we just constructed by decoding it
     * and comparing the result with the original data.
     */
    {
        uint32_t       old_chksum;
        const uint8_t *q;                     /* Read pointer for the decode pass */
        H5C_t         *fake_cache_ptr = NULL; /* Scratch cache to decode into */
        unsigned       v;
        herr_t         status; /* Status from decoding */

        fake_cache_ptr = (H5C_t *)H5MM_malloc(sizeof(H5C_t));
        assert(fake_cache_ptr);

        /* needed for sanity checks */
        fake_cache_ptr->image_len = cache_ptr->image_len;
        q                         = (const uint8_t *)cache_ptr->image_buffer;
        status = H5C__decode_cache_image_header(f, fake_cache_ptr, &q, cache_ptr->image_len + 1);
        assert(status >= 0);

        assert(NULL != p);
        assert(fake_cache_ptr->num_entries_in_image == cache_ptr->num_entries_in_image);

        /* +1 slot matches the allocation convention used for the real
         * image_entries array elsewhere in this module.
         */
        fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_malloc(
            sizeof(H5C_image_entry_t) * (size_t)(fake_cache_ptr->num_entries_in_image + 1));
        assert(fake_cache_ptr->image_entries);

        for (u = 0; u < fake_cache_ptr->num_entries_in_image; u++) {
            fake_cache_ptr->image_entries[u].image_ptr = NULL;

            /* touch up f->shared->cache to satisfy sanity checks... */
            f->shared->cache = fake_cache_ptr;
            status           = H5C__decode_cache_image_entry(f, fake_cache_ptr, &q, u);
            assert(status >= 0);

            /* ...and then return f->shared->cache to its correct value */
            f->shared->cache = cache_ptr;

            /* verify expected contents */
            assert(cache_ptr->image_entries[u].addr == fake_cache_ptr->image_entries[u].addr);
            assert(cache_ptr->image_entries[u].size == fake_cache_ptr->image_entries[u].size);
            assert(cache_ptr->image_entries[u].type_id == fake_cache_ptr->image_entries[u].type_id);
            assert(cache_ptr->image_entries[u].lru_rank == fake_cache_ptr->image_entries[u].lru_rank);
            assert(cache_ptr->image_entries[u].is_dirty == fake_cache_ptr->image_entries[u].is_dirty);
            /* don't check image_fd_height as it is not stored in
             * the metadata cache image block.
             */
            assert(cache_ptr->image_entries[u].fd_child_count ==
                   fake_cache_ptr->image_entries[u].fd_child_count);
            assert(cache_ptr->image_entries[u].fd_dirty_child_count ==
                   fake_cache_ptr->image_entries[u].fd_dirty_child_count);
            assert(cache_ptr->image_entries[u].fd_parent_count ==
                   fake_cache_ptr->image_entries[u].fd_parent_count);

            for (v = 0; v < cache_ptr->image_entries[u].fd_parent_count; v++)
                assert(cache_ptr->image_entries[u].fd_parent_addrs[v] ==
                       fake_cache_ptr->image_entries[u].fd_parent_addrs[v]);

            /* free the fd_parent_addrs array if it exists */
            if (fake_cache_ptr->image_entries[u].fd_parent_addrs) {
                assert(fake_cache_ptr->image_entries[u].fd_parent_count > 0);
                fake_cache_ptr->image_entries[u].fd_parent_addrs =
                    (haddr_t *)H5MM_xfree(fake_cache_ptr->image_entries[u].fd_parent_addrs);
                fake_cache_ptr->image_entries[u].fd_parent_count = 0;
            } /* end if */
            else
                assert(fake_cache_ptr->image_entries[u].fd_parent_count == 0);

            /* the decoded on-disk entry image must match the original
             * byte-for-byte.
             */
            assert(cache_ptr->image_entries[u].image_ptr);
            assert(fake_cache_ptr->image_entries[u].image_ptr);
            assert(!memcmp(cache_ptr->image_entries[u].image_ptr, fake_cache_ptr->image_entries[u].image_ptr,
                           cache_ptr->image_entries[u].size));

            fake_cache_ptr->image_entries[u].image_ptr =
                H5MM_xfree(fake_cache_ptr->image_entries[u].image_ptr);
        } /* end for */

        /* the decode pass must have consumed exactly the pre-checksum bytes */
        assert((size_t)(q - (const uint8_t *)cache_ptr->image_buffer) ==
               cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM);

        /* compute the checksum -- recomputing over the same buffer must
         * reproduce the value encoded above.
         */
        old_chksum = chksum;
        chksum     = H5_checksum_metadata(cache_ptr->image_buffer,
                                          (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
        assert(chksum == old_chksum);

        fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries);
        fake_cache_ptr                = (H5C_t *)H5MM_xfree(fake_cache_ptr);
    }  /* end block */
#endif

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__construct_cache_image_buffer() */
378 | | |
379 | | /*------------------------------------------------------------------------- |
380 | | * Function: H5C__generate_cache_image() |
381 | | * |
382 | | * Purpose: Generate the cache image and write it to the file, if |
383 | | * directed. |
384 | | * |
385 | | * Return: SUCCEED on success, and FAIL on failure. |
386 | | * |
387 | | *------------------------------------------------------------------------- |
388 | | */ |
herr_t
H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);

    /* Construct cache image (allocates cache_ptr->image_buffer) */
    if (H5C__construct_cache_image_buffer(f, cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't create metadata cache image");

    /* Free image entries array -- no longer needed once the buffer
     * has been constructed.
     */
    if (H5C__free_image_entries_array(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't free image entries array");

    /* Write cache image block if so configured; the image may be
     * constructed for testing without being written to the file.
     */
    if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK) {
        if (H5C__write_cache_image(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write metadata cache image block to file");

        H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr);
    } /* end if */

    /* Free cache image buffer */
    assert(cache_ptr->image_buffer);
    cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_cache_image() */
425 | | |
426 | | /*------------------------------------------------------------------------- |
427 | | * Function: H5C__free_image_entries_array |
428 | | * |
429 | | * Purpose: If the image entries array exists, free the image |
430 | | * associated with each entry, and then free the image |
431 | | * entries array proper. |
432 | | * |
433 | | * Note that by the time this function is called, the cache |
434 | | * should have removed all entries from its data structures. |
435 | | * |
436 | | * Return: SUCCEED on success, and FAIL on failure. |
437 | | * |
438 | | *------------------------------------------------------------------------- |
439 | | */ |
440 | | static herr_t |
441 | | H5C__free_image_entries_array(H5C_t *cache_ptr) |
442 | 0 | { |
443 | 0 | FUNC_ENTER_PACKAGE_NOERR |
444 | | |
445 | | /* Sanity checks */ |
446 | 0 | assert(cache_ptr); |
447 | 0 | assert(cache_ptr->close_warning_received); |
448 | 0 | assert(cache_ptr->image_ctl.generate_image); |
449 | 0 | assert(cache_ptr->index_len == 0); |
450 | | |
451 | | /* Check for entries to free */ |
452 | 0 | if (cache_ptr->image_entries != NULL) { |
453 | 0 | unsigned u; /* Local index variable */ |
454 | |
|
455 | 0 | for (u = 0; u < cache_ptr->num_entries_in_image; u++) { |
456 | 0 | H5C_image_entry_t *ie_ptr; /* Image entry to release */ |
457 | | |
458 | | /* Get pointer to image entry */ |
459 | 0 | ie_ptr = &(cache_ptr->image_entries[u]); |
460 | | |
461 | | /* Sanity checks */ |
462 | 0 | assert(ie_ptr); |
463 | 0 | assert(ie_ptr->image_ptr); |
464 | | |
465 | | /* Free the parent addrs array if appropriate */ |
466 | 0 | if (ie_ptr->fd_parent_addrs) { |
467 | 0 | assert(ie_ptr->fd_parent_count > 0); |
468 | |
|
469 | 0 | ie_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(ie_ptr->fd_parent_addrs); |
470 | 0 | } /* end if */ |
471 | 0 | else |
472 | 0 | assert(ie_ptr->fd_parent_count == 0); |
473 | | |
474 | | /* Free the image */ |
475 | 0 | ie_ptr->image_ptr = H5MM_xfree(ie_ptr->image_ptr); |
476 | 0 | } /* end for */ |
477 | | |
478 | | /* Free the image entries array */ |
479 | 0 | cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(cache_ptr->image_entries); |
480 | 0 | } /* end if */ |
481 | |
|
482 | 0 | FUNC_LEAVE_NOAPI(SUCCEED) |
483 | 0 | } /* H5C__free_image_entries_array() */ |
484 | | |
485 | | /*------------------------------------------------------------------------- |
486 | | * Function: H5C__get_cache_image_config |
487 | | * |
488 | | * Purpose: Copy the current configuration for cache image generation |
489 | | * on file close into the instance of H5C_cache_image_ctl_t |
490 | | * pointed to by config_ptr. |
491 | | * |
492 | | * Return: SUCCEED on success, and FAIL on failure. |
493 | | * |
494 | | *------------------------------------------------------------------------- |
495 | | */ |
496 | | herr_t |
497 | | H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) |
498 | 0 | { |
499 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
500 | |
|
501 | 0 | FUNC_ENTER_PACKAGE |
502 | |
|
503 | 0 | if (cache_ptr == NULL) |
504 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry"); |
505 | 0 | if (config_ptr == NULL) |
506 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad config_ptr on entry"); |
507 | | |
508 | 0 | *config_ptr = cache_ptr->image_ctl; |
509 | |
|
510 | 0 | done: |
511 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
512 | 0 | } /* H5C__get_cache_image_config() */ |
513 | | |
514 | | /*------------------------------------------------------------------------- |
515 | | * Function: H5C__read_cache_image |
516 | | * |
517 | | * Purpose: Load the metadata cache image from the specified location |
518 | | * in the file, and return it in the supplied buffer. |
519 | | * |
520 | | * Return: Non-negative on success/Negative on failure |
521 | | * |
522 | | *------------------------------------------------------------------------- |
523 | | */ |
static herr_t
H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks -- the image location must be known and the caller
     * must have already allocated the destination buffer.
     */
    assert(f);
    assert(cache_ptr);
    assert(H5_addr_defined(cache_ptr->image_addr));
    assert(cache_ptr->image_len > 0);
    assert(cache_ptr->image_buffer);

#ifdef H5_HAVE_PARALLEL
    {
        H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
        int         mpi_result;

        /* aux_ptr == NULL means serial access even in a parallel build */
        if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) {
#endif /* H5_HAVE_PARALLEL */

            /* Read the buffer (if serial access, or rank 0 of parallel access) */
            /* NOTE: if this block read is being performed on rank 0 only, throwing
             * an error here will cause other ranks to hang in the following MPI_Bcast.
             */
            if (H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len,
                               cache_ptr->image_buffer) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block");

            H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr);

#ifdef H5_HAVE_PARALLEL
            if (aux_ptr) {
                /* Broadcast cache image from rank 0 to the other ranks */
                if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len,
                                                           MPI_BYTE, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
            } /* end if */
        }     /* end if */
        else if (aux_ptr) {
            /* Retrieve the contents of the metadata cache image from process 0 */
            if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len,
                                                       MPI_BYTE, 0, aux_ptr->mpi_comm)))
                HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", mpi_result)
        } /* end else-if */
    }     /* end block */
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__read_cache_image() */
576 | | |
577 | | /*------------------------------------------------------------------------- |
578 | | * Function: H5C__load_cache_image |
579 | | * |
580 | | * Purpose: Read the cache image superblock extension message and |
581 | | * delete it if so directed. |
582 | | * |
583 | | * Then load the cache image block at the specified location, |
584 | | * decode it, and insert its contents into the metadata |
585 | | * cache. |
586 | | * |
587 | | * Return: Non-negative on success/Negative on failure |
588 | | * |
589 | | *------------------------------------------------------------------------- |
590 | | */ |
herr_t
H5C__load_cache_image(H5F_t *f)
{
    H5C_t *cache_ptr;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    cache_ptr = f->shared->cache;
    assert(cache_ptr);

    /* If the image address is defined, load the image, decode it,
     * and insert its contents into the metadata cache.
     *
     * Note that under normal operating conditions, it is an error if the
     * image address is HADDR_UNDEF. However, to facilitate testing,
     * we allow this special value of the image address which means that
     * no image exists, and that the load operation should be skipped
     * silently.
     */
    if (H5_addr_defined(cache_ptr->image_addr)) {
        /* Sanity checks */
        assert(cache_ptr->image_len > 0);
        assert(cache_ptr->image_buffer == NULL);

        /* Allocate space for the image.
         * NOTE(review): allocated as image_len + 1 -- presumably one extra
         * byte for the decode routines' buffer-overrun checks; confirm.
         */
        if (NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer");

        /* Load the image from file */
        if (H5C__read_cache_image(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block");

        /* Reconstruct cache contents, from image */
        if (H5C__reconstruct_cache_contents(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "Can't reconstruct cache contents from image block");

        /* Free the image buffer */
        cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);

        /* Update stats -- must do this now, as we are about
         * to discard the size of the cache image.
         */
        H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr);

        cache_ptr->image_loaded = true;
    } /* end if */

    /* If directed, free the on disk metadata cache image (the image is
     * deleted when the file is opened R/W -- see
     * H5C_load_cache_image_on_next_protect()).
     */
    if (cache_ptr->delete_image) {
        if (H5F__super_ext_remove_msg(f, H5O_MDCI_MSG_ID) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                        "can't remove metadata cache image message from superblock extension");

        /* Reset image block values */
        cache_ptr->image_len      = 0;
        cache_ptr->image_data_len = 0;
        cache_ptr->image_addr     = HADDR_UNDEF;
    } /* end if */

done:
    /* On failure, release the image buffer if it was allocated above */
    if (ret_value < 0) {
        if (H5_addr_defined(cache_ptr->image_addr))
            cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);
    }

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__load_cache_image() */
662 | | |
663 | | /*------------------------------------------------------------------------- |
664 | | * Function: H5C_load_cache_image_on_next_protect() |
665 | | * |
666 | | * Purpose: Note the fact that a metadata cache image superblock |
667 | | * extension message exists, along with the base address |
668 | | * and length of the metadata cache image block. |
669 | | * |
670 | | * Once this notification is received the metadata cache |
671 | | * image block must be read, decoded, and loaded into the |
672 | | * cache on the next call to H5C_protect(). |
673 | | * |
674 | | * Further, if the file is opened R/W, the metadata cache |
675 | | * image superblock extension message must be deleted from |
676 | | * the superblock extension and the image block freed |
677 | | * |
678 | | * Contrawise, if the file is opened R/O, the metadata |
679 | | * cache image superblock extension message and image block |
680 | | * must be left as is. Further, any dirty entries in the |
681 | | * cache image block must be marked as clean to avoid |
682 | | * attempts to write them on file close. |
683 | | * |
684 | | * Return: SUCCEED |
685 | | * |
686 | | *------------------------------------------------------------------------- |
687 | | */ |
688 | | herr_t |
689 | | H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, bool rw) |
690 | 0 | { |
691 | 0 | H5C_t *cache_ptr; |
692 | |
|
693 | 0 | FUNC_ENTER_NOAPI_NOINIT_NOERR |
694 | | |
695 | | /* Sanity checks */ |
696 | 0 | assert(f); |
697 | 0 | assert(f->shared); |
698 | 0 | cache_ptr = f->shared->cache; |
699 | 0 | assert(cache_ptr); |
700 | | |
701 | | /* Set information needed to load cache image */ |
702 | 0 | cache_ptr->image_addr = addr; |
703 | 0 | cache_ptr->image_len = len; |
704 | 0 | cache_ptr->load_image = true; |
705 | 0 | cache_ptr->delete_image = rw; |
706 | |
|
707 | 0 | FUNC_LEAVE_NOAPI(SUCCEED) |
708 | 0 | } /* H5C_load_cache_image_on_next_protect() */ |
709 | | |
710 | | /*------------------------------------------------------------------------- |
711 | | * Function: H5C__image_entry_cmp |
712 | | * |
713 | | * Purpose: Comparison callback for qsort(3) on image entries. |
714 | | * Entries are sorted first by flush dependency height, |
715 | | * and then by LRU rank. |
716 | | * |
717 | | * Note: Entries with a _greater_ flush dependency height should |
718 | | * be sorted earlier than entries with lower heights, since |
719 | | * leafs in the flush dependency graph are at height 0, and their |
720 | | * parents need to be earlier in the image, so that they can |
721 | | * construct their flush dependencies when decoded. |
722 | | * |
723 | | * Return: An integer less than, equal to, or greater than zero if the |
724 | | * first entry is considered to be respectively less than, |
725 | | * equal to, or greater than the second. |
726 | | * |
727 | | *------------------------------------------------------------------------- |
728 | | */ |
729 | | static int |
730 | | H5C__image_entry_cmp(const void *_entry1, const void *_entry2) |
731 | 0 | { |
732 | 0 | const H5C_image_entry_t *entry1 = |
733 | 0 | (const H5C_image_entry_t *)_entry1; /* Pointer to first image entry to compare */ |
734 | 0 | const H5C_image_entry_t *entry2 = |
735 | 0 | (const H5C_image_entry_t *)_entry2; /* Pointer to second image entry to compare */ |
736 | 0 | int ret_value = 0; /* Return value */ |
737 | |
|
738 | 0 | FUNC_ENTER_PACKAGE_NOERR |
739 | | |
740 | | /* Sanity checks */ |
741 | 0 | assert(entry1); |
742 | 0 | assert(entry2); |
743 | |
|
744 | 0 | if (entry1->image_fd_height > entry2->image_fd_height) |
745 | 0 | ret_value = -1; |
746 | 0 | else if (entry1->image_fd_height < entry2->image_fd_height) |
747 | 0 | ret_value = 1; |
748 | 0 | else { |
749 | | /* Sanity check */ |
750 | 0 | assert(entry1->lru_rank >= -1); |
751 | 0 | assert(entry2->lru_rank >= -1); |
752 | |
|
753 | 0 | if (entry1->lru_rank < entry2->lru_rank) |
754 | 0 | ret_value = -1; |
755 | 0 | else if (entry1->lru_rank > entry2->lru_rank) |
756 | 0 | ret_value = 1; |
757 | 0 | } /* end else */ |
758 | |
|
759 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
760 | 0 | } /* H5C__image_entry_cmp() */ |
761 | | |
762 | | /*------------------------------------------------------------------------- |
763 | | * Function: H5C__prep_image_for_file_close |
764 | | * |
765 | | * Purpose: The objective of the call is to allow the metadata cache |
766 | | * to do any preparatory work prior to generation of a |
767 | | * cache image. |
768 | | * |
769 | | * In particular, the cache must |
770 | | * |
771 | | * 1) serialize all its entries, |
772 | | * |
773 | | * 2) compute the size of the metadata cache image, |
774 | | * |
775 | | * 3) allocate space for the metadata cache image, and |
776 | | * |
777 | | * 4) setup the metadata cache image superblock extension |
778 | | * message with the address and size of the metadata |
779 | | * cache image. |
780 | | * |
781 | | * The parallel case is complicated by the fact that |
782 | | * while all metadata caches must contain the same set of |
783 | | * dirty entries, there is no such requirement for clean |
784 | | * entries or the order that entries appear in the LRU. |
785 | | * |
786 | | * Thus, there is no requirement that different processes |
787 | | * will construct cache images of the same size. |
788 | | * |
789 | | * This is not a major issue as long as all processes include |
790 | | * the same set of dirty entries in the cache -- as they |
791 | | * currently do (note that this will change when we implement |
792 | | * the ageout feature). Since only the process zero cache |
793 | | * writes the cache image, all that is necessary is to |
794 | | * broadcast the process zero cache size for use in the |
795 | | * superblock extension messages and cache image block |
796 | | * allocations. |
797 | | * |
798 | | * Note: At present, cache image is disabled in the |
799 | | * parallel case as the new collective metadata write |
800 | | * code must be modified to support cache image. |
801 | | * |
802 | | * Return: Non-negative on success/Negative on failure |
803 | | * |
804 | | *------------------------------------------------------------------------- |
805 | | */ |
herr_t
H5C__prep_image_for_file_close(H5F_t *f, bool *image_generated)
{
    H5C_t  *cache_ptr = NULL;            /* File's metadata cache */
    haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of any fragment left by image block allocation */
    hsize_t eoa_frag_size = 0;           /* Size of that fragment (expected to be 0 -- see assert below) */
    herr_t  ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(f->shared->cache);
    cache_ptr = f->shared->cache;
    assert(cache_ptr);
    assert(image_generated);

    /* If the file is opened and closed without any access to
     * any group or data set, it is possible that the cache image (if
     * it exists) has not been read yet. Do this now if required.
     */
    if (cache_ptr->load_image) {
        cache_ptr->load_image = false;
        if (H5C__load_cache_image(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image");
    } /* end if */

    /* Before we start to generate the cache image (if requested), verify
     * that the superblock supports superblock extension messages, and
     * silently cancel any request for a cache image if it does not.
     *
     * Ideally, we would do this when the cache image is requested,
     * but the necessary information is not necessarily available at that
     * time -- hence this last minute check.
     *
     * Note that under some error conditions, the superblock will be
     * undefined in this case as well -- if so, assume that the
     * superblock does not support superblock extension messages.
     * Also verify that the file's high_bound is at least release
     * 1.10.x, otherwise cancel the request for a cache image
     */
    if ((NULL == f->shared->sblock) || (f->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) ||
        (f->shared->high_bound < H5F_LIBVER_V110)) {
        H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

        /* Reset to the default control, which has generate_image == false */
        cache_ptr->image_ctl = default_image_ctl;
        assert(!(cache_ptr->image_ctl.generate_image));
    } /* end if */

    /* Generate the cache image, if requested.
     *
     * NOTE(review): *image_generated is only set to true below -- it is
     * never set to false here, so the caller presumably initializes it
     * to false before the call.  Confirm against callers.
     */
    if (cache_ptr->image_ctl.generate_image) {
        /* Create the cache image super block extension message.
         *
         * Note that the base address and length of the metadata cache
         * image are undefined at this point, and thus will have to be
         * updated later.
         *
         * Create the super block extension message now so that space
         * is allocated for it (if necessary) before we allocate space
         * for the cache image block.
         *
         * To simplify testing, do this only if the
         * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
         * cache_ptr->image_ctl.flags.
         */
        if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
            if (H5C__write_cache_image_superblock_msg(f, true) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.");

        /* Serialize the cache */
        if (H5C__serialize_cache(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed");

        /* Scan the cache and record data needed to construct the
         * cache image. In particular, for each entry we must record:
         *
         * 1) rank in LRU (if entry is in LRU)
         *
         * 2) Whether the entry is dirty prior to flush of
         *    cache just prior to close.
         *
         * 3) Addresses of flush dependency parents (if any).
         *
         * 4) Number of flush dependency children (if any).
         *
         * In passing, also compute the size of the metadata cache
         * image. With the recent modifications of the free space
         * manager code, this size should be correct.
         */
        if (H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed");
        /* The image block must not have been allocated yet */
        assert(HADDR_UNDEF == cache_ptr->image_addr);

#ifdef H5_HAVE_PARALLEL
        /* In the parallel case, overwrite the image_len with the
         * value computed by process 0.
         */
        if (cache_ptr->aux_ptr) { /* we have multiple processes */
            int         mpi_result;
            unsigned    p0_image_len;
            H5AC_aux_t *aux_ptr;

            aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
            if (aux_ptr->mpi_rank == 0) {
                /* Process 0 broadcasts its image length to all ranks */
                aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
                p0_image_len          = aux_ptr->p0_image_len;

                if (MPI_SUCCESS !=
                    (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)

                assert(p0_image_len == aux_ptr->p0_image_len);
            } /* end if */
            else {
                /* All other ranks receive process 0's image length */
                if (MPI_SUCCESS !=
                    (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
                    HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)

                aux_ptr->p0_image_len = p0_image_len;
            } /* end else */

            /* Allocate space for a cache image of size equal to that
             * computed by the process 0. This may be different from
             * cache_ptr->image_data_len if mpi_rank != 0. However, since
             * cache image write is suppressed on all processes other than
             * process 0, this doesn't matter.
             *
             * Note that we allocate the cache image directly from the file
             * driver so as to avoid unsettling the free space managers.
             */
            if (HADDR_UNDEF ==
                (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f, (hsize_t)p0_image_len,
                                                    &eoa_frag_addr, &eoa_frag_size)))
                HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL,
                            "can't allocate file space for metadata cache image");
        } /* end if */
        else
#endif /* H5_HAVE_PARALLEL */
            /* Allocate the cache image block. Note that we allocate this
             * space directly from the file driver so as to avoid
             * unsettling the free space managers.
             */
            if (HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f,
                                                                   (hsize_t)(cache_ptr->image_data_len),
                                                                   &eoa_frag_addr, &eoa_frag_size)))
                HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL,
                            "can't allocate file space for metadata cache image");

        /* Make note of the eoa after allocation of the cache image
         * block. This value is used for sanity checking when we
         * shutdown the self referential free space managers after
         * we destroy the metadata cache.
         */
        assert(HADDR_UNDEF == f->shared->eoa_post_mdci_fsalloc);
        if (HADDR_UNDEF == (f->shared->eoa_post_mdci_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size");

        /* For now, drop any fragment left over from the allocation of the
         * image block on the ground. A fragment should only be returned
         * if the underlying file alignment is greater than 1.
         *
         * Clean this up eventually by extending the size of the cache
         * image block to the next alignment boundary, and then setting
         * the image_data_len to the actual size of the cache_image.
         *
         * On the off chance that there is some other way to get a
         * a fragment on a cache image allocation, leave the following
         * assertion in the code so we will find out.
         */
        assert((eoa_frag_size == 0) || (f->shared->alignment != 1));

        /* Eventually it will be possible for the length of the cache image
         * block on file to be greater than the size of the data it
         * contains. However, for now they must be the same. Set
         * cache_ptr->image_len accordingly.
         */
        cache_ptr->image_len = cache_ptr->image_data_len;

        /* update the metadata cache image superblock extension
         * message with the new cache image block base address and
         * length.
         *
         * to simplify testing, do this only if the
         * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
         * cache_ptr->image_ctl.flags.
         */
        if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
            if (H5C__write_cache_image_superblock_msg(f, false) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed");

        /* At this point:
         *
         *   1) space in the file for the metadata cache image
         *      is allocated,
         *
         *   2) the metadata cache image superblock extension
         *      message exists and (if so configured) contains
         *      the correct data,
         *
         *   3) All entries in the cache that will appear in the
         *      cache image are serialized with up to date images.
         *
         *      Since we just updated the cache image message,
         *      the super block extension message is dirty. However,
         *      since the superblock and the superblock extension
         *      can't be included in the cache image, this is a non-
         *      issue.
         *
         *   4) All entries in the cache that will be included in
         *      the cache are marked as such, and we have a count
         *      of same.
         *
         *   5) Flush dependency heights are calculated for all
         *      entries that will be included in the cache image.
         *
         * If there are any entries to be included in the metadata cache
         * image, allocate, populate, and sort the image_entries array.
         *
         * If the metadata cache image will be empty, delete the
         * metadata cache image superblock extension message, set
         * cache_ptr->image_ctl.generate_image to false. This will
         * allow the file close to continue normally without the
         * unnecessary generation of the metadata cache image.
         */
        if (cache_ptr->num_entries_in_image > 0) {
            if (H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.");

            /* Sort the entries -- see H5C__image_entry_cmp() for the
             * ordering (flush dependency height, then LRU rank).
             */
            qsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
                  sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
        } /* end if */
        else { /* cancel creation of metadata cache image */
            assert(cache_ptr->image_entries == NULL);

            /* To avoid breaking the control flow tests, only delete
             * the mdci superblock extension message if the
             * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
             * cache_ptr->image_ctl.flags.
             */
            if (cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
                if (H5F__super_ext_remove_msg(f, H5O_MDCI_MSG_ID) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                                "can't remove MDC image msg from superblock ext");

            cache_ptr->image_ctl.generate_image = false;
        } /* end else */

        /* Indicate that a cache image was generated */
        *image_generated = true;
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_image_for_file_close() */
1062 | | |
1063 | | /*------------------------------------------------------------------------- |
1064 | | * Function: H5C_set_cache_image_config |
1065 | | * |
1066 | | * Purpose: If *config_ptr contains valid data, copy it into the |
1067 | | * image_ctl field of *cache_ptr. Make adjustments for |
1068 | | * changes in configuration as required. |
1069 | | * |
1070 | | * If the file is open read only, silently |
1071 | | * force the cache image configuration to its default |
1072 | | * (which disables construction of a cache image). |
1073 | | * |
1074 | | * Note that in addition to being inapplicable in the |
1075 | | * read only case, cache image is also inapplicable if |
1076 | | * the superblock does not support superblock extension |
1077 | | * messages. Unfortunately, this information need not |
1078 | | * be available at this point. Thus we check for this |
1079 | | * later, in H5C_prep_for_file_close() and cancel the |
1080 | | * cache image request if appropriate. |
1081 | | * |
1082 | | * Fail if the new configuration is invalid. |
1083 | | * |
1084 | | * Return: SUCCEED on success, and FAIL on failure. |
1085 | | * |
1086 | | *------------------------------------------------------------------------- |
1087 | | */ |
herr_t
H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    assert(f);

    /* Check arguments */
    if (cache_ptr == NULL)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry");

    /* Validate the config: */
    if (H5C_validate_cache_image_config(config_ptr) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration");

#ifdef H5_HAVE_PARALLEL
    /* The collective metadata write code is not currently compatible
     * with cache image. Until this is fixed, suppress cache image silently
     * if there is more than one process.
     *
     * (aux_ptr is only non-NULL when the file is opened with more than
     * one MPI process.)
     */
    if (cache_ptr->aux_ptr) {
        H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

        cache_ptr->image_ctl = default_image_ctl;
        assert(!(cache_ptr->image_ctl.generate_image));
    }
    else {
#endif /* H5_HAVE_PARALLEL */
        /* A cache image can only be generated if the file is opened read / write
         * and the superblock supports superblock extension messages.
         *
         * However, the superblock version is not available at this point --
         * hence we can only check the former requirement now. Do the latter
         * check just before we construct the image.
         *
         * If the file is opened read / write, apply the supplied configuration.
         *
         * If it is not, set the image configuration to the default, which has
         * the effect of silently disabling the cache image if it was requested.
         */
        if (H5F_INTENT(f) & H5F_ACC_RDWR)
            cache_ptr->image_ctl = *config_ptr;
        else {
            H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

            cache_ptr->image_ctl = default_image_ctl;
            assert(!(cache_ptr->image_ctl.generate_image));
        }
#ifdef H5_HAVE_PARALLEL
    }
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_cache_image_config() */
1146 | | |
1147 | | /*------------------------------------------------------------------------- |
1148 | | * Function: H5C_validate_cache_image_config() |
1149 | | * |
1150 | | * Purpose: Run a sanity check on the provided instance of struct |
1151 | | * H5AC_cache_image_config_t. |
1152 | | * |
1153 | | * Do nothing and return SUCCEED if no errors are detected, |
1154 | | * and flag an error and return FAIL otherwise. |
1155 | | * |
1156 | | * Return: Non-negative on success/Negative on failure |
1157 | | * |
1158 | | *------------------------------------------------------------------------- |
1159 | | */ |
1160 | | herr_t |
1161 | | H5C_validate_cache_image_config(H5C_cache_image_ctl_t *ctl_ptr) |
1162 | 46 | { |
1163 | 46 | herr_t ret_value = SUCCEED; /* Return value */ |
1164 | | |
1165 | 46 | FUNC_ENTER_NOAPI(FAIL) |
1166 | | |
1167 | 46 | if (ctl_ptr == NULL) |
1168 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL ctl_ptr on entry"); |
1169 | 46 | if (ctl_ptr->version != H5C__CURR_CACHE_IMAGE_CTL_VER) |
1170 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown cache image control version"); |
1171 | | |
1172 | | /* At present, we do not support inclusion of the adaptive resize |
1173 | | * configuration in the cache image. Thus the save_resize_status |
1174 | | * field must be false. |
1175 | | */ |
1176 | 46 | if (ctl_ptr->save_resize_status != false) |
1177 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in save_resize_status field"); |
1178 | | |
1179 | | /* At present, we do not support prefetched entry ageouts. Thus |
1180 | | * the entry_ageout field must be set to |
1181 | | * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. |
1182 | | */ |
1183 | 46 | if (ctl_ptr->entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) |
1184 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in entry_ageout field"); |
1185 | | |
1186 | 46 | if ((ctl_ptr->flags & ~H5C_CI__ALL_FLAGS) != 0) |
1187 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown flag set"); |
1188 | | |
1189 | 46 | done: |
1190 | 46 | FUNC_LEAVE_NOAPI(ret_value) |
1191 | 46 | } /* H5C_validate_cache_image_config() */ |
1192 | | |
1193 | | /*************************************************************************/ |
1194 | | /**************************** Private Functions: *************************/ |
1195 | | /*************************************************************************/ |
1196 | | |
1197 | | /*------------------------------------------------------------------------- |
1198 | | * Function: H5C__cache_image_block_entry_header_size |
1199 | | * |
1200 | | * Purpose: Compute the size of the header of the metadata cache |
1201 | | * image block, and return the value. |
1202 | | * |
1203 | | * Return: Size of the header section of the metadata cache image |
1204 | | * block in bytes. |
1205 | | * |
1206 | | *------------------------------------------------------------------------- |
1207 | | */ |
1208 | | static size_t |
1209 | | H5C__cache_image_block_entry_header_size(const H5F_t *f) |
1210 | 0 | { |
1211 | 0 | size_t ret_value = 0; /* Return value */ |
1212 | |
|
1213 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1214 | | |
1215 | | /* Set return value */ |
1216 | 0 | ret_value = (size_t)(1 + /* type */ |
1217 | 0 | 1 + /* flags */ |
1218 | 0 | 1 + /* ring */ |
1219 | 0 | 1 + /* age */ |
1220 | 0 | 2 + /* dependency child count */ |
1221 | 0 | 2 + /* dirty dep child count */ |
1222 | 0 | 2 + /* dependency parent count */ |
1223 | 0 | 4 + /* index in LRU */ |
1224 | 0 | H5F_SIZEOF_ADDR(f) + /* entry offset */ |
1225 | 0 | H5F_SIZEOF_SIZE(f)); /* entry length */ |
1226 | |
|
1227 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1228 | 0 | } /* H5C__cache_image_block_entry_header_size() */ |
1229 | | |
1230 | | /*------------------------------------------------------------------------- |
1231 | | * Function: H5C__cache_image_block_header_size |
1232 | | * |
1233 | | * Purpose: Compute the size of the header of the metadata cache |
1234 | | * image block, and return the value. |
1235 | | * |
1236 | | * Return: Size of the header section of the metadata cache image |
1237 | | * block in bytes. |
1238 | | * |
1239 | | *------------------------------------------------------------------------- |
1240 | | */ |
1241 | | static size_t |
1242 | | H5C__cache_image_block_header_size(const H5F_t *f) |
1243 | 0 | { |
1244 | 0 | size_t ret_value = 0; /* Return value */ |
1245 | |
|
1246 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1247 | | |
1248 | | /* Set return value */ |
1249 | 0 | ret_value = (size_t)(4 + /* signature */ |
1250 | 0 | 1 + /* version */ |
1251 | 0 | 1 + /* flags */ |
1252 | 0 | H5F_SIZEOF_SIZE(f) + /* image data length */ |
1253 | 0 | 4); /* num_entries */ |
1254 | |
|
1255 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1256 | 0 | } /* H5C__cache_image_block_header_size() */ |
1257 | | |
1258 | | /*------------------------------------------------------------------------- |
1259 | | * Function: H5C__decode_cache_image_header() |
1260 | | * |
1261 | | * Purpose: Decode the metadata cache image buffer header from the |
1262 | | * supplied buffer and load the data into the supplied instance |
1263 | | * of H5C_t. Advances the buffer pointer to the first byte |
1264 | | * after the header image, or unchanged on failure. |
1265 | | * |
1266 | | * Return: Non-negative on success/Negative on failure |
1267 | | * |
1268 | | *------------------------------------------------------------------------- |
1269 | | */ |
1270 | | static herr_t |
1271 | | H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, size_t buf_size) |
1272 | 0 | { |
1273 | 0 | uint8_t version; |
1274 | 0 | uint8_t flags; |
1275 | 0 | bool have_resize_status = false; |
1276 | 0 | size_t actual_header_len; |
1277 | 0 | size_t expected_header_len; |
1278 | 0 | const uint8_t *p; |
1279 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
1280 | |
|
1281 | 0 | FUNC_ENTER_PACKAGE |
1282 | | |
1283 | | /* Sanity checks */ |
1284 | 0 | assert(cache_ptr); |
1285 | 0 | assert(buf); |
1286 | 0 | assert(*buf); |
1287 | | |
1288 | | /* Point to buffer to decode */ |
1289 | 0 | p = *buf; |
1290 | | |
1291 | | /* Ensure buffer has enough data for signature comparison */ |
1292 | 0 | if (H5_IS_BUFFER_OVERFLOW(p, H5C__MDCI_BLOCK_SIGNATURE_LEN, *buf + buf_size - 1)) |
1293 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, FAIL, "Insufficient buffer size for signature"); |
1294 | | |
1295 | | /* Check signature */ |
1296 | 0 | if (memcmp(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN) != 0) |
1297 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image header signature"); |
1298 | 0 | p += H5C__MDCI_BLOCK_SIGNATURE_LEN; |
1299 | | |
1300 | | /* Check version */ |
1301 | 0 | version = *p++; |
1302 | 0 | if (version != (uint8_t)H5C__MDCI_BLOCK_VERSION_0) |
1303 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image version"); |
1304 | | |
1305 | | /* Decode flags */ |
1306 | 0 | flags = *p++; |
1307 | 0 | if (flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS) |
1308 | 0 | have_resize_status = true; |
1309 | 0 | if (have_resize_status) |
1310 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "MDC resize status not yet supported"); |
1311 | | |
1312 | | /* Read image data length */ |
1313 | 0 | H5F_DECODE_LENGTH(f, p, cache_ptr->image_data_len); |
1314 | | |
1315 | | /* For now -- will become <= eventually */ |
1316 | 0 | if (cache_ptr->image_data_len != cache_ptr->image_len) |
1317 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image data length"); |
1318 | | |
1319 | | /* Read num entries */ |
1320 | 0 | UINT32DECODE(p, cache_ptr->num_entries_in_image); |
1321 | 0 | if (cache_ptr->num_entries_in_image == 0) |
1322 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count"); |
1323 | | |
1324 | | /* Verify expected length of header */ |
1325 | 0 | actual_header_len = (size_t)(p - *buf); |
1326 | 0 | expected_header_len = H5C__cache_image_block_header_size(f); |
1327 | 0 | if (actual_header_len != expected_header_len) |
1328 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len"); |
1329 | | |
1330 | | /* Update buffer pointer */ |
1331 | 0 | *buf = p; |
1332 | |
|
1333 | 0 | done: |
1334 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1335 | 0 | } /* H5C__decode_cache_image_header() */ |
1336 | | |
1337 | | #ifndef NDEBUG |
1338 | | |
1339 | | /*------------------------------------------------------------------------- |
1340 | | * Function: H5C__decode_cache_image_entry() |
1341 | | * |
1342 | | * Purpose: Decode the metadata cache image entry from the supplied |
1343 | | * buffer into the supplied instance of H5C_image_entry_t. |
1344 | | * This includes allocating a buffer for the entry image, |
1345 | | * loading it, and setting ie_ptr->image_ptr to point to |
1346 | | * the buffer. |
1347 | | * |
1348 | | * Advances the buffer pointer to the first byte |
1349 | | * after the entry, or unchanged on failure. |
1350 | | * |
1351 | | * Return: Non-negative on success/Negative on failure |
1352 | | * |
1353 | | *------------------------------------------------------------------------- |
1354 | | */ |
1355 | | static herr_t |
1356 | | H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num) |
1357 | | { |
1358 | | bool is_dirty = false; |
1359 | | bool in_lru = false; /* Only used in assertions */ |
1360 | | bool is_fd_parent = false; /* Only used in assertions */ |
1361 | | bool is_fd_child = false; /* Only used in assertions */ |
1362 | | haddr_t addr; |
1363 | | hsize_t size = 0; |
1364 | | void *image_ptr; |
1365 | | uint8_t flags = 0; |
1366 | | uint8_t type_id; |
1367 | | uint8_t ring; |
1368 | | uint8_t age; |
1369 | | uint16_t fd_child_count; |
1370 | | uint16_t fd_dirty_child_count; |
1371 | | uint16_t fd_parent_count; |
1372 | | haddr_t *fd_parent_addrs = NULL; |
1373 | | int32_t lru_rank; |
1374 | | H5C_image_entry_t *ie_ptr = NULL; |
1375 | | const uint8_t *p; |
1376 | | herr_t ret_value = SUCCEED; /* Return value */ |
1377 | | |
1378 | | FUNC_ENTER_PACKAGE |
1379 | | |
1380 | | /* Sanity checks */ |
1381 | | assert(f); |
1382 | | assert(f->shared); |
1383 | | assert(cache_ptr == f->shared->cache); |
1384 | | assert(cache_ptr); |
1385 | | assert(buf); |
1386 | | assert(*buf); |
1387 | | assert(entry_num < cache_ptr->num_entries_in_image); |
1388 | | ie_ptr = &(cache_ptr->image_entries[entry_num]); |
1389 | | assert(ie_ptr); |
1390 | | |
1391 | | /* Get pointer to buffer */ |
1392 | | p = *buf; |
1393 | | |
1394 | | /* Decode type id */ |
1395 | | type_id = *p++; |
1396 | | |
1397 | | /* Decode flags */ |
1398 | | flags = *p++; |
1399 | | if (flags & H5C__MDCI_ENTRY_DIRTY_FLAG) |
1400 | | is_dirty = true; |
1401 | | if (flags & H5C__MDCI_ENTRY_IN_LRU_FLAG) |
1402 | | in_lru = true; |
1403 | | if (flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG) |
1404 | | is_fd_parent = true; |
1405 | | if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) |
1406 | | is_fd_child = true; |
1407 | | |
1408 | | /* Decode ring */ |
1409 | | ring = *p++; |
1410 | | assert(ring > (uint8_t)(H5C_RING_UNDEFINED)); |
1411 | | assert(ring < (uint8_t)(H5C_RING_NTYPES)); |
1412 | | |
1413 | | /* Decode age */ |
1414 | | age = *p++; |
1415 | | |
1416 | | /* Decode dependency child count */ |
1417 | | UINT16DECODE(p, fd_child_count); |
1418 | | assert((is_fd_parent && fd_child_count > 0) || (!is_fd_parent && fd_child_count == 0)); |
1419 | | |
1420 | | /* Decode dirty dependency child count */ |
1421 | | UINT16DECODE(p, fd_dirty_child_count); |
1422 | | if (fd_dirty_child_count > fd_child_count) |
1423 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid dirty flush dependency child count"); |
1424 | | |
1425 | | /* Decode dependency parent count */ |
1426 | | UINT16DECODE(p, fd_parent_count); |
1427 | | assert((is_fd_child && fd_parent_count > 0) || (!is_fd_child && fd_parent_count == 0)); |
1428 | | |
1429 | | /* Decode index in LRU */ |
1430 | | INT32DECODE(p, lru_rank); |
1431 | | assert((in_lru && lru_rank >= 0) || (!in_lru && lru_rank == -1)); |
1432 | | |
1433 | | /* Decode entry offset */ |
1434 | | H5F_addr_decode(f, &p, &addr); |
1435 | | if (!H5_addr_defined(addr)) |
1436 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry offset"); |
1437 | | |
1438 | | /* Decode entry length */ |
1439 | | H5F_DECODE_LENGTH(f, p, size); |
1440 | | if (size == 0) |
1441 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry size"); |
1442 | | |
1443 | | /* Verify expected length of entry image */ |
1444 | | if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) |
1445 | | HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad entry image len"); |
1446 | | |
1447 | | /* If parent count greater than zero, allocate array for parent |
1448 | | * addresses, and decode addresses into the array. |
1449 | | */ |
1450 | | if (fd_parent_count > 0) { |
1451 | | int i; /* Local index variable */ |
1452 | | |
1453 | | if (NULL == (fd_parent_addrs = (haddr_t *)H5MM_malloc((size_t)(fd_parent_count)*H5F_SIZEOF_ADDR(f)))) |
1454 | | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, |
1455 | | "memory allocation failed for fd parent addrs buffer"); |
1456 | | |
1457 | | for (i = 0; i < fd_parent_count; i++) { |
1458 | | H5F_addr_decode(f, &p, &(fd_parent_addrs[i])); |
1459 | | if (!H5_addr_defined(fd_parent_addrs[i])) |
1460 | | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid flush dependency parent offset"); |
1461 | | } /* end for */ |
1462 | | } /* end if */ |
1463 | | |
1464 | | /* Allocate buffer for entry image */ |
1465 | | if (NULL == (image_ptr = H5MM_malloc(size + H5C_IMAGE_EXTRA_SPACE))) |
1466 | | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer"); |
1467 | | |
1468 | | #if H5C_DO_MEMORY_SANITY_CHECKS |
1469 | | H5MM_memcpy(((uint8_t *)image_ptr) + size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); |
1470 | | #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ |
1471 | | |
1472 | | /* Copy the entry image from the cache image block */ |
1473 | | H5MM_memcpy(image_ptr, p, size); |
1474 | | p += size; |
1475 | | |
1476 | | /* Copy data into target */ |
1477 | | ie_ptr->addr = addr; |
1478 | | ie_ptr->size = size; |
1479 | | ie_ptr->ring = (H5C_ring_t)ring; |
1480 | | ie_ptr->age = (int32_t)age; |
1481 | | ie_ptr->type_id = (int32_t)type_id; |
1482 | | ie_ptr->lru_rank = lru_rank; |
1483 | | ie_ptr->is_dirty = is_dirty; |
1484 | | ie_ptr->fd_child_count = (uint64_t)fd_child_count; |
1485 | | ie_ptr->fd_dirty_child_count = (uint64_t)fd_dirty_child_count; |
1486 | | ie_ptr->fd_parent_count = (uint64_t)fd_parent_count; |
1487 | | ie_ptr->fd_parent_addrs = fd_parent_addrs; |
1488 | | ie_ptr->image_ptr = image_ptr; |
1489 | | |
1490 | | /* Update buffer pointer */ |
1491 | | *buf = p; |
1492 | | |
1493 | | done: |
1494 | | FUNC_LEAVE_NOAPI(ret_value) |
1495 | | } /* H5C__decode_cache_image_entry() */ |
1496 | | #endif |
1497 | | |
1498 | | /*------------------------------------------------------------------------- |
1499 | | * Function: H5C__encode_cache_image_header() |
1500 | | * |
1501 | | * Purpose: Encode the metadata cache image buffer header in the |
1502 | | * supplied buffer. Updates buffer pointer to the first byte |
1503 | | * after the header image in the buffer, or unchanged on failure. |
1504 | | * |
1505 | | * Return: Non-negative on success/Negative on failure |
1506 | | * |
1507 | | *------------------------------------------------------------------------- |
1508 | | */ |
1509 | | static herr_t |
1510 | | H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf) |
1511 | 0 | { |
1512 | 0 | size_t actual_header_len; |
1513 | 0 | size_t expected_header_len; |
1514 | 0 | uint8_t flags = 0; |
1515 | 0 | uint8_t *p; /* Pointer into cache image buffer */ |
1516 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
1517 | |
|
1518 | 0 | FUNC_ENTER_PACKAGE |
1519 | | |
1520 | | /* Sanity checks */ |
1521 | 0 | assert(cache_ptr); |
1522 | 0 | assert(cache_ptr->close_warning_received); |
1523 | 0 | assert(cache_ptr->image_ctl.generate_image); |
1524 | 0 | assert(cache_ptr->index_len == 0); |
1525 | 0 | assert(cache_ptr->image_data_len > 0); |
1526 | 0 | assert(cache_ptr->image_data_len <= cache_ptr->image_len); |
1527 | 0 | assert(buf); |
1528 | 0 | assert(*buf); |
1529 | | |
1530 | | /* Set pointer into buffer */ |
1531 | 0 | p = *buf; |
1532 | | |
1533 | | /* write signature */ |
1534 | 0 | H5MM_memcpy(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN); |
1535 | 0 | p += H5C__MDCI_BLOCK_SIGNATURE_LEN; |
1536 | | |
1537 | | /* write version */ |
1538 | 0 | *p++ = (uint8_t)H5C__MDCI_BLOCK_VERSION_0; |
1539 | | |
1540 | | /* setup and write flags */ |
1541 | | |
1542 | | /* at present we don't support saving resize status */ |
1543 | 0 | assert(!cache_ptr->image_ctl.save_resize_status); |
1544 | 0 | if (cache_ptr->image_ctl.save_resize_status) |
1545 | 0 | flags |= H5C__MDCI_HEADER_HAVE_RESIZE_STATUS; |
1546 | |
|
1547 | 0 | *p++ = flags; |
1548 | | |
1549 | | /* Encode image data length */ |
1550 | | /* this must be true at present */ |
1551 | 0 | assert(cache_ptr->image_len == cache_ptr->image_data_len); |
1552 | 0 | H5F_ENCODE_LENGTH(f, p, cache_ptr->image_data_len); |
1553 | | |
1554 | | /* write num entries */ |
1555 | 0 | UINT32ENCODE(p, cache_ptr->num_entries_in_image); |
1556 | | |
1557 | | /* verify expected length of header */ |
1558 | 0 | actual_header_len = (size_t)(p - *buf); |
1559 | 0 | expected_header_len = H5C__cache_image_block_header_size(f); |
1560 | 0 | if (actual_header_len != expected_header_len) |
1561 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len"); |
1562 | | |
1563 | | /* Update buffer pointer */ |
1564 | 0 | *buf = p; |
1565 | |
|
1566 | 0 | done: |
1567 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1568 | 0 | } /* H5C__encode_cache_image_header() */ |
1569 | | |
1570 | | /*------------------------------------------------------------------------- |
1571 | | * Function: H5C__encode_cache_image_entry() |
1572 | | * |
1573 | | * Purpose: Encode the metadata cache image buffer header in the |
1574 | | * supplied buffer. Updates buffer pointer to the first byte |
1575 | | * after the entry in the buffer, or unchanged on failure. |
1576 | | * |
1577 | | * Return: Non-negative on success/Negative on failure |
1578 | | * |
1579 | | *------------------------------------------------------------------------- |
1580 | | */ |
static herr_t
H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num)
{
    H5C_image_entry_t *ie_ptr;              /* Pointer to entry to encode */
    uint8_t            flags = 0;           /* Flags for entry */
    uint8_t           *p;                   /* Pointer into cache image buffer */
    unsigned           u;                   /* Local index value */
    herr_t             ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->image_ctl.generate_image);
    assert(cache_ptr->index_len == 0);
    assert(buf);
    assert(*buf);
    assert(entry_num < cache_ptr->num_entries_in_image);
    ie_ptr = &(cache_ptr->image_entries[entry_num]);

    /* Get pointer to buffer to encode into.
     *
     * NOTE(review): the caller is responsible for ensuring the buffer is
     * large enough for the header, parent addresses, and entry image --
     * no bounds checking is done here.
     */
    p = *buf;

    /* Encode type -- stored in a single byte, so it must fit in 0-255 */
    if ((ie_ptr->type_id < 0) || (ie_ptr->type_id > 255))
        HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "type_id out of range.");
    *p++ = (uint8_t)(ie_ptr->type_id);

    /* Compose and encode flags -- derived from the entry's dirty state,
     * LRU membership, and flush dependency role(s).  These are the same
     * flags tested by the decode routine.
     */
    if (ie_ptr->is_dirty)
        flags |= H5C__MDCI_ENTRY_DIRTY_FLAG;
    if (ie_ptr->lru_rank > 0)
        flags |= H5C__MDCI_ENTRY_IN_LRU_FLAG;
    if (ie_ptr->fd_child_count > 0)
        flags |= H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG;
    if (ie_ptr->fd_parent_count > 0)
        flags |= H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG;
    *p++ = flags;

    /* Encode ring (single byte) */
    *p++ = (uint8_t)(ie_ptr->ring);

    /* Encode age (single byte) */
    *p++ = (uint8_t)(ie_ptr->age);

    /* Validate and encode dependency child count (2 bytes on disk) */
    if (ie_ptr->fd_child_count > H5C__MDCI_MAX_FD_CHILDREN)
        HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_child_count out of range");
    UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_child_count));

    /* Validate and encode dirty dependency child count (2 bytes on disk) */
    if (ie_ptr->fd_dirty_child_count > H5C__MDCI_MAX_FD_CHILDREN)
        HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_dirty_child_count out of range");
    UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_dirty_child_count));

    /* Validate and encode dependency parent count (2 bytes on disk) */
    if (ie_ptr->fd_parent_count > H5C__MDCI_MAX_FD_PARENTS)
        HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_parent_count out of range");
    UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_parent_count));

    /* Encode index in LRU */
    INT32ENCODE(p, ie_ptr->lru_rank);

    /* Encode entry offset (file-address-sized) */
    H5F_addr_encode(f, &p, ie_ptr->addr);

    /* Encode entry length (file-length-sized) */
    H5F_ENCODE_LENGTH(f, p, ie_ptr->size);

    /* Verify expected length of entry image -- catches any drift between
     * this encoder and H5C__cache_image_block_entry_header_size()
     */
    if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad entry image len");

    /* Encode dependency parent offsets -- if any */
    for (u = 0; u < ie_ptr->fd_parent_count; u++)
        H5F_addr_encode(f, &p, ie_ptr->fd_parent_addrs[u]);

    /* Copy entry image (the serialized on-disk form of the entry) */
    H5MM_memcpy(p, ie_ptr->image_ptr, ie_ptr->size);
    p += ie_ptr->size;

    /* Update buffer pointer so the caller continues after this entry */
    *buf = p;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__encode_cache_image_entry() */
1672 | | |
1673 | | /*------------------------------------------------------------------------- |
1674 | | * Function: H5C__prep_for_file_close__compute_fd_heights |
1675 | | * |
1676 | | * Purpose: The purpose of this function is to compute the flush |
1677 | | * dependency height of all entries that appear in the cache |
1678 | | * image. |
1679 | | * |
1680 | | * At present, entries are included or excluded from the |
1681 | | * cache image depending upon the ring in which they reside. |
1682 | | * Thus there is no chance that one side of a flush dependency |
1683 | | * will be in the cache image, and the other side not. |
1684 | | * |
1685 | | * However, once we start placing a limit on the size of the |
1686 | | * cache image, or start excluding prefetched entries from |
1687 | | * the cache image if they haven't been accessed in some |
1688 | | * number of file close / open cycles, this will no longer |
1689 | | * be the case. |
1690 | | * |
1691 | | * In particular, if a flush dependency child is dirty, and |
1692 | | * one of its flush dependency parents is dirty and not in |
1693 | | * the cache image, then the flush dependency child cannot |
1694 | | * be in the cache image without violating flush ordering. |
1695 | | * |
1696 | | * Observe that a clean flush dependency child can be either |
1697 | | * in or out of the cache image without effect on flush |
1698 | | * dependencies. |
1699 | | * |
1700 | | * Similarly, a flush dependency parent can always be part |
1701 | | * of a cache image, regardless of whether it is clean or |
1702 | | * dirty -- but remember that a flush dependency parent can |
1703 | | * also be a flush dependency child. |
1704 | | * |
1705 | | * Finally, note that for purposes of the cache image, flush |
1706 | | * dependency height ends when a flush dependency relation |
1707 | | * passes off the cache image. |
1708 | | * |
1709 | | * On exit, the flush dependency height of each entry in the |
1710 | | * cache image should be calculated and stored in the cache |
1711 | | * entry. Entries will be removed from the cache image if |
1712 | | * necessary to maintain flush ordering. |
1713 | | * |
1714 | | * Return: Non-negative on success/Negative on failure |
1715 | | * |
1716 | | *------------------------------------------------------------------------- |
1717 | | */ |
1718 | | static herr_t |
1719 | | H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) |
1720 | 0 | { |
1721 | 0 | H5C_cache_entry_t *entry_ptr; |
1722 | 0 | H5C_cache_entry_t *parent_ptr; |
1723 | | #ifndef NDEBUG |
1724 | | unsigned entries_removed_from_image = 0; |
1725 | | unsigned external_parent_fd_refs_removed = 0; |
1726 | | unsigned external_child_fd_refs_removed = 0; |
1727 | | #endif |
1728 | 0 | bool done = false; |
1729 | 0 | unsigned u; /* Local index variable */ |
1730 | 0 | herr_t ret_value = SUCCEED; |
1731 | |
|
1732 | 0 | FUNC_ENTER_PACKAGE |
1733 | | |
1734 | | /* sanity checks */ |
1735 | 0 | assert(cache_ptr); |
1736 | | |
1737 | | /* Remove from the cache image all dirty entries that are |
1738 | | * flush dependency children of dirty entries that are not in the |
1739 | | * cache image. Must do this, as if we fail to do so, the parent |
1740 | | * will be written to file before the child. Since it is possible |
1741 | | * that the child will have dirty children of its own, this may take |
1742 | | * multiple passes through the index list. |
1743 | | */ |
1744 | 0 | done = false; |
1745 | 0 | while (!done) { |
1746 | 0 | done = true; |
1747 | 0 | entry_ptr = cache_ptr->il_head; |
1748 | 0 | while (entry_ptr != NULL) { |
1749 | | /* Should this entry be in the image */ |
1750 | 0 | if (entry_ptr->image_dirty && entry_ptr->include_in_image && (entry_ptr->fd_parent_count > 0)) { |
1751 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1752 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1753 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1754 | | |
1755 | | /* Sanity check parent */ |
1756 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1757 | |
|
1758 | 0 | if (parent_ptr->is_dirty && !parent_ptr->include_in_image && |
1759 | 0 | entry_ptr->include_in_image) { |
1760 | | |
1761 | | /* Must remove child from image -- only do this once */ |
1762 | | #ifndef NDEBUG |
1763 | | entries_removed_from_image++; |
1764 | | #endif |
1765 | 0 | entry_ptr->include_in_image = false; |
1766 | 0 | } /* end if */ |
1767 | 0 | } /* for */ |
1768 | 0 | } /* end if */ |
1769 | |
|
1770 | 0 | entry_ptr = entry_ptr->il_next; |
1771 | 0 | } /* while ( entry_ptr != NULL ) */ |
1772 | 0 | } /* while ( ! done ) */ |
1773 | | |
1774 | | /* at present, entries are included in the cache image if they reside |
1775 | | * in a specified set of rings. Thus it should be impossible for |
1776 | | * entries_removed_from_image to be positive. Assert that this is |
1777 | | * so. Note that this will change when we start aging entries out |
1778 | | * of the cache image. |
1779 | | */ |
1780 | 0 | assert(entries_removed_from_image == 0); |
1781 | | |
1782 | | /* Next, remove from entries in the cache image, references to |
1783 | | * flush dependency parents or children that are not in the cache image. |
1784 | | */ |
1785 | 0 | entry_ptr = cache_ptr->il_head; |
1786 | 0 | while (entry_ptr != NULL) { |
1787 | 0 | if (!entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) { |
1788 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1789 | |
|
1790 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1791 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1792 | | |
1793 | | /* Sanity check parent */ |
1794 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1795 | |
|
1796 | 0 | if (parent_ptr->include_in_image) { |
1797 | | /* Must remove reference to child */ |
1798 | 0 | assert(parent_ptr->fd_child_count > 0); |
1799 | 0 | parent_ptr->fd_child_count--; |
1800 | |
|
1801 | 0 | if (entry_ptr->is_dirty) { |
1802 | 0 | assert(parent_ptr->fd_dirty_child_count > 0); |
1803 | 0 | parent_ptr->fd_dirty_child_count--; |
1804 | 0 | } /* end if */ |
1805 | |
|
1806 | | #ifndef NDEBUG |
1807 | | external_child_fd_refs_removed++; |
1808 | | #endif |
1809 | 0 | } /* end if */ |
1810 | 0 | } /* for */ |
1811 | 0 | } /* end if */ |
1812 | 0 | else if (entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) { |
1813 | | /* Sanity checks */ |
1814 | 0 | assert(entry_ptr->flush_dep_parent != NULL); |
1815 | 0 | assert(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count); |
1816 | 0 | assert(entry_ptr->fd_parent_addrs); |
1817 | |
|
1818 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1819 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1820 | | |
1821 | | /* Sanity check parent */ |
1822 | 0 | assert(entry_ptr->ring == parent_ptr->ring); |
1823 | |
|
1824 | 0 | if (!parent_ptr->include_in_image) { |
1825 | | /* Must remove reference to parent */ |
1826 | 0 | assert(entry_ptr->fd_parent_count > 0); |
1827 | 0 | parent_ptr->fd_child_count--; |
1828 | |
|
1829 | 0 | assert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]); |
1830 | |
|
1831 | 0 | entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF; |
1832 | | #ifndef NDEBUG |
1833 | | external_parent_fd_refs_removed++; |
1834 | | #endif |
1835 | 0 | } /* end if */ |
1836 | 0 | } /* for */ |
1837 | | |
1838 | | /* Touch up fd_parent_addrs array if necessary */ |
1839 | 0 | if (entry_ptr->fd_parent_count == 0) { |
1840 | 0 | H5MM_xfree(entry_ptr->fd_parent_addrs); |
1841 | 0 | entry_ptr->fd_parent_addrs = NULL; |
1842 | 0 | } /* end if */ |
1843 | 0 | else if (entry_ptr->flush_dep_nparents > entry_ptr->fd_parent_count) { |
1844 | 0 | haddr_t *old_fd_parent_addrs = entry_ptr->fd_parent_addrs; |
1845 | 0 | unsigned v; |
1846 | |
|
1847 | 0 | if (NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_calloc( |
1848 | 0 | sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_addrs)))) |
1849 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, |
1850 | 0 | "memory allocation failed for fd parent addr array"); |
1851 | | |
1852 | 0 | v = 0; |
1853 | 0 | for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { |
1854 | 0 | if (old_fd_parent_addrs[u] != HADDR_UNDEF) { |
1855 | 0 | entry_ptr->fd_parent_addrs[v] = old_fd_parent_addrs[u]; |
1856 | 0 | v++; |
1857 | 0 | } /* end if */ |
1858 | 0 | } /* end for */ |
1859 | |
|
1860 | 0 | assert(v == entry_ptr->fd_parent_count); |
1861 | 0 | } /* end else-if */ |
1862 | 0 | } /* end else-if */ |
1863 | | |
1864 | 0 | entry_ptr = entry_ptr->il_next; |
1865 | 0 | } /* while (entry_ptr != NULL) */ |
1866 | | |
1867 | | /* At present, no external parent or child flush dependency links |
1868 | | * should exist -- hence the following assertions. This will change |
1869 | | * if we support ageout of entries in the cache image. |
1870 | | */ |
1871 | 0 | assert(external_child_fd_refs_removed == 0); |
1872 | 0 | assert(external_parent_fd_refs_removed == 0); |
1873 | | |
1874 | | /* At this point we should have removed all flush dependencies that |
1875 | | * cross cache image boundaries. Now compute the flush dependency |
1876 | | * heights for all entries in the image. |
1877 | | * |
1878 | | * Until I can think of a better way, do this via a depth first |
1879 | | * search implemented via a recursive function call. |
1880 | | * |
1881 | | * Note that entry_ptr->image_fd_height has already been initialized to 0 |
1882 | | * for all entries that may appear in the cache image. |
1883 | | */ |
1884 | 0 | entry_ptr = cache_ptr->il_head; |
1885 | 0 | while (entry_ptr != NULL) { |
1886 | 0 | if (entry_ptr->include_in_image && entry_ptr->fd_child_count == 0 && entry_ptr->fd_parent_count > 0) { |
1887 | 0 | for (u = 0; u < entry_ptr->fd_parent_count; u++) { |
1888 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1889 | |
|
1890 | 0 | if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) |
1891 | 0 | H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1); |
1892 | 0 | } /* end for */ |
1893 | 0 | } /* end if */ |
1894 | |
|
1895 | 0 | entry_ptr = entry_ptr->il_next; |
1896 | 0 | } /* while (entry_ptr != NULL) */ |
1897 | |
|
1898 | 0 | done: |
1899 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
1900 | 0 | } /* H5C__prep_for_file_close__compute_fd_heights() */ |
1901 | | |
1902 | | /*------------------------------------------------------------------------- |
1903 | | * Function: H5C__prep_for_file_close__compute_fd_heights_real |
1904 | | * |
1905 | | * Purpose: H5C__prep_for_file_close__compute_fd_heights() prepares |
1906 | | * for the computation of flush dependency heights of all |
1907 | | * entries in the cache image, this function actually does |
1908 | | * it. |
1909 | | * |
1910 | | * The basic observation behind this function is as follows: |
1911 | | * |
1912 | | * Suppose you have an entry E with a flush dependency |
1913 | | * height of X. Then the parents of E must all have |
1914 | | * flush dependency X + 1 or greater. |
1915 | | * |
1916 | | * Use this observation to compute flush dependency height |
1917 | | * of all entries in the cache image via the following |
1918 | | * recursive algorithm: |
1919 | | * |
1920 | | * 1) On entry, set the flush dependency height of the |
1921 | | * supplied cache entry to the supplied value. |
1922 | | * |
1923 | | * 2) Examine all the flush dependency parents of the |
1924 | | * supplied entry. |
1925 | | * |
1926 | | * If the parent is in the cache image, and has flush |
1927 | | * dependency height less than or equal to the flush |
1928 | | * dependency height of the current entry, call the |
1929 | | * recursive routine on the parent with flush dependency |
1930 | | * height equal to the flush dependency height of the |
1931 | | * child plus 1. |
1932 | | * |
1933 | | * Otherwise do nothing. |
1934 | | * |
1935 | | * Observe that if the flush dependency height of all entries |
1936 | | * in the image is initialized to zero, and if this recursive |
1937 | | * function is called with flush dependency height 0 on all |
1938 | | * entries in the cache image with FD parents in the image, |
1939 | | * but without FD children in the image, the correct flush |
1940 | | * dependency height should be set for all entries in the |
1941 | | * cache image. |
1942 | | * |
1943 | | * Return: void |
1944 | | * |
1945 | | *------------------------------------------------------------------------- |
1946 | | */ |
1947 | | static void |
1948 | | H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, uint32_t fd_height) |
1949 | 0 | { |
1950 | 0 | FUNC_ENTER_PACKAGE_NOERR |
1951 | | |
1952 | | /* Sanity checks */ |
1953 | 0 | assert(entry_ptr); |
1954 | 0 | assert(entry_ptr->include_in_image); |
1955 | 0 | assert((entry_ptr->image_fd_height == 0) || (entry_ptr->image_fd_height < fd_height)); |
1956 | 0 | assert(((fd_height == 0) && (entry_ptr->fd_child_count == 0)) || |
1957 | 0 | ((fd_height > 0) && (entry_ptr->fd_child_count > 0))); |
1958 | |
|
1959 | 0 | entry_ptr->image_fd_height = fd_height; |
1960 | 0 | if (entry_ptr->flush_dep_nparents > 0) { |
1961 | 0 | unsigned u; |
1962 | |
|
1963 | 0 | assert(entry_ptr->flush_dep_parent); |
1964 | 0 | for (u = 0; u < entry_ptr->fd_parent_count; u++) { |
1965 | 0 | H5C_cache_entry_t *parent_ptr; |
1966 | |
|
1967 | 0 | parent_ptr = entry_ptr->flush_dep_parent[u]; |
1968 | |
|
1969 | 0 | if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= fd_height) |
1970 | 0 | H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, fd_height + 1); |
1971 | 0 | } /* end for */ |
1972 | 0 | } /* end if */ |
1973 | |
|
1974 | 0 | FUNC_LEAVE_NOAPI_VOID |
1975 | 0 | } /* H5C__prep_for_file_close__compute_fd_heights_real() */ |
1976 | | |
1977 | | /*------------------------------------------------------------------------- |
1978 | | * Function: H5C__prep_for_file_close__setup_image_entries_array |
1979 | | * |
1980 | | * Purpose: Allocate space for the image_entries array, and load |
1981 | | * each instance of H5C_image_entry_t in the array with |
1982 | | * the data necessary to construct the metadata cache image. |
1983 | | * |
1984 | | * Return: Non-negative on success/Negative on failure |
1985 | | * |
1986 | | *------------------------------------------------------------------------- |
1987 | | */ |
static herr_t
H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
{
    H5C_cache_entry_t *entry_ptr;
    H5C_image_entry_t *image_entries = NULL; /* Array being built; installed in cache_ptr on success */
#ifndef NDEBUG
    uint32_t entries_visited = 0; /* Debug-only: total index entries scanned */
#endif
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->pl_len == 0);
    assert(cache_ptr->num_entries_in_image > 0);
    assert(cache_ptr->image_entries == NULL);

    /* Allocate and initialize image_entries array.
     *
     * Note the "+ 1": one extra, zeroed element is allocated past the end
     * and left with NULL fd_parent_addrs / image_ptr -- the asserts after
     * the scan loop below check this sentinel element.
     */
    if (NULL == (image_entries = (H5C_image_entry_t *)H5MM_calloc(
                     sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for image_entries");

    /* Initialize (non-zero/NULL/false) fields -- "<=" is intentional so the
     * sentinel element gets these defaults too.
     */
    for (u = 0; u <= cache_ptr->num_entries_in_image; u++) {
        image_entries[u].addr    = HADDR_UNDEF;
        image_entries[u].ring    = H5C_RING_UNDEFINED;
        image_entries[u].type_id = -1;
    } /* end for */

    /* Scan each entry on the index list and populate the image_entries array */
    u         = 0;
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        if (entry_ptr->include_in_image) {
            /* Since we have already serialized the cache, the following
             * should hold.
             */
            assert(entry_ptr->image_up_to_date);
            assert(entry_ptr->image_ptr);
            assert(entry_ptr->type);

            image_entries[u].addr = entry_ptr->addr;
            image_entries[u].size = entry_ptr->size;
            image_entries[u].ring = entry_ptr->ring;

            /* When a prefetched entry is included in the image, store
             * its underlying type id in the image entry, not
             * H5AC_PREFETCHED_ENTRY_ID.  In passing, also increment
             * the age (up to H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX).
             */
            if (entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID) {
                image_entries[u].type_id = entry_ptr->prefetch_type_id;

                if (entry_ptr->age >= H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX)
                    image_entries[u].age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX;
                else
                    image_entries[u].age = entry_ptr->age + 1;
            } /* end if */
            else {
                /* Freshly cached (non-prefetched) entries start at age 0 */
                image_entries[u].type_id = entry_ptr->type->id;
                image_entries[u].age     = 0;
            } /* end else */

            image_entries[u].lru_rank             = entry_ptr->lru_rank;
            image_entries[u].is_dirty             = entry_ptr->is_dirty;
            image_entries[u].image_fd_height      = entry_ptr->image_fd_height;
            image_entries[u].fd_parent_count      = entry_ptr->fd_parent_count;
            image_entries[u].fd_parent_addrs      = entry_ptr->fd_parent_addrs;
            image_entries[u].fd_child_count       = entry_ptr->fd_child_count;
            image_entries[u].fd_dirty_child_count = entry_ptr->fd_dirty_child_count;
            image_entries[u].image_ptr            = entry_ptr->image_ptr;

            /* Null out entry_ptr->fd_parent_addrs and set
             * entry_ptr->fd_parent_count to zero so that ownership of the
             * flush dependency parents address array is transferred to the
             * image entry.
             */
            entry_ptr->fd_parent_count = 0;
            entry_ptr->fd_parent_addrs = NULL;

            u++;

            assert(u <= cache_ptr->num_entries_in_image);
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif

        entry_ptr = entry_ptr->il_next;
    } /* end while */

    /* Sanity checks: we must have seen every index entry, and filled
     * exactly num_entries_in_image slots.
     */
    assert(entries_visited == cache_ptr->index_len);
    assert(u == cache_ptr->num_entries_in_image);

    /* The sentinel element (index u == num_entries_in_image) must be
     * untouched by the scan loop above.
     */
    assert(image_entries[u].fd_parent_addrs == NULL);
    assert(image_entries[u].image_ptr == NULL);

    /* Install the completed array in the cache */
    cache_ptr->image_entries = image_entries;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_for_file_close__setup_image_entries_array() */
2095 | | |
2096 | | /*------------------------------------------------------------------------- |
2097 | | * Function: H5C__prep_for_file_close__scan_entries |
2098 | | * |
2099 | | * Purpose: Scan all entries in the metadata cache, and store all |
2100 | | * entry specific data required for construction of the |
2101 | | * metadata cache image block and likely to be discarded |
2102 | | * or modified during the cache flush on file close. |
2103 | | * |
2104 | | * In particular, make note of: |
2105 | | * entry rank in LRU |
2106 | | * whether the entry is dirty |
2107 | | * base address of entry flush dependency parent, |
2108 | | * if it exists. |
2109 | | * number of flush dependency children, if any. |
2110 | | * |
2111 | | * Also, determine which entries are to be included in the |
2112 | | * metadata cache image. At present, all entries other than |
2113 | | * the superblock, the superblock extension object header and |
2114 | | * its associated chunks (if any) are included. |
2115 | | * |
2116 | | * Finally, compute the size of the metadata cache image |
2117 | | * block. |
2118 | | * |
2119 | | * Return: Non-negative on success/Negative on failure |
2120 | | * |
2121 | | *------------------------------------------------------------------------- |
2122 | | */ |
static herr_t
H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
{
    H5C_cache_entry_t *entry_ptr;
    bool include_in_image;
    int lru_rank = 1; /* LRU ranks are 1-based; -1 marks "not on LRU" */
#ifndef NDEBUG
    unsigned entries_visited = 0;
    uint32_t num_entries_tentatively_in_image = 0;
#endif
    uint32_t num_entries_in_image = 0;
    size_t image_len;
    size_t entry_header_len;
    size_t fd_parents_list_len;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(f->shared->sblock);
    assert(cache_ptr);
    assert(cache_ptr->close_warning_received);
    assert(cache_ptr->pl_len == 0);

    /* Initialize image len to the size of the metadata cache image block
     * header.
     */
    image_len        = H5C__cache_image_block_header_size(f);
    entry_header_len = H5C__cache_image_block_entry_header_size(f);

    /* Scan each entry on the index list */
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        /* Since we have already serialized the cache, the following
         * should hold.
         */
        assert(entry_ptr->image_up_to_date);
        assert(entry_ptr->image_ptr);

        /* Initially, we mark all entries in the rings included
         * in the cache image as being included in the image.
         * Depending on circumstances, we may exclude some of
         * these entries later.
         */
        if (entry_ptr->ring > H5C_MAX_RING_IN_IMAGE)
            include_in_image = false;
        else
            include_in_image = true;
        entry_ptr->include_in_image = include_in_image;

        if (include_in_image) {
            entry_ptr->lru_rank        = -1;
            entry_ptr->image_dirty     = entry_ptr->is_dirty;
            entry_ptr->image_fd_height = 0; /* will compute this later */

            /* Initially, include all flush dependency parents in the
             * list of flush dependencies to be stored in the
             * image. We may remove some or all of these later.
             */
            if (entry_ptr->flush_dep_nparents > 0) {
                /* The parents addresses array may already exist -- reallocate
                 * as needed.
                 */
                if (entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count) {
                    /* parent addresses array should already be allocated
                     * and of the correct size.
                     */
                    assert(entry_ptr->fd_parent_addrs);
                } /* end if */
                else if (entry_ptr->fd_parent_count > 0) {
                    /* Size mismatch -- discard the stale array so it is
                     * re-allocated at the correct size below.
                     */
                    assert(entry_ptr->fd_parent_addrs);
                    entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
                } /* end else-if */
                else {
                    assert(entry_ptr->fd_parent_count == 0);
                    assert(entry_ptr->fd_parent_addrs == NULL);
                } /* end else */

                entry_ptr->fd_parent_count = entry_ptr->flush_dep_nparents;
                if (NULL == entry_ptr->fd_parent_addrs)
                    if (NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc(
                                     sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_count))))
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
                                    "memory allocation failed for fd parent addrs buffer");

                /* Record the on-disk address of each flush dependency parent */
                for (int i = 0; i < (int)(entry_ptr->fd_parent_count); i++) {
                    entry_ptr->fd_parent_addrs[i] = entry_ptr->flush_dep_parent[i]->addr;
                    assert(H5_addr_defined(entry_ptr->fd_parent_addrs[i]));
                } /* end for */
            }     /* end if */
            else if (entry_ptr->fd_parent_count > 0) {
                /* No current parents -- release any left-over address array */
                assert(entry_ptr->fd_parent_addrs);
                entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
            } /* end else-if */
            else
                assert(entry_ptr->fd_parent_addrs == NULL);

            /* Initially, all flush dependency children are included in
             * the count of flush dependency child relationships to be
             * represented in the cache image. Some or all of these
             * may be dropped from the image later.
             */
            if (entry_ptr->flush_dep_nchildren > 0) {
                if (!entry_ptr->is_pinned)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "encountered unpinned fd parent?!?");

                entry_ptr->fd_child_count       = entry_ptr->flush_dep_nchildren;
                entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children;
            } /* end if */

#ifndef NDEBUG
            num_entries_tentatively_in_image++;
#endif
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->il_next;
    } /* end while */
    assert(entries_visited == cache_ptr->index_len);

    /* Now compute the flush dependency heights of all flush dependency
     * relationships to be represented in the image.
     *
     * If all entries in the target rings are included in the
     * image, the flush dependency heights are simply the heights
     * of all flush dependencies in the target rings.
     *
     * However, if we restrict appearance in the cache image either
     * by number of entries in the image, restrictions on the number
     * of times a prefetched entry can appear in an image, or image
     * size, it is possible that flush dependency parents or children
     * of entries that are in the image may not be included in the
     * image. In this case, we must prune all flush dependency
     * relationships that cross the image boundary, and exclude
     * from the image all dirty flush dependency children that have
     * a dirty flush dependency parent that is not in the image.
     * This is necessary to preserve the required flush ordering.
     *
     * These details are tended to by the following call to
     * H5C__prep_for_file_close__compute_fd_heights(). Because the
     * exact contents of the image cannot be known until after this
     * call, computation of the image size is delayed.
     */
    if (H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?");

    /* At this point, all entries that will appear in the cache
     * image should be marked correctly. Compute the size of the
     * cache image.
     */
#ifndef NDEBUG
    entries_visited = 0;
#endif
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        if (entry_ptr->include_in_image) {
            /* Each entry contributes its header, its (possibly empty)
             * flush dependency parent address list, and its image.
             */
            if (entry_ptr->fd_parent_count > 0)
                fd_parents_list_len = (size_t)(H5F_SIZEOF_ADDR(f) * entry_ptr->fd_parent_count);
            else
                fd_parents_list_len = (size_t)0;

            image_len += entry_header_len + fd_parents_list_len + entry_ptr->size;
            num_entries_in_image++;
        } /* end if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->il_next;
    } /* end while */
    assert(entries_visited == cache_ptr->index_len);
    assert(num_entries_in_image <= num_entries_tentatively_in_image);

#ifndef NDEBUG
    {
        /* Verify that the entries excluded from the image are exactly
         * those in the rings beyond H5C_MAX_RING_IN_IMAGE.
         */
        unsigned j = 0;
        for (int i = H5C_MAX_RING_IN_IMAGE + 1; i <= H5C_RING_SB; i++)
            j += cache_ptr->index_ring_len[i];

        /* This will change */
        assert(entries_visited == (num_entries_tentatively_in_image + j));
    }
#endif

    cache_ptr->num_entries_in_image = num_entries_in_image;
#ifndef NDEBUG
    entries_visited = 0;
#endif

    /* Now scan the LRU list to set the lru_rank fields of all entries
     * on the LRU.
     *
     * Note that we start with rank 1, and increment by 1 with each
     * entry on the LRU.
     *
     * Note that manually pinned entries will have lru_rank -1,
     * and no flush dependency. Putting these entries at the head of
     * the reconstructed LRU should be appropriate.
     */
    entry_ptr = cache_ptr->LRU_head_ptr;
    while (entry_ptr != NULL) {
        assert(entry_ptr->type != NULL);

        /* to avoid confusion, don't set lru_rank on epoch markers.
         * Note that we still increment the lru_rank, so that the holes
         * in the sequence of entries on the LRU will indicate the
         * locations of epoch markers (if any) when we reconstruct
         * the LRU.
         *
         * Do not set lru_rank or increment lru_rank for entries
         * that will not be included in the cache image.
         */
        if (entry_ptr->type->id == H5AC_EPOCH_MARKER_ID)
            lru_rank++;
        else if (entry_ptr->include_in_image) {
            entry_ptr->lru_rank = lru_rank;
            lru_rank++;
        } /* end else-if */

#ifndef NDEBUG
        entries_visited++;
#endif
        entry_ptr = entry_ptr->next;
    } /* end while */
    assert(entries_visited == cache_ptr->LRU_list_len);

    /* Account for the trailing checksum and publish the final image size */
    image_len += H5F_SIZEOF_CHKSUM;
    cache_ptr->image_data_len = image_len;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__prep_for_file_close__scan_entries() */
2359 | | |
2360 | | /*------------------------------------------------------------------------- |
2361 | | * Function: H5C__reconstruct_cache_contents() |
2362 | | * |
2363 | | * Purpose: Scan the image buffer, and create a prefetched |
2364 | | * cache entry for every entry in the buffer. Insert the |
2365 | | * prefetched entries in the index and the LRU, and |
2366 | | * reconstruct any flush dependencies. Order the entries |
2367 | | * in the LRU as indicated by the stored lru_ranks. |
2368 | | * |
2369 | | * Return: SUCCEED on success, and FAIL on failure. |
2370 | | * |
2371 | | *------------------------------------------------------------------------- |
2372 | | */ |
static herr_t
H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
{
    H5C_cache_entry_t *pf_entry_ptr; /* Pointer to prefetched entry */
    H5C_cache_entry_t *parent_ptr;   /* Pointer to parent of prefetched entry */
    const uint8_t *p;                /* Pointer into image buffer */
    unsigned u, v;                   /* Local index variable */
    herr_t ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(f->shared);
    assert(cache_ptr == f->shared->cache);
    assert(cache_ptr);
    assert(cache_ptr->image_buffer);
    assert(cache_ptr->image_len > 0);

    /* Decode metadata cache image header */
    /* NOTE(review): the decode is bounded by image_len + 1 -- presumably the
     * image buffer is allocated one byte larger than image_len; confirm
     * against the buffer allocation in H5C__load_cache_image().
     */
    p = (uint8_t *)cache_ptr->image_buffer;
    if (H5C__decode_cache_image_header(f, cache_ptr, &p, cache_ptr->image_len + 1) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "cache image header decode failed");
    assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_len);

    /* The image_data_len and # of entries should be defined now */
    assert(cache_ptr->image_data_len > 0);
    assert(cache_ptr->image_data_len <= cache_ptr->image_len);
    assert(cache_ptr->num_entries_in_image > 0);

    /* Reconstruct entries in image */
    for (u = 0; u < cache_ptr->num_entries_in_image; u++) {
        /* Create the prefetched entry described by the ith
         * entry in the cache image.
         */
        if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &p)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed");

        /* Note that we make no checks on available cache space before
         * inserting the reconstructed entry into the metadata cache.
         *
         * This is OK since the cache must be almost empty at the beginning
         * of the process, and since we check cache size at the end of the
         * reconstruction process.
         */

        /* Insert the prefetched entry in the index */
        H5C__INSERT_IN_INDEX(cache_ptr, pf_entry_ptr, FAIL);

        /* If dirty, insert the entry into the slist. */
        if (pf_entry_ptr->is_dirty)
            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, pf_entry_ptr, FAIL);

        /* Append the entry to the LRU */
        H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, pf_entry_ptr, FAIL);

        H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty);

        /* If the prefetched entry is the child in one or more flush
         * dependency relationships, recreate those flush dependencies.
         */
        for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
            /* Sanity checks */
            assert(pf_entry_ptr->fd_parent_addrs);
            assert(H5_addr_defined(pf_entry_ptr->fd_parent_addrs[v]));

            /* Find the parent entry */
            parent_ptr = NULL;
            H5C__SEARCH_INDEX(cache_ptr, pf_entry_ptr->fd_parent_addrs[v], parent_ptr, FAIL);
            if (parent_ptr == NULL)
                HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "fd parent not in cache?!?");

            /* Sanity checks */
            assert(parent_ptr->addr == pf_entry_ptr->fd_parent_addrs[v]);
            assert(parent_ptr->lru_rank == -1);

            /* Must protect parent entry to set up a flush dependency.
             * Do this now, and then unprotect when done.
             */
            H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL);
            parent_ptr->is_protected = true;

            /* Setup the flush dependency */
            if (H5C_create_flush_dependency(parent_ptr, pf_entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency");

            /* And now unprotect */
            H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, parent_ptr, FAIL);
            parent_ptr->is_protected = false;
        } /* end for */
    }     /* end for */

#ifndef NDEBUG
    /* Scan the cache entries, and verify that each entry has
     * the expected flush dependency status.
     */
    pf_entry_ptr = cache_ptr->il_head;
    while (pf_entry_ptr != NULL) {
        assert((pf_entry_ptr->prefetched && pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) ||
               (!pf_entry_ptr->prefetched && pf_entry_ptr->type != H5AC_PREFETCHED_ENTRY));
        if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY)
            assert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);

        for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
            parent_ptr = pf_entry_ptr->flush_dep_parent[v];
            assert(parent_ptr);
            assert(pf_entry_ptr->fd_parent_addrs);
            assert(pf_entry_ptr->fd_parent_addrs[v] == parent_ptr->addr);
            assert(parent_ptr->flush_dep_nchildren > 0);
        } /* end for */

        if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) {
            assert(pf_entry_ptr->fd_child_count == pf_entry_ptr->flush_dep_nchildren);
            assert(pf_entry_ptr->fd_dirty_child_count == pf_entry_ptr->flush_dep_ndirty_children);
        } /* end if */

        pf_entry_ptr = pf_entry_ptr->il_next;
    } /* end while */

    /* Scan the LRU, and verify the expected ordering of the
     * prefetched entries.
     */
    {
        int lru_rank_holes = 0;
        H5C_cache_entry_t *entry_ptr;
        int i; /* Local index variable */

        i         = -1;
        entry_ptr = cache_ptr->LRU_head_ptr;
        while (entry_ptr != NULL) {
            assert(entry_ptr->type != NULL);

            if (entry_ptr->prefetched) {
                assert(entry_ptr->lru_rank != 0);
                assert((entry_ptr->lru_rank == -1) || (entry_ptr->lru_rank > i));

                if ((entry_ptr->lru_rank > 1) && (entry_ptr->lru_rank > i + 1))
                    lru_rank_holes += entry_ptr->lru_rank - (i + 1);
                i = entry_ptr->lru_rank;
            } /* end if */

            entry_ptr = entry_ptr->next;
        } /* end while */

        /* Holes in the sequences of LRU ranks can appear due to epoch
         * markers. They are left in to allow re-insertion of the
         * epoch markers on reconstruction of the cache -- thus
         * the following sanity check will have to be revised when
         * we add code to store and restore adaptive resize status.
         */
        assert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS);
    } /* end block */
#endif

    /* Check to see if the cache is oversize, and evict entries as
     * necessary to remain within limits.
     */
    if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
        /* cache is oversized -- call H5C__make_space_in_cache() with zero
         * space needed to repair the situation if possible.
         */
        bool write_permitted = false;

        if (cache_ptr->check_write_permitted && (cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "Can't get write_permitted");
        else
            write_permitted = cache_ptr->write_permitted;

        if (H5C__make_space_in_cache(f, 0, write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "H5C__make_space_in_cache failed");
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__reconstruct_cache_contents() */
2548 | | |
2549 | | /*------------------------------------------------------------------------- |
2550 | | * Function: H5C__reconstruct_cache_entry() |
2551 | | * |
2552 | | * Purpose: Allocate a prefetched metadata cache entry and initialize |
2553 | | * it from image buffer. |
2554 | | * |
2555 | | * Return a pointer to the newly allocated cache entry, |
2556 | | * or NULL on failure. |
2557 | | * |
2558 | | * Return: Pointer to the new instance of H5C_cache_entry on success, |
2559 | | * or NULL on failure. |
2560 | | * |
2561 | | *------------------------------------------------------------------------- |
2562 | | */ |
2563 | | static H5C_cache_entry_t * |
2564 | | H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf) |
2565 | 0 | { |
2566 | 0 | H5C_cache_entry_t *pf_entry_ptr = NULL; /* Reconstructed cache entry */ |
2567 | 0 | uint8_t flags = 0; |
2568 | 0 | bool is_dirty = false; |
2569 | | #ifndef NDEBUG /* only used in assertions */ |
2570 | | bool in_lru = false; |
2571 | | bool is_fd_parent = false; |
2572 | | bool is_fd_child = false; |
2573 | | #endif |
2574 | 0 | const uint8_t *p; |
2575 | 0 | bool file_is_rw; |
2576 | 0 | H5C_cache_entry_t *ret_value = NULL; /* Return value */ |
2577 | |
|
2578 | 0 | FUNC_ENTER_PACKAGE |
2579 | | |
2580 | | /* Sanity checks */ |
2581 | 0 | assert(cache_ptr); |
2582 | 0 | assert(cache_ptr->num_entries_in_image > 0); |
2583 | 0 | assert(buf && *buf); |
2584 | | |
2585 | | /* Key R/W access off of whether the image will be deleted */ |
2586 | 0 | file_is_rw = cache_ptr->delete_image; |
2587 | | |
2588 | | /* Allocate space for the prefetched cache entry */ |
2589 | 0 | if (NULL == (pf_entry_ptr = H5FL_CALLOC(H5C_cache_entry_t))) |
2590 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for prefetched cache entry"); |
2591 | | |
2592 | | /* Get pointer to buffer */ |
2593 | 0 | p = *buf; |
2594 | | |
2595 | | /* Decode type id */ |
2596 | 0 | pf_entry_ptr->prefetch_type_id = *p++; |
2597 | | |
2598 | | /* Decode flags */ |
2599 | 0 | flags = *p++; |
2600 | 0 | if (flags & H5C__MDCI_ENTRY_DIRTY_FLAG) |
2601 | 0 | is_dirty = true; |
2602 | | #ifndef NDEBUG /* only used in assertions */ |
2603 | | if (flags & H5C__MDCI_ENTRY_IN_LRU_FLAG) |
2604 | | in_lru = true; |
2605 | | if (flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG) |
2606 | | is_fd_parent = true; |
2607 | | if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) |
2608 | | is_fd_child = true; |
2609 | | #endif |
2610 | | |
2611 | | /* Force dirty entries to clean if the file read only -- must do |
2612 | | * this as otherwise the cache will attempt to write them on file |
2613 | | * close. Since the file is R/O, the metadata cache image superblock |
2614 | | * extension message and the cache image block will not be removed. |
2615 | | * Hence no danger in this for subsequent opens. |
2616 | | * |
2617 | | * However, if the dirty entry (marked clean for purposes of the R/O |
2618 | | * file open) is evicted and then referred to, the cache will read |
2619 | | * either invalid or obsolete data from the file. Handle this by |
2620 | | * setting the prefetched_dirty field, and hiding such entries from |
2621 | | * the eviction candidate selection algorithm. |
2622 | | */ |
2623 | 0 | pf_entry_ptr->is_dirty = (is_dirty && file_is_rw); |
2624 | | |
2625 | | /* Decode ring */ |
2626 | 0 | pf_entry_ptr->ring = *p++; |
2627 | 0 | assert(pf_entry_ptr->ring > (uint8_t)(H5C_RING_UNDEFINED)); |
2628 | 0 | assert(pf_entry_ptr->ring < (uint8_t)(H5C_RING_NTYPES)); |
2629 | | |
2630 | | /* Decode age */ |
2631 | 0 | pf_entry_ptr->age = *p++; |
2632 | | |
2633 | | /* Decode dependency child count */ |
2634 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_child_count); |
2635 | 0 | assert((is_fd_parent && pf_entry_ptr->fd_child_count > 0) || |
2636 | 0 | (!is_fd_parent && pf_entry_ptr->fd_child_count == 0)); |
2637 | | |
2638 | | /* Decode dirty dependency child count */ |
2639 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count); |
2640 | 0 | if (!file_is_rw) |
2641 | 0 | pf_entry_ptr->fd_dirty_child_count = 0; |
2642 | 0 | if (pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count) |
2643 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count"); |
2644 | | |
2645 | | /* Decode dependency parent count */ |
2646 | 0 | UINT16DECODE(p, pf_entry_ptr->fd_parent_count); |
2647 | 0 | assert((is_fd_child && pf_entry_ptr->fd_parent_count > 0) || |
2648 | 0 | (!is_fd_child && pf_entry_ptr->fd_parent_count == 0)); |
2649 | | |
2650 | | /* Decode index in LRU */ |
2651 | 0 | INT32DECODE(p, pf_entry_ptr->lru_rank); |
2652 | 0 | assert((in_lru && pf_entry_ptr->lru_rank >= 0) || (!in_lru && pf_entry_ptr->lru_rank == -1)); |
2653 | | |
2654 | | /* Decode entry offset */ |
2655 | 0 | H5F_addr_decode(f, &p, &pf_entry_ptr->addr); |
2656 | 0 | if (!H5_addr_defined(pf_entry_ptr->addr)) |
2657 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry offset"); |
2658 | | |
2659 | | /* Decode entry length */ |
2660 | 0 | H5F_DECODE_LENGTH(f, p, pf_entry_ptr->size); |
2661 | 0 | if (pf_entry_ptr->size == 0) |
2662 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry size"); |
2663 | | |
2664 | | /* Verify expected length of entry image */ |
2665 | 0 | if ((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) |
2666 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, NULL, "Bad entry image len"); |
2667 | | |
2668 | | /* If parent count greater than zero, allocate array for parent |
2669 | | * addresses, and decode addresses into the array. |
2670 | | */ |
2671 | 0 | if (pf_entry_ptr->fd_parent_count > 0) { |
2672 | 0 | unsigned u; /* Local index variable */ |
2673 | |
|
2674 | 0 | if (NULL == (pf_entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc( |
2675 | 0 | (size_t)(pf_entry_ptr->fd_parent_count) * H5F_SIZEOF_ADDR(f)))) |
2676 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, |
2677 | 0 | "memory allocation failed for fd parent addrs buffer"); |
2678 | | |
2679 | 0 | for (u = 0; u < pf_entry_ptr->fd_parent_count; u++) { |
2680 | 0 | H5F_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u])); |
2681 | 0 | if (!H5_addr_defined(pf_entry_ptr->fd_parent_addrs[u])) |
2682 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid flush dependency parent offset"); |
2683 | 0 | } /* end for */ |
2684 | 0 | } /* end if */ |
2685 | | |
2686 | | /* Allocate buffer for entry image */ |
2687 | 0 | if (NULL == (pf_entry_ptr->image_ptr = H5MM_malloc(pf_entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) |
2688 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer"); |
2689 | | #if H5C_DO_MEMORY_SANITY_CHECKS |
2690 | | H5MM_memcpy(((uint8_t *)pf_entry_ptr->image_ptr) + pf_entry_ptr->size, H5C_IMAGE_SANITY_VALUE, |
2691 | | H5C_IMAGE_EXTRA_SPACE); |
2692 | | #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ |
2693 | | |
2694 | | /* Copy the entry image from the cache image block */ |
2695 | 0 | H5MM_memcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size); |
2696 | 0 | p += pf_entry_ptr->size; |
2697 | | |
2698 | | /* Initialize the rest of the fields in the prefetched entry */ |
2699 | | /* (Only need to set non-zero/NULL/false fields, due to calloc() above) */ |
2700 | 0 | pf_entry_ptr->cache_ptr = cache_ptr; |
2701 | 0 | pf_entry_ptr->image_up_to_date = true; |
2702 | 0 | pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY; |
2703 | 0 | pf_entry_ptr->prefetched = true; |
2704 | 0 | pf_entry_ptr->prefetched_dirty = is_dirty && (!file_is_rw); |
2705 | | |
2706 | | /* Sanity checks */ |
2707 | 0 | assert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE); |
2708 | | |
2709 | | /* Update buffer pointer */ |
2710 | 0 | *buf = p; |
2711 | |
|
2712 | 0 | ret_value = pf_entry_ptr; |
2713 | |
|
2714 | 0 | done: |
2715 | 0 | if (NULL == ret_value && pf_entry_ptr) |
2716 | 0 | pf_entry_ptr = H5FL_FREE(H5C_cache_entry_t, pf_entry_ptr); |
2717 | |
|
2718 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
2719 | 0 | } /* H5C__reconstruct_cache_entry() */ |
2720 | | |
2721 | | /*------------------------------------------------------------------------- |
2722 | | * Function: H5C__write_cache_image_superblock_msg |
2723 | | * |
2724 | | * Purpose: Write the cache image superblock extension message, |
2725 | | * creating if specified. |
2726 | | * |
2727 | | * In general, the size and location of the cache image block |
2728 | | * will be unknown at the time that the cache image superblock |
2729 | | * message is created. A subsequent call to this routine will |
2730 | | * be used to write the correct data. |
2731 | | * |
2732 | | * Return: Non-negative on success/Negative on failure. |
2733 | | * |
2734 | | *------------------------------------------------------------------------- |
2735 | | */ |
2736 | | static herr_t |
2737 | | H5C__write_cache_image_superblock_msg(H5F_t *f, bool create) |
2738 | 0 | { |
2739 | 0 | H5C_t *cache_ptr; |
2740 | 0 | H5O_mdci_t mdci_msg; /* metadata cache image message */ |
2741 | | /* to insert in the superblock */ |
2742 | | /* extension. */ |
2743 | 0 | unsigned mesg_flags = H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS; |
2744 | 0 | herr_t ret_value = SUCCEED; /* Return value */ |
2745 | |
|
2746 | 0 | FUNC_ENTER_PACKAGE |
2747 | | |
2748 | | /* Sanity checks */ |
2749 | 0 | assert(f); |
2750 | 0 | assert(f->shared); |
2751 | 0 | assert(f->shared->cache); |
2752 | 0 | cache_ptr = f->shared->cache; |
2753 | 0 | assert(cache_ptr); |
2754 | 0 | assert(cache_ptr->close_warning_received); |
2755 | | |
2756 | | /* Write data into the metadata cache image superblock extension message. |
2757 | | * Note that this data will be bogus when we first create the message. |
2758 | | * We will overwrite this data later in a second call to this function. |
2759 | | */ |
2760 | 0 | mdci_msg.addr = cache_ptr->image_addr; |
2761 | | #ifdef H5_HAVE_PARALLEL |
2762 | | if (cache_ptr->aux_ptr) { /* we have multiple processes */ |
2763 | | H5AC_aux_t *aux_ptr; |
2764 | | |
2765 | | aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; |
2766 | | mdci_msg.size = aux_ptr->p0_image_len; |
2767 | | } /* end if */ |
2768 | | else |
2769 | | #endif /* H5_HAVE_PARALLEL */ |
2770 | 0 | mdci_msg.size = cache_ptr->image_len; |
2771 | | |
2772 | | /* Write metadata cache image message to superblock extension */ |
2773 | 0 | if (H5F__super_ext_write_msg(f, H5O_MDCI_MSG_ID, &mdci_msg, create, mesg_flags) < 0) |
2774 | 0 | HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, |
2775 | 0 | "can't write metadata cache image message to superblock extension"); |
2776 | | |
2777 | 0 | done: |
2778 | 0 | FUNC_LEAVE_NOAPI(ret_value) |
2779 | 0 | } /* H5C__write_cache_image_superblock_msg() */ |
2780 | | |
2781 | | /*------------------------------------------------------------------------- |
2782 | | * Function: H5C__write_cache_image |
2783 | | * |
2784 | | * Purpose: Write the supplied metadata cache image to the specified |
2785 | | * location in file. |
2786 | | * |
2787 | | * Return: Non-negative on success/Negative on failure |
2788 | | * |
2789 | | *------------------------------------------------------------------------- |
2790 | | */ |
static herr_t
H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    assert(f);
    assert(cache_ptr);
    assert(H5_addr_defined(cache_ptr->image_addr));
    assert(cache_ptr->image_len > 0);
    assert(cache_ptr->image_buffer);

#ifdef H5_HAVE_PARALLEL
    {
        H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;

        /* In parallel builds, only rank 0 (or a serial-access file with no
         * aux structure) performs the write; other ranks skip it.
         */
        if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) {
#endif /* H5_HAVE_PARALLEL */

            /* Write the buffer (if serial access, or rank 0 for parallel access) */
            if (H5F_block_write(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len,
                                cache_ptr->image_buffer) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't write metadata cache image block to file");
#ifdef H5_HAVE_PARALLEL
        } /* end if */
    }     /* end block */
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__write_cache_image() */