Line data Source code
1 : #include "fd_wksp_private.h"
2 :
FD_TL int fd_wksp_oom_silent = 0; /* When non-zero, suppresses the "no free space" warning emitted by
                                     fd_wksp_alloc_at_least on allocation failure.  (FD_TL presumably
                                     marks this as thread-local storage -- confirm in fd_util.) */
4 :
5 : /* fd_wksp_private_split_before splits a partition (index i2) into two
6 : smaller partitions and returns the partition index (i1) of the
7 : partition created by the split. The created partition will be
8 : immediately before the original partition. It will be in the
9 : partitioning with a zero tag. It will not be in the idle stack, used
10 : treap or free treap.
11 :
12 : sz is size of the original partition post split. This should be in
   (0,original_sz) (yes, open on both ends such that the post split
   partitions both have non-zero size).
15 :
16 : This will pop the idle stack once to assign the index of the newly
17 : created partition. Assumes the caller knows the idle stack is not
18 : empty.
19 :
20 : Assumes the original partition is in the partitioning with a zero
21 : tag. Further assumes the original partition is not in the idle
22 : stack, used treap or free treap.
23 :
24 : fd_wksp_private_split_after is identical except the partition created
25 : by the split is after the original partition. */
26 :
static ulong                                               /* In [0,part_max) */
fd_wksp_private_split_before( ulong i2,                    /* In [0,part_max) */
                              ulong s2,                    /* In (0,size of i2) */
                              fd_wksp_t * wksp,            /* Current local join */
                              fd_wksp_private_pinfo_t * pinfo ) { /* == fd_wksp_private_pinfo( wksp ) */

  /* Compute the split of i2's global address range: [g1,g2) goes to
     the new partition i1, [g2,g3) stays with i2 (s2 bytes). */

  ulong g3 = pinfo[ i2 ].gaddr_hi; /* Old end here */
  ulong g2 = g3 - s2;              /* New ends here */
  ulong g1 = pinfo[ i2 ].gaddr_lo; /* New starts here */

  ulong i0 = fd_wksp_private_pinfo_idx( pinfo[ i2 ].prev_cidx ); /* Old before */
  ulong i1 = fd_wksp_private_idle_stack_pop( wksp, pinfo );      /* New before */

  /* Initialize i1 as an untagged partition covering [g1,g2), linked
     between i0 and i2, with all treap links null (not in the idle
     stack, used treap or free treap). */

  pinfo[ i1 ].gaddr_lo    = g1;
  pinfo[ i1 ].gaddr_hi    = g2;
  pinfo[ i1 ].tag         = 0UL;
  pinfo[ i1 ].in_same     = 0U;
  pinfo[ i1 ].prev_cidx   = fd_wksp_private_pinfo_cidx( i0 );
  pinfo[ i1 ].next_cidx   = fd_wksp_private_pinfo_cidx( i2 );
  pinfo[ i1 ].left_cidx   = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i1 ].right_cidx  = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i1 ].same_cidx   = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i1 ].parent_cidx = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );

  /* Shrink i2 to [g2,g3) and point it back at i1 */

  pinfo[ i2 ].gaddr_lo  = g2;
  pinfo[ i2 ].prev_cidx = fd_wksp_private_pinfo_cidx( i1 );

  /* Splice i1 into the partitioning (i1 becomes the head if i2 was) */

  if( fd_wksp_private_pinfo_idx_is_null( i0 ) ) wksp->part_head_cidx = fd_wksp_private_pinfo_cidx( i1 );
  else                                          pinfo[ i0 ].next_cidx = fd_wksp_private_pinfo_cidx( i1 );

  return i1;
}
59 :
static ulong                                              /* In [0,part_max) */
fd_wksp_private_split_after( ulong i1,                    /* In [0,part_max) */
                             ulong s1,                    /* In (0,size of i1) */
                             fd_wksp_t * wksp,            /* Current local join */
                             fd_wksp_private_pinfo_t * pinfo ) { /* == fd_wksp_private_pinfo( wksp ) */

  /* Compute the split of i1's global address range: [g1,g2) stays
     with i1 (s1 bytes), [g2,g3) goes to the new partition i2. */

  ulong g1 = pinfo[ i1 ].gaddr_lo; /* Old starts here */
  ulong g2 = g1 + s1;              /* New starts here */
  ulong g3 = pinfo[ i1 ].gaddr_hi; /* New end here */

  ulong i2 = fd_wksp_private_idle_stack_pop( wksp, pinfo );      /* New after */
  ulong i3 = fd_wksp_private_pinfo_idx( pinfo[ i1 ].next_cidx ); /* Old after */

  /* Initialize i2 as an untagged partition covering [g2,g3), linked
     between i1 and i3, with all treap links null (not in the idle
     stack, used treap or free treap). */

  pinfo[ i2 ].gaddr_lo    = g2;
  pinfo[ i2 ].gaddr_hi    = g3;
  pinfo[ i2 ].tag         = 0UL;
  pinfo[ i2 ].in_same     = 0U;
  pinfo[ i2 ].prev_cidx   = fd_wksp_private_pinfo_cidx( i1 );
  pinfo[ i2 ].next_cidx   = fd_wksp_private_pinfo_cidx( i3 );
  pinfo[ i2 ].left_cidx   = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i2 ].right_cidx  = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i2 ].same_cidx   = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );
  pinfo[ i2 ].parent_cidx = fd_wksp_private_pinfo_cidx( FD_WKSP_PRIVATE_PINFO_IDX_NULL );

  /* Shrink i1 to [g1,g2) and point it forward at i2 */

  pinfo[ i1 ].gaddr_hi  = g2;
  pinfo[ i1 ].next_cidx = fd_wksp_private_pinfo_cidx( i2 );

  /* Splice i2 into the partitioning (i2 becomes the tail if i1 was) */

  if( fd_wksp_private_pinfo_idx_is_null( i3 ) ) wksp->part_tail_cidx = fd_wksp_private_pinfo_cidx( i2 );
  else                                          pinfo[ i3 ].prev_cidx = fd_wksp_private_pinfo_cidx( i2 );

  return i2;
}
92 :
93 : /* fd_wksp_private_merge_before i1 into i2 where i1 is the partition
94 : immediately preceding i2. Assumes that i2 and the partition before
95 : it are not tagged and not in the idle stack, free treap or used
96 : treap.
97 :
98 : This will push the idle stack once to make the index used for the
99 : preceding partition available for future use.
100 :
   fd_wksp_private_merge_after is identical except the partition
   absorbed by the merge is after the surviving partition. */
103 :
104 : static void
105 : fd_wksp_private_merge_before( ulong i1, /* In [0,part_max), == prev to i2, on idle stack on return */
106 : ulong i2, /* In [0,part_max) */
107 : fd_wksp_t * wksp, /* Current local join */
108 7919 : fd_wksp_private_pinfo_t * pinfo ) { /* == fd_wksp_private_pinfo( wksp ) */
109 :
110 7919 : ulong i0 = fd_wksp_private_pinfo_idx( pinfo[ i1 ].prev_cidx ); /* Partition before that (if any) */
111 :
112 7919 : pinfo[ i2 ].gaddr_lo = pinfo[ i1 ].gaddr_lo;
113 7919 : pinfo[ i2 ].prev_cidx = fd_wksp_private_pinfo_cidx( i0 );
114 :
115 7919 : if( fd_wksp_private_pinfo_idx_is_null( i0 ) ) wksp->part_head_cidx = fd_wksp_private_pinfo_cidx( i2 );
116 7916 : else pinfo[ i0 ].next_cidx = fd_wksp_private_pinfo_cidx( i2 );
117 :
118 7919 : fd_wksp_private_idle_stack_push( i1, wksp, pinfo );
119 7919 : }
120 :
121 : static void
122 : fd_wksp_private_merge_after( ulong i1, /* In [0,part_max) */
123 : ulong i2, /* In [0,part_max), == next to i1, on idle stack on return */
124 : fd_wksp_t * wksp, /* Current local join */
125 10839 : fd_wksp_private_pinfo_t * pinfo ) { /* == fd_wksp_private_pinfo( wksp ) */
126 :
127 10839 : ulong i3 = fd_wksp_private_pinfo_idx( pinfo[ i2 ].next_cidx ); /* Partition after that (if any) */
128 :
129 10839 : pinfo[ i1 ].gaddr_hi = pinfo[ i2 ].gaddr_hi;
130 10839 : pinfo[ i1 ].next_cidx = fd_wksp_private_pinfo_cidx( i3 );
131 :
132 10839 : if( fd_wksp_private_pinfo_idx_is_null( i3 ) ) wksp->part_tail_cidx = fd_wksp_private_pinfo_cidx( i1 );
133 5347 : else pinfo[ i3 ].prev_cidx = fd_wksp_private_pinfo_cidx( i1 );
134 :
135 10839 : fd_wksp_private_idle_stack_push( i2, wksp, pinfo );
136 10839 : }
137 :
/* fd_wksp_private_free officially frees used partition i (zeroing its
   tag), removes it from the used treap, coalesces it with any free
   neighboring partitions and inserts the result into the free treap.
   Logs a warning and abandons the operation if corruption is detected
   (the next wksp locker will rebuild).  Assumes the caller holds the
   wksp lock. */

static void
fd_wksp_private_free( ulong i,                     /* Partition to free, in [0,part_max) */
                      fd_wksp_t * wksp,            /* Current local join */
                      fd_wksp_private_pinfo_t * pinfo ) { /* == fd_wksp_private_pinfo( wksp ) */

  ulong part_max = wksp->part_max;

  /* Officially free i */

  FD_COMPILER_MFENCE();
  FD_VOLATILE( pinfo[ i ].tag ) = 0UL;
  FD_COMPILER_MFENCE();

  /* Remove it from various structures.  It is okay if we are killed in
     this as next person to try to lock the wksp will detect this and
     rebuild the workspace. */

  if( FD_UNLIKELY( fd_wksp_private_used_treap_remove( i, wksp, pinfo ) ) ) {
    FD_LOG_WARNING(( "corrupt wksp detected" ));
    return;
  }

  /* If the previous partition exists and is free, coalesce it into i
     (it must come out of the free treap first as its size changes) */

  ulong p = fd_wksp_private_pinfo_idx( pinfo[ i ].prev_cidx );
  if( FD_LIKELY( p<part_max ) && !pinfo[ p ].tag ) {
    if( FD_UNLIKELY( fd_wksp_private_free_treap_remove( p, wksp, pinfo ) ) ) {
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      return;
    }
    fd_wksp_private_merge_before( p, i, wksp, pinfo );
  }

  /* Similarly coalesce the next partition into i if it is free */

  ulong n = fd_wksp_private_pinfo_idx( pinfo[ i ].next_cidx );
  if( FD_LIKELY( n<part_max ) && !pinfo[ n ].tag ) {
    if( FD_UNLIKELY( fd_wksp_private_free_treap_remove( n, wksp, pinfo ) ) ) {
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      return;
    }
    fd_wksp_private_merge_after( i, n, wksp, pinfo );
  }

  /* Make the (possibly coalesced) partition available for reuse */

  if( FD_UNLIKELY( fd_wksp_private_free_treap_insert( i, wksp, pinfo ) ) ) {
    FD_LOG_WARNING(( "corrupt wksp detected" ));
    return;
  }

# if FD_HAS_DEEPASAN
  /* Poison the data region of the now freed allocation. */
  fd_asan_poison( fd_wksp_laddr_fast( wksp, pinfo[ i ].gaddr_lo ), pinfo[ i ].gaddr_hi - pinfo[ i ].gaddr_lo );
# endif
}
188 :
189 : /* user APIs **********************************************************/
190 :
191 : void *
192 : fd_wksp_laddr( fd_wksp_t const * wksp,
193 252 : ulong gaddr ) {
194 252 : if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return NULL; }
195 :
196 252 : if( !gaddr ) return NULL; /* "NULL" maps to NULL */
197 :
198 : /* Note: <= used for gaddr_hi below to support mapping ranges of the
199 : form [lo,hi) between local and global address spaces with no
200 : special handling if allocation put hi at the very end of the
201 : workspace. */
202 :
203 252 : if( FD_UNLIKELY( !((wksp->gaddr_lo<=gaddr) & (gaddr<=wksp->gaddr_hi)) ) ) { FD_LOG_WARNING(( "bad gaddr" )); return NULL; }
204 :
205 252 : return fd_wksp_laddr_fast( wksp, gaddr );
206 252 : }
207 :
208 : ulong
209 : fd_wksp_gaddr( fd_wksp_t const * wksp,
210 0 : void const * laddr ) {
211 0 : if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return 0UL; }
212 :
213 0 : if( !laddr ) return 0UL; /* NULL maps to "NULL" */
214 :
215 0 : ulong gaddr = fd_wksp_gaddr_fast( wksp, laddr );
216 :
217 : /* See note above about why <= for gaddr_hi */
218 :
219 0 : if( FD_UNLIKELY( !((wksp->gaddr_lo<=gaddr) & (gaddr<=wksp->gaddr_hi)) ) ) { FD_LOG_WARNING(( "bad laddr" )); return 0UL; }
220 :
221 0 : return gaddr;
222 0 : }
223 :
/* fd_wksp_alloc_at_least allocates at least sz bytes with the given
   alignment (0 -> FD_WKSP_ALIGN_DEFAULT) and non-zero tag from wksp.
   On success, returns the aligned gaddr of the allocation and writes
   the full extent of the backing partition to [*_lo,*_hi).  On
   failure, returns 0UL with *_lo==*_hi==0UL. */

ulong
fd_wksp_alloc_at_least( fd_wksp_t * wksp,
                        ulong       align,
                        ulong       sz,
                        ulong       tag,
                        ulong *     _lo,
                        ulong *     _hi ) {
  align = fd_ulong_if( !align, FD_WKSP_ALIGN_DEFAULT, align );
  ulong footprint = sz + align - 1UL; /* worst case bytes needed to carve out an aligned sz byte region */

  if( FD_UNLIKELY( !sz ) ) goto fail; /* silent */
  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); goto fail; }
  if( FD_UNLIKELY( !fd_ulong_is_pow2( align ) ) ) { FD_LOG_WARNING(( "bad align" )); goto fail; }
  if( FD_UNLIKELY( !tag ) ) { FD_LOG_WARNING(( "bad tag" )); goto fail; }

# if FD_HAS_DEEPASAN
  /* ASan requires 8 byte alignment for poisoning because memory is mapped in
     8 byte intervals to ASan shadow bytes. Update alignment, sz, and footprint
     to meet manual poisoning requirements. */
  align = fd_ulong_if( align < FD_ASAN_ALIGN, FD_ASAN_ALIGN, align );
  if( sz && sz < ULONG_MAX ) {
    sz = fd_ulong_align_up( sz, FD_ASAN_ALIGN );
  }
  footprint = sz + align - 1UL;
# endif

  if( FD_UNLIKELY( footprint < sz ) ) { FD_LOG_WARNING(( "sz overflow" )); goto fail; }

  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) goto fail; /* logs details */

  /* Find the smallest free partition size that can handle footprint.
     Note: it is theoretically possible when there is corruption that a
     failure to find a suitable partition could be fixed by rebuilding
     the wksp.  But this should not be common and is expensive and we
     can't tell if this is a run-of-the-mill allocation failure
     (insufficient space or too much fragmentation) or an exotic data
     corruption case.  So we just fail to keep algo cost strict and let
     the user decide if they want to attempt extreme measures. */

  ulong i = fd_wksp_private_free_treap_query( footprint, wksp, pinfo );
  if( FD_UNLIKELY( fd_wksp_private_pinfo_idx_is_null( i ) ) ) {
    fd_wksp_private_unlock( wksp );
    if( !fd_wksp_oom_silent ) FD_LOG_WARNING(( "no free space available in workspace %s with data size %lu", wksp->name, wksp->data_max ));
    goto fail;
  }

  /* At this point, i in [0,max), there is at least one suitable
     partition.  If there is more than one, use one from the same list. */

  if( !fd_wksp_private_free_treap_same_is_empty( i, wksp, pinfo ) ) i = fd_wksp_private_free_treap_same_remove( i, wksp, pinfo );
  else if( FD_UNLIKELY( fd_wksp_private_free_treap_remove( i, wksp, pinfo ) ) ) {
    fd_wksp_private_unlock( wksp );
    FD_LOG_WARNING(( "corrupt wksp detected" ));
    goto fail;
  }

  /* At this point, partition i has a zero tag and is not in the idle
     stack, free treap, or used treap.  Further, it is guaranteed to be
     large enough to hold the request.  Trim it to fit the request as
     tightly as possible.  We check before we trim that there are enough
     idle partitions available to complete the request (this improves
     checkpointing as all allocated partitions will be reasonably
     tight). */

  ulong lo = pinfo[ i ].gaddr_lo;
  ulong hi = pinfo[ i ].gaddr_hi;

  ulong r0 = fd_ulong_align_up( lo, align ); /* aligned start of the returned allocation */
  ulong r1 = r0 + sz;                        /* end of the returned allocation */

  /* Walk the idle stack to confirm one idle partition is available per
     split needed (a leading split if r0>lo, a trailing split if r1<hi) */

  ulong part_max = wksp->part_max;
  ulong idle_idx = fd_wksp_private_pinfo_idx( wksp->idle_top_cidx );
  for( int idle_rem = (r0>lo) + (r1<hi); idle_rem; idle_rem-- ) {
    if( FD_UNLIKELY( idle_idx >= part_max ) ) {
      if( FD_UNLIKELY( fd_wksp_private_free_treap_insert( i, wksp, pinfo ) ) ) { /* Could eliminate this with additional surgery */
        fd_wksp_private_unlock( wksp );
        FD_LOG_WARNING(( "corrupt wksp detected" ));
        goto fail;
      }
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "too few partitions available for allocation (part_max %lu)", part_max ));
      goto fail;
    }
    idle_idx = fd_wksp_private_pinfo_idx( pinfo[ idle_idx ].parent_cidx );
  }

  /* Trim any leading space before the aligned start into a new free
     partition */

  if( FD_UNLIKELY( r0>lo ) ) { /* opt for reasonable alignments */
    ulong j = fd_wksp_private_split_before( i, hi-r0, wksp, pinfo );
    if( FD_UNLIKELY( fd_wksp_private_free_treap_insert( j, wksp, pinfo ) ) ) {
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      goto fail;
    }
    lo = r0;
  }

  /* Trim any trailing space past the requested size into a new free
     partition */

  if( FD_LIKELY( r1<hi ) ) { /* opt for splitting a final large partition */
    ulong j = fd_wksp_private_split_after( i, sz, wksp, pinfo );
    if( FD_UNLIKELY( fd_wksp_private_free_treap_insert( j, wksp, pinfo ) ) ) {
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      goto fail;
    }
    hi = r1;
  }

  if( FD_UNLIKELY( fd_wksp_private_used_treap_insert( i, wksp, pinfo ) ) ) {
    fd_wksp_private_unlock( wksp );
    FD_LOG_WARNING(( "corrupt wksp detected" ));
    goto fail;
  }

  /* At this point, i is unofficially allocated.  It is okay if we get
     killed at any point above as the next wksp user to try to lock the
     wksp will detect that we died in the middle of an operation,
     potentially leaving the partitioning, idle stack, used treap and/or
     free treap might be in an inconsistent state and thus proceed to
     rebuild them.  We now update the tag in the array to make the
     allocation official. */

  FD_COMPILER_MFENCE();
  FD_VOLATILE( pinfo[ i ].tag ) = tag;
  FD_COMPILER_MFENCE();

# if FD_HAS_DEEPASAN
  /* Unpoison the data region of the allocation */
  fd_asan_unpoison( fd_wksp_laddr_fast( wksp, lo ), hi - lo );
# endif

  fd_wksp_private_unlock( wksp );
  *_lo = lo;
  *_hi = hi;
  return r0;

fail:
  *_lo = 0UL;
  *_hi = 0UL;
  return 0UL;
}
365 :
/* fd_wksp_free frees the wksp allocation whose gaddr is anywhere
   inside it.  Freeing the "NULL" gaddr (0) is a silent no-op; a gaddr
   that does not match a current allocation logs a warning. */

void
fd_wksp_free( fd_wksp_t * wksp,
              ulong       gaddr ) {
  if( FD_UNLIKELY( !gaddr ) ) return;

  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return; }

  ulong part_max = wksp->part_max;
  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return; /* logs details */

  ulong i = fd_wksp_private_used_treap_query( gaddr, wksp, pinfo );
  if( FD_UNLIKELY( i<part_max ) ) fd_wksp_private_free( i, wksp, pinfo ); /* logs details */

  fd_wksp_private_unlock( wksp );

  /* Warn outside the critical section if the query did not resolve to
     a used partition (and was not the idx-null "not found" case
     handled above) */

  if( FD_UNLIKELY( i>=part_max && i!=FD_WKSP_PRIVATE_PINFO_IDX_NULL ) ) {
    FD_LOG_WARNING(( "gaddr does not appear to be a current wksp allocation" ));
  }
}
387 :
388 : ulong
389 : fd_wksp_tag( fd_wksp_t * wksp,
390 0 : ulong gaddr ) {
391 0 : if( FD_UNLIKELY( !wksp ) ) return 0UL;
392 :
393 0 : ulong part_max = wksp->part_max;
394 0 : fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );
395 :
396 0 : if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return 0UL; /* logs details */
397 :
398 0 : ulong i = fd_wksp_private_used_treap_query( gaddr, wksp, pinfo );
399 0 : ulong tag = FD_LIKELY( i<part_max ) ? pinfo[ i ].tag : 0UL;
400 :
401 0 : fd_wksp_private_unlock( wksp );
402 :
403 0 : return tag;
404 0 : }
405 :
/* fd_wksp_tag_query walks the wksp partitioning and counts partitions
   whose tag matches any of tag[0..tag_cnt).  Details for up to
   info_max matches are written to info.  Returns the number of
   matching partitions found (0 on bad args, lock failure or detected
   corruption). */

ulong
fd_wksp_tag_query( fd_wksp_t *                wksp,
                   ulong const *              tag,
                   ulong                      tag_cnt,
                   fd_wksp_tag_query_info_t * info,
                   ulong                      info_max ) {

  if( FD_UNLIKELY( !tag_cnt ) ) return 0UL; /* No tags to query */

  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return 0UL; }
  if( FD_UNLIKELY( !tag ) ) { FD_LOG_WARNING(( "bad tag" )); return 0UL; }

  if( FD_UNLIKELY( (!!info_max) & (!info) ) ) { FD_LOG_WARNING(( "NULL info" )); return 0UL; }

  ulong part_max = wksp->part_max;
  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  ulong info_cnt = 0UL;

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return 0UL; /* logs details */

  /* Unique cycle_tag for this traversal; seeing it twice on a
     partition means the next list contains a cycle (corruption) */

  ulong cycle_tag = wksp->cycle_tag++;

  ulong i = fd_wksp_private_pinfo_idx( wksp->part_head_cidx );
  while( !fd_wksp_private_pinfo_idx_is_null( i ) ) {
    if( FD_UNLIKELY( i>=part_max ) || FD_UNLIKELY( pinfo[ i ].cycle_tag==cycle_tag ) ) {
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      return 0UL;
    }
    pinfo[ i ].cycle_tag = cycle_tag; /* mark i as visited */

    /* Record the partition if its tag is one of the queried tags.
       Matches past info_max are counted but not recorded. */

    ulong _tag = pinfo[ i ].tag;
    for( ulong tag_idx=0UL; tag_idx<tag_cnt; tag_idx++ ) { /* TODO: USE BETTER MATCHER */
      if( tag[ tag_idx ]==_tag ) {
        if( FD_LIKELY( info_cnt<info_max ) ) {
          info[ info_cnt ].gaddr_lo = pinfo[ i ].gaddr_lo;
          info[ info_cnt ].gaddr_hi = pinfo[ i ].gaddr_hi;
          info[ info_cnt ].tag      = pinfo[ i ].tag;
        }
        info_cnt++;
        break;
      }
    }

    i = fd_wksp_private_pinfo_idx( pinfo[ i ].next_cidx );
  }

  fd_wksp_private_unlock( wksp );
  return info_cnt;
}
457 :
/* fd_wksp_tag_free frees every wksp allocation whose tag matches any
   of tag[0..tag_cnt).  Matching partitions are collected onto a stack
   first (threaded through stack_cidx) and freed afterwards so the
   traversal is not disturbed by the coalescing done in
   fd_wksp_private_free. */

void
fd_wksp_tag_free( fd_wksp_t *   wksp,
                  ulong const * tag,
                  ulong         tag_cnt ) {
  if( FD_UNLIKELY( !tag_cnt ) ) return; /* No tags to free */

  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return; }
  if( FD_UNLIKELY( !tag ) ) { FD_LOG_WARNING(( "bad tag" )); return; }

  ulong part_max = wksp->part_max;
  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return; /* logs details */

  /* Push matching used partitions onto a stack */

  ulong top = FD_WKSP_PRIVATE_PINFO_IDX_NULL;

  ulong cycle_tag = wksp->cycle_tag++; /* unique marker to detect cycles in the next list */

  ulong i = fd_wksp_private_pinfo_idx( wksp->part_head_cidx );
  while( !fd_wksp_private_pinfo_idx_is_null( i ) ) {
    if( FD_UNLIKELY( i>=part_max ) || FD_UNLIKELY( pinfo[ i ].cycle_tag==cycle_tag ) ) {
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      return;
    }
    pinfo[ i ].cycle_tag = cycle_tag; /* mark i as visited */

    ulong _tag = pinfo[ i ].tag;
    if( _tag ) { /* TODO: use a more efficient matcher */
      ulong tag_idx; for( tag_idx=0UL; tag_idx<tag_cnt; tag_idx++ ) if( tag[ tag_idx ]==_tag ) break;
      if( tag_idx<tag_cnt ) {
        pinfo[ i ].stack_cidx = fd_wksp_private_pinfo_cidx( top );
        top = i;
      }
    }
    i = fd_wksp_private_pinfo_idx( pinfo[ i ].next_cidx );
  }

  /* Free partitions on the stack */

  while( !fd_wksp_private_pinfo_idx_is_null( top ) ) {
    i   = top;
    top = fd_wksp_private_pinfo_idx( pinfo[ i ].stack_cidx );
    fd_wksp_private_free( i, wksp, pinfo );
  }

  fd_wksp_private_unlock( wksp );
}
508 :
509 : void
510 : fd_wksp_memset( fd_wksp_t * wksp,
511 : ulong gaddr,
512 0 : int c ) {
513 0 : if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return; }
514 :
515 0 : ulong part_max = wksp->part_max;
516 0 : fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );
517 :
518 0 : int err;
519 :
520 0 : if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return; /* logs details */
521 :
522 0 : ulong i = fd_wksp_private_used_treap_query( gaddr, wksp, pinfo );
523 0 : if( FD_UNLIKELY( i>=part_max ) ) err = 1;
524 0 : else {
525 0 : fd_memset( fd_wksp_laddr_fast( wksp, pinfo[ i ].gaddr_lo ), c, fd_wksp_private_pinfo_sz( pinfo + i ) );
526 0 : err = 0;
527 0 : }
528 :
529 0 : fd_wksp_private_unlock( wksp );
530 :
531 0 : if( FD_UNLIKELY( err ) ) FD_LOG_WARNING(( "gaddr does not seem to point to a current wksp allocation" ));
532 0 : }
533 :
/* fd_wksp_reset frees all allocations by zeroing every partition tag
   and rebuilding the wksp data structures with the given treap seed. */

void
fd_wksp_reset( fd_wksp_t * wksp,
               uint        seed ) {
  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "NULL wksp" )); return; }

# if FD_HAS_DEEPASAN
  /* Poison entire workspace except wksp header and the pinfo array. */
  ulong footprint = fd_wksp_footprint( wksp->part_max, wksp->data_max );
  void * wksp_data = (void*)((ulong)wksp + fd_wksp_private_pinfo_off());
  fd_asan_poison( wksp_data, footprint - fd_wksp_private_pinfo_off() );
  fd_wksp_private_pinfo_t * pinfo_arr = fd_wksp_private_pinfo( wksp );
  fd_asan_unpoison( pinfo_arr, FD_WKSP_PRIVATE_PINFO_FOOTPRINT * wksp->part_max );
# endif

  ulong part_max = wksp->part_max;
  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) return; /* logs details */

  /* Zero all tags (marking every partition free) then rebuild the
     partitioning, idle stack and treaps from scratch */

  for( ulong i=0; i<part_max; i++ ) pinfo[ i ].tag = 0UL;
  int err = fd_wksp_rebuild( wksp, seed );

  fd_wksp_private_unlock( wksp );

  if( FD_UNLIKELY( err ) ) FD_LOG_WARNING(( "corrupt wksp detected" ));
}
560 :
/* fd_wksp_usage fills usage with partition counts and byte totals for
   wksp: all partitions (total_*), free partitions (free_*) and
   partitions whose tag matches any of tag[0..tag_cnt) (used_*).
   Returns usage; on any failure usage is zeroed. */

fd_wksp_usage_t *
fd_wksp_usage( fd_wksp_t *       wksp,
               ulong const *     tag,
               ulong             tag_cnt,
               fd_wksp_usage_t * usage ) {

  /* Check input args */

  if( FD_UNLIKELY( !usage ) ) { FD_LOG_WARNING(( "bad usage" )); return usage; }

  fd_memset( usage, 0, sizeof(fd_wksp_usage_t) );

  if( FD_UNLIKELY( !wksp ) ) { FD_LOG_WARNING(( "bad wksp" )); return usage; }
  if( FD_UNLIKELY( (!tag) & (!!tag_cnt) ) ) { FD_LOG_WARNING(( "bad tag" )); return usage; }

  ulong part_max = wksp->part_max;
  fd_wksp_private_pinfo_t * pinfo = fd_wksp_private_pinfo( wksp );

  if( FD_UNLIKELY( fd_wksp_private_lock( wksp ) ) ) { FD_LOG_WARNING(( "fd_wksp_private_lock failed" )); return usage; }

  /* Push matching used partitions onto a stack */

  usage->total_max = part_max;

  ulong cycle_tag = wksp->cycle_tag++; /* unique marker to detect cycles in the next list */

  ulong i = fd_wksp_private_pinfo_idx( wksp->part_head_cidx );
  while( !fd_wksp_private_pinfo_idx_is_null( i ) ) {
    if( FD_UNLIKELY( i>=part_max ) || FD_UNLIKELY( pinfo[ i ].cycle_tag==cycle_tag ) ) {
      fd_wksp_private_unlock( wksp );
      FD_LOG_WARNING(( "corrupt wksp detected" ));
      fd_memset( usage, 0, sizeof(fd_wksp_usage_t) );
      return usage;
    }
    pinfo[ i ].cycle_tag = cycle_tag; /* mark i as visited */

    ulong part_sz  = fd_wksp_private_pinfo_sz( pinfo + i );
    ulong part_tag = pinfo[ i ].tag;

    /* TODO: use a more efficient matcher */
    ulong tag_idx; for( tag_idx=0UL; tag_idx<tag_cnt; tag_idx++ ) if( tag[ tag_idx ]==part_tag ) break;

    int is_free = !part_tag;         /* tag 0 marks a free partition */
    int is_used = tag_idx<tag_cnt;   /* matched one of the query tags */

    usage->total_cnt += 1UL;             usage->total_sz += part_sz;
    usage->free_cnt  += (ulong)is_free;  usage->free_sz  += fd_ulong_if( is_free, part_sz, 0UL );
    usage->used_cnt  += (ulong)is_used;  usage->used_sz  += fd_ulong_if( is_used, part_sz, 0UL );

    i = fd_wksp_private_pinfo_idx( pinfo[ i ].next_cidx );
  }

  fd_wksp_private_unlock( wksp );
  return usage;
}
|