/src/icu/source/common/unifiedcache.cpp
Line  | Count  | Source  | 
1  |  | // © 2016 and later: Unicode, Inc. and others.  | 
2  |  | // License & terms of use: http://www.unicode.org/copyright.html  | 
3  |  | /*  | 
4  |  | ******************************************************************************  | 
5  |  | * Copyright (C) 2015, International Business Machines Corporation and  | 
6  |  | * others. All Rights Reserved.  | 
7  |  | ******************************************************************************  | 
8  |  | *  | 
9  |  | * File unifiedcache.cpp  | 
10  |  | ******************************************************************************  | 
11  |  | */  | 
12  |  |  | 
13  |  | #include "unifiedcache.h"  | 
14  |  |  | 
15  |  | #include <algorithm>      // For std::max()  | 
16  |  | #include <mutex>  | 
17  |  |  | 
18  |  | #include "uassert.h"  | 
19  |  | #include "uhash.h"  | 
20  |  | #include "ucln_cmn.h"  | 
21  |  |  | 
22  |  | static icu::UnifiedCache *gCache = NULL;  | 
23  |  | static std::mutex *gCacheMutex = nullptr;  | 
24  |  | static std::condition_variable *gInProgressValueAddedCond;  | 
25  |  | static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;  | 
26  |  |  | 
27  |  | static const int32_t MAX_EVICT_ITERATIONS = 10;  | 
28  |  | static const int32_t DEFAULT_MAX_UNUSED = 1000;  | 
29  |  | static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;  | 
30  |  |  | 
31  |  |  | 
32  |  | U_CDECL_BEGIN  | 
33  | 0  | static UBool U_CALLCONV unifiedcache_cleanup() { | 
34  | 0  |     gCacheInitOnce.reset();  | 
35  | 0  |     delete gCache;  | 
36  | 0  |     gCache = nullptr;  | 
37  | 0  |     gCacheMutex->~mutex();  | 
38  | 0  |     gCacheMutex = nullptr;  | 
39  | 0  |     gInProgressValueAddedCond->~condition_variable();  | 
40  | 0  |     gInProgressValueAddedCond = nullptr;  | 
41  | 0  |     return TRUE;  | 
42  | 0  | }  | 
43  |  | U_CDECL_END  | 
44  |  |  | 
45  |  |  | 
46  |  | U_NAMESPACE_BEGIN  | 
47  |  |  | 
48  |  | U_CAPI int32_t U_EXPORT2  | 
49  | 0  | ucache_hashKeys(const UHashTok key) { | 
50  | 0  |     const CacheKeyBase *ckey = (const CacheKeyBase *) key.pointer;  | 
51  | 0  |     return ckey->hashCode();  | 
52  | 0  | }  | 
53  |  |  | 
54  |  | U_CAPI UBool U_EXPORT2  | 
55  | 0  | ucache_compareKeys(const UHashTok key1, const UHashTok key2) { | 
56  | 0  |     const CacheKeyBase *p1 = (const CacheKeyBase *) key1.pointer;  | 
57  | 0  |     const CacheKeyBase *p2 = (const CacheKeyBase *) key2.pointer;  | 
58  | 0  |     return *p1 == *p2;  | 
59  | 0  | }  | 
60  |  |  | 
61  |  | U_CAPI void U_EXPORT2  | 
62  | 0  | ucache_deleteKey(void *obj) { | 
63  | 0  |     CacheKeyBase *p = (CacheKeyBase *) obj;  | 
64  | 0  |     delete p;  | 
65  | 0  | }  | 
66  |  |  | 
67  | 0  | CacheKeyBase::~CacheKeyBase() { | 
68  | 0  | }  | 
69  |  |  | 
70  | 0  | static void U_CALLCONV cacheInit(UErrorCode &status) { | 
71  | 0  |     U_ASSERT(gCache == NULL);  | 
72  | 0  |     ucln_common_registerCleanup(  | 
73  | 0  |             UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);  | 
74  |  |  | 
75  | 0  |     gCacheMutex = STATIC_NEW(std::mutex);  | 
76  | 0  |     gInProgressValueAddedCond = STATIC_NEW(std::condition_variable);  | 
77  | 0  |     gCache = new UnifiedCache(status);  | 
78  | 0  |     if (gCache == NULL) { | 
79  | 0  |         status = U_MEMORY_ALLOCATION_ERROR;  | 
80  | 0  |     }  | 
81  | 0  |     if (U_FAILURE(status)) { | 
82  | 0  |         delete gCache;  | 
83  | 0  |         gCache = NULL;  | 
84  | 0  |         return;  | 
85  | 0  |     }  | 
86  | 0  | }  | 
87  |  |  | 
88  | 0  | UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) { | 
89  | 0  |     umtx_initOnce(gCacheInitOnce, &cacheInit, status);  | 
90  | 0  |     if (U_FAILURE(status)) { | 
91  | 0  |         return NULL;  | 
92  | 0  |     }  | 
93  | 0  |     U_ASSERT(gCache != NULL);  | 
94  | 0  |     return gCache;  | 
95  | 0  | }  | 
96  |  |  | 
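
The cache is a lazily created, process-wide singleton: cacheInit() runs exactly once via umtx_initOnce() and registers unifiedcache_cleanup() for library shutdown. A minimal sketch of obtaining the instance and reading its statistics, using only the public methods defined in this file (the wrapper function itself is illustrative, not part of ICU):

    #include "unifiedcache.h"

    static void inspectUnifiedCache() {
        UErrorCode status = U_ZERO_ERROR;
        icu::UnifiedCache *cache = icu::UnifiedCache::getInstance(status);
        if (U_FAILURE(status)) {
            return;  // construction can fail with U_MEMORY_ALLOCATION_ERROR
        }
        int32_t keys    = cache->keyCount();         // all hash entries, placeholders included
        int32_t unused  = cache->unusedCount();      // entries minus values holding hard references
        int64_t evicted = cache->autoEvictedCount(); // values removed by eviction slices so far
        (void)keys; (void)unused; (void)evicted;
        cache->flush();  // drop everything not pinned by an outside hard reference
    }
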
97  |  | UnifiedCache::UnifiedCache(UErrorCode &status) :  | 
98  |  |         fHashtable(NULL),  | 
99  | 0  |         fEvictPos(UHASH_FIRST),  | 
100  | 0  |         fNumValuesTotal(0),  | 
101  | 0  |         fNumValuesInUse(0),  | 
102  | 0  |         fMaxUnused(DEFAULT_MAX_UNUSED),  | 
103  | 0  |         fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),  | 
104  | 0  |         fAutoEvictedCount(0),  | 
105  | 0  |         fNoValue(nullptr) { | 
106  | 0  |     if (U_FAILURE(status)) { | 
107  | 0  |         return;  | 
108  | 0  |     }  | 
109  | 0  |     fNoValue = new SharedObject();  | 
110  | 0  |     if (fNoValue == nullptr) { | 
111  | 0  |         status = U_MEMORY_ALLOCATION_ERROR;  | 
112  | 0  |         return;  | 
113  | 0  |     }  | 
114  | 0  |     fNoValue->softRefCount = 1;  // Add fake references to prevent fNoValue from being deleted  | 
115  | 0  |     fNoValue->hardRefCount = 1;  // when other references to it are removed.  | 
116  | 0  |     fNoValue->cachePtr = this;  | 
117  |  |  | 
118  | 0  |     fHashtable = uhash_open(  | 
119  | 0  |             &ucache_hashKeys,  | 
120  | 0  |             &ucache_compareKeys,  | 
121  | 0  |             NULL,  | 
122  | 0  |             &status);  | 
123  | 0  |     if (U_FAILURE(status)) { | 
124  | 0  |         return;  | 
125  | 0  |     }  | 
126  | 0  |     uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);  | 
127  | 0  | }  | 
128  |  |  | 
129  |  | void UnifiedCache::setEvictionPolicy(  | 
130  | 0  |         int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) { | 
131  | 0  |     if (U_FAILURE(status)) { | 
132  | 0  |         return;  | 
133  | 0  |     }  | 
134  | 0  |     if (count < 0 || percentageOfInUseItems < 0) { | 
135  | 0  |         status = U_ILLEGAL_ARGUMENT_ERROR;  | 
136  | 0  |         return;  | 
137  | 0  |     }  | 
138  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
139  | 0  |     fMaxUnused = count;  | 
140  | 0  |     fMaxPercentageOfInUse = percentageOfInUseItems;  | 
141  | 0  | }  | 
142  |  |  | 
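
The two knobs stored here feed _computeCountOfItemsToEvict() below: the cache may keep unused values up to the larger of count and percentageOfInUseItems percent of the values currently in use. A hypothetical call (the particular numbers are only an example):

    UErrorCode status = U_ZERO_ERROR;
    icu::UnifiedCache *cache = icu::UnifiedCache::getInstance(status);
    if (U_SUCCESS(status)) {
        // Allow at most 100 unused values, or 25% of the in-use count,
        // whichever is larger; negative arguments fail with
        // U_ILLEGAL_ARGUMENT_ERROR.
        cache->setEvictionPolicy(100, 25, status);
    }
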
143  | 0  | int32_t UnifiedCache::unusedCount() const { | 
144  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
145  | 0  |     return uhash_count(fHashtable) - fNumValuesInUse;  | 
146  | 0  | }  | 
147  |  |  | 
148  | 0  | int64_t UnifiedCache::autoEvictedCount() const { | 
149  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
150  | 0  |     return fAutoEvictedCount;  | 
151  | 0  | }  | 
152  |  |  | 
153  | 0  | int32_t UnifiedCache::keyCount() const { | 
154  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
155  | 0  |     return uhash_count(fHashtable);  | 
156  | 0  | }  | 
157  |  |  | 
158  | 0  | void UnifiedCache::flush() const { | 
159  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
160  |  |  | 
161  |  |     // Use a loop in case flushed cache items held hard references to  | 
162  |  |     // other cache items, making those additional cache items eligible for  | 
163  |  |     // flushing.  | 
164  | 0  |     while (_flush(FALSE));  | 
165  | 0  | }  | 
166  |  |  | 
167  | 0  | void UnifiedCache::handleUnreferencedObject() const { | 
168  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
169  | 0  |     --fNumValuesInUse;  | 
170  | 0  |     _runEvictionSlice();  | 
171  | 0  | }  | 
172  |  |  | 
173  |  | #ifdef UNIFIED_CACHE_DEBUG  | 
174  |  | #include <stdio.h>  | 
175  |  |  | 
176  |  | void UnifiedCache::dump() { | 
177  |  |     UErrorCode status = U_ZERO_ERROR;  | 
178  |  |     const UnifiedCache *cache = getInstance(status);  | 
179  |  |     if (U_FAILURE(status)) { | 
180  |  |         fprintf(stderr, "Unified Cache: Error fetching cache.\n");  | 
181  |  |         return;  | 
182  |  |     }  | 
183  |  |     cache->dumpContents();  | 
184  |  | }  | 
185  |  |  | 
186  |  | void UnifiedCache::dumpContents() const { | 
187  |  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
188  |  |     _dumpContents();  | 
189  |  | }  | 
190  |  |  | 
191  |  | // Dumps content of cache.  | 
192  |  | // On entry, gCacheMutex must be held.  | 
193  |  | // On exit, cache contents dumped to stderr.  | 
194  |  | void UnifiedCache::_dumpContents() const { | 
195  |  |     int32_t pos = UHASH_FIRST;  | 
196  |  |     const UHashElement *element = uhash_nextElement(fHashtable, &pos);  | 
197  |  |     char buffer[256];  | 
198  |  |     int32_t cnt = 0;  | 
199  |  |     for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) { | 
200  |  |         const SharedObject *sharedObject =  | 
201  |  |                 (const SharedObject *) element->value.pointer;  | 
202  |  |         const CacheKeyBase *key =  | 
203  |  |                 (const CacheKeyBase *) element->key.pointer;  | 
204  |  |         if (sharedObject->hasHardReferences()) { | 
205  |  |             ++cnt;  | 
206  |  |             fprintf(  | 
207  |  |                     stderr,  | 
208  |  |                     "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",  | 
209  |  |                     key->writeDescription(buffer, 256),  | 
210  |  |                     key->creationStatus,  | 
211  |  |                     sharedObject == fNoValue ? NULL : sharedObject,  | 
212  |  |                     sharedObject->getRefCount(),  | 
213  |  |                     sharedObject->getSoftRefCount());  | 
214  |  |         }  | 
215  |  |     }  | 
216  |  |     fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));  | 
217  |  | }  | 
218  |  | #endif  | 
219  |  |  | 
220  | 0  | UnifiedCache::~UnifiedCache() { | 
221  |  |     // Try our best to clean up first.  | 
222  | 0  |     flush();  | 
223  | 0  |     { | 
224  |  |         // Now all that should be left in the cache are entries that refer to  | 
225  |  |         // each other and entries with hard references from outside the cache.  | 
226  |  |         // Nothing we can do about these so proceed to wipe out the cache.  | 
227  | 0  |         std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
228  | 0  |         _flush(TRUE);  | 
229  | 0  |     }  | 
230  | 0  |     uhash_close(fHashtable);  | 
231  | 0  |     fHashtable = nullptr;  | 
232  | 0  |     delete fNoValue;  | 
233  | 0  |     fNoValue = nullptr;  | 
234  | 0  | }  | 
235  |  |  | 
236  |  | const UHashElement *  | 
237  | 0  | UnifiedCache::_nextElement() const { | 
238  | 0  |     const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);  | 
239  | 0  |     if (element == NULL) { | 
240  | 0  |         fEvictPos = UHASH_FIRST;  | 
241  | 0  |         return uhash_nextElement(fHashtable, &fEvictPos);  | 
242  | 0  |     }  | 
243  | 0  |     return element;  | 
244  | 0  | }  | 
245  |  |  | 
246  | 0  | UBool UnifiedCache::_flush(UBool all) const { | 
247  | 0  |     UBool result = FALSE;  | 
248  | 0  |     int32_t origSize = uhash_count(fHashtable);  | 
249  | 0  |     for (int32_t i = 0; i < origSize; ++i) { | 
250  | 0  |         const UHashElement *element = _nextElement();  | 
251  | 0  |         if (element == nullptr) { | 
252  | 0  |             break;  | 
253  | 0  |         }  | 
254  | 0  |         if (all || _isEvictable(element)) { | 
255  | 0  |             const SharedObject *sharedObject =  | 
256  | 0  |                     (const SharedObject *) element->value.pointer;  | 
257  | 0  |             U_ASSERT(sharedObject->cachePtr == this);  | 
258  | 0  |             uhash_removeElement(fHashtable, element);  | 
259  | 0  |             removeSoftRef(sharedObject);    // Deletes the sharedObject when softRefCount goes to zero.  | 
260  | 0  |             result = TRUE;  | 
261  | 0  |         }  | 
262  | 0  |     }  | 
263  | 0  |     return result;  | 
264  | 0  | }  | 
265  |  |  | 
266  | 0  | int32_t UnifiedCache::_computeCountOfItemsToEvict() const { | 
267  | 0  |     int32_t totalItems = uhash_count(fHashtable);  | 
268  | 0  |     int32_t evictableItems = totalItems - fNumValuesInUse;  | 
269  |  |  | 
270  | 0  |     int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;  | 
271  | 0  |     int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);  | 
272  | 0  |     int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);  | 
273  | 0  |     return countOfItemsToEvict;  | 
274  | 0  | }  | 
275  |  |  | 
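
A worked example with assumed numbers, under the default policy (fMaxUnused = 1000, fMaxPercentageOfInUse = 100): suppose the hash table holds 1600 entries and 400 values are in use.

    evictableItems          = 1600 - 400          = 1200
    unusedLimitByPercentage = 400 * 100 / 100     = 400
    unusedLimit             = max(400, 1000)      = 1000
    countOfItemsToEvict     = max(0, 1200 - 1000) = 200

_runEvictionSlice() below then removes at most MAX_EVICT_ITERATIONS (10) evictable entries per call, so an excess like this is worked off incrementally over subsequent cache operations rather than in a single pass.
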
276  | 0  | void UnifiedCache::_runEvictionSlice() const { | 
277  | 0  |     int32_t maxItemsToEvict = _computeCountOfItemsToEvict();  | 
278  | 0  |     if (maxItemsToEvict <= 0) { | 
279  | 0  |         return;  | 
280  | 0  |     }  | 
281  | 0  |     for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) { | 
282  | 0  |         const UHashElement *element = _nextElement();  | 
283  | 0  |         if (element == nullptr) { | 
284  | 0  |             break;  | 
285  | 0  |         }  | 
286  | 0  |         if (_isEvictable(element)) { | 
287  | 0  |             const SharedObject *sharedObject =  | 
288  | 0  |                     (const SharedObject *) element->value.pointer;  | 
289  | 0  |             uhash_removeElement(fHashtable, element);  | 
290  | 0  |             removeSoftRef(sharedObject);   // Deletes sharedObject when softRefCount goes to zero.  | 
291  | 0  |             ++fAutoEvictedCount;  | 
292  | 0  |             if (--maxItemsToEvict == 0) { | 
293  | 0  |                 break;  | 
294  | 0  |             }  | 
295  | 0  |         }  | 
296  | 0  |     }  | 
297  | 0  | }  | 
298  |  |  | 
299  |  | void UnifiedCache::_putNew(  | 
300  |  |         const CacheKeyBase &key,  | 
301  |  |         const SharedObject *value,  | 
302  |  |         const UErrorCode creationStatus,  | 
303  | 0  |         UErrorCode &status) const { | 
304  | 0  |     if (U_FAILURE(status)) { | 
305  | 0  |         return;  | 
306  | 0  |     }  | 
307  | 0  |     CacheKeyBase *keyToAdopt = key.clone();  | 
308  | 0  |     if (keyToAdopt == NULL) { | 
309  | 0  |         status = U_MEMORY_ALLOCATION_ERROR;  | 
310  | 0  |         return;  | 
311  | 0  |     }  | 
312  | 0  |     keyToAdopt->fCreationStatus = creationStatus;  | 
313  | 0  |     if (value->softRefCount == 0) { | 
314  | 0  |         _registerPrimary(keyToAdopt, value);  | 
315  | 0  |     }  | 
316  | 0  |     void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);  | 
317  | 0  |     U_ASSERT(oldValue == nullptr);  | 
318  | 0  |     (void)oldValue;  | 
319  | 0  |     if (U_SUCCESS(status)) { | 
320  | 0  |         value->softRefCount++;  | 
321  | 0  |     }  | 
322  | 0  | }  | 
323  |  |  | 
324  |  | void UnifiedCache::_putIfAbsentAndGet(  | 
325  |  |         const CacheKeyBase &key,  | 
326  |  |         const SharedObject *&value,  | 
327  | 0  |         UErrorCode &status) const { | 
328  | 0  |     std::lock_guard<std::mutex> lock(*gCacheMutex);  | 
329  | 0  |     const UHashElement *element = uhash_find(fHashtable, &key);  | 
330  | 0  |     if (element != NULL && !_inProgress(element)) { | 
331  | 0  |         _fetch(element, value, status);  | 
332  | 0  |         return;  | 
333  | 0  |     }  | 
334  | 0  |     if (element == NULL) { | 
335  | 0  |         UErrorCode putError = U_ZERO_ERROR;  | 
336  |  |         // Add the new value on a best-effort basis only; errors from this put are ignored.  | 
337  | 0  |         _putNew(key, value, status, putError);  | 
338  | 0  |     } else { | 
339  | 0  |         _put(element, value, status);  | 
340  | 0  |     }  | 
341  |  |     // Run an eviction slice. This will run even if we added a primary entry  | 
342  |  |     // which doesn't increase the unused count, but that is still OK.  | 
343  | 0  |     _runEvictionSlice();  | 
344  | 0  | }  | 
345  |  |  | 
346  |  |  | 
347  |  | UBool UnifiedCache::_poll(  | 
348  |  |         const CacheKeyBase &key,  | 
349  |  |         const SharedObject *&value,  | 
350  | 0  |         UErrorCode &status) const { | 
351  | 0  |     U_ASSERT(value == NULL);  | 
352  | 0  |     U_ASSERT(status == U_ZERO_ERROR);  | 
353  | 0  |     std::unique_lock<std::mutex> lock(*gCacheMutex);  | 
354  | 0  |     const UHashElement *element = uhash_find(fHashtable, &key);  | 
355  |  |  | 
356  |  |     // If the hash table contains an inProgress placeholder entry for this key,  | 
357  |  |     // this means that another thread is currently constructing the value object.  | 
358  |  |     // Loop, waiting for that construction to complete.  | 
359  | 0  |     while (element != NULL && _inProgress(element)) { | 
360  | 0  |         gInProgressValueAddedCond->wait(lock);  | 
361  | 0  |         element = uhash_find(fHashtable, &key);  | 
362  | 0  |     }  | 
363  |  |  | 
364  |  |     // If the hash table contains an entry for the key,  | 
365  |  |     // fetch out the contents and return them.  | 
366  | 0  |     if (element != NULL) { | 
367  | 0  |         _fetch(element, value, status);  | 
368  | 0  |         return TRUE;  | 
369  | 0  |     }  | 
370  |  |  | 
371  |  |     // The hash table contained nothing for this key.  | 
372  |  |     // Insert an inProgress placeholder value.  | 
373  |  |     // Our caller will create the final value and update the hash table.  | 
374  | 0  |     _putNew(key, fNoValue, U_ZERO_ERROR, status);  | 
375  | 0  |     return FALSE;  | 
376  | 0  | }  | 
377  |  |  | 
378  |  | void UnifiedCache::_get(  | 
379  |  |         const CacheKeyBase &key,  | 
380  |  |         const SharedObject *&value,  | 
381  |  |         const void *creationContext,  | 
382  | 0  |         UErrorCode &status) const { | 
383  | 0  |     U_ASSERT(value == NULL);  | 
384  | 0  |     U_ASSERT(status == U_ZERO_ERROR);  | 
385  | 0  |     if (_poll(key, value, status)) { | 
386  | 0  |         if (value == fNoValue) { | 
387  | 0  |             SharedObject::clearPtr(value);  | 
388  | 0  |         }  | 
389  | 0  |         return;  | 
390  | 0  |     }  | 
391  | 0  |     if (U_FAILURE(status)) { | 
392  | 0  |         return;  | 
393  | 0  |     }  | 
394  | 0  |     value = key.createObject(creationContext, status);  | 
395  | 0  |     U_ASSERT(value == NULL || value->hasHardReferences());  | 
396  | 0  |     U_ASSERT(value != NULL || status != U_ZERO_ERROR);  | 
397  | 0  |     if (value == NULL) { | 
398  | 0  |         SharedObject::copyPtr(fNoValue, value);  | 
399  | 0  |     }  | 
400  | 0  |     _putIfAbsentAndGet(key, value, status);  | 
401  | 0  |     if (value == fNoValue) { | 
402  | 0  |         SharedObject::clearPtr(value);  | 
403  | 0  |     }  | 
404  | 0  | }  | 
405  |  |  | 
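
Everything _get() requires of a key is visible from the call sites in this file: a hash (ucache_hashKeys), equality (ucache_compareKeys), a clone() that _putNew() adopts into the hash table, createObject() to build the value on a miss, and writeDescription() for the debug dump. A rough sketch of a key type follows; the exact virtual signatures are declared in unifiedcache.h, so the ones shown here are inferred from those call sites and should be treated as assumptions:

    // Hypothetical key class, for illustration only.
    class MyDataKey : public icu::CacheKeyBase {
    public:
        explicit MyDataKey(int32_t id) : fId(id) {}

        // Used by ucache_hashKeys() when the key is stored in fHashtable.
        int32_t hashCode() const { return fId; }

        // Used by ucache_compareKeys(); equal keys share one cache entry.
        bool operator==(const icu::CacheKeyBase &other) const {
            const MyDataKey *o = dynamic_cast<const MyDataKey *>(&other);
            return o != nullptr && o->fId == fId;
        }

        // _putNew() clones the caller's key and adopts the copy.
        icu::CacheKeyBase *clone() const { return new MyDataKey(*this); }

        // Called by _get() on a cache miss, outside the cache lock; must
        // either return a value carrying a hard reference or set status.
        const icu::SharedObject *createObject(
                const void *creationContext, UErrorCode &status) const;

        // Used by _dumpContents() under UNIFIED_CACHE_DEBUG.
        char *writeDescription(char *buffer, int32_t bufSize) const;

    private:
        int32_t fId;
    };
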
406  |  | void UnifiedCache::_registerPrimary(  | 
407  | 0  |             const CacheKeyBase *theKey, const SharedObject *value) const { | 
408  | 0  |     theKey->fIsPrimary = true;  | 
409  | 0  |     value->cachePtr = this;  | 
410  | 0  |     ++fNumValuesTotal;  | 
411  | 0  |     ++fNumValuesInUse;  | 
412  | 0  | }  | 
413  |  |  | 
414  |  | void UnifiedCache::_put(  | 
415  |  |         const UHashElement *element,  | 
416  |  |         const SharedObject *value,  | 
417  | 0  |         const UErrorCode status) const { | 
418  | 0  |     U_ASSERT(_inProgress(element));  | 
419  | 0  |     const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;  | 
420  | 0  |     const SharedObject *oldValue = (const SharedObject *) element->value.pointer;  | 
421  | 0  |     theKey->fCreationStatus = status;  | 
422  | 0  |     if (value->softRefCount == 0) { | 
423  | 0  |         _registerPrimary(theKey, value);  | 
424  | 0  |     }  | 
425  | 0  |     value->softRefCount++;  | 
426  | 0  |     UHashElement *ptr = const_cast<UHashElement *>(element);  | 
427  | 0  |     ptr->value.pointer = (void *) value;  | 
428  | 0  |     U_ASSERT(oldValue == fNoValue);  | 
429  | 0  |     removeSoftRef(oldValue);  | 
430  |  |  | 
431  |  |     // Tell waiting threads that the in-progress placeholder has been  | 
432  |  |     // replaced with either the final value or an error status.  | 
433  | 0  |     gInProgressValueAddedCond->notify_all();  | 
434  | 0  | }  | 
435  |  |  | 
436  |  | void UnifiedCache::_fetch(  | 
437  |  |         const UHashElement *element,  | 
438  |  |         const SharedObject *&value,  | 
439  | 0  |         UErrorCode &status) const { | 
440  | 0  |     const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;  | 
441  | 0  |     status = theKey->fCreationStatus;  | 
442  |  |  | 
443  |  |     // Since we have the cache lock, calling regular SharedObject add/removeRef  | 
444  |  |     // could cause us to deadlock on ourselves since they may need to lock  | 
445  |  |     // the cache mutex.  | 
446  | 0  |     removeHardRef(value);  | 
447  | 0  |     value = static_cast<const SharedObject *>(element->value.pointer);  | 
448  | 0  |     addHardRef(value);  | 
449  | 0  | }  | 
450  |  |  | 
451  |  |  | 
452  | 0  | UBool UnifiedCache::_inProgress(const UHashElement* element) const { | 
453  | 0  |     UErrorCode status = U_ZERO_ERROR;  | 
454  | 0  |     const SharedObject * value = NULL;  | 
455  | 0  |     _fetch(element, value, status);  | 
456  | 0  |     UBool result = _inProgress(value, status);  | 
457  | 0  |     removeHardRef(value);  | 
458  | 0  |     return result;  | 
459  | 0  | }  | 
460  |  |  | 
461  |  | UBool UnifiedCache::_inProgress(  | 
462  | 0  |         const SharedObject* theValue, UErrorCode creationStatus) const { | 
463  | 0  |     return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);  | 
464  | 0  | }  | 
465  |  |  | 
466  |  | UBool UnifiedCache::_isEvictable(const UHashElement *element) const  | 
467  | 0  | { | 
468  | 0  |     const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;  | 
469  | 0  |     const SharedObject *theValue =  | 
470  | 0  |             (const SharedObject *) element->value.pointer;  | 
471  |  |  | 
472  |  |     // Entries that are under construction are never evictable  | 
473  | 0  |     if (_inProgress(theValue, theKey->fCreationStatus)) { | 
474  | 0  |         return FALSE;  | 
475  | 0  |     }  | 
476  |  |  | 
477  |  |     // We can evict entries that are either not a primary or have just  | 
478  |  |     // one reference (the one reference being the cache's own soft reference).  | 
479  | 0  |     return (!theKey->fIsPrimary || (theValue->softRefCount == 1 && theValue->noHardReferences()));  | 
480  | 0  | }  | 
481  |  |  | 
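
Putting the two predicates together with the reference counting done by removeSoftRef()/addHardRef() below, a few illustrative entry states (assuming a primary entry, i.e. fIsPrimary == true):

    softRefCount == 1, hardRefCount == 0  ->  only the cache holds the value: evictable.
    softRefCount == 1, hardRefCount == 2  ->  two outside owners: kept, and counted once in fNumValuesInUse.
    softRefCount == 2, hardRefCount == 0  ->  a second key maps to the same value: the primary
                                              entry is kept; only the non-primary entry is evictable.
    value == fNoValue, creation status U_ZERO_ERROR  ->  in-progress placeholder: never evictable.
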
482  | 0  | void UnifiedCache::removeSoftRef(const SharedObject *value) const { | 
483  | 0  |     U_ASSERT(value->cachePtr == this);  | 
484  | 0  |     U_ASSERT(value->softRefCount > 0);  | 
485  | 0  |     if (--value->softRefCount == 0) { | 
486  | 0  |         --fNumValuesTotal;  | 
487  | 0  |         if (value->noHardReferences()) { | 
488  | 0  |             delete value;  | 
489  | 0  |         } else { | 
490  |  |             // This path only happens from _flush(all), which only happens from the  | 
491  |  |             // UnifiedCache destructor. Nulling out value->cachePtr changes the behavior  | 
492  |  |             // of value->removeRef(), causing the deletion to be done there.  | 
493  | 0  |             value->cachePtr = nullptr;  | 
494  | 0  |         }  | 
495  | 0  |     }  | 
496  | 0  | }  | 
497  |  |  | 
498  | 0  | int32_t UnifiedCache::removeHardRef(const SharedObject *value) const { | 
499  | 0  |     int refCount = 0;  | 
500  | 0  |     if (value) { | 
501  | 0  |         refCount = umtx_atomic_dec(&value->hardRefCount);  | 
502  | 0  |         U_ASSERT(refCount >= 0);  | 
503  | 0  |         if (refCount == 0) { | 
504  | 0  |             --fNumValuesInUse;  | 
505  | 0  |         }  | 
506  | 0  |     }  | 
507  | 0  |     return refCount;  | 
508  | 0  | }  | 
509  |  |  | 
510  | 0  | int32_t UnifiedCache::addHardRef(const SharedObject *value) const { | 
511  | 0  |     int refCount = 0;  | 
512  | 0  |     if (value) { | 
513  | 0  |         refCount = umtx_atomic_inc(&value->hardRefCount);  | 
514  | 0  |         U_ASSERT(refCount >= 1);  | 
515  | 0  |         if (refCount == 1) { | 
516  | 0  |             fNumValuesInUse++;  | 
517  | 0  |         }  | 
518  | 0  |     }  | 
519  | 0  |     return refCount;  | 
520  | 0  | }  | 
521  |  |  | 
522  |  | U_NAMESPACE_END  |