/*
 * Purges cached UStringPrep profiles from the shared hashtable.
 * When noRefCount is TRUE every entry is deleted; otherwise only
 * entries whose reference count has dropped to zero are deleted.
 * Returns the number of entries removed (0 if the table was never
 * lazily created).  Takes usprepMutex for the duration of the sweep.
 */
static int32_t
usprep_internal_flushCache(UBool noRefCount){
    UStringPrepProfile *profile;
    UStringPrepKey *key;
    int32_t pos = -1;
    int32_t deletedNum = 0;
    const UHashElement *e;

    umtx_lock(&usprepMutex);

    /* Shared data has not been lazily evaluated yet: nothing to flush. */
    if (SHARED_DATA_HASHTABLE == NULL) {
        umtx_unlock(&usprepMutex);
        return 0;
    }

    /* Enumerate every element; removal during uhash iteration is supported. */
    while ((e = uhash_nextElement(SHARED_DATA_HASHTABLE, &pos)) != NULL) {
        profile = (UStringPrepProfile *) e->value.pointer;
        key = (UStringPrepKey *) e->key.pointer;

        /* Delete unconditionally, or only when no client still holds a reference. */
        if (noRefCount == TRUE || (noRefCount == FALSE && profile->refCount == 0)) {
            deletedNum++;
            uhash_removeElement(SHARED_DATA_HASHTABLE, e);

            /* unload the data */
            usprep_unload(profile);

            if (key->name != NULL) {
                uprv_free(key->name);
                key->name = NULL;
            }
            if (key->path != NULL) {
                uprv_free(key->path);
                key->path = NULL;
            }
            uprv_free(profile);
            uprv_free(key);
        }
    }
    umtx_unlock(&usprepMutex);

    return deletedNum;
}
/*
 * Empties the hashtable by removing every element in turn.
 * Asserts that hash is non-NULL on entry and that the table is
 * empty on exit.
 */
U_CAPI void U_EXPORT2
uhash_removeAll(UHashtable *hash) {
    int32_t pos = -1;
    const UHashElement *e;
    U_ASSERT(hash != NULL);
    /* Skip the enumeration entirely when the table is already empty. */
    if (hash->count != 0) {
        for (;;) {
            e = uhash_nextElement(hash, &pos);
            if (e == NULL) {
                break;
            }
            uhash_removeElement(hash, e);
        }
    }
    U_ASSERT(hash->count == 0);
}
// Flushes the contents of the cache. If cache values hold references to other // cache values then _flush should be called in a loop until it returns FALSE. // On entry, gCacheMutex must be held. // On exit, those values with are evictable are flushed. If all is true // then every value is flushed even if it is not evictable. // Returns TRUE if any value in cache was flushed or FALSE otherwise. UBool UnifiedCache::_flush(UBool all) const { UBool result = FALSE; int32_t origSize = uhash_count(fHashtable); for (int32_t i = 0; i < origSize; ++i) { const UHashElement *element = _nextElement(); if (all || _isEvictable(element)) { const SharedObject *sharedObject = (const SharedObject *) element->value.pointer; uhash_removeElement(fHashtable, element); sharedObject->removeSoftRef(); result = TRUE; } } return result; }
// Flushes the contents of the cache. If cache values hold references to other // cache values then _flush should be called in a loop until it returns FALSE. // On entry, gCacheMutex must be held. // On exit, those values with only soft references are flushed. If all is true // then every value is flushed even if hard references are held. // Returns TRUE if any value in cache was flushed or FALSE otherwise. UBool UnifiedCache::_flush(UBool all) const { UBool result = FALSE; int32_t pos = UHASH_FIRST; const UHashElement *element = uhash_nextElement(fHashtable, &pos); for (; element != NULL; element = uhash_nextElement(fHashtable, &pos)) { const SharedObject *sharedObject = (const SharedObject *) element->value.pointer; if (all || sharedObject->allSoftReferences()) { uhash_removeElement(fHashtable, element); sharedObject->removeSoftRef(); result = TRUE; } } return result; }
// Run an eviction slice. // On entry, gCacheMutex must be held. // _runEvictionSlice runs a slice of the evict pipeline by examining the next // 10 entries in the cache round robin style evicting them if they are eligible. void UnifiedCache::_runEvictionSlice() const { int32_t maxItemsToEvict = _computeCountOfItemsToEvict(); if (maxItemsToEvict <= 0) { return; } for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) { const UHashElement *element = _nextElement(); if (_isEvictable(element)) { const SharedObject *sharedObject = (const SharedObject *) element->value.pointer; uhash_removeElement(fHashtable, element); sharedObject->removeSoftRef(); ++fAutoEvictedCount; if (--maxItemsToEvict == 0) { break; } } } }
// Flushes evictable entries from the cache (or all entries when `all` is
// true).  Examines at most the number of entries present at the start of
// the call.  Returns TRUE if at least one entry was flushed.
UBool UnifiedCache::_flush(UBool all) const {
    UBool anyFlushed = FALSE;
    const int32_t entriesAtStart = uhash_count(fHashtable);
    for (int32_t visited = 0; visited < entriesAtStart; ++visited) {
        const UHashElement *el = _nextElement();
        if (el == nullptr) {
            break;
        }
        // Keep entries that are neither forced out nor evictable.
        if (!all && !_isEvictable(el)) {
            continue;
        }
        const SharedObject *obj = (const SharedObject *) el->value.pointer;
        U_ASSERT(obj->cachePtr == this);
        uhash_removeElement(fHashtable, el);
        removeSoftRef(obj);   // Deletes obj when softRefCount goes to zero.
        anyFlushed = TRUE;
    }
    return anyFlushed;
}
U_CDECL_END

/**
 * Removes unreferenced cache entries whose last access is older than
 * CACHE_EXPIRATION. Must be called while holding the cache mutex.
 */
static void sweepCache() {
    const double now = (double)uprv_getUTCtime();
    int32_t pos = UHASH_FIRST;
    const UHashElement* elem;

    for (;;) {
        elem = uhash_nextElement(gTZGNCoreCache, &pos);
        if (elem == NULL) {
            break;
        }
        TZGNCoreRef *entry = (TZGNCoreRef *)elem->value.pointer;
        // Drop entries that are both unreferenced and expired.
        if (entry->refCount <= 0 && (now - entry->lastAccess) > CACHE_EXPIRATION) {
            uhash_removeElement(gTZGNCoreCache, elem);
        }
    }
}
/**
 * Analyzes the given identifier, recording the scripts it uses, a
 * representative "zero" character for each kind of decimal digit it
 * contains, and (for characters usable in more than one script) the
 * alternate script sets.  Any previous analysis is discarded via clear().
 *
 * @param identifier  the string to analyze
 * @param status      ICU error code; checked on entry and after each ICU
 *                    call, so this is a no-op on a pre-existing failure
 * @return            *this, for call chaining
 */
IdentifierInfo &IdentifierInfo::setIdentifier(const UnicodeString &identifier, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return *this;
    }
    *fIdentifier = identifier;
    clear();
    ScriptSet scriptsForCP;
    UChar32 cp;
    // Iterate one code point at a time; U16_LENGTH advances by 2 for
    // supplementary characters (surrogate pairs).
    for (int32_t i = 0; i < identifier.length(); i += U16_LENGTH(cp)) {
        cp = identifier.char32At(i);
        // Store a representative character for each kind of decimal digit
        if (u_charType(cp) == U_DECIMAL_DIGIT_NUMBER) {
            // Just store the zero character as a representative for comparison. Unicode guarantees it is cp - value
            fNumerics->add(cp - (UChar32)u_getNumericValue(cp));
        }
        UScriptCode extensions[500];
        int32_t extensionsCount = uscript_getScriptExtensions(cp, extensions, UPRV_LENGTHOF(extensions), &status);
        if (U_FAILURE(status)) {
            return *this;
        }
        // Build the set of scripts this code point may belong to, then
        // drop the Common/Inherited pseudo-scripts so only real scripts
        // remain.
        scriptsForCP.resetAll();
        for (int32_t j=0; j<extensionsCount; j++) {
            scriptsForCP.set(extensions[j], status);
        }
        scriptsForCP.reset(USCRIPT_COMMON, status);
        scriptsForCP.reset(USCRIPT_INHERITED, status);
        switch (scriptsForCP.countMembers()) {
          case 0:
            // Only Common/Inherited: contributes no script information.
            break;
          case 1:
            // Single script, record it.
            fRequiredScripts->Union(scriptsForCP);
            break;
          default:
            if (!fRequiredScripts->intersects(scriptsForCP)
                    && !uhash_geti(fScriptSetSet, &scriptsForCP)) {
                // If the set hasn't been added already, add it
                // (Add a copy, fScriptSetSet takes ownership of the copy.)
                uhash_puti(fScriptSetSet, new ScriptSet(scriptsForCP), 1, &status);
            }
            break;
        }
    }
    // Now make a final pass through ScriptSetSet to remove alternates that came before singles.
    // [Kana], [Kana Hira] => [Kana]
    // This is relatively infrequent, so doesn't have to be optimized.
    // We also compute any commonalities among the alternates.
    if (uhash_count(fScriptSetSet) > 0) {
        fCommonAmongAlternates->setAll();
        for (int32_t it = UHASH_FIRST;;) {
            const UHashElement *nextHashEl = uhash_nextElement(fScriptSetSet, &it);
            if (nextHashEl == NULL) {
                break;
            }
            ScriptSet *next = static_cast<ScriptSet *>(nextHashEl->key.pointer);
            // [Kana], [Kana Hira] => [Kana]
            if (fRequiredScripts->intersects(*next)) {
                // A required (single) script already covers this alternate
                // set, so the set is redundant; remove it.
                uhash_removeElement(fScriptSetSet, nextHashEl);
            } else {
                fCommonAmongAlternates->intersect(*next);
                // [[Arab Syrc Thaa]; [Arab Syrc]] => [[Arab Syrc]]
                for (int32_t otherIt = UHASH_FIRST;;) {
                    const UHashElement *otherHashEl = uhash_nextElement(fScriptSetSet, &otherIt);
                    if (otherHashEl == NULL) {
                        break;
                    }
                    ScriptSet *other = static_cast<ScriptSet *>(otherHashEl->key.pointer);
                    // If some other stored set is a proper subset of this
                    // one, the superset is redundant; remove it and stop
                    // scanning for this element.
                    if (next != other && next->contains(*other)) {
                        uhash_removeElement(fScriptSetSet, nextHashEl);
                        break;
                    }
                }
            }
        }
    }
    // Nothing left among the alternates => no commonality to report.
    if (uhash_count(fScriptSetSet) == 0) {
        fCommonAmongAlternates->resetAll();
    }
    return *this;
}
/*Frees all shared immutable objects that aren't referred to (reference count = 0) */
U_CAPI int32_t U_EXPORT2
ucnv_flushCache ()
{
    UConverterSharedData *mySharedData = NULL;
    int32_t pos;
    int32_t tableDeletedNum = 0;
    const UHashElement *e;
    UErrorCode status = U_ILLEGAL_ARGUMENT_ERROR;
    int32_t i, remaining;
    UTRACE_ENTRY_OC(UTRACE_UCNV_FLUSH_CACHE);

    /* Close the default converter without creating a new one so that everything
       will be flushed. */
    ucnv_close(u_getDefaultConverter(&status));

    /*if shared data hasn't even been lazy evaluated yet
     * return 0
     */
    if (SHARED_DATA_HASHTABLE == NULL) {
        UTRACE_EXIT_VALUE((int32_t)0);
        return 0;
    }

    /*creates an enumeration to iterate through every element in the
     * table
     *
     * Synchronization: holding cnvCacheMutex will prevent any other thread from
     *                  accessing or modifying the hash table during the iteration.
     *                  The reference count of an entry may be decremented by
     *                  ucnv_close while the iteration is in process, but this is
     *                  benign. It can't be incremented (in ucnv_createConverter())
     *                  because the sequence of looking up in the cache + incrementing
     *                  is protected by cnvCacheMutex.
     */
    umtx_lock(&cnvCacheMutex);
    /*
     * double loop: A delta/extension-only converter has a pointer to its base table's
     * shared data; the first iteration of the outer loop may see the delta converter
     * before the base converter, and unloading the delta converter may get the base
     * converter's reference counter down to 0.
     */
    i = 0;
    do {
        remaining = 0;
        pos = -1;
        while ((e = uhash_nextElement (SHARED_DATA_HASHTABLE, &pos)) != NULL)
        {
            mySharedData = (UConverterSharedData *) e->value.pointer;
            /*deletes only if reference counter == 0 */
            if (mySharedData->referenceCounter == 0)
            {
                tableDeletedNum++;

                UCNV_DEBUG_LOG("del",mySharedData->staticData->name,mySharedData);

                uhash_removeElement(SHARED_DATA_HASHTABLE, e);
                mySharedData->sharedDataCached = FALSE;
                ucnv_deleteSharedConverterData (mySharedData);
            } else {
                /* still referenced by some open converter; leave it cached */
                ++remaining;
            }
        }
    /* run the outer loop at most twice: a second pass happens only after the
     * first pass left entries behind, so base tables freed up by deleting
     * their delta converters in pass one can be collected in pass two */
    } while(++i == 1 && remaining > 0);
    umtx_unlock(&cnvCacheMutex);

    UTRACE_DATA1(UTRACE_INFO, "ucnv_flushCache() exits with %d converters remaining", remaining);

    ucnv_io_flushAvailableConverterCache();

    UTRACE_EXIT_VALUE(tableDeletedNum);

    return tableDeletedNum;
}