/**
 * Releases all memory owned by the hash table: every chained entry,
 * the bucket array, and finally the table structure itself.
 * (Keys and values are not freed here — see hashtable_remove.)
 */
void hashtable_destroy(struct hashtable *h)
{
	unsigned int bucket;
	struct entry *curr;
	struct entry *victim;
	struct entry **buckets = h->table;

	for (bucket = 0; bucket < h->tablelength; bucket++)
	{
		curr = buckets[bucket];
		while (curr != NULL)
		{
			victim = curr;
			curr = curr->next;
			EXIP_MFREE(victim);
		}
	}
	EXIP_MFREE(h->table);
	EXIP_MFREE(h);
}
/**
 * Allocates and initializes a hash table with at least minsize buckets.
 *
 * @param minsize  requested minimum bucket count; rounded up to the next
 *                 prime from the primes[] table
 * @param hashfn   hash function applied to keys
 * @param eqfn     key equality predicate
 * @return newly allocated table, or NULL when minsize exceeds
 *         MAX_HASH_TABLE_SIZE or an allocation fails
 */
struct hashtable * create_hashtable(unsigned int minsize, uint32_t (*hashfn) (String key), boolean (*eqfn) (const String str1, const String str2))
{
	struct hashtable *h;
	unsigned int pindex, size = primes[0];

	/* Check requested hashtable isn't too large */
	if (minsize > MAX_HASH_TABLE_SIZE)
		return NULL;

	/* Enforce size as prime */
	for (pindex = 0; pindex < prime_table_length; pindex++)
	{
		if (primes[pindex] >= minsize)
		{
			size = primes[pindex];
			break;
		}
	}

	/* FIX: if no prime in the table is >= minsize, the loop above exits with
	 * pindex == prime_table_length; storing that in h->primeindex would later
	 * cause an out-of-bounds read of primes[primeindex + 1] in
	 * hashtable_expand(). Clamp to the largest available prime instead. */
	if (pindex == prime_table_length)
	{
		pindex = prime_table_length - 1;
		size = primes[pindex];
	}

	h = (struct hashtable *)EXIP_MALLOC(sizeof(struct hashtable));
	if (NULL == h)
		return NULL; /*oom*/
	h->table = (struct entry **)EXIP_MALLOC(sizeof(struct entry*) * size);
	if (NULL == h->table)
	{
		EXIP_MFREE(h);
		return NULL;
	} /*oom*/
	memset(h->table, 0, size * sizeof(struct entry *));
	h->tablelength = size;
	h->primeindex = pindex;
	h->entrycount = 0;
	h->hashfn = hashfn;
	h->eqfn = eqfn;
	h->loadlimit = CEIL(size * max_load_factor);
	return h;
}
/**
 * Removes the entry matching key from the table and returns its stored value.
 *
 * @return the removed entry's value, or INDEX_MAX when no entry matches
 */
Index hashtable_remove(struct hashtable *h, String key)
{
	/* TODO: consider compacting the table when the load factor drops enough,
	 * or provide a 'compact' method. */
	struct entry **link;
	struct entry *cur;
	Index removedValue;
	uint32_t hashvalue = h->hashfn(key);
	unsigned int bucket = indexFor(h->tablelength, hashvalue);

	/* Walk the bucket chain keeping a pointer to the link that points at
	 * the current entry, so unlinking needs no special head-of-list case. */
	link = &(h->table[bucket]);
	for (cur = *link; cur != NULL; cur = *link)
	{
		/* Check hash value to short circuit heavier comparison */
		if (hashvalue == cur->hash && h->eqfn(key, cur->key))
		{
			*link = cur->next;
			h->entrycount--;
			removedValue = cur->value;
			EXIP_MFREE(cur);
			return removedValue;
		}
		link = &(cur->next);
	}
	return INDEX_MAX;
}
static int hashtable_expand(struct hashtable *h) { /* Double the size of the table to accommodate more entries */ struct entry **newtable; struct entry *e; struct entry **pE; unsigned int newsize, i, index; /* Check we're not hitting max capacity */ if (h->primeindex == (prime_table_length - 1) || primes[h->primeindex + 1] > MAX_HASH_TABLE_SIZE) return 0; newsize = primes[++(h->primeindex)]; newtable = (struct entry **)EXIP_MALLOC(sizeof(struct entry*) * newsize); if (NULL != newtable) { memset(newtable, 0, newsize * sizeof(struct entry *)); /* This algorithm is not 'stable'. ie. it reverses the list * when it transfers entries between the tables */ for (i = 0; i < h->tablelength; i++) { while (NULL != (e = h->table[i])) { h->table[i] = e->next; index = indexFor(newsize,e->hash); e->next = newtable[index]; newtable[index] = e; } } EXIP_MFREE(h->table); h->table = newtable; } /* Plan B: realloc instead */ else { newtable = (struct entry **) EXIP_REALLOC(h->table, newsize * sizeof(struct entry *)); if (NULL == newtable) { (h->primeindex)--; return 0; } h->table = newtable; memset(newtable[h->tablelength], 0, newsize - h->tablelength); for (i = 0; i < h->tablelength; i++) { for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) { index = indexFor(newsize,e->hash); if (index == i) { pE = &(e->next); } else { *pE = e->next; e->next = newtable[index]; newtable[index] = e; } } } } h->tablelength = newsize; h->loadlimit = CEIL(newsize * max_load_factor); return -1; }
/**
 * Frees every allocation tracked by the list, then releases the block
 * headers themselves. Only the last block may be partially filled (up to
 * currAllocSlot); all earlier blocks hold ALLOCATION_ARRAY_SIZE pointers.
 */
void freeAllocList(AllocList* list)
{
	struct allocBlock* block = list->firstBlock;

	while (block != NULL)
	{
		struct allocBlock* next = block->nextBlock;
		unsigned int slot;
		/* A block followed by another one is full; the tail block is
		 * filled only up to currAllocSlot. */
		unsigned int used = (next != NULL) ? ALLOCATION_ARRAY_SIZE : list->currAllocSlot;

		for (slot = 0; slot < used; slot++)
			EXIP_MFREE(block->allocation[slot]);

		EXIP_MFREE(block);
		block = next;
	}
}
END_TEST

/* Unit test (Check framework): verifies that createPfxTable allocates a
 * prefix table that starts out empty and that its string storage pointer
 * is valid. The table is released directly with EXIP_MFREE afterwards. */
START_TEST (test_createPfxTable)
{
	PfxTable* pfxTable;
	errorCode err = EXIP_UNEXPECTED_ERROR;

	err = createPfxTable(&pfxTable);
	fail_unless (err == EXIP_OK, "createPfxTable returns error code %d", err);
	fail_unless (pfxTable->count == 0, "createPfxTable populates the pfxTable with count: %d", pfxTable->count);
	/* NOTE(review): assumes pfxStr is initialized by createPfxTable to a
	 * non-NULL storage area — confirm against the PfxTable definition. */
	fail_if(pfxTable->pfxStr == NULL);

	EXIP_MFREE(pfxTable);
}
/**
 * Pops the top grammar off the grammar stack.
 *
 * On an empty stack *grammar is set to NULL; otherwise *grammar receives
 * the grammar of the top node, the stack head advances, and the popped
 * stack node is freed (the grammar itself is not freed).
 */
void popGrammar(EXIGrammarStack** gStack, EXIGrammar** grammar)
{
	struct GrammarStackNode* top;

	if (*gStack == NULL)
	{
		*grammar = NULL;
		return;
	}

	top = *gStack;
	*gStack = top->nextInStack;
	*grammar = top->grammar;
	EXIP_MFREE(top);
}
/**
 * Decodes an EXI Binary value: an unsigned-integer length prefix followed
 * by that many octets read 8 bits at a time.
 *
 * @param strm        input stream
 * @param binary_val  [out] newly allocated buffer holding the octets; the
 *                    caller must free it after the content handler is
 *                    invoked. NULL when the value is zero-length.
 * @param nbytes      [out] number of octets decoded
 * @return EXIP_OK on success, or the propagated error code
 */
errorCode decodeBinary(EXIStream* strm, char** binary_val, Index* nbytes)
{
	errorCode tmp_err_code = EXIP_UNEXPECTED_ERROR;
	UnsignedInteger length = 0;
	unsigned int int_val = 0;
	UnsignedInteger i = 0;

	DEBUG_MSG(INFO, DEBUG_STREAM_IO, (">> (binary)"));

	TRY(decodeUnsignedInteger(strm, &length));
	*nbytes = (Index) length;

	/* FIX: for a zero-length binary value EXIP_MALLOC(0) may legitimately
	 * return NULL, which the old code misreported as an out-of-memory
	 * error. Report an empty value (NULL buffer, 0 bytes) instead. */
	if (length == 0)
	{
		*binary_val = NULL;
		return EXIP_OK;
	}

	(*binary_val) = (char*) EXIP_MALLOC(length); // This memory should be manually freed after the content handler is invoked
	if ((*binary_val) == NULL)
		return EXIP_MEMORY_ALLOCATION_ERROR;

	for (i = 0; i < length; i++)
	{
		/* On a read failure release the partially filled buffer before
		 * propagating the error. */
		TRY_CATCH(readBits(strm, 8, &int_val), EXIP_MFREE(*binary_val));
		(*binary_val)[i] = (char) int_val;
	}
	return EXIP_OK;
}
/**
 * Adds a string value to the stream's value (string) table, maintaining the
 * per-local-name value cross table and the optional hash table.
 *
 * The value table is a ring bounded by valuePartitionCapacity: once globalId
 * wraps past the capacity, new values overwrite the oldest entries and the
 * stale cross-table/hash-table references are invalidated first.
 *
 * NOTE(review): ownership of valueStr.str appears to transfer to the value
 * table (the replaced entry's .str is freed here) — confirm with callers.
 *
 * @return EXIP_OK on success, or a propagated allocation/insertion error
 */
errorCode addValueEntry(EXIStream* strm, String valueStr, QNameID qnameID)
{
	errorCode tmp_err_code = EXIP_UNEXPECTED_ERROR;
	ValueEntry* valueEntry = NULL;
	Index valueEntryId;
#if VALUE_CROSSTABLE_USE
	Index vxEntryId;
	{
		struct LnEntry* lnEntry;
		VxEntry vxEntry;

		// Find the local name entry from QNameID
		lnEntry = &GET_LN_URI_QNAME(strm->schema->uriTable, qnameID);

		// Add entry to the local name entry's value cross table (vxTable)
		if(lnEntry->vxTable == NULL)
		{
			// Lazily allocated from the stream's managed memory list
			lnEntry->vxTable = memManagedAllocate(&strm->memList, sizeof(VxTable));
			if(lnEntry->vxTable == NULL)
				return EXIP_MEMORY_ALLOCATION_ERROR;

			// First value entry - create the vxTable
			TRY(createDynArray(&lnEntry->vxTable->dynArray, sizeof(VxEntry), DEFAULT_VX_ENTRIES_NUMBER));
		}

		assert(lnEntry->vxTable->vx);

		// Set the global ID in the value cross table entry
		vxEntry.globalId = strm->valueTable.globalId;

		// Add the entry
		TRY(addDynEntry(&lnEntry->vxTable->dynArray, (void*) &vxEntry, &vxEntryId));
	}
#endif
	// If the global ID is less than the actual array size, we must have wrapped around
	// In this case, we must reuse an existing entry
	if(strm->valueTable.globalId < strm->valueTable.count)
	{
		// Get the existing value entry
		valueEntry = &strm->valueTable.value[strm->valueTable.globalId];
#if VALUE_CROSSTABLE_USE
		assert(GET_LN_URI_QNAME(strm->schema->uriTable, valueEntry->locValuePartition.forQNameId).vxTable);
		// Null out the existing cross table entry (INDEX_MAX marks it stale)
		GET_LN_URI_QNAME(strm->schema->uriTable, valueEntry->locValuePartition.forQNameId).vxTable->vx[valueEntry->locValuePartition.vxEntryId].globalId = INDEX_MAX;
#endif
#if HASH_TABLE_USE
		// Remove existing value string from hash table (if present)
		if(strm->valueTable.hashTbl != NULL)
		{
			hashtable_remove(strm->valueTable.hashTbl, valueEntry->valueStr);
		}
#endif
		// Free the memory allocated by the previous string entry
		EXIP_MFREE(valueEntry->valueStr.str);
	}
	else
	{
		// We are filling up the array and have not wrapped round yet
		// See http://www.w3.org/TR/exi/#encodingOptimizedForMisses
		TRY(addEmptyDynEntry(&strm->valueTable.dynArray, (void**)&valueEntry, &valueEntryId));
	}

	// Set the value entry fields
	valueEntry->valueStr = valueStr;
#if VALUE_CROSSTABLE_USE
	valueEntry->locValuePartition.forQNameId = qnameID;
	valueEntry->locValuePartition.vxEntryId = vxEntryId;
#endif

#if HASH_TABLE_USE
	// Add value string to hash table (if present)
	if(strm->valueTable.hashTbl != NULL)
	{
		TRY(hashtable_insert(strm->valueTable.hashTbl, valueStr, strm->valueTable.globalId));
	}
#endif

	// Increment global ID
	strm->valueTable.globalId++;

	// The value table is limited by valuePartitionCapacity. If we have exceeded, we wrap around
	// to the beginning of the value table and null out existing IDs in the corresponding
	// cross table IDs
	if(strm->valueTable.globalId == strm->header.opts.valuePartitionCapacity)
		strm->valueTable.globalId = 0;

	return EXIP_OK;
}
/**
 * Releases the element buffer of a dynamic array.
 *
 * NOTE(review): relies on the memory layout where the data pointer is
 * stored immediately after the DynArray header (at dynArray + 1) —
 * confirm against the DynArray/containing-struct definitions. The
 * DynArray structure itself is NOT freed here, only the buffer it
 * points to.
 */
void destroyDynArray(DynArray* dynArray)
{
	void** base = (void **)(dynArray + 1);
	EXIP_MFREE(*base);
}
/**
 * Releases all memory associated with an EXI stream: dynamically built
 * (built-in) grammars, the value cross tables, the value string table and
 * its hash table, and — when the schema is an empty non-schema-informed
 * container — the string tables and the schema's own allocation list.
 */
void freeAllMem(EXIStream* strm)
{
	Index g, i, j;
	DynGrammarRule* tmp_rule;

	// Explicitly free the memory for any built-in grammars
	for(g = strm->schema->staticGrCount; g < strm->schema->grammarTable.count; g++)
	{
		for(i = 0; i < strm->schema->grammarTable.grammar[g].count; i++)
		{
			tmp_rule = &((DynGrammarRule*) strm->schema->grammarTable.grammar[g].rule)[i];
			for(j = 0; j < 3; j++)
			{
				// FIX: the original had a stray empty if-body
				// ("if(... != NULL) {}") followed by an unconditional free;
				// the guard was clearly meant to cover the free itself.
				if(tmp_rule->part[j].prod != NULL)
					EXIP_MFREE(tmp_rule->part[j].prod);
			}
		}
		EXIP_MFREE(strm->schema->grammarTable.grammar[g].rule);
	}
	strm->schema->grammarTable.count = strm->schema->staticGrCount;

	// Freeing the value cross tables
	for(i = 0; i < strm->schema->uriTable.count; i++)
	{
		for(j = 0; j < strm->schema->uriTable.uri[i].lnTable.count; j++)
		{
			if(GET_LN_URI_IDS(strm->schema->uriTable, i, j).vxTable.vx != NULL)
			{
				destroyDynArray(&GET_LN_URI_IDS(strm->schema->uriTable, i, j).vxTable.dynArray);
			}
			strm->schema->uriTable.uri[i].lnTable.ln[j].vxTable.vx = NULL;
			strm->schema->uriTable.uri[i].lnTable.ln[j].vxTable.count = 0;
		}
	}

	// Hash tables are freed separately
	// #DOCUMENT#
#if HASH_TABLE_USE == ON
	if(strm->valueTable.hashTbl != NULL)
		hashtable_destroy(strm->valueTable.hashTbl);
#endif

	// Freeing the value table if present
	// (FIX: dropped the inner "Index i" declaration that shadowed the outer i)
	if(strm->valueTable.value != NULL)
	{
		for(i = 0; i < strm->valueTable.count; i++)
		{
			EXIP_MFREE(strm->valueTable.value[i].valueStr.str);
		}
		destroyDynArray(&strm->valueTable.dynArray);
	}

	// In case a default schema was used for this stream
	if(strm->schema->staticGrCount <= SIMPLE_TYPE_COUNT)
	{
		// No schema-informed grammars. This is an empty EXIPSchema container that needs to be freed
		// Freeing the string tables
		for(i = 0; i < strm->schema->uriTable.count; i++)
		{
			if(strm->schema->uriTable.uri[i].pfxTable != NULL)
				EXIP_MFREE(strm->schema->uriTable.uri[i].pfxTable);
			destroyDynArray(&strm->schema->uriTable.uri[i].lnTable.dynArray);
		}
		destroyDynArray(&strm->schema->uriTable.dynArray);
		destroyDynArray(&strm->schema->grammarTable.dynArray);
		if(strm->schema->simpleTypeTable.sType != NULL)
			destroyDynArray(&strm->schema->simpleTypeTable.dynArray);
		freeAllocList(&strm->schema->memList);
	}

	freeAllocList(&(strm->memList));
}