void TRI_Insert2ArrayJson (TRI_memory_zone_t* zone,
                           TRI_json_t* object,
                           char const* name,
                           TRI_json_t* subobject) {
  TRI_json_t copy;
  char* att;
  size_t length;

  TRI_ASSERT(object->_type == TRI_JSON_ARRAY);

  if (subobject == NULL) {
    return;
  }

  if (TRI_ReserveVector(&object->_value._objects, 2) != TRI_ERROR_NO_ERROR) {
    // TODO: signal OOM here
    return;
  }

  length = strlen(name);
  att = TRI_DuplicateString2Z(zone, name, length);

  if (att == NULL) {
    // TODO: signal OOM here
    return;
  }

  // attribute name
  InitString(&copy, att, length);
  TRI_PushBackVector(&object->_value._objects, &copy);

  // attribute value
  TRI_PushBackVector(&object->_value._objects, subobject);
}
void TRI_Insert4ArrayJson (TRI_memory_zone_t* zone,
                           TRI_json_t* object,
                           char* name,
                           size_t nameLength,
                           TRI_json_t* subobject) {
  TRI_json_t copy;

  copy._type = TRI_JSON_STRING;
  copy._value._string.length = nameLength + 1; // including '\0'
  copy._value._string.data = name;

  TRI_PushBackVector(&object->_value._objects, &copy);
  TRI_PushBackVector(&object->_value._objects, subobject);

  // the vector stores the TRI_json_t struct by value, so only the heap
  // wrapper of the subobject is freed here; its contents now belong to
  // the array
  TRI_Free(zone, subobject);
}
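// Note on the layout both insert functions above rely on: an object's
// attributes live in a single flat vector, with attribute i's name stored at
// index 2*i and its value at index 2*i + 1 (hence the ReserveVector(..., 2)
// call in the first variant). The helper below is a hypothetical illustration
// of a lookup against that layout, not part of the original source; it assumes
// TRI_LengthVector and TRI_AtVector behave as used elsewhere in this file.
static TRI_json_t* LookupAttributeJson (TRI_json_t* object, char const* name) {
  size_t i;
  size_t n = TRI_LengthVector(&object->_value._objects);

  for (i = 0; i < n; i += 2) {
    TRI_json_t* key = (TRI_json_t*) TRI_AtVector(&object->_value._objects, i);

    if (key->_type == TRI_JSON_STRING &&
        strcmp(key->_value._string.data, name) == 0) {
      // the value immediately follows its name
      return (TRI_json_t*) TRI_AtVector(&object->_value._objects, i + 1);
    }
  }

  return NULL;
}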
int TRI_InsertBlockerCompactorVocBase (TRI_vocbase_t* vocbase,
                                       double lifetime,
                                       TRI_voc_tick_t* id) {
  compaction_blocker_t blocker;
  int res;

  if (lifetime <= 0.0) {
    return TRI_ERROR_BAD_PARAMETER;
  }

  blocker._id      = TRI_NewTickServer();
  blocker._expires = TRI_microtime() + lifetime;

  LockCompaction(vocbase);
  res = TRI_PushBackVector(&vocbase->_compactionBlockers._data, &blocker);
  UnlockCompaction(vocbase);

  if (res != TRI_ERROR_NO_ERROR) {
    return res;
  }

  *id = blocker._id;

  return TRI_ERROR_NO_ERROR;
}
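// Usage sketch for the blocker API above: a caller registers a blocker to keep
// the compactor away for a bounded time, does its work, and releases the
// blocker by id. The release call TRI_RemoveBlockerCompactorVocBase is assumed
// here from the surrounding vocbase code; the blocker would also expire on its
// own after `lifetime` seconds, so explicit removal is merely the fast path.
static int WithCompactionBlocked (TRI_vocbase_t* vocbase, void (*work)(void)) {
  TRI_voc_tick_t blockerId;
  int res = TRI_InsertBlockerCompactorVocBase(vocbase, 30.0, &blockerId);

  if (res != TRI_ERROR_NO_ERROR) {
    return res;
  }

  work(); // anything that must not run concurrently with compaction

  return TRI_RemoveBlockerCompactorVocBase(vocbase, blockerId);
}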
void TRI_Insert2ObjectJson (TRI_memory_zone_t* zone,
                            TRI_json_t* object,
                            char const* name,
                            TRI_json_t const* subobject) {
  TRI_ASSERT(object->_type == TRI_JSON_OBJECT);

  if (subobject == nullptr) {
    return;
  }

  if (TRI_ReserveVector(&object->_value._objects, 2) != TRI_ERROR_NO_ERROR) {
    // TODO: signal OOM here
    return;
  }

  size_t length = strlen(name);
  char* att = TRI_DuplicateString2Z(zone, name, length);

  if (att == nullptr) {
    // TODO: signal OOM here
    return;
  }

  // create attribute name in place
  TRI_json_t* next = static_cast<TRI_json_t*>(TRI_NextVector(&object->_value._objects));
  // we have made sure above with the reserve call that the vector has enough capacity
  TRI_ASSERT(next != nullptr);

  InitString(next, att, length);

  // attribute value
  TRI_PushBackVector(&object->_value._objects, subobject);
}
int TRI_PushBack2ArrayJson (TRI_json_t* array, TRI_json_t const* object) {
  TRI_ASSERT(array != nullptr);
  TRI_ASSERT(array->_type == TRI_JSON_ARRAY);
  TRI_ASSERT(object != nullptr);

  return TRI_PushBackVector(&array->_value._objects, object);
}
void TRI_PushBackListJson (TRI_memory_zone_t* zone, TRI_json_t* list, TRI_json_t* object) {
  TRI_json_t copy;

  assert(list->_type == TRI_JSON_LIST);

  TRI_CopyToJson(zone, &copy, object);
  TRI_PushBackVector(&list->_value._objects, &copy);
}
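// The two list push-back variants above differ in ownership semantics:
// TRI_PushBack2ListJson stores the caller's TRI_json_t struct by value, so the
// list takes over any contained pointers, while TRI_PushBackListJson
// deep-copies via TRI_CopyToJson and leaves the caller's value untouched.
// A hedged sketch of the consequence for callers; the free-only-the-wrapper
// pattern is taken from TRI_Insert4ArrayJson above, and TRI_DestroyJson is an
// assumed destructor name for releasing a value's contents:
/*
  TRI_PushBack2ListJson(list, heapValue);
  TRI_Free(zone, heapValue);         // free only the wrapper; the list owns the contents

  TRI_PushBackListJson(zone, list, &stackValue);
  TRI_DestroyJson(zone, &stackValue); // safe: the list holds an independent deep copy
*/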
void TRI_Insert2ArrayJson (TRI_memory_zone_t* zone,
                           TRI_json_t* object,
                           char const* name,
                           TRI_json_t* subobject) {
  TRI_json_t copy;
  size_t length;

  assert(object->_type == TRI_JSON_ARRAY);

  if (subobject == NULL) {
    return;
  }

  length = strlen(name);

  copy._type = TRI_JSON_STRING;
  copy._value._string.length = length + 1; // including '\0'
  copy._value._string.data = TRI_DuplicateString2Z(zone, name, length);

  TRI_PushBackVector(&object->_value._objects, &copy);
  TRI_PushBackVector(&object->_value._objects, subobject);
}
static void CompactifySimCollection (TRI_sim_collection_t* sim) {
  TRI_vector_t vector;
  size_t n;
  size_t i;

  if (! TRI_TRY_READ_LOCK_DATAFILES_SIM_COLLECTION(sim)) {
    return;
  }

  TRI_InitVector(&vector, TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_datafile_info_t));

  // copy datafile information
  n = sim->base.base._datafiles._length;

  for (i = 0; i < n; ++i) {
    TRI_datafile_t* df;
    TRI_doc_datafile_info_t* dfi;

    df = sim->base.base._datafiles._buffer[i];
    dfi = TRI_FindDatafileInfoDocCollection(&sim->base, df->_fid);

    TRI_PushBackVector(&vector, dfi);
  }

  TRI_READ_UNLOCK_DATAFILES_SIM_COLLECTION(sim);

  // handle datafiles with dead objects
  for (i = 0; i < vector._length; ++i) {
    TRI_doc_datafile_info_t* dfi;

    dfi = TRI_AtVector(&vector, i);

    if (dfi->_numberDead == 0) {
      continue;
    }

    LOG_DEBUG("datafile = %lu, alive = %lu / %lu, dead = %lu / %lu, deletions = %lu",
              (unsigned long) dfi->_fid,
              (unsigned long) dfi->_numberAlive,
              (unsigned long) dfi->_sizeAlive,
              (unsigned long) dfi->_numberDead,
              (unsigned long) dfi->_sizeDead,
              (unsigned long) dfi->_numberDeletion);

    CompactifyDatafile(sim, dfi->_fid);
  }

  // cleanup local variables
  TRI_DestroyVector(&vector);
}
void TRI_Insert4ArrayJson (TRI_memory_zone_t* zone,
                           TRI_json_t* object,
                           char* name,
                           size_t nameLength,
                           TRI_json_t* subobject,
                           bool asReference) {
  TRI_json_t copy;

  TRI_ASSERT(name != NULL);

  // attribute name
  if (asReference) {
    InitStringReference(&copy, name, nameLength);
  }
  else {
    InitString(&copy, name, nameLength);
  }

  if (TRI_ReserveVector(&object->_value._objects, 2) != TRI_ERROR_NO_ERROR) {
    // TODO: signal OOM here
    return;
  }

  TRI_PushBackVector(&object->_value._objects, &copy);

  // attribute value
  TRI_PushBackVector(&object->_value._objects, subobject);
}
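// The asReference flag above selects between two string representations:
// InitStringReference stores `name` as a borrowed pointer (the caller must
// keep it alive for the lifetime of the JSON value and remains responsible
// for freeing it), whereas InitString hands ownership of `name` to the JSON
// value. A sketch of the borrowed case, assuming this owned/borrowed split
// (string literals have static storage, so no copy is needed):
/*
  static char const KEY[] = "_key";

  TRI_Insert4ArrayJson(zone, object, (char*) KEY, strlen(KEY), subobject, true);
*/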
int TRI_PushBack2ListJson (TRI_json_t* list, TRI_json_t* object) {
  assert(list->_type == TRI_JSON_LIST);
  assert(object);

  return TRI_PushBackVector(&list->_value._objects, object);
}
static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
  TRI_primary_collection_t* primary;
  TRI_vector_t vector;
  int64_t numAlive;
  size_t i, n;
  bool compactNext;

  compactNext = false;
  primary = &document->base;

  // if we cannot acquire the read lock instantly, we will exit directly.
  // otherwise we'll risk a multi-thread deadlock between synchroniser,
  // compactor and data-modification threads (e.g. POST /_api/document)
  if (! TRI_TRY_READ_LOCK_DATAFILES_DOC_COLLECTION(primary)) {
    return false;
  }

  n = primary->base._datafiles._length;

  if (primary->base._compactors._length > 0 || n == 0) {
    // we have already created a compactor file in progress.
    // if this happens, then a previous compaction attempt for this collection failed.
    // additionally, if there are no datafiles, then there's no need to compact
    TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
    return false;
  }

  // copy datafile information
  TRI_InitVector(&vector, TRI_UNKNOWN_MEM_ZONE, sizeof(compaction_info_t));
  numAlive = 0;

  for (i = 0; i < n; ++i) {
    TRI_datafile_t* df;
    TRI_doc_datafile_info_t* dfi;
    compaction_info_t compaction;
    bool shouldCompact;

    df = primary->base._datafiles._buffer[i];
    assert(df != NULL);

    dfi = TRI_FindDatafileInfoPrimaryCollection(primary, df->_fid, true);

    if (dfi == NULL) {
      continue;
    }

    shouldCompact = false;

    if (! compactNext && df->_maximalSize < COMPACTOR_MIN_SIZE && i < n - 1) {
      // very small datafile. let's compact it so it's merged with others
      shouldCompact = true;
      compactNext = true;
    }
    else if (numAlive == 0 && dfi->_numberDeletion > 0) {
      // compact the first datafile already if it has got some deletions
      shouldCompact = true;
      compactNext = true;
    }
    else {
      // in all other cases, only check the number and size of "dead" objects
      if (dfi->_sizeDead >= (int64_t) COMPACTOR_DEAD_SIZE_THRESHOLD) {
        shouldCompact = true;
        compactNext = true;
      }
      else if (dfi->_sizeDead > 0) {
        // the size of dead objects is above zero but below the absolute threshold
        double share = (double) dfi->_sizeDead / ((double) dfi->_sizeDead + (double) dfi->_sizeAlive);

        if (share >= COMPACTOR_DEAD_SIZE_SHARE) {
          // the share of dead objects is above the relative threshold
          shouldCompact = true;
          compactNext = true;
        }
      }
    }

    if (! shouldCompact) {
      // only use those datafiles that contain dead objects
      if (! compactNext) {
        numAlive += (int64_t) dfi->_numberAlive;
        continue;
      }
    }

    LOG_TRACE("found datafile eligible for compaction. fid: %llu, size: %llu, "
              "numberDead: %llu, numberAlive: %llu, numberTransaction: %llu, numberDeletion: %llu, "
              "sizeDead: %llu, sizeAlive: %llu, sizeTransaction: %llu",
              (unsigned long long) df->_fid,
              (unsigned long long) df->_maximalSize,
              (unsigned long long) dfi->_numberDead,
              (unsigned long long) dfi->_numberAlive,
              (unsigned long long) dfi->_numberTransaction,
              (unsigned long long) dfi->_numberDeletion,
              (unsigned long long) dfi->_sizeDead,
              (unsigned long long) dfi->_sizeAlive,
              (unsigned long long) dfi->_sizeTransaction);

    compaction._datafile = df;
    compaction._keepDeletions = (numAlive > 0 && i > 0);

    TRI_PushBackVector(&vector, &compaction);

    // we stop at the first few datafiles.
    // this is better than going over all datafiles in a collection in one go,
    // because the compactor is single-threaded, and collecting all datafiles
    // might take a long time (it might even be that there is a request to
    // delete the collection in the middle of compaction, but the compactor
    // will not pick this up as it is read-locking the collection status)
    if (TRI_LengthVector(&vector) >= COMPACTOR_MAX_FILES) {
      // found enough to compact
      break;
    }

    numAlive += (int64_t) dfi->_numberAlive;
  }

  // we can now continue without the lock
  TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

  if (vector._length == 0) {
    // cleanup local variables
    TRI_DestroyVector(&vector);
    return false;
  }

  // handle datafiles with dead objects
  n = vector._length;
  assert(n >= 1);

  CompactifyDatafiles(document, &vector);

  // cleanup local variables
  TRI_DestroyVector(&vector);

  return true;
}
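// Worked example of the dead-size share check above: with sizeDead = 400 KB
// and sizeAlive = 600 KB, share = 400 / (400 + 600) = 0.4, so the datafile is
// picked for compaction whenever COMPACTOR_DEAD_SIZE_SHARE <= 0.4, even if
// sizeDead stays below COMPACTOR_DEAD_SIZE_THRESHOLD. The helper below merely
// restates that branch of CompactifyDocumentCollection for clarity; the
// COMPACTOR_* constants are defined elsewhere in this file.
static bool ShouldCompactByDeadSize (int64_t sizeDead, int64_t sizeAlive) {
  if (sizeDead >= (int64_t) COMPACTOR_DEAD_SIZE_THRESHOLD) {
    return true; // absolute threshold reached
  }

  if (sizeDead > 0) {
    double share = (double) sizeDead / ((double) sizeDead + (double) sizeAlive);

    return share >= COMPACTOR_DEAD_SIZE_SHARE; // relative threshold reached
  }

  return false;
}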
int TRI_PushBack2ListJson (TRI_json_t* list, TRI_json_t const* object) {
  TRI_ASSERT(list->_type == TRI_JSON_LIST);
  TRI_ASSERT(object);

  return TRI_PushBackVector(&list->_value._objects, object);
}
int BitarrayIndex_new (BitarrayIndex** baIndex,
                       TRI_memory_zone_t* memoryZone,
                       size_t cardinality,
                       TRI_vector_t* values,
                       bool supportUndef,
                       void* context) {
  int result;
  size_t numArrays;
  size_t j;

  // ...........................................................................
  // Some simple checks
  // ...........................................................................

  if (baIndex == NULL) {
    assert(false);
    return TRI_ERROR_INTERNAL;
  }

  // ...........................................................................
  // If the bit array index has already been created, return an internal error
  // ...........................................................................

  if (*baIndex != NULL) {
    return TRI_ERROR_INTERNAL;
  }

  // ...........................................................................
  // If the memory zone is invalid, then return an internal error
  // ...........................................................................

  if (memoryZone == NULL) {
    return TRI_ERROR_INTERNAL;
  }

  // ...........................................................................
  // Create the bit array index structure
  // ...........................................................................

  *baIndex = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(BitarrayIndex), true);

  if (*baIndex == NULL) {
    return TRI_ERROR_OUT_OF_MEMORY;
  }

  // ...........................................................................
  // Copy the values into this index
  // ...........................................................................

  TRI_InitVector(&((*baIndex)->_values), TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_json_t));

  for (j = 0; j < values->_length; ++j) {
    TRI_json_t value;

    TRI_CopyToJson(TRI_UNKNOWN_MEM_ZONE, &value, (TRI_json_t*) TRI_AtVector(values, j));
    TRI_PushBackVector(&((*baIndex)->_values), &value);
  }

  // ...........................................................................
  // Store whether or not the index supports 'undefined' documents (that is,
  // documents with attributes which do not match those of the index)
  // ...........................................................................

  (*baIndex)->_supportUndef = supportUndef;

  // ...........................................................................
  // Determine the number of bit columns which will comprise the bit array index
  // ...........................................................................

  numArrays = cardinality;

  // ...........................................................................
  // Create the bit arrays
  // ...........................................................................

  result = TRI_InitBitarray(&((*baIndex)->_bitarray), memoryZone, numArrays, NULL);

  // ...........................................................................
  // Return the result of creating the bit arrays
  // ...........................................................................

  return result;
}
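// Usage sketch for BitarrayIndex_new above: the caller supplies one TRI_json_t
// per distinct indexed value and a matching cardinality, so the index gets one
// bit column per value. Only the BitarrayIndex_new signature is taken from the
// code above; the wrapper itself is illustrative.
static int CreateExampleBitarrayIndex (TRI_vector_t* values, BitarrayIndex** out) {
  *out = NULL;

  return BitarrayIndex_new(out,
                           TRI_UNKNOWN_MEM_ZONE,
                           values->_length, // cardinality: one bit column per value
                           values,
                           true,            // supportUndef: also index documents whose
                                            // attributes do not match the index
                           NULL);           // context is unused by the function above
}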