////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array shaper, releasing every attribute mapping and
/// shape it owns, then destroys the underlying base shaper
////////////////////////////////////////////////////////////////////////////////

void TRI_DestroyArrayShaper (TRI_shaper_t* shaper) {
  array_shaper_t* arrayShaper = (array_shaper_t*) shaper;
  size_t i;

  // release every attribute mapping registered with the shaper
  for (i = 0; i < arrayShaper->_attributes._length; ++i) {
    attribute_2_id_t* mapping = arrayShaper->_attributes._buffer[i];

    TRI_Free(shaper->_memoryZone, mapping);
  }

  TRI_DestroyAssociativePointer(&arrayShaper->_attributeNames);
  TRI_DestroyVectorPointer(&arrayShaper->_attributes);

  // release every shape registered with the shaper
  for (i = 0; i < arrayShaper->_shapes._length; ++i) {
    TRI_shape_t* ownedShape = arrayShaper->_shapes._buffer[i];

    TRI_Free(shaper->_memoryZone, ownedShape);
  }

  TRI_DestroyAssociativePointer(&arrayShaper->_shapeDictionary);
  TRI_DestroyVectorPointer(&arrayShaper->_shapes);

  // finally tear down the base shaper itself
  TRI_DestroyShaper(shaper);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all AST nodes registered in the context's node registry,
/// including any collection / for hints attached to them
////////////////////////////////////////////////////////////////////////////////

static void FreeNodes (TRI_aql_context_t* const context) {
  size_t remaining = context->_memory._nodes._length;

  // walk the registry back to front; NULL slots are simply skipped
  while (remaining > 0) {
    TRI_aql_node_t* node;

    --remaining;
    node = (TRI_aql_node_t*) context->_memory._nodes._buffer[remaining];

    if (node == NULL) {
      continue;
    }

    TRI_DestroyVectorPointer(&node->_members);

    if (node->_type == TRI_AQL_NODE_COLLECTION) {
      // free attached collection hint
      TRI_aql_collection_hint_t* hint = (TRI_aql_collection_hint_t*) (TRI_AQL_NODE_DATA(node));

      if (hint != NULL) {
        TRI_FreeCollectionHintAql(hint);
      }
    }
    else if (node->_type == TRI_AQL_NODE_FOR) {
      // free attached for hint
      TRI_aql_for_hint_t* hint = (TRI_aql_for_hint_t*) (TRI_AQL_NODE_DATA(node));

      if (hint != NULL) {
        TRI_FreeForHintScopeAql(hint);
      }
    }

    // free the node itself
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, node);
  }

  TRI_DestroyVectorPointer(&context->_memory._nodes);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees an AQL parser, including its scope/stack vectors and lexer.
///
/// Fix: the original dereferenced @a parser (destroying the _scopes and
/// _stack vectors) before its NULL check, so the guard could never protect
/// anything. The NULL check now happens first, making a NULL argument a
/// safe no-op.
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeParserAql (TRI_aql_parser_t* const parser) {
  if (parser == NULL) {
    // nothing to free
    return;
  }

  TRI_DestroyVectorPointer(&parser->_scopes);
  TRI_DestroyVectorPointer(&parser->_stack);

  // free lexer
  Ahuacatllex_destroy(parser->_scanner);

  TRI_Free(TRI_UNKNOWN_MEM_ZONE, parser);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all scopes tracked by the context, then destroys both the
/// scope registry and the current-scope stack
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeScopesAql (TRI_aql_context_t* const context) {
  size_t i;

  assert(context);

  // dispose of each registered scope in order
  for (i = 0; i < context->_memory._scopes._length; ++i) {
    TRI_aql_scope_t* current = (TRI_aql_scope_t*) TRI_AtVectorPointer(&context->_memory._scopes, i);

    FreeScope(current);
  }

  TRI_DestroyVectorPointer(&context->_memory._scopes);
  TRI_DestroyVectorPointer(&context->_currentScopes);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees a statement walker: its scope stack first, then the walker
/// structure itself
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeStatementWalkerAql (TRI_aql_statement_walker_t* const walker) {
  assert(walker);

  // dispose of the scope stack before releasing the walker
  TRI_DestroyVectorPointer(&walker->_currentScopes);

  TRI_Free(TRI_UNKNOWN_MEM_ZONE, walker);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees the globally shared dummy statement nodes.
///
/// Fix: the global pointers are now reset to NULL after being freed, so a
/// repeated call (e.g. double shutdown) is a harmless no-op instead of a
/// double-free.
////////////////////////////////////////////////////////////////////////////////

void TRI_GlobalFreeStatementListAql (void) {
  if (DummyNopNode != NULL) {
    TRI_DestroyVectorPointer(&DummyNopNode->_members);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, DummyNopNode);
    DummyNopNode = NULL;
  }

  if (DummyReturnEmptyNode != NULL) {
    // the return-empty node owns an embedded list node as its first member
    TRI_aql_node_t* list = TRI_AQL_NODE_MEMBER(DummyReturnEmptyNode, 0);

    TRI_DestroyVectorPointer(&list->_members);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, list);

    TRI_DestroyVectorPointer(&DummyReturnEmptyNode->_members);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, DummyReturnEmptyNode);
    DummyReturnEmptyNode = NULL;
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief destructor: releases all header blocks (allocated with new[])
/// and the vector that tracked them
////////////////////////////////////////////////////////////////////////////////

TRI_headers_t::~TRI_headers_t () {
  size_t const numBlocks = _blocks._length;

  for (size_t i = 0; i < numBlocks; ++i) {
    auto block = static_cast<TRI_doc_mptr_t*>(_blocks._buffer[i]);

    delete[] block;
  }

  TRI_DestroyVectorPointer(&_blocks);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees a single scope: its variable hash, any attached ranges,
/// its sort criteria, and finally the scope structure itself
////////////////////////////////////////////////////////////////////////////////

static void FreeScope (TRI_aql_scope_t* const scope) {
  size_t i;

  // release every variable stored in the scope's lookup hash
  // (iterate over all hash slots; empty slots hold NULL)
  for (i = 0; i < scope->_variables._nrAlloc; ++i) {
    TRI_aql_variable_t* variable = scope->_variables._table[i];

    if (variable != NULL) {
      TRI_FreeVariableAql(variable);
    }
  }

  TRI_DestroyAssociativePointer(&scope->_variables);

  // free ranges if set
  if (scope->_ranges != NULL) {
    TRI_FreeAccessesAql(scope->_ranges);
  }

  // release all sort criteria strings
  for (i = 0; i < scope->_sorts._length; ++i) {
    char* criterion = (char*) TRI_AtVectorPointer(&scope->_sorts, i);

    TRI_Free(TRI_UNKNOWN_MEM_ZONE, criterion);
  }

  TRI_DestroyVectorPointer(&scope->_sorts);

  TRI_Free(TRI_UNKNOWN_MEM_ZONE, scope);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees a statement list; a NULL argument is a no-op
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeStatementListAql (TRI_aql_statement_list_t* const list) {
  if (list != NULL) {
    TRI_DestroyVectorPointer(&list->_statements);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, list);
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys a simple headers structure, releasing every header block
/// and the vector tracking them
////////////////////////////////////////////////////////////////////////////////

void TRI_DestroySimpleHeaders (TRI_headers_t* h) {
  simple_headers_t* self = (simple_headers_t*) h;
  size_t n = self->_blocks._length;
  size_t i;

  for (i = 0; i < n; ++i) {
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, self->_blocks._buffer[i]);
  }

  TRI_DestroyVectorPointer(&self->_blocks);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all collection entries registered with the context
////////////////////////////////////////////////////////////////////////////////

static void FreeCollections (TRI_aql_context_t* const context) {
  size_t remaining = context->_collections._length;

  // iterate from the end; NULL entries are skipped
  while (remaining > 0) {
    TRI_aql_collection_t* collection;

    --remaining;
    collection = (TRI_aql_collection_t*) context->_collections._buffer[remaining];

    if (collection != NULL) {
      TRI_Free(TRI_UNKNOWN_MEM_ZONE, collection);
    }
  }

  TRI_DestroyVectorPointer(&context->_collections);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all strings registered with the context
////////////////////////////////////////////////////////////////////////////////

static void FreeStrings (TRI_aql_context_t* const context) {
  size_t remaining = context->_memory._strings._length;

  // iterate from the end; NULL entries are skipped
  while (remaining > 0) {
    void* string;

    --remaining;
    string = context->_memory._strings._buffer[remaining];

    if (string != NULL) {
      TRI_Free(TRI_UNKNOWN_MEM_ZONE, string);
    }
  }

  TRI_DestroyVectorPointer(&context->_memory._strings);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up documents by key in a non-unique hash index.
///
/// Returns a result set holding pointers to the matching documents. On an
/// empty lookup, or on out-of-memory, an empty result (_length == 0,
/// _documents == NULL) is returned.
///
/// Fix: the original returned _length == result._length together with
/// _documents == NULL when the allocation failed; any caller iterating over
/// _length entries would then dereference NULL. _length is now reset to 0
/// on the allocation-failure path.
////////////////////////////////////////////////////////////////////////////////

static TRI_index_result_t MultiHashIndex_find (TRI_hash_index_t* hashIndex,
                                               TRI_index_search_value_t* key) {
  TRI_vector_pointer_t result;
  TRI_index_result_t results;

  // .............................................................................
  // We can only use the LookupByKey method for non-unique hash indexes, since
  // we want more than one result returned!
  // .............................................................................

  result = TRI_LookupByKeyHashArrayMulti(&hashIndex->_hashArray, key);

  if (result._length == 0) {
    results._length = 0;
    results._documents = NULL;
  }
  else {
    size_t j;

    results._length = result._length;
    results._documents = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE,
                                      result._length * sizeof(TRI_doc_mptr_t*),
                                      false);

    if (results._documents == NULL) {
      // out of memory: report an empty result so callers never see a
      // non-zero length paired with a NULL documents pointer
      results._length = 0;

      TRI_DestroyVectorPointer(&result);
      return results;
    }

    for (j = 0; j < result._length; ++j) {
      results._documents[j] = ((TRI_hash_index_element_t*) (result._buffer[j]))->_document;
    }
  }

  TRI_DestroyVectorPointer(&result);
  return results;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief allocates and initialises a new scope of the requested type.
///
/// Returns NULL if memory allocation fails or the variable hash cannot be
/// initialised; in the latter case everything allocated so far is rolled
/// back.
////////////////////////////////////////////////////////////////////////////////

static TRI_aql_scope_t* CreateScope (TRI_aql_context_t* const context,
                                     const TRI_aql_scope_e type) {
  TRI_aql_scope_t* scope;
  int result;

  assert(context);

  scope = (TRI_aql_scope_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_aql_scope_t), false);

  if (scope == NULL) {
    // out of memory
    return NULL;
  }

  scope->_type          = NextType(context, type);
  scope->_ranges        = NULL;
  scope->_selfContained = true;
  scope->_empty         = false;
  scope->_level         = 0; // init with a dummy value

  // initialise the limit information
  scope->_limit._offset    = 0;
  scope->_limit._limit     = INT64_MAX;
  scope->_limit._hasFilter = false;
  scope->_limit._found     = 0;

  // if option "fullCount" is specified, we must ignore all limit optimisations
  scope->_limit._status = context->_fullCount
                        ? TRI_AQL_LIMIT_IGNORE
                        : TRI_AQL_LIMIT_UNDEFINED;

  TRI_InitVectorPointer(&scope->_sorts, TRI_UNKNOWN_MEM_ZONE);

  result = TRI_InitAssociativePointer(&scope->_variables,
                                      TRI_UNKNOWN_MEM_ZONE,
                                      &TRI_HashStringKeyAssociativePointer,
                                      &TRI_HashVariableAql,
                                      &TRI_EqualVariableAql,
                                      0);

  if (result != TRI_ERROR_NO_ERROR) {
    // roll back what was built so far
    TRI_DestroyVectorPointer(&scope->_sorts);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, scope);
    return NULL;
  }

  return scope;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all nodes held in the vector, then destroys the vector.
///
/// Nodes are removed from the end of the vector so no elements need to be
/// shifted. Fix: removed the redundant `if (i == 0) break;` inside the
/// loop — it merely duplicated the `while (i > 0)` condition.
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeNodeVectorQuery (TRI_vector_pointer_t* const memory) {
  size_t i = memory->_length;

  // free all nodes in vector, starting at the end
  // (prevents copying the remaining elements in vector)
  while (i > 0) {
    void* nodePtr;

    i--;
    nodePtr = TRI_RemoveVectorPointer(memory, i);
    TRI_FreeNodeQuery(nodePtr);
  }

  TRI_DestroyVectorPointer(memory);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees every datafile contained in the vector, then the vector
////////////////////////////////////////////////////////////////////////////////

static void FreeDatafilesVector (TRI_vector_pointer_t* const vector) {
  size_t i;

  assert(vector);

  for (i = 0; i < vector->_length; ++i) {
    TRI_datafile_t* datafile = (TRI_datafile_t*) vector->_buffer[i];

    LOG_TRACE("freeing collection datafile");

    assert(datafile);
    TRI_FreeDatafile(datafile);
  }

  TRI_DestroyVectorPointer(vector);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees all strings held in the vector, then destroys the vector.
///
/// Strings are removed from the end of the vector so no elements need to be
/// shifted. Fix: removed the redundant `if (i == 0) break;` inside the
/// loop — it merely duplicated the `while (i > 0)` condition.
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeStringVectorQuery (TRI_vector_pointer_t* const memory) {
  char* stringPtr;
  size_t i = memory->_length;

  // free all strings in vector, starting at the end
  // (prevents copying the remaining elements in vector)
  while (i > 0) {
    i--;
    stringPtr = TRI_RemoveVectorPointer(memory, i);

    if (stringPtr) {
      TRI_Free(stringPtr);
      stringPtr = NULL;
    }
  }

  TRI_DestroyVectorPointer(memory);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief shuts down the process utilities: releases the stored process
/// name, frees the copied environment (when environ tampering is enabled),
/// and destroys the external-process registry and its lock.
////////////////////////////////////////////////////////////////////////////////

void TRI_ShutdownProcess () {
  TRI_FreeString(TRI_CORE_MEM_ZONE, ProcessName);

#ifdef TRI_TAMPER_WITH_ENVIRON
  // only free environ if we created a copy of it ourselves earlier;
  // freeing the original process environment would be undefined behaviour
  if (MustFreeEnvironment) {
    size_t i = 0;

    TRI_ASSERT(environ);

    // free all arguments copied for environ
    while (environ[i]) {
      TRI_FreeString(TRI_CORE_MEM_ZONE, environ[i]);
      ++i;
    }

    // free the (NULL-terminated) pointer array itself
    TRI_Free(TRI_CORE_MEM_ZONE, environ);
  }
#endif

  TRI_DestroyVectorPointer(&ExternalProcesses);
  TRI_DestroyMutex(&ExternalProcessesLock);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief frees a select result: its index and document buffers, every
/// data part (each part frees itself via its own free callback), the data
/// part vector, and the result structure itself
////////////////////////////////////////////////////////////////////////////////

static void FreeSelectResult (TRI_select_result_t* result) {
  size_t i;

  if (result->_index._start) {
    TRI_Free(result->_index._start);
  }

  if (result->_documents._start) {
    TRI_Free(result->_documents._start);
  }

  // each data part knows how to dispose of itself
  for (i = 0; i < result->_dataParts->_length; i++) {
    TRI_select_datapart_t* part = (TRI_select_datapart_t*) result->_dataParts->_buffer[i];

    part->free(part);
  }

  TRI_DestroyVectorPointer(result->_dataParts);
  TRI_Free(result->_dataParts);
  TRI_Free(result);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys a multi hash array: destroys every occupied slot and its
/// chained overflow elements, frees the hash table, then frees the overflow
/// blocks and the block vector.
////////////////////////////////////////////////////////////////////////////////

void TRI_DestroyHashArrayMulti (TRI_hash_array_multi_t* array) {
  // ...........................................................................
  // Go through each item in the array and remove any internal allocated memory
  // ...........................................................................

  // array->_table might be NULL if array initialisation fails
  if (array->_table != nullptr) {
    TRI_hash_index_element_multi_t* p;
    TRI_hash_index_element_multi_t* e;

    p = array->_table;
    e = p + array->_nrAlloc;

    for (; p < e; ++p) {
      // a slot is occupied iff its _document pointer is set
      if (p->_document != nullptr) {
        // destroy overflow elements: walk the _next chain, saving each
        // successor before destroying the current element
        auto current = p->_next;

        while (current != nullptr) {
          auto ptr = current->_next;

          DestroyElement(array, current);
          current = ptr;
        }

        // destroy the element itself
        DestroyElement(array, p);
      }
    }

    // note: the table is freed via _tablePtr (the original, possibly
    // unaligned allocation pointer), not via _table
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, array->_tablePtr);
  }

  // free overflow elements (block-allocated storage backing the chains)
  for (size_t i = 0; i < array->_blocks._length; ++i) {
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, array->_blocks._buffer[i]);
  }

  TRI_DestroyVectorPointer(&array->_blocks);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compactor event loop (thread entry point).
///
/// Repeatedly snapshots the collection list and compacts / cleans up simple
/// document collections until the vocbase signals shutdown.
/// @param data the vocbase (passed as void* because this is a thread start
/// routine)
////////////////////////////////////////////////////////////////////////////////

void TRI_CompactorVocBase (void* data) {
  TRI_vocbase_t* vocbase = data;
  TRI_vector_pointer_t collections;

  assert(vocbase->_active);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    size_t n;
    size_t i;
    TRI_col_type_e type;
    // keep initial _active value as vocbase->_active might change during compaction loop
    int active = vocbase->_active;

    if (active == 2) {
      // shadows must be cleaned before collections are handled
      // otherwise the shadows might still hold barriers on collections
      // and collections cannot be closed properly
      CleanupShadows(vocbase, true);
    }

    // copy all collections (under the read lock) so the loop below can run
    // without holding the collections lock
    TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
    TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
    TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

    n = collections._length;

    for (i = 0; i < n; ++i) {
      TRI_vocbase_col_t* collection;
      TRI_doc_collection_t* doc;

      collection = collections._buffer[i];

      // non-blocking lock attempt: skip this collection if someone else
      // holds its status lock, rather than stalling the compactor
      if (! TRI_TRY_READ_LOCK_STATUS_VOCBASE_COL(collection)) {
        continue;
      }

      doc = collection->_collection;

      if (doc == NULL) {
        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
        continue;
      }

      type = doc->base._type;

      // for simple document collection, compactify datafiles
      if (type == TRI_COL_TYPE_SIMPLE_DOCUMENT) {
        if (collection->_status == TRI_VOC_COL_STATUS_LOADED) {
          CompactifySimCollection((TRI_sim_collection_t*) doc);
        }
      }

      TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);

      // now release the lock and maybe unload the collection or some datafiles
      if (type == TRI_COL_TYPE_SIMPLE_DOCUMENT) {
        CleanupSimCollection((TRI_sim_collection_t*) doc);
      }
    }

    // NOTE(review): this re-reads vocbase->_active instead of using the
    // `active` snapshot taken above — presumably intentional so that a
    // shutdown started during the loop skips the sleep; verify
    if (vocbase->_active == 1) {
      // clean up unused shadows
      CleanupShadows(vocbase, false);

      // only sleep while server is still running
      usleep(COMPACTOR_INTERVAL);
    }

    if (active == 2) {
      // server shutdown
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief scans a collection directory and classifies its files.
///
/// Index files are recorded in the collection's index file list; journal,
/// compactor and datafile files are opened and validated (header marker and
/// collection id), sealed journals/compactors are renamed into datafiles,
/// and the resulting vectors are attached to the collection. On any error
/// all opened datafiles are closed again and false is returned.
///
/// Fixes:
/// - `filename` was leaked when TRI_OpenDatafile() failed and when an
///   unsealed datafile aborted the scan (both paths broke out of the loop
///   before reaching the TRI_FreeString at the end of the branch)
/// - on the error path the `journals` and `compactors` vectors' internal
///   buffers were never destroyed
////////////////////////////////////////////////////////////////////////////////

static bool CheckCollection (TRI_collection_t* collection) {
  TRI_datafile_t* datafile;
  TRI_vector_pointer_t all;
  TRI_vector_pointer_t compactors;
  TRI_vector_pointer_t datafiles;
  TRI_vector_pointer_t journals;
  TRI_vector_pointer_t sealed;
  TRI_vector_string_t files;
  bool stop;
  regex_t re;
  size_t i;
  size_t n;

  stop = false;

  // check files within the directory
  files = TRI_FilesDirectory(collection->_directory);
  n = files._length;

  regcomp(&re, "^(journal|datafile|index|compactor)-([0-9][0-9]*)\\.(db|json)$", REG_EXTENDED);

  TRI_InitVectorPointer(&journals, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&compactors, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&datafiles, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&sealed, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&all, TRI_UNKNOWN_MEM_ZONE);

  for (i = 0; i < n; ++i) {
    char const* file = files._buffer[i];
    regmatch_t matches[4];

    if (regexec(&re, file, sizeof(matches) / sizeof(matches[0]), matches, 0) == 0) {
      char const* first = file + matches[1].rm_so;
      size_t firstLen = matches[1].rm_eo - matches[1].rm_so;

      char const* third = file + matches[3].rm_so;
      size_t thirdLen = matches[3].rm_eo - matches[3].rm_so;

      // .............................................................................
      // file is an index, just store the filename
      // .............................................................................

      if (TRI_EqualString2("index", first, firstLen) && TRI_EqualString2("json", third, thirdLen)) {
        char* filename;

        filename = TRI_Concatenate2File(collection->_directory, file);
        TRI_PushBackVectorString(&collection->_indexFiles, filename);
      }

      // .............................................................................
      // file is a journal or datafile, open the datafile
      // .............................................................................

      else if (TRI_EqualString2("db", third, thirdLen)) {
        char* filename;
        char* ptr;
        TRI_col_header_marker_t* cm;

        filename = TRI_Concatenate2File(collection->_directory, file);
        datafile = TRI_OpenDatafile(filename);

        if (datafile == NULL) {
          collection->_lastError = TRI_errno();
          stop = true;

          LOG_ERROR("cannot open datafile '%s': %s", filename, TRI_last_error());

          // fix: filename was previously leaked on this path
          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          break;
        }

        TRI_PushBackVectorPointer(&all, datafile);

        // check the document header
        ptr = datafile->_data;
        ptr += TRI_DF_ALIGN_BLOCK(sizeof(TRI_df_header_marker_t));
        cm = (TRI_col_header_marker_t*) ptr;

        if (cm->base._type != TRI_COL_MARKER_HEADER) {
          LOG_ERROR("collection header mismatch in file '%s', expected TRI_COL_MARKER_HEADER, found %lu",
                    filename,
                    (unsigned long) cm->base._type);

          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          stop = true;
          break;
        }

        if (cm->_cid != collection->_info._cid) {
          LOG_ERROR("collection identifier mismatch, expected %llu, found %llu",
                    (unsigned long long) collection->_info._cid,
                    (unsigned long long) cm->_cid);

          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          stop = true;
          break;
        }

        // file is a journal
        if (TRI_EqualString2("journal", first, firstLen)) {
          if (datafile->_isSealed) {
            LOG_WARNING("strange, journal '%s' is already sealed; must be a left over; will use it as datafile", filename);

            TRI_PushBackVectorPointer(&sealed, datafile);
          }
          else {
            TRI_PushBackVectorPointer(&journals, datafile);
          }
        }

        // file is a compactor file
        else if (TRI_EqualString2("compactor", first, firstLen)) {
          if (datafile->_isSealed) {
            LOG_WARNING("strange, compactor journal '%s' is already sealed; must be a left over; will use it as datafile", filename);

            TRI_PushBackVectorPointer(&sealed, datafile);
          }
          else {
            TRI_PushBackVectorPointer(&compactors, datafile);
          }
        }

        // file is a datafile
        else if (TRI_EqualString2("datafile", first, firstLen)) {
          if (! datafile->_isSealed) {
            LOG_ERROR("datafile '%s' is not sealed, this should never happen", filename);

            collection->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);
            stop = true;

            // fix: filename was previously leaked on this path
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
            break;
          }
          else {
            TRI_PushBackVectorPointer(&datafiles, datafile);
          }
        }
        else {
          LOG_ERROR("unknown datafile '%s'", file);
        }

        TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
      }
      else {
        LOG_ERROR("unknown datafile '%s'", file);
      }
    }
  }

  TRI_DestroyVectorString(&files);

  regfree(&re);

  // convert the sealed journals into datafiles
  if (! stop) {
    n = sealed._length;

    for (i = 0; i < n; ++i) {
      char* number;
      char* dname;
      char* filename;
      bool ok;

      datafile = sealed._buffer[i];

      number = TRI_StringUInt64(datafile->_fid);
      dname = TRI_Concatenate3String("datafile-", number, ".db");
      filename = TRI_Concatenate2File(collection->_directory, dname);

      TRI_FreeString(TRI_CORE_MEM_ZONE, dname);
      TRI_FreeString(TRI_CORE_MEM_ZONE, number);

      ok = TRI_RenameDatafile(datafile, filename);

      if (ok) {
        TRI_PushBackVectorPointer(&datafiles, datafile);
        LOG_DEBUG("renamed sealed journal to '%s'", filename);
      }
      else {
        collection->_lastError = datafile->_lastError;
        stop = true;
        LOG_ERROR("cannot rename sealed log-file to %s, this should not happen: %s", filename, TRI_last_error());
        break;
      }

      TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
    }
  }

  TRI_DestroyVectorPointer(&sealed);

  // stop if necessary: close and free everything opened so far
  if (stop) {
    n = all._length;

    for (i = 0; i < n; ++i) {
      datafile = all._buffer[i];

      LOG_TRACE("closing datafile '%s'", datafile->_filename);

      TRI_CloseDatafile(datafile);
      TRI_FreeDatafile(datafile);
    }

    TRI_DestroyVectorPointer(&all);
    TRI_DestroyVectorPointer(&datafiles);

    // fix: these vectors' buffers were previously leaked on the error path
    // (the datafiles they point to are freed via `all` above)
    TRI_DestroyVectorPointer(&journals);
    TRI_DestroyVectorPointer(&compactors);

    return false;
  }

  TRI_DestroyVectorPointer(&all);

  // add the datafiles and journals; ownership passes to the collection
  collection->_datafiles = datafiles;
  collection->_journals = journals;
  collection->_compactors = compactors;

  return true;
}
static bool BytecodeShapeAccessor (TRI_shaper_t* shaper, TRI_shape_access_t* accessor) { TRI_shape_aid_t const* paids; TRI_shape_path_t const* path; TRI_shape_t const* shape; TRI_vector_pointer_t ops; size_t i; size_t j; int res; // find the shape shape = shaper->lookupShapeId(shaper, accessor->_sid); if (shape == nullptr) { LOG_ERROR("unknown shape id %llu", (unsigned long long) accessor->_sid); #ifdef TRI_ENABLE_MAINTAINER_MODE TRI_ASSERT(false); #endif return false; } // find the attribute path path = shaper->lookupAttributePathByPid(shaper, accessor->_pid); if (path == nullptr) { LOG_ERROR("unknown attribute path %llu", (unsigned long long) accessor->_pid); #ifdef TRI_ENABLE_MAINTAINER_MODE TRI_ASSERT(false); #endif return false; } paids = (TRI_shape_aid_t*) (((char const*) path) + sizeof(TRI_shape_path_t)); // collect the bytecode // we need at least 2 entries in the vector to store an accessor TRI_InitVectorPointer2(&ops, shaper->_memoryZone, 2); // and follow it for (i = 0; i < path->_aidLength; ++i, ++paids) { #ifdef DEBUG_SHAPE_ACCESSOR printf("%lu: aid: %lu, sid: %lu, type %lu\n", (unsigned long) i, (unsigned long) *paids, (unsigned long) shape->_sid, (unsigned long) shape->_type); #endif if (shape->_type == TRI_SHAPE_ARRAY) { TRI_array_shape_t* s; TRI_shape_aid_t const* aids; TRI_shape_sid_t const* sids; TRI_shape_sid_t sid; TRI_shape_size_t const* offsetsF; TRI_shape_size_t f; TRI_shape_size_t n; TRI_shape_size_t v; char const* qtr; s = (TRI_array_shape_t*) shape; f = s->_fixedEntries; v = s->_variableEntries; n = f + v; // find the aid within the shape qtr = (char const*) shape; qtr += sizeof(TRI_array_shape_t); sids = (TRI_shape_sid_t const*) qtr; qtr += n * sizeof(TRI_shape_sid_t); aids = (TRI_shape_aid_t const*) qtr; qtr += n * sizeof(TRI_shape_aid_t); offsetsF = (TRI_shape_size_t const*) qtr; // check for fixed size aid for (j = 0; j < f; ++j, ++sids, ++aids, ++offsetsF) { if (*paids == *aids) { sid = *sids; LOG_TRACE("found aid '%ld' as fixed 
entry with sid '%ld' and offset '%ld' - '%ld'", (unsigned long) *paids, (unsigned long) sid, (unsigned long) offsetsF[0], (unsigned long) offsetsF[1]); shape = shaper->lookupShapeId(shaper, sid); if (shape == nullptr) { LOG_ERROR("unknown shape id '%ld' for attribute id '%ld'", (unsigned long) accessor->_sid, (unsigned long) *paids); TRI_DestroyVectorPointer(&ops); return false; } res = TRI_PushBackVectorPointer(&ops, (void*) TRI_SHAPE_AC_OFFSET_FIX); if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } res = TRI_PushBackVectorPointer(&ops, (void*) (uintptr_t) (offsetsF[0])); // offset is always smaller than 4 GByte if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } res = TRI_PushBackVectorPointer(&ops, (void*) (uintptr_t) (offsetsF[1])); // offset is always smaller than 4 GByte if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } break; } } if (j < f) { continue; } // check for variable size aid for (j = 0; j < v; ++j, ++sids, ++aids) { if (*paids == *aids) { sid = *sids; LOG_TRACE("found aid '%ld' as variable entry with sid '%ld'", (unsigned long) *paids, (unsigned long) sid); shape = shaper->lookupShapeId(shaper, sid); if (shape == nullptr) { LOG_ERROR("unknown shape id '%ld' for attribute id '%ld'", (unsigned long) accessor->_sid, (unsigned long) *paids); LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } res = TRI_PushBackVectorPointer(&ops, (void*) TRI_SHAPE_AC_OFFSET_VAR); if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } res = TRI_PushBackVectorPointer(&ops, (void*) j); if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } break; } } if (j < v) { continue; } LOG_TRACE("unknown attribute id '%ld'", (unsigned long) *paids); TRI_DestroyVectorPointer(&ops); 
accessor->_resultSid = TRI_SHAPE_ILLEGAL; accessor->_code = nullptr; return true; } else { TRI_DestroyVectorPointer(&ops); accessor->_resultSid = TRI_SHAPE_ILLEGAL; accessor->_code = nullptr; return true; } } // travel attribute path to the end res = TRI_PushBackVectorPointer(&ops, (void*) TRI_SHAPE_AC_DONE); if (res != TRI_ERROR_NO_ERROR) { LOG_ERROR("out of memory"); TRI_DestroyVectorPointer(&ops); return false; } // remember resulting sid accessor->_resultSid = shape->_sid; // steal buffer from ops vector so we don't need to copy it accessor->_code = const_cast<void const**>(ops._buffer); // inform the vector that we took over ownership ops._buffer = nullptr; TRI_DestroyVectorPointer(&ops); return true; }
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys a pointer vector and releases the vector structure itself
/// from the given memory zone
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeVectorPointer (TRI_memory_zone_t* zone,
                            TRI_vector_pointer_t* vector) {
  // tear down the contents first, then hand the struct back to the zone
  TRI_DestroyVectorPointer(vector);
  TRI_Free(zone, vector);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief cleanup event loop (thread entry point).
///
/// Periodically cleans up cursors, expired compactor locks, and document
/// collections (including their indexes every CLEANUP_INDEX_ITERATIONS
/// rounds) until the vocbase state signals shutdown (state 3).
/// @param data the vocbase (passed as void* because this is a thread start
/// routine)
////////////////////////////////////////////////////////////////////////////////

void TRI_CleanupVocBase (void* data) {
  TRI_vocbase_t* vocbase;
  TRI_vector_pointer_t collections;
  uint64_t iterations = 0;

  vocbase = data;
  assert(vocbase);
  assert(vocbase->_state == 1);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    int state;

    // keep initial _state value as vocbase->_state might change during cleanup loop
    state = vocbase->_state;

    ++iterations;

    if (state == 2) {
      // shadows must be cleaned before collections are handled
      // otherwise the shadows might still hold barriers on collections
      // and collections cannot be closed properly
      CleanupCursors(vocbase, true);
    }

    // check if we can get the compactor lock exclusively
    // (cleanup and compaction are mutually exclusive)
    if (TRI_CheckAndLockCompactorVocBase(vocbase)) {
      size_t i, n;
      TRI_col_type_e type;

      // copy all collections so the loop below runs without the lock
      TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
      TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
      TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

      n = collections._length;

      for (i = 0; i < n; ++i) {
        TRI_vocbase_col_t* collection;
        TRI_primary_collection_t* primary;

        collection = collections._buffer[i];

        TRI_READ_LOCK_STATUS_VOCBASE_COL(collection);

        primary = collection->_collection;

        if (primary == NULL) {
          // collection is not loaded, nothing to clean up
          TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
          continue;
        }

        type = primary->base._info._type;

        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);

        // we're the only ones that can unload the collection, so using
        // the collection pointer outside the lock is ok

        // maybe cleanup indexes, unload the collection or some datafiles
        if (TRI_IS_DOCUMENT_COLLECTION(type)) {
          TRI_document_collection_t* document = (TRI_document_collection_t*) primary;

          // clean indexes? only every CLEANUP_INDEX_ITERATIONS rounds
          if (iterations % (uint64_t) CLEANUP_INDEX_ITERATIONS == 0) {
            document->cleanupIndexes(document);
          }

          CleanupDocumentCollection(document);
        }
      }

      TRI_UnlockCompactorVocBase(vocbase);
    }

    if (vocbase->_state >= 1) {
      // server is still running, clean up unused shadows
      if (iterations % CLEANUP_SHADOW_ITERATIONS == 0) {
        CleanupCursors(vocbase, false);
      }

      // clean up expired compactor locks
      TRI_CleanupCompactorVocBase(vocbase);

      if (state == 1) {
        // normal operation: wait (bounded) for a wakeup signal
        TRI_LockCondition(&vocbase->_cleanupCondition);
        TRI_TimedWaitCondition(&vocbase->_cleanupCondition, (uint64_t) CLEANUP_INTERVAL);
        TRI_UnlockCondition(&vocbase->_cleanupCondition);
      }
    }

    if (state == 3) {
      // server shutdown
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);

  LOG_TRACE("shutting down cleanup thread");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief cleanup event loop (thread entry point) — older variant.
///
/// Periodically cleans up shadows and document collections until the
/// vocbase state signals shutdown (state 3).
/// @param data the vocbase (passed as void* because this is a thread start
/// routine)
////////////////////////////////////////////////////////////////////////////////

void TRI_CleanupVocBase (void* data) {
  TRI_vocbase_t* vocbase = data;
  TRI_vector_pointer_t collections;

  assert(vocbase->_state == 1);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    size_t n;
    size_t i;
    TRI_col_type_e type;
    // keep initial _state value as vocbase->_state might change during compaction loop
    int state = vocbase->_state;

    if (state == 2) {
      // shadows must be cleaned before collections are handled
      // otherwise the shadows might still hold barriers on collections
      // and collections cannot be closed properly
      CleanupShadows(vocbase, true);
    }

    // copy all collections so the loop below runs without the lock
    TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
    TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
    TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

    n = collections._length;

    for (i = 0; i < n; ++i) {
      TRI_vocbase_col_t* collection;
      TRI_primary_collection_t* primary;

      collection = collections._buffer[i];

      TRI_READ_LOCK_STATUS_VOCBASE_COL(collection);

      primary = collection->_collection;

      if (primary == NULL) {
        // collection is not loaded, nothing to clean up
        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
        continue;
      }

      type = primary->base._type;

      TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);

      // now release the lock and maybe unload the collection or some datafiles
      if (TRI_IS_DOCUMENT_COLLECTION(type)) {
        CleanupDocumentCollection((TRI_document_collection_t*) primary);
      }
    }

    if (vocbase->_state >= 1) {
      // server is still running, clean up unused shadows
      CleanupShadows(vocbase, false);

      // wait (bounded) for the next cleanup round or a wakeup signal
      TRI_LockCondition(&vocbase->_cleanupCondition);
      TRI_TimedWaitCondition(&vocbase->_cleanupCondition, CLEANUP_INTERVAL);
      TRI_UnlockCondition(&vocbase->_cleanupCondition);
    }

    if (state == 3) {
      // server shutdown
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief determines the best index to use for a set of access candidates.
///
/// Iterates over all available indexes of the collection and, for each one,
/// tries to match the index fields (left to right) against the candidate
/// field accesses. Index-type-specific rules decide which access types are
/// acceptable (exact match for primary/edge/hash/bitarray, ranges also
/// allowed for skiplists). The best matching index, as judged by
/// PickIndex(), is returned, or NULL if none is usable.
///
/// @param context the AQL context (used by PickIndex)
/// @param availableIndexes all indexes defined on the collection
/// @param collectionName the collection name (logging only)
/// @param candidates the field access candidates from the query's filters
/// @return the picked index, or NULL
////////////////////////////////////////////////////////////////////////////////

TRI_aql_index_t* TRI_DetermineIndexAql (TRI_aql_context_t* const context,
                                        const TRI_vector_pointer_t* const availableIndexes,
                                        const char* const collectionName,
                                        const TRI_vector_pointer_t* candidates) {
  TRI_aql_index_t* picked = NULL;
  TRI_vector_pointer_t matches;
  size_t i, n;

  TRI_InitVectorPointer(&matches, TRI_UNKNOWN_MEM_ZONE);

  assert(context);
  assert(collectionName);
  assert(candidates);

  n = availableIndexes->_length;

  for (i = 0; i < n; ++i) {
    TRI_index_t* idx = (TRI_index_t*) availableIndexes->_buffer[i];
    size_t numIndexFields;
    bool lastTypeWasExact;
    size_t j;

    if (! CanUseIndex(idx)) {
      continue;
    }

    LogIndexString("checking", idx, collectionName);

    // reset the matches for each index considered
    TRI_ClearVectorPointer(&matches);

    lastTypeWasExact = true;
    numIndexFields = idx->_fields._length;

    // now loop over all index fields, from left to right
    // index field order is important because skiplists can be used with leftmost prefixes as well,
    // but not with rightmost prefixes
    for (j = 0; j < numIndexFields; ++j) {
      char* indexedFieldName;
      char* fieldName;
      size_t k;

      indexedFieldName = idx->_fields._buffer[j];

      if (indexedFieldName == NULL) {
        continue;
      }

      // now loop over all candidates
      for (k = 0; k < candidates->_length; ++k) {
        TRI_aql_field_access_t* candidate = (TRI_aql_field_access_t*) TRI_AtVectorPointer(candidates, k);

        if (candidate->_type == TRI_AQL_ACCESS_IMPOSSIBLE ||
            candidate->_type == TRI_AQL_ACCESS_ALL) {
          // wrong index type, doesn't help us at all
          continue;
        }

        // strip the leading "variable." prefix from the candidate's full name
        fieldName = candidate->_fullName + candidate->_variableNameLength + 1;

        if (idx->_type == TRI_IDX_TYPE_PRIMARY_INDEX) {
          // primary index key names must be treated differently. _id and _key are the same
          if (! TRI_EqualString("_id", fieldName) &&
              ! TRI_EqualString(TRI_VOC_ATTRIBUTE_KEY, fieldName)) {
            continue;
          }
        }
        else if (idx->_type == TRI_IDX_TYPE_EDGE_INDEX) {
          // edge index key names must be treated differently. _from and _to can be used independently
          if (! TRI_EqualString(TRI_VOC_ATTRIBUTE_FROM, fieldName) &&
              ! TRI_EqualString(TRI_VOC_ATTRIBUTE_TO, fieldName)) {
            continue;
          }
        }
        else if (! TRI_EqualString(indexedFieldName, fieldName)) {
          // different attribute, doesn't help
          continue;
        }

        // attribute is used in index

        if (idx->_type == TRI_IDX_TYPE_PRIMARY_INDEX ||
            idx->_type == TRI_IDX_TYPE_EDGE_INDEX) {
          if (! IsExactCandidate(candidate)) {
            // wrong access type for primary index
            continue;
          }

          TRI_PushBackVectorPointer(&matches, candidate);
        }
        else if (idx->_type == TRI_IDX_TYPE_HASH_INDEX) {
          if (! IsExactCandidate(candidate)) {
            // wrong access type for hash index
            continue;
          }

          if (candidate->_type == TRI_AQL_ACCESS_LIST && numIndexFields != 1) {
            // we found a list, but the index covers multiple attributes. that means we cannot use list access
            continue;
          }

          TRI_PushBackVectorPointer(&matches, candidate);
        }
        else if (idx->_type == TRI_IDX_TYPE_BITARRAY_INDEX) {
          if (! IsExactCandidate(candidate)) {
            // wrong access type for hash index
            continue;
          }

          if (candidate->_type == TRI_AQL_ACCESS_LIST) {
            // we found a list, but the index covers multiple attributes. that means we cannot use list access
            continue;
          }

          TRI_PushBackVectorPointer(&matches, candidate);
        }
        else if (idx->_type == TRI_IDX_TYPE_SKIPLIST_INDEX) {
          bool candidateIsExact;

          if (candidate->_type != TRI_AQL_ACCESS_EXACT &&
              candidate->_type != TRI_AQL_ACCESS_LIST &&
              candidate->_type != TRI_AQL_ACCESS_RANGE_SINGLE &&
              candidate->_type != TRI_AQL_ACCESS_RANGE_DOUBLE &&
              candidate->_type != TRI_AQL_ACCESS_REFERENCE) {
            // wrong access type for skiplists
            continue;
          }

          if (candidate->_type == TRI_AQL_ACCESS_LIST && numIndexFields != 1) {
            // we found a list, but the index covers multiple attributes. that means we cannot use list access
            continue;
          }

          candidateIsExact = IsExactCandidate(candidate);

          // NOTE(review): this condition is logically equivalent to
          // (! lastTypeWasExact) — once a range access has been seen, no
          // further fields (exact or range) may follow
          if ((candidateIsExact && ! lastTypeWasExact) ||
              (! candidateIsExact && ! lastTypeWasExact)) {
            // if we already had a range query, we cannot check for equality after that
            // if we already had a range query, we cannot check another range after that
            continue;
          }

          if (candidate->_type == TRI_AQL_ACCESS_RANGE_SINGLE) {
            // range type. check if the compare value is a list or an object
            TRI_json_t* value = candidate->_value._singleRange._value;

            if (TRI_IsListJson(value) || TRI_IsArrayJson(value)) {
              // list or object, we cannot use this for comparison in a skiplist
              continue;
            }
          }
          else if (candidate->_type == TRI_AQL_ACCESS_RANGE_DOUBLE) {
            // range type. check if the compare value is a list or an object
            TRI_json_t* value = candidate->_value._between._lower._value;

            if (TRI_IsListJson(value) || TRI_IsArrayJson(value)) {
              // list or object, we cannot use this for comparison in a skiplist
              continue;
            }

            value = candidate->_value._between._upper._value;

            if (TRI_IsListJson(value) || TRI_IsArrayJson(value)) {
              // list or object, we cannot use this for comparison in a skiplist
              continue;
            }
          }

          lastTypeWasExact = candidateIsExact;

          TRI_PushBackVectorPointer(&matches, candidate);
        }
      }

      // finished iterating over all candidates

      if (matches._length != j + 1) {
        // we already have picked less candidate fields than we should
        break;
      }
    }

    if (matches._length < 1) {
      // nothing found
      continue;
    }

    // we now do or don't have an index candidate in the matches vector
    if (matches._length < numIndexFields && TRI_NeedsFullCoverageIndex(idx->_type)) {
      // the matches vector does not fully cover the indexed fields, but the index requires it
      continue;
    }

    // if we can use the primary index, we'll use it
    picked = PickIndex(context, picked, idx, &matches);
  }

  TRI_DestroyVectorPointer(&matches);

  if (picked) {
    LogIndexString("using", picked->_idx, collectionName);
  }

  return picked;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up edges in the edge index and appends the matching document
/// pointers to @a result.
///
/// @param direction the lookup direction (in/out/any)
/// @param idx the multi-pointer edge index
/// @param result output vector of document master pointers (appended to)
/// @param entry the search key; its _flags are set here from the direction
/// @param matchType 1 = first pass (take everything), 2/3 = reversed-direction
///        passes that must skip self-reflexive edges (see comments below)
/// @return false only if re-initialising the result vector runs out of
///         memory; true otherwise
////////////////////////////////////////////////////////////////////////////////

static bool FindEdges (const TRI_edge_direction_e direction,
                       TRI_multi_pointer_t* idx,
                       TRI_vector_pointer_t* result,
                       TRI_edge_header_t* entry,
                       const int matchType) {
  TRI_vector_pointer_t found;
  TRI_edge_header_t* edge;

  entry->_flags = TRI_LookupFlagsEdge(direction);
  found = TRI_LookupByKeyMultiPointer(TRI_UNKNOWN_MEM_ZONE, idx, entry);

  if (found._length > 0) {
    size_t i;

    if (result->_capacity == 0) {
      int res;

      // if result vector is still empty and we have results, re-init the
      // result vector to a "good" size. this will save later reallocations
      res = TRI_InitVectorPointer2(result, TRI_UNKNOWN_MEM_ZONE, found._length);

      if (res != TRI_ERROR_NO_ERROR) {
        TRI_DestroyVectorPointer(&found);
        TRI_set_errno(res);

        return false;
      }
    }

    // add all results found
    for (i = 0; i < found._length; ++i) {
      edge = (TRI_edge_header_t*) found._buffer[i];

      // the following queries will use the following sequences of matchTypes:
      // inEdges(): 1, 2,  outEdges(): 1, 2,  edges(): 1, 3

      // if matchType is 1, we'll return all found edges without further filtering
      //
      // if matchType is 2 (inEdges or outEdges query), the direction is reversed.
      // We'll exclude all self-reflexive edges now (we already got them in iteration 1),
      // and alsoexclude all unidirectional edges
      //
      // if matchType is 3, the direction is also reversed. We'll exclude all
      // self-reflexive edges now (we already got them in iteration 1)

      if (matchType > 1) {
        // if the edge is self-reflexive, we have already found it in iteration 1
        // we must skip it here, otherwise we would produce duplicates
        if (IsReflexive(edge)) {
          continue;
        }
      }

      TRI_PushBackVectorPointer(result, CONST_CAST(edge->_mptr));
    }
  }

  TRI_DestroyVectorPointer(&found);

  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compactor event loop; thread main function for the compactor thread
///
/// `data` is the TRI_vocbase_t* the thread works on. The loop runs until the
/// vocbase state switches to 2 (shutdown). Each iteration tries to compact
/// every loaded document collection with compaction enabled, then waits on
/// the compactor condition variable for up to COMPACTOR_INTERVAL.
////////////////////////////////////////////////////////////////////////////////

void TRI_CompactorVocBase (void* data) {
  TRI_vocbase_t* vocbase;
  TRI_vector_pointer_t collections;

  vocbase = data;
  // the compactor thread must only be started while the vocbase is running
  assert(vocbase->_state == 1);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    int state;

    // keep initial _state value as vocbase->_state might change during compaction loop
    state = vocbase->_state;

    // check if compaction is currently disallowed
    if (CheckAndLockCompaction(vocbase)) {
      // compaction is currently allowed
      size_t i, n;

      // copy all collections under the collections read-lock, then release it
      // so compaction work does not block collection list modifications
      TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
      TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
      TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

      n = collections._length;

      for (i = 0;  i < n;  ++i) {
        TRI_vocbase_col_t* collection;
        TRI_primary_collection_t* primary;
        TRI_col_type_e type;
        bool doCompact;
        bool worked;

        collection = collections._buffer[i];

        if (! TRI_TRY_READ_LOCK_STATUS_VOCBASE_COL(collection)) {
          // if we can't acquire the read lock instantly, we continue directly
          // we don't want to stall here for too long
          continue;
        }

        primary = collection->_collection;

        if (primary == NULL) {
          // collection is not loaded; nothing to compact
          TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
          continue;
        }

        worked    = false;
        doCompact = primary->base._info._doCompact;
        type      = primary->base._info._type;

        // for document collection, compactify datafiles
        if (TRI_IS_DOCUMENT_COLLECTION(type)) {
          if (collection->_status == TRI_VOC_COL_STATUS_LOADED && doCompact) {
            TRI_barrier_t* ce;

            // try to acquire the write side of the compaction lock; if someone
            // else holds it, skip this collection and move on
            // (original comments called this a read lock, but the code uses
            // the write-lock variants)
            if (! TRI_TryWriteLockReadWriteLock(&primary->_compactionLock)) {
              // someone else is holding the compactor lock, we'll not compact
              TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
              continue;
            }

            // the barrier keeps datafiles alive while the compaction runs
            ce = TRI_CreateBarrierCompaction(&primary->_barrierList);

            if (ce == NULL) {
              // out of memory
              LOG_WARNING("out of memory when trying to create a barrier element");
            }
            else {
              worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
              TRI_FreeBarrier(ce);
            }

            // release the compaction lock (write side, matching the trylock above)
            TRI_WriteUnlockReadWriteLock(&primary->_compactionLock);
          }
        }

        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);

        if (worked) {
          // signal the cleanup thread that we worked and that it can now wake up
          TRI_LockCondition(&vocbase->_cleanupCondition);
          TRI_SignalCondition(&vocbase->_cleanupCondition);
          TRI_UnlockCondition(&vocbase->_cleanupCondition);
        }
      }

      // re-allow compaction for other threads
      UnlockCompaction(vocbase);
    }

    if (state != 2 && vocbase->_state == 1) {
      // only sleep while server is still running
      TRI_LockCondition(&vocbase->_compactorCondition);
      TRI_TimedWaitCondition(&vocbase->_compactorCondition, (uint64_t) COMPACTOR_INTERVAL);
      TRI_UnlockCondition(&vocbase->_compactorCondition);
    }

    if (state == 2) {
      // server shutdown
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);

  LOG_TRACE("shutting down compactor thread");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief synchroniser event loop; thread main function for the synchroniser
///
/// `data` is the TRI_vocbase_t* the thread works on. The loop runs until the
/// vocbase state switches to 2 (shutdown). Each iteration syncs and checks
/// the journals of all loaded document collections; when no work was done and
/// no-one is waiting, it sleeps for up to SYNCHRONISER_INTERVAL.
////////////////////////////////////////////////////////////////////////////////

void TRI_SynchroniserVocBase (void* data) {
  TRI_col_type_e type;
  TRI_vocbase_t* vocbase = data;
  TRI_vector_pointer_t collections;

  // the synchroniser thread must only be started while the vocbase is running
  assert(vocbase->_state == 1);

  TRI_InitVectorPointer(&collections, TRI_UNKNOWN_MEM_ZONE);

  while (true) {
    size_t n;
    size_t i;
    bool worked;

    // keep initial _state value as vocbase->_state might change during sync loop
    int state = vocbase->_state;

    worked = false;

    // copy all collections and release the lock
    TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
    TRI_CopyDataVectorPointer(&collections, &vocbase->_collections);
    TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);

    // loop over all copied collections
    n = collections._length;

    for (i = 0;  i < n;  ++i) {
      TRI_vocbase_col_t* collection;
      TRI_primary_collection_t* primary;

      collection = collections._buffer[i];

      // if we cannot acquire the read lock instantly, we will continue.
      // otherwise we'll risk a multi-thread deadlock between synchroniser,
      // compactor and data-modification threads (e.g. POST /_api/document)
      if (! TRI_TRY_READ_LOCK_STATUS_VOCBASE_COL(collection)) {
        continue;
      }

      if (collection->_status != TRI_VOC_COL_STATUS_LOADED) {
        // only loaded collections need syncing
        TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
        continue;
      }

      primary = collection->_collection;

      // for simple collection, first sync and then seal
      type = primary->base._info._type;

      if (TRI_IS_DOCUMENT_COLLECTION(type)) {
        bool result;

        // remember whether either step did any work, so we can skip sleeping
        result = CheckSyncDocumentCollection((TRI_document_collection_t*) primary);
        worked |= result;

        result = CheckJournalDocumentCollection((TRI_document_collection_t*) primary);
        worked |= result;
      }

      TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
    }

    // only sleep while server is still running and no-one is waiting
    if (! worked && vocbase->_state == 1) {
      TRI_LOCK_SYNCHRONISER_WAITER_VOCBASE(vocbase);

      if (vocbase->_syncWaiters == 0) {
        // nobody is waiting for a sync; wait on the condition variable
        TRI_WAIT_SYNCHRONISER_WAITER_VOCBASE(vocbase, (uint64_t) SYNCHRONISER_INTERVAL);
      }

      TRI_UNLOCK_SYNCHRONISER_WAITER_VOCBASE(vocbase);
    }

    // server shutdown
    if (state == 2) {
      break;
    }
  }

  TRI_DestroyVectorPointer(&collections);

  LOG_TRACE("shutting down synchroniser thread");
}