////////////////////////////////////////////////////////////////////////////////
/// @brief callback invoked when an obsolete datafile can finally be removed:
/// renames it to "deleted-<fid>.db", closes it, optionally wipes it from disk
/// and frees the datafile struct
////////////////////////////////////////////////////////////////////////////////

static void RemoveDatafileCallback (TRI_datafile_t* datafile, void* data) {
  TRI_collection_t* collection;
  char* old;
  char* filename;
  char* name;
  char* number;
  bool ok;
  int res;

  collection = data;

  // construct the target name "deleted-<fid>.db" inside the collection directory.
  // BUGFIX: use the 64 bit conversion for the fid. DropDatafileCallback converts
  // the very same _fid field with TRI_StringUInt64; the 32 bit variant silently
  // truncates large fids and could produce colliding file names
  number   = TRI_StringUInt64(datafile->_fid);
  name     = TRI_Concatenate3String("deleted-", number, ".db");
  filename = TRI_Concatenate2File(collection->_directory, name);

  TRI_FreeString(TRI_CORE_MEM_ZONE, number);
  TRI_FreeString(TRI_CORE_MEM_ZONE, name);

  // remember the old name for the error message; only used if the rename
  // failed, in which case the datafile still owns this string
  old = datafile->_filename;

  ok = TRI_RenameDatafile(datafile, filename);

  if (! ok) {
    LOG_ERROR("cannot rename obsolete datafile '%s' to '%s': %s", old, filename, TRI_last_error());
  }

  LOG_DEBUG("finished compactifing datafile '%s'", datafile->_filename);

  ok = TRI_CloseDatafile(datafile);

  if (! ok) {
    LOG_ERROR("cannot close obsolete datafile '%s': %s", datafile->_filename, TRI_last_error());
  }
  else {
    // only unlink the renamed file if the vocbase is configured to do so
    if (collection->_vocbase->_removeOnCompacted) {
      LOG_DEBUG("wiping compacted datafile from disk");

      res = TRI_UnlinkFile(filename);

      if (res != TRI_ERROR_NO_ERROR) {
        LOG_ERROR("cannot wipe obsolete datafile '%s': %s", datafile->_filename, TRI_last_error());
      }
    }
  }

  TRI_FreeDatafile(datafile);
  TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief msyncs the not-yet-synced region of every compactor file of the
/// collection; returns true iff at least one sync was attempted
////////////////////////////////////////////////////////////////////////////////

static bool CheckSyncCompactorDocumentCollection (TRI_document_collection_t* sim) {
  TRI_collection_t* base;
  TRI_datafile_t* journal;
  bool ok;
  bool worked;
  char const* synced;
  char* written;
  double ti;
  size_t i;
  size_t n;

  worked = false;
  base = &sim->base.base;

  // .............................................................................
  // the only thread MODIFYING the _compactors variable is this thread,
  // therefore no locking is required to access the _compactors
  // .............................................................................

  n = base->_compactors._length;

  for (i = 0; i < n; ++i) {
    journal = base->_compactors._buffer[i];

    // snapshot the sync/write positions under the journal entries lock
    TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

    synced = journal->_synced;
    written = journal->_written;

    TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

    if (synced < written) {
      worked = true;

      // sync the region [synced, written) outside the lock
      ok = TRI_msync(journal->_fd, journal->_mmHandle, synced, written);
      ti = TRI_microtime();

      TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

      if (ok) {
        journal->_synced = written;
        journal->_lastSynced = ti;
      }
      else {
        // mark the datafile as broken so writers stop using it
        journal->_state = TRI_DF_STATE_WRITE_ERROR;
      }

      // wake up any threads waiting for the sync result
      TRI_BROADCAST_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
      TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

      if (ok) {
        LOG_TRACE("msync succeeded %p, size %lu", synced, (unsigned long)(written - synced));
      }
      else {
        LOG_ERROR("msync failed with: %s", TRI_last_error());
      }
    }
  }

  return worked;
}
int TRI_SaveCollectionInfo (char const* path, const TRI_col_info_t* const info) { TRI_json_t* json; char* filename; bool ok; filename = TRI_Concatenate2File(path, TRI_COL_PARAMETER_FILE); // create a json info object json = TRI_CreateArrayJson(TRI_UNKNOWN_MEM_ZONE); if (json == NULL) { // out of memory LOG_ERROR("cannot save info block '%s': out of memory", filename); TRI_FreeString(TRI_CORE_MEM_ZONE, filename); return TRI_ERROR_OUT_OF_MEMORY; } TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "version", TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, info->_version)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "type", TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, info->_type)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "cid", TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, info->_cid)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "deleted", TRI_CreateBooleanJson(TRI_UNKNOWN_MEM_ZONE, info->_deleted)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "maximalSize", TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, info->_maximalSize)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "name", TRI_CreateStringCopyJson(TRI_UNKNOWN_MEM_ZONE, info->_name)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "isVolatile", TRI_CreateBooleanJson(TRI_UNKNOWN_MEM_ZONE, info->_isVolatile)); TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "waitForSync", TRI_CreateBooleanJson(TRI_UNKNOWN_MEM_ZONE, info->_waitForSync)); if (info->_keyOptions) { TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "keyOptions", TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, info->_keyOptions)); } // save json info to file ok = TRI_SaveJson(filename, json); TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json); if (! ok) { LOG_ERROR("cannot save info block '%s': '%s'", filename, TRI_last_error()); TRI_FreeString(TRI_CORE_MEM_ZONE, filename); return TRI_errno(); } TRI_FreeString(TRI_CORE_MEM_ZONE, filename); return TRI_ERROR_NO_ERROR; }
////////////////////////////////////////////////////////////////////////////////
/// @brief reads a ruby file and executes its contents in the given mruby
/// state
///
/// returns true if the file was read and executed successfully
////////////////////////////////////////////////////////////////////////////////

bool TRI_ExecuteRubyFile (mrb_state* mrb, char const* filename) {
  bool ok;
  char* content;
  mrb_value result;

  content = TRI_SlurpFile(TRI_UNKNOWN_MEM_ZONE, filename, NULL);

  if (content == NULL) {
    // BUGFIX: log message read "cannot loaded ruby file"
    LOG_TRACE("cannot load ruby file '%s': %s", filename, TRI_last_error());
    return false;
  }

  ok = TRI_ExecuteRubyString(mrb, content, filename, false, &result);

  TRI_FreeString(TRI_UNKNOWN_MEM_ZONE, content);

  return ok;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief callback executed when a datafile is finally dropped: renames a
/// physical datafile to "deleted-<fid>.db", closes it, optionally wipes it
/// (and its companion ".dead" file) from disk and frees the struct
////////////////////////////////////////////////////////////////////////////////

static void DropDatafileCallback (TRI_datafile_t* datafile, void* data) {
  TRI_primary_collection_t* primary;
  TRI_voc_fid_t fid;
  char* filename;
  char* name;
  char* number;
  char* copy;
  bool ok;

  primary = data;
  fid = datafile->_fid;
  copy = NULL;

  // construct the target name "deleted-<fid>.db" in the collection directory
  number   = TRI_StringUInt64(fid);
  name     = TRI_Concatenate3String("deleted-", number, ".db");
  filename = TRI_Concatenate2File(primary->base._directory, name);

  TRI_FreeString(TRI_CORE_MEM_ZONE, number);
  TRI_FreeString(TRI_CORE_MEM_ZONE, name);

  if (datafile->isPhysical(datafile)) {
    // keep a copy of the original name; needed for logging and for locating
    // the companion ".dead" file after the rename changed datafile->_filename
    copy = TRI_DuplicateStringZ(TRI_CORE_MEM_ZONE, datafile->_filename);

    ok = TRI_RenameDatafile(datafile, filename);

    if (! ok) {
      LOG_ERROR("cannot rename obsolete datafile '%s' to '%s': %s", copy, filename, TRI_last_error());
    }
  }

  LOG_DEBUG("finished compacting datafile '%s'", datafile->getName(datafile));

  ok = TRI_CloseDatafile(datafile);

  if (! ok) {
    LOG_ERROR("cannot close obsolete datafile '%s': %s", datafile->getName(datafile), TRI_last_error());
  }
  else if (datafile->isPhysical(datafile)) {
    // only unlink from disk when configured to do so
    if (primary->base._vocbase->_settings.removeOnCompacted) {
      int res;

      LOG_DEBUG("wiping compacted datafile from disk");

      res = TRI_UnlinkFile(filename);

      if (res != TRI_ERROR_NO_ERROR) {
        LOG_ERROR("cannot wipe obsolete datafile '%s': %s", datafile->getName(datafile), TRI_last_error());
      }

      // check for .dead files
      if (copy != NULL) {
        // remove .dead file for datafile
        char* deadfile = TRI_Concatenate2String(copy, ".dead");

        if (deadfile != NULL) {
          // check if .dead file exists, then remove it
          if (TRI_ExistsFile(deadfile)) {
            TRI_UnlinkFile(deadfile);
          }

          TRI_FreeString(TRI_CORE_MEM_ZONE, deadfile);
        }
      }
    }
  }

  TRI_FreeDatafile(datafile);
  TRI_FreeString(TRI_CORE_MEM_ZONE, filename);

  if (copy != NULL) {
    TRI_FreeString(TRI_CORE_MEM_ZONE, copy);
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief datafile iterator callback for compaction: copies all still-alive
/// markers of the datafile being compacted into the collection's compactor
/// files and updates master pointers and datafile statistics
///
/// returns true to continue the iteration, false to abort it
////////////////////////////////////////////////////////////////////////////////

static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafile_t* datafile, bool journal) {
  union { TRI_doc_mptr_t const* c; TRI_doc_mptr_t* v; } cnv;
  TRI_df_marker_t* result;
  TRI_doc_datafile_info_t* dfi;
  TRI_doc_mptr_t const* found;
  TRI_sim_collection_t* sim;
  TRI_voc_fid_t fid;
  bool deleted;
  int res;

  sim = data;

  // new or updated document
  if (marker->_type == TRI_DOC_MARKER_DOCUMENT || marker->_type == TRI_DOC_MARKER_EDGE) {
    TRI_doc_document_marker_t const* d;
    size_t markerSize;

    if (marker->_type == TRI_DOC_MARKER_DOCUMENT) {
      markerSize = sizeof(TRI_doc_document_marker_t);
    }
    else if (marker->_type == TRI_DOC_MARKER_EDGE) {
      markerSize = sizeof(TRI_doc_edge_marker_t);
    }
    else {
      // unreachable because of the enclosing condition; kept as a guard
      LOG_FATAL("unknown marker type %d", (int) marker->_type);
      exit(EXIT_FAILURE);
    }

    d = (TRI_doc_document_marker_t const*) marker;

    // check if the document is still active
    TRI_READ_LOCK_DOCUMENTS_INDEXES_SIM_COLLECTION(sim);

    found = TRI_LookupByKeyAssociativePointer(&sim->_primaryIndex, &d->_did);
    deleted = found == NULL || found->_deletion != 0;

    TRI_READ_UNLOCK_DOCUMENTS_INDEXES_SIM_COLLECTION(sim);

    if (deleted) {
      LOG_TRACE("found a stale document: %lu", d->_did);
      return true;
    }

    // write to compactor files; fid receives the compactor file's id
    res = CopyDocument(sim, marker, &result, &fid);

    if (res != TRI_ERROR_NO_ERROR) {
      // BUGFIX: format string was missing the "%s" placeholder, so the error
      // text was never printed
      LOG_FATAL("cannot write compactor file: %s", TRI_last_error());
      return false;
    }

    // re-check if the document is still active after the copy
    TRI_READ_LOCK_DOCUMENTS_INDEXES_SIM_COLLECTION(sim);

    found = TRI_LookupByKeyAssociativePointer(&sim->_primaryIndex, &d->_did);
    deleted = found == NULL || found->_deletion != 0;

    TRI_READ_UNLOCK_DOCUMENTS_INDEXES_SIM_COLLECTION(sim);

    // update datafile
    TRI_WRITE_LOCK_DATAFILES_SIM_COLLECTION(sim);

    dfi = TRI_FindDatafileInfoDocCollection(&sim->base, fid);

    if (deleted) {
      dfi->_numberDead += 1;
      dfi->_sizeDead += marker->_size - markerSize;

      LOG_DEBUG("found a stale document after copying: %lu", d->_did);
      TRI_WRITE_UNLOCK_DATAFILES_SIM_COLLECTION(sim);

      return true;
    }

    cnv.c = found;

    // BUGFIX: the master pointer must reference the fid of the COMPACTOR file
    // the marker was copied into (returned by CopyDocument), not the fid of
    // the datafile being compacted away
    cnv.v->_fid = fid;
    cnv.v->_data = result;
    cnv.v->_document._data.data = ((char*) cnv.v->_data) + markerSize;

    // update datafile info
    dfi->_numberAlive += 1;
    dfi->_sizeAlive += marker->_size - markerSize;

    TRI_WRITE_UNLOCK_DATAFILES_SIM_COLLECTION(sim);
  }

  // deletion
  else if (marker->_type == TRI_DOC_MARKER_DELETION) {
    // TODO: remove TRI_doc_deletion_marker_t from file

    // write to compactor files
    res = CopyDocument(sim, marker, &result, &fid);

    if (res != TRI_ERROR_NO_ERROR) {
      // BUGFIX: format string was missing the "%s" placeholder
      LOG_FATAL("cannot write compactor file: %s", TRI_last_error());
      return false;
    }

    // update datafile info
    TRI_WRITE_LOCK_DATAFILES_SIM_COLLECTION(sim);

    dfi = TRI_FindDatafileInfoDocCollection(&sim->base, fid);
    dfi->_numberDeletion += 1;

    TRI_WRITE_UNLOCK_DATAFILES_SIM_COLLECTION(sim);
  }

  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a new collection directory under path and initialises the
/// collection struct (allocating it if collection is NULL)
///
/// returns the collection or NULL on error (errno is set)
////////////////////////////////////////////////////////////////////////////////

TRI_collection_t* TRI_CreateCollection (TRI_vocbase_t* vocbase, TRI_collection_t* collection, char const* path, const TRI_col_info_t* const parameter) {
  char* filename;

  // sanity check: a datafile must at least hold a header and a footer marker
  if (sizeof(TRI_df_header_marker_t) + sizeof(TRI_df_footer_marker_t) > parameter->_maximalSize) {
    TRI_set_errno(TRI_ERROR_ARANGO_DATAFILE_FULL);

    LOG_ERROR("cannot create datafile '%s' in '%s', maximal size '%u' is too small",
              parameter->_name,
              path,
              (unsigned int) parameter->_maximalSize);

    return NULL;
  }

  if (! TRI_IsDirectory(path)) {
    TRI_set_errno(TRI_ERROR_ARANGO_WRONG_VOCBASE_PATH);

    LOG_ERROR("cannot create collection '%s', path is not a directory", path);

    return NULL;
  }

  filename = TRI_GetDirectoryCollection(path, parameter);

  if (filename == NULL) {
    // BUGFIX: the message previously read "cannot create collection '%s'",
    // implying the argument was the collection name, but TRI_last_error()
    // is what is actually passed
    LOG_ERROR("cannot create collection: %s", TRI_last_error());
    return NULL;
  }

  // directory must not exist
  if (TRI_ExistsFile(filename)) {
    TRI_set_errno(TRI_ERROR_ARANGO_COLLECTION_DIRECTORY_ALREADY_EXISTS);

    LOG_ERROR("cannot create collection '%s' in '%s', directory already exists",
              parameter->_name,
              filename);

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
    return NULL;
  }

  // create directory
  if (! TRI_CreateDirectory(filename)) {
    LOG_ERROR("cannot create collection '%s' in '%s' as '%s': %s",
              parameter->_name,
              path,
              filename,
              TRI_last_error());

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
    return NULL;
  }

  // create collection structure
  if (collection == NULL) {
    collection = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_collection_t), false);

    if (collection == NULL) {
      TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
      LOG_ERROR("cannot create collection '%s', out of memory", path);
      return NULL;
    }
  }

  // we are passing filename to this struct, so we must not free it if you use the struct later
  InitCollection(vocbase, collection, filename, parameter);

  /* TODO (PANAIA):
     1) the parameter file if it exists must be removed
     2) if collection */

  return collection;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief scans the collection directory, opens all datafiles/journals/
/// compactors, records index files, converts left-over sealed journals into
/// datafiles and stores the resulting vectors in the collection
///
/// returns true on success; on failure all opened datafiles are closed again
////////////////////////////////////////////////////////////////////////////////

static bool CheckCollection (TRI_collection_t* collection) {
  TRI_datafile_t* datafile;
  TRI_vector_pointer_t all;
  TRI_vector_pointer_t compactors;
  TRI_vector_pointer_t datafiles;
  TRI_vector_pointer_t journals;
  TRI_vector_pointer_t sealed;
  TRI_vector_string_t files;
  bool stop;
  regex_t re;
  size_t i;
  size_t n;

  stop = false;

  // check files within the directory
  files = TRI_FilesDirectory(collection->_directory);
  n = files._length;

  regcomp(&re, "^(journal|datafile|index|compactor)-([0-9][0-9]*)\\.(db|json)$", REG_EXTENDED);

  TRI_InitVectorPointer(&journals, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&compactors, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&datafiles, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&sealed, TRI_UNKNOWN_MEM_ZONE);
  TRI_InitVectorPointer(&all, TRI_UNKNOWN_MEM_ZONE);

  for (i = 0; i < n; ++i) {
    char const* file = files._buffer[i];
    regmatch_t matches[4];

    if (regexec(&re, file, sizeof(matches) / sizeof(matches[0]), matches, 0) == 0) {
      char const* first = file + matches[1].rm_so;
      size_t firstLen = matches[1].rm_eo - matches[1].rm_so;

      char const* third = file + matches[3].rm_so;
      size_t thirdLen = matches[3].rm_eo - matches[3].rm_so;

      // .............................................................................
      // file is an index, just store the filename
      // .............................................................................

      if (TRI_EqualString2("index", first, firstLen) && TRI_EqualString2("json", third, thirdLen)) {
        char* filename;

        // ownership of filename moves into _indexFiles
        filename = TRI_Concatenate2File(collection->_directory, file);
        TRI_PushBackVectorString(&collection->_indexFiles, filename);
      }

      // .............................................................................
      // file is a journal or datafile, open the datafile
      // .............................................................................

      else if (TRI_EqualString2("db", third, thirdLen)) {
        char* filename;
        char* ptr;
        TRI_col_header_marker_t* cm;

        filename = TRI_Concatenate2File(collection->_directory, file);
        datafile = TRI_OpenDatafile(filename);

        if (datafile == NULL) {
          collection->_lastError = TRI_errno();
          stop = true;

          LOG_ERROR("cannot open datafile '%s': %s", filename, TRI_last_error());

          // BUGFIX: filename was leaked on this error path
          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          break;
        }

        TRI_PushBackVectorPointer(&all, datafile);

        // check the document header
        ptr = datafile->_data;
        ptr += TRI_DF_ALIGN_BLOCK(sizeof(TRI_df_header_marker_t));
        cm = (TRI_col_header_marker_t*) ptr;

        if (cm->base._type != TRI_COL_MARKER_HEADER) {
          LOG_ERROR("collection header mismatch in file '%s', expected TRI_COL_MARKER_HEADER, found %lu",
                    filename,
                    (unsigned long) cm->base._type);

          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          stop = true;
          break;
        }

        if (cm->_cid != collection->_info._cid) {
          LOG_ERROR("collection identifier mismatch, expected %llu, found %llu",
                    (unsigned long long) collection->_info._cid,
                    (unsigned long long) cm->_cid);

          TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          stop = true;
          break;
        }

        // file is a journal
        if (TRI_EqualString2("journal", first, firstLen)) {
          if (datafile->_isSealed) {
            LOG_WARNING("strange, journal '%s' is already sealed; must be a left over; will use it as datafile", filename);
            TRI_PushBackVectorPointer(&sealed, datafile);
          }
          else {
            TRI_PushBackVectorPointer(&journals, datafile);
          }
        }

        // file is a compactor file
        else if (TRI_EqualString2("compactor", first, firstLen)) {
          if (datafile->_isSealed) {
            LOG_WARNING("strange, compactor journal '%s' is already sealed; must be a left over; will use it as datafile", filename);
            TRI_PushBackVectorPointer(&sealed, datafile);
          }
          else {
            TRI_PushBackVectorPointer(&compactors, datafile);
          }
        }

        // file is a datafile
        else if (TRI_EqualString2("datafile", first, firstLen)) {
          if (! datafile->_isSealed) {
            LOG_ERROR("datafile '%s' is not sealed, this should never happen", filename);

            collection->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);

            // BUGFIX: filename was leaked on this error path
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
            stop = true;
            break;
          }
          else {
            TRI_PushBackVectorPointer(&datafiles, datafile);
          }
        }
        else {
          LOG_ERROR("unknown datafile '%s'", file);
        }

        TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
      }
      else {
        LOG_ERROR("unknown datafile '%s'", file);
      }
    }
  }

  TRI_DestroyVectorString(&files);
  regfree(&re);

  // convert the sealed journals into datafiles
  if (! stop) {
    n = sealed._length;

    for (i = 0; i < n; ++i) {
      char* number;
      char* dname;
      char* filename;
      bool ok;

      datafile = sealed._buffer[i];

      number = TRI_StringUInt64(datafile->_fid);
      dname = TRI_Concatenate3String("datafile-", number, ".db");
      filename = TRI_Concatenate2File(collection->_directory, dname);

      TRI_FreeString(TRI_CORE_MEM_ZONE, dname);
      TRI_FreeString(TRI_CORE_MEM_ZONE, number);

      ok = TRI_RenameDatafile(datafile, filename);

      if (ok) {
        TRI_PushBackVectorPointer(&datafiles, datafile);
        LOG_DEBUG("renamed sealed journal to '%s'", filename);
      }
      else {
        collection->_lastError = datafile->_lastError;
        stop = true;
        LOG_ERROR("cannot rename sealed log-file to %s, this should not happen: %s", filename, TRI_last_error());

        // BUGFIX: filename was leaked when breaking out of the loop
        TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
        break;
      }

      TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
    }
  }

  TRI_DestroyVectorPointer(&sealed);

  // stop if necessary
  if (stop) {
    n = all._length;

    // close and free all datafiles opened so far
    for (i = 0; i < n; ++i) {
      datafile = all._buffer[i];

      LOG_TRACE("closing datafile '%s'", datafile->_filename);

      TRI_CloseDatafile(datafile);
      TRI_FreeDatafile(datafile);
    }

    TRI_DestroyVectorPointer(&all);
    TRI_DestroyVectorPointer(&datafiles);

    // BUGFIX: the journals and compactors vectors were leaked on the error
    // path (their elements were already freed via the "all" vector)
    TRI_DestroyVectorPointer(&journals);
    TRI_DestroyVectorPointer(&compactors);

    return false;
  }

  TRI_DestroyVectorPointer(&all);

  // add the datafiles and journals; ownership of the vectors moves into the
  // collection
  collection->_datafiles = datafiles;
  collection->_journals = journals;
  collection->_compactors = compactors;

  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a journal (or compactor journal) for the collection, writes
/// the collection header marker into it and registers it in the appropriate
/// vector
///
/// returns the journal or NULL on error (collection state/lastError are set)
////////////////////////////////////////////////////////////////////////////////

static TRI_datafile_t* CreateJournal (TRI_primary_collection_t* primary, bool compactor) {
  TRI_col_header_marker_t cm;
  TRI_collection_t* collection;
  TRI_datafile_t* journal;
  TRI_df_marker_t* position;
  int res;
  char* filename;

  collection = &primary->base;

  if (collection->_info._isVolatile) {
    // in-memory collection
    filename = NULL;
  }
  else {
    char* jname;
    char* number;

    // construct a suitable filename
    number = TRI_StringUInt64(TRI_NewTickVocBase());

    // BUGFIX: the prefixes were swapped here, so a compactor file was created
    // under the name "journal-..." and a journal under "compactor-...". The
    // rename block below and the vector selection at the end use the correct
    // mapping, which is now applied here as well
    if (compactor) {
      jname = TRI_Concatenate3String("compactor-", number, ".db");
    }
    else {
      jname = TRI_Concatenate3String("journal-", number, ".db");
    }

    filename = TRI_Concatenate2File(collection->_directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);
  }

  // create journal file
  journal = TRI_CreateDatafile(filename, collection->_info._maximalSize);

  if (filename != NULL) {
    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  if (journal == NULL) {
    if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
      collection->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
      collection->_state = TRI_COL_STATE_READ;
    }
    else {
      collection->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
      collection->_state = TRI_COL_STATE_WRITE_ERROR;
    }

    return NULL;
  }

  LOG_TRACE("created a new primary journal '%s'", journal->getName(journal));

  if (journal->isPhysical(journal)) {
    char* jname;
    char* number;
    bool ok;

    // and use the correct name, now based on the datafile's fid
    number = TRI_StringUInt64(journal->_fid);

    if (compactor) {
      jname = TRI_Concatenate3String("compactor-", number, ".db");
    }
    else {
      jname = TRI_Concatenate3String("journal-", number, ".db");
    }

    filename = TRI_Concatenate2File(collection->_directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);

    ok = TRI_RenameDatafile(journal, filename);

    if (! ok) {
      // non-fatal: the journal keeps its original name
      LOG_WARNING("failed to rename the journal to '%s': %s", filename, TRI_last_error());
    }
    else {
      LOG_TRACE("renamed journal to '%s'", filename);
    }

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  // create a collection header
  res = TRI_ReserveElementDatafile(journal, sizeof(TRI_col_header_marker_t), &position);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  memset(&cm, 0, sizeof(cm));

  cm.base._size = sizeof(TRI_col_header_marker_t);
  cm.base._type = TRI_COL_MARKER_HEADER;
  cm.base._tick = TRI_NewTickVocBase();
  cm._cid = collection->_info._cid;

  TRI_FillCrcMarkerDatafile(journal, &cm.base, sizeof(cm), 0, 0, 0, 0);

  res = TRI_WriteElementDatafile(journal, position, &cm.base, sizeof(cm), 0, 0, 0, 0, true);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  // that's it
  if (compactor) {
    TRI_PushBackVectorPointer(&collection->_compactors, journal);
  }
  else {
    TRI_PushBackVectorPointer(&collection->_journals, journal);
  }

  return journal;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief syncs the not-yet-synced region of every physical journal of the
/// collection; returns true iff at least one sync was attempted
////////////////////////////////////////////////////////////////////////////////

static bool CheckSyncDocumentCollection (TRI_document_collection_t* doc) {
  TRI_collection_t* base;
  TRI_datafile_t* journal;
  bool ok;
  bool worked;
  char const* synced;
  char* written;
  size_t i;
  size_t n;

  worked = false;
  base = &doc->base.base;

  // .............................................................................
  // the only thread MODIFYING the _journals variable is this thread,
  // therefore no locking is required to access the _journals
  // .............................................................................

  n = base->_journals._length;

  for (i = 0; i < n; ++i) {
    journal = base->_journals._buffer[i];

    // we only need to care about physical datafiles
    if (! journal->isPhysical(journal)) {
      // anonymous regions do not need to be synced
      continue;
    }

    // snapshot the sync/write positions under the journal entries lock
    TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(doc);

    synced = journal->_synced;
    written = journal->_written;

    TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(doc);

    if (synced < written) {
      worked = true;

      // sync the region [synced, written) outside the lock
      ok = journal->sync(journal, synced, written);

      TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(doc);

      if (ok) {
        journal->_synced = written;
      }
      else {
        // mark the datafile as broken so writers stop using it
        journal->_state = TRI_DF_STATE_WRITE_ERROR;
      }

      // wake up any threads waiting for the sync result
      TRI_BROADCAST_JOURNAL_ENTRIES_DOC_COLLECTION(doc);
      TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(doc);

      if (ok) {
        LOG_TRACE("msync succeeded %p, size %lu", synced, (unsigned long)(written - synced));
      }
      else {
        LOG_ERROR("msync failed with: %s", TRI_last_error());
      }
    }
  }

  return worked;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes a single HTTP request and drives the connection state
/// machine (connect -> write -> read header/body/chunks) until the request is
/// finished or _requestTimeout has elapsed
///
/// returns the result object; ownership passes to the caller
////////////////////////////////////////////////////////////////////////////////

SimpleHttpResult* SimpleHttpClient::request (rest::HttpRequest::HttpRequestType method,
                                             const string& location,
                                             const char* body,
                                             size_t bodyLength,
                                             const map<string, string>& headerFields) {
  // only one request may be in flight per client
  assert(_result == 0);

  _result = new SimpleHttpResult;
  _errorMessage = "";

  // set body to all connections
  setRequest(method, location, body, bodyLength, headerFields);

  double endTime = now() + _requestTimeout;
  double remainingTime = _requestTimeout;

  while (isWorking() && remainingTime > 0.0) {
    switch (_state) {
      case (IN_CONNECT): {
        handleConnect();
        break;
      }

      case (IN_WRITE): {
        size_t bytesWritten = 0;

        TRI_set_errno(TRI_ERROR_NO_ERROR);
        // write the unsent tail of the request buffer
        if (! _connection->handleWrite(remainingTime,
                                       (void*) (_writeBuffer.c_str() + _written),
                                       _writeBuffer.length() - _written,
                                       &bytesWritten)) {
          setErrorMessage(TRI_last_error(), false);
          this->close();
        }
        else {
          _written += bytesWritten;

          // request fully sent, start reading the response
          if (_written == _writeBuffer.length()) {
            _state = IN_READ_HEADER;
          }
        }
        break;
      }

      case (IN_READ_HEADER):
      case (IN_READ_BODY):
      case (IN_READ_CHUNKED_HEADER):
      case (IN_READ_CHUNKED_BODY): {
        TRI_set_errno(TRI_ERROR_NO_ERROR);

        if (_connection->handleRead(remainingTime, _readBuffer)) {
          // dispatch on the current read sub-state
          switch (_state) {
            case (IN_READ_HEADER):
              readHeader();
              break;
            case (IN_READ_BODY):
              readBody();
              break;
            case (IN_READ_CHUNKED_HEADER):
              readChunkedHeader();
              break;
            case (IN_READ_CHUNKED_BODY):
              readChunkedBody();
              break;
            default:
              break;
          }
        }
        else {
          setErrorMessage(TRI_last_error(), false);
          this->close();
        }
        break;
      }

      default:
        break;
    }

    remainingTime = endTime - now();
  }

  // still working after the loop means the deadline was hit
  if (isWorking() && _errorMessage == "" ) {
    setErrorMessage("Request timeout reached");
  }

  // set result type in getResult()
  SimpleHttpResult* result = getResult();

  _result = 0;

  return result;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a new journal for the collection: the file is created under
/// a temporary name, the collection header is written, and only then is the
/// file renamed to its final "journal-<fid>.db" name, so that half-written
/// journals are recognisable by their "temp-" prefix
///
/// returns the journal or NULL on error (collection state/lastError are set)
////////////////////////////////////////////////////////////////////////////////

static TRI_datafile_t* CreateJournal (TRI_primary_collection_t* primary, TRI_voc_size_t maximalSize) {
  TRI_col_header_marker_t cm;
  TRI_collection_t* collection;
  TRI_datafile_t* journal;
  TRI_df_marker_t* position;
  TRI_voc_fid_t fid;
  int res;

  collection = &primary->base;

  // pre-assign the fid from the global tick, so file name and datafile id agree
  fid = (TRI_voc_fid_t) TRI_NewTickServer();

  if (collection->_info._isVolatile) {
    // in-memory collection
    journal = TRI_CreateDatafile(NULL, fid, maximalSize);
  }
  else {
    char* jname;
    char* number;
    char* filename;

    // construct a suitable filename (which is temporary at the beginning)
    number = TRI_StringUInt64(fid);
    jname = TRI_Concatenate3String("temp-", number, ".db");
    filename = TRI_Concatenate2File(collection->_directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);

    journal = TRI_CreateDatafile(filename, fid, maximalSize);

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  if (journal == NULL) {
    if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
      collection->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
      collection->_state = TRI_COL_STATE_READ;
    }
    else {
      collection->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
      collection->_state = TRI_COL_STATE_WRITE_ERROR;
    }

    return NULL;
  }

  LOG_TRACE("created new journal '%s'", journal->getName(journal));

  // create a collection header, still in the temporary file
  res = TRI_ReserveElementDatafile(journal, sizeof(TRI_col_header_marker_t), &position, maximalSize);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  TRI_InitMarker((char*) &cm, TRI_COL_MARKER_HEADER, sizeof(TRI_col_header_marker_t));
  cm.base._tick = (TRI_voc_tick_t) fid;
  cm._type = (TRI_col_type_t) collection->_info._type;
  cm._cid = collection->_info._cid;

  res = TRI_WriteCrcElementDatafile(journal, position, &cm.base, sizeof(cm), true);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  assert(fid == journal->_fid);

  // if a physical file, we can rename it from the temporary name to the correct name
  if (journal->isPhysical(journal)) {
    char* jname;
    char* number;
    char* filename;
    bool ok;

    // and use the correct name
    number = TRI_StringUInt64(journal->_fid);
    jname = TRI_Concatenate3String("journal-", number, ".db");
    filename = TRI_Concatenate2File(collection->_directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);

    ok = TRI_RenameDatafile(journal, filename);

    if (! ok) {
      // a journal stuck under its temporary name is unusable
      LOG_ERROR("failed to rename the journal to '%s': %s", filename, TRI_last_error());
      TRI_FreeDatafile(journal);
      TRI_FreeString(TRI_CORE_MEM_ZONE, filename);

      return NULL;
    }
    else {
      LOG_TRACE("renamed journal from %s to '%s'", journal->getName(journal), filename);
    }

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  TRI_PushBackVectorPointer(&collection->_journals, journal);

  return journal;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a compaction file ("compaction-<fid>.db") for the
/// collection, removing any stale file of the same name first, and writes the
/// collection header marker into it
///
/// note: unlike CreateJournal, the resulting datafile is NOT registered in
/// any collection vector; the caller manages it
///
/// returns the compactor or NULL on error (collection state/lastError are set)
////////////////////////////////////////////////////////////////////////////////

static TRI_datafile_t* CreateCompactor (TRI_primary_collection_t* primary, TRI_voc_fid_t fid, TRI_voc_size_t maximalSize) {
  TRI_col_header_marker_t cm;
  TRI_collection_t* collection;
  TRI_datafile_t* journal;
  TRI_df_marker_t* position;
  int res;

  collection = &primary->base;

  if (collection->_info._isVolatile) {
    // in-memory collection
    journal = TRI_CreateDatafile(NULL, fid, maximalSize);
  }
  else {
    char* jname;
    char* number;
    char* filename;

    number = TRI_StringUInt64(fid);
    jname = TRI_Concatenate3String("compaction-", number, ".db");
    filename = TRI_Concatenate2File(collection->_directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);

    if (TRI_ExistsFile(filename)) {
      // remove any existing temporary file first
      TRI_UnlinkFile(filename);
    }

    journal = TRI_CreateDatafile(filename, fid, maximalSize);

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  if (journal == NULL) {
    if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
      collection->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
      collection->_state = TRI_COL_STATE_READ;
    }
    else {
      collection->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
      collection->_state = TRI_COL_STATE_WRITE_ERROR;
    }

    return NULL;
  }

  LOG_TRACE("created new compactor '%s'", journal->getName(journal));

  // create a collection header, still in the temporary file
  res = TRI_ReserveElementDatafile(journal, sizeof(TRI_col_header_marker_t), &position, maximalSize);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in compactor '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  TRI_InitMarker((char*) &cm, TRI_COL_MARKER_HEADER, sizeof(TRI_col_header_marker_t));
  cm.base._tick = (TRI_voc_tick_t) fid;
  cm._type = (TRI_col_type_t) collection->_info._type;
  cm._cid = collection->_info._cid;

  // NOTE: the final argument differs from CreateJournal (false, i.e. no
  // forced sync here)
  res = TRI_WriteCrcElementDatafile(journal, position, &cm.base, sizeof(cm), false);

  if (res != TRI_ERROR_NO_ERROR) {
    collection->_lastError = journal->_lastError;
    LOG_ERROR("cannot create document header in compactor '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return NULL;
  }

  assert(fid == journal->_fid);

  return journal;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief checks the status of an external process, optionally blocking until
/// it has terminated; finished processes are removed from the global list
///
/// returns a status struct with _status/_exitStatus and, on problems, an
/// _errorMessage
////////////////////////////////////////////////////////////////////////////////

TRI_external_status_t TRI_CheckExternalProcess (TRI_external_id_t pid, bool wait) {
  TRI_external_status_t status;
  TRI_external_t* external = nullptr;   // Just to please the compiler
  size_t i;

  TRI_LockMutex(&ExternalProcessesLock);

  status._status = TRI_EXT_NOT_FOUND;
  status._exitStatus = 0;

  // locate the process in the global list
  for (i = 0;  i < ExternalProcesses._length;  ++i) {
    external = static_cast<TRI_external_t*>(TRI_AtVectorPointer(&ExternalProcesses, i));

    if (external->_pid == pid._pid) {
      break;
    }
  }

  if (i == ExternalProcesses._length) {
    TRI_UnlockMutex(&ExternalProcessesLock);
    status._errorMessage =
      std::string("the pid you're looking for is not in our list: ") +
      triagens::basics::StringUtils::itoa(static_cast<int64_t>(pid._pid));
    status._status = TRI_EXT_NOT_FOUND;
    LOG_WARNING("checkExternal: pid not found: %d", (int) pid._pid);
    return status;
  }

  if (external->_status == TRI_EXT_RUNNING || external->_status == TRI_EXT_STOPPED) {
#ifndef _WIN32
    TRI_pid_t res;
    int opts;
    int loc = 0;

    if (wait) {
      opts = WUNTRACED;
    }
    else {
      opts = WNOHANG | WUNTRACED;
    }

    res = waitpid(external->_pid, &loc, opts);

    if (res == 0) {
      if (wait) {
        // waitpid(..., WUNTRACED) should not return 0 when asked to wait
        status._errorMessage =
          std::string("waitpid returned 0 for pid while it shouldn't ") +
          triagens::basics::StringUtils::itoa(external->_pid);

        if (WIFEXITED(loc)) {
          external->_status = TRI_EXT_TERMINATED;
          external->_exitStatus = WEXITSTATUS(loc);
        }
        else if (WIFSIGNALED(loc)) {
          external->_status = TRI_EXT_ABORTED;
          external->_exitStatus = WTERMSIG(loc);
        }
        else if (WIFSTOPPED(loc)) {
          external->_status = TRI_EXT_STOPPED;
          external->_exitStatus = 0;
        }
        else {
          external->_status = TRI_EXT_ABORTED;
          external->_exitStatus = 0;
        }
      }
      else {
        // child still running, nothing to report
        external->_exitStatus = 0;
      }
    }
    else if (res == -1) {
      TRI_set_errno(TRI_ERROR_SYS_ERROR);
      LOG_WARNING("waitpid returned error for pid %d (%d): %s",
                  (int) external->_pid, (int) wait, TRI_last_error());
      status._errorMessage =
        std::string("waitpid returned error for pid ") +
        triagens::basics::StringUtils::itoa(external->_pid) +
        std::string(": ") +
        std::string(TRI_last_error());
    }
    else if (static_cast<TRI_pid_t>(external->_pid) == static_cast<TRI_pid_t>(res)) {
      // the child's state changed; decode it
      if (WIFEXITED(loc)) {
        external->_status = TRI_EXT_TERMINATED;
        external->_exitStatus = WEXITSTATUS(loc);
      }
      else if (WIFSIGNALED(loc)) {
        external->_status = TRI_EXT_ABORTED;
        external->_exitStatus = WTERMSIG(loc);
      }
      else if (WIFSTOPPED(loc)) {
        external->_status = TRI_EXT_STOPPED;
        external->_exitStatus = 0;
      }
      else {
        external->_status = TRI_EXT_ABORTED;
        external->_exitStatus = 0;
      }
    }
    else {
      LOG_WARNING("unexpected waitpid result for pid %d: %d",
                  (int) external->_pid, (int) res);
      status._errorMessage =
        std::string("unexpected waitpid result for pid ") +
        triagens::basics::StringUtils::itoa(external->_pid) +
        std::string(": ") +
        triagens::basics::StringUtils::itoa(res);
    }
#else
    {
      char windowsErrorBuf[256];
      bool wantGetExitCode = wait;

      if (wait) {
        DWORD result;
        result = WaitForSingleObject(external->_process, INFINITE);
        if (result == WAIT_FAILED) {
          FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
                        windowsErrorBuf, sizeof(windowsErrorBuf), NULL);
          LOG_WARNING("could not wait for subprocess with PID '%ud': %s",
                      (unsigned int) external->_pid, windowsErrorBuf);
          status._errorMessage =
            std::string("could not wait for subprocess with PID '") +
            triagens::basics::StringUtils::itoa(static_cast<int64_t>(external->_pid)) +
            std::string("'") +
            windowsErrorBuf;
          status._exitStatus = GetLastError();
        }
      }
      else {
        DWORD result;
        result = WaitForSingleObject(external->_process, 0);
        switch (result) {
          case WAIT_ABANDONED:
            wantGetExitCode = true;
            LOG_WARNING("WAIT_ABANDONED while waiting for subprocess with PID '%ud'",
                        (unsigned int) external->_pid);
            break;
          case WAIT_OBJECT_0:
            /// this seems to be the exit case - want getExitCodeProcess here.
            wantGetExitCode = true;
            break;
          case WAIT_TIMEOUT:
            // success - everything went well.
            external->_exitStatus = 0;
            break;
          case WAIT_FAILED:
            FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
                          windowsErrorBuf, sizeof(windowsErrorBuf), NULL);
            LOG_WARNING("could not wait for subprocess with PID '%ud': %s",
                        (unsigned int) external->_pid, windowsErrorBuf);
            status._errorMessage =
              std::string("could not wait for subprocess with PID '") +
              triagens::basics::StringUtils::itoa(static_cast<int64_t>(external->_pid)) +
              std::string("'") +
              windowsErrorBuf;
            status._exitStatus = GetLastError();
            // BUGFIX: this case previously fell through into default, which
            // overwrote the diagnosis with "unexpected status" and tried to
            // fetch an exit code although the wait itself had failed
            break;
          default:
            wantGetExitCode = true;
            LOG_WARNING("unexpected status while waiting for subprocess with PID '%ud'",
                        (unsigned int) external->_pid);
        }
      }

      if (wantGetExitCode) {
        DWORD exitCode = STILL_ACTIVE;
        if (!GetExitCodeProcess(external->_process, &exitCode)) {
          LOG_WARNING("exit status could not be determined for PID '%ud'",
                      (unsigned int) external->_pid);
          status._errorMessage =
            std::string("exit status could not be determined for PID '") +
            triagens::basics::StringUtils::itoa(static_cast<int64_t>(external->_pid)) +
            std::string("'");
        }
        else {
          if (exitCode == STILL_ACTIVE) {
            external->_exitStatus = 0;
          }
          else if (exitCode > 255) {
            // this should be one of our signals which we mapped...
            external->_status = TRI_EXT_ABORTED;
            external->_exitStatus = exitCode - 255;
          }
          else {
            external->_status = TRI_EXT_TERMINATED;
            external->_exitStatus = exitCode;
          }
        }
      }
      else {
        external->_status = TRI_EXT_RUNNING;
      }
    }
#endif
  }
  else {
    LOG_WARNING("unexpected process status %d: %d",
                (int) external->_status, (int) external->_exitStatus);
    status._errorMessage =
      std::string("unexpected process status ") +
      triagens::basics::StringUtils::itoa(external->_status) +
      std::string(": ") +
      triagens::basics::StringUtils::itoa(external->_exitStatus);
  }

  status._status = external->_status;
  status._exitStatus = external->_exitStatus;

  // Do we have to free our data?
  if (external->_status != TRI_EXT_RUNNING && external->_status != TRI_EXT_STOPPED) {
    TRI_RemoveVectorPointer(&ExternalProcesses, i);
    FreeExternal(external);
  }

  TRI_UnlockMutex(&ExternalProcessesLock);
  return status;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief datafile-iterator callback that copies still-relevant markers into
/// the compactor file.
///
/// Invoked once per marker of the datafile being compacted. Live documents and
/// edges are copied to the compactor and their master pointers are repointed
/// to the copied marker; stale revisions are skipped. Deletion markers are
/// copied only while @c _keepDeletions is set, and transaction markers are
/// always copied. Datafile statistics in @c context->_dfi are updated along
/// the way.
///
/// @param marker    the marker currently being iterated
/// @param data      opaque pointer, actually a compaction_context_t*
/// @param datafile  the datafile being iterated (unused here; part of the
///                  iterator callback signature)
/// @param journal   whether the datafile is a journal (unused here)
///
/// @return true to continue the iteration; a fatal write error aborts the
///         whole process via LOG_FATAL_AND_EXIT instead of returning false
////////////////////////////////////////////////////////////////////////////////

static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafile_t* datafile, bool journal) {
  TRI_df_marker_t* result;
  TRI_doc_mptr_t const* found;
  TRI_document_collection_t* document;
  TRI_primary_collection_t* primary;
  compaction_context_t* context;
  int res;

  context = data;
  document = context->_document;
  primary = &document->base;

  // new or updated document
  if (marker->_type == TRI_DOC_MARKER_KEY_DOCUMENT || marker->_type == TRI_DOC_MARKER_KEY_EDGE) {
    TRI_doc_document_key_marker_t const* d;
    TRI_doc_mptr_t* found2;
    TRI_voc_key_t key;
    bool deleted;

    d = (TRI_doc_document_key_marker_t const*) marker;
    // the key is stored inline in the marker at the recorded offset
    key = (char*) d + d->_offsetKey;

    // check if the document is still active.
    // first probe under the read lock only: if the revision in the index is
    // newer than this marker (or the key is gone), the marker is stale and
    // we can skip it without ever writing to the compactor
    TRI_READ_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
    found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex, key);
    deleted = (found == NULL || found->_rid > d->_rid);
    TRI_READ_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);

    if (deleted) {
      LOG_TRACE("found a stale document: %s", key);
      return true;
    }

    // once a live document has been copied, later deletion markers must be
    // preserved too, otherwise the document could "resurrect" on reload
    context->_keepDeletions = true;

    // write to compactor files
    res = CopyMarker(document, context->_compactor, marker, &result);

    if (res != TRI_ERROR_NO_ERROR) {
      // TODO: dont fail but recover from this state
      LOG_FATAL_AND_EXIT("cannot write compactor file: %s", TRI_last_error());
    }

    // check if the document is still active.
    // re-check under the write lock: the document may have been removed
    // concurrently between the read-locked probe above and now
    TRI_WRITE_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
    found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex, key);
    deleted = found == NULL;

    if (deleted) {
      // the copy we just made is already dead; account for it as such
      context->_dfi._numberDead += 1;
      context->_dfi._sizeDead += (int64_t) marker->_size;

      TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);

      LOG_DEBUG("found a stale document after copying: %s", key);
      return true;
    }

    found2 = CONST_CAST(found);
    assert(found2->_data != NULL);
    assert(((TRI_df_marker_t*) found2->_data)->_size > 0);

    // the fid might change
    if (found->_fid != context->_compactor->_fid) {
      // update old datafile's info
      TRI_doc_datafile_info_t* dfi = TRI_FindDatafileInfoPrimaryCollection(primary, found->_fid, false);

      if (dfi != NULL) {
        dfi->_numberDead += 1;
        dfi->_sizeDead += (int64_t) marker->_size;
      }

      found2->_fid = context->_compactor->_fid;
    }

    // let marker point to the new position
    found2->_data = result;

    // let _key point to the new key position
    found2->_key = ((char*) result) + (((TRI_doc_document_key_marker_t*) result)->_offsetKey);

    // update datafile info
    context->_dfi._numberAlive += 1;
    context->_dfi._sizeAlive += (int64_t) marker->_size;

    TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
  }

  // deletion marker: copy only while _keepDeletions is set (i.e. after at
  // least one live document was copied from this datafile)
  else if (marker->_type == TRI_DOC_MARKER_KEY_DELETION && context->_keepDeletions) {
    // write to compactor files
    res = CopyMarker(document, context->_compactor, marker, &result);

    if (res != TRI_ERROR_NO_ERROR) {
      LOG_FATAL_AND_EXIT("cannot write compactor file: %s", TRI_last_error());
    }

    // update datafile info
    context->_dfi._numberDeletion++;
  }

  // transaction markers are always preserved
  else if (marker->_type == TRI_DOC_MARKER_BEGIN_TRANSACTION ||
           marker->_type == TRI_DOC_MARKER_COMMIT_TRANSACTION ||
           marker->_type == TRI_DOC_MARKER_ABORT_TRANSACTION ||
           marker->_type == TRI_DOC_MARKER_PREPARE_TRANSACTION) {
    // write to compactor files
    res = CopyMarker(document, context->_compactor, marker, &result);

    if (res != TRI_ERROR_NO_ERROR) {
      LOG_FATAL_AND_EXIT("cannot write compactor file: %s", TRI_last_error());
    }

    context->_dfi._numberTransaction++;
    context->_dfi._sizeTransaction += (int64_t) marker->_size;
  }

  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief closes the journal (or compactor) at the given position and moves
/// the underlying datafile into the collection's list of closed datafiles.
///
/// The datafile is sealed first, then — for physical files — renamed from its
/// "journal-" name to "datafile-<fid>.db". Even when sealing or renaming
/// fails, the datafile is still moved out of the journals/compactors vector
/// into the datafiles vector.
///
/// @param primary    the collection whose journal is closed
/// @param position   index into the journals (or compactors) vector
/// @param compactor  if true, operate on the compactors vector instead of
///                   the journals vector
///
/// @return true on full success, false on error (errno is set when there is
///         no journal at the given position)
////////////////////////////////////////////////////////////////////////////////

static bool CloseJournalPrimaryCollection (TRI_primary_collection_t* primary, size_t position, bool compactor) {
  TRI_collection_t* base = &primary->base;

  // pick the vector to operate on: compactors or regular journals
  TRI_vector_pointer_t* files = compactor ? &base->_compactors : &base->_journals;

  // no journal at this position
  if (files->_length <= position) {
    TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
    return false;
  }

  TRI_datafile_t* df = files->_buffer[position];

  // seal and rename datafile
  int res = TRI_SealDatafile(df);

  if (res != TRI_ERROR_NO_ERROR) {
    LOG_ERROR("failed to seal datafile '%s': %s", df->getName(df), TRI_last_error());

    // even on failure, the datafile is retired into the datafiles list
    TRI_RemoveVectorPointer(files, position);
    TRI_PushBackVectorPointer(&base->_datafiles, df);

    return false;
  }

  if (df->isPhysical(df)) {
    // rename the on-disk file to its final "datafile-<fid>.db" name
    char* fidString = TRI_StringUInt64(df->_fid);
    char* baseName = TRI_Concatenate3String("datafile-", fidString, ".db");
    char* target = TRI_Concatenate2File(base->_directory, baseName);

    TRI_FreeString(TRI_CORE_MEM_ZONE, baseName);
    TRI_FreeString(TRI_CORE_MEM_ZONE, fidString);

    bool renamed = TRI_RenameDatafile(df, target);

    if (! renamed) {
      LOG_ERROR("failed to rename datafile '%s' to '%s': %s", df->getName(df), target, TRI_last_error());

      TRI_RemoveVectorPointer(files, position);
      TRI_PushBackVectorPointer(&base->_datafiles, df);
      TRI_FreeString(TRI_CORE_MEM_ZONE, target);

      return false;
    }

    TRI_FreeString(TRI_CORE_MEM_ZONE, target);

    LOG_TRACE("closed journal '%s'", df->getName(df));
  }

  TRI_RemoveVectorPointer(files, position);
  TRI_PushBackVectorPointer(&base->_datafiles, df);

  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a new journal for a shape collection and writes the
/// collection header marker into it.
///
/// For volatile (in-memory) collections, no filename is used. Otherwise a
/// temporary "journal-<tick>.db" file is created and then renamed to carry
/// the datafile's fid.
///
/// Fix: tick values and fids are 64-bit here — stringify them with
/// TRI_StringUInt64 instead of TRI_StringUInt32 (which can truncate and
/// produce colliding filenames). This matches how
/// CloseJournalPrimaryCollection stringifies the same _fid field.
///
/// @param collection  the shape collection to create a journal for
///
/// @return true on success; false on error, with the collection's _lastError
///         and _state updated when datafile creation fails
////////////////////////////////////////////////////////////////////////////////

static bool CreateJournal (TRI_shape_collection_t* collection) {
  TRI_col_header_marker_t cm;
  TRI_datafile_t* journal;
  TRI_df_marker_t* position;
  char* filename;
  int res;

  if (collection->base._info._isVolatile) {
    // in memory collection: no backing file
    filename = NULL;
  }
  else {
    char* jname;
    char* number;

    // use a 64-bit conversion: TRI_NewTickVocBase() returns a 64-bit tick
    number = TRI_StringUInt64(TRI_NewTickVocBase());

    if (! number) {
      return false;
    }

    jname = TRI_Concatenate3String("journal-", number, ".db");
    TRI_FreeString(TRI_CORE_MEM_ZONE, number);

    filename = TRI_Concatenate2File(collection->base._directory, jname);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);
  }

  journal = TRI_CreateDatafile(filename, collection->base._info._maximalSize);

  if (filename != NULL) {
    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  // check that a journal was created
  if (journal == NULL) {
    if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
      collection->base._lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
      collection->base._state = TRI_COL_STATE_READ;
    }
    else {
      collection->base._lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
      collection->base._state = TRI_COL_STATE_WRITE_ERROR;
    }

    return false;
  }

  LOG_TRACE("created a new shape journal '%s'", journal->getName(journal));

  if (journal->isPhysical(journal)) {
    char* jname;
    char* number;
    bool ok;

    // and use the correct name: the datafile's fid (64-bit, see
    // CloseJournalPrimaryCollection which also uses TRI_StringUInt64)
    number = TRI_StringUInt64(journal->_fid);
    jname = TRI_Concatenate3String("journal-", number, ".db");
    filename = TRI_Concatenate2File(collection->base._directory, jname);

    TRI_FreeString(TRI_CORE_MEM_ZONE, number);
    TRI_FreeString(TRI_CORE_MEM_ZONE, jname);

    ok = TRI_RenameDatafile(journal, filename);

    if (! ok) {
      // TODO: remove disastrous call to exit() here
      LOG_FATAL_AND_EXIT("failed to rename the journal to '%s': %s", filename, TRI_last_error());
    }
    else {
      LOG_TRACE("renamed journal to '%s'", filename);
    }

    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
  }

  // create a collection header
  res = TRI_ReserveElementDatafile(journal, sizeof(TRI_col_header_marker_t), &position);

  if (res != TRI_ERROR_NO_ERROR) {
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return false;
  }

  // create a header
  memset(&cm, 0, sizeof(cm));

  cm.base._size = sizeof(TRI_col_header_marker_t);
  cm.base._type = TRI_COL_MARKER_HEADER;
  cm.base._tick = TRI_NewTickVocBase();
  cm._cid = collection->base._info._cid;

  TRI_FillCrcMarkerDatafile(journal, &cm.base, sizeof(cm), 0, 0, 0, 0);

  // on journal creation, always use waitForSync = true
  res = TRI_WriteElementDatafile(journal, position, &cm.base, sizeof(cm), 0, 0, 0, 0, true);

  if (res != TRI_ERROR_NO_ERROR) {
    LOG_ERROR("cannot create document header in journal '%s': %s", journal->getName(journal), TRI_last_error());

    TRI_FreeDatafile(journal);

    return false;
  }

  // that's it
  TRI_PushBackVectorPointer(&collection->base._journals, journal);

  return true;
}