char* TRI_GetDirectoryCollection (char const* path, const TRI_col_info_t* const parameter) { char* filename; assert(path); assert(parameter); // shape collections use just the name, e.g. path/SHAPES if (parameter->_type == TRI_COL_TYPE_SHAPE) { filename = TRI_Concatenate2File(path, parameter->_name); } // other collections use the collection identifier else if (TRI_IS_DOCUMENT_COLLECTION(parameter->_type)) { char* tmp1; char* tmp2; tmp1 = TRI_StringUInt64(parameter->_cid); if (tmp1 == NULL) { TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY); return NULL; } tmp2 = TRI_Concatenate2String("collection-", tmp1); if (tmp2 == NULL) { TRI_FreeString(TRI_CORE_MEM_ZONE, tmp1); TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY); return NULL; } filename = TRI_Concatenate2File(path, tmp2); TRI_FreeString(TRI_CORE_MEM_ZONE, tmp1); TRI_FreeString(TRI_CORE_MEM_ZONE, tmp2); } // oops, unknown collection type else { TRI_set_errno(TRI_ERROR_ARANGO_UNKNOWN_COLLECTION_TYPE); return NULL; } if (filename == NULL) { TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY); } // might be NULL return filename; }
bool TRI_RegisterMimetype (const char* extension, const char* mimetype, bool appendCharset) { mimetype_t* entry = static_cast<mimetype_t*>(TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(mimetype_t), false)); entry->_extension = TRI_DuplicateString(extension); entry->_appendCharset = appendCharset; if (appendCharset) { entry->_mimetype = TRI_Concatenate2String(mimetype, "; charset=utf-8"); } else { entry->_mimetype = TRI_DuplicateString(mimetype); } void* found = TRI_InsertKeyAssociativePointer(&Mimetypes, extension, entry, false); return (found != nullptr); }
////////////////////////////////////////////////////////////////////////////////
/// @brief saves a json object to a file
///
/// writes into "<filename>.tmp" first, fsyncs, and then renames the temporary
/// file onto the destination so readers never see a half-written file.
/// returns true on success, false otherwise (error number set via
/// TRI_set_errno).
////////////////////////////////////////////////////////////////////////////////

bool TRI_SaveJson (char const* filename, TRI_json_t const* object) {
  bool ok;
  char* tmp;
  int fd;
  int res;
  ssize_t m;

  tmp = TRI_Concatenate2String(filename, ".tmp");

  if (tmp == NULL) {
    return false;
  }

  // O_EXCL: fail if a stale temporary file is still present
  fd = TRI_CREATE(tmp, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);

  if (fd < 0) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot create json file '%s': '%s'", tmp, TRI_LAST_ERROR_STR);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    return false;
  }

  ok = TRI_PrintJson(fd, object);

  if (! ok) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot write to json file '%s': '%s'", tmp, TRI_LAST_ERROR_STR);
    TRI_CLOSE(fd);   // bug fix: the descriptor was leaked on this path
    TRI_UnlinkFile(tmp);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    return false;
  }

  // terminate the file with a newline
  m = TRI_WRITE(fd, "\n", 1);

  if (m <= 0) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot write to json file '%s': '%s'", tmp, TRI_LAST_ERROR_STR);
    TRI_CLOSE(fd);   // bug fix: the descriptor was leaked on this path
    TRI_UnlinkFile(tmp);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    return false;
  }

  ok = TRI_fsync(fd);

  if (! ok) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot sync saved json '%s': '%s'", tmp, TRI_LAST_ERROR_STR);
    TRI_CLOSE(fd);   // bug fix: the descriptor was leaked on this path
    TRI_UnlinkFile(tmp);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    return false;
  }

  res = TRI_CLOSE(fd);

  if (res < 0) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot close saved file '%s': '%s'", tmp, TRI_LAST_ERROR_STR);
    TRI_UnlinkFile(tmp);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    return false;
  }

  res = TRI_RenameFile(tmp, filename);

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_set_errno(res);
    LOG_ERROR("cannot rename saved file '%s' to '%s': '%s'", tmp, filename, TRI_LAST_ERROR_STR);
    TRI_UnlinkFile(tmp);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);
    // bug fix: the old code did "return res;" here — a non-zero error code
    // converted to bool true, making the function report success on failure
    return false;
  }

  TRI_FreeString(TRI_CORE_MEM_ZONE, tmp);

  return ok;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compacts the given datafiles of a collection into one compactor file
///
/// The still-relevant markers of all datafiles in @a compactions are copied
/// into a single newly created compactor file (which re-uses the _fid of the
/// first original datafile). Afterwards the original datafiles are either
/// scheduled for dropping (via barriers) or, if the compactor ended up
/// completely empty, the compactor itself is removed as well. Datafiles that
/// are superseded get a companion "<name>.dead" marker file so a crash before
/// the barrier fires can be cleaned up on the next startup.
///
/// NOTE(review): intended to run in the compactor thread; concurrency
/// assumptions beyond the datafiles write-lock below are not visible here.
////////////////////////////////////////////////////////////////////////////////

static void CompactifyDatafiles (TRI_document_collection_t* document, TRI_vector_t const* compactions) {
  TRI_datafile_t* compactor;
  TRI_primary_collection_t* primary;
  compaction_initial_context_t initial;
  compaction_context_t context;
  size_t i, j, n;

  n = compactions->_length;
  assert(n > 0);

  // first pass: determine target size and the _fid to re-use
  initial = InitCompaction(document, compactions);

  if (initial._failed) {
    LOG_ERROR("could not create initialise compaction");
    return;
  }

  LOG_TRACE("compactify called for collection '%llu' for %d datafiles of total size %llu",
            (unsigned long long) document->base.base._info._cid,
            (int) n,
            (unsigned long long) initial._targetSize);

  // now create a new compactor file
  // we are re-using the _fid of the first original datafile!
  compactor = CreateCompactor(document, initial._fid, initial._targetSize);

  if (compactor == NULL) {
    // some error occurred
    LOG_ERROR("could not create compactor file");
    return;
  }

  LOG_DEBUG("created new compactor file '%s'", compactor->getName(compactor));

  memset(&context._dfi, 0, sizeof(TRI_doc_datafile_info_t));
  // these attributes remain the same for all datafiles we collect
  context._document = document;
  context._compactor = compactor;
  context._dfi._fid = compactor->_fid;

  // now compact all datafiles
  for (i = 0; i < n; ++i) {
    compaction_info_t* compaction;
    TRI_datafile_t* df;
    bool ok;

    compaction = TRI_AtVector(compactions, i);
    df = compaction->_datafile;

    LOG_DEBUG("compacting datafile '%s' into '%s', number: %d, keep deletions: %d",
              df->getName(df),
              compactor->getName(compactor),
              (int) i,
              (int) compaction->_keepDeletions);

    // if this is the first datafile in the list of datafiles, we can also
    // collect deletion markers
    context._keepDeletions = compaction->_keepDeletions;

    // run the actual compaction of a single datafile
    ok = TRI_IterateDatafile(df, Compactifier, &context, false, false);

    if (! ok) {
      LOG_WARNING("failed to compact datafile '%s'", df->getName(df));
      // compactor file does not need to be removed now. will be removed on
      // next startup
      // TODO: Remove
      return;
    }
  } // next file

  // locate the compactor
  // must acquire a write-lock as we're about to change the datafiles vector
  primary = &document->base;
  TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);

  if (! LocateDatafile(&primary->base._compactors, compactor->_fid, &j)) {
    // not found: should not happen as we created the compactor above
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
    LOG_ERROR("logic error in CompactifyDatafiles: could not find compactor");
    return;
  }

  if (! TRI_CloseCompactorPrimaryCollection(primary, j)) {
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
    LOG_ERROR("could not close compactor file");
    // TODO: how do we recover from this state?
    return;
  }

  TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

  // nothing alive, dead, deleted or in a transaction made it into the
  // compactor: everything that was compacted is obsolete
  if (context._dfi._numberAlive == 0 &&
      context._dfi._numberDead == 0 &&
      context._dfi._numberDeletion == 0 &&
      context._dfi._numberTransaction == 0) {
    TRI_barrier_t* b;

    if (n > 1) {
      // create .dead files for all collected files, so a crash before the
      // drop barriers fire can be cleaned up on restart
      for (i = 0; i < n; ++i) {
        compaction_info_t* compaction;
        TRI_datafile_t* datafile;

        compaction = TRI_AtVector(compactions, i);
        datafile = compaction->_datafile;

        if (datafile->isPhysical(datafile)) {
          char* filename = TRI_Concatenate2String(datafile->getName(datafile), ".dead");

          if (filename != NULL) {
            TRI_WriteFile(filename, "", 0);
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          }
        }
      }
    }

    // compactor is fully empty. remove it
    RemoveCompactor(document, compactor);

    for (i = 0; i < n; ++i) {
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);

      // datafile is also empty after compaction and thus useless
      RemoveDatafile(document, compaction->_datafile);

      // add a deletion marker to the result set container; the callback
      // physically drops the file once no reader uses it any more
      b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

      if (b == NULL) {
        LOG_ERROR("out of memory when creating datafile-drop barrier");
      }
    }
  }
  else {
    // the compactor contains live data and will replace the first datafile
    if (n > 1) {
      // create .dead files for all collected files but the first
      // (the first datafile's _fid lives on in the compactor)
      for (i = 1; i < n; ++i) {
        compaction_info_t* compaction;
        TRI_datafile_t* datafile;

        compaction = TRI_AtVector(compactions, i);
        datafile = compaction->_datafile;

        if (datafile->isPhysical(datafile)) {
          char* filename = TRI_Concatenate2String(datafile->getName(datafile), ".dead");

          if (filename != NULL) {
            TRI_WriteFile(filename, "", 0);
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          }
        }
      }
    }

    for (i = 0; i < n; ++i) {
      TRI_barrier_t* b;
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);

      if (i == 0) {
        // add a rename marker: the compactor will be renamed over the first
        // datafile once no reader uses it any more
        void* copy;

        // NOTE(review): allocation result is not checked before memcpy —
        // presumably TRI_CORE_MEM_ZONE aborts on OOM; confirm
        copy = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(compaction_context_t), false);

        memcpy(copy, &context, sizeof(compaction_context_t));

        b = TRI_CreateBarrierRenameDatafile(&primary->_barrierList, compaction->_datafile, RenameDatafileCallback, copy);

        if (b == NULL) {
          LOG_ERROR("out of memory when creating datafile-rename barrier");
          TRI_Free(TRI_CORE_MEM_ZONE, copy);
        }
      }
      else {
        // datafile is empty after compaction and thus useless
        RemoveDatafile(document, compaction->_datafile);

        // add a drop datafile marker
        b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

        if (b == NULL) {
          LOG_ERROR("out of memory when creating datafile-drop barrier");
        }
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief barrier callback: physically disposes of an obsolete datafile
///
/// Renames a physical datafile to "deleted-<fid>.db" inside the collection
/// directory, closes it, optionally unlinks it from disk (when the vocbase
/// setting removeOnCompacted is set), removes a companion "<name>.dead"
/// marker file if one exists, and finally frees the datafile struct.
///
/// @param datafile  the datafile to drop (ownership is taken; freed here)
/// @param data      the TRI_primary_collection_t* the datafile belonged to
////////////////////////////////////////////////////////////////////////////////

static void DropDatafileCallback (TRI_datafile_t* datafile, void* data) {
  TRI_primary_collection_t* primary;
  TRI_voc_fid_t fid;
  char* filename;
  char* name;
  char* number;
  char* copy;   // original on-disk name, kept for the .dead lookup below
  bool ok;

  primary = data;
  fid = datafile->_fid;
  copy = NULL;

  // build "<directory>/deleted-<fid>.db"
  number = TRI_StringUInt64(fid);
  name = TRI_Concatenate3String("deleted-", number, ".db");
  filename = TRI_Concatenate2File(primary->base._directory, name);

  TRI_FreeString(TRI_CORE_MEM_ZONE, number);
  TRI_FreeString(TRI_CORE_MEM_ZONE, name);

  if (datafile->isPhysical(datafile)) {
    // remember the original filename before the rename clobbers it;
    // needed to locate the companion .dead file later
    copy = TRI_DuplicateStringZ(TRI_CORE_MEM_ZONE, datafile->_filename);

    ok = TRI_RenameDatafile(datafile, filename);

    if (! ok) {
      LOG_ERROR("cannot rename obsolete datafile '%s' to '%s': %s", copy, filename, TRI_last_error());
    }
  }

  LOG_DEBUG("finished compacting datafile '%s'", datafile->getName(datafile));

  ok = TRI_CloseDatafile(datafile);

  if (! ok) {
    LOG_ERROR("cannot close obsolete datafile '%s': %s", datafile->getName(datafile), TRI_last_error());
  }
  else if (datafile->isPhysical(datafile)) {
    // only wipe from disk if the server is configured to do so
    if (primary->base._vocbase->_settings.removeOnCompacted) {
      int res;

      LOG_DEBUG("wiping compacted datafile from disk");

      res = TRI_UnlinkFile(filename);

      if (res != TRI_ERROR_NO_ERROR) {
        LOG_ERROR("cannot wipe obsolete datafile '%s': %s", datafile->getName(datafile), TRI_last_error());
      }

      // check for .dead files
      if (copy != NULL) {
        // remove .dead file for datafile
        char* deadfile = TRI_Concatenate2String(copy, ".dead");

        if (deadfile != NULL) {
          // check if .dead file exists, then remove it
          if (TRI_ExistsFile(deadfile)) {
            TRI_UnlinkFile(deadfile);
          }

          TRI_FreeString(TRI_CORE_MEM_ZONE, deadfile);
        }
      }
    }
  }

  TRI_FreeDatafile(datafile);
  TRI_FreeString(TRI_CORE_MEM_ZONE, filename);

  if (copy != NULL) {
    TRI_FreeString(TRI_CORE_MEM_ZONE, copy);
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief saves a json object to a file
///
/// writes into "<filename>.tmp" first, optionally fsyncs it, then renames the
/// temporary file onto the destination so readers never observe a partially
/// written file. returns true on success, false otherwise (with the error
/// number set via TRI_set_errno).
////////////////////////////////////////////////////////////////////////////////

bool TRI_SaveJson (char const* filename, TRI_json_t const* object, bool syncFile) {
  char* tmpName = TRI_Concatenate2String(filename, ".tmp");

  if (tmpName == NULL) {
    return false;
  }

  // remove a potentially existing temporary file
  if (TRI_ExistsFile(tmpName)) {
    TRI_UnlinkFile(tmpName);
  }

  int fd = TRI_CREATE(tmpName, O_CREAT | O_TRUNC | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);

  if (fd < 0) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot create json file '%s': %s", tmpName, TRI_LAST_ERROR_STR);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);
    return false;
  }

  if (! TRI_PrintJson(fd, object, true)) {
    TRI_CLOSE(fd);
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot write to json file '%s': %s", tmpName, TRI_LAST_ERROR_STR);
    TRI_UnlinkFile(tmpName);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);
    return false;
  }

  if (syncFile) {
    // caller asked for durability of the temporary file before the rename
    LOG_TRACE("syncing tmp file '%s'", tmpName);

    if (! TRI_fsync(fd)) {
      TRI_CLOSE(fd);
      TRI_set_errno(TRI_ERROR_SYS_ERROR);
      LOG_ERROR("cannot sync saved json '%s': %s", tmpName, TRI_LAST_ERROR_STR);
      TRI_UnlinkFile(tmpName);
      TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);
      return false;
    }
  }

  if (TRI_CLOSE(fd) < 0) {
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    LOG_ERROR("cannot close saved file '%s': %s", tmpName, TRI_LAST_ERROR_STR);
    TRI_UnlinkFile(tmpName);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);
    return false;
  }

  // move the fully written temporary file onto the destination
  int renameResult = TRI_RenameFile(tmpName, filename);

  if (renameResult != TRI_ERROR_NO_ERROR) {
    TRI_set_errno(renameResult);
    LOG_ERROR("cannot rename saved file '%s' to '%s': %s", tmpName, filename, TRI_LAST_ERROR_STR);
    TRI_UnlinkFile(tmpName);
    TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);
    return false;
  }

  TRI_FreeString(TRI_CORE_MEM_ZONE, tmpName);

  return true;
}