////////////////////////////////////////////////////////////////////////////////
/// @brief store an error code and message in the applier state
///
/// If no message (or an empty one) is given, the generic description for
/// @a errorCode is used instead. The error is logged (unless it is the
/// benign "applier stopped" code), time-stamped, and the previous message
/// is freed before the new one is duplicated into the state.
/// Returns @a errorCode unchanged so callers can use `return SetError(...)`.
////////////////////////////////////////////////////////////////////////////////

static int SetError (TRI_replication_applier_t* applier, int errorCode, char const* msg) {
  // fall back to the generic error description when no message was supplied
  char const* effectiveMsg;

  if (msg == NULL || *msg == '\0') {
    effectiveMsg = TRI_errno_string(errorCode);
  }
  else {
    effectiveMsg = msg;
  }

  // an intentional applier shutdown is not a real error; keep it out of the log
  if (errorCode != TRI_ERROR_REPLICATION_APPLIER_STOPPED) {
    LOG_ERROR("replication applier error for database '%s': %s",
              applier->_databaseName,
              effectiveMsg);
  }

  TRI_replication_applier_state_t* state = &applier->_state;
  state->_lastError._code = errorCode;

  // remember when the error occurred
  TRI_GetTimeStampReplication(state->_lastError._time, sizeof(state->_lastError._time) - 1);

  // release any previously stored message before replacing it
  if (state->_lastError._msg != NULL) {
    TRI_FreeString(TRI_CORE_MEM_ZONE, state->_lastError._msg);
  }

  state->_lastError._msg = TRI_DuplicateStringZ(TRI_CORE_MEM_ZONE, effectiveMsg);

  return errorCode;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief shut down the replication applier for a database and join its thread
///
/// Safe to call with a nullptr applier (no-op). Not supported on coordinators.
/// Returns TRI_ERROR_NO_ERROR on success (or if the applier was not active),
/// otherwise the error code from stopping the applier.
///
/// NOTE(review): the status lock is deliberately released before joining the
/// applier thread and re-acquired afterwards — the ordering in this function
/// matters and must not be changed casually.
////////////////////////////////////////////////////////////////////////////////

int TRI_ShutdownReplicationApplier (TRI_replication_applier_t* applier) {
  if (applier == nullptr) {
    return TRI_ERROR_NO_ERROR;
  }

  LOG_TRACE("requesting replication applier shutdown");

  // coordinators do not run an applier of their own
  if (applier->_vocbase->_type == TRI_VOCBASE_TYPE_COORDINATOR) {
    return TRI_ERROR_CLUSTER_UNSUPPORTED;
  }

  TRI_WriteLockReadWriteLock(&applier->_statusLock);

  if (! applier->_state._active) {
    // nothing to stop
    TRI_WriteUnlockReadWriteLock(&applier->_statusLock);
    return TRI_ERROR_NO_ERROR;
  }

  // signal the applier to stop while holding the status lock
  int res = StopApplier(applier, true);
  TRI_WriteUnlockReadWriteLock(&applier->_statusLock);

  // join the thread without the status lock (otherwise it would probably not join)
  if (res == TRI_ERROR_NO_ERROR) {
    res = TRI_JoinThread(&applier->_thread);
  }
  else {
    // stop the thread but keep original error code
    int res2 = TRI_JoinThread(&applier->_thread);

    if (res2 != TRI_ERROR_NO_ERROR) {
      LOG_ERROR("could not join replication applier for database '%s': %s", applier->_databaseName, TRI_errno_string(res2));
    }
  }

  // clear the terminate flag so the applier can be restarted later
  SetTerminateFlag(applier, false);

  TRI_WriteLockReadWriteLock(&applier->_statusLock);
  // really abort all ongoing transactions
  applier->abortRunningRemoteTransactions();
  TRI_WriteUnlockReadWriteLock(&applier->_statusLock);

  LOG_INFO("stopped replication applier for database '%s'", applier->_databaseName);

  return res;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief enforce a cap constraint by removing the oldest documents
///
/// Removes documents from the front of the headers list while either the
/// document count exceeds cap->_count or the total byte size exceeds
/// cap->_size (a cap value of 0 means "unlimited" for that dimension).
/// If a transaction collection is given, documents are deleted through the
/// transaction machinery; otherwise the header is merely unlinked from the
/// list. Returns TRI_ERROR_NO_ERROR or the first deletion error encountered.
////////////////////////////////////////////////////////////////////////////////

static int ApplyCap (TRI_cap_constraint_t* cap, TRI_document_collection_t* document, TRI_transaction_collection_t* trxCollection) {
  TRI_headers_t* headers = document->_headersPtr;  // PROTECTED by trx in trxCollection
  size_t currentCount = headers->count();
  int64_t currentSize = headers->size();

  int res = TRI_ERROR_NO_ERROR;

  // delete while at least one of the constraints is still violated
  while ((cap->_count > 0 && currentCount > cap->_count) ||
         (cap->_size > 0 && currentSize > cap->_size)) {
    // oldest document is at the front of the linked list
    TRI_doc_mptr_t* oldest = headers->front();

    if (oldest != nullptr) {
      TRI_ASSERT(oldest->getDataPtr() != nullptr);  // ONLY IN INDEX, PROTECTED by RUNTIME
      // marker size is needed to keep the running byte total accurate
      size_t oldSize = ((TRI_df_marker_t*) (oldest->getDataPtr()))->_size;  // ONLY IN INDEX, PROTECTED by RUNTIME
      TRI_ASSERT(oldSize > 0);

      if (trxCollection != nullptr) {
        // proper transactional delete
        res = TRI_DeleteDocumentDocumentCollection(trxCollection, nullptr, oldest);

        if (res != TRI_ERROR_NO_ERROR) {
          LOG_WARNING("cannot cap collection: %s", TRI_errno_string(res));
          break;
        }
      }
      else {
        // no transaction context: only detach the header from the list
        headers->unlink(oldest);
      }

      // keep the local counters in sync with what we just removed
      currentCount--;
      currentSize -= (int64_t) oldSize;
    }
    else {
      // we should not get here
      LOG_WARNING("logic error in %s", __FUNCTION__);
      break;
    }
  }

  return res;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief enforce a cap constraint on a primary collection
///
/// Removes the oldest documents (front of the headers list) until the
/// document count no longer exceeds cap->_size. With a transaction
/// collection the documents are deleted transactionally; without one the
/// headers are only unlinked. Returns TRI_ERROR_NO_ERROR or the first
/// deletion error encountered.
////////////////////////////////////////////////////////////////////////////////

static int ApplyCap (TRI_cap_constraint_t* cap, TRI_primary_collection_t* primary, TRI_transaction_collection_t* trxCollection) {
  TRI_document_collection_t* document = (TRI_document_collection_t*) primary;
  TRI_headers_t* headers = document->_headers;
  size_t numDocs = headers->count(headers);
  int result = TRI_ERROR_NO_ERROR;

  // drop oldest documents until the collection is back under its cap
  while (numDocs > cap->_size) {
    TRI_doc_mptr_t* victim = headers->front(headers);

    if (victim == NULL) {
      // we should not get here
      LOG_WARNING("logic error in %s", __FUNCTION__);
      break;
    }

    if (trxCollection == NULL) {
      // no transaction context: only detach the header from the list
      headers->unlink(headers, victim);
    }
    else {
      // transactional delete of the oldest document
      result = TRI_DeleteDocumentDocumentCollection(trxCollection, NULL, victim);

      if (result != TRI_ERROR_NO_ERROR) {
        LOG_WARNING("cannot cap collection: %s", TRI_errno_string(result));
        break;
      }
    }

    numDocs--;
  }

  return result;
}
bool ImportHelper::importJson (const string& collectionName, const string& fileName) { _collectionName = collectionName; _firstLine = ""; _numberLines = 0; _numberOk = 0; _numberError = 0; _outputBuffer.clear(); _errorMessage = ""; _hasError = false; // read and convert int fd; int64_t totalLength; if (fileName == "-") { // we don't have a filesize totalLength = 0; fd = STDIN_FILENO; } else { // read filesize totalLength = TRI_SizeFile(fileName.c_str()); fd = TRI_OPEN(fileName.c_str(), O_RDONLY); if (fd < 0) { _errorMessage = TRI_LAST_ERROR_STR; return false; } } bool isArray = false; bool checkedFront = false; // progress display control variables int64_t totalRead = 0; double nextProgress = ProgressStep; static const int BUFFER_SIZE = 32768; while (! _hasError) { // reserve enough room to read more data if (_outputBuffer.reserve(BUFFER_SIZE) == TRI_ERROR_OUT_OF_MEMORY) { _errorMessage = TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY); if (fd != STDIN_FILENO) { TRI_CLOSE(fd); } return false; } // read directly into string buffer ssize_t n = TRI_READ(fd, _outputBuffer.end(), BUFFER_SIZE - 1); if (n < 0) { _errorMessage = TRI_LAST_ERROR_STR; if (fd != STDIN_FILENO) { TRI_CLOSE(fd); } return false; } else if (n == 0) { // we're done break; } // adjust size of the buffer by the size of the chunk we just read _outputBuffer.increaseLength(n); if (! checkedFront) { // detect the import file format (single lines with individual JSON objects // or a JSON array with all documents) char const* p = _outputBuffer.begin(); char const* e = _outputBuffer.end(); while (p < e && (*p == ' ' || *p == '\r' || *p == '\n' || *p == '\t' || *p == '\f' || *p == '\b')) { ++p; } isArray = (*p == '['); checkedFront = true; } totalRead += (int64_t) n; reportProgress(totalLength, totalRead, nextProgress); if (_outputBuffer.length() > _maxUploadSize) { if (isArray) { if (fd != STDIN_FILENO) { TRI_CLOSE(fd); } _errorMessage = "import file is too big. 
please increase the value of --batch-size (currently " + StringUtils::itoa(_maxUploadSize) + ")"; return false; } // send all data before last '\n' char const* first = _outputBuffer.c_str(); char* pos = (char*) memrchr(first, '\n', _outputBuffer.length()); if (pos != 0) { size_t len = pos - first + 1; sendJsonBuffer(first, len, isArray); _outputBuffer.erase_front(len); } } } if (_outputBuffer.length() > 0) { sendJsonBuffer(_outputBuffer.c_str(), _outputBuffer.length(), isArray); } _numberLines = _numberError + _numberOk; if (fd != STDIN_FILENO) { TRI_CLOSE(fd); } _outputBuffer.clear(); return ! _hasError; }