// Persist every user-visible setting to the INI configuration file.
// The whole rewrite is guarded by m_csSettingFile so no other thread
// reads or regenerates the file while it is being written.
void CRelaxReminderDlg::SaveSettingToIniFile() {
  // Enter the critical section protecting the settings file
  EnterCriticalSection(&m_csSettingFile);
  CreateIniFileHeader(m_strConfigFile);
  CIni Ini(m_strConfigFile);
  // Save the version information first
  Ini.Write("Version", "CurrentVersion", VersionNum2String(m_dwVersion));
  // Save the timing parameters
  Ini.Write("Time", "WorkDuration", m_tm.GetWD());
  Ini.Write("Time", "ShortRelaxDuration", m_tm.GetSRD());
  Ini.Write("Time", "EnableLongRelax", m_tm.GetEnableLongRelax());
  Ini.Write("Time", "LongRelaxFrequency", m_tm.GetLRF());
  Ini.Write("Time", "LongRelaxDuration", m_tm.GetLRD());
  // Save the notification parameters
  Ini.Write("Notify", "PreNotify", m_bPreNotify);
  Ini.Write("Notify", "LockInput", m_bLockInput);
  Ini.Write("Notify", "LockType", m_iLockType);
  Ini.Write("Notify", "GraceTimeBeforeLockInput", m_iGraceTimeBeforeLockInput);
  Ini.Write("Notify", "DarkerScreen", m_bDarkerScreen);
  Ini.Write("Notify", "DarkerScreenAlpha", m_iDarkerScreenAlpha);
  Ini.Write("Notify", "DarkerScreenAnimate", m_eDarkerScreenAnimateType);
  Ini.Write("Notify", "DarkerScreenColor", m_colorDarkerScreen);
  Ini.Write("Notify", "DarkerScreenType", m_eDarkerScreenType);
  Ini.Write("Notify", "NotifyWindowType", m_eNotifyWindowType);
  // Save the automatic state-transition (auto-away) parameters
  Ini.Write("AutoAway", "EnableAutoPause", m_tm.GetEnableAutoPause());
  Ini.Write("AutoAway", "AutoPauseThreshold", m_tm.GetAPT());
  Ini.Write("AutoAway", "FullscreenPause", m_tm.GetEnableFullScreenPause());
  Ini.Write("AutoAway", "EnableAutoReset", m_tm.GetEnableAutoReset());
  Ini.Write("AutoAway", "AutoResetThreshold", m_tm.GetART());
  // Save the language parameter
  Ini.Write("Language", "LanguageId", m_strLanguageId);
  // Save the miscellaneous parameters
  Ini.Write("Misc", "LockComputer", m_bLockComputer);
  Ini.Write("Misc", "CloseMonitor", m_bCloseMonitor);
  Ini.Write("Misc", "ShowFloatingWindow", m_bShowFloatingWindowAtStartup);
  Ini.Write("Misc", "ShowTrayIcon", m_bShowTrayIconAtStartup);
  Ini.Write("Misc", "EnableLog", GetEnableLog());
  Ini.Write("Misc", "LogLevel", GetLogLevel());
  Ini.Write("Misc", "LogFileSize", GetLogFileSize());
  Ini.Write("Misc", "AutoStartWithSystem", GetAppAutoStartWithSystem());
  // Leave the critical section
  LeaveCriticalSection(&m_csSettingFile);
}
// // Func: // 检查日志文件大小是否超出设定值。 // 当日志文件大小超过${SizeLimit}/2,日志文件会被重命名为"${Filename}0.ext",如果该文件已经存在则覆盖。 // 然后新建文件${Filename}.ext。这样,实际上日志文件被分成两个部分,${Filename}.ext和${Filename}0.ext. // Param: // 无 // Return: // -1 - 失败 // >0 - 日志文件剩余容量 // int CSimpleLogger::CheckLogFileSize() { int nFileSize = GetLogFileSize(); if (nFileSize == -1) return -1; if (nFileSize < m_logConfig.nFileSizeLimit) return m_logConfig.nFileSizeLimit - nFileSize; wstring wstrNewFileName = m_logConfig.wstrOutFile; size_t nPos = wstrNewFileName.find_last_of(L'.'); if (nPos == wstring::npos) wstrNewFileName.append(L".part0"); else wstrNewFileName.insert(nPos, L".part0"); if (::DeleteFile(wstrNewFileName.c_str()) || ::GetLastError() == ERROR_FILE_NOT_FOUND) { ::MoveFile(m_logConfig.wstrOutFile.c_str(), wstrNewFileName.c_str()); } return m_logConfig.nFileSizeLimit; }
/** * @brief Recovery system based on log file */ void AriesFrontendLogger::DoRecovery() { // Set log file size log_file_size = GetLogFileSize(log_file_fd); // Go over the log size if needed if (log_file_size > 0) { bool reached_end_of_file = false; // Start the recovery transaction auto &txn_manager = concurrency::TransactionManager::GetInstance(); // Although we call BeginTransaction here, recovery txn will not be // recoreded in log file since we are in recovery mode auto recovery_txn = txn_manager.BeginTransaction(); // Go over each log record in the log file while (reached_end_of_file == false) { // Read the first byte to identify log record type // If that is not possible, then wrap up recovery auto record_type = GetNextLogRecordType(log_file, log_file_size); switch (record_type) { case LOGRECORD_TYPE_TRANSACTION_BEGIN: AddTransactionToRecoveryTable(); break; case LOGRECORD_TYPE_TRANSACTION_END: RemoveTransactionFromRecoveryTable(); break; case LOGRECORD_TYPE_TRANSACTION_COMMIT: MoveCommittedTuplesToRecoveryTxn(recovery_txn); break; case LOGRECORD_TYPE_TRANSACTION_ABORT: AbortTuplesFromRecoveryTable(); break; case LOGRECORD_TYPE_ARIES_TUPLE_INSERT: InsertTuple(recovery_txn); break; case LOGRECORD_TYPE_ARIES_TUPLE_DELETE: DeleteTuple(recovery_txn); break; case LOGRECORD_TYPE_ARIES_TUPLE_UPDATE: UpdateTuple(recovery_txn); break; default: reached_end_of_file = true; break; } } // Commit the recovery transaction txn_manager.CommitTransaction(); // Finally, abort ACTIVE transactions in recovery_txn_table AbortActiveTransactions(); // After finishing recovery, set the next oid with maximum oid // observed during the recovery auto &manager = catalog::Manager::GetInstance(); manager.SetNextOid(max_oid); } }
/**
 * @brief Recovery system based on the write-behind log file.
 *
 * Replays the log only when NeedRecovery() reports that the last run
 * committed without writing its TRANSACTION_DONE marker. Replay walks
 * every record, re-stamping insert/delete commit marks on the tuple
 * locations recorded in the log, then appends a TRANSACTION_DONE record
 * so the same work is not redone on the next startup.
 */
void WriteBehindFrontendLogger::DoRecovery() {
  // Set log file size
  log_file_size = GetLogFileSize(log_file_fd);
  // Go over the log only if there is anything in it
  if (log_file_size > 0) {
    bool reached_end_of_file = false;
    // check whether first item is LOGRECORD_TYPE_TRANSACTION_COMMIT
    // if not, no need to do recovery.
    // if yes, need to replay all log records before we hit
    // LOGRECORD_TYPE_TRANSACTION_DONE
    bool need_recovery = NeedRecovery();
    if (need_recovery == true) {
      // Scratch record reused for headers we read but otherwise ignore.
      TransactionRecord dummy_transaction_record(LOGRECORD_TYPE_INVALID);
      // Tracks the commit id of the most recent tuple record replayed;
      // stays INVALID_CID if the log holds no tuple records.
      cid_t current_commit_id = INVALID_CID;
      // Go over each log record in the log file
      while (reached_end_of_file == false) {
        // Read the first byte to identify log record type
        // If that is not possible, then wrap up recovery
        LogRecordType log_type = GetNextLogRecordType(log_file, log_file_size);
        switch (log_type) {
          case LOGRECORD_TYPE_TRANSACTION_DONE:
          case LOGRECORD_TYPE_TRANSACTION_COMMIT: {
            // read but do nothing — only advances the file position
            ReadTransactionRecordHeader(dummy_transaction_record, log_file,
                                        log_file_size);
          } break;

          case LOGRECORD_TYPE_WBL_TUPLE_INSERT: {
            // Re-stamp the insert commit mark at the logged location and
            // remember its commit id.
            TupleRecord insert_record(LOGRECORD_TYPE_WBL_TUPLE_INSERT);
            ReadTupleRecordHeader(insert_record, log_file, log_file_size);
            auto insert_location = insert_record.GetInsertLocation();
            auto info = SetInsertCommitMark(insert_location);
            current_commit_id = info.first;
          } break;

          case LOGRECORD_TYPE_WBL_TUPLE_DELETE: {
            // Re-stamp the delete commit mark at the logged location.
            TupleRecord delete_record(LOGRECORD_TYPE_WBL_TUPLE_DELETE);
            ReadTupleRecordHeader(delete_record, log_file, log_file_size);
            auto delete_location = delete_record.GetDeleteLocation();
            auto info = SetDeleteCommitMark(delete_location);
            current_commit_id = info.first;
          } break;

          case LOGRECORD_TYPE_WBL_TUPLE_UPDATE: {
            // An update is a delete of the old version plus an insert of
            // the new one; the tracked commit id comes from the insert.
            TupleRecord update_record(LOGRECORD_TYPE_WBL_TUPLE_UPDATE);
            ReadTupleRecordHeader(update_record, log_file, log_file_size);
            auto delete_location = update_record.GetDeleteLocation();
            SetDeleteCommitMark(delete_location);
            auto insert_location = update_record.GetInsertLocation();
            auto info = SetInsertCommitMark(insert_location);
            current_commit_id = info.first;
          } break;

          default:
            // Unknown/unreadable record: stop replaying.
            reached_end_of_file = true;
            break;
        }
      }

      // Update latest commit id with the newest one replayed
      if (latest_commit_id < current_commit_id) {
        latest_commit_id = current_commit_id;
      }

      // write out a trasaction done log record to file
      // to avoid redo next time during recovery
      WriteTransactionLogRecord(
          TransactionRecord(LOGRECORD_TYPE_TRANSACTION_DONE));
    }

    // After finishing recovery, set the next oid with maximum oid
    // observed during the recovery
    auto &manager = catalog::Manager::GetInstance();
    manager.SetNextOid(max_oid);
  }
}
/**
 * @brief Build a log file by running the configured write workload.
 *
 * Creates a database and a user table, partitions state.tuple_count
 * tuples across state.backend_count backend threads, runs the backends
 * to generate log records, and reports either elapsed time or the
 * resulting log file size depending on state.experiment_type.
 *
 * @param db_oid    oid of the database to create and populate.
 * @param table_oid oid of the user table created inside that database.
 */
void BuildLog(oid_t db_oid, oid_t table_oid) {
  std::chrono::time_point<std::chrono::system_clock> start, end;
  std::chrono::duration<double, std::milli> elapsed_milliseconds;

  // Build a pool backing the variable-length tuple data
  auto logging_pool = new VarlenPool(BACKEND_TYPE_MM);

  // Create db
  CreateDatabase(db_oid);
  auto& manager = catalog::Manager::GetInstance();
  storage::Database* db = manager.GetDatabaseWithOid(db_oid);

  // Create table, drop it and create again
  // so that table can have a newly added tile group and
  // not just the default tile group
  storage::DataTable* table = CreateUserTable(db_oid, table_oid);
  db->AddTable(table);

  // Tuple count handled by each backend thread
  oid_t per_backend_tuple_count = state.tuple_count / state.backend_count;

  // Create Tuples
  auto tuples =
      CreateTuples(table->GetSchema(), per_backend_tuple_count, logging_pool);

  //===--------------------------------------------------------------------===//
  // ACTIVE PROCESSING
  //===--------------------------------------------------------------------===//
  start = std::chrono::system_clock::now();

  // Execute the workload to build the log
  std::vector<std::thread> thread_group;
  oid_t num_threads = state.backend_count;

  // Launch a group of threads
  for (uint64_t thread_itr = 0; thread_itr < num_threads; ++thread_itr) {
    thread_group.push_back(std::thread(RunBackends, table, tuples));
  }

  // Join the threads with the main thread
  for (uint64_t thread_itr = 0; thread_itr < num_threads; ++thread_itr) {
    thread_group[thread_itr].join();
  }

  end = std::chrono::system_clock::now();
  elapsed_milliseconds = end - start;

  // Build log time (or storage footprint, depending on experiment)
  if (state.experiment_type == EXPERIMENT_TYPE_ACTIVE ||
      state.experiment_type == EXPERIMENT_TYPE_WAIT) {
    WriteOutput(elapsed_milliseconds.count());
  } else if (state.experiment_type == EXPERIMENT_TYPE_STORAGE) {
    auto log_file_size = GetLogFileSize();
    LOG_INFO("Log file size :: %lu", log_file_size);
    WriteOutput(log_file_size);
  }

  // Clean up data
  for (auto tuple : tuples) {
    delete tuple;
  }
  // BUG FIX: the varlen pool was allocated with `new` above but never
  // released, leaking it on every call. Free it once the tuples that
  // referenced its storage are gone.
  delete logging_pool;

  // Check the tuple count if needed
  if (state.check_tuple_count) {
    oid_t total_expected = 0;
    CheckTupleCount(db_oid, table_oid, total_expected);
  }

  // We can only drop the table in case of WAL
  if (IsBasedOnWriteAheadLogging(peloton_logging_mode) == true) {
    db->DropTableWithOid(table_oid);
    DropDatabase(db_oid);
  }
}
/**
 * @brief Write a simple log file by running the benchmark workload.
 *
 * Changes into the configured log directory, spins up the frontend
 * logging thread (standby -> recovery -> logging), runs BuildLog() to
 * generate the workload, shuts the logger down, and reports either
 * throughput or the resulting log file size depending on the
 * experiment type.
 *
 * @return false if another frontend logger is already running,
 *         true otherwise.
 */
bool PrepareLogFile() {
  // NOTE(review): a chdir failure is only logged — execution continues
  // in the current directory, so the log lands in the wrong place.
  if (chdir(state.log_file_dir.c_str())) {
    LOG_ERROR("change directory failed");
  }

  // start a thread for logging
  auto& log_manager = logging::LogManager::GetInstance();
  if (log_manager.ContainsFrontendLogger() == true) {
    LOG_ERROR("another logging thread is running now");
    return false;
  }

  Timer<> timer;
  std::thread thread;

  timer.Start();

  if (peloton_logging_mode != LOGGING_TYPE_INVALID) {
    // Launching a thread for logging
    if (!log_manager.IsInLoggingMode()) {
      // Set sync commit mode
      log_manager.SetSyncCommit(false);

      // Wait for standby mode
      auto local_thread = std::thread(
          &peloton::logging::LogManager::StartStandbyMode, &log_manager);
      thread.swap(local_thread);
      log_manager.WaitForModeTransition(peloton::LOGGING_STATUS_TYPE_STANDBY,
                                        true);

      // Clean up database tile state before recovery from checkpoint
      log_manager.PrepareRecovery();

      // Do any recovery
      log_manager.StartRecoveryMode();

      // Wait for logging mode
      log_manager.WaitForModeTransition(peloton::LOGGING_STATUS_TYPE_LOGGING,
                                        true);

      // Done recovery
      log_manager.DoneRecovery();
    }
  }

  // Build the log
  BuildLog();

  // Stop frontend logger if in a valid logging mode
  if (peloton_logging_mode != LOGGING_TYPE_INVALID) {
    // Wait for the mode transition :: LOGGING -> TERMINATE -> SLEEP
    // NOTE(review): if EndLogging() returns false, `thread` is left
    // joinable and its destructor will call std::terminate — confirm
    // EndLogging cannot fail once logging started.
    if (log_manager.EndLogging()) {
      thread.join();
    }
  }

  timer.Stop();
  auto duration = timer.GetDuration();
  auto throughput =
      (ycsb::state.transaction_count * ycsb::state.backend_count) / duration;

  // Log the build log time (or the storage footprint)
  if (state.experiment_type == EXPERIMENT_TYPE_INVALID ||
      state.experiment_type == EXPERIMENT_TYPE_ACTIVE ||
      state.experiment_type == EXPERIMENT_TYPE_WAIT) {
    WriteOutput(throughput);
  } else if (state.experiment_type == EXPERIMENT_TYPE_STORAGE) {
    auto log_file_size = GetLogFileSize();
    WriteOutput(log_file_size);
  }

  return true;
}
// Worker routine for one performance-logging query thread.
//
// Reads the query configuration from the registry, opens a PDH query
// with the configured counter paths, then repeatedly: builds the
// current log file name (when auto-rename is enabled), opens the log,
// and samples counters on a timer until either the exit event fires,
// the rename interval elapses, the file-size limit is hit, or an
// update fails. Returns the final value of the run flag (FALSE once
// the loop has been told to stop).
BOOL LoggingProc (
    IN LPLOG_THREAD_DATA pArg
)
{
    HQUERY hQuery;
    HCOUNTER hThisCounter;
    DWORD dwDelay;
    DWORD dwSampleInterval, dwSampleTime;
    PDH_STATUS pdhStatus;
    DWORD dwNumCounters;
    LONG lStatus;
    TCHAR szDefaultDir[MAX_PATH];
    TCHAR szBaseName[MAX_PATH];
    LPTSTR szThisPath;
    DWORD dwLogType = OPD_CSV_FILE;
    BOOL bRun = FALSE;
    DWORD dwSamplesUntilNewFile;
    TCHAR szCurrentLogFile[MAX_PATH];
    LONG lWaitStatus;
    LPTSTR szStringArray[4];
    DWORD dwFileSizeLimit;
    LONGLONG llFileSizeLimit;
    LONGLONG llFileSize;
    PLOG_COUNTER_INFO pCtrInfo;

    // read registry values
    if (!LoadDataFromRegistry (pArg, szDefaultDir, szBaseName, szCurrentLogFile)) {
        // unable to initialize the query from the registry
        return FALSE;
    }

    // convert to milliseconds for use in timeouts
    dwSampleInterval = pArg->dwTimeInterval * 1000L;

    // open query and add counters from info file
    pdhStatus = PdhOpenQuery (NULL, 0, &hQuery); // from current activity
    if (pdhStatus == ERROR_SUCCESS) {
        dwNumCounters = 0;
        // mszCounterList is a REG_MULTI_SZ-style list: walk each
        // NUL-terminated path until the empty string terminator
        for (szThisPath = pArg->mszCounterList;
             *szThisPath != 0;
             szThisPath += lstrlen(szThisPath) + 1) {
            pdhStatus = PdhAddCounter (hQuery,
                (LPTSTR)szThisPath, dwNumCounters++, &hThisCounter);
            if (pdhStatus == ERROR_SUCCESS) {
                // then add this handle to the list
                pCtrInfo = G_ALLOC (sizeof (LOG_COUNTER_INFO));
                if (pCtrInfo != NULL) {
                    // insert at front of list since the order isn't
                    // important and this is simpler than walking the
                    // list each time.
                    pCtrInfo->hCounter = hThisCounter;
                    pCtrInfo->next = pFirstCounter;
                    pFirstCounter = pCtrInfo;
                }
            }
        }

        // to make sure we get to log the data
        SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

        bRun = TRUE;
        while (bRun) {
            // Get the current Log filename
            if (pArg->dwRenameIntervalCount != 0) {
                // then this is an autonamed file so make current name
                BuildCurrentLogFileName (
                    szBaseName,
                    szDefaultDir,
                    szCurrentLogFile,
                    &pArg->dwCurrentSerialNumber,
                    pArg->dwAutoNameFormat,
                    pArg->dwLogType);
                // reset loop counter: size-based renames use a byte
                // limit, time-based renames use a sample countdown
                switch (pArg->dwRenameIntervalUnits) {
                    case OPD_RENAME_KBYTES:
                        dwFileSizeLimit = pArg->dwRenameIntervalCount * 1024;
                        dwSamplesUntilNewFile = 0;
                        break;
                    case OPD_RENAME_MBYTES:
                        // NOTE(review): DWORD arithmetic — counts above
                        // 4095 MB would overflow here; confirm the UI
                        // bounds this value
                        dwFileSizeLimit = pArg->dwRenameIntervalCount * 1024 * 1024;
                        dwSamplesUntilNewFile = 0;
                        break;
                    case OPD_RENAME_HOURS:
                    case OPD_RENAME_DAYS:
                    case OPD_RENAME_MONTHS:
                    default:
                        dwSamplesUntilNewFile = GetSamplesInRenameInterval(
                            pArg->dwTimeInterval,
                            pArg->dwRenameIntervalCount,
                            pArg->dwRenameIntervalUnits);
                        dwFileSizeLimit = 0;
                        break;
                }
            } else {
                // filename is left as read from the registry;
                // neither rename trigger is armed
                dwSamplesUntilNewFile = 0;
                dwFileSizeLimit = 0;
            }
            llFileSizeLimit = dwFileSizeLimit;

            // open log file using this query
            dwLogType = pArg->dwLogType;
            pdhStatus = OpenLogW (
                szCurrentLogFile,
                LOG_WRITE_ACCESS | LOG_CREATE_ALWAYS,
                &dwLogType,
                hQuery,
                0);

            if (pdhStatus == ERROR_SUCCESS) {
                // announce the new log file in the event log
                szStringArray[0] = pArg->szQueryName;
                szStringArray[1] = szCurrentLogFile;
                ReportEvent (hEventLog,
                    EVENTLOG_INFORMATION_TYPE,
                    0,
                    PERFLOG_LOGGING_QUERY,
                    NULL,
                    2,
                    0,
                    szStringArray,
                    NULL);

                // start sampling immediately
                dwDelay = 0;
                while ((lWaitStatus = WaitForSingleObject (pArg->hExitEvent, dwDelay)) == WAIT_TIMEOUT) {
                    // the event flag will be set when the sampling should exit. if
                    // the wait times out, then that means it's time to collect and
                    // log another sample of data.
                    // the argument receives the time it took to take the
                    // sample so the delay can be adjusted accordingly
                    dwSampleTime = 0;
                    pdhStatus = UpdateLog (&dwSampleTime);
                    if (pdhStatus == ERROR_SUCCESS) {
                        // see if it's time to rename the file
                        if (dwSamplesUntilNewFile) {
                            // time-based rename: break restarts the
                            // outer loop, which builds a new file name
                            if (!--dwSamplesUntilNewFile) break;
                        } else if (llFileSizeLimit) {
                            // size-based rename: see if the file is too big
                            pdhStatus = GetLogFileSize (&llFileSize);
                            if (pdhStatus == ERROR_SUCCESS) {
                                if (llFileSizeLimit <= llFileSize) break;
                            }
                        }
                        // compute new timeout value so samples stay on
                        // the configured interval regardless of how
                        // long the sample itself took
                        if (dwSampleTime < dwSampleInterval) {
                            dwDelay = dwSampleInterval - dwSampleTime;
                        } else {
                            dwDelay = 0;
                        }
                    } else {
                        // unable to update the log so log event and exit
                        ReportEvent (hEventLog,
                            EVENTLOG_ERROR_TYPE,
                            0,
                            PERFLOG_UNABLE_UPDATE_LOG,
                            NULL,
                            0,
                            sizeof(DWORD),
                            NULL,
                            (LPVOID)&pdhStatus);
                        bRun = FALSE;
                        break;
                    }
                } // end while wait keeps timing out

                if (lWaitStatus == WAIT_OBJECT_0) {
                    // then the loop was terminated by the Exit event
                    // so clear the "run" flag to exit the loop & thread
                    bRun = FALSE;
                }

                CloseLog (0);
            } else {
                // unable to open log file so log event log message
                bRun = FALSE; // exit now
            }
        } // end while (bRun)

        PdhCloseQuery (hQuery);

        // update log serial number if necssary
        // NOTE(review): lStatus is assigned but never checked, so a
        // failed registry write is silently ignored
        if (pArg->dwAutoNameFormat == OPD_NAME_NNNNNN) {
            lStatus = RegSetValueEx (
                pArg->hKeyQuery,
                TEXT("Log File Serial Number"),
                0L,
                REG_DWORD,
                (LPBYTE)&pArg->dwCurrentSerialNumber,
                sizeof(DWORD));
        }
    } else {
        // unable to open query so write event log message
    }

    return bRun;
}