void ScanThread::checkDir(const Record& recDir)
try
{
    const fs::path dirPath = recDir.second.header.fileName;

    if (fs::is_directory(dirPath))
    {
        const time_t lastWriteTime = fs::last_write_time(dirPath);

        if (lastWriteTime != recDir.second.header.lastWriteTime)
        {
            RecordData newData = recDir.second;
            newData.header.lastWriteTime = lastWriteTime;
            replaceEntry(make_Record(recDir.first, std::move(newData)));

            scanDir(recDir.first,
                    fs::directory_iterator(dirPath),
                    fs::directory_iterator());
        }
    }
    else
    {
        delEntry(recDir.first);
    }
}
catch(const std::exception& ex)
{
    std::cerr << "Failed to check dir '" << recDir.second.header.fileName
              << "': " << ex.what() << std::endl;
}
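// A minimal sketch of the record types checkDir() relies on (hypothetical;
// the real definitions live elsewhere in the scanner). A Record pairs a
// database ID with its stored metadata: recDir.first is the ID and
// recDir.second is the RecordData whose header carries the cached
// lastWriteTime compared against the filesystem above.
#include <ctime>
#include <string>
#include <utility>

typedef int RecordID;
const RecordID NULL_RECORD_ID = 0; // assumed "not yet stored" sentinel

struct RecordHeader
{
    RecordID    parentID;       // ID of the containing directory record.
    time_t      lastWriteTime;  // Cached filesystem mtime.
    bool        isDir;
    std::string fileName;       // Full path of the entry.
};

struct RecordData
{
    RecordHeader header;

    RecordData(RecordID parent, time_t mtime, bool dir, std::string name)
        : header{parent, mtime, dir, std::move(name)} {}
};

typedef std::pair<RecordID, RecordData> Record;

inline Record make_Record(RecordID id, RecordData data)
{
    return Record(id, std::move(data));
}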
void Daemon::coalescePages()
{
    // Skip if there haven't been enough entries pruned to make this
    // worth our while.
    if (iv_totalPruned < PAGESIZE)
    {
        return;
    }
    iv_totalPruned = 0;

    // Allocate a new back-end page for the coalesced entries.
    BufferPage* outputPage = BufferPage::allocate(true);
    BufferPage* originalOutputPage = outputPage;

    // Get the first page from the original back-end buffer.
    BufferPage* currentPage = iv_first;

    // Iterate through the back-end pages.
    while (currentPage != NULL)
    {
        // Look at all the entries on the back-end pages.
        size_t offset = 0;
        while (offset < currentPage->usedSize)
        {
            Entry* entry =
                reinterpret_cast<Entry*>(&currentPage->data[offset]);

            if (NULL != entry->comp) // Ensure entry is valid.
            {
                Entry* newEntry = NULL;

                // Allocate space on new back-end pages.
                while (NULL == (newEntry = outputPage->claimEntry(
                                    entry->size + sizeof(Entry))))
                {
                    BufferPage* newPage = BufferPage::allocate(true);

                    newPage->next = outputPage;
                    outputPage->prev = newPage;
                    outputPage = newPage;
                }

                // Move entry to new back-end page.
                replaceEntry(entry, newEntry);
            }

            offset += entry->size + sizeof(Entry);
        }
        currentPage = currentPage->prev;
    }

    BufferPage* oldPage = iv_first;

    // Update back-end buffer pointers to point to new back-end pages.
    iv_last = outputPage;
    iv_first = originalOutputPage;

    // Toggle client buffers to ensure no trace extract is going on
    // on the old back-end pages.
    for (size_t i = 0; i < BUFFER_COUNT; i++)
    {
        iv_service->iv_buffers[i]->consumerOp();
    }

    // Delete the old back-end pages.
    while (oldPage)
    {
        BufferPage* temp = oldPage->prev;
        BufferPage::deallocate(oldPage);
        oldPage = temp;
    }
}
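// A sketch of the page-local allocator contract the loops above rely on
// (illustrative only; the real BufferPage is defined with the trace
// service). claimEntry() is assumed to bump-allocate from the page's data
// area and return NULL once the page cannot fit the request, which is what
// drives the allocate-and-relink loops in coalescePages() and
// collectTracePages().
Entry* BufferPage::claimEntry(size_t i_size)
{
    // Refuse the claim when the remaining space cannot fit the request;
    // callers respond by linking in a freshly allocated page.
    if (usedSize + i_size > DATA_SIZE) // DATA_SIZE: assumed capacity constant.
    {
        return NULL;
    }

    Entry* entry = reinterpret_cast<Entry*>(&data[usedSize]);
    usedSize += i_size;
    return entry;
}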
void Daemon::collectTracePages()
{
    // Clear indication from clients.
    iv_service->iv_daemon->clearSignal();

    // Collect buffer pages from front-end.
    BufferPage* srcPages[BUFFER_COUNT];
    for (size_t i = 0; i < BUFFER_COUNT; i++)
    {
        iv_curPages[i] = srcPages[i] =
            iv_service->iv_buffers[i]->claimPages();
        iv_curOffset[i] = 0;
    }

    char* contBuffer = NULL;
    size_t contBufferSize = 0;

    // Process buffer pages.
    do
    {
        size_t whichBuffer = BUFFER_COUNT;
        Entry* whichEntry = NULL;
        uint64_t minTimeStamp = UINT64_MAX;

        // Find the entry with the earliest timestamp.
        for (size_t i = 0; i < BUFFER_COUNT; i++)
        {
            if (NULL == iv_curPages[i]) continue;

            Entry* entry = reinterpret_cast<Entry*>(
                &iv_curPages[i]->data[iv_curOffset[i]]);

            trace_bin_entry_t* binEntry =
                reinterpret_cast<trace_bin_entry_t*>(&entry->data[0]);

            // Wait for entry to be committed.
            while (unlikely(entry->committed == 0))
            {
                task_yield();
            }
            isync();

            uint64_t curTimeStamp =
                TWO_UINT32_TO_UINT64(binEntry->stamp.tbh,
                                     binEntry->stamp.tbl);

            if (curTimeStamp < minTimeStamp)
            {
                whichBuffer = i;
                whichEntry = entry;
                minTimeStamp = curTimeStamp;
            }
        }

        // Did not find another entry; our work is done.
        if (whichBuffer == BUFFER_COUNT)
        {
            break;
        }

        // Increment pointers to next buffer entry.
        iv_curOffset[whichBuffer] += whichEntry->size + sizeof(Entry);
        if (iv_curOffset[whichBuffer] >= iv_curPages[whichBuffer]->usedSize)
        {
            iv_curPages[whichBuffer] = iv_curPages[whichBuffer]->next;
            iv_curOffset[whichBuffer] = 0;
        }

        trace_bin_entry_t* contEntry =
            reinterpret_cast<trace_bin_entry_t*>(&whichEntry->data[0]);

        // Calculate the sizes of the entry.
        size_t contEntryDataLength =
            contEntry->head.length + sizeof(trace_bin_entry_t);
        size_t contEntrySize =
            whichEntry->comp->iv_compNameLen + contEntryDataLength;

        // Allocate a new continuous trace page if needed.
        if ((NULL == contBuffer) ||
            ((contBufferSize + contEntrySize) >= PAGESIZE))
        {
            if (NULL != contBuffer)
            {
                // contBuffer ownership is transferred to the mailbox now.
                sendContBuffer(contBuffer, contBufferSize);
            }

            contBuffer = reinterpret_cast<char*>(malloc(PAGESIZE));
            memset(contBuffer, '\0', PAGESIZE);
            contBuffer[0] = TRACE_BUF_CONT;
            contBufferSize = 1;
        }

        // Add entry to continuous trace.
        memcpy(&contBuffer[contBufferSize],
               whichEntry->comp->iv_compName,
               whichEntry->comp->iv_compNameLen);
        contBufferSize += whichEntry->comp->iv_compNameLen;

        memcpy(&contBuffer[contBufferSize],
               &whichEntry->data[0],
               contEntryDataLength);
        contBufferSize += contEntryDataLength;

        // Allocate a new back-end entry.
        Entry* mainBuffEntry = NULL;
        while (NULL == (mainBuffEntry = iv_last->claimEntry(
                            whichEntry->size + sizeof(Entry))))
        {
            BufferPage* n = BufferPage::allocate(true);

            n->next = iv_last;
            iv_last->prev = n;
            iv_last = n;
        }

        // Move entry from front-end buffer to back-end.
        replaceEntry(whichEntry, mainBuffEntry);

    } while (1);

    // Send remainder of continuous trace buffer.
    if (NULL != contBuffer)
    {
        if (contBufferSize > 1)
        {
            // contBuffer ownership is transferred to the mailbox now.
            sendContBuffer(contBuffer, contBufferSize);
        }
        else
        {
            free(contBuffer);
        }
    }

    // Release pages.
    for (size_t i = 0; i < BUFFER_COUNT; i++)
    {
        // Toggle lock to ensure no trace extract is currently going on.
        iv_service->iv_buffers[i]->consumerOp();

        while (srcPages[i])
        {
            BufferPage* tmp = srcPages[i]->next;
            BufferPage::deallocate(srcPages[i]);
            srcPages[i] = tmp;
        }
    }
}
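// An assumed framing for the merge above (a sketch; the real Entry and
// trace_bin_entry_t definitions ship with the trace headers). Each Entry
// wraps one binary trace record, and the two halves of the timebase stamp
// are what TWO_UINT32_TO_UINT64 combines to order entries across buffers.
struct trace_entry_stamp_t
{
    uint32_t tbh;       // Timebase, upper 32 bits.
    uint32_t tbl;       // Timebase, lower 32 bits.
};

struct trace_entry_head_t
{
    uint16_t length;    // Size of the entry payload in bytes.
    // ... remaining header fields (type, hash, line, etc.) elided ...
};

struct trace_bin_entry_t
{
    trace_entry_stamp_t stamp;
    trace_entry_head_t  head;
    char                data[0]; // Payload follows the headers.
};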
void ScanThread::scanEntry(const fs::path& path,
                           const RecordID& parentID,
                           Records& oldRecords,
                           bool recursive)
try
{
    if (shouldBreak())
    {
        return;
    }

    const bool isDir = fs::is_directory(path);

    Record newRecord = make_Record(
        NULL_RECORD_ID,
        RecordData(parentID, /*last write time*/ 0, isDir, path.string()));

    const std::pair<Records::iterator, Records::iterator> oldRange =
        std::equal_range(oldRecords.begin(), oldRecords.end(),
                         newRecord, CmpByPath());
    assert(std::distance(oldRange.first, oldRange.second) <= 1);

    const Records::iterator itOldRecord =
        oldRange.first != oldRange.second ? oldRange.first
                                          : oldRecords.end();

    if (isDir && recursive)
    {
        // New directory entry.
        if (itOldRecord == oldRecords.end())
        {
            addEntry(std::move(newRecord.second));
        }
    }
    else // file
    {
        if (!isSupportedExtension(path))
        {
            return; // unsupported extension
        }

        newRecord.second.header.lastWriteTime = fs::last_write_time(path);

        if (itOldRecord == oldRecords.end())
        {
            addEntry(std::move(newRecord.second));
        }
        else if (newRecord.second.header.lastWriteTime !=
                 itOldRecord->second.header.lastWriteTime)
        {
            newRecord.first = itOldRecord->first;
            replaceEntry(std::move(newRecord));
        }
    }

    // The record was processed, so remove it from the list.
    if (oldRecords.end() != itOldRecord)
    {
        oldRecords.erase(itOldRecord);
    }
}
catch(const fs::filesystem_error& ex)
{
    std::cerr << "Failed to process filesystem element "
              << path << ": " << ex.what() << std::endl;

    // If the entry is inaccessible, e.g. because a network resource is
    // down, it should not be deleted from the database.
    if (ex.code().value() != ENOENT)
    {
        const Record fakeRecord = make_Record(
            NULL_RECORD_ID,
            RecordData(NULL_RECORD_ID, 0, false, path.string()));

        const Records::iterator itOldRecord = std::lower_bound(
            oldRecords.begin(), oldRecords.end(), fakeRecord, CmpByPath());

        // Prevent the record from being deleted, but only if an exact
        // path match was found (lower_bound may point past it otherwise).
        if (oldRecords.end() != itOldRecord &&
            !CmpByPath()(fakeRecord, *itOldRecord))
        {
            oldRecords.erase(itOldRecord);
        }
    }
}
catch(const std::exception& ex)
{
    std::cerr << "Failed to process filesystem element "
              << path << ": " << ex.what() << std::endl;
}
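// A hypothetical sketch of how scanEntry() is likely driven from scanDir()
// (getChildren is an assumed helper returning the directory's stored
// records sorted by path). Every on-disk child is matched against the old
// records; whatever remains in oldRecords afterwards no longer exists on
// disk and can be removed from the database.
void ScanThread::scanDir(const RecordID& dirID,
                         fs::directory_iterator itDir,
                         fs::directory_iterator itEnd)
{
    Records oldRecords = getChildren(dirID); // assumed helper, sorted by path

    for (; itDir != itEnd && !shouldBreak(); ++itDir)
    {
        scanEntry(itDir->path(), dirID, oldRecords, /*recursive*/ true);
    }

    // Records not seen during the scan were deleted from disk.
    for (Records::const_iterator it = oldRecords.begin();
         it != oldRecords.end(); ++it)
    {
        delEntry(it->first);
    }
}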