/**
 * Stats the given file. If <tt>throttleRate</tt> seconds have passed since
 * the last time stat() was called on this file, then the file will be
 * re-stat()ted, otherwise the cached stat information will be returned.
 *
 * @param filename The file to stat.
 * @param buf A pointer to a stat struct; the retrieved stat information
 *            will be stored here.
 * @param throttleRate Tells this CachedFileStat that the file may only
 *                     be statted at most every <tt>throttleRate</tt> seconds.
 * @return 0 if the stat() call succeeded or if the cached stat information
 *         was used; -1 if something went wrong while statting the file. In
 *         the latter case, <tt>errno</tt> will be populated with an
 *         appropriate error code.
 * @throws SystemException Something went wrong while retrieving the
 *         system time. stat() errors will <em>not</em> result in
 *         SystemException being thrown.
 * @throws boost::thread_interrupted
 */
int stat(const string &filename, struct stat *buf, unsigned int throttleRate = 0) {
    boost::unique_lock<boost::mutex> l(lock);
    EntryMap::iterator it(cache.find(filename));
    EntryPtr entry;
    int ret;

    if (it == cache.end()) {
        // Filename not in cache.
        // If cache is full, remove the least recently used
        // cache entry.
        if (maxSize != 0 && cache.size() == maxSize) {
            EntryList::iterator listEnd(entries.end());
            listEnd--;
            string filename((*listEnd)->filename);
            entries.pop_back();
            cache.erase(filename);
        }

        // Add to cache as most recently used.
        entry = EntryPtr(new Entry(filename));
        entries.push_front(entry);
        cache[filename] = entries.begin();
    } else {
        // Cache hit.
        entry = *it->second;

        // Mark this cache item as most recently used.
        entries.erase(it->second);
        entries.push_front(entry);
        cache[filename] = entries.begin();
    }

    ret = entry->refresh(throttleRate);
    *buf = entry->info;
    return ret;
}
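// The bookkeeping above is a classic LRU pattern: a list keeps entries in
// most-recently-used order and a map from key to list iterator gives O(1)
// lookup. The sketch below is a minimal, self-contained illustration of that
// same pattern only; it is not part of CachedFileStat, and the names
// LruCache, capacity, get and put are invented for this example.
#include <list>
#include <string>
#include <unordered_map>
#include <iostream>

class LruCache {
public:
    explicit LruCache(size_t capacity) : capacity(capacity) {}

    // Returns true and fills `value` if the key is cached; marks the entry
    // most recently used, mirroring the erase + push_front in stat() above.
    bool get(const std::string &key, int &value) {
        auto it = index.find(key);
        if (it == index.end()) return false;
        entries.splice(entries.begin(), entries, it->second); // move to front
        value = it->second->second;
        return true;
    }

    // Inserts or updates a key; evicts the least recently used entry (the
    // back of the list) when the cache is full.
    void put(const std::string &key, int value) {
        auto it = index.find(key);
        if (it != index.end()) {
            it->second->second = value;
            entries.splice(entries.begin(), entries, it->second);
            return;
        }
        if (capacity != 0 && entries.size() == capacity) {
            index.erase(entries.back().first);
            entries.pop_back();
        }
        entries.emplace_front(key, value);
        index[key] = entries.begin();
    }

private:
    size_t capacity;
    std::list<std::pair<std::string, int>> entries;               // MRU first
    std::unordered_map<std::string,
        std::list<std::pair<std::string, int>>::iterator> index;  // key -> node
};

int main() {
    LruCache cache(2);
    cache.put("/etc/passwd", 1);
    cache.put("/etc/hosts", 2);
    cache.put("/etc/fstab", 3);                        // evicts "/etc/passwd"
    int v;
    std::cout << cache.get("/etc/passwd", v) << "\n";  // 0 (evicted)
    std::cout << cache.get("/etc/fstab", v) << "\n";   // 1
}
// Note that the map stores list iterators directly, as stat() does: list
// splice/erase of other nodes never invalidates the iterators it holds.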
void TastyDataCache::addEntry(const EntryPtr& entry)
{
    Q_ASSERT(!entry || !entry->parent());
    if (entry)
        _entries.insert(entry->entryId(), entry.data());
}
void FATArchive::rename(EntryPtr id, const std::string& strNewName)
{
    // TESTED BY: fmt_grp_duke3d_rename
    assert(this->isValid(id));
    FATEntry *pFAT = dynamic_cast<FATEntry *>(id.get());

    // Make sure the filename is within the allowed limit
    if (
        (this->lenMaxFilename > 0) &&
        (strNewName.length() > this->lenMaxFilename)
    ) {
        throw stream::error(createString("maximum filename length is "
            << this->lenMaxFilename << " chars"));
    }

    this->updateFileName(pFAT, strNewName);
    pFAT->strName = strNewName;
    return;
}
void FATArchive::remove(EntryPtr id)
{
    // TESTED BY: fmt_grp_duke3d_remove
    // TESTED BY: fmt_grp_duke3d_remove2
    // TESTED BY: fmt_grp_duke3d_remove_insert
    // TESTED BY: fmt_grp_duke3d_insert_remove

    // Make sure the caller doesn't try to remove something that doesn't exist!
    assert(this->isValid(id));

    FATEntry *pFATDel = dynamic_cast<FATEntry *>(id.get());
    assert(pFATDel);

    // Remove the file's entry from the FAT
    this->preRemoveFile(pFATDel);

    // Remove the entry from the vector
    VC_ENTRYPTR::iterator itErase = std::find(this->vcFAT.begin(), this->vcFAT.end(), id);
    assert(itErase != this->vcFAT.end());
    this->vcFAT.erase(itErase);

    // Update the offsets of any files located after this one (since they will
    // all have been shifted back to fill the gap made by the removal.)
    this->shiftFiles(
        pFATDel,
        pFATDel->iOffset,
        -((stream::delta)pFATDel->storedSize + (stream::delta)pFATDel->lenHeader),
        -1
    );

    // Remove the file's data from the archive
    this->psArchive->seekp(pFATDel->iOffset, stream::start);
    this->psArchive->remove(pFATDel->storedSize + pFATDel->lenHeader);

    // Mark it as invalid in case some other code is still holding on to it.
    pFATDel->bValid = false;

    this->postRemoveFile(pFATDel);
    return;
}
size_t ScrollAdapterBase::GetVisibleItems(
    cursespp::ScrollableWindow* window,
    size_t desiredTopIndex,
    std::deque<EntryPtr>& target)
{
    size_t actualTopIndex = desiredTopIndex;

    /* ensure we have enough data to draw from the specified position to
    the end. if we don't, try to back up a bit until we can fill the buffer */
    int totalHeight = (int) this->height;
    int entryCount = (int) this->GetEntryCount();

    /* we assume the general case -- we're somewhere in the middle of the
    list. we'll start from the specified first item and work our way down */
    for (int i = (int) desiredTopIndex; i < entryCount && totalHeight > 0; i++) {
        EntryPtr entry = this->GetEntry(window, i);
        entry->SetWidth(this->width);
        totalHeight -= entry->GetLineCount();
        target.push_back(entry);
    }

    /* however, if the list is short, we can actually draw more items above
    the specified one. let's work our way backwards! */
    if (totalHeight > 0) {
        target.clear();

        totalHeight = this->height;
        int i = GetEntryCount() - 1;

        while (i >= 0 && totalHeight >= 0) {
            EntryPtr entry = this->GetEntry(window, i);
            entry->SetWidth(this->width);

            int lines = entry->GetLineCount();
            if (lines > totalHeight) {
                break; /* this Entry won't fit. bail. */
            }

            totalHeight -= lines;
            target.push_front(entry);
            --i;
        }

        actualTopIndex = i + 1;
    }

    return actualTopIndex;
}
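// GetVisibleItems() above lays entries out downward from the requested top
// index and, if the window still has room, restarts from the bottom of the
// list and works upward so that short lists stay bottom-anchored. The
// stand-alone sketch below shows that same two-pass layout over plain line
// counts; visibleRange and heights are invented names, not part of
// ScrollAdapterBase.
#include <vector>
#include <cstddef>
#include <iostream>

// Returns the index of the first visible entry and fills `visible` with the
// indices that fit into `windowHeight` lines.
size_t visibleRange(const std::vector<int> &heights, size_t top,
                    int windowHeight, std::vector<size_t> &visible) {
    int remaining = windowHeight;

    // pass 1: walk down from the requested top index
    for (size_t i = top; i < heights.size() && remaining > 0; ++i) {
        remaining -= heights[i];
        visible.push_back(i);
    }

    // pass 2: if there is still room, the list is short; re-anchor at the
    // bottom and walk upward until the next entry would no longer fit
    if (remaining > 0) {
        visible.clear();
        remaining = windowHeight;
        int i = (int) heights.size() - 1;
        while (i >= 0 && remaining >= heights[i]) {
            remaining -= heights[i];
            visible.insert(visible.begin(), (size_t) i);
            --i;
        }
        return (size_t) (i + 1);
    }

    return top;
}

int main() {
    std::vector<int> heights = {1, 2, 1, 1};   // 5 lines of content in total
    std::vector<size_t> visible;
    // a 10-line window asked to start at entry 2: everything fits, so the
    // effective top index is pulled back to 0
    size_t top = visibleRange(heights, 2, 10, visible);
    std::cout << "top=" << top << " count=" << visible.size() << "\n"; // top=0 count=4
}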
void ArchiveTestCase<ClassFactoryT>::ExtractArchive(wxInputStream& in)
{
    typedef Ptr<EntryT> EntryPtr;
    typedef std::list<EntryPtr> Entries;
    typedef typename Entries::iterator EntryIter;

    auto_ptr<InputStreamT> arc(m_factory->NewStream(in));
    int expectedTotal = m_testEntries.size();
    EntryPtr entry;
    Entries entries;

    if ((m_options & PipeIn) == 0)
        OnArchiveExtracted(*arc, expectedTotal);

    while (entry = EntryPtr(arc->GetNextEntry()), entry.get() != NULL) {
        wxString name = entry->GetName(wxPATH_UNIX);

        // provide some context for the error message so that we know which
        // iteration of the loop we were on
        string error_entry((_T(" '") + name + _T("'")).mb_str());
        string error_context(" failed for entry" + error_entry);

        TestEntries::iterator it = m_testEntries.find(name);
        CPPUNIT_ASSERT_MESSAGE(
            "archive contains an entry that shouldn't be there" + error_entry,
            it != m_testEntries.end());

        const TestEntry& testEntry = *it->second;

        wxDateTime dt = testEntry.GetDateTime();
        if (dt.IsValid())
            CPPUNIT_ASSERT_MESSAGE("timestamp check" + error_context,
                                   dt == entry->GetDateTime());

        // non-seekable entries are allowed to have GetSize == wxInvalidOffset
        // until the end of the entry's data has been read past
        CPPUNIT_ASSERT_MESSAGE("entry size check" + error_context,
            testEntry.GetLength() == entry->GetSize() ||
            ((m_options & PipeIn) != 0 && entry->GetSize() == wxInvalidOffset));
        CPPUNIT_ASSERT_MESSAGE(
            "arc->GetLength() == entry->GetSize()" + error_context,
            arc->GetLength() == entry->GetSize());

        if (name.Last() != _T('/')) {
            CPPUNIT_ASSERT_MESSAGE("!IsDir" + error_context, !entry->IsDir());
            wxCharBuffer buf(testEntry.GetSize() + 1);
            CPPUNIT_ASSERT_MESSAGE("Read until Eof" + error_context,
                arc->Read(buf.data(), testEntry.GetSize() + 1).Eof());
            CPPUNIT_ASSERT_MESSAGE("LastRead check" + error_context,
                arc->LastRead() == testEntry.GetSize());
            CPPUNIT_ASSERT_MESSAGE("data compare" + error_context,
                !memcmp(buf.data(), testEntry.GetData(), testEntry.GetSize()));
        } else {
            CPPUNIT_ASSERT_MESSAGE("IsDir" + error_context, entry->IsDir());
        }

        // GetSize() must return the right result in all cases after all the
        // data has been read
        CPPUNIT_ASSERT_MESSAGE("entry size check" + error_context,
            testEntry.GetLength() == entry->GetSize());
        CPPUNIT_ASSERT_MESSAGE(
            "arc->GetLength() == entry->GetSize()" + error_context,
            arc->GetLength() == entry->GetSize());

        if ((m_options & PipeIn) == 0) {
            OnEntryExtracted(*entry, testEntry, arc.get());
            delete it->second;
            m_testEntries.erase(it);
        } else {
            entries.push_back(entry);
        }
    }

    // check that the end of the input archive was reached without error
    CPPUNIT_ASSERT(arc->Eof());

    // for non-seekable streams these data are only guaranteed to be
    // available once the end of the archive has been reached
    if (m_options & PipeIn) {
        for (EntryIter i = entries.begin(); i != entries.end(); ++i) {
            wxString name = (*i)->GetName(wxPATH_UNIX);
            TestEntries::iterator j = m_testEntries.find(name);
            OnEntryExtracted(**i, *j->second);
            delete j->second;
            m_testEntries.erase(j);
        }
        OnArchiveExtracted(*arc, expectedTotal);
    }
}
bool FATArchive::isValid(const EntryPtr id) const
{
    const FATEntry *id2 = dynamic_cast<const FATEntry *>(id.get());
    return ((id2) && (id2->bValid));
}
void FATArchive::resize(EntryPtr id, stream::len newStoredSize,
    stream::len newRealSize)
{
    assert(this->isValid(id));
    stream::delta iDelta = newStoredSize - id->storedSize;
    FATEntry *pFAT = dynamic_cast<FATEntry *>(id.get());

    stream::len oldStoredSize = pFAT->storedSize;
    stream::len oldRealSize = pFAT->realSize;
    pFAT->storedSize = newStoredSize;
    pFAT->realSize = newRealSize;

    try {
        // Update the FAT with the file's new sizes
        this->updateFileSize(pFAT, iDelta);
    } catch (stream::error) {
        // Undo and abort the resize
        pFAT->storedSize = oldStoredSize;
        pFAT->realSize = oldRealSize;
        throw;
    }

    // Add or remove the data in the underlying stream
    stream::pos iStart;
    if (iDelta > 0) { // inserting data
        // TESTED BY: fmt_grp_duke3d_resize_larger
        iStart = pFAT->iOffset + pFAT->lenHeader + oldStoredSize;
        this->psArchive->seekp(iStart, stream::start);
        this->psArchive->insert(iDelta);
    } else if (iDelta < 0) { // removing data
        // TESTED BY: fmt_grp_duke3d_resize_smaller
        iStart = pFAT->iOffset + pFAT->lenHeader + newStoredSize;
        this->psArchive->seekp(iStart, stream::start);
        this->psArchive->remove(-iDelta);
    } else if (pFAT->realSize == newRealSize) {
        // Not resizing the internal size, and the external/real size
        // hasn't changed either, so nothing to do.
        return;
    }

    if (iDelta != 0) {
        // The internal file size is changing, so adjust the offsets etc. of the
        // rest of the files in the archive, including any open streams.
        this->shiftFiles(pFAT, iStart, iDelta, 0);

        // Resize any open substreams for this file
        for (OPEN_FILES::iterator i = this->openFiles.begin();
            i != this->openFiles.end();
            i++
        ) {
            if (i->first.get() == pFAT) {
                if (stream::sub_sptr sub = i->second.lock()) {
                    sub->resize(newStoredSize);
                    // no break, could be multiple opens for same entry
                }
            }
        }
    } // else only realSize changed

    return;
}
FATArchive::EntryPtr FATArchive::insert(const EntryPtr idBeforeThis,
    const std::string& strFilename, stream::pos storedSize, std::string type,
    int attr
)
{
    // TESTED BY: fmt_grp_duke3d_insert2
    // TESTED BY: fmt_grp_duke3d_remove_insert
    // TESTED BY: fmt_grp_duke3d_insert_remove

    // Make sure the filename is within the allowed limit
    if (
        (this->lenMaxFilename > 0) &&
        (strFilename.length() > this->lenMaxFilename)
    ) {
        throw stream::error(createString("maximum filename length is "
            << this->lenMaxFilename << " chars"));
    }

    FATEntry *pNewFile = this->createNewFATEntry();
    EntryPtr ep(pNewFile);

    pNewFile->strName = strFilename;
    pNewFile->storedSize = storedSize;
    pNewFile->realSize = storedSize; // default to no filter
    pNewFile->type = type;
    pNewFile->fAttr = attr;
    pNewFile->lenHeader = 0;
    pNewFile->bValid = false; // not yet valid

    // Figure out where the new file is going to go
    const FATEntry *pFATBeforeThis = NULL;
    if (this->isValid(idBeforeThis)) {
        // Insert at idBeforeThis
        // TESTED BY: fmt_grp_duke3d_insert_mid
        pFATBeforeThis = dynamic_cast<const FATEntry *>(idBeforeThis.get());
        assert(pFATBeforeThis);
        pNewFile->iOffset = pFATBeforeThis->iOffset;
        pNewFile->iIndex = pFATBeforeThis->iIndex;
    } else {
        // Append to end of archive
        // TESTED BY: fmt_grp_duke3d_insert_end
        if (this->vcFAT.size()) {
            const FATEntry *pFATAfterThis =
                dynamic_cast<const FATEntry *>(this->vcFAT.back().get());
            assert(pFATAfterThis);
            pNewFile->iOffset = pFATAfterThis->iOffset
                + pFATAfterThis->lenHeader + pFATAfterThis->storedSize;
            pNewFile->iIndex = pFATAfterThis->iIndex + 1;
        } else {
            // There are no files in the archive
            pNewFile->iOffset = this->offFirstFile;
            pNewFile->iIndex = 0;
        }
    }

    // Add the file's entry to the FAT. May throw (e.g. filename too long),
    // archive should be left untouched in this case.
    FATEntry *returned = this->preInsertFile(pFATBeforeThis, pNewFile);
    if (returned != pNewFile) {
        ep.reset(returned);
        pNewFile = returned;
    }

    if (!pNewFile->filter.empty()) {
        // The format handler wants us to apply a filter to this file.
    }

    // Now it's mostly valid. Really this is here so that it's invalid during
    // preInsertFile(), so any calls in there to shiftFiles() will ignore the
    // new file. But we're about to call shiftFiles() now, and we need the file
    // to be marked valid otherwise it won't be skipped/ignored.
    pNewFile->bValid = true;

    if (this->isValid(idBeforeThis)) {
        // Update the offsets of any files located after this one (since they
        // will all have been shifted forward to make room for the insert.)
        this->shiftFiles(
            pNewFile,
            pNewFile->iOffset + pNewFile->lenHeader,
            pNewFile->storedSize,
            1
        );

        // Add the new file to the vector now all the existing offsets have
        // been updated.
        // TESTED BY: fmt_grp_duke3d_insert_mid
        VC_ENTRYPTR::iterator itBeforeThis =
            std::find(this->vcFAT.begin(), this->vcFAT.end(), idBeforeThis);
        assert(itBeforeThis != this->vcFAT.end());
        this->vcFAT.insert(itBeforeThis, ep);
    } else {
        // TESTED BY: fmt_grp_duke3d_insert_end
        this->vcFAT.push_back(ep);
    }

    // Insert space for the file's data into the archive. If there is a header
    // (e.g. embedded FAT) then preInsertFile() will have inserted space for
    // this and written the data, so our insert should start just after the
    // header.
    this->psArchive->seekp(pNewFile->iOffset + pNewFile->lenHeader, stream::start);
    this->psArchive->insert(pNewFile->storedSize);

    this->postInsertFile(pNewFile);

    return ep;
}
ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * ast, Context & query_context)
{
    EntryPtr res;

    const ClientInfo & client_info = query_context.getClientInfo();
    const Settings & settings = query_context.getSettingsRef();

    if (client_info.current_query_id.empty())
        throw Exception("Query id cannot be empty", ErrorCodes::LOGICAL_ERROR);

    bool is_unlimited_query = isUnlimitedQuery(ast);

    {
        std::unique_lock lock(mutex);

        if (!is_unlimited_query && max_size && processes.size() >= max_size)
        {
            auto max_wait_ms = settings.queue_max_wait_ms.totalMilliseconds();
            if (!max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(max_wait_ms),
                    [&]{ return processes.size() < max_size; }))
                throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size),
                    ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
        }

        /** Why do we use the current user?
          * Because the initial one is passed by the client and its credentials are not verified,
          * so using initial_user for limits would be insecure.
          *
          * Why do we use current_query_id?
          * Because we want to allow distributed queries that run multiple secondary queries on the same server,
          * like SELECT count() FROM remote('127.0.0.{1,2}', system.numbers),
          * so they must have different query_ids.
          */
        {
            auto user_process_list = user_to_queries.find(client_info.current_user);

            if (user_process_list != user_to_queries.end())
            {
                if (!is_unlimited_query && settings.max_concurrent_queries_for_user
                    && user_process_list->second.queries.size() >= settings.max_concurrent_queries_for_user)
                    throw Exception("Too many simultaneous queries for user " + client_info.current_user
                        + ". Current: " + toString(user_process_list->second.queries.size())
                        + ", maximum: " + settings.max_concurrent_queries_for_user.toString(),
                        ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);

                auto range = user_process_list->second.queries.equal_range(client_info.current_query_id);
                if (range.first != range.second)
                {
                    if (!settings.replace_running_query)
                        throw Exception("Query with id = " + client_info.current_query_id + " is already running.",
                            ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING);

                    /// Ask queries to cancel. They will check this flag.
                    for (auto it = range.first; it != range.second; ++it)
                        it->second->is_killed.store(true, std::memory_order_relaxed);
                }
            }
        }

        auto process_it = processes.emplace(processes.end(),
            query_, client_info, settings.max_memory_usage, settings.memory_tracker_fault_probability,
            priorities.insert(settings.priority));

        res = std::make_shared<Entry>(*this, process_it);

        process_it->query_context = &query_context;

        if (!client_info.current_query_id.empty())
        {
            ProcessListForUser & user_process_list = user_to_queries[client_info.current_user];
            user_process_list.queries.emplace(client_info.current_query_id, &res->get());

            process_it->setUserProcessList(&user_process_list);

            /// Limits are only raised (to be more relaxed) or set to something instead of zero,
            /// because settings for different queries would otherwise interfere with each other:
            /// a setting from one query effectively sets values for all other queries.

            /// Track memory usage for all simultaneously running queries.
            /// You should specify this value in the configuration for the default profile,
            /// not for specific users, sessions or queries,
            /// because this setting is effectively global.
            total_memory_tracker.setOrRaiseLimit(settings.max_memory_usage_for_all_queries);
            total_memory_tracker.setDescription("(total)");

            /// Track memory usage for all simultaneously running queries from a single user.
            user_process_list.user_memory_tracker.setParent(&total_memory_tracker);
            user_process_list.user_memory_tracker.setOrRaiseLimit(settings.max_memory_usage_for_user);
            user_process_list.user_memory_tracker.setDescription("(for user)");

            /// Actualize thread group info
            if (auto thread_group = CurrentThread::getGroup())
            {
                std::lock_guard lock_thread_group(thread_group->mutex);
                thread_group->performance_counters.setParent(&user_process_list.user_performance_counters);
                thread_group->memory_tracker.setParent(&user_process_list.user_memory_tracker);
                thread_group->query = process_it->query;

                /// Set query-level memory trackers
                thread_group->memory_tracker.setOrRaiseLimit(process_it->max_memory_usage);
                thread_group->memory_tracker.setDescription("(for query)");
                if (process_it->memory_tracker_fault_probability)
                    thread_group->memory_tracker.setFaultProbability(process_it->memory_tracker_fault_probability);

                /// NOTE: Do not set the limit for the thread-level memory tracker, since it could show misleading
                /// values: allocation and deallocation could happen in different threads.
                process_it->thread_group = std::move(thread_group);
            }

            if (!user_process_list.user_throttler)
            {
                if (settings.max_network_bandwidth_for_user)
                    user_process_list.user_throttler = std::make_shared<Throttler>(
                        settings.max_network_bandwidth_for_user, total_network_throttler);
                else if (settings.max_network_bandwidth_for_all_users)
                    user_process_list.user_throttler = total_network_throttler;
            }
        }

        if (!total_network_throttler && settings.max_network_bandwidth_for_all_users)
        {
            total_network_throttler = std::make_shared<Throttler>(settings.max_network_bandwidth_for_all_users);
        }
    }

    return res;
}
int console_filebrowse_completion(ContextPtr env, char *cmd)
{
    LinkList<Entry> files;
    struct stat filestatus;
#if defined (HAVE_DARWIN) || defined (HAVE_FREEBSD)
    struct dirent **filelist;
#else
    struct dirent **filelist;
#endif
    char path[MAX_CMDLINE];
    char needle[MAX_CMDLINE];
    bool incomplete = false;

    if(cmd[0] != '/') // path is relative: prefix our location
        snprintf(path, MAX_CMDLINE, "%s/%s", getenv("PWD"), cmd);
    else // path is absolute
        strncpy(path, cmd, MAX_CMDLINE);

    if(stat(path, &filestatus) < 0) { // no file there?
        int c = 0;

        // parse backwards to the first '/' and zero it,
        // store the word of the right part in needle
        for(c = strlen(path); path[c] != '/' && c > 0; c--) ;
        strncpy(needle, &path[c + 1], MAX_CMDLINE);
        path[c + 1] = '\0';
        incomplete = true;

        if(stat(path, &filestatus) < 0) { // yet no valid file?
            error("error on file completion path %s: %s", path, strerror(errno));
            return 0;
        }
    } else { // we have a file!
        if(S_ISREG(filestatus.st_mode))
            return 1; // is a regular file!

        // is it a directory? then append the trailing slash
        if(S_ISDIR(filestatus.st_mode)) {
            int c = strlen(path);
            if(path[c - 1] != '/') {
                path[c] = '/';
                path[c + 1] = '\0';
            }
        }
        strncpy(cmd, path, MAX_CMDLINE);
    }

    func("file completion: %s", cmd);

    // at this point in path there should be something valid
    int found = scandir(path, &filelist, filebrowse_completion_selector, alphasort);
    if(found < 0) {
        error("filebrowse_completion: scandir: %s", strerror(errno));
        return 0;
    }

    // insert each entry found in a linklist
    for(int c = found - 1; c > 0; c--) {
        EntryPtr e = MakeShared<Entry>(filelist[c]->d_name);
        files.push_back(e);
    }

    int c = 0; // counter for entries found

    if(incomplete) {
        // list all files in directory *path starting with *needle

        // Find completions
        EntryPtr exactEntry;
        LinkList<Entry> retList;
        std::string cmdString(needle);
        std::transform(cmdString.begin(), cmdString.end(), cmdString.begin(), ::tolower);
        // copy matches through a back inserter so retList grows as needed;
        // writing through retList.begin() on an empty list is invalid
        std::copy_if(files.begin(), files.end(), std::back_inserter(retList),
                     [&] (EntryPtr entry) {
            std::string name = entry->getName();
            std::transform(name.begin(), name.end(), name.begin(), ::tolower);
            if(name == cmdString) {
                exactEntry = entry;
            }
            return name.compare(cmdString) == 0;
        });
        c = retList.size();

        if(exactEntry != NULL) {
            snprintf(cmd, MAX_CMDLINE, "%s%s", path, exactEntry->getName().c_str());
        } else {
            notice("list of %s* files in %s:", needle, path);
            std::for_each(retList.begin(), retList.end(), [&] (EntryPtr entry) {
                ::act(" %s", entry->getName().c_str());
            });
        }
    } else { // list all entries
        notice("list of all files in %s:", path);
        std::for_each(files.begin(), files.end(), [&] (EntryPtr e) {
            ::act("%s", e->getName().c_str());
        });
    }

    return(c);
}
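// For reference, copying matches out of a container with std::copy_if needs
// an inserting output iterator when the destination starts empty, which is
// what std::back_inserter provides. The stand-alone sketch below shows that
// pattern with std::list<std::string> and an invented lowered() helper in
// place of the project's LinkList<Entry> and Entry types.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <iterator>
#include <list>
#include <string>

static std::string lowered(std::string s) {
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return s;
}

int main() {
    std::list<std::string> files = {"README", "readme.txt", "notes.txt"};
    std::string needle = lowered("ReadMe");

    std::list<std::string> matches;
    std::copy_if(files.begin(), files.end(), std::back_inserter(matches),
                 [&](const std::string &name) {
                     return lowered(name) == needle;   // case-insensitive match
                 });

    for (const auto &m : matches)
        std::cout << m << "\n";                        // prints: README
    std::cout << matches.size() << " match(es)\n";     // prints: 1 match(es)
}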
ImagePtr Image_TilesetFroms::openImage(const EntryPtr& id)
{
    ImageEntry *fat = dynamic_cast<ImageEntry *>(id.get());
    assert(fat);
    return fat->item.image;
}
void ScrollAdapterBase::DrawPage(ScrollableWindow* scrollable, size_t index, ScrollPosition& result) {
    WINDOW* window = scrollable->GetContent();
    werase(window);

    if (!scrollable->IsVisible() || !window ||
        this->height == 0 || this->width == 0 || this->GetEntryCount() == 0)
    {
        return;
    }

    if (index >= GetEntryCount()) {
        index = GetEntryCount() - 1;
    }

    std::deque<EntryPtr> visible;
    size_t topIndex = GetVisibleItems(scrollable, index, visible);

    size_t drawnLines = 0;

    for (size_t e = 0; e < visible.size(); e++) {
        EntryPtr entry = visible.at(e);
        size_t count = entry->GetLineCount();

        for (size_t i = 0; i < count && drawnLines < this->height; i++) {
            Color attrs = Color::Default;

            if (this->decorator) {
                attrs = this->decorator(scrollable, topIndex + e, i, entry);
            }

            if (attrs == -1) {
                attrs = entry->GetAttrs(i);
            }

            if (attrs != -1) {
                wattron(window, attrs);
            }

            std::string line = entry->GetLine(i);
            size_t len = u8cols(line);

            /* pad with empty spaces to the end of the line. this allows us to
            do highlight rows. this should probably be configurable. */
            int remain = this->width - len;
            if (remain > 0) {
                line += std::string(remain, ' ');
            }

            /* string is padded above, we don't need a \n */
            checked_wprintw(window, "%s", line.c_str());

            if (attrs != -1) {
                wattroff(window, attrs);
            }

            ++drawnLines;
        }
    }

    result.visibleEntryCount = visible.size();
    result.firstVisibleEntryIndex = topIndex;
    result.lineCount = drawnLines;
    result.totalEntries = GetEntryCount();
}