bool copyFtr( const Path& src, const Path &tgt )
{
   VFSProvider* file = Engine::getVFS("file");
   fassert( file != 0 );

   DirEntry *entry = file->openDir( src.getFullLocation() );
   if( entry == 0 )
   {
      warning( "Can't open directory " + src.getFullLocation() );
      return false;
   }

   String fname;
   String module = src.getFile();
   Path orig( src );
   Path target( tgt );

   while( entry->read( fname ) )
   {
      if( fname.startsWith( module ) && fname.endsWith( ".ftt" ) )
      {
         orig.setFilename( fname );
         target.setFilename( fname );
         if( ! copyFile( orig.get(), target.get() ) )
         {
            warning( "Can't copy source FTT file " + orig.get() );
         }
      }
   }

   entry->close();
   delete entry;
   return true;
}
/*! \brief Returns the next entry belonging to the directory.
    \param foundItem Pointer to a pre-allocated Item that shall be set to the
           found item.
    \param entryIndex Pointer to a pre-allocated int32 that shall be set to the
           found entry index.
    \param _entry Pointer to a pre-allocated DirEntry pointer that shall be set
           to the found entry. May be \c NULL.
    \return \c B_OK, if everything went fine, \c B_ENTRY_NOT_FOUND, if we're
            through.
*/
status_t
DirEntryIterator::GetNext(DirItem *foundItem, int32 *entryIndex,
                          DirEntry **_entry)
{
    status_t error = (foundItem && entryIndex ? InitCheck() : B_BAD_VALUE);
    // get the next DirItem, if necessary
    // the loop skips empty DirItems gracefully
    while (error == B_OK
           && (fIndex < 0 || fIndex >= fDirItem.GetEntryCount())) {
        error = fItemIterator.GetNext(&fDirItem, TYPE_DIRENTRY);
        if (error == B_OK) {
            if (fDirItem.Check() == B_OK)
                fIndex = 0;
            else    // bad data: skip the item
                fIndex = -1;
        }
    }
    // get the next entry and check whether it has the correct offset
    if (error == B_OK) {
        DirEntry *entry = fDirItem.EntryAt(fIndex);
        if (!fFixedHash
            || offset_hash_value(entry->GetOffset())
               == offset_hash_value(GetOffset())) {
            *foundItem = fDirItem;
            *entryIndex = fIndex;
            if (_entry)
                *_entry = entry;
            fIndex++;
        } else
            error = B_ENTRY_NOT_FOUND;
    }
    return error;
}
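// A minimal usage sketch (not part of the original driver): a hypothetical
// Volume helper that walks the root directory with the iterator above and also
// retrieves the optional _entry out-parameter. The constructor arguments mirror
// those used by Volume::_DetectHashFunction() further down; everything else is
// an assumption for illustration only.
int32
Volume::_CountRootDirEntries()
{
    DirEntryIterator iterator(fTree, fRootVNode->GetDirID(),
                              fRootVNode->GetObjectID(), DOT_DOT_OFFSET + 1);
    DirItem item;
    int32 index = 0;
    DirEntry *entry = NULL;
    int32 count = 0;
    // GetNext() returns B_OK while entries remain and B_ENTRY_NOT_FOUND once
    // the directory is exhausted.
    while (iterator.GetNext(&item, &index, &entry) == B_OK) {
        size_t nameLen = 0;
        if (item.EntryNameAt(index, &nameLen) != NULL)
            count++;    // a real caller would use the name and entry here
    }
    return count;
}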
static void addDirsAsGroups(Directory *root, GroupDef *parent, int level)
{
  GroupDef *gd=0;
  if (root->kind()==DirEntry::Dir)
  {
    gd = new GroupDef("[generated]",
                      1,
                      root->path(), // name
                      root->name()  // title
                     );
    if (parent)
    {
      parent->addGroup(gd);
      gd->makePartOfGroup(parent);
    }
    else
    {
      Doxygen::groupSDict->append(root->path(),gd);
    }
  }
  QListIterator<DirEntry> dli(root->children());
  DirEntry *de;
  for (dli.toFirst();(de=dli.current());++dli)
  {
    if (de->kind()==DirEntry::Dir)
    {
      addDirsAsGroups((Directory *)de,gd,level+1);
    }
  }
}
static void writeDirTreeNode(QTextStream &t, Directory *root, int level)
{
  QCString indent;
  indent.fill(' ',level*2);
  QListIterator<DirEntry> dli(root->children());
  DirEntry *de;
  for (dli.toFirst();(de=dli.current());++dli)
  {
    t << indent << "<p>";
    generateIndent(t,de,0);
    if (de->kind()==DirEntry::Dir)
    {
      Directory *dir=(Directory *)de;
      //printf("%s [dir]: %s (last=%d,dir=%d)\n",indent.data(),dir->name().data(),dir->isLast(),dir->kind()==DirEntry::Dir);
      t << "<img " << FTV_IMGATTRIBS(folderclosed) << "/>";
      t << dir->name();
      t << "</p>\n";
      t << indent << "<div>\n";
      writeDirTreeNode(t,dir,level+1);
      t << indent << "</div>\n";
    }
    else
    {
      //printf("%s [file]: %s (last=%d,dir=%d)\n",indent.data(),de->file()->name().data(),de->isLast(),de->kind()==DirEntry::Dir);
      t << "<img " << FTV_IMGATTRIBS(doc) << "/>";
      t << de->file()->name();
      t << "</p>\n";
    }
  }
}
// _VerifyHashFunction
bool
Volume::_VerifyHashFunction(hash_function_t function)
{
    bool result = true;
    // iterate over the entries in the root dir until we find an entry that
    // falsifies the hash function (or until we run out of entries)
    DirEntryIterator iterator(fTree, fRootVNode->GetDirID(),
                              fRootVNode->GetObjectID(), DOT_DOT_OFFSET + 1);
    DirItem item;
    int32 index = 0;
    while (iterator.GetNext(&item, &index) == B_OK) {
        DirEntry *entry = item.EntryAt(index);
        uint64 offset = entry->GetOffset();
        // try the hash function
        size_t nameLen = 0;
        if (const char *name = item.EntryNameAt(index, &nameLen)) {
            uint64 testOffset = key_offset_for_name(function, name, nameLen);
            if (offset_hash_value(offset) != offset_hash_value(testOffset)) {
                result = false;
                break;
            }
        }
        // else: bad data
    }
    return result;
}
void Path::GetAllFiles(std::vector<std::string> &files, const std::string &path, const std::string &filter)
{
    Directory* pDir = new Directory();
    DirEnumerator* pDirEnum;

    result r = pDir->Construct(path.c_str());
    if (r != E_SUCCESS)
    {
        AppLog(GetErrorMessage(r));
        delete pDir;
        return;
    }

    pDirEnum = pDir->ReadN();
    while (pDirEnum->MoveNext() == E_SUCCESS)
    {
        DirEntry entry = pDirEnum->GetCurrentDirEntry();
        if (entry.IsDirectory())
            continue;

        Path fileName(StringUtils::ToNarrow(entry.GetName().GetPointer()));
        if (fileName.GetExt() == filter.substr(filter.size() - 3))
        {
            files.push_back(fileName.GetFilenameExt());
        }
    }

    delete pDir;
    delete pDirEnum;
}
int DirPage::deleteRecords(Where *where, int* attrType, int numOfAttr){
    if(DEcount == 0){
        lg2("@DirPage_"<<pageid<<" : No Records to delete.");
        return 0;
    }
    DataPage* datapage;
    long noOfRecs = 0;
    long dataPid;
    long maxTFS;
    DirEntry *de;   // dirEntries;
    for(vector<DirEntry*>::size_type i = 0; i < dirEntries.size(); i++){
        // create a DataPage for every entry and try retrieving records
        de = dirEntries[i];
        dataPid = de->getPageID();
        datapage = new DataPage(dataPid);
        noOfRecs += datapage->deleteRecords(where,attrType,numOfAttr);
        maxTFS = datapage->getTotalFreeSize();
        de->setTFS(maxTFS);
        if(maxSpaceAvailable < maxTFS)
            maxSpaceAvailable = maxTFS;
        de->writeDE(p,i);
        writeToPage();
        delete datapage;
    }
    return noOfRecs;
}
void init(const DirEntry & src)
{
    name = src.name();
    kind = src.kind();
    size = src.size();
    hasProps = src.hasProps();
    createdRev = src.createdRev();
    time = src.time();
    lastAuthor = src.lastAuthor();
    lockToken = src.lockToken();
    lockOwner = src.lockOwner();
    lockComment = src.lockComment();
}
/*! \brief Returns the previous entry belonging to the directory.
    \param foundItem Pointer to a pre-allocated Item that shall be set to the
           found item.
    \param entryIndex Pointer to a pre-allocated int32 that shall be set to the
           found entry index.
    \param _entry Pointer to a pre-allocated DirEntry pointer that shall be set
           to the found entry. May be \c NULL.
    \return \c B_OK, if everything went fine, \c B_ENTRY_NOT_FOUND, if we're
            through.
*/
status_t
DirEntryIterator::GetPrevious(DirItem *foundItem, int32 *entryIndex,
                              DirEntry **_entry)
{
//printf("DirEntryIterator::GetPrevious()\n");
    status_t error = (foundItem && entryIndex ? InitCheck() : B_BAD_VALUE);
    if (error == B_OK && fDone)
        error = B_ENTRY_NOT_FOUND;
    // get the next DirItem, if necessary
    // the loop skips empty DirItems gracefully
    while (error == B_OK
           && (fIndex < 0 || fIndex >= fDirItem.GetEntryCount())) {
        error = fItemIterator.GetPrevious(&fDirItem, TYPE_DIRENTRY);
        if (error == B_OK) {
            if (fDirItem.Check() == B_OK)
                fIndex = fDirItem.GetEntryCount() - 1;
            else    // bad data: skip the item
                fIndex = -1;
        }
    }
//printf("  found dir item: %s\n", strerror(error));
    // skip entries with a greater offset
    while (error == B_OK && fIndex >= 0
           && fDirItem.EntryAt(fIndex)->GetOffset() > GetOffset()) {
//printf("  skipping entry %ld: offset %lu\n", fIndex, fDirItem.EntryAt(fIndex)->GetOffset());
        fIndex--;
    }
    // get the entry and check whether it has the correct offset
    if (error == B_OK) {
//printf("  entries with greater offsets skipped: index: %ld\n", fIndex);
        if (fIndex >= 0
//&& (printf("  entry index %ld: offset %lu\n", fIndex, fDirItem.EntryAt(fIndex)->GetOffset()), true)
            && (!fFixedHash
                || offset_hash_value(fDirItem.EntryAt(fIndex)->GetOffset())
                   == offset_hash_value(GetOffset()))) {
//printf("  entry found\n");
            DirEntry *entry = fDirItem.EntryAt(fIndex);
            *foundItem = fDirItem;
            *entryIndex = fIndex;
            fDone = (fFixedHash
                     && offset_generation_number(entry->GetOffset()) == 0);
            if (_entry)
                *_entry = entry;
            fIndex--;
        } else
            error = B_ENTRY_NOT_FOUND;
    }
//printf("DirEntryIterator::GetPrevious() done: %s\n", strerror(error));
    return error;
}
FhgfsOpsErr MovingDirInsertMsgEx::insert()
{
   MetaStore* metaStore = Program::getApp()->getMetaStore();

   FhgfsOpsErr retVal;

   EntryInfo* toDirInfo = this->getToDirInfo();

   // reference parent
   DirInode* parentDir = metaStore->referenceDir(toDirInfo->getEntryID(), true);
   if(!parentDir)
      return FhgfsOpsErr_PATHNOTEXISTS;

   /* create dir-entry and add information about its inode from the given buffer */
   std::string newName = this->getNewName();
   const char* buf = this->getSerialBuf();

   DirEntry* newDirEntry = new DirEntry(newName);
   if (!newDirEntry->deserializeDentry(buf) )
   {
      LogContext("File rename").logErr("Bug: Deserialization of remote buffer failed. Are all "
         "meta servers running with the same version?" );

      delete newDirEntry;
      metaStore->releaseDir(toDirInfo->getEntryID() );
      return FhgfsOpsErr_INTERNAL;
   }

   FhgfsOpsErr mkRes = parentDir->makeDirEntry(newDirEntry);
   switch(mkRes)
   {
      case FhgfsOpsErr_SUCCESS:
      case FhgfsOpsErr_EXISTS:
         retVal = mkRes;
         break;

      default:
         retVal = FhgfsOpsErr_INTERNAL;
         break;
   }

   // clean-up
   metaStore->releaseDir(toDirInfo->getEntryID() );

   return retVal;
}
XMDirent* xmBadaReaddir ( XMDir* dirp )
{
    XMDirent* ret = 0;

    if ( dirp->dir_enum->MoveNext ( ) == E_SUCCESS )
    {
        DirEntry entry = dirp->dir_enum->GetCurrentDirEntry ( );
        String name = entry.GetName ( );

        ByteBuffer* utf8 = StringUtil::StringToUtf8N ( name );
        strcpy ( (KDchar *) dirp->dir_info.d_name, (const KDchar *) utf8->GetPointer ( ) );
        delete utf8;    // StringToUtf8N() returns a buffer owned by the caller

        ret = &dirp->dir_info;
    }

    return ret;
}
DirEntry* VirtualFileSystem::fromPath(const char* path)
{
    DirEntry* root = getRoot();

    // we loop over each subfolder in the path
    while (true) {
        // first we skip as many slashes as we can
        while (*path == '/')
            path++;

        // then we get the length of the current folder, this is the length until the next / or the end
        int length = 0;
        while (path[length] != '/' && path[length] != '\0')
            length++;

        // if the length is zero, this was a trailing slash and we can stop here
        if (length == 0)
            break;

        // we get the folder name
        std::string cur(path, length);

        // and then try to find the directory that matches
        bool matched = false;
        while (root->valid()) {
            if (root->name() == cur) {
                auto next = root->openDir();
                delete root;
                root = next;
                matched = true;
                break;
            }
            root->advance();
        }

        if (!matched) {
            delete root;
            return nullptr;
        }

        path += length;
        if (*path == '\0')
            break;
    }

    return root;
}
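// Usage sketch (illustrative, not from the original source): resolving a path
// and releasing the result. fromPath() returns a heap-allocated DirEntry and
// deletes its intermediate entries itself (returning nullptr on failure), so
// the caller appears to own the returned pointer; the path literal and the
// helper name below are hypothetical.
void printIfPathExists(VirtualFileSystem& vfs)
{
    DirEntry* dir = vfs.fromPath("/usr/share/fonts/");
    if (dir == nullptr)
        return;             // some path component was not found

    // ... use dir->name(), dir->openDir(), etc. here ...
    delete dir;             // release the entry returned by fromPath()
}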
DataPage* DirPage::createDataPage(){
    //create a datapage.
    //get tfs and store it in DE
    if(DEcount == MAX_DE){
        error("No Free space to insert DE");
        return NULL;
    }
    DataPage *dp = new DataPage();
    long pid = dp->getPageid();
    long tfs = dp->getTotalFreeSize();
    DirEntry *de = new DirEntry(pid,tfs);
    dirEntries.push_back(de);
    de->writeDE(p,DEcount);
    DEcount++;
    if(tfs>maxSpaceAvailable)
        maxSpaceAvailable = tfs;
    writeToPage();
    return dp;
}
void ZLbadaPaintContext::collectFiles(std::map<std::string, std::string> &names, const char* path)
{
    //TODO collectFiles
    AppLog("ZLbadaPaintContext::collectFiles");

    Directory dir;
    DirEnumerator *pDirEnum = null;
    result r = E_SUCCESS;

    // Opens the directory
    r = dir.Construct(path);
    AppLog(" dir.Construct %s", path);
    if(IsFailed(r))
        AppLog("IsFailed"); //goto CATCH;

    // Reads all the directory entries
    pDirEnum = dir.ReadN();
    // if(!pDirEnum)
    //     goto CATCH;

    while(pDirEnum->MoveNext() == E_SUCCESS)
    {
        DirEntry dirEntry = pDirEnum->GetCurrentDirEntry();
        Tizen::Base::String str = dirEntry.GetName();
        // AppLog("dirEntry name Length = %d",str.GetLength());

        Utf8Encoding utf8;
        ByteBuffer* pBB = utf8.GetBytesN(str);
        std::string shortName((const char*)pBB->GetPointer()); //,str.GetLength());
        AppLog("dirEntry name = %s", shortName.c_str());

        if (shortName != "." && shortName != "..")
        {
            std::string fullName;
            fullName = path + shortName;
            AppLog("fullName = %s", fullName.c_str());
            names.insert(std::make_pair(shortName, fullName));
        }
        delete pBB;
        // names.push_back(shortName);
    }

    // Deletes the enumerator
    delete pDirEnum;
    AppLog("Succeeded");
}
/*! \brief Searches an entry in a directory.
    \note Must not be called with \a entryName "." or ".."!
    \param dir The directory.
    \param entryName Name of the entry.
    \param foundNode pointer to a pre-allocated VNode to be initialized to
           the found entry.
    \param failIfHidden The method shall fail, if the entry is hidden.
    \return \c B_OK, if everything went fine.
*/
status_t
Volume::FindDirEntry(VNode *dir, const char *entryName, VNode *foundNode,
                     bool failIfHidden)
{
    status_t error = (dir && foundNode ? B_OK : B_BAD_VALUE);
    // find the DirEntry
    DirItem item;
    int32 entryIndex = 0;
    if (error == B_OK) {
        error = fTree->FindDirEntry(dir->GetDirID(), dir->GetObjectID(),
                                    entryName, &item, &entryIndex);
    }
    // find the child node
    if (error == B_OK) {
        DirEntry *entry = item.EntryAt(entryIndex);
        error = FindVNode(entry->GetDirID(), entry->GetObjectID(), foundNode);
        if (error == B_OK && failIfHidden && entry->IsHidden())
            error = B_ENTRY_NOT_FOUND;
    }
    return error;
}
long DirPage::updateRecords(Where *where, Modify *modify, RecordSet* rs){
    if(DEcount == 0){
        lg2("@DirPage_"<<pageid<<" : No Records to update.");
        return 0;
    }
    DataPage* datapage;
    long noOfRecs = 0;
    long dataPid;
    long maxTFS;
    DirEntry *de;   // dirEntries;
    for(vector<DirEntry*>::size_type i = modify->getStartDirEntry(); i < dirEntries.size(); i++){
        // create a DataPage for every entry and try retrieving records
        de = dirEntries[i];
        dataPid = de->getPageID();
        datapage = new DataPage(dataPid);
        noOfRecs += datapage->updateRecords(where,modify,rs);
        maxTFS = datapage->getTotalFreeSize();
        de->setTFS(maxTFS);
        if(maxSpaceAvailable < maxTFS)
            maxSpaceAvailable = maxTFS;
        de->writeDE(p,i);
        if(modify->isThereMore()){
            writeToPage();
            modify->setStartDirEntry(i);
            delete datapage;
            return noOfRecs;
        }
        delete datapage;
    }
    writeToPage();
    return noOfRecs;
}
// _DetectHashFunction
uint32
Volume::_DetectHashFunction()
{
    // iterate over the entries in the root dir until we find an entry that
    // lets us draw an unambiguous conclusion
    DirEntryIterator iterator(fTree, fRootVNode->GetDirID(),
                              fRootVNode->GetObjectID(), DOT_DOT_OFFSET + 1);
    uint32 foundCode = UNSET_HASH;
    DirItem item;
    int32 index = 0;
    while (foundCode == UNSET_HASH
           && iterator.GetNext(&item, &index) == B_OK) {
        DirEntry *entry = item.EntryAt(index);
        uint64 offset = entry->GetOffset();
        uint32 hashCodes[] = { TEA_HASH, YURA_HASH, R5_HASH };
        int32 hashCodeCount = sizeof(hashCodes) / sizeof(uint32);
        size_t nameLen = 0;
        const char *name = item.EntryNameAt(index, &nameLen);
        if (!name)    // bad data!
            continue;
        // try each hash function -- if there's a single winner, we're done,
        // otherwise the next entry must help
        for (int32 i = 0; i < hashCodeCount; i++) {
            hash_function_t function = hash_function_for_code(hashCodes[i]);
            uint64 testOffset = key_offset_for_name(function, name, nameLen);
            if (offset_hash_value(offset) == offset_hash_value(testOffset)) {
                if (foundCode != UNSET_HASH) {
                    // ambiguous
                    foundCode = UNSET_HASH;
                    break;
                } else
                    foundCode = hashCodes[i];
            }
        }
    }
    return foundCode;
}
void CacheBase::FillReadDir(const char* path, void *buf, fuse_fill_dir_t filler,
                            off_t offset, struct fuse_file_info *fi)
{
    BlockLockMutex lock(this);
    DirEntry* dir = dynamic_cast<DirEntry*>(Path2File(path));
    if(!dir)
        throw NoSuchFileOrDir();

    FileMap files = dir->GetFiles();

    for(FileMap::const_iterator it = files.begin(); it != files.end(); ++it)
    {
        if(it->second->IsRemoved())
            continue;

        struct stat st;
        memset(&st, 0, sizeof st);
        /*st.st_ino = de->d_ino;
        st.st_mode = de->d_type << 12;*/
        if(filler(buf, it->second->GetName().c_str(), &st, 0))
            break;
    }
}
/*! \brief Finds the node identified by a directory ID, object ID pair.
    \note The method does not initialize the parent ID for non-directory nodes.
    \param dirID Directory ID of the node to be found.
    \param objectID Object ID of the node to be found.
    \param node pointer to a pre-allocated VNode to be initialized to the
           found node.
    \return \c B_OK, if everything went fine.
*/
status_t
Volume::FindVNode(uint32 dirID, uint32 objectID, VNode *node)
{
    // NOTE: The node's parent dir ID is not initialized!
    status_t error = (node ? B_OK : B_BAD_VALUE);
    // init the node
    if (error == B_OK)
        error = node->SetTo(dirID, objectID);
    // find the stat item
    StatItem item;
    if (error == B_OK) {
        error = fTree->FindStatItem(dirID, objectID, &item);
        if (error != B_OK) {
            FATAL(("Couldn't find stat item for node (%lu, %lu)\n",
                   dirID, objectID));
        }
    }
    // get the stat data
    if (error == B_OK)
        SET_ERROR(error, item.GetStatData(node->GetStatData(), true));
    // for dirs get the ".." entry, since we need the parent dir ID
    if (error == B_OK && node->IsDir()) {
        DirItem dirItem;
        int32 index = 0;
        error = fTree->FindDirEntry(dirID, objectID, "..", &dirItem, &index);
        if (error == B_OK) {
            DirEntry *entry = dirItem.EntryAt(index);
            node->SetParentID(entry->GetDirID(), entry->GetObjectID());
        } else {
            FATAL(("failed to find `..' entry for dir node (%lu, %ld)\n",
                   dirID, objectID));
        }
    }
    return error;
}
int FileRoot::checkfile(const char* name, DirEntry &info, DirEntry::object_info_level mode) {
  std::string new_name;
  if(name[0] != '/') {
    new_name=cur_dir+'/'+name;
  }
  else {
    new_name=name;
  };
  error=FileNode::no_error;
  if(!Arc::CanonicalDir(new_name,false)) return 1;
  if(new_name.empty()) {
    info.reset();
    info.name="/";
    info.is_file=false;
    return 0;
  };
  for(std::list<FileNode>::iterator i=nodes.begin();i!=nodes.end();++i) {
    if(i->belongs(new_name.c_str())) {
      if(i->checkfile(new_name,info,mode) != 0) {
        error=i->error();
        return 1;
      };
      info.name="/"+new_name;
      return 0;
    };
  };
  NO_PLUGIN(name);
  return 1;
}
result CategoryItemForm::ReadCustomListItems()
{
    result r = E_SUCCESS;

    String dirName(L"/Home/catalog/" + dir);
    Directory* pDir;
    DirEnumerator* pDirEnum;

    pDir = new Directory; // allocate Directory instance

    // Open directory
    r = pDir->Construct(dirName);

    // Read all directory entries
    pDirEnum = pDir->ReadN();

    String contentType;
    int i = 0;
    while(pDirEnum->MoveNext() == E_SUCCESS)
    {
        DirEntry dirEntry = pDirEnum->GetCurrentDirEntry();
        if(dirEntry.IsNomalFile())
        {
            //AppLog("%S", dirEntry.GetName().GetPointer());
            if(!dirEntry.GetName().Equals("category.info", false))
            {
                String fileName(dirName + "/" + dirEntry.GetName());
                String title, desc;
                String iTempStr, iTempStr2;

                File file;
                result r = file.Construct(fileName, L"r");
                if( IsFailed(r) )
                {
                    AppLog("File::Construct() is failed by %s", GetErrorMessage(r));
                }

                FileAttributes fileAttrs;
                file.GetAttributes(fileName, fileAttrs);
                long long size = fileAttrs.GetFileSize();

                ByteBuffer readBuffer;
                readBuffer.Construct((int)size + 1);

                r = file.Read(readBuffer);
                if( IsFailed(r) )
                {
                    AppLog("File::Read() is failed by %s", GetErrorMessage(r));
                }

                char* data = new char[readBuffer.GetLimit() + 1];
                readBuffer.SetPosition(0);
                readBuffer.GetArray((byte*)data, 0, readBuffer.GetLimit());
                data[readBuffer.GetLimit()] = '\0';

                //String str = String(data);
                String str;
                r = StringUtil::Utf8ToString(data, str);
                delete[] data; // allocated with new[], so release with delete[]

                if(IsFailed(r))
                {
                    AppLog("File read error. File : %S", fileName.GetPointer());
                    continue;
                }

                file.Seek(FILESEEKPOSITION_BEGIN, 0);
                file.Read(title);

                r = TextPic::GetTranslated(title);
                if (IsFailed(r))
                {
                    continue;
                }

                int linecount = 0;
                while(file.Read(iTempStr) != E_END_OF_FILE)
                {
                    linecount++;
                    iTempStr2.Append(iTempStr);
                }

                anciilist.Add(*(new String(iTempStr2)));
                titlelist.Add(*(new String(title)));
                filelist.Add(*(new String(fileName)));

                ItemListForm::AddListItem(*CategoryList, title, iTempStr2, i++, linecount);
                file.Flush();
            }
        }
    }

    delete pDirEnum;
    delete pDir;

    return r;
}
bool operator()(const DirEntry& de1, const DirEntry& de2)
{
    return de1.Path() > de2.Path();
}
bool copyRuntime( const Path& binpath, const Path& tgtpath )
{
   message( "Searching VSx CRT in " + binpath.getFullLocation() );

   // open the binary path in search of "Microsoft.*.CRT"
   VFSProvider* provider = Engine::getVFS( "file" );
   fassert( provider != 0 );

   DirEntry* dir = provider->openDir( binpath.getFullLocation() );
   if( dir == 0 )
   {
      warning( "Can't search CRT in " + binpath.getFullLocation() );
      return false;
   }

   String fname;
   while( dir->read(fname) )
   {
      if( fname.wildcardMatch("Microsoft.*.CRT") )
      {
         // we're done with dir.
         delete dir;

         Path source( binpath.getFullLocation() + "/" + fname + "/");
         Path target( tgtpath.getFullLocation() + "/" + fname + "/");

         // first, create the target path
         int32 fsStatus;
         if( ! Sys::fal_mkdir( target.getFullLocation(), fsStatus, true ) )
         {
            warning( "Can't create CRT directory in " + target.getFullLocation() );
            return false;
         }

         // then copy everything inside it.
         DirEntry* crtdir = provider->openDir( source.getFullLocation() );
         if( crtdir == 0 )
         {
            warning( "Can't read source CRT directory " + source.getFullLocation() );
            return false;
         }

         // loop copying everything that's not a dir.
         String sFile;
         while( crtdir->read( sFile ) )
         {
            if( sFile.startsWith(".") )
               continue;

            source.setFilename( sFile );
            target.setFilename( sFile );
            if ( ! copyFile( source.get(), target.get() ) )
            {
               delete crtdir;
               warning( "Can't copy CRT file " + source.get() + " into " + target.get() );
               return false;
            }
         }

         delete crtdir;  // release the CRT directory handle before returning
         return true;
      }
   }

   delete dir;
   return false;
}
void addPlugins( const Options& options_main, const String& parentModule, const String& path )
{
   message( "Loading plugin \"" + path + "\" for module " + parentModule );

   Path modPath( parentModule );
   modPath = modPath.getFullLocation() + "/" + path;

   // prepare the target plugin path
   Path outputPath( modPath );
   outputPath.setFilename("");

   Path mainPath;
   mainPath.setFullLocation( options_main.m_sMainScriptPath );
   relativize( mainPath, outputPath );
   outputPath.setFullLocation( options_main.m_sTargetDir + "/" + outputPath.getLocation() );

   // topmost location of the plugin must be
   if( path.endsWith("*") )
   {
      VFSProvider* file = Engine::getVFS("file");
      fassert( file != 0 );

      DirEntry *entry = file->openDir( modPath.getFullLocation() );
      if( entry == 0 )
      {
         warning( "Can't open plugin directory \"" + modPath.getFullLocation()
            + "\" for module " + parentModule );
         return;  // nothing to scan; also avoids dereferencing a null entry below
      }

      String fname;
      while( entry->read( fname ) )
      {
         // binary?
         if ( fname.endsWith(".fam") )
         {
            // do we have also a source?
            modPath.setFilename( fname );
            modPath.setExtension( "fal" );
            FileStat famStats;
            if( Sys::fal_stats( modPath.get(), famStats ) )
            {
               // we have already a fal that has been transferred or will be transferred later,
               // so wait for that.
               continue;
               // otherwise, go on transferring the source.
            }

            // same for ftd
            modPath.setExtension( "ftd" );
            if( Sys::fal_stats( modPath.get(), famStats ) )
            {
               continue;
            }
         }
         else if( fname.endsWith( DllLoader::dllExt() ) )
         {
            // Transfer DLLs as they are.
         }
         // source?
         else if( fname.endsWith( ".fal" ) || fname.endsWith(".ftd") )
         {
            // go on, transfer the source.
         }
         else
         {
            // we don't know how to manage other plugins
            continue;
         }

         // copy our options, so that transferModule doesn't pollute them
         Options options( options_main );
         options.m_sTargetDir = outputPath.get();

         // ok, transfer the thing
         modPath.setFilename( fname );
         transferModules( options, modPath.get() );
      }

      entry->close();
      delete entry;
   }
   else
   {
      // copy our options, so that transferModule doesn't pollute them
      Options options( options_main );
      options.m_sTargetDir = outputPath.get();

      transferModules( options, modPath.get() );
   }
}
bool operator()(const DirEntry& de1, const DirEntry& de2)
{
    return de1.Status().Native().st_mtime > de2.Status().Native().st_mtime;
}
/**
 * Create a new file on this (remote) meta-server. This is the 'toFile' on a rename() client call.
 *
 * Note: Replaces existing entry.
 *
 * @param buf serialized inode object
 * @param outUnlinkedInode the unlinked (owned) file (in case a file was overwritten
 *    by the move operation); the caller is responsible for the deletion of the local file and the
 *    corresponding object; may not be NULL
 */
FhgfsOpsErr MetaStore::moveRemoteFileInsert(EntryInfo* fromFileInfo, std::string toParentID,
   std::string newEntryName, const char* buf, FileInode** outUnlinkedInode)
{
   // note: we do not allow newEntry to be a file if the old entry was a directory (and vice versa)

   const char* logContext = "rename(): Insert remote entry";
   FhgfsOpsErr retVal = FhgfsOpsErr_INTERNAL;

   *outUnlinkedInode = NULL;

   SafeRWLock safeMetaStoreLock(&rwlock, SafeRWLock_READ); // L O C K

   DirInode* toParent = referenceDirUnlocked(toParentID, true);
   if(!toParent)
   {
      retVal = FhgfsOpsErr_PATHNOTEXISTS;
      safeMetaStoreLock.unlock(); // U N L O C K
      return retVal;
   }

   // toParent exists
   SafeRWLock toParentMutexLock(&toParent->rwlock, SafeRWLock_WRITE); // L O C K ( T O )

   DirEntry* overWrittenEntry = toParent->dirEntryCreateFromFileUnlocked(newEntryName);
   if (overWrittenEntry)
   {
      EntryInfo overWriteInfo;
      std::string parentID = overWrittenEntry->getID();
      overWrittenEntry->getEntryInfo(parentID, 0, &overWriteInfo);
      bool isSameInode;

      FhgfsOpsErr checkRes = checkRenameOverwrite(fromFileInfo, &overWriteInfo, isSameInode);

      if ((checkRes != FhgfsOpsErr_SUCCESS) || ((checkRes == FhgfsOpsErr_SUCCESS) && isSameInode) )
      {
         retVal = checkRes;
         goto outUnlock;
      }

      // only unlink the dir-entry-name here, so we can still restore it from dir-entry-id
      FhgfsOpsErr unlinkRes = toParent->unlinkDirEntryUnlocked(newEntryName, overWrittenEntry,
         DirEntry_UNLINK_FILENAME);
      if (unlikely(unlinkRes != FhgfsOpsErr_SUCCESS) )
      {
         if (unlikely (unlinkRes == FhgfsOpsErr_PATHNOTEXISTS) )
            LogContext(logContext).log(Log_WARNING, "Unexpectedly failed to unlink file: " +
               toParent->entries.getDirEntryPathUnlocked() + newEntryName + ". ");
         else
         {
            LogContext(logContext).logErr("Failed to unlink existing file. Aborting rename().");
            retVal = unlinkRes;
            goto outUnlock;
         }
      }
   }

   {
      // create new dirEntry with inlined inode
      FileInode* inode = new FileInode(); // the deserialized inode
      bool deserializeRes = inode->deserializeMetaData(buf);
      if (deserializeRes == false)
      {
         LogContext("File rename").logErr("Bug: Deserialization of remote buffer failed. Are all "
            "meta servers running with the same version?" );
         retVal = FhgfsOpsErr_INTERNAL;

         delete inode;
         goto outUnlock;
      }

      // destructs inode
      retVal = mkMetaFileUnlocked(toParent, newEntryName, fromFileInfo->getEntryType(), inode);
   }

   if (overWrittenEntry && retVal == FhgfsOpsErr_SUCCESS)
   {
      // unlink the overwritten entry, will unlock, release and return

      bool unlinkedWasInlined = overWrittenEntry->getIsInodeInlined();

      FhgfsOpsErr unlinkRes = unlinkOverwrittenEntryUnlocked(toParent, overWrittenEntry,
         outUnlinkedInode);

      EntryInfo unlinkEntryInfo;
      overWrittenEntry->getEntryInfo(toParentID, 0, &unlinkEntryInfo);

      // unlock everything here, but do not release toParent yet.
      toParentMutexLock.unlock(); // U N L O C K ( T O )
      safeMetaStoreLock.unlock(); // unlinkInodeLater() requires that everything was unlocked!

      if (unlinkRes == FhgfsOpsErr_INUSE)
      {
         unlinkRes = unlinkInodeLater(&unlinkEntryInfo, unlinkedWasInlined );
         if (unlinkRes == FhgfsOpsErr_AGAIN)
            unlinkRes = unlinkOverwrittenEntry(toParent, overWrittenEntry, outUnlinkedInode);

         if (unlinkRes != FhgfsOpsErr_SUCCESS && unlinkRes != FhgfsOpsErr_PATHNOTEXISTS)
            LogContext(logContext).logErr("Failed to unlink overwritten entry:"
               " FileName: "      + newEntryName                       +
               " ParentEntryID: " + toParent->getID()                  +
               " entryID: "       + overWrittenEntry->getEntryID()     +
               " Error: "         + FhgfsOpsErrTk::toErrString(unlinkRes) );
      }

      delete overWrittenEntry;

      releaseDir(toParentID);

      return retVal;
   }
   else
   if (overWrittenEntry)
   {
      // TODO: Restore the overwritten entry
   }

outUnlock:
   toParentMutexLock.unlock(); // U N L O C K ( T O )

   dirStore.releaseDir(toParent->getID() );

   safeMetaStoreLock.unlock();

   SAFE_DELETE(overWrittenEntry);

   return retVal;
}
/**
 * Simple rename on the same server in the same directory.
 *
 * @param outUnlinkInode is the inode of a dirEntry being possibly overwritten (toName already
 *    existed).
 */
FhgfsOpsErr MetaStore::renameInSameDir(DirInode* parentDir, std::string fromName,
   std::string toName, FileInode** outUnlinkInode)
{
   const char* logContext = "Rename in dir";

   SafeRWLock safeLock(&rwlock, SafeRWLock_READ); // L O C K
   SafeRWLock fromMutexLock(&parentDir->rwlock, SafeRWLock_WRITE); // L O C K ( F R O M )

   FhgfsOpsErr retVal;
   FhgfsOpsErr unlinkRes;

   DirEntry* overWrittenEntry = NULL;

   retVal = performRenameEntryInSameDir(parentDir, fromName, toName, &overWrittenEntry);

   if (retVal != FhgfsOpsErr_SUCCESS)
   {
      fromMutexLock.unlock();
      safeLock.unlock();

      SAFE_DELETE(overWrittenEntry);

      return retVal;
   }

   EntryInfo unlinkEntryInfo;
   bool unlinkedWasInlined;

   if (overWrittenEntry)
   {
      std::string parentDirID = parentDir->getID();
      overWrittenEntry->getEntryInfo(parentDirID, 0, &unlinkEntryInfo);
      unlinkedWasInlined = overWrittenEntry->getIsInodeInlined();

      unlinkRes = unlinkOverwrittenEntryUnlocked(parentDir, overWrittenEntry, outUnlinkInode);
   }
   else
   {
      *outUnlinkInode = NULL;

      // irrelevant values, just to please the compiler
      unlinkRes = FhgfsOpsErr_SUCCESS;
      unlinkedWasInlined = true;
   }

   /* Now update the ctime (attribChangeTime) of the renamed entry.
    * Only do that for a Directory dentry after giving up the DirInode's (fromMutex) lock,
    * as dirStore.setAttr() will acquire the InodeDirStore:: lock
    * and the lock order is InodeDirStore:: and then DirInode:: (risk of deadlock) */
   DirEntry* entry = parentDir->dirEntryCreateFromFileUnlocked(toName);
   if (likely(entry) ) // entry was just renamed to, so very likely it exists
   {
      EntryInfo entryInfo;
      std::string parentID = parentDir->getID();
      entry->getEntryInfo(parentID, 0, &entryInfo);

      fromMutexLock.unlock();

      setAttrUnlocked(&entryInfo, 0, NULL); /* This will fail if the DirInode is on another
                                             * meta server, but as updating the ctime is not
                                             * a real posix requirement (but filesystems usually
                                             * do it) we simply ignore this issue for now. */

      SAFE_DELETE(entry);
   }
   else
      fromMutexLock.unlock();

   safeLock.unlock();

   // unlink later must be called after releasing all locks
   if (overWrittenEntry)
   {
      if (unlinkRes == FhgfsOpsErr_INUSE)
      {
         unlinkRes = unlinkInodeLater(&unlinkEntryInfo, unlinkedWasInlined );
         if (unlinkRes == FhgfsOpsErr_AGAIN)
         {
            unlinkRes = unlinkOverwrittenEntry(parentDir, overWrittenEntry, outUnlinkInode);
         }
      }

      if (unlinkRes != FhgfsOpsErr_SUCCESS && unlinkRes != FhgfsOpsErr_PATHNOTEXISTS)
      {
         LogContext(logContext).logErr("Failed to unlink overwritten entry:"
            " FileName: "      + toName                           +
            " ParentEntryID: " + parentDir->getID()               +
            " entryID: "       + overWrittenEntry->getEntryID()   +
            " Error: "         + FhgfsOpsErrTk::toErrString(unlinkRes) );

         // TODO: Restore the dentry
      }
   }

   SAFE_DELETE(overWrittenEntry);

   return retVal;
}
/**
 * Perform the rename action here.
 *
 * In contrast to the moving...()-methods, this method performs a simple rename of an entry,
 * where no moving is involved.
 *
 * Rules: Files can overwrite existing files, but not existing dirs. Dirs cannot overwrite any
 * existing entry.
 *
 * @param dir needs to be write-locked already
 * @param outOverwrittenEntry the caller is responsible for the deletion of the local file;
 *    according to the rules, this can only be an overwritten file, not a dir; may not be NULL.
 *    Also, we only overwrite the entryName dentry, but not the ID dentry.
 *
 * Note: MetaStore is ReadLocked, dir is WriteLocked
 */
FhgfsOpsErr MetaStore::performRenameEntryInSameDir(DirInode* dir, std::string fromName,
   std::string toName, DirEntry** outOverwrittenEntry)
{
   *outOverwrittenEntry = NULL;

   FhgfsOpsErr retVal;

   // load DirInode on demand if required, we need it now
   bool loadSuccess = dir->loadIfNotLoadedUnlocked();
   if (!loadSuccess)
      return FhgfsOpsErr_PATHNOTEXISTS;

   // dir-entry of the file being renamed
   DirEntry* fromEntry = dir->dirEntryCreateFromFileUnlocked(fromName);
   if (!fromEntry)
   {
      return FhgfsOpsErr_PATHNOTEXISTS;
   }

   EntryInfo fromEntryInfo;
   std::string parentEntryID = dir->getID();
   fromEntry->getEntryInfo(parentEntryID, 0, &fromEntryInfo);

   // reference the inode
   FileInode* fromFileInode = NULL;
   // DirInode* fromDirInode = NULL;

   if (DirEntryType_ISDIR(fromEntryInfo.getEntryType() ) )
   {
      // TODO, exclusive lock
   }
   else
   {
      fromFileInode = referenceFileUnlocked(dir, &fromEntryInfo);
      if (!fromFileInode)
      {
         /* Note: The inode might be exclusively locked and a remote rename op might be in
          *       progress. If that fails we should actually continue with our rename. That will
          *       be solved in the future by using hardlinks for remote renaming. */
         return FhgfsOpsErr_PATHNOTEXISTS;
      }
   }

   DirEntry* overWriteEntry = dir->dirEntryCreateFromFileUnlocked(toName);
   if (overWriteEntry)
   {
      // sanity checks if we really shall overwrite the entry

      std::string parentID = dir->getID();

      EntryInfo fromEntryInfo;
      fromEntry->getEntryInfo(parentID , 0, &fromEntryInfo);

      EntryInfo overWriteEntryInfo;
      overWriteEntry->getEntryInfo(parentID, 0, &overWriteEntryInfo);

      bool isSameInode;
      retVal = checkRenameOverwrite(&fromEntryInfo, &overWriteEntryInfo, isSameInode);

      if (isSameInode)
      {
         delete(overWriteEntry);
         overWriteEntry = NULL;
         goto out; // nothing to do then, rename request will be silently ignored
      }

      if (retVal != FhgfsOpsErr_SUCCESS)
         goto out; // not allowed for some reasons, return it to the user
   }

   // eventually rename here
   retVal = dir->renameDirEntryUnlocked(fromName, toName, overWriteEntry);

   /* Note: If rename failed and an existing toName was to be overwritten, we don't need to care
    *       about it, the underlying file system has to handle it. */

   /* Note2: Do not decrease directory link count here, even if we overwrote an entry. We will do
    *        that later on in common unlink code, when we are going to unlink the entry from
    *        the #fsIDs# dir. */

   if (fromFileInode)
      releaseFileUnlocked(dir, fromFileInode);
   else
   {
      // TODO dir
   }

out:
   if (retVal == FhgfsOpsErr_SUCCESS)
      *outOverwrittenEntry = overWriteEntry;
   else
      SAFE_DELETE(overWriteEntry);

   SAFE_DELETE(fromEntry); // always exists when we are here

   return retVal;
}
bool copyAllResources( Options& options, const Path& from, const Path& tgtPath )
{
   // do we have an extension filter?
   bool bHasExt = from.getExtension() != "";

   VFSProvider* file = Engine::getVFS("file");
   if( file == 0 )
   {
      error( "Can't find FILE resource" );
      return false;
   }

   DirEntry *entry = file->openDir( from.getFullLocation() );
   if( entry == 0 )
   {
      warning( "Can't open directory " + from.getFullLocation() );
      return false;
   }

   String fname;
   while( entry->read( fname ) )
   {
      if( fname == ".." || fname == "." )
      {
         continue;
      }

      FileStat fs;
      if ( ! Sys::fal_stats( from.getFullLocation() + "/" + fname, fs ) )
      {
         continue;
      }

      if ( fs.m_type == FileStat::t_normal || fs.m_type == FileStat::t_link )
      {
         // do we filter the extension?
         if( bHasExt )
         {
            if ( ! fname.endsWith( "." + from.getExtension(), true ) )
            {
               continue;
            }
         }

         // TODO: Jail resources under modpath
         if ( ! copyFile( from.getFullLocation() + "/" + fname,
                          tgtPath.getFullLocation() + "/" + fname ) )
         {
            warning( "Cannot copy resource " + from.getFullLocation() + "/" + fname
               + " into " + tgtPath.getFullLocation() + "/" + fname );
            entry->close();
            delete entry;
            return false;
         }

         /*
         // descend
         Path nfrom( from );
         nfrom.setFullLocation( from.getFullLocation() + "/" + fname );
         if( ! copyAllResources( options, nfrom, modPath, tgtPath ) )
         {
            return false;
         }
         */
      }
   }

   entry->close();
   delete entry;
   return true;
}
bool operator()(const DirEntry& de1, const DirEntry& de2)
{
    return de1.Status().Size() < de2.Status().Size();
}
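// Usage sketch for comparison functors like the three above (sort by path, by
// modification time, by size). The wrapper struct and function names here are
// hypothetical -- the original snippets only show the operator() bodies -- but
// this is the usual way such functors are combined with std::sort.
#include <algorithm>
#include <vector>

struct SortBySize
{
    bool operator()(const DirEntry& de1, const DirEntry& de2)
    {
        return de1.Status().Size() < de2.Status().Size();
    }
};

void sortBySize(std::vector<DirEntry>& entries)
{
    // smallest entries first, per the strict weak ordering defined above
    std::sort(entries.begin(), entries.end(), SortBySize());
}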