// Update our composite status and deal with things if it's changed void CCachedDirectory::UpdateCurrentStatus() { svn_wc_status_kind newStatus = CalculateRecursiveStatus(); if ((newStatus != m_currentFullStatus)&&(m_ownStatus.IsVersioned())) { if ((m_currentFullStatus != svn_wc_status_none)&&(m_ownStatus.GetEffectiveStatus() != svn_wc_status_ignored)) { // Our status has changed - tell the shell CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": Dir %s, status change from %d to %d\n"), m_directoryPath.GetWinPath(), m_currentFullStatus, newStatus); CSVNStatusCache::Instance().UpdateShell(m_directoryPath); } if (m_ownStatus.GetEffectiveStatus() != svn_wc_status_ignored) m_currentFullStatus = newStatus; else m_currentFullStatus = svn_wc_status_ignored; } // And tell our parent, if we've got one... // we tell our parent *always* about our status, even if it hasn't // changed. This is to make sure that the parent has really our current // status - the parent can decide itself if our status has changed // or not. CTSVNPath parentPath = m_directoryPath.GetContainingDirectory(); if(!parentPath.IsEmpty()) { CCachedDirectory * cachedDir = CSVNStatusCache::Instance().GetDirectoryCacheEntry(parentPath); if (cachedDir) cachedDir->UpdateChildDirectoryStatus(m_directoryPath, m_currentFullStatus); } }
// Update our composite status and deal with things if it's changed void CCachedDirectory::UpdateCurrentStatus() { git_wc_status_kind newStatus = CalculateRecursiveStatus(); CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": UpdateCurrentStatus %s new:%d old: %d\n"), m_directoryPath.GetWinPath(), newStatus, m_currentFullStatus); if (newStatus != m_currentFullStatus && m_ownStatus.IsDirectory()) { m_currentFullStatus = newStatus; // Our status has changed - tell the shell CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": Dir %s, status change from %d to %d\n"), m_directoryPath.GetWinPath(), m_currentFullStatus, newStatus); CGitStatusCache::Instance().UpdateShell(m_directoryPath); } // And tell our parent, if we've got one... // we tell our parent *always* about our status, even if it hasn't // changed. This is to make sure that the parent has really our current // status - the parent can decide itself if our status has changed // or not. CTGitPath parentPath = m_directoryPath.GetContainingDirectory(); if(!parentPath.IsEmpty()) { // We have a parent // just version controled directory need to cache. CString root1, root2; if (parentPath.HasAdminDir(&root1) && (CGitStatusCache::Instance().IsRecurseSubmodules() || m_directoryPath.HasAdminDir(&root2) && root1 == root2)) { CCachedDirectory * cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(parentPath); if (cachedDir) cachedDir->UpdateChildDirectoryStatus(m_directoryPath, m_currentFullStatus); } } }
// Returns the cached status for 'path'.
// Fast path: a ~1 second 'mini-cache' of the most recently asked path avoids
// re-querying when the shell asks for the same item repeatedly.
// Slow path: locates (or creates) the cache entry for the containing
// directory and asks it for the member's status; the folder crawler is
// inhibited for the duration so this interactive request takes priority.
// flags: TSVNCACHE_FLAGS_RECUSIVE_STATUS requests recursive status.
// bFetch: whether a status fetch may be started if the cache is cold.
CStatusCacheEntry CSVNStatusCache::GetStatusForPath(const CTSVNPath& path, DWORD flags, bool bFetch /* = true */)
{
	bool bRecursive = !!(flags & TSVNCACHE_FLAGS_RECUSIVE_STATUS);

	// Check a very short-lived 'mini-cache' of the last thing we were asked for.
	// Signed subtraction keeps the comparison correct across GetTickCount()
	// wraparound.
	long now = (long)GetTickCount();
	// NOTE(review): m_mostRecentExpiresAt/m_mostRecentAskedPath are read here
	// without taking m_critSec (they are only written under the lock below) -
	// presumably an accepted benign race; confirm against other callers.
	if(now-m_mostRecentExpiresAt < 0)
	{
		if(path.IsEquivalentToWithoutCase(m_mostRecentAskedPath))
		{
			return m_mostRecentStatus;
		}
	}
	{
		AutoLocker lock(m_critSec);
		m_mostRecentAskedPath = path;
		m_mostRecentExpiresAt = now+1000;
	}

	if (IsPathGood(path) && m_shellCache.IsPathAllowed(path.GetWinPath()))
	{
		// Stop the crawler starting on a new folder while we're doing this much more important task...
		// Please note, that this may be a second "lock" used concurrently to the one in RemoveCacheForPath().
		CCrawlInhibitor crawlInhibit(&m_folderCrawler);
		// For a drive root GetContainingDirectory() is empty, so fall back to
		// the path's own directory.
		CTSVNPath dirpath = path.GetContainingDirectory();
		if (dirpath.IsEmpty())
			dirpath = path.GetDirectory();
		CCachedDirectory * cachedDir = GetDirectoryCacheEntry(dirpath);
		if (cachedDir != NULL)
		{
			CStatusCacheEntry entry = cachedDir->GetStatusForMember(path, bRecursive, bFetch);
			{
				AutoLocker lock(m_critSec);
				m_mostRecentStatus = entry;
				return m_mostRecentStatus;
			}
		}
		// Second chance: ask the cache entry for the path treated as a
		// directory itself.
		cachedDir = GetDirectoryCacheEntry(path.GetDirectory());
		if (cachedDir != NULL)
		{
			CStatusCacheEntry entry = cachedDir->GetStatusForMember(path, bRecursive, bFetch);
			{
				AutoLocker lock(m_critSec);
				m_mostRecentStatus = entry;
				return m_mostRecentStatus;
			}
		}
	}
	// No cache entry available: return an empty status, optionally forced to
	// 'normal' for excluded-but-versioned directories when the user asked for
	// that ("show excluded as normal").
	AutoLocker lock(m_critSec);
	m_mostRecentStatus = CStatusCacheEntry();
	if (m_shellCache.ShowExcludedAsNormal() && path.IsDirectory() && m_shellCache.HasSVNAdminDir(path.GetWinPath(), true))
	{
		m_mostRecentStatus.ForceStatus(svn_wc_status_normal);
	}
	return m_mostRecentStatus;
}
// Update our composite status and deal with things if it's changed void CCachedDirectory::UpdateCurrentStatus() { git_wc_status_kind newStatus = CalculateRecursiveStatus(); ATLTRACE(_T("UpdateCurrentStatus %s new:%d old: %d\n"), m_directoryPath.GetWinPath(), newStatus, m_currentFullStatus); if ( this->m_ownStatus.GetEffectiveStatus() < git_wc_status_normal ) { if (::PathFileExists(this->m_directoryPath.GetWinPathString()+_T("\\.git"))) { //project root must be normal status at least. ATLTRACE(_T("force update project root directory as normal status\n")); this->m_ownStatus.ForceStatus(git_wc_status_normal); } } if ((newStatus != m_currentFullStatus) && m_ownStatus.IsVersioned()) { if ((m_currentFullStatus != git_wc_status_none)&&(m_ownStatus.GetEffectiveStatus() != git_wc_status_missing)) { // Our status has changed - tell the shell ATLTRACE(_T("Dir %s, status change from %d to %d, send shell notification\n"), m_directoryPath.GetWinPath(), m_currentFullStatus, newStatus); CGitStatusCache::Instance().UpdateShell(m_directoryPath); } if (m_ownStatus.GetEffectiveStatus() != git_wc_status_missing) m_currentFullStatus = newStatus; else m_currentFullStatus = git_wc_status_missing; } // And tell our parent, if we've got one... // we tell our parent *always* about our status, even if it hasn't // changed. This is to make sure that the parent has really our current // status - the parent can decide itself if our status has changed // or not. CTGitPath parentPath = m_directoryPath.GetContainingDirectory(); if(!parentPath.IsEmpty()) { // We have a parent // just version controled directory need to cache. CString root1, root2; if(parentPath.HasAdminDir(&root1) && m_directoryPath.HasAdminDir(&root2) && root1 == root2) { CCachedDirectory * cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(parentPath); if (cachedDir) cachedDir->UpdateChildDirectoryStatus(m_directoryPath, m_currentFullStatus); } } }
// Stores the SVN status for 'path' in the cache.
// Directories: the status is recorded as the *own status* of the child
// directory's cache entry. Files: the status is stored in this directory's
// m_entryCache keyed by file name; if an existing entry's effective status
// changed, the shell is notified so overlays refresh.
// needsLock:    true if the file carries the svn:needs-lock property.
// forceNormal:  overrides the reported status with svn_wc_status_normal.
void CCachedDirectory::AddEntry(const CTSVNPath& path, const svn_client_status_t* pSVNStatus, bool needsLock, bool forceNormal)
{
	svn_wc_status_kind nodestatus = forceNormal ? svn_wc_status_normal : (pSVNStatus ? pSVNStatus->node_status : svn_wc_status_none);
	if(path.IsDirectory())
	{
		// no lock here:
		// AutoLocker lock(m_critSec);
		// because GetDirectoryCacheEntry() can try to obtain a write lock
		CCachedDirectory * childDir = CSVNStatusCache::Instance().GetDirectoryCacheEntry(path);
		if (childDir)
		{
			// Don't downgrade an 'ignored' directory to 'unversioned' based on
			// a plain unversioned report; other combinations update own status.
			if ((childDir->GetCurrentFullStatus() != svn_wc_status_ignored)||(pSVNStatus==NULL)||(nodestatus != svn_wc_status_unversioned))
				childDir->m_ownStatus.SetStatus(pSVNStatus, needsLock, forceNormal);
			childDir->m_ownStatus.SetKind(svn_node_dir);
		}
	}
	else
	{
		AutoLocker lock(m_critSec);
		CStringA cachekey = GetCacheKey(path);
		// lower_bound gives both the lookup result and an insertion hint for
		// the miss case.
		CacheEntryMap::iterator entry_it = m_entryCache.lower_bound(cachekey);
		if (entry_it != m_entryCache.end() && entry_it->first == cachekey)
		{
			if (pSVNStatus)
			{
				// Existing entry whose effective status changed: the shell
				// must be told so the overlay icon is redrawn.
				if (entry_it->second.GetEffectiveStatus() > svn_wc_status_none &&
					entry_it->second.GetEffectiveStatus() != nodestatus)
				{
					CSVNStatusCache::Instance().UpdateShell(path);
					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": shell update for %s\n"), path.GetWinPath());
				}
			}
		}
		else
		{
			entry_it = m_entryCache.insert(entry_it, std::make_pair(cachekey, CStatusCacheEntry()));
		}
		entry_it->second = CStatusCacheEntry(pSVNStatus, needsLock, path.GetLastWriteTime(), forceNormal);
	}
}
// Callback invoked once per item while enumerating git status for this
// directory ('pThis' is resolved from the item's containing directory).
// Directories: missing-on-disk dirs raise the 'deleted' flag, submodules
// with a below-normal status are skipped, crawlable dirs are queued, and the
// child-directory status map is updated. Files: their status is folded into
// m_mostImportantFileStatus. Finally the item is stored via AddEntry().
// Always returns FALSE (no error / continue enumeration).
BOOL CCachedDirectory::GetStatusCallback(const CString & path, git_wc_status_kind status,bool isDir, void *, bool assumeValid, bool skipWorktree)
{
	// Build a status2 struct on the stack from the scalar callback arguments.
	git_wc_status2_t _status;
	git_wc_status2_t *status2 = &_status;
	status2->prop_status = status2->text_status = status;
	status2->assumeValid = assumeValid;
	status2->skipWorktree = skipWorktree;

	CTGitPath gitPath(path);

	CCachedDirectory *pThis = CGitStatusCache::Instance().GetDirectoryCacheEntry(gitPath.GetContainingDirectory());
	if(pThis == NULL)
		return FALSE;

//	if(status->entry)
	{
		if (isDir)
		{	/*gitpath is directory*/
			//if ( !gitPath.IsEquivalentToWithoutCase(pThis->m_directoryPath) )
			{
				// Directory reported but no longer on disk: record 'deleted'
				// in this directory's most-important file status.
				if (!gitPath.Exists())
				{
					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": Miss dir %s \n"), gitPath.GetWinPath());
					pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, git_wc_status_deleted);
				}

				// Submodules (dirs containing ".git") with below-normal status
				// are not processed here.
				if ( status < git_wc_status_normal)
				{
					if( ::PathFileExists(path+_T("\\.git")))
					{ // this is submodule
						CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": skip submodule %s\n"), path);
						return FALSE;
					}
				}
				if (pThis->m_bRecursive)
				{
					// Add any versioned directory, which is not our 'self' entry, to the list for having its status updated
					//OutputDebugStringA("AddFolderCrawl: ");OutputDebugStringW(svnPath.GetWinPathString());OutputDebugStringA("\r\n");
					if (status >= git_wc_status_normal || (CGitStatusCache::Instance().IsUnversionedAsModified() && status == git_wc_status_unversioned))
						CGitStatusCache::Instance().AddFolderForCrawling(gitPath);
				}

				// Make sure we know about this child directory
				// This initial status value is likely to be overwritten from below at some point
				git_wc_status_kind s = GitStatus::GetMoreImportant(status2->text_status, status2->prop_status);

				// folders must not be displayed as added or deleted only as modified
				if (s == git_wc_status_deleted || s == git_wc_status_added)
					s = git_wc_status_modified;

				CCachedDirectory * cdir = CGitStatusCache::Instance().GetDirectoryCacheEntryNoCreate(gitPath);
				if (cdir)
				{
					// This child directory is already in our cache!
					// So ask this dir about its recursive status
					git_wc_status_kind st = GitStatus::GetMoreImportant(s, cdir->GetCurrentFullStatus());
					AutoLocker lock(pThis->m_critSec);
					pThis->m_childDirectories[gitPath] = st;
					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": call 1 Update dir %s %d\n"), gitPath.GetWinPath(), st);
				}
				else
				{
					AutoLocker lock(pThis->m_critSec);
					// the child directory is not in the cache. Create a new entry for it in the cache which is
					// initially 'unversioned'. But we added that directory to the crawling list above, which
					// means the cache will be updated soon.
					CGitStatusCache::Instance().GetDirectoryCacheEntry(gitPath);
					pThis->m_childDirectories[gitPath] = s;
					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": call 2 Update dir %s %d\n"), gitPath.GetWinPath(), s);
				}
			}
		}
		else /* gitpath is file*/
		{
			// Keep track of the most important status of all the files in this directory
			// Don't include subdirectories in this figure, because they need to provide their
			// own 'most important' value
			pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, status2->text_status);
			pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, status2->prop_status);
			if ((status2->text_status == git_wc_status_unversioned) && (CGitStatusCache::Instance().IsUnversionedAsModified()))
			{
				// treat unversioned files as modified
				if (pThis->m_mostImportantFileStatus != git_wc_status_added)
					pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, git_wc_status_modified);
			}
		}
	}
	pThis->AddEntry(gitPath, status2);
	return FALSE;
}
// Stores the git status for 'path' in the cache.
// Directories: updates the own-status of the child directory's cache entry
// (and notifies the shell when the effective status changed), always marking
// the entry as a dir node. Files: stores the status in the containing
// directory's m_entryCache (mirrored into m_entryCache_tmp) and notifies
// the shell for new entries or changed statuses.
// validuntil: optional expiry tick for the cached file entry (0 = default).
void CCachedDirectory::AddEntry(const CTGitPath& path, const git_wc_status2_t* pGitStatus, DWORD validuntil /* = 0*/)
{
	AutoLocker lock(m_critSec);
	if(path.IsDirectory())
	{
		CCachedDirectory * childDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(path);
		if (childDir)
		{
			// Don't downgrade a 'missing' directory to 'unversioned' from a
			// plain unversioned report; other combinations update own status.
			if ((childDir->GetCurrentFullStatus() != git_wc_status_missing)||(pGitStatus==NULL)||(pGitStatus->text_status != git_wc_status_unversioned))
			{
				if(pGitStatus)
				{
					if(childDir->GetCurrentFullStatus() != GitStatus::GetMoreImportant(pGitStatus->prop_status, pGitStatus->text_status))
					{
						CGitStatusCache::Instance().UpdateShell(path);
						//CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": shell update for %s\n"), path.GetWinPath());
						childDir->m_ownStatus.SetKind(git_node_dir);
						childDir->m_ownStatus.SetStatus(pGitStatus);
					}
				}
			}
			childDir->m_ownStatus.SetKind(git_node_dir);
		}
	}
	else
	{
		CCachedDirectory * childDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(path.GetContainingDirectory());
		bool bNotified = false;

		if(!childDir)
			return ;

		// NOTE(review): we already hold this->m_critSec and now take the
		// containing directory's lock as well - verify lock ordering is
		// consistent with other call sites to rule out deadlocks.
		AutoLocker lock2(childDir->m_critSec);
		CString cachekey = GetCacheKey(path);
		// lower_bound gives both the lookup result and an insertion hint for
		// the miss case.
		CacheEntryMap::iterator entry_it = childDir->m_entryCache.lower_bound(cachekey);
		if (entry_it != childDir->m_entryCache.end() && entry_it->first == cachekey)
		{
			if (pGitStatus)
			{
				// Existing entry whose effective status changed: flag for a
				// shell notification after the entry is rewritten.
				if (entry_it->second.GetEffectiveStatus() > git_wc_status_none &&
					entry_it->second.GetEffectiveStatus() != GitStatus::GetMoreImportant(pGitStatus->prop_status, pGitStatus->text_status)
				)
				{
					bNotified =true;
				}
			}
		}
		else
		{
			entry_it = childDir->m_entryCache.insert(entry_it, std::make_pair(cachekey, CStatusCacheEntry()));
			bNotified = true;
		}
		entry_it->second = CStatusCacheEntry(pGitStatus, path.GetLastWriteTime(), path.IsReadOnly(), validuntil);
		// TEMP(?): git status doesn't not have "entry" that contains node type, so manually set as file
		entry_it->second.SetKind(git_node_file);

		// Mirror into the temporary entry cache as well.
		childDir->m_entryCache_tmp[cachekey] = entry_it->second;
		if(bNotified)
		{
			CGitStatusCache::Instance().UpdateShell(path);
			//CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": shell update for %s\n"), path.GetWinPath());
		}
		//CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) _T(": Path Entry Add %s %s %s %d\n"), path.GetWinPath(), cachekey, m_directoryPath.GetWinPath(), pGitStatus->text_status);
	}
}
// Determines the status of 'path' by asking git directly.
// First strips the project root prefix from the path, then checks whether
// the item is under version control. Versioned items trigger a full
// enumeration (EnumFiles + UpdateCurrentStatus). Untracked items are
// resolved against the ignore rules: directories get their cache entry
// stamped ignored/unversioned, files get an entry added to m_entryCache.
CStatusCacheEntry CCachedDirectory::GetStatusFromGit(const CTGitPath &path, CString sProjectRoot)
{
	// Make 'subpaths' relative to the project root (strip the root prefix and
	// a following '/' when present).
	CString subpaths = path.GetGitPathString();
	if(subpaths.GetLength() >= sProjectRoot.GetLength())
	{
		// NOTE(review): when the lengths are equal this indexes the position
		// at GetLength(); CString::operator[] permits that (returns the
		// terminating null), but it is worth confirming this is intentional.
		if(subpaths[sProjectRoot.GetLength()] == _T('/'))
			subpaths=subpaths.Right(subpaths.GetLength() - sProjectRoot.GetLength()-1);
		else
			subpaths=subpaths.Right(subpaths.GetLength() - sProjectRoot.GetLength());
	}

	GitStatus *pGitStatus = &CGitStatusCache::Instance().m_GitStatus;
	UNREFERENCED_PARAMETER(pGitStatus);

	bool isVersion =true;
	pGitStatus->IsUnderVersionControl(sProjectRoot, subpaths, path.IsDirectory(), &isVersion);
	if(!isVersion)
	{	//untracked file
		bool isDir = path.IsDirectory();
		// Reload the ignore rules if the relevant ignore files changed on disk.
		bool isIgnoreFileChanged = pGitStatus->HasIgnoreFilesChanged(sProjectRoot, subpaths, isDir);
		if( isIgnoreFileChanged)
		{
			pGitStatus->LoadIgnoreFile(sProjectRoot, subpaths, isDir);
		}

		if (isDir)
		{
			CCachedDirectory * dirEntry = CGitStatusCache::Instance().GetDirectoryCacheEntry(path,
											false); /* we needn't watch untracked directory*/
			if(dirEntry)
			{
				AutoLocker lock(dirEntry->m_critSec);

				git_wc_status_kind dirstatus = dirEntry->GetCurrentFullStatus() ;
				// Recompute when the cached status is uninitialized, at/above
				// normal, the ignore files changed, or unversioned counts as
				// modified.
				if (CGitStatusCache::Instance().IsUnversionedAsModified() || dirstatus == git_wc_status_none || dirstatus >= git_wc_status_normal || isIgnoreFileChanged)
				{/* status have not initialized*/
					bool isignore = false;
					pGitStatus->IsIgnore(sProjectRoot, subpaths, &isignore, isDir);

					// Non-ignored untracked dirs must be enumerated when
					// unversioned-as-modified is on, so their contents count.
					if (!isignore && CGitStatusCache::Instance().IsUnversionedAsModified())
					{
						dirEntry->EnumFiles(path, TRUE);
						dirEntry->UpdateCurrentStatus();
						return CStatusCacheEntry(dirEntry->GetCurrentFullStatus());
					}

					git_wc_status2_t status2;
					status2.text_status = status2.prop_status = (isignore? git_wc_status_ignored:git_wc_status_unversioned);

					// we do not know anything about files here, all we know is that there are not versioned files in this dir
					dirEntry->m_mostImportantFileStatus = git_wc_status_none;
					dirEntry->m_ownStatus.SetKind(git_node_dir);
					dirEntry->m_ownStatus.SetStatus(&status2);
					dirEntry->m_currentFullStatus = status2.text_status;
				}
				return dirEntry->m_ownStatus;
			}
		}
		else /* path is file */
		{
			AutoLocker lock(m_critSec);
			CString strCacheKey = GetCacheKey(path);

			if (strCacheKey.IsEmpty())
				return CStatusCacheEntry();

			CacheEntryMap::iterator itMap = m_entryCache.find(strCacheKey);
			// Cache miss or stale ignore rules: resolve ignored/unversioned
			// and insert a fresh entry.
			if(itMap == m_entryCache.end() || isIgnoreFileChanged)
			{
				git_wc_status2_t status2;
				bool isignore = false;
				pGitStatus->IsIgnore(sProjectRoot, subpaths, &isignore, isDir);
				status2.text_status = status2.prop_status =
					(isignore? git_wc_status_ignored:git_wc_status_unversioned);

				AddEntry(path, &status2);
				return m_entryCache[strCacheKey];
			}
			else
			{
				return itMap->second;
			}
		}
		return CStatusCacheEntry();
	}
	else
	{
		// Versioned item: enumerate and refresh, then return the freshly
		// computed status (file entry or own directory status).
		EnumFiles(path, TRUE);
		UpdateCurrentStatus();
		if (!path.IsDirectory())
			return GetCacheStatusForMember(path);
		return CStatusCacheEntry(m_ownStatus);
	}
}
// Answers a status request purely from the cache (no git invocation).
// Directories: delegate to the directory's own cache entry; if that entry is
// stale (or absent) the folder is queued for crawling and the old status is
// returned in the meantime. Files: inherit ignored/unversioned from the
// containing directory, otherwise look up the per-file entry and validate
// its expiry and file time before trusting it.
CStatusCacheEntry CCachedDirectory::GetStatusFromCache(const CTGitPath& path, bool bRecursive)
{
	if(path.IsDirectory())
	{
		// We don't have directory status in our cache
		// Ask the directory if it knows its own status
		CCachedDirectory * dirEntry = CGitStatusCache::Instance().GetDirectoryCacheEntry(path);
		if( dirEntry)
		{
			if (dirEntry->IsOwnStatusValid())
				return dirEntry->GetOwnStatus(bRecursive);
			else
			{
				/* cache have outof date, need crawl again*/

				/*AutoLocker lock(dirEntry->m_critSec);
				ChildDirStatus::const_iterator it;
				for(it = dirEntry->m_childDirectories.begin(); it != dirEntry->m_childDirectories.end(); ++it)
				{
					CGitStatusCache::Instance().AddFolderForCrawling(it->first);
				}*/

				CGitStatusCache::Instance().AddFolderForCrawling(path);

				/*Return old status during crawling*/
				return dirEntry->GetOwnStatus(bRecursive);
			}
		}
		else
		{
			CGitStatusCache::Instance().AddFolderForCrawling(path);
		}
		return CStatusCacheEntry();
	}
	else
	{
		//All file ignored if under ignore directory
		if (m_ownStatus.GetEffectiveStatus() == git_wc_status_ignored)
			return CStatusCacheEntry(git_wc_status_ignored);
		if (m_ownStatus.GetEffectiveStatus() == git_wc_status_unversioned)
			return CStatusCacheEntry(git_wc_status_unversioned);

		// Look up a file in our own cache
		AutoLocker lock(m_critSec);
		CString strCacheKey = GetCacheKey(path);
		CacheEntryMap::iterator itMap = m_entryCache.find(strCacheKey);
		if(itMap != m_entryCache.end())
		{
			// We've hit the cache - check for timeout
			if(!itMap->second.HasExpired((long)GetTickCount()))
			{
				if(itMap->second.DoesFileTimeMatch(path.GetLastWriteTime()))
				{
					// A 'missing' entry is only trusted while the file really
					// is absent on disk.
					if ((itMap->second.GetEffectiveStatus()!=git_wc_status_missing)||(!PathFileExists(path.GetWinPath())))
					{
						// Note: the filetime matches after a modified has been committed too.
						// So in that case, we would return a wrong status (e.g. 'modified' instead
						// of 'normal') here.
						return itMap->second;
					}
				}
			}
		}

		// Cache miss or stale: let the crawler refresh the containing
		// directory; return empty status for now.
		CGitStatusCache::Instance().AddFolderForCrawling(path.GetContainingDirectory());
		return CStatusCacheEntry();
	}
}
// Status-walk callback (SVN-style signature carried over to the git port):
// invoked once per reported item; 'baton' is the CCachedDirectory being
// enumerated. Versioned child directories are queued for crawling (when
// recursive) and recorded in m_childDirectories; file statuses are folded
// into m_mostImportantFileStatus. Items reported without an 'entry' get
// special handling for nested working copies and externals. The item is
// finally stored via AddEntry(). Always returns 0 (no error).
git_error_t * CCachedDirectory::GetStatusCallback(void *baton, const char *path, git_wc_status2_t *status)
{
	CCachedDirectory* pThis = (CCachedDirectory*)baton;

	if (path == NULL)
		return 0;

	CTGitPath svnPath;

	if(status->entry)
	{
		// For live items we can trust the entry's node kind; otherwise fall
		// back to letting CTGitPath detect it.
		if ((status->text_status != git_wc_status_none)&&(status->text_status != git_wc_status_missing))
			svnPath.SetFromSVN(path, (status->entry->kind == svn_node_dir));
		else
			svnPath.SetFromSVN(path);

		if(svnPath.IsDirectory())
		{
			// Our own directory is handled by the caller; only children are
			// processed here.
			if(!svnPath.IsEquivalentToWithoutCase(pThis->m_directoryPath))
			{
				if (pThis->m_bRecursive)
				{
					// Add any versioned directory, which is not our 'self' entry, to the list for having its status updated
					CGitStatusCache::Instance().AddFolderForCrawling(svnPath);
				}

				// Make sure we know about this child directory
				// This initial status value is likely to be overwritten from below at some point
				git_wc_status_kind s = GitStatus::GetMoreImportant(status->text_status, status->prop_status);
				CCachedDirectory * cdir = CGitStatusCache::Instance().GetDirectoryCacheEntryNoCreate(svnPath);
				if (cdir)
				{
					// This child directory is already in our cache!
					// So ask this dir about its recursive status
					git_wc_status_kind st = GitStatus::GetMoreImportant(s, cdir->GetCurrentFullStatus());
					AutoLocker lock(pThis->m_critSec);
					pThis->m_childDirectories[svnPath] = st;
				}
				else
				{
					// the child directory is not in the cache. Create a new entry for it in the cache which is
					// initially 'unversioned'. But we added that directory to the crawling list above, which
					// means the cache will be updated soon.
					CGitStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
					AutoLocker lock(pThis->m_critSec);
					pThis->m_childDirectories[svnPath] = s;
				}
			}
		}
		else
		{
			// Keep track of the most important status of all the files in this directory
			// Don't include subdirectories in this figure, because they need to provide their
			// own 'most important' value
			pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, status->text_status);
			pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, status->prop_status);
			if (((status->text_status == git_wc_status_unversioned)||(status->text_status == git_wc_status_none))
				&&(CGitStatusCache::Instance().IsUnversionedAsModified()))
			{
				// treat unversioned files as modified
				if (pThis->m_mostImportantFileStatus != git_wc_status_added)
					pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, git_wc_status_modified);
			}
		}
	}
	else
	{
		svnPath.SetFromSVN(path);
		// Subversion returns no 'entry' field for versioned folders if they're
		// part of another working copy (nested layouts).
		// So we have to make sure that such an 'unversioned' folder really
		// is unversioned.
		if (((status->text_status == git_wc_status_unversioned)||(status->text_status == git_wc_status_missing))&&(!svnPath.IsEquivalentToWithoutCase(pThis->m_directoryPath))&&(svnPath.IsDirectory()))
		{
			if (svnPath.HasAdminDir())
			{
				// A nested working copy: crawl it and treat it as 'normal'
				// until the crawl reports the real status.
				CGitStatusCache::Instance().AddFolderForCrawling(svnPath);
				// Mark the directory as 'versioned' (status 'normal' for now).
				// This initial value will be overwritten from below some time later
				{
					AutoLocker lock(pThis->m_critSec);
					pThis->m_childDirectories[svnPath] = git_wc_status_normal;
				}
				// Make sure the entry is also in the cache
				CGitStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
				// also mark the status in the status object as normal
				status->text_status = git_wc_status_normal;
			}
		}
		else if (status->text_status == git_wc_status_external)
		{
			// Externals behave like nested working copies: crawl, cache,
			// report 'normal' until the real status arrives.
			CGitStatusCache::Instance().AddFolderForCrawling(svnPath);
			// Mark the directory as 'versioned' (status 'normal' for now).
			// This initial value will be overwritten from below some time later
			{
				AutoLocker lock(pThis->m_critSec);
				pThis->m_childDirectories[svnPath] = git_wc_status_normal;
			}
			// we have added a directory to the child-directory list of this
			// directory. We now must make sure that this directory also has
			// an entry in the cache.
			CGitStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
			// also mark the status in the status object as normal
			status->text_status = git_wc_status_normal;
		}
		else
		{
			if (svnPath.IsDirectory())
			{
				AutoLocker lock(pThis->m_critSec);
				pThis->m_childDirectories[svnPath] = GitStatus::GetMoreImportant(status->text_status, status->prop_status);
			}
			else if ((CGitStatusCache::Instance().IsUnversionedAsModified())&&(status->text_status != git_wc_status_missing))
			{
				// make this unversioned item change the most important status of this
				// folder to modified if it doesn't already have another status
				if (pThis->m_mostImportantFileStatus != git_wc_status_added)
					pThis->m_mostImportantFileStatus = GitStatus::GetMoreImportant(pThis->m_mostImportantFileStatus, git_wc_status_modified);
			}
		}
	}

	pThis->AddEntry(svnPath, status);

	return 0;
}
// SVN status-walk callback: invoked once per reported item; 'baton' is the
// CCachedDirectory being enumerated. Versioned child directories get their
// (conflict-aware) status recorded via SetChildStatus(); for 'normal' files
// the svn:needs-lock property is fetched so the needs-lock overlay can be
// shown. Unversioned/ignored dirs that are actually nested working-copy
// roots, and externals, are queued for crawling and forced to 'normal' for
// now. The item is finally stored via AddEntry().
svn_error_t * CCachedDirectory::GetStatusCallback(void *baton, const char *path, const svn_client_status_t *status, apr_pool_t * pool)
{
	CCachedDirectory* pThis = (CCachedDirectory*)baton;

	if (path == NULL)
		return SVN_NO_ERROR;

	CTSVNPath svnPath;
	bool forceNormal = false;   // set when the item must be reported 'normal' to AddEntry
	bool needsLock = false;     // set when the file carries svn:needs-lock

	const svn_wc_status_kind nodeStatus = status->node_status;
	if(status->versioned)
	{
		// For live items trust the reported node kind; otherwise let CTSVNPath
		// detect it from disk.
		if ((nodeStatus != svn_wc_status_none)&&(nodeStatus != svn_wc_status_ignored))
			svnPath.SetFromSVN(path, (status->kind == svn_node_dir));
		else
			svnPath.SetFromSVN(path);

		if(svnPath.IsDirectory())
		{
			// Our own directory is handled by the caller; only children are
			// recorded here.
			if(!svnPath.IsEquivalentToWithoutCase(pThis->m_directoryPath))
			{
				// Make sure we know about this child directory
				// This initial status value is likely to be overwritten from below at some point
				svn_wc_status_kind s = nodeStatus;
				if (status->conflicted)
					s = SVNStatus::GetMoreImportant(s, svn_wc_status_conflicted);
				CCachedDirectory * cdir = CSVNStatusCache::Instance().GetDirectoryCacheEntryNoCreate(svnPath);
				if (cdir)
				{
					// This child directory is already in our cache!
					// So ask this dir about its recursive status
					svn_wc_status_kind st = SVNStatus::GetMoreImportant(s, cdir->GetCurrentFullStatus());
					pThis->SetChildStatus(svnPath, st);
				}
				else
				{
					// the child directory is not in the cache. Create a new entry for it in the cache which is
					// initially 'unversioned'. But we added that directory to the crawling list above, which
					// means the cache will be updated soon.
					CSVNStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
					pThis->SetChildStatus(svnPath, s);
				}
			}
		}
		else
		{
			// only fetch the svn:needs-lock property if the status of this file is 'normal', because
			// if the status is something else, the needs-lock overlay won't show up anyway
			if ((pThis->m_pCtx)&&(nodeStatus == svn_wc_status_normal))
			{
				const svn_string_t * value = NULL;
				svn_error_t * err = svn_wc_prop_get2(&value, pThis->m_pCtx->wc_ctx, path, "svn:needs-lock", pool, pool);
				if ((err==NULL) && value)
					needsLock = true;
				if (err)
					svn_error_clear(err);
			}
		}
	}
	else
	{
		if ((status->kind != svn_node_unknown)&&(status->kind != svn_node_none))
			svnPath.SetFromSVN(path, status->kind == svn_node_dir);
		else
			svnPath.SetFromSVN(path);

		// Subversion returns no 'entry' field for versioned folders if they're
		// part of another working copy (nested layouts).
		// So we have to make sure that such an 'unversioned' folder really
		// is unversioned.
		if (((nodeStatus == svn_wc_status_unversioned)||(nodeStatus == svn_wc_status_ignored))&&(!svnPath.IsEquivalentToWithoutCase(pThis->m_directoryPath))&&(svnPath.IsDirectory()))
		{
			if (svnPath.IsWCRoot())
			{
				// A nested working copy root: crawl it and report 'normal'
				// until the crawl delivers the real status.
				CSVNStatusCache::Instance().AddFolderForCrawling(svnPath);
				// Mark the directory as 'versioned' (status 'normal' for now).
				// This initial value will be overwritten from below some time later
				pThis->SetChildStatus(svnPath, svn_wc_status_normal);
				// Make sure the entry is also in the cache
				CSVNStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
				// also mark the status in the status object as normal
				forceNormal = true;
			}
			else
			{
				pThis->SetChildStatus(svnPath, nodeStatus);
			}
		}
		else if (nodeStatus == svn_wc_status_external)
		{
			if ((status->kind == svn_node_dir) || (svnPath.IsDirectory()))
			{
				// Externals behave like nested working copies: crawl, cache,
				// report 'normal' until the real status arrives.
				CSVNStatusCache::Instance().AddFolderForCrawling(svnPath);
				// Mark the directory as 'versioned' (status 'normal' for now).
				// This initial value will be overwritten from below some time later
				pThis->SetChildStatus(svnPath, svn_wc_status_normal);
				// we have added a directory to the child-directory list of this
				// directory. We now must make sure that this directory also has
				// an entry in the cache.
				CSVNStatusCache::Instance().GetDirectoryCacheEntry(svnPath);
				// also mark the status in the status object as normal
				forceNormal = true;
			}
		}
		else
		{
			if (svnPath.IsDirectory())
			{
				svn_wc_status_kind s = nodeStatus;
				if (status->conflicted)
					s = SVNStatus::GetMoreImportant(s, svn_wc_status_conflicted);
				pThis->SetChildStatus(svnPath, s);
			}
		}
	}

	pThis->AddEntry(svnPath, status, needsLock, forceNormal);

	return SVN_NO_ERROR;
}
// Returns the (possibly cached) status for 'path', which is either a member of
// this directory or the directory itself (bRequestForSelf).
// - If the working copy database file time is unchanged, answers from the
//   in-memory cache where possible.
// - Otherwise the whole directory's status is re-fetched from SVN and the
//   cache rebuilt, then the answer is taken from the fresh data.
// NOTE(review): this method is documented below to run under a *read* lock
// only, which is why stale entries are handed to the crawler for removal
// instead of being removed here.
// bFetch == false means "don't block on a status fetch: serve stale data or
// schedule a crawl instead" (used while the explorer is waiting).
CStatusCacheEntry CCachedDirectory::GetStatusForMember(const CTSVNPath& path, bool bRecursive, bool bFetch /* = true */)
{
	CStringA strCacheKey;
	bool bThisDirectoryIsUnversioned = false;
	bool bRequestForSelf = false;
	if(path.IsEquivalentToWithoutCase(m_directoryPath))
	{
		bRequestForSelf = true;
	}

	// In almost all circumstances, we ask for the status of a member of this directory.
	ATLASSERT(m_directoryPath.IsEquivalentToWithoutCase(path.GetContainingDirectory()) || bRequestForSelf);

	// Compare the wc.db timestamp with the one we last refreshed against:
	// if it changed, our cached data is stale and must be rebuilt below.
	long long dbFileTime = CSVNStatusCache::Instance().WCRoots()->GetDBFileTime(m_directoryPath);
	bool wcDbFileTimeChanged = (m_wcDbFileTime != dbFileTime);

	if ( !wcDbFileTimeChanged )
	{
		if(m_wcDbFileTime == 0)
		{
			// We are a folder which is not in a working copy
			bThisDirectoryIsUnversioned = true;
			m_ownStatus.SetStatus(NULL, false, false);

			// If a user removes the .svn directory, we get here with m_entryCache
			// not being empty, but still us being unversioned
			if (!m_entryCache.empty())
			{
				m_entryCache.clear();
			}
			ATLASSERT(m_entryCache.empty());

			// However, a member *DIRECTORY* might be the top of WC
			// so we need to ask them to get their own status
			if(!path.IsDirectory())
			{
				// File inside an unversioned folder: nothing to report.
				if ((PathFileExists(path.GetWinPath()))||(bRequestForSelf))
					return CStatusCacheEntry();
				// the entry doesn't exist anymore!
				// but we can't remove it from the cache here:
				// the GetStatusForMember() method is called only with a read
				// lock and not a write lock!
				// So mark it for crawling, and let the crawler remove it
				// later
				CSVNStatusCache::Instance().AddFolderForCrawling(path.GetContainingDirectory());
				return CStatusCacheEntry();
			}
			else
			{
				// If we're in the special case of a directory being asked for its own status
				// and this directory is unversioned, then we should just return that here
				if(bRequestForSelf)
				{
					return CStatusCacheEntry();
				}
			}
		}

		// NOTE(review): GetDirectoryCacheEntryNoCreate() != NULL means a cache
		// entry for 'path' already exists — ask that entry directly for its
		// own (possibly recursive) status.
		if (CSVNStatusCache::Instance().GetDirectoryCacheEntryNoCreate(path) != NULL)
		{
			// Ask the directory if it knows its own status
			CCachedDirectory * dirEntry = CSVNStatusCache::Instance().GetDirectoryCacheEntry(path);
			if ((dirEntry)&&(dirEntry->IsOwnStatusValid()))
			{
				// To keep recursive status up to date, we'll request that children are all crawled again
				// We have to do this because the directory watcher isn't very reliable (especially under heavy load)
				// and also has problems with SUBSTed drives.
				// If nothing has changed in those directories, this crawling is fast and only
				// accesses two files for each directory.
				if (bRecursive)
				{
					AutoLocker lock(dirEntry->m_critSec);
					ChildDirStatus::const_iterator it;
					for(it = dirEntry->m_childDirectories.begin(); it != dirEntry->m_childDirectories.end(); ++it)
					{
						CTSVNPath newpath;
						CString winPath = CUnicodeUtils::GetUnicode (it->first);
						newpath.SetFromWin(winPath, true);
						CSVNStatusCache::Instance().AddFolderForCrawling(newpath);
					}
				}
				return dirEntry->GetOwnStatus(bRecursive);
			}
		}
		else
		{
			{
				// if we currently are fetching the status of the directory
				// we want the status for, we just return an empty entry here
				// and don't wait for that fetching to finish.
				// That's because fetching the status can take a *really* long
				// time (e.g. if a commit is also in progress on that same
				// directory), and we don't want to make the explorer appear
				// to hang.
				if ((!bFetch)&&(m_FetchingStatus))
				{
					if (m_directoryPath.IsAncestorOf(path))
					{
						// Force 'none' so the crawler notices a change later.
						m_currentFullStatus = m_mostImportantFileStatus = svn_wc_status_none;
						return GetCacheStatusForMember(path);
					}
				}
			}
			// Look up a file in our own cache
			AutoLocker lock(m_critSec);
			strCacheKey = GetCacheKey(path);
			CacheEntryMap::iterator itMap = m_entryCache.find(strCacheKey);
			if(itMap != m_entryCache.end())
			{
				// We've hit the cache - check for timeout
				if(!itMap->second.HasExpired((long)GetTickCount()))
				{
					if(itMap->second.DoesFileTimeMatch(path.GetLastWriteTime()))
					{
						// A 'missing' entry is only trusted while the file
						// really is missing on disk.
						if ((itMap->second.GetEffectiveStatus()!=svn_wc_status_missing)||(!PathFileExists(path.GetWinPath())))
						{
							// Note: the filetime matches after a modified has been committed too.
							// So in that case, we would return a wrong status (e.g. 'modified' instead
							// of 'normal') here.
							return itMap->second;
						}
					}
				}
			}
		}
	}
	else
	{
		// wc.db changed since our last refresh: the cache is stale.
		if ((!bFetch)&&(m_FetchingStatus))
		{
			if (m_directoryPath.IsAncestorOf(path))
			{
				// returning empty status (status fetch in progress)
				// also set the status to 'none' to have the status change and
				// the shell updater invoked in the crawler
				m_currentFullStatus = m_mostImportantFileStatus = svn_wc_status_none;
				CSVNStatusCache::Instance().AddFolderForCrawling(m_directoryPath.GetDirectory());
				return GetCacheStatusForMember(path);
			}
		}
		// if we're fetching the status for the explorer,
		// we don't refresh the status but use the one
		// we already have (to save time and make the explorer
		// more responsive in stress conditions).
		// We leave the refreshing to the crawler.
		if ((!bFetch)&&(m_wcDbFileTime))
		{
			CSVNStatusCache::Instance().AddFolderForCrawling(m_directoryPath.GetDirectory());
			return GetCacheStatusForMember(path);
		}
		AutoLocker lock(m_critSec);
		m_entryCache.clear();
		strCacheKey = GetCacheKey(path);
	}

	// We've not got this item in the cache - let's add it
	// We never bother asking SVN for the status of just one file, always for its containing directory
	if (g_SVNAdminDir.IsAdminDirPath(path.GetWinPathString()))
	{
		// We're being asked for the status of an .SVN directory
		// It's not worth asking for this
		return CStatusCacheEntry();
	}

	{
		// Same fetch-in-progress short circuit as above, re-checked just
		// before we would start an expensive refresh ourselves.
		if ((!bFetch)&&(m_FetchingStatus))
		{
			if (m_directoryPath.IsAncestorOf(path))
			{
				m_currentFullStatus = m_mostImportantFileStatus = svn_wc_status_none;
				return GetCacheStatusForMember(path);
			}
		}
	}

	{
		// Reset all cached state before the refresh repopulates it.
		AutoLocker lock(m_critSec);
		m_mostImportantFileStatus = svn_wc_status_none;
		m_childDirectories.clear();
		m_entryCache.clear();
		m_ownStatus.SetStatus(NULL, false, false);
	}
	if(!bThisDirectoryIsUnversioned)
	{
		if (!SvnUpdateMembersStatus())
		{
			// Refresh failed: invalidate the timestamp so the next call
			// tries again instead of trusting empty data.
			m_wcDbFileTime = 0;
			return CStatusCacheEntry();
		}
	}

	// Now that we've refreshed our SVN status, we can see if it's
	// changed the 'most important' status value for this directory.
	// If it has, then we should tell our parent
	UpdateCurrentStatus();
	m_wcDbFileTime = dbFileTime;

	if (path.IsDirectory())
	{
		CCachedDirectory * dirEntry = CSVNStatusCache::Instance().GetDirectoryCacheEntry(path);
		if ((dirEntry)&&(dirEntry->IsOwnStatusValid()))
		{
			//CSVNStatusCache::Instance().AddFolderForCrawling(path);
			return dirEntry->GetOwnStatus(bRecursive);
		}

		// If the status *still* isn't valid here, it means that
		// the current directory is unversioned, and we shall need to ask its children for info about themselves
		if ((dirEntry)&&(dirEntry != this))
			return dirEntry->GetStatusForMember(path,bRecursive);
		// add the path for crawling: if it's really unversioned, the crawler will
		// only check for the admin dir and do nothing more. But if it is
		// versioned (could happen in a nested layout) the crawler will update its
		// status correctly
		CSVNStatusCache::Instance().AddFolderForCrawling(path);
		return CStatusCacheEntry();
	}
	else
	{
		CacheEntryMap::iterator itMap = m_entryCache.find(strCacheKey);
		if(itMap != m_entryCache.end())
		{
			return itMap->second;
		}
	}

	// Still nothing known about this member: record an empty entry.
	AddEntry(path, NULL, false, false);
	return CStatusCacheEntry();
}
// Background crawler thread (older variant using the manual
// WaitToRead/WaitToWrite/Done lock protocol on CGitStatusCache).
// Sleeps on m_hWakeEvent, then drains two work queues:
//  - m_pathsToUpdate:   changed paths reported by the directory watcher
//  - m_foldersToUpdate: folders whose cached status must be (re)computed
// Exits when m_hTerminationEvent fires, an event is abandoned, or m_bRun
// drops to false.
// NOTE(review): the WaitToRead()/WaitToWrite()/Done() call pairing below is
// exact and asymmetric between branches — verify lock balance against the
// CGitStatusCache implementation before touching any of it.
void CFolderCrawler::WorkerThread()
{
	HANDLE hWaitHandles[2];
	hWaitHandles[0] = m_hTerminationEvent;
	hWaitHandles[1] = m_hWakeEvent;
	CTGitPath workingPath;
	bool bFirstRunAfterWakeup = false;
	DWORD currentTicks = 0;

	// Quick check if we're on Vista
	// (THREAD_MODE_BACKGROUND_* is only available on Vista/0x0600 and later)
	OSVERSIONINFOEX inf;
	SecureZeroMemory(&inf, sizeof(OSVERSIONINFOEX));
	inf.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
	GetVersionEx((OSVERSIONINFO *)&inf);
	WORD fullver = MAKEWORD(inf.dwMinorVersion, inf.dwMajorVersion);

	for(;;)
	{
		bool bRecursive = !!(DWORD)CRegStdDWORD(_T("Software\\TortoiseGit\\RecursiveOverlay"), TRUE);

		if (fullver >= 0x0600)
		{
			// Leave background mode while we block on the events.
			SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_END);
		}
		DWORD waitResult = WaitForMultipleObjects(_countof(hWaitHandles), hWaitHandles, FALSE, INFINITE);

		// exit event/working loop if the first event (m_hTerminationEvent)
		// has been signaled or if one of the events has been abandoned
		// (i.e. ~CFolderCrawler() is being executed)
		if(m_bRun == false || waitResult == WAIT_OBJECT_0 || waitResult == WAIT_ABANDONED_0 || waitResult == WAIT_ABANDONED_0+1)
		{
			// Termination event
			break;
		}

		if (fullver >= 0x0600)
		{
			SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN);
		}

		// If we get here, we've been woken up by something being added to the queue.
		// However, it's important that we don't do our crawling while
		// the shell is still asking for items
		bFirstRunAfterWakeup = true;
		for(;;)
		{
			if (!m_bRun)
				break;
			// Any locks today?
			if (CGitStatusCache::Instance().m_bClearMemory)
			{
				CGitStatusCache::Instance().WaitToWrite();
				CGitStatusCache::Instance().ClearCache();
				CGitStatusCache::Instance().Done();
				CGitStatusCache::Instance().m_bClearMemory = false;
			}
			if(m_lCrawlInhibitSet > 0)
			{
				// We're in crawl hold-off
				ATLTRACE("Crawl hold-off\n");
				Sleep(50);
				continue;
			}
			if (bFirstRunAfterWakeup)
			{
				// Give the shell a moment to finish its status queries.
				Sleep(20);
				ATLTRACE("Crawl bFirstRunAfterWakeup\n");
				bFirstRunAfterWakeup = false;
				continue;
			}
			if ((m_blockReleasesAt < GetTickCount())&&(!m_blockedPath.IsEmpty()))
			{
				ATLTRACE(_T("Crawl stop blocking path %s\n"), m_blockedPath.GetWinPath());
				m_blockedPath.Reset();
			}

			if ((m_foldersToUpdate.empty())&&(m_pathsToUpdate.empty()))
			{
				// Nothing left to do
				break;
			}
			currentTicks = GetTickCount();
			if (!m_pathsToUpdate.empty())
			{
				{
					AutoLocker lock(m_critSec);
					m_bPathsAddedSinceLastCrawl = false;
					workingPath = m_pathsToUpdate.front();
					//m_pathsToUpdateUnique.erase (workingPath);
					m_pathsToUpdate.pop_front();
					// Not due yet (custom data holds a tick deadline) or the
					// path is currently blocked: push it to the back again.
					if ((DWORD(workingPath.GetCustomData()) >= currentTicks) || ((!m_blockedPath.IsEmpty())&&(m_blockedPath.IsAncestorOf(workingPath))))
					{
						// move the path to the end of the list
						//m_pathsToUpdateUnique.insert (workingPath);
						m_pathsToUpdate.push_back(workingPath);
						if (m_pathsToUpdate.size() < 3)
							Sleep(50);
						continue;
					}
				}
				// don't crawl paths that are excluded
				if (!CGitStatusCache::Instance().IsPathAllowed(workingPath))
					continue;
				// check if the changed path is inside an .git folder
				CString projectroot;
				if ((workingPath.HasAdminDir(&projectroot)&&workingPath.IsDirectory()) || workingPath.IsAdminDir())
				{
					// we don't crawl for paths changed in a tmp folder inside an .git folder.
					// Because we also get notifications for those even if we just ask for the status!
					// And changes there don't affect the file status at all, so it's safe
					// to ignore notifications on those paths.
					if (workingPath.IsAdminDir())
					{
						// TODO: add git specific filters here. is there really any change besides index file in .git
						// that is relevant for overlays?
						/*CString lowerpath = workingPath.GetWinPathString();
						lowerpath.MakeLower();
						if (lowerpath.Find(_T("\\tmp\\"))>0)
							continue;
						if (lowerpath.Find(_T("\\tmp")) == (lowerpath.GetLength()-4))
							continue;
						if (lowerpath.Find(_T("\\log"))>0)
							continue;*/
						// Here's a little problem:
						// the lock file is also created for fetching the status
						// and not just when committing.
						// If we could find out why the lock file was changed
						// we could decide to crawl the folder again or not.
						// But for now, we have to crawl the parent folder
						// no matter what.

						//if (lowerpath.Find(_T("\\lock"))>0)
						//	continue;
					}
					else if (!workingPath.Exists())
					{
						CGitStatusCache::Instance().WaitToWrite();
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						CGitStatusCache::Instance().Done();
						continue;
					}

					// Walk up until we're outside the admin (.git) directory.
					do
					{
						workingPath = workingPath.GetContainingDirectory();
					} while(workingPath.IsAdminDir());

					ATLTRACE(_T("Invalidating and refreshing folder: %s\n"), workingPath.GetWinPath());
					{
						// Publish the current activity for the debug/status window.
						AutoLocker print(critSec);
						_stprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _T("Invalidating and refreshing folder: %s"), workingPath.GetWinPath());
						nCurrentCrawledpathIndex++;
						if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
							nCurrentCrawledpathIndex = 0;
					}
					InvalidateRect(hWnd, NULL, FALSE);
					CGitStatusCache::Instance().WaitToRead();
					// Invalidate the cache of this folder, to make sure its status is fetched again.
					CCachedDirectory * pCachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath);
					if (pCachedDir)
					{
						git_wc_status_kind status = pCachedDir->GetCurrentFullStatus();
						pCachedDir->Invalidate();
						if (workingPath.Exists())
						{
							pCachedDir->RefreshStatus(bRecursive);
							// if the previous status wasn't normal and now it is, then
							// send a notification too.
							// We do this here because GetCurrentFullStatus() doesn't send
							// notifications for 'normal' status - if it would, we'd get tons
							// of notifications when crawling a working copy not yet in the cache.
							if ((status != git_wc_status_normal)&&(pCachedDir->GetCurrentFullStatus() != status))
							{
								CGitStatusCache::Instance().UpdateShell(workingPath);
								ATLTRACE(_T("shell update in crawler for %s\n"), workingPath.GetWinPath());
							}
						}
						else
						{
							// Folder vanished: upgrade to a write lock and drop it.
							// NOTE(review): Done() here releases the read lock taken
							// above; the final Done() below then releases the write
							// lock — confirm against CGitStatusCache's protocol.
							CGitStatusCache::Instance().Done();
							CGitStatusCache::Instance().WaitToWrite();
							CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						}
					}
					CGitStatusCache::Instance().Done();

					//In case that svn_client_stat() modified a file and we got
					//a notification about that in the directory watcher,
					//remove that here again - this is to prevent an endless loop
					AutoLocker lock(m_critSec);
					m_pathsToUpdate.erase(std::remove(m_pathsToUpdate.begin(), m_pathsToUpdate.end(), workingPath), m_pathsToUpdate.end());
				}
				else if (workingPath.HasAdminDir())
				{
					if (!workingPath.Exists())
					{
						CGitStatusCache::Instance().WaitToWrite();
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						CGitStatusCache::Instance().Done();
						continue;
					}
					if (!workingPath.Exists())
						continue;
					ATLTRACE(_T("Updating path: %s\n"), workingPath.GetWinPath());
					{
						AutoLocker print(critSec);
						_stprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _T("Updating path: %s"), workingPath.GetWinPath());
						nCurrentCrawledpathIndex++;
						if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
							nCurrentCrawledpathIndex = 0;
					}
					InvalidateRect(hWnd, NULL, FALSE);
					// HasAdminDir() already checks if the path points to a dir
					DWORD flags = TGITCACHE_FLAGS_FOLDERISKNOWN;
					flags |= (workingPath.IsDirectory() ? TGITCACHE_FLAGS_ISFOLDER : 0);
					flags |= (bRecursive ? TGITCACHE_FLAGS_RECUSIVE_STATUS : 0);
					CGitStatusCache::Instance().WaitToRead();
					// Invalidate the cache of folders manually. The cache of files is invalidated
					// automatically if the status is asked for it and the file times don't match
					// anymore, so we don't need to manually invalidate those.
					if (workingPath.IsDirectory())
					{
						CCachedDirectory * cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath);
						if (cachedDir)
							cachedDir->Invalidate();
					}
					CStatusCacheEntry ce = CGitStatusCache::Instance().GetStatusForPath(workingPath, flags);
					if (ce.GetEffectiveStatus() > git_wc_status_unversioned)
					{
						CGitStatusCache::Instance().UpdateShell(workingPath);
						ATLTRACE(_T("shell update in folder crawler for %s\n"), workingPath.GetWinPath());
					}
					CGitStatusCache::Instance().Done();
					AutoLocker lock(m_critSec);
					m_pathsToUpdate.erase(std::remove(m_pathsToUpdate.begin(), m_pathsToUpdate.end(), workingPath), m_pathsToUpdate.end());
				}
				else
				{
					// Path is neither versioned nor an admin dir: just drop
					// any stale cache entry if it disappeared from disk.
					if (!workingPath.Exists())
					{
						CGitStatusCache::Instance().WaitToWrite();
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						CGitStatusCache::Instance().Done();
					}
				}
			}
			else if (!m_foldersToUpdate.empty())
			{
				{
					AutoLocker lock(m_critSec);
					m_bItemsAddedSinceLastCrawl = false;

					// create a new CTSVNPath object to make sure the cached flags are requested again.
					// without this, a missing file/folder is still treated as missing even if it is available
					// now when crawling.
					CTGitPath& folderToUpdate = m_foldersToUpdate.front();
					workingPath = CTGitPath(folderToUpdate.GetWinPath());
					workingPath.SetCustomData(folderToUpdate.GetCustomData());
					m_foldersToUpdate.pop_front();

					if ((DWORD(workingPath.GetCustomData()) >= currentTicks) || ((!m_blockedPath.IsEmpty())&&(m_blockedPath.IsAncestorOf(workingPath))))
					{
						// move the path to the end of the list
						m_foldersToUpdate.push_back (workingPath);
						if (m_foldersToUpdate.size() < 3)
							Sleep(50);
						continue;
					}
				}
				// Re-check the deferral conditions outside the lock.
				if (DWORD(workingPath.GetCustomData()) >= currentTicks)
				{
					Sleep(50);
					continue;
				}
				if ((!m_blockedPath.IsEmpty())&&(m_blockedPath.IsAncestorOf(workingPath)))
					continue;
				if (!CGitStatusCache::Instance().IsPathAllowed(workingPath))
					continue;

				ATLTRACE(_T("Crawling folder: %s\n"), workingPath.GetWinPath());
				{
					AutoLocker print(critSec);
					_stprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _T("Crawling folder: %s"), workingPath.GetWinPath());
					nCurrentCrawledpathIndex++;
					if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
						nCurrentCrawledpathIndex = 0;
				}
				InvalidateRect(hWnd, NULL, FALSE);
				CGitStatusCache::Instance().WaitToRead();
				// Now, we need to visit this folder, to make sure that we know its 'most important' status
				CCachedDirectory * cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath.GetDirectory());
				// check if the path is monitored by the watcher. If it isn't, then we have to invalidate the cache
				// for that path and add it to the watcher.
				if (!CGitStatusCache::Instance().IsPathWatched(workingPath))
				{
					if (workingPath.HasAdminDir())
					{
						ATLTRACE(_T("Add watch path %s\n"), workingPath.GetWinPath());
						CGitStatusCache::Instance().AddPathToWatch(workingPath);
					}
					if (cachedDir)
						cachedDir->Invalidate();
					else
					{
						// NOTE(review): Done() releases the read lock before the
						// write lock is taken; the trailing Done() below then
						// releases the write lock — confirm the protocol.
						CGitStatusCache::Instance().Done();
						CGitStatusCache::Instance().WaitToWrite();
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
					}
				}
				if (cachedDir)
					cachedDir->RefreshStatus(bRecursive);

#if 0
				// While refreshing the status, we could get another crawl request for the same folder.
				// This can happen if the crawled folder has a lower status than one of the child folders
				// (recursively). To avoid double crawlings, remove such a crawl request here
				AutoLocker lock(m_critSec);
				if (m_bItemsAddedSinceLastCrawl)
				{
					if (m_foldersToUpdate.back().IsEquivalentToWithoutCase(workingPath))
					{
						m_foldersToUpdate.pop_back();
						m_bItemsAddedSinceLastCrawl = false;
					}
				}
#endif
				CGitStatusCache::Instance().Done();
			}
		}
	}
	_endthread();
}
// Background crawler thread (modernized variant using RAII lock guards
// CAutoReadLock/CAutoWriteLock on the cache's shared guard).
// Sleeps on m_hWakeEvent and drains three queues per wakeup:
//  - m_pathsToRelease:  repository handles to release via GitStatus::ReleasePath
//  - m_pathsToUpdate:   changed paths reported by the directory watcher
//  - m_foldersToUpdate: folders whose cached status must be (re)computed
// Exits on m_hTerminationEvent, an abandoned event, or m_bRun == false.
// NOTE(review): several branches construct a CAutoWriteLock while a
// CAutoReadLock on the same guard is in scope — verify the guard type
// supports (or the scopes avoid) read->write acquisition before refactoring.
void CFolderCrawler::WorkerThread()
{
	HANDLE hWaitHandles[2];
	hWaitHandles[0] = m_hTerminationEvent;
	hWaitHandles[1] = m_hWakeEvent;
	CTGitPath workingPath;
	ULONGLONG currentTicks = 0;

	for(;;)
	{
		bool bRecursive = !!(DWORD)CRegStdDWORD(L"Software\\TortoiseGit\\RecursiveOverlay", TRUE);

		// Leave background mode while we block on the events.
		SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_END);

		DWORD waitResult = WaitForMultipleObjects(_countof(hWaitHandles), hWaitHandles, FALSE, INFINITE);

		// exit event/working loop if the first event (m_hTerminationEvent)
		// has been signaled or if one of the events has been abandoned
		// (i.e. ~CFolderCrawler() is being executed)
		if(m_bRun == false || waitResult == WAIT_OBJECT_0 || waitResult == WAIT_ABANDONED_0 || waitResult == WAIT_ABANDONED_0+1)
		{
			// Termination event
			break;
		}

		SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN);

		// If we get here, we've been woken up by something being added to the queue.
		// However, it's important that we don't do our crawling while
		// the shell is still asking for items
		bool bFirstRunAfterWakeup = true;
		for(;;)
		{
			if (!m_bRun)
				break;
			// Any locks today?
			if (CGitStatusCache::Instance().m_bClearMemory)
			{
				CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
				CGitStatusCache::Instance().ClearCache();
				CGitStatusCache::Instance().m_bClearMemory = false;
			}
			if(m_lCrawlInhibitSet > 0)
			{
				// We're in crawl hold-off
				CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Crawl hold-off\n");
				Sleep(50);
				continue;
			}
			if (bFirstRunAfterWakeup)
			{
				// Give the shell a moment to finish its status queries.
				Sleep(20);
				CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Crawl bFirstRunAfterWakeup\n");
				bFirstRunAfterWakeup = false;
				continue;
			}
			if ((m_blockReleasesAt < GetTickCount64()) && (!m_blockedPath.IsEmpty()))
			{
				CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Crawl stop blocking path %s\n", m_blockedPath.GetWinPath());
				m_blockedPath.Reset();
			}
			CGitStatusCache::Instance().RemoveTimedoutBlocks();

			// Release queued repository handles before doing any crawling.
			while (!m_pathsToRelease.empty())
			{
				AutoLocker lock(m_critSec);
				CTGitPath path = m_pathsToRelease.Pop();
				GitStatus::ReleasePath(path.GetWinPathString());
			}

			if (m_foldersToUpdate.empty() && m_pathsToUpdate.empty())
			{
				// Nothing left to do
				break;
			}
			currentTicks = GetTickCount64();
			if (!m_pathsToUpdate.empty())
			{
				{
					AutoLocker lock(m_critSec);
					m_bPathsAddedSinceLastCrawl = false;
					workingPath = m_pathsToUpdate.Pop();
					if ((!m_blockedPath.IsEmpty()) && (m_blockedPath.IsAncestorOf(workingPath)))
					{
						// move the path to the end of the list
						m_pathsToUpdate.Push(workingPath);
						if (m_pathsToUpdate.size() < 3)
							Sleep(50);
						continue;
					}
				}
				// don't crawl paths that are excluded
				if (!CGitStatusCache::Instance().IsPathAllowed(workingPath))
					continue;
				// check if the changed path is inside an .git folder
				CString projectroot;
				if ((workingPath.HasAdminDir(&projectroot)&&workingPath.IsDirectory()) || workingPath.IsAdminDir())
				{
					// we don't crawl for paths changed in a tmp folder inside an .git folder.
					// Because we also get notifications for those even if we just ask for the status!
					// And changes there don't affect the file status at all, so it's safe
					// to ignore notifications on those paths.
					if (workingPath.IsAdminDir())
					{
						// TODO: add git specific filters here. is there really any change besides index file in .git
						// that is relevant for overlays?
						/*CString lowerpath = workingPath.GetWinPathString();
						lowerpath.MakeLower();
						if (lowerpath.Find(L"\\tmp\\") > 0)
							continue;
						if (CStringUtils::EndsWith(lowerpath, L"\\tmp"))
							continue;
						if (lowerpath.Find(L"\\log") > 0)
							continue;*/
						// Here's a little problem:
						// the lock file is also created for fetching the status
						// and not just when committing.
						// If we could find out why the lock file was changed
						// we could decide to crawl the folder again or not.
						// But for now, we have to crawl the parent folder
						// no matter what.

						//if (lowerpath.Find(L"\\lock") > 0)
						//	continue;
						// only go back to wc root if we are in .git-dir
						do
						{
							workingPath = workingPath.GetContainingDirectory();
						} while(workingPath.IsAdminDir());
					}
					else if (!workingPath.Exists())
					{
						CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						continue;
					}

					if (!CGitStatusCache::Instance().IsPathGood(workingPath))
					{
						AutoLocker lock(m_critSec);
						// move the path, the root of the repository, to the end of the list
						if (projectroot.IsEmpty())
							m_pathsToUpdate.Push(workingPath);
						else
							m_pathsToUpdate.Push(CTGitPath(projectroot));

						if (m_pathsToUpdate.size() < 3)
							Sleep(50);
						continue;
					}

					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Invalidating and refreshing folder: %s\n", workingPath.GetWinPath());
					{
						// Publish the current activity for the debug/status window.
						AutoLocker print(critSec);
						_sntprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _TRUNCATE, L"Invalidating and refreshing folder: %s", workingPath.GetWinPath());
						++nCurrentCrawledpathIndex;
						if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
							nCurrentCrawledpathIndex = 0;
					}
					InvalidateRect(hWndHidden, nullptr, FALSE);
					{
						CAutoReadLock readLock(CGitStatusCache::Instance().GetGuard());
						// Invalidate the cache of this folder, to make sure its status is fetched again.
						CCachedDirectory * pCachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath);
						if (pCachedDir)
						{
							git_wc_status_kind status = pCachedDir->GetCurrentFullStatus();
							pCachedDir->Invalidate();
							if (workingPath.Exists())
							{
								pCachedDir->RefreshStatus(bRecursive);
								// if the previous status wasn't normal and now it is, then
								// send a notification too.
								// We do this here because GetCurrentFullStatus() doesn't send
								// notifications for 'normal' status - if it would, we'd get tons
								// of notifications when crawling a working copy not yet in the cache.
								if ((status != git_wc_status_normal) && (pCachedDir->GetCurrentFullStatus() != status))
								{
									CGitStatusCache::Instance().UpdateShell(workingPath);
									CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": shell update in crawler for %s\n", workingPath.GetWinPath());
								}
							}
							else
							{
								// Folder vanished: drop its cache entry.
								CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
								CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
							}
						}
					}
					//In case that svn_client_stat() modified a file and we got
					//a notification about that in the directory watcher,
					//remove that here again - this is to prevent an endless loop
					AutoLocker lock(m_critSec);
					m_pathsToUpdate.erase(workingPath);
				}
				else if (workingPath.HasAdminDir())
				{
					if (!workingPath.Exists())
					{
						CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
						if (!workingPath.GetContainingDirectory().Exists())
							continue;
						else
							workingPath = workingPath.GetContainingDirectory();
					}
					CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Updating path: %s\n", workingPath.GetWinPath());
					{
						AutoLocker print(critSec);
						_sntprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _TRUNCATE, L"Updating path: %s", workingPath.GetWinPath());
						++nCurrentCrawledpathIndex;
						if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
							nCurrentCrawledpathIndex = 0;
					}
					InvalidateRect(hWndHidden, nullptr, FALSE);
					{
						CAutoReadLock readLock(CGitStatusCache::Instance().GetGuard());
						// Invalidate the cache of folders manually. The cache of files is invalidated
						// automatically if the status is asked for it and the file times don't match
						// anymore, so we don't need to manually invalidate those.
						CCachedDirectory* cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath.GetDirectory());
						if (cachedDir && workingPath.IsDirectory())
							cachedDir->Invalidate();
						if (cachedDir && cachedDir->GetStatusForMember(workingPath, bRecursive).GetEffectiveStatus() > git_wc_status_unversioned)
							CGitStatusCache::Instance().UpdateShell(workingPath);
					}
					AutoLocker lock(m_critSec);
					m_pathsToUpdate.erase(workingPath);
				}
				else
				{
					// Path is neither versioned nor an admin dir: just drop
					// any stale cache entry if it disappeared from disk.
					if (!workingPath.Exists())
					{
						CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
						CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
					}
				}
			}
			if (!m_foldersToUpdate.empty())
			{
				{
					AutoLocker lock(m_critSec);
					m_bItemsAddedSinceLastCrawl = false;

					// create a new CTGitPath object to make sure the cached flags are requested again.
					// without this, a missing file/folder is still treated as missing even if it is available
					// now when crawling.
					workingPath = CTGitPath(m_foldersToUpdate.Pop().GetWinPath());

					if ((!m_blockedPath.IsEmpty())&&(m_blockedPath.IsAncestorOf(workingPath)))
					{
						// move the path to the end of the list
						m_foldersToUpdate.Push(workingPath);
						if (m_foldersToUpdate.size() < 3)
							Sleep(50);
						continue;
					}
				}
				// Re-check blocking/filters outside the queue lock.
				if ((!m_blockedPath.IsEmpty())&&(m_blockedPath.IsAncestorOf(workingPath)))
					continue;
				if (!CGitStatusCache::Instance().IsPathAllowed(workingPath))
					continue;
				if (!CGitStatusCache::Instance().IsPathGood(workingPath))
					continue;

				CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Crawling folder: %s\n", workingPath.GetWinPath());
				{
					AutoLocker print(critSec);
					_sntprintf_s(szCurrentCrawledPath[nCurrentCrawledpathIndex], MAX_CRAWLEDPATHSLEN, _TRUNCATE, L"Crawling folder: %s", workingPath.GetWinPath());
					++nCurrentCrawledpathIndex;
					if (nCurrentCrawledpathIndex >= MAX_CRAWLEDPATHS)
						nCurrentCrawledpathIndex = 0;
				}
				InvalidateRect(hWndHidden, nullptr, FALSE);
				{
					CAutoReadLock readLock(CGitStatusCache::Instance().GetGuard());
					// Now, we need to visit this folder, to make sure that we know its 'most important' status
					CCachedDirectory * cachedDir = CGitStatusCache::Instance().GetDirectoryCacheEntry(workingPath.GetDirectory());
					// check if the path is monitored by the watcher. If it isn't, then we have to invalidate the cache
					// for that path and add it to the watcher.
					if (!CGitStatusCache::Instance().IsPathWatched(workingPath))
					{
						if (workingPath.HasAdminDir())
						{
							CTraceToOutputDebugString::Instance()(_T(__FUNCTION__) L": Add watch path %s\n", workingPath.GetWinPath());
							CGitStatusCache::Instance().AddPathToWatch(workingPath);
						}
						if (cachedDir)
							cachedDir->Invalidate();
						else
						{
							CAutoWriteLock writeLock(CGitStatusCache::Instance().GetGuard());
							CGitStatusCache::Instance().RemoveCacheForPath(workingPath);
							// now cacheDir is invalid because it got deleted in the RemoveCacheForPath() call above.
							cachedDir = nullptr;
						}
					}
					if (cachedDir)
						cachedDir->RefreshStatus(bRecursive);
				}

				// While refreshing the status, we could get another crawl request for the same folder.
				// This can happen if the crawled folder has a lower status than one of the child folders
				// (recursively). To avoid double crawlings, remove such a crawl request here
				AutoLocker lock(m_critSec);
				if (m_bItemsAddedSinceLastCrawl)
				{
					m_foldersToUpdate.erase(workingPath);
				}
			}
		}
	}
	_endthread();
}
void CGitStatusCache::Create() { ATLASSERT(m_pInstance == NULL); m_pInstance = new CGitStatusCache; m_pInstance->watcher.SetFolderCrawler(&m_pInstance->m_folderCrawler); #define LOADVALUEFROMFILE(x) if (fread(&x, sizeof(x), 1, pFile)!=1) goto exit; #define LOADVALUEFROMFILE2(x) if (fread(&x, sizeof(x), 1, pFile)!=1) goto error; unsigned int value = (unsigned int)-1; FILE * pFile = NULL; // find the location of the cache TCHAR path[MAX_PATH]; //MAX_PATH ok here. TCHAR path2[MAX_PATH]; if (SHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA, NULL, SHGFP_TYPE_CURRENT, path)==S_OK) { _tcscat_s(path, MAX_PATH, _T("\\TGitCache")); if (!PathIsDirectory(path)) { if (CreateDirectory(path, NULL)==0) goto error; } _tcscat_s(path, MAX_PATH, _T("\\cache")); // in case the cache file is corrupt, we could crash while // reading it! To prevent crashing every time once that happens, // we make a copy of the cache file and use that copy to read from. // if that copy is corrupt, the original file won't exist anymore // and the second time we start up and try to read the file, // it's not there anymore and we start from scratch without a crash. 
_tcscpy_s(path2, MAX_PATH, path); _tcscat_s(path2, MAX_PATH, _T("2")); DeleteFile(path2); CopyFile(path, path2, FALSE); DeleteFile(path); pFile = _tfsopen(path2, _T("rb"), _SH_DENYNO); if (pFile) { try { LOADVALUEFROMFILE(value); if (value != 2) { goto error; } int mapsize = 0; LOADVALUEFROMFILE(mapsize); for (int i=0; i<mapsize; ++i) { LOADVALUEFROMFILE2(value); if (value > MAX_PATH) goto error; if (value) { CString sKey; if (fread(sKey.GetBuffer(value+1), sizeof(TCHAR), value, pFile)!=value) { sKey.ReleaseBuffer(0); goto error; } sKey.ReleaseBuffer(value); CCachedDirectory * cacheddir = new CCachedDirectory(); if (cacheddir == NULL) goto error; if (!cacheddir->LoadFromDisk(pFile)) goto error; CTGitPath KeyPath = CTGitPath(sKey); if (m_pInstance->IsPathAllowed(KeyPath)) { m_pInstance->m_directoryCache[KeyPath] = cacheddir; // only add the path to the watch list if it is versioned if ((cacheddir->GetCurrentFullStatus() != git_wc_status_unversioned)&&(cacheddir->GetCurrentFullStatus() != git_wc_status_none)) m_pInstance->watcher.AddPath(KeyPath, false); // do *not* add the paths for crawling! // because crawled paths will trigger a shell // notification, which makes the desktop flash constantly // until the whole first time crawling is over // m_pInstance->AddFolderForCrawling(KeyPath); } } } } catch (CAtlException) { goto error; } } } exit: if (pFile) fclose(pFile); DeleteFile(path2); m_pInstance->watcher.ClearInfoMap(); ATLTRACE("cache loaded from disk successfully!\n"); return; error: if (pFile) fclose(pFile); DeleteFile(path2); m_pInstance->watcher.ClearInfoMap(); Destroy(); m_pInstance = new CGitStatusCache; ATLTRACE("cache not loaded from disk\n"); }
/* Fetch is false, means fetch status from cache */
// Return the cached (or freshly fetched, when bFetch is true) status for a
// single path. A ~1 second "mini-cache" of the most recently answered path
// short-circuits the repeated queries the shell issues for the same item.
CStatusCacheEntry CGitStatusCache::GetStatusForPath(const CTGitPath& path, DWORD flags, bool bFetch /* = true */)
{
	bool bRecursive = !!(flags & TGITCACHE_FLAGS_RECUSIVE_STATUS);

	// Check a very short-lived 'mini-cache' of the last thing we were asked for.
	// The signed subtraction keeps the comparison correct across GetTickCount()
	// wrap-around.
	// NOTE(review): this fast path reads m_mostRecentPath/m_mostRecentStatus
	// without taking m_critSec while the writers below do lock it — presumably
	// a deliberate perf trade-off; confirm it is benign on this platform.
	long now = (long)GetTickCount();
	if(now-m_mostRecentExpiresAt < 0)
	{
		if(path.IsEquivalentToWithoutCase(m_mostRecentPath))
		{
			return m_mostRecentStatus;
		}
	}
	{
		// Remember what we were asked for; the status itself is stored later,
		// once one of the branches below has computed it.
		AutoLocker lock(m_critSec);
		m_mostRecentPath = path;
		m_mostRecentExpiresAt = now + 1000;
	}

	if (IsPathGood(path))
	{
		// Stop the crawler starting on a new folder while we're doing this much more important task...
		// Please note, that this may be a second "lock" used concurrently to the one in RemoveCacheForPath().
		CCrawlInhibitor crawlInhibit(&m_folderCrawler);

		// Resolve which directory cache entry is responsible for this path:
		// normally the containing directory, but fall back to the path's own
		// directory when there is no parent or the parent is excluded.
		CTGitPath dirpath = path.GetContainingDirectory();
		if ((dirpath.IsEmpty()) || (!m_shellCache.IsPathAllowed(dirpath.GetWinPath())))
			dirpath = path.GetDirectory();
		CCachedDirectory * cachedDir = GetDirectoryCacheEntry(dirpath);
		if (cachedDir != NULL)
		{
			//ATLTRACE(_T("GetStatusForMember %d\n"), bFetch);
			CStatusCacheEntry entry = cachedDir->GetStatusForMember(path, bRecursive, bFetch);
			{
				AutoLocker lock(m_critSec);
				m_mostRecentStatus = entry;
				return m_mostRecentStatus;
			}
		}
	}
	else
	{
		// path is blocked for some reason: return the cached status if we have one
		// we do here only a cache search, absolutely no disk access is allowed!
		CCachedDirectory::ItDir itMap = m_directoryCache.find(path.GetDirectory());
		if ((itMap != m_directoryCache.end())&&(itMap->second))
		{
			if (path.IsDirectory())
			{
				// directory itself: answer with its own (non-recursive) status
				CStatusCacheEntry entry = itMap->second->GetOwnStatus(false);
				AutoLocker lock(m_critSec);
				m_mostRecentStatus = entry;
				return m_mostRecentStatus;
			}
			else
			{
				// We've found this directory in the cache
				CCachedDirectory * cachedDir = itMap->second;
				CStatusCacheEntry entry = cachedDir->GetCacheStatusForMember(path);
				{
					AutoLocker lock(m_critSec);
					m_mostRecentStatus = entry;
					return m_mostRecentStatus;
				}
			}
		}
	}
	// Nothing known about this path: remember an empty status. Excluded
	// directories that still contain a git admin dir may optionally be shown
	// as 'normal' instead.
	AutoLocker lock(m_critSec);
	ATLTRACE(_T("ignored no good path %s\n"), path.GetWinPath());
	m_mostRecentStatus = CStatusCacheEntry();
	if (m_shellCache.ShowExcludedAsNormal() && path.IsDirectory() && m_shellCache.HasGITAdminDir(path.GetWinPath(), true))
	{
		ATLTRACE(_T("force status %s\n"), path.GetWinPath());
		m_mostRecentStatus.ForceStatus(git_wc_status_normal);
	}
	return m_mostRecentStatus;
}
// Determine the git status of 'path' (which lives under 'sProjectRoot').
// Tracked paths trigger a full status enumeration of this directory;
// untracked paths are classified as ignored/unversioned from the ignore
// rules and cached without touching the working tree further.
CStatusCacheEntry CCachedDirectory::GetStatusFromGit(const CTGitPath &path, CString sProjectRoot)
{
	// Strip the project root prefix (and a leading '/') so 'subpaths' is the
	// path relative to the repository root, as the GitStatus API expects.
	// NOTE(review): when the two lengths are equal, subpaths[len] indexes one
	// past the last character — relies on CString exposing the terminator
	// there; confirm this is safe with the ATL version in use.
	CString subpaths = path.GetGitPathString();
	if(subpaths.GetLength() >= sProjectRoot.GetLength())
	{
		if(subpaths[sProjectRoot.GetLength()] == _T('/'))
			subpaths=subpaths.Right(subpaths.GetLength() - sProjectRoot.GetLength()-1);
		else
			subpaths=subpaths.Right(subpaths.GetLength() - sProjectRoot.GetLength());
	}

	GitStatus *pGitStatus = &CGitStatusCache::Instance().m_GitStatus;
	CGitHash head;

	// NOTE(review): 'head' is not read afterwards — presumably GetHeadHash is
	// called for a side effect (e.g. refreshing internal state); verify.
	pGitStatus->GetHeadHash(sProjectRoot,head);

	bool isVersion =true;
	pGitStatus->IsUnderVersionControl(sProjectRoot, subpaths, path.IsDirectory(), &isVersion);
	if(!isVersion)
	{	//untracked file
		// Reload the ignore rules if the ignore file changed since last time,
		// so the ignored/unversioned decision below is up to date.
		bool isIgnoreFileChanged=false;

		isIgnoreFileChanged = pGitStatus->IsGitReposChanged(sProjectRoot, subpaths, GIT_MODE_IGNORE);

		if( isIgnoreFileChanged)
		{
			pGitStatus->LoadIgnoreFile(sProjectRoot, subpaths);
		}

		if(path.IsDirectory())
		{
			CCachedDirectory * dirEntry = CGitStatusCache::Instance().GetDirectoryCacheEntry(path,
											false); /* we needn't watch untracked directory*/

			if(dirEntry)
			{
				// Lock the target directory's entry while mutating its own status.
				AutoLocker lock(dirEntry->m_critSec);

				git_wc_status_kind dirstatus = dirEntry->GetCurrentFullStatus() ;
				// Recompute only when the status was never initialized, looks
				// stale (>= normal), or the ignore rules just changed.
				if( dirstatus == git_wc_status_none ||  dirstatus >= git_wc_status_normal || isIgnoreFileChanged )
				{/* status have not initialized*/
					// only text_status/prop_status of status2 are set here;
					// SetStatus() is expected to read just those fields
					git_wc_status2_t status2;
					bool isignore = false;
					pGitStatus->IsIgnore(sProjectRoot,subpaths,&isignore);
					status2.text_status = status2.prop_status =
						(isignore? git_wc_status_ignored:git_wc_status_unversioned);

					dirEntry->m_ownStatus.SetStatus(&status2);
					dirEntry->m_ownStatus.SetKind(git_node_dir);
				}
				return dirEntry->m_ownStatus;
			}
		}
		else /* path is file */
		{
			// Untracked file: answer from this directory's per-file entry
			// cache, refreshing it when missing or the ignore rules changed.
			AutoLocker lock(m_critSec);
			CString strCacheKey = GetCacheKey(path);

			CacheEntryMap::iterator itMap = m_entryCache.find(strCacheKey);
			if(itMap == m_entryCache.end() || isIgnoreFileChanged)
			{
				git_wc_status2_t status2;
				bool isignore = false;
				pGitStatus->IsIgnore(sProjectRoot,subpaths,&isignore);
				status2.text_status = status2.prop_status =
					(isignore? git_wc_status_ignored:git_wc_status_unversioned);
				AddEntry(path, &status2);
				return m_entryCache[strCacheKey];
			}
			else
			{
				return itMap->second;
			}
		}
		// untracked directory without a cache entry: fall back to an empty status
		return CStatusCacheEntry();
	}
	else
	{
		// Tracked path: enumerate the files of this directory, refresh the
		// composite status and report our own status.
		EnumFiles((CTGitPath*)&path, TRUE);
		UpdateCurrentStatus();
		return CStatusCacheEntry(m_ownStatus);
	}
}