// Returns the index of the cached crosshair-info entry for pMarine,
// or -1 if no entry refers to that marine.
int C_ASW_Game_Resource::CMarineToCrosshairInfo::FindIndexForMarine( C_ASW_Marine *pMarine )
{
	CheckCache();

	// Walk the cached entries until we hit one whose marine handle matches.
	int nIndex = 0;
	while ( nIndex < Count() )
	{
		if ( GetElement( nIndex ).m_hMarine.Get() == pMarine )
			return nIndex;
		++nIndex;
	}
	return -1;	// not found
}
// Moves item to the most-recently-used end of the LRU list and updates the
// running byte total. If the item was already tracked, its old pixmap size is
// unknown, so the total is recomputed from scratch before re-adding it.
//
// Fix: the std::accumulate init value was the int literal 0, which made the
// whole fold run in int — every qint64 pixmap size was truncated back to int,
// overflowing CurrentSize_ once the cache exceeded INT_MAX bytes. The init
// value must be a qint64 so the accumulation type is qint64.
void PixmapCacheManager::PixmapChanged (PageGraphicsItem *item)
{
	if (RecentlyUsed_.removeAll (item))
		CurrentSize_ = std::accumulate (RecentlyUsed_.begin (), RecentlyUsed_.end (),
				qint64 { 0 },
				[] (qint64 size, const PageGraphicsItem *item)
					{ return size + GetPixmapSize (item->pixmap ()); });

	RecentlyUsed_ << item;
	CurrentSize_ += GetPixmapSize (item->pixmap ());
	CheckCache ();
}
// Reads up to `length` bytes from the stream at the current cursor position.
// Serves the range from the local cache when fully present; otherwise issues
// a ranged HTTP GET and blocks on a nested event loop until it completes.
// Returns an empty ByteVector on an empty/invalid range or an HTTP error.
TagLib::ByteVector CloudStream::readBlock(ulong length) {
  const uint start = cursor_;
  // NOTE(review): unsigned arithmetic — if length == 0 or length_ == 0 these
  // subtractions wrap around; the `end < start` guard below catches the usual
  // cases, but confirm callers never rely on the wrapped values.
  const uint end = qMin(cursor_ + length - 1, length_ - 1);
  if (end < start) {
    return TagLib::ByteVector();
  }

  // Fast path: the whole [start, end] range is already cached.
  if (CheckCache(start, end)) {
    TagLib::ByteVector cached = GetCached(start, end);
    cursor_ += cached.size();
    return cached;
  }

  // Cache miss: fetch exactly [start, end] over the network.
  QNetworkRequest request = QNetworkRequest(url_);
  if (!auth_.isEmpty()) {
    request.setRawHeader("Authorization", auth_.toUtf8());
  }
  request.setRawHeader("Range", QString("bytes=%1-%2").arg(start).arg(end).toUtf8());
  // Bypass Qt's HTTP cache — we manage our own range cache.
  request.setAttribute(QNetworkRequest::CacheLoadControlAttribute,
                       QNetworkRequest::AlwaysNetwork);

  // The Ubuntu One server applies the byte range to the gzipped data, rather
  // than the raw data so we must disable compression.
  if (url_.host() == "files.one.ubuntu.com") {
    request.setRawHeader("Accept-Encoding", "identity");
  }

  QNetworkReply* reply = network_->get(request);
  connect(reply, SIGNAL(sslErrors(QList<QSslError>)),
          SLOT(SSLErrors(QList<QSslError>)));
  ++num_requests_;

  // Synchronous facade over the async QNetworkAccessManager: spin a local
  // event loop until the reply finishes.
  QEventLoop loop;
  QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
  loop.exec();

  reply->deleteLater();

  int code = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
  if (code >= 400) {
    qLog(Debug) << "Error retrieving url to tag:" << url_;
    return TagLib::ByteVector();
  }

  // Advance the cursor by the bytes actually received and remember them so
  // later reads of this range hit the cache.
  QByteArray data = reply->readAll();
  TagLib::ByteVector bytes(data.data(), data.size());
  cursor_ += data.size();
  FillCache(start, bytes);
  return bytes;
}
/*
 * Decode pipeline: receive the compressed block stream, decompress it,
 * run the cache check stage, and reassemble the output — to stdout-style
 * default when no output file was configured, otherwise to conf->outfile.
 * The whole pipeline is bracketed by the PARSEC region-of-interest hooks
 * when they are enabled at build time.
 */
void Decode(config * conf){
#ifdef ENABLE_PARSEC_HOOKS
   __parsec_roi_begin();
#endif

   RecvBlock(conf);
   Decompress(NULL);
   CheckCache(NULL);

   /* An empty outfile string means "no file was requested". */
   const char *outpath = (conf->outfile[0] == '\0') ? NULL : conf->outfile;
   Reassemble(outpath);

#ifdef ENABLE_PARSEC_HOOKS
   __parsec_roi_end();
#endif
}
TagLib::ByteVector CloudStream::readBlock(ulong length) { const uint start = cursor_; const uint end = qMin(cursor_ + length - 1, length_ - 1); if (end < start) { return TagLib::ByteVector(); } if (CheckCache(start, end)) { TagLib::ByteVector cached = GetCached(start, end); cursor_ += cached.size(); return cached; } QNetworkRequest request = QNetworkRequest(url_); foreach (const QString& key, headers_) { request.setRawHeader(key.toLatin1(), headers_[key].toUtf8()); }
// Re-reads the configured pixmap cache limit and evicts entries if the
// current contents no longer fit under the new limit.
void PixmapCacheManager::handleCacheSizeChanged ()
{
	// The setting is stored in megabytes; MaxSize_ is kept in bytes.
	const auto sizeMib = XmlSettingsManager::Instance ()
			.property ("PixmapCacheSize").value<qint64> ();
	MaxSize_ = sizeMib * 1024 * 1024;
	CheckCache ();
}
// Asynchronous variant of readBlock(): serves the requested range from the
// local cache when fully present; on a miss it *starts* a ranged HTTP GET and
// immediately returns an empty ByteVector — onRequestFinished() delivers the
// data later. An empty vector is therefore ambiguous between "pending" and
// "error"; callers track state via m_currentStart / m_currentBlocklength.
TagLib::ByteVector CloudStream::readBlock( ulong length )
{
    const uint start = m_cursor;
    // NOTE(review): unsigned arithmetic — length == 0 or m_length == 0 wraps
    // here; the `end < start` guard below catches the usual cases.
    const uint end = qMin( m_cursor + length - 1, m_length - 1 );
    //tDebug( LOGINFO ) << "#### CloudStream : parsing from " << m_url.toString();
    //tDebug( LOGINFO ) << "#### CloudStream : parsing from (encoded) " << m_url.toEncoded().constData();
    if ( end < start )
    {
        return TagLib::ByteVector();
    }

    // Fast path: the whole [start, end] range is already cached.
    if ( CheckCache( start, end ) )
    {
        TagLib::ByteVector cached = GetCached( start, end );
        m_cursor += cached.size();
        return cached;
    }

    // Stop issuing requests after too many consecutive failures.
    if ( m_num_requests_in_error > MAX_ALLOW_ERROR_QUERY )
    {
        //precache();
        return TagLib::ByteVector();
    }

    // Some cloud providers hand out short-lived stream URLs that must be
    // refreshed before every request.
    if ( m_refreshUrlEachTime )
    {
        if( !refreshStreamUrl() )
        {
            tDebug( LOGINFO ) << "#### CloudStream : cannot refresh streamUrl for " << m_filename;
        }
    }

    QNetworkRequest request = QNetworkRequest( m_url );
    // Apply provider-specific OAuth (1 or 2) headers.
    foreach ( const QString& headerName, m_headers.keys() )
    {
        request.setRawHeader( headerName.toLocal8Bit(), m_headers[headerName].toString().toLocal8Bit() );
    }

    request.setRawHeader( "Range", QString( "bytes=%1-%2" ).arg( start ).arg( end ).toUtf8() );
    // Bypass Qt's HTTP cache — this class maintains its own range cache.
    request.setAttribute( QNetworkRequest::CacheLoadControlAttribute, QNetworkRequest::AlwaysNetwork );

    // The Ubuntu One server applies the byte range to the gzipped data, rather
    // than the raw data so we must disable compression.
    if ( m_url.host() == "files.one.ubuntu.com" )
    {
        request.setRawHeader( "Accept-Encoding", "identity" );
    }

    tDebug() << "######## CloudStream : HTTP request : ";
    tDebug() << "#### CloudStream : url : " << request.url();

    // Remember what this request is for, so onRequestFinished() can file the
    // reply bytes against the right offset.
    m_currentBlocklength = length;
    m_currentStart = start;

    m_reply = m_network->get( request );
    connect( m_reply, SIGNAL( sslErrors( QList<QSslError> ) ), SLOT( SSLErrors( QList<QSslError> ) ) );
    connect( m_reply, SIGNAL( finished() ), this, SLOT( onRequestFinished() ) );
    ++m_num_requests;

    // Request is in flight; data arrives asynchronously.
    return TagLib::ByteVector();
}
// save downloaded url to cache gbool GUrlCache::SaveUrl(const char *url,const CString &srcFilePath,time_t creationTime,CString &cacheFileName) { CString relFilePath,absFilePath; GFileInfo info; CString selectedCacheDirectory; selectedCacheDirectory = writeCacheDir; int urlStrip= 0; if (writeMediaCacheEnabled && writeMediaCacheDirValid) { // Check if file to cache is from recognized Media Library mirror site. // If so, cache it in Media Library directory instead of regular cache directory if (strprefix(url,mirror1)) { selectedCacheDirectory = writeMediaCacheDir; urlStrip=strlen(mirror1); } else if (strprefix(url,mirror2)) { selectedCacheDirectory = writeMediaCacheDir; urlStrip=strlen(mirror2); } else if (strprefix(url,mirror3)) { selectedCacheDirectory = writeMediaCacheDir; urlStrip=strlen(mirror3); } else if (strprefix(url,mirror4)) { selectedCacheDirectory = writeMediaCacheDir; urlStrip=strlen(mirror4); } } if ((urlStrip == 0) && (!writeCacheEnabled) ) return FALSE; if (!UrlToFilePath(url+urlStrip,relFilePath)) { return FALSE; } if (!CombineCreateDirectory(selectedCacheDirectory,relFilePath,absFilePath)) { return FALSE; } // end Laurent - Jan.2000 cacheFileName = absFilePath; if (!CopyFile(srcFilePath,absFilePath,FALSE,creationTime)) { DWORD lastErr = GetLastError(); if ( (lastErr ==ERROR_NOT_ENOUGH_MEMORY || lastErr == ERROR_OUTOFMEMORY ) && (!cleanerBusy) ) { TRACE("GurlCache::Was out of disk space try cleaning \n"); CheckCache(); // try to make space if (!CopyFile(srcFilePath,absFilePath,FALSE,creationTime)) // retry return FALSE; } else return FALSE; } if (creationTime>0) // store the URL lastModified date for once per session comparison SetUrlModified(url,creationTime); if (Exists(absFilePath,&info)) { bytesWrittenToCache += info.sizeLow; return TRUE; } else return FALSE; }