// Stores the 16-bit chunk hash for index aIndex (written big-endian) and
// marks the metadata dirty. Only in-place updates (aIndex < mHashCount) or
// appending exactly one new hash (aIndex == mHashCount) are allowed; the
// hash array grows geometrically (32 entries, then doubling) when appending.
nsresult CacheFileMetadata::SetHash(uint32_t aIndex, CacheHash::Hash16_t aHash) {
  LOG(("CacheFileMetadata::SetHash() [this=%p, idx=%d, hash=%x]", this, aIndex, aHash));
  MarkDirty();
  MOZ_ASSERT(aIndex <= mHashCount);
  if (aIndex > mHashCount) {
    // gap in the hash sequence is not allowed
    return NS_ERROR_INVALID_ARG;
  } else if (aIndex == mHashCount) {
    if ((aIndex + 1) * sizeof(CacheHash::Hash16_t) > mHashArraySize) {
      // reallocate hash array buffer
      if (mHashArraySize == 0)
        mHashArraySize = 32 * sizeof(CacheHash::Hash16_t);
      else
        mHashArraySize *= 2;
      mHashArray = static_cast<CacheHash::Hash16_t *>(
        moz_xrealloc(mHashArray, mHashArraySize));
    }
    mHashCount++;
  }
  // store in network byte order so the on-disk format is endian-independent
  NetworkEndian::writeUint16(&mHashArray[aIndex], aHash);
  DoMemoryReport(MemoryUsage());
  return NS_OK;
}
char* Arena::AllocateNewBlock(size_t block_bytes) { char* result = new char[block_bytes]; blocks_.push_back(result); memory_usage_.NoBarrier_Store( reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*))); return result; }
// Grows mBuf so it holds at least aSize bytes; never shrinks. The very first
// allocation is exact-sized (mAllocExactSize); later growth rounds aSize up
// to the next power of two via the bit-smearing trick, with a floor of
// kInitialBufSize, to amortize realloc cost.
void CacheFileMetadata::EnsureBuffer(uint32_t aSize) {
  if (mBufSize < aSize) {
    if (mAllocExactSize) {
      // If this is not the only allocation, use power of two for following
      // allocations.
      mAllocExactSize = false;
    } else {
      // find smallest power of 2 greater than or equal to aSize
      --aSize;
      aSize |= aSize >> 1;
      aSize |= aSize >> 2;
      aSize |= aSize >> 4;
      aSize |= aSize >> 8;
      aSize |= aSize >> 16;
      ++aSize;
    }
    if (aSize < kInitialBufSize) {
      aSize = kInitialBufSize;
    }
    mBufSize = aSize;
    mBuf = static_cast<char *>(moz_xrealloc(mBuf, mBufSize));
    DoMemoryReport(MemoryUsage());
  }
}
MemoryUsage CodeHeapPool::get_memory_usage() {
  // Snapshot the code heap; a pool that is not available for allocation
  // reports 0 as its max size.
  const size_t bytes_used = used_in_bytes();
  const size_t bytes_committed = _codeHeap->capacity();
  const size_t bytes_max = available_for_allocation() ? max_size() : 0;
  return MemoryUsage(initial_size(), bytes_used, bytes_committed, bytes_max);
}
MemoryUsage EdenMutableSpacePool::get_memory_usage() {
  // Eden snapshot; max size is reported as 0 while the pool is unavailable.
  const size_t limit = available_for_allocation() ? max_size() : 0;
  const size_t in_use = used_in_bytes();
  const size_t committed_now = _space->capacity_in_bytes();
  return MemoryUsage(initial_size(), in_use, committed_now, limit);
}
MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
  // Survivor-space snapshot; max size is 0 while the pool is unavailable.
  const size_t limit = available_for_allocation() ? max_size() : 0;
  const size_t in_use = used_in_bytes();
  const size_t committed_now = committed_in_bytes();
  return MemoryUsage(initial_size(), in_use, committed_now, limit);
}
MemoryUsage PSGenerationPool::get_memory_usage() {
  // Old-generation snapshot; max size is 0 while the pool is unavailable.
  const size_t limit = available_for_allocation() ? max_size() : 0;
  const size_t in_use = used_in_bytes();
  const size_t committed_now = _old_gen->capacity_in_bytes();
  return MemoryUsage(initial_size(), in_use, committed_now, limit);
}
MemoryUsage G1OldGenPool::get_memory_usage() {
  // G1 old gen reports its max size unconditionally — note there is no
  // available_for_allocation() check here, unlike the sibling pools.
  const size_t init_sz = initial_size();
  const size_t limit = max_size();
  const size_t in_use = used_in_bytes();
  const size_t committed_now = _g1mm->old_space_committed();
  return MemoryUsage(init_sz, in_use, committed_now, limit);
}
// Serializes the metadata into mWriteBuf and starts an asynchronous write at
// aOffset. Buffer layout:
//   [hash32 of everything after it][chunk hashes][header][key + '\0']
//   [elements][big-endian aOffset trailer]
// mWriteBuf is freed in OnDataWritten, or here on synchronous write failure.
nsresult CacheFileMetadata::WriteMetadata(uint32_t aOffset, CacheFileMetadataListener *aListener) {
  LOG(("CacheFileMetadata::WriteMetadata() [this=%p, offset=%d, listener=%p]", this, aOffset, aListener));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mWriteBuf);
  MOZ_ASSERT(!mKeyIsHash);

  nsresult rv;

  mIsDirty = false;

  mWriteBuf = static_cast<char *>(moz_xmalloc(sizeof(uint32_t) + mHashCount * sizeof(CacheHashUtils::Hash16_t) + sizeof(CacheFileMetadataHeader) + mKey.Length() + 1 + mElementsSize + sizeof(uint32_t)));

  // leave room for the leading checksum, then append each section in order
  char *p = mWriteBuf + sizeof(uint32_t);
  memcpy(p, mHashArray, mHashCount * sizeof(CacheHashUtils::Hash16_t));
  p += mHashCount * sizeof(CacheHashUtils::Hash16_t);
  memcpy(p, &mMetaHdr, sizeof(CacheFileMetadataHeader));
  p += sizeof(CacheFileMetadataHeader);
  memcpy(p, mKey.get(), mKey.Length());
  p += mKey.Length();
  *p = 0;
  p++;
  memcpy(p, mBuf, mElementsSize);
  p += mElementsSize;

  // checksum covers everything between the hash slot and the trailer
  CacheHashUtils::Hash32_t hash;
  hash = CacheHashUtils::Hash(mWriteBuf + sizeof(uint32_t), p - mWriteBuf - sizeof(uint32_t));
  *reinterpret_cast<uint32_t *>(mWriteBuf) = PR_htonl(hash);

  // trailer: where the metadata starts within the file
  *reinterpret_cast<uint32_t *>(p) = PR_htonl(aOffset);
  p += sizeof(uint32_t);

  mListener = aListener;
  rv = CacheFileIOManager::Write(mHandle, aOffset, mWriteBuf, p - mWriteBuf, true, this);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::WriteMetadata() - CacheFileIOManager::Write() " "failed synchronously. [this=%p, rv=0x%08x]", this, rv));
    mListener = nullptr;
    free(mWriteBuf);
    mWriteBuf = nullptr;
    NS_ENSURE_SUCCESS(rv, rv);
  }

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
void CacheFileMetadata::EnsureBuffer(uint32_t aSize) {
  // Grow the element buffer to at least aSize bytes; it never shrinks.
  // The memory report is refreshed on every call, grown or not.
  if (aSize > mBufSize) {
    mBufSize = aSize;
    mBuf = static_cast<char *>(moz_xrealloc(mBuf, mBufSize));
  }
  DoMemoryReport(MemoryUsage());
}
void MemoryPool::record_peak_memory_usage() { // Caller in JDK is responsible for synchronization - // acquire the lock for this memory pool before calling VM MemoryUsage usage = get_memory_usage(); size_t peak_used = get_max_value(usage.used(), _peak_usage.used()); size_t peak_committed = get_max_value(usage.committed(), _peak_usage.committed()); size_t peak_max_size = get_max_value(usage.max_size(), _peak_usage.max_size()); _peak_usage = MemoryUsage(initial_size(), peak_used, peak_committed, peak_max_size); }
// Constructs an image with the given dimensions and pixel format, no alpha
// channel, and a default-initialized frame record. Debug builds maintain
// global instance/memory counters (decremented again in the destructor).
ieImage::ieImage(ieWH wh_, iePixelFormat pf_)
  : ieRefCount(), wh(wh_), pf(pf_), at(ieAlphaType::None), Frame() {
#ifdef _DEBUG
  nInstances++;
  nMemoryUsage += MemoryUsage();
#endif
}
/**
 * Testing function
 * return true if there is no errors
 * return false if there are and the descriptions in strerr
 */
bool TestGraph::TestAll() {
  // MemoryUsage() is presumably reported in KB here — TODO confirm.
  float size = MemoryUsage();
  // String literals must bind to const char*; binding to a plain char* is
  // ill-formed since C++11 (and was deprecated before that).
  const char* unit = "KB";
  if (size >= 1024) {
    size /= 1024;
    unit = "MB";
  }
  printf("Graph used %.1f%s of RAM.\n", size, unit);
  return true;
}
void CacheFileMetadata::InitEmptyMetadata() {
  // Discard any previously loaded buffer and seed header fields for a
  // brand-new entry (fetch count starts at 1, never expires by default).
  if (mBuf != nullptr) {
    free(mBuf);
    mBufSize = 0;
    mBuf = nullptr;
  }
  mOffset = 0;
  mMetaHdr.mFetchCount = 1;
  mMetaHdr.mExpirationTime = NO_EXPIRATION_TIME;
  mMetaHdr.mKeySize = mKey.Length();

  DoMemoryReport(MemoryUsage());
}
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  // Sum committed vs reserved bytes over all auxiliary-data mappers. The
  // reserved total is reported as both "committed" and "max".
  size_t used_total = 0;
  used_total += _prev_bitmap_mapper->committed_size();
  used_total += _next_bitmap_mapper->committed_size();
  used_total += _bot_mapper->committed_size();
  used_total += _cardtable_mapper->committed_size();
  used_total += _card_counts_mapper->committed_size();

  size_t reserved_total = 0;
  reserved_total += _prev_bitmap_mapper->reserved_size();
  reserved_total += _next_bitmap_mapper->reserved_size();
  reserved_total += _bot_mapper->reserved_size();
  reserved_total += _cardtable_mapper->reserved_size();
  reserved_total += _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_total, reserved_total, reserved_total);
}
// Tears down this image's links in the doubly-linked frame chain: each
// neighbour's back-pointer to us is cleared BEFORE it is released, so a
// released neighbour cannot follow a dangling pointer. pimRest is released
// without unlinking (no back-pointer to us is cleared for it — presumably
// it only points away from this image; verify against FrameParams layout).
ieImage::~ieImage() {
  if (Frame.pimNext) {
    Frame.pimNext->FrameParams().pimPrev = nullptr;
    Frame.pimNext->Release();
  }
  if (Frame.pimPrev) {
    Frame.pimPrev->FrameParams().pimNext = nullptr;
    Frame.pimPrev->Release();
  }
  if (Frame.pimRest) {
    Frame.pimRest->Release();
  }
#ifdef _DEBUG
  // balance the counters incremented in the constructor
  nInstances--;
  nMemoryUsage -= MemoryUsage();
#endif
}
// Resets this object to represent a freshly created entry: drops any parsed
// buffer, seeds a new header (current version, zero fetch count, no
// expiration) and, if the backing file already holds data, truncates it.
void CacheFileMetadata::InitEmptyMetadata() {
  if (mBuf) {
    free(mBuf);
    mBuf = nullptr;
    mBufSize = 0;
  }
  mOffset = 0;
  mMetaHdr.mVersion = kCacheEntryVersion;
  mMetaHdr.mFetchCount = 0;
  mMetaHdr.mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
  mMetaHdr.mKeySize = mKey.Length();

  DoMemoryReport(MemoryUsage());

  // We're creating a new entry. If there is any old data truncate it.
  if (mHandle && mHandle->FileExists() && mHandle->FileSize()) {
    CacheFileIOManager::TruncateSeekSetEOF(mHandle, 0, 0, nullptr);
  }
}
// Fallible buffer growth: rejects requests above kMaxElementsSize and
// returns NS_ERROR_OUT_OF_MEMORY (instead of aborting) when realloc fails.
// Growth policy: the first allocation is exact-sized; subsequent growth
// rounds aSize up to the next power of two (bit-smearing trick) with a
// kInitialBufSize floor. mBuf/mBufSize are only updated on success.
nsresult CacheFileMetadata::EnsureBuffer(uint32_t aSize) {
  if (aSize > kMaxElementsSize) {
    return NS_ERROR_FAILURE;
  }
  if (mBufSize < aSize) {
    if (mAllocExactSize) {
      // If this is not the only allocation, use power of two for following
      // allocations.
      mAllocExactSize = false;
    } else {
      // find smallest power of 2 greater than or equal to aSize
      --aSize;
      aSize |= aSize >> 1;
      aSize |= aSize >> 2;
      aSize |= aSize >> 4;
      aSize |= aSize >> 8;
      aSize |= aSize >> 16;
      ++aSize;
    }
    if (aSize < kInitialBufSize) {
      aSize = kInitialBufSize;
    }
    char *newBuf = static_cast<char *>(realloc(mBuf, aSize));
    if (!newBuf) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    mBufSize = aSize;
    mBuf = newBuf;
    DoMemoryReport(MemoryUsage());
  }
  return NS_OK;
}
nsresult CacheFileMetadata::OnDataWritten(CacheFileHandle *aHandle, const char *aBuf, nsresult aResult) {
  LOG(("CacheFileMetadata::OnDataWritten() [this=%p, handle=%p, result=0x%08x]", this, aHandle, aResult));

  MOZ_ASSERT(mListener);
  MOZ_ASSERT(mWriteBuf);

  // The serialized buffer is no longer needed once the write has completed.
  free(mWriteBuf);
  mWriteBuf = nullptr;

  // Detach the listener before notifying so a re-entrant call finds none.
  nsCOMPtr<CacheFileMetadataListener> pendingListener;
  mListener.swap(pendingListener);
  pendingListener->OnMetadataWritten(aResult);

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
// Initializes the pool's identity and sizes; sensors and the pool's Java
// mirror object start unset. Collection usage starts at (initial, 0, 0, max).
// Usage thresholds support both high and low watermarks, the GC threshold
// only a high one (both flags of its ThresholdSupport get the same value).
MemoryPool::MemoryPool(const char* name, PoolType type, size_t init_size, size_t max_size, bool support_usage_threshold, bool support_gc_threshold) {
  _name = name;
  _initial_size = init_size;
  _max_size = max_size;
  _memory_pool_obj = NULL;
  _available_for_allocation = true;
  _num_managers = 0;
  _type = type;

  // initialize the max and init size of collection usage
  _after_gc_usage = MemoryUsage(_initial_size, 0, 0, _max_size);

  _usage_sensor = NULL;
  _gc_usage_sensor = NULL;
  // usage threshold supports both high and low threshold
  _usage_threshold = new ThresholdSupport(support_usage_threshold, support_usage_threshold);
  // gc usage threshold supports only high threshold
  _gc_usage_threshold = new ThresholdSupport(support_gc_threshold, support_gc_threshold);
}
// Builds the octree from an oriented point set (Poisson-reconstruction setup):
//  1. computes the bounding cube (_center/_scale) of the input points,
//  2. when splatDepth > 0, splats per-point density weights down to
//     splatDepth (first pass),
//  3. splats the negated normals into the tree and, when
//     constraintWeight > 0, accumulates per-node point-value constraints.
// Returns the number of points that fell inside the unit cube and carried a
// usable (finite, non-near-zero) normal. _boundaryType == 0 shifts depths by
// one and doubles the scale; useConfidence keeps normal length as a weight.
int setTree( unsigned count, const Point3D< Real > * inPoints, const Point3D< Real > * inNormals, int maxDepth, int minDepth, int splatDepth, Real samplesPerNode, Real scaleFactor, bool useConfidence, Real constraintWeight, int adaptiveExponent)
{
  if ( splatDepth < 0 ) splatDepth = 0;
  this->samplesPerNode = samplesPerNode;
  this->splatDepth = splatDepth;
  if( this->_boundaryType == 0 )
  {
    // free boundary: everything is embedded one level deeper
    maxDepth++, minDepth = std::max< int >( 1 , minDepth )+1;
    if (splatDepth > 0 ) splatDepth++;
  }
  else
  {
    minDepth = std::max< int >( 0 , minDepth );
  }
  this->_minDepth = std::min< int >( minDepth , maxDepth );
  this->_constrainValues = (constraintWeight>0);
  double pointWeightSum = 0;
  Point3D< Real > min , max;
  TreeOctNode::NeighborKey3 neighborKey;
  neighborKey.set( maxDepth );
  this->tree.setFullDepth( _minDepth );
  // Read through once to get the center and scale
  {
    for (unsigned k=0; k<count; ++k)
    {
      const Point3D< Real >& p = inPoints[k];
      for( int i=0 ; i<DIMENSION ; i++ )
      {
        if (k)
        {
          if( p[i] < min[i] ) min[i] = p[i];
          else if( p[i] > max[i] ) max[i] = p[i];
        }
        else
        {
          // first point initializes the bounding box
          min[i] = max[i] = p[i];
        }
      }
    }
    this->_center = ( max+min ) /2;
    this->_scale = std::max< Real >( max[0]-min[0] , std::max< Real >( max[1]-min[1] , max[2]-min[2] ) ) * 2;
    if( this->_boundaryType == 0 ) this->_scale *= 2;
  }
  //update scale and center with scale factor
  {
    this->_scale *= scaleFactor;
    for( int i=0 ; i<DIMENSION ; i++ ) this->_center[i] -= _scale/2;
  }
  // First pass: accumulate per-sample density weights down to splatDepth.
  if( splatDepth > 0 )
  {
    const Point3D< Real >* _p = inPoints;
    const Point3D< Real >* _n = inNormals;
    for (unsigned k=0; k<count; ++k, ++_p, ++_n)
    {
      // map the point into the unit cube
      Point3D< Real > p = ( inPoints[k] - _center ) / _scale;
      if( !_inBounds(p) ) continue;
      Point3D< Real > myCenter = Point3D< Real >( Real(0.5) , Real(0.5) , Real(0.5) );
      Real myWidth = Real(1.0);
      Real weight = Real(1.0);
      if( useConfidence )
      {
        weight = Real( Length(inNormals[k]) );
      }
      TreeOctNode* temp = &this->tree;
      int d = 0;
      // descend to the node at splatDepth containing p, splatting the
      // weight at every level on the way down
      while( d < splatDepth )
      {
        UpdateWeightContribution( temp , p , neighborKey , weight );
        if( !temp->children ) temp->initChildren();
        int cIndex = TreeOctNode::CornerIndex( myCenter , p );
        temp = temp->children + cIndex;
        myWidth /= 2;
        if( cIndex&1 ) myCenter[0] += myWidth/2;
        else myCenter[0] -= myWidth/2;
        if( cIndex&2 ) myCenter[1] += myWidth/2;
        else myCenter[1] -= myWidth/2;
        if( cIndex&4 ) myCenter[2] += myWidth/2;
        else myCenter[2] -= myWidth/2;
        d++;
      }
      UpdateWeightContribution( temp , p , neighborKey , weight );
    }
  }
  //normals
  this->normals = new std::vector< Point3D<Real> >();
  int cnt = 0;
  {
    for (unsigned k=0; k<count; ++k)
    {
      Point3D< Real > p = ( inPoints[k] - _center ) / _scale;
      if( !_inBounds(p) ) continue;
      Point3D< Real > n = inNormals[k] * Real(-1.0);
      //normalize n
      Real l = Real( Length( n ) );
      // skip NaN (l != l) or near-zero-length normals
      if( l!=l || l<=EPSILON ) continue;
      if( !useConfidence ) n /= l;
      Point3D< Real > myCenter = Point3D< Real >( Real(0.5) , Real(0.5) , Real(0.5) );
      Real myWidth = Real(1.0);
      Real pointWeight = Real(1.0f);
      if ( samplesPerNode > 0 && splatDepth )
      {
        pointWeight = SplatOrientedPoint( p , n , neighborKey , splatDepth , samplesPerNode , _minDepth , maxDepth );
      }
      else
      {
        TreeOctNode* temp = &this->tree;
        int d = 0;
        if( splatDepth )
        {
          // walk down to splatDepth to sample the local density weight
          while( d < splatDepth )
          {
            int cIndex = TreeOctNode::CornerIndex(myCenter,p);
            temp = &temp->children[cIndex];
            myWidth /= 2;
            if(cIndex&1) myCenter[0] += myWidth/2;
            else myCenter[0] -= myWidth/2;
            if(cIndex&2) myCenter[1] += myWidth/2;
            else myCenter[1] -= myWidth/2;
            if(cIndex&4) myCenter[2] += myWidth/2;
            else myCenter[2] -= myWidth/2;
            d++;
          }
          pointWeight = GetSampleWeight( temp , p , neighborKey );
        }
        {
          for (int i=0 ; i<DIMENSION ; i++ ) n[i] *= pointWeight;
        }
        // continue down to maxDepth, creating children as needed
        while( d < maxDepth )
        {
          if( !temp->children ) temp->initChildren();
          int cIndex = TreeOctNode::CornerIndex(myCenter,p);
          temp = &temp->children[cIndex];
          myWidth /= 2;
          if(cIndex&1) myCenter[0] += myWidth/2;
          else myCenter[0] -= myWidth/2;
          if(cIndex&2) myCenter[1] += myWidth/2;
          else myCenter[1] -= myWidth/2;
          if(cIndex&4) myCenter[2] += myWidth/2;
          else myCenter[2] -= myWidth/2;
          d++;
        }
        SplatOrientedPoint( temp , p , n , neighborKey );
      }
      pointWeightSum += pointWeight;
      if ( this->_constrainValues )
      {
        // Accumulate a value constraint at every node on the path to p's leaf.
        int d = 0;
        TreeOctNode* temp = &this->tree;
        Point3D< Real > myCenter = Point3D< Real >( Real(0.5) , Real(0.5) , Real(0.5) );
        Real myWidth = Real(1.0);
        while( true )
        {
          int idx = temp->nodeData.pointIndex;
          if( idx == -1 )
          {
            idx = static_cast<int>( this->_points.size() );
            this->_points.push_back( PointData( p , Real(1.0) ) );
            temp->nodeData.pointIndex = idx;
          }
          else
          {
            this->_points[idx].weight += Real(1.0);
            this->_points[idx].position += p;
          }
          int cIndex = TreeOctNode::CornerIndex( myCenter , p );
          if ( !temp->children ) break;
          temp = &temp->children[cIndex];
          myWidth /= 2;
          if( cIndex&1 ) myCenter[0] += myWidth/2;
          else myCenter[0] -= myWidth/2;
          if( cIndex&2 ) myCenter[1] += myWidth/2;
          else myCenter[1] -= myWidth/2;
          if( cIndex&4 ) myCenter[2] += myWidth/2;
          else myCenter[2] -= myWidth/2;
          d++;
        }
      }
      ++cnt;
    }
  }
  if( this->_boundaryType == 0 ) pointWeightSum *= Real(4.0);
  // normalize the screening weight by the average per-point splat weight
  constraintWeight *= static_cast<Real>(pointWeightSum);
  constraintWeight /= static_cast<Real>(cnt);
  MemoryUsage( );
  if( this->_constrainValues )
  {
    // Turn accumulated position sums into weighted averages and rescale the
    // per-node weights by depth (adaptiveExponent) and constraintWeight.
    for( TreeOctNode* node=this->tree.nextNode() ; node ; node=this->tree.nextNode(node) )
    {
      if( node->nodeData.pointIndex != -1 )
      {
        int idx = node->nodeData.pointIndex;
        this->_points[idx].position /= this->_points[idx].weight;
        int e = ( this->_boundaryType == 0 ? node->depth()-1 : node->depth() ) * adaptiveExponent - ( this->_boundaryType == 0 ? maxDepth-1 : maxDepth ) * (adaptiveExponent-1);
        if ( e < 0 ) this->_points[idx].weight /= Real( 1<<(-e) );
        else this->_points[idx].weight *= Real( 1<< e );
        this->_points[idx].weight *= Real( constraintWeight );
      }
    }
  }
#if FORCE_NEUMANN_FIELD
  if( this->_boundaryType == 1 )
  {
    // zero normal components on boundary faces to enforce Neumann conditions
    for( TreeOctNode* node=this->tree.nextNode() ; node ; node=this->tree.nextNode( node ) )
    {
      int d , off[3];
      node->depthAndOffset( d , off );
      int res = 1<<d;
      if( node->nodeData.normalIndex < 0 ) continue;
      Point3D< Real >& normal = (*this->normals)[node->nodeData.normalIndex];
      for( int d=0 ; d<3 ; d++ ) if ( off[d]==0 || off[d]==res-1 ) normal[d] = 0;
    }
  }
#endif // FORCE_NEUMANN_FIELD
  MemoryUsage();
  return cnt;
}
// Starts asynchronous loading of the metadata stored at the end of the cache
// file. Empty or too-small files fall back to fresh empty metadata — except
// when the key is only a hash (mKeyIsHash), in which case the real key cannot
// be recovered and the file is doomed instead. Reads the trailing, roughly
// 4k-aligned portion of the file; OnDataRead continues the work.
nsresult CacheFileMetadata::ReadMetadata(CacheFileMetadataListener *aListener) {
  LOG(("CacheFileMetadata::ReadMetadata() [this=%p, listener=%p]", this, aListener));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mHashArray);
  MOZ_ASSERT(!mBuf);
  MOZ_ASSERT(!mWriteBuf);

  nsresult rv;

  int64_t size = mHandle->FileSize();
  MOZ_ASSERT(size != -1);

  if (size == 0) {
    if (mKeyIsHash) {
      LOG(("CacheFileMetadata::ReadMetadata() - Filesize == 0, cannot create " "empty metadata since key is a hash. [this=%p]", this));
      CacheFileIOManager::DoomFile(mHandle, nullptr);
      return NS_ERROR_NOT_AVAILABLE;
    }
    // this is a new entry
    LOG(("CacheFileMetadata::ReadMetadata() - Filesize == 0, creating empty " "metadata. [this=%p]", this));
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  if (size < int64_t(sizeof(CacheFileMetadataHeader) + 2*sizeof(uint32_t))) {
    if (mKeyIsHash) {
      LOG(("CacheFileMetadata::ReadMetadata() - File is corrupted, cannot " "create empty metadata since key is a hash. [this=%p, " "filesize=%lld]", this, size));
      CacheFileIOManager::DoomFile(mHandle, nullptr);
      return NS_ERROR_FILE_CORRUPTED;
    }
    // there must be at least checksum, header and offset
    LOG(("CacheFileMetadata::ReadMetadata() - File is corrupted, creating " "empty metadata. [this=%p, filesize=%lld]", this, size));
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  // round offset to 4k blocks
  int64_t offset = (size / kAlignSize) * kAlignSize;

  // back up one block if the tail alone is too small to be useful
  if (size - offset < kMinMetadataRead && offset > kAlignSize)
    offset -= kAlignSize;

  mBufSize = size - offset;
  mBuf = static_cast<char *>(moz_xmalloc(mBufSize));

  DoMemoryReport(MemoryUsage());

  LOG(("CacheFileMetadata::ReadMetadata() - Reading metadata from disk, trying " "offset=%lld, filesize=%lld [this=%p]", offset, size, this));

  mListener = aListener;
  rv = CacheFileIOManager::Read(mHandle, offset, mBuf, mBufSize, this);
  if (NS_FAILED(rv)) {
    if (mKeyIsHash) {
      LOG(("CacheFileMetadata::ReadMetadata() - CacheFileIOManager::Read() " "failed synchronously, cannot create empty metadata since key is " "a hash. [this=%p, rv=0x%08x]", this, rv));
      CacheFileIOManager::DoomFile(mHandle, nullptr);
      return rv;
    }
    LOG(("CacheFileMetadata::ReadMetadata() - CacheFileIOManager::Read() failed" " synchronously, creating empty metadata. [this=%p, rv=0x%08x]", this, rv));
    mListener = nullptr;
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  return NS_OK;
}
// Parses the metadata previously read into mBuf. aMetaOffset is where the
// metadata begins in the FILE (it determines how many chunk hashes must be
// present); aBufOffset is where it begins within mBuf. Validates section
// offsets, version, key (or adopts it when aHaveKey is false), checksum and
// elements. On success copies the chunk hashes out, bumps the fetch count
// (marking the entry dirty) and moves the element data to the front of mBuf.
nsresult CacheFileMetadata::ParseMetadata(uint32_t aMetaOffset, uint32_t aBufOffset, bool aHaveKey) {
  LOG(("CacheFileMetadata::ParseMetadata() [this=%p, metaOffset=%d, " "bufOffset=%d, haveKey=%u]", this, aMetaOffset, aBufOffset, aHaveKey));

  nsresult rv;

  uint32_t metaposOffset = mBufSize - sizeof(uint32_t);
  uint32_t hashesOffset = aBufOffset + sizeof(uint32_t);
  // one 16-bit hash per kChunkSize of data, rounded up
  uint32_t hashCount = aMetaOffset / kChunkSize;
  if (aMetaOffset % kChunkSize)
    hashCount++;
  uint32_t hashesLen = hashCount * sizeof(CacheHash::Hash16_t);
  uint32_t hdrOffset = hashesOffset + hashesLen;
  uint32_t keyOffset = hdrOffset + sizeof(CacheFileMetadataHeader);

  LOG(("CacheFileMetadata::ParseMetadata() [this=%p]\n metaposOffset=%d\n " "hashesOffset=%d\n hashCount=%d\n hashesLen=%d\n hdfOffset=%d\n " "keyOffset=%d\n", this, metaposOffset, hashesOffset, hashCount, hashesLen, hdrOffset, keyOffset));

  if (keyOffset > metaposOffset) {
    LOG(("CacheFileMetadata::ParseMetadata() - Wrong keyOffset! [this=%p]", this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  mMetaHdr.ReadFromBuf(mBuf + hdrOffset);

  if (mMetaHdr.mVersion != kCacheEntryVersion) {
    LOG(("CacheFileMetadata::ParseMetadata() - Not a version we understand to. " "[version=0x%x, this=%p]", mMetaHdr.mVersion, this));
    return NS_ERROR_UNEXPECTED;
  }

  uint32_t elementsOffset = mMetaHdr.mKeySize + keyOffset + 1;

  if (elementsOffset > metaposOffset) {
    LOG(("CacheFileMetadata::ParseMetadata() - Wrong elementsOffset %d " "[this=%p]", elementsOffset, this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  // check that key ends with \0
  if (mBuf[elementsOffset - 1] != 0) {
    LOG(("CacheFileMetadata::ParseMetadata() - Elements not null terminated. " "[this=%p]", this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  if (!aHaveKey) {
    // get the key form metadata
    mKey.Assign(mBuf + keyOffset, mMetaHdr.mKeySize);

    rv = ParseKey(mKey);
    if (NS_FAILED(rv))
      return rv;
  } else {
    // key must match exactly, otherwise this is a hash collision
    if (mMetaHdr.mKeySize != mKey.Length()) {
      LOG(("CacheFileMetadata::ParseMetadata() - Key collision (1), key=%s " "[this=%p]", nsCString(mBuf + keyOffset, mMetaHdr.mKeySize).get(), this));
      return NS_ERROR_FILE_CORRUPTED;
    }

    if (memcmp(mKey.get(), mBuf + keyOffset, mKey.Length()) != 0) {
      LOG(("CacheFileMetadata::ParseMetadata() - Key collision (2), key=%s " "[this=%p]", nsCString(mBuf + keyOffset, mMetaHdr.mKeySize).get(), this));
      return NS_ERROR_FILE_CORRUPTED;
    }
  }

  // check metadata hash (data from hashesOffset to metaposOffset)
  CacheHash::Hash32_t hashComputed, hashExpected;
  hashComputed = CacheHash::Hash(mBuf + hashesOffset, metaposOffset - hashesOffset);
  hashExpected = NetworkEndian::readUint32(mBuf + aBufOffset);

  if (hashComputed != hashExpected) {
    LOG(("CacheFileMetadata::ParseMetadata() - Metadata hash mismatch! Hash of " "the metadata is %x, hash in file is %x [this=%p]", hashComputed, hashExpected, this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  // check elements
  rv = CheckElements(mBuf + elementsOffset, metaposOffset - elementsOffset);
  if (NS_FAILED(rv))
    return rv;

  mHashArraySize = hashesLen;
  mHashCount = hashCount;
  if (mHashArraySize) {
    mHashArray = static_cast<CacheHash::Hash16_t *>(
      moz_xmalloc(mHashArraySize));
    memcpy(mHashArray, mBuf + hashesOffset, mHashArraySize);
  }

  mMetaHdr.mFetchCount++;
  MarkDirty();

  // keep only the elements; everything else has been extracted above
  mElementsSize = metaposOffset - elementsOffset;
  memmove(mBuf, mBuf + elementsOffset, mElementsSize);
  mOffset = aMetaOffset;

  // TODO: shrink memory if buffer is too big

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
// Completion callback for the asynchronous metadata read. Decodes the real
// metadata offset from the last 4 bytes of the buffer; if the data read so
// far does not reach back to that offset, grows the buffer and issues one
// more read for the missing prefix, otherwise parses the metadata. Every
// failure path degrades to fresh empty metadata and still reports NS_OK to
// the listener (a broken entry is treated as a new one, not an error).
nsresult CacheFileMetadata::OnDataRead(CacheFileHandle *aHandle, char *aBuf, nsresult aResult) {
  LOG(("CacheFileMetadata::OnDataRead() [this=%p, handle=%p, result=0x%08x]", this, aHandle, aResult));

  MOZ_ASSERT(mListener);

  nsresult rv, retval;
  nsCOMPtr<CacheFileMetadataListener> listener;

  if (NS_FAILED(aResult)) {
    LOG(("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() failed" ", creating empty metadata. [this=%p, rv=0x%08x]", this, aResult));

    InitEmptyMetadata();
    retval = NS_OK;

    mListener.swap(listener);
    listener->OnMetadataRead(retval);
    return NS_OK;
  }

  // check whether we have read all necessary data
  uint32_t realOffset = NetworkEndian::readUint32(mBuf + mBufSize - sizeof(uint32_t));

  int64_t size = mHandle->FileSize();
  MOZ_ASSERT(size != -1);

  if (realOffset >= size) {
    LOG(("CacheFileMetadata::OnDataRead() - Invalid realOffset, creating " "empty metadata. [this=%p, realOffset=%d, size=%lld]", this, realOffset, size));

    InitEmptyMetadata();
    retval = NS_OK;

    mListener.swap(listener);
    listener->OnMetadataRead(retval);
    return NS_OK;
  }

  // file offset corresponding to the start of what we already read
  uint32_t usedOffset = size - mBufSize;

  if (realOffset < usedOffset) {
    uint32_t missing = usedOffset - realOffset;
    // we need to read more data
    mBuf = static_cast<char *>(moz_xrealloc(mBuf, mBufSize + missing));
    // shift existing data so the missing prefix can be read at the front
    memmove(mBuf + missing, mBuf, mBufSize);
    mBufSize += missing;

    DoMemoryReport(MemoryUsage());

    LOG(("CacheFileMetadata::OnDataRead() - We need to read %d more bytes to " "have full metadata. [this=%p]", missing, this));

    rv = CacheFileIOManager::Read(mHandle, realOffset, mBuf, missing, true, this);
    if (NS_FAILED(rv)) {
      LOG(("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() " "failed synchronously, creating empty metadata. [this=%p, " "rv=0x%08x]", this, rv));

      InitEmptyMetadata();
      retval = NS_OK;

      mListener.swap(listener);
      listener->OnMetadataRead(retval);
      return NS_OK;
    }

    return NS_OK;
  }

  // We have all data according to offset information at the end of the entry.
  // Try to parse it.
  rv = ParseMetadata(realOffset, realOffset - usedOffset, true);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::OnDataRead() - Error parsing metadata, creating " "empty metadata. [this=%p]", this));
    InitEmptyMetadata();
    retval = NS_OK;
  } else {
    retval = NS_OK;
  }

  mListener.swap(listener);
  listener->OnMetadataRead(retval);

  return NS_OK;
}
// Older parsing path: the header is accessed via a raw cast into the buffer
// and the checksum is compared with PR_ntohl. Validates section offsets, key
// (or, when the entry was looked up by hash only, extracts the original key
// stored in the metadata), checksum and elements. On success copies the
// chunk hashes out, copies the header, bumps the fetch count (marking the
// entry dirty) and moves the element data to the front of mBuf.
nsresult CacheFileMetadata::ParseMetadata(uint32_t aMetaOffset, uint32_t aBufOffset) {
  LOG(("CacheFileMetadata::ParseMetadata() [this=%p, metaOffset=%d, " "bufOffset=%d]", this, aMetaOffset, aBufOffset));

  nsresult rv;

  uint32_t metaposOffset = mBufSize - sizeof(uint32_t);
  uint32_t hashesOffset = aBufOffset + sizeof(uint32_t);
  // one 16-bit hash per kChunkSize of data, rounded up
  uint32_t hashCount = aMetaOffset / kChunkSize;
  if (aMetaOffset % kChunkSize)
    hashCount++;
  uint32_t hashesLen = hashCount * sizeof(CacheHashUtils::Hash16_t);
  uint32_t hdrOffset = hashesOffset + hashesLen;
  uint32_t keyOffset = hdrOffset + sizeof(CacheFileMetadataHeader);

  LOG(("CacheFileMetadata::ParseMetadata() [this=%p]\n metaposOffset=%d\n " "hashesOffset=%d\n hashCount=%d\n hashesLen=%d\n hdfOffset=%d\n " "keyOffset=%d\n", this, metaposOffset, hashesOffset, hashCount, hashesLen, hdrOffset, keyOffset));

  if (keyOffset > metaposOffset) {
    LOG(("CacheFileMetadata::ParseMetadata() - Wrong keyOffset! [this=%p]", this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  uint32_t elementsOffset = reinterpret_cast<CacheFileMetadataHeader *>(mBuf + hdrOffset)->mKeySize + keyOffset + 1;

  if (elementsOffset > metaposOffset) {
    LOG(("CacheFileMetadata::ParseMetadata() - Wrong elementsOffset %d " "[this=%p]", elementsOffset, this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  // check that key ends with \0
  if (mBuf[elementsOffset - 1] != 0) {
    LOG(("CacheFileMetadata::ParseMetadata() - Elements not null terminated. " "[this=%p]", this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  nsAutoCString origKey;

  uint32_t keySize = reinterpret_cast<CacheFileMetadataHeader *>(mBuf + hdrOffset)->mKeySize;

  if (mKeyIsHash) {
    // get the original key
    origKey.Assign(mBuf + keyOffset, keySize);
  } else {
    // key must match exactly, otherwise this is a hash collision
    if (keySize != mKey.Length()) {
      LOG(("CacheFileMetadata::ParseMetadata() - Key collision (1), key=%s " "[this=%p]", nsCString(mBuf + keyOffset, keySize).get(), this));
      return NS_ERROR_FILE_CORRUPTED;
    }

    if (memcmp(mKey.get(), mBuf + keyOffset, mKey.Length()) != 0) {
      LOG(("CacheFileMetadata::ParseMetadata() - Key collision (2), key=%s " "[this=%p]", nsCString(mBuf + keyOffset, keySize).get(), this));
      return NS_ERROR_FILE_CORRUPTED;
    }
  }

  // check metadata hash (data from hashesOffset to metaposOffset)
  CacheHashUtils::Hash32_t hash;
  hash = CacheHashUtils::Hash(mBuf + hashesOffset, metaposOffset - hashesOffset);

  if (hash != PR_ntohl(*(reinterpret_cast<uint32_t *>(mBuf + aBufOffset)))) {
    LOG(("CacheFileMetadata::ParseMetadata() - Metadata hash mismatch! Hash of " "the metadata is %x, hash in file is %x [this=%p]", hash, PR_ntohl(*(reinterpret_cast<uint32_t *>(mBuf + aBufOffset))), this));
    return NS_ERROR_FILE_CORRUPTED;
  }

  // check elements
  rv = CheckElements(mBuf + elementsOffset, metaposOffset - elementsOffset);
  if (NS_FAILED(rv))
    return rv;

  mHashArraySize = hashesLen;
  mHashCount = hashCount;
  if (mHashArraySize) {
    mHashArray = static_cast<CacheHashUtils::Hash16_t *>(
      moz_xmalloc(mHashArraySize));
    memcpy(mHashArray, mBuf + hashesOffset, mHashArraySize);
  }

  memcpy(&mMetaHdr, mBuf + hdrOffset, sizeof(CacheFileMetadataHeader));
  mMetaHdr.mFetchCount++;
  MarkDirty();

  // keep only the elements; everything else has been extracted above
  mElementsSize = metaposOffset - elementsOffset;
  memmove(mBuf, mBuf + elementsOffset, mElementsSize);
  mOffset = aMetaOffset;

  if (mKeyIsHash) {
    // adopt the real key recovered from the metadata
    mKey = origKey;
    mKeyIsHash = false;
  }

  // TODO: shrink memory if buffer is too big

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
// Synchronously reads and parses metadata from aFile (used without a cache
// file handle, e.g. off the IO thread). Reads the 4-byte big-endian metadata
// offset stored at the end of the file, then loads everything from that
// offset onward into mBuf and hands it to ParseMetadata().
// Returns an error (without logging noise) when the file cannot be sized,
// opened, seeked or fully read, or when the trailer is inconsistent.
nsresult CacheFileMetadata::SyncReadMetadata(nsIFile *aFile) {
  LOG(("CacheFileMetadata::SyncReadMetadata() [this=%p]", this));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mHandle);
  MOZ_ASSERT(!mHashArray);
  MOZ_ASSERT(!mBuf);
  MOZ_ASSERT(!mWriteBuf);
  MOZ_ASSERT(mKey.IsEmpty());

  nsresult rv;

  int64_t fileSize;
  rv = aFile->GetFileSize(&fileSize);
  if (NS_FAILED(rv)) {
    // Don't bloat the console
    return rv;
  }

  // Robustness fix: a valid file must contain at least the 4-byte
  // metadata-offset trailer. Without this check the PR_Seek64 below would be
  // handed a negative offset for an empty/tiny file.
  if (fileSize < static_cast<int64_t>(sizeof(uint32_t))) {
    return NS_ERROR_FILE_CORRUPTED;
  }

  PRFileDesc *fd;
  rv = aFile->OpenNSPRFileDesc(PR_RDONLY, 0600, &fd);
  NS_ENSURE_SUCCESS(rv, rv);

  // seek to the trailer holding the metadata offset
  int64_t offset = PR_Seek64(fd, fileSize - sizeof(uint32_t), PR_SEEK_SET);
  if (offset == -1) {
    PR_Close(fd);
    return NS_ERROR_FAILURE;
  }

  uint32_t metaOffset;
  int32_t bytesRead = PR_Read(fd, &metaOffset, sizeof(uint32_t));
  if (bytesRead != sizeof(uint32_t)) {
    PR_Close(fd);
    return NS_ERROR_FAILURE;
  }

  metaOffset = NetworkEndian::readUint32(&metaOffset);
  if (metaOffset > fileSize) {
    PR_Close(fd);
    return NS_ERROR_FAILURE;
  }

  mBufSize = fileSize - metaOffset;
  mBuf = static_cast<char *>(moz_xmalloc(mBufSize));

  DoMemoryReport(MemoryUsage());

  offset = PR_Seek64(fd, metaOffset, PR_SEEK_SET);
  if (offset == -1) {
    PR_Close(fd);
    return NS_ERROR_FAILURE;
  }

  bytesRead = PR_Read(fd, mBuf, mBufSize);
  PR_Close(fd);
  if (bytesRead != static_cast<int32_t>(mBufSize)) {
    return NS_ERROR_FAILURE;
  }

  rv = ParseMetadata(metaOffset, 0, false);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
// Serializes the metadata into mWriteBuf and starts an asynchronous write at
// aOffset. Buffer layout:
//   [hash32 of everything after it][chunk hashes][header][key + '\0']
//   [elements][big-endian aOffset trailer]
// With a listener, mWriteBuf itself is handed to the IO manager and freed in
// OnDataWritten; without one, a copy is written that the IO manager releases.
nsresult CacheFileMetadata::WriteMetadata(uint32_t aOffset, CacheFileMetadataListener *aListener) {
  LOG(("CacheFileMetadata::WriteMetadata() [this=%p, offset=%d, listener=%p]", this, aOffset, aListener));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mWriteBuf);

  nsresult rv;

  mIsDirty = false;

  mWriteBuf = static_cast<char *>(moz_xmalloc(sizeof(uint32_t) + mHashCount * sizeof(CacheHash::Hash16_t) + sizeof(CacheFileMetadataHeader) + mKey.Length() + 1 + mElementsSize + sizeof(uint32_t)));

  // leave room for the leading checksum, then append each section in order
  char *p = mWriteBuf + sizeof(uint32_t);
  memcpy(p, mHashArray, mHashCount * sizeof(CacheHash::Hash16_t));
  p += mHashCount * sizeof(CacheHash::Hash16_t);
  mMetaHdr.WriteToBuf(p);
  p += sizeof(CacheFileMetadataHeader);
  memcpy(p, mKey.get(), mKey.Length());
  p += mKey.Length();
  *p = 0;
  p++;
  memcpy(p, mBuf, mElementsSize);
  p += mElementsSize;

  // checksum covers everything between the hash slot and the trailer
  CacheHash::Hash32_t hash;
  hash = CacheHash::Hash(mWriteBuf + sizeof(uint32_t), p - mWriteBuf - sizeof(uint32_t));
  NetworkEndian::writeUint32(mWriteBuf, hash);

  // trailer: where the metadata starts within the file
  NetworkEndian::writeUint32(p, aOffset);
  p += sizeof(uint32_t);

  char * writeBuffer;
  if (aListener) {
    mListener = aListener;
    writeBuffer = mWriteBuf;
  } else {
    // We are not going to pass |this| as callback to CacheFileIOManager::Write
    // so we must allocate a new buffer that will be released automatically when
    // write is finished. This is actually better than to let
    // CacheFileMetadata::OnDataWritten do the job, since when dispatching the
    // result from some reason fails during shutdown, we would unnecessarily leak
    // both this object and the buffer.
    writeBuffer = static_cast<char *>(moz_xmalloc(p - mWriteBuf));
    // BUG FIX: source and destination were swapped here, so the freshly
    // allocated (uninitialized) buffer was the one sent to disk. Copy the
    // assembled metadata INTO writeBuffer.
    memcpy(writeBuffer, mWriteBuf, p - mWriteBuf);
  }

  rv = CacheFileIOManager::Write(mHandle, aOffset, writeBuffer, p - mWriteBuf, true, aListener ? this : nullptr);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::WriteMetadata() - CacheFileIOManager::Write() " "failed synchronously. [this=%p, rv=0x%08x]", this, rv));
    mListener = nullptr;
    if (writeBuffer != mWriteBuf) {
      free(writeBuffer);
    }
    free(mWriteBuf);
    mWriteBuf = nullptr;
    NS_ENSURE_SUCCESS(rv, rv);
  }
  // NOTE(review): in the no-listener success path mWriteBuf stays allocated
  // and OnDataWritten never runs for it — confirm who frees it.

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
// Starts asynchronous loading of the metadata stored at the end of the cache
// file. Empty or too-small files fall back to fresh empty metadata. Chooses
// the read offset so that at least kMinMetadataRead bytes are fetched
// (rounded down to kAlignSize blocks); OnDataRead continues the work.
nsresult CacheFileMetadata::ReadMetadata(CacheFileMetadataListener *aListener) {
  LOG(("CacheFileMetadata::ReadMetadata() [this=%p, listener=%p]", this, aListener));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mHashArray);
  MOZ_ASSERT(!mBuf);
  MOZ_ASSERT(!mWriteBuf);

  nsresult rv;

  int64_t size = mHandle->FileSize();
  MOZ_ASSERT(size != -1);

  if (size == 0) {
    // this is a new entry
    LOG(("CacheFileMetadata::ReadMetadata() - Filesize == 0, creating empty " "metadata. [this=%p]", this));
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  if (size < int64_t(sizeof(CacheFileMetadataHeader) + 2*sizeof(uint32_t))) {
    // there must be at least checksum, header and offset
    LOG(("CacheFileMetadata::ReadMetadata() - File is corrupted, creating " "empty metadata. [this=%p, filesize=%lld]", this, size));
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  // Set offset so that we read at least kMinMetadataRead if the file is big
  // enough.
  int64_t offset;
  if (size < kMinMetadataRead) {
    offset = 0;
  } else {
    offset = size - kMinMetadataRead;
  }

  // round offset to kAlignSize blocks
  offset = (offset / kAlignSize) * kAlignSize;

  mBufSize = size - offset;
  mBuf = static_cast<char *>(moz_xmalloc(mBufSize));

  DoMemoryReport(MemoryUsage());

  LOG(("CacheFileMetadata::ReadMetadata() - Reading metadata from disk, trying " "offset=%lld, filesize=%lld [this=%p]", offset, size, this));

  mListener = aListener;
  rv = CacheFileIOManager::Read(mHandle, offset, mBuf, mBufSize, true, this);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::ReadMetadata() - CacheFileIOManager::Read() failed" " synchronously, creating empty metadata. [this=%p, rv=0x%08x]", this, rv));
    mListener = nullptr;
    InitEmptyMetadata();
    aListener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  return NS_OK;
}
// Serializes the metadata into a buffer sized by CalcMetadataSize() and
// starts an asynchronous write at aOffset. Layout:
//   [hash32 of everything after it][chunk hashes][header][key + '\0']
//   [elements][big-endian aOffset trailer]
// With a listener, mWriteBuf is retained and freed in OnDataWritten (or here
// on synchronous failure); without one, ownership of the buffer passes to
// CacheFileIOManager and mWriteBuf is nulled immediately.
nsresult CacheFileMetadata::WriteMetadata(uint32_t aOffset, CacheFileMetadataListener *aListener) {
  LOG(("CacheFileMetadata::WriteMetadata() [this=%p, offset=%d, listener=%p]", this, aOffset, aListener));

  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mWriteBuf);

  nsresult rv;

  mIsDirty = false;

  mWriteBuf = static_cast<char *>(malloc(CalcMetadataSize(mElementsSize, mHashCount)));
  if (!mWriteBuf) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // leave room for the leading checksum, then append each section in order
  char *p = mWriteBuf + sizeof(uint32_t);
  memcpy(p, mHashArray, mHashCount * sizeof(CacheHash::Hash16_t));
  p += mHashCount * sizeof(CacheHash::Hash16_t);
  mMetaHdr.WriteToBuf(p);
  p += sizeof(CacheFileMetadataHeader);
  memcpy(p, mKey.get(), mKey.Length());
  p += mKey.Length();
  *p = 0;
  p++;
  memcpy(p, mBuf, mElementsSize);
  p += mElementsSize;

  // checksum covers everything between the hash slot and the trailer
  CacheHash::Hash32_t hash;
  hash = CacheHash::Hash(mWriteBuf + sizeof(uint32_t), p - mWriteBuf - sizeof(uint32_t));
  NetworkEndian::writeUint32(mWriteBuf, hash);

  // trailer: where the metadata starts within the file
  NetworkEndian::writeUint32(p, aOffset);
  p += sizeof(uint32_t);

  char * writeBuffer = mWriteBuf;
  if (aListener) {
    mListener = aListener;
  } else {
    // We are not going to pass |this| as a callback so the buffer will be
    // released by CacheFileIOManager. Just null out mWriteBuf here.
    mWriteBuf = nullptr;
  }

  rv = CacheFileIOManager::Write(mHandle, aOffset, writeBuffer, p - writeBuffer, true, true, aListener ? this : nullptr);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::WriteMetadata() - CacheFileIOManager::Write() " "failed synchronously. [this=%p, rv=0x%08x]", this, rv));
    mListener = nullptr;
    if (mWriteBuf) {
      free(mWriteBuf);
      mWriteBuf = nullptr;
    }
    NS_ENSURE_SUCCESS(rv, rv);
  }

  DoMemoryReport(MemoryUsage());

  return NS_OK;
}
// Completion callback for the metadata read issued by ReadMetadata() (and by
// this method itself when the first read turned out to be too short). Every
// failure path degrades to empty metadata and reports NS_OK to the listener;
// a metadata read never fails the entry, it just resets it.
nsresult
CacheFileMetadata::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
                              nsresult aResult)
{
  LOG(("CacheFileMetadata::OnDataRead() [this=%p, handle=%p, result=0x%08x]",
       this, aHandle, aResult));

  MOZ_ASSERT(mListener);

  nsresult rv;
  nsCOMPtr<CacheFileMetadataListener> listener;

  if (NS_FAILED(aResult)) {
    LOG(("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() failed"
         ", creating empty metadata. [this=%p, rv=0x%08x]", this, aResult));

    InitEmptyMetadata();

    // Swap the listener into a local so it is released after the callback.
    mListener.swap(listener);
    listener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  // First read vs. the follow-up read issued below are recorded under
  // separate telemetry probes.
  if (mFirstRead) {
    Telemetry::AccumulateTimeDelta(
      Telemetry::NETWORK_CACHE_METADATA_FIRST_READ_TIME_MS, mReadStart);
    Telemetry::Accumulate(
      Telemetry::NETWORK_CACHE_METADATA_FIRST_READ_SIZE, mBufSize);
  } else {
    Telemetry::AccumulateTimeDelta(
      Telemetry::NETWORK_CACHE_METADATA_SECOND_READ_TIME_MS, mReadStart);
  }

  // check whether we have read all necessary data
  // The last 4 bytes of the file hold the real start offset of the metadata
  // (written by WriteMetadata()).
  uint32_t realOffset = NetworkEndian::readUint32(mBuf + mBufSize -
                                                  sizeof(uint32_t));

  int64_t size = mHandle->FileSize();
  MOZ_ASSERT(size != -1);
  if (realOffset >= size) {
    // Offset points past the end of the file — corrupted entry.
    LOG(("CacheFileMetadata::OnDataRead() - Invalid realOffset, creating "
         "empty metadata. [this=%p, realOffset=%u, size=%lld]", this,
         realOffset, size));

    InitEmptyMetadata();

    mListener.swap(listener);
    listener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  // Sanity bound: the metadata cannot be larger than what maximal element
  // data plus one hash per chunk of the file would occupy.
  uint32_t maxHashCount = size / kChunkSize;
  uint32_t maxMetadataSize = CalcMetadataSize(kMaxElementsSize, maxHashCount);
  if (size - realOffset > maxMetadataSize) {
    LOG(("CacheFileMetadata::OnDataRead() - Invalid realOffset, metadata would "
         "be too big, creating empty metadata. [this=%p, realOffset=%u, "
         "maxMetadataSize=%u, size=%lld]", this, realOffset, maxMetadataSize,
         size));

    InitEmptyMetadata();

    mListener.swap(listener);
    listener->OnMetadataRead(NS_OK);
    return NS_OK;
  }

  // usedOffset is the file position the first read actually started at.
  uint32_t usedOffset = size - mBufSize;

  if (realOffset < usedOffset) {
    // The metadata starts before the region we read; fetch the missing prefix.
    uint32_t missing = usedOffset - realOffset;
    // we need to read more data
    char *newBuf = static_cast<char *>(realloc(mBuf, mBufSize + missing));
    if (!newBuf) {
      LOG(("CacheFileMetadata::OnDataRead() - Error allocating %d more bytes "
           "for the missing part of the metadata, creating empty metadata. "
           "[this=%p]", missing, this));

      InitEmptyMetadata();

      mListener.swap(listener);
      listener->OnMetadataRead(NS_OK);
      return NS_OK;
    }

    mBuf = newBuf;
    // Shift the already-read tail to the end of the enlarged buffer so the
    // missing prefix can be read into the front.
    memmove(mBuf + missing, mBuf, mBufSize);
    mBufSize += missing;

    DoMemoryReport(MemoryUsage());

    LOG(("CacheFileMetadata::OnDataRead() - We need to read %d more bytes to "
         "have full metadata. [this=%p]", missing, this));

    // Re-enter this method when the second read completes.
    mFirstRead = false;
    mReadStart = mozilla::TimeStamp::Now();
    rv = CacheFileIOManager::Read(mHandle, realOffset, mBuf, missing, this);
    if (NS_FAILED(rv)) {
      LOG(("CacheFileMetadata::OnDataRead() - CacheFileIOManager::Read() "
           "failed synchronously, creating empty metadata. [this=%p, "
           "rv=0x%08x]", this, rv));

      InitEmptyMetadata();

      mListener.swap(listener);
      listener->OnMetadataRead(NS_OK);
      return NS_OK;
    }

    return NS_OK;
  }

  Telemetry::Accumulate(Telemetry::NETWORK_CACHE_METADATA_SIZE,
                        size - realOffset);

  // We have all data according to offset information at the end of the entry.
  // Try to parse it.
  rv = ParseMetadata(realOffset, realOffset - usedOffset, true);
  if (NS_FAILED(rv)) {
    LOG(("CacheFileMetadata::OnDataRead() - Error parsing metadata, creating "
         "empty metadata. [this=%p]", this));
    InitEmptyMetadata();
  } else {
    // Shrink elements buffer.
    mBuf = static_cast<char *>(moz_xrealloc(mBuf, mElementsSize));
    mBufSize = mElementsSize;

    // There is usually no or just one call to SetMetadataElement() when the
    // metadata is parsed from disk. Avoid allocating power of two sized buffer
    // which we do in case of newly created metadata.
    mAllocExactSize = true;
  }

  mListener.swap(listener);
  listener->OnMetadataRead(NS_OK);

  return NS_OK;
}