Example #1
VectorRecord VectorRecord::canonicallySliced(
					MemoryPool* inPool,
					VectorDataManager* inVDM,
					hash_type newVectorHash
					)  const
	{
	if (!mDataPtr)
		return VectorRecord();

	VectorRecord res(
		mDataPtr->slice(
			indicesWithinHandle(),
			inPool,
			inVDM,
			newVectorHash
			)
		);

	lassert_dump(
		res.size() == size(),
		"Slicing a vector of size " << mDataPtr->size() << " with " << prettyPrintString(indicesWithinHandle())
			<< " produced " << res.size() << ". Expected " << indicesWithinHandle().size()
			<< "\n\nHandle = "
			<< mDataPtr
		);

	return res;
	}
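Every example on this page revolves around lassert / lassert_dump, the assertion macros used throughout the Ufora codebase. Their definition isn't shown here, but the call pattern lassert_dump(cond, a << b << c) implies the message argument is spliced into an output stream. A minimal sketch of what such a macro could look like (the error type and message format are assumptions):

#include <sstream>
#include <stdexcept>

//hypothetical reconstruction; the real macro lives in the Ufora core headers
#define lassert_dump(cond, msg)                              \
	do {                                                     \
		if (!(cond)) {                                       \
			std::ostringstream _s;                           \
			_s << "assertion failed: " << #cond              \
				<< "\n" << msg;                              \
			throw std::logic_error(_s.str());                \
		}                                                    \
	} while (0)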
Example #2
	void vectorPageMapped(
						boost::shared_ptr<VectorPage> mappedPage,
						boost::shared_ptr<Ufora::threading::Trigger> mappedPageWantsUnmapped
						)
		{
		lassert_dump(false, "this should never happen. Mapping vectors in the free-store can't work");
		}
Example #3
	void refillBuffer(size_type minBytes)
		{
		lassert(minBytes >= 0);
		lassert(mBufferPos >= mBuffer.size()); //the existing buffer must be fully consumed
		lassert(minBytes <= mBufferPreferredSize);

		clearBuffer();

		mBuffer.resize(mBufferPreferredSize);
		size_type bytesRead = mProtocol.read(mBufferPreferredSize, &mBuffer[0], false);

		if (bytesRead >= minBytes)
			{
			//we read enough - just return
			mBuffer.resize(bytesRead);
			}
		else
			{
			//force read to block until minimum number of bytes have been read
			bytesRead += mProtocol.read(minBytes - bytesRead, &mBuffer[bytesRead], true);
			mBuffer.resize(bytesRead);
			}

		lassert_dump(bytesRead >= minBytes, "Needed to get " << minBytes << " but only got "
					<< bytesRead);
		}
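refillBuffer assumes a protocol object whose read takes a byte count, a destination pointer, and a flag controlling whether the call blocks until the requested bytes arrive. A minimal interface consistent with the two call sites above (the class and parameter names are assumptions):

class Protocol {
public:
	//returns the number of bytes actually written into 'data'; when
	//'blockUntilComplete' is true, the call doesn't return until 'bytes'
	//bytes have arrived (or the underlying stream ends)
	virtual size_t read(size_t bytes, void* data, bool blockUntilComplete) = 0;

	virtual ~Protocol() {}
};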
Example #4
boost::python::object PropertyStorage::getValue(LocationProperty inNode)
	{
	lassert_dump(has(inNode), inNode.name());

	if (inNode.attributeType() == attrMutable)
		mMutablePropertyAccesses[inNode] += 1;

	return mValues[inNode];
	}
Example #5
SimpleMemoryAllocator::SimpleMemoryAllocator(uword_t totalSize, uword_t inAlignment) :
		mAlignment(inAlignment),
		mTotalSize(totalSize)
	{
	lassert_dump(totalSize % mAlignment == 0, 
			"memory size must be " << mAlignment << "-byte aligned");

	mOffsetToBlocksizeMapUnallocated.insert(0, totalSize);
	mUnallocatedBlockUpperBoundsToOffsets[totalSize] = 0;
	}
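After construction the allocator tracks a single free block spanning the whole arena. A small usage sketch (the constants are illustrative; checkInternalConsistency is shown in Example #14):

	//one free block at offset 0 covering all 1024 bytes:
	//  mOffsetToBlocksizeMapUnallocated:      { 0 -> 1024 }
	//  mUnallocatedBlockUpperBoundsToOffsets: { 1024 -> 0 }
	SimpleMemoryAllocator allocator(1024, 16);

	allocator.checkInternalConsistency(); //0 allocated + 1024 unallocated == 1024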
Example #6
bool VectorRecord::entirelyCoveredByJOV(const JudgmentOnValue& inJOV) const
	{
	if (!dataPtr())
		return true;

	lassert(allValuesAreLoaded());

	int64_t curIndex = 0;

	while (curIndex < size())
		{
		TypedFora::Abi::ForaValueArraySlice slice = sliceForOffset(curIndex);

		lassert_dump(
			slice.array(),
			"We should have guaranteed that this value was loaded by calling 'allValuesAreLoaded'"
			);

		if (slice.mapping().stride() == 1)
			{
			bool allAreCovered = true;

			auto visitor = [&](const PackedForaValues& vals) {
				//once a value's JOV isn't covered, skip further covers() checks
				if (!allAreCovered || !inJOV.covers(vals.elementJOV()))
					allAreCovered = false;
				};

			slice.array()->visitValuesSequentially(
				visitor,
				slice.mapping().range().offset(),
				slice.mapping().range().endValue()
				);

			if (!allAreCovered)
				return false;
			}
		else
			{
			while (curIndex < slice.mapping().highIndex())
				{
				if (!inJOV.covers(slice.jovFor(curIndex)))
					return false;
				curIndex++;
				}
			}

		curIndex = slice.mapping().highIndex();
		}

	return true;
	}
Example #7
template<class T>
NativeType nativeTypeForCppmlTuple()
	{
	//get a list of types and offsets into the tuple
	ImmutableTreeVector<pair<NativeType, uword_t> > offsets = 
		NativeTypeForCppmlTupleImpl<T, typename T::metadata>::get();

	NativeType resultType = NativeType::Composite();

	//build up the tuple type one field at a time
	for (long k = 0; k < offsets.size(); k++)
		{
		if (offsets[k].second == resultType.packedSize())
			resultType = resultType + offsets[k].first;
		else if (offsets[k].second < resultType.packedSize())
			{
			//offsets should be monotonically increasing
			lassert_dump(false, "inconsistent typing found: " + prettyPrintString(offsets));
			}
			}
		else
			{
			//add enough padding to compensate for the extra bytes that C++ places in between
			//members to get alignment
			resultType = resultType + 
				NativeType::Composite(
					NativeType::Array(
						NativeType::Integer(8, false),
						offsets[k].second - resultType.packedSize()
						)
					);

			resultType = resultType + offsets[k].first;
			}
		}

	lassert(resultType.packedSize() <= sizeof(T));

	if (resultType.packedSize() < sizeof(T))
		resultType = resultType + 
			NativeType::Composite(
				NativeType::Array(
					NativeType::Integer(8, false),
					sizeof(T) - resultType.packedSize()
					)
				);

	return resultType;
	}
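As a concrete (hypothetical) example: a CPPML tuple holding a char followed by an int64_t yields offsets [(Integer(8), 0), (Integer(64), 8)]. The char lands at packed size 0, leaving the packed size at 1; the int64_t sits at offset 8, so the loop splices in Array(Integer(8, false), 7) before appending it. The final packed size, 16, equals the sizeof of the struct, so no trailing padding is needed.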
Example #8
void MemoryHeap::mark_unallocated(void* addr, size_t size)
	{
	while (size > 0)
		{
		auto itr = mPages.find(addr);
		lassert_dump(itr != mPages.end(), "could not find page in memory");

		//safe to dereference now that we know the page exists
		std::pair<void*, alloc_info> info = *itr;

		lassert(info.second.size <= size);
		mPages.erase(itr);

		mHeapSize -= info.second.size;
		size -= info.second.size;
		addr = (void*)((uint64_t)addr + info.second.size);
		}
	}
Example #9
	void write_(uword_t inByteCount, void *inData)
		{
		uint8_t* toWrite = (uint8_t*)inData;

		//::write may transfer fewer bytes than requested, so loop until
		//the full buffer has been flushed
		while (inByteCount > 0)
			{
			auto written = ::write(mFD, toWrite, inByteCount);

			if (written == -1 || written == 0)
				{
				std::string err = strerror(errno);
				lassert_dump(false, "failed to write: " << err << ". tried to write " << inByteCount);
				}

			inByteCount -= written;
			toWrite += written;
			}
		}
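A hypothetical read-side counterpart (not in the source) would use the same loop, since ::read can likewise return fewer bytes than requested:

	void read_(uword_t inByteCount, void *outData)
		{
		uint8_t* toRead = (uint8_t*)outData;

		while (inByteCount > 0)
			{
			auto bytesRead = ::read(mFD, toRead, inByteCount);

			if (bytesRead == -1 || bytesRead == 0)
				{
				std::string err = strerror(errno);
				lassert_dump(false, "failed to read: " << err << ". tried to read " << inByteCount);
				}

			inByteCount -= bytesRead;
			toRead += bytesRead;
			}
		}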
Example #10
void ContinuationElement::destroy(ContinuationElement* prev)
	{
	lassert_dump(!mIsDestroyed, "double destroy!");
	mIsDestroyed = true;

	//first, remove from the linked list
	if (prev == 0)
		mContinuationPtr->mFirstContinuationElementPtr = mNextContinuationElementPtr;
	else
		prev->mNextContinuationElementPtr = mNextContinuationElementPtr;

	mTargetInstructionPtr->dropIncomingContinuationElement(this);

	//sanity check: this element must no longer be reachable from the continuation's list
	ContinuationElement* continuationElementPtr = mContinuationPtr->mFirstContinuationElementPtr;
	while (continuationElementPtr)
		{
		lassert(continuationElementPtr != this);
		continuationElementPtr = continuationElementPtr->mNextContinuationElementPtr;
		}
	}
Example #11
void Registry::callRegistrar(boost::shared_ptr<ExporterBase> inRegistrar)
	{
	if (mRegistrarsCalled.find(inRegistrar) != mRegistrarsCalled.end())
		return;
	
	std::vector<std::string> deps;
	inRegistrar->dependencies(deps);
	
	for (long k = 0; k < deps.size(); k++)
		{
		lassert_dump(mExportersByTypeInfo[deps[k]], "no exporter for " << deps[k]);
		callRegistrar(mExportersByTypeInfo[deps[k]]);
		}
	
	mRegistrarsCalled.insert(inRegistrar);
	
	
	boost::python::scope scope(createModule(inRegistrar->getModuleName()));
	inRegistrar->exportPythonWrapper();
	}
Example #12
Fora::ReturnValue<VectorRecord, VectorLoadRequest>
			VectorRecord::deepcopiedAndContiguous(MemoryPool* inPool, VectorDataManager* inVDM) const
	{
	if (!dataPtr())
		return Fora::slot0(*this);

	lassert(!inPool->isBigVectorHandle());

	if (!allValuesAreLoaded())
		return Fora::slot1(VectorLoadRequest(*this));

	ForaValueArray* array = ForaValueArray::Empty(inPool);

	int64_t curIndex = 0;

	while (curIndex < size())
		{
		TypedFora::Abi::ForaValueArraySlice slice = sliceForOffset(curIndex);

		lassert(slice.mapping().indexIsValid(curIndex));

		lassert_dump(
			slice.array(),
			"We should have guaranteed that this value was loaded by calling 'allValuesAreLoaded'"
			);

		Nullable<int64_t> unmappedIndex =
			slice.firstValueNotLoadedInRange(
				curIndex,
				slice.mapping().highIndex()
				);

		lassert_dump(
			!unmappedIndex,
			"Index " << *unmappedIndex << " is unmapped in "
				<< prettyPrintString(slice) << " of size " << size()
			);

		if (slice.mapping().stride() == 1)
			{
			array->append(
				*slice.array(),
				slice.mapping().offsetForIndex(curIndex),
				slice.mapping().offsetForIndex(slice.mapping().highIndex())
				);
			}
		else
			{
			while (curIndex < slice.mapping().highIndex())
				{
				int64_t indexInTarget = slice.mapping().offsetForIndex(curIndex);
				array->append(*slice.array(), indexInTarget, indexInTarget+1);
				curIndex++;
				}
			}

		curIndex = slice.mapping().highIndex();
		}

	lassert(array->size() == size());

	return Fora::slot0(
		VectorRecord(
			inPool->construct<VectorHandle>(
				Fora::BigVectorId(),
				Fora::PageletTreePtr(),
				array,
				inPool,
				vectorHandleHash()
				)
			)
		);
	}
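Fora::ReturnValue acts as a two-slot variant here: slot 0 carries the finished contiguous copy, slot 1 a VectorLoadRequest for pages that still need loading. A caller therefore dispatches on which slot was filled; the accessor names below (getIndex, get0, get1) are assumptions about that interface:

	auto result = record.deepcopiedAndContiguous(pool, vdm);

	if (result.getIndex() == 0)
		processVector(result.get0());  //everything was loaded; use the copy
	else
		scheduleLoad(result.get1());   //page the data in, then retry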
Example #13
double curThreadClock(void)
	{
	lassert_dump(false, "not implemented");
	}
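The stub above simply fails; on a POSIX system, one plausible implementation (an assumption, not taken from the source) reads the per-thread CPU-time clock:

//requires <ctime>
double curThreadClock(void)
	{
	timespec ts;
	lassert(clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0);
	return ts.tv_sec + ts.tv_nsec * 1e-9;
	}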
Example #14
void SimpleMemoryAllocator::checkInternalConsistency(void)
	{
	const std::map<uword_t, uword_t>& allocatedBlockSizes(mOffsetToBlocksizeMapAllocated.getKeyToValue());
	const std::map<uword_t, uword_t>& unallocatedBlockSizes(mOffsetToBlocksizeMapUnallocated.getKeyToValue());

	uword_t totalBytesAllocated = 0;
	uword_t totalBytesUnallocated = 0;
	
	//loop over all pairs of allocated blocks
	for (auto it = allocatedBlockSizes.begin(); it != allocatedBlockSizes.end(); ++it)
		{
		totalBytesAllocated += it->second;

		auto it2 = it;
		it2++;
		if (it2 != allocatedBlockSizes.end())
			{
			lassert_dump(it->first + it->second <= it2->first, "allocated blocks overlapped");
			if (it->first + it->second < it2->first)
				{
				//verify that the unallocated blocks make sense
				lassert_dump(mOffsetToBlocksizeMapUnallocated.hasKey(it->first + it->second),
					"unallocated block was missing");

				lassert_dump(mOffsetToBlocksizeMapUnallocated.getValue(it->first + it->second) == 
					it2->first - (it->first + it->second),
					"unallocated block had incorrect size");
				}
			}
		}

	//now loop over all pairs of unallocated blocks and check their consistency
	for (auto it = unallocatedBlockSizes.begin(); it != unallocatedBlockSizes.end(); ++it)
		{
		totalBytesUnallocated += it->second;

		auto it2 = it;
		it2++;

		if (it2 != unallocatedBlockSizes.end())
			{
			//unallocated blocks shouldn't overlap or even touch
			lassert_dump(it->first + it->second < it2->first, "unallocated blocks overlapped");

			//verify that there are allocated blocks in between
			lassert_dump(
				mOffsetToBlocksizeMapAllocated.hasKey(it->first + it->second) || 
				it->first + it->second == mTotalSize,
				"top end of unallocated block wasn't an allocated block."
				);

			//verify that the upper-bound map has the entry
			lassert_dump(
				mUnallocatedBlockUpperBoundsToOffsets.find(it->first + it->second) != 
					mUnallocatedBlockUpperBoundsToOffsets.end(),
				"upper bound map doesn't have an entry at " << (it->first + it->second)
				);

			lassert_dump(
				mUnallocatedBlockUpperBoundsToOffsets[it->first + it->second] == it->first,
				"upper bound map corrupt: we have an entry at " << (it->first + it->second)
					<< " that points at " 
					<< mUnallocatedBlockUpperBoundsToOffsets[it->first + it->second] 
					<< " instead of " << it->first

				);
			}
		}

	lassert_dump(totalBytesAllocated + totalBytesUnallocated == mTotalSize, 
		"sizes of allocated/deallocated regions didn't add up to total size: " << 
			totalBytesAllocated << " + " << totalBytesUnallocated << " != " << mTotalSize
		);
	}