Example #1
void *larena_alloc(larena_t arena, long nbytes, const char *file, int line)
{
	lassert(arena);
	lassert(nbytes > 0);
	/* Round the request up to a multiple of the strictest alignment. */
	nbytes = ((nbytes + sizeof(union align) - 1) /
			sizeof(union align)) * sizeof(union align);
	while (nbytes > arena->limit - arena->avail) {
		/* The current chunk is too small: get a new one. */
		larena_t ptr;
		char *limit;
		if ((ptr = freechunks) != NULL) {
			/* Reuse a chunk from the free list. */
			freechunks = freechunks->prev;
			nfree--;
			limit = ptr->limit;
		} else {
			/* Allocate a fresh chunk with 10KB of slack beyond the request. */
			long m = sizeof(union lheader) + nbytes + 10 * 1024;
			ptr = malloc(m);
			if (ptr == NULL) {
				if (file == NULL)
					RAISE(arena_failed);
				else
					lexcept_raise(&arena_failed, file, line);
			}
			limit = (char *)ptr + m;
		}
		/* Save the current arena state at the head of the new chunk,
		   then point the arena at the chunk's free space. */
		*ptr = *arena;
		arena->avail = (char *)((union lheader *)ptr + 1);
		arena->limit = limit;
		arena->prev = ptr;
	}
	arena->avail += nbytes;
	return arena->avail - nbytes;
}
Example #2
void *lmem_alloc(long nbytes, const char *file, int line)
{
	struct descriptor *bp;
	void *ptr;

	lassert(nbytes > 0);
	/* Round the request up to a multiple of the strictest alignment. */
	nbytes = ((nbytes + sizeof(union align) - 1) /
			sizeof(union align)) * sizeof(union align);
	for (bp = freelist.free; bp; bp = bp->free) {
		if (bp->size > nbytes) {
			/* Carve the block off the tail of this free chunk. */
			bp->size -= nbytes;
			ptr = (char *)bp->ptr + bp->size;
			if ((bp = dalloc(ptr, nbytes, file, line)) != NULL) {
				/* Record the block in the descriptor hash table. */
				unsigned h = hash(ptr, htab);
				bp->link = htab[h];
				htab[h] = bp;
				return ptr;
			} else {
				lexcept_raise(&mem_failed, file, line);
			}
		}
		if (bp == &freelist) {
			/* Back at the sentinel: the free list is exhausted, so add a
			   new chunk of nbytes + NALLOC bytes and let the loop retry. */
			struct descriptor *newptr;
			if ((ptr = malloc(nbytes + NALLOC)) == NULL
				|| (newptr = dalloc(ptr, nbytes + NALLOC, __FILE__, __LINE__)) == NULL)
				lexcept_raise(&mem_failed, file, line);
			newptr->free = freelist.free;
			freelist.free = newptr;
		}
	}
	lassert(0);
	return NULL;
}
Example #3
	void refillBuffer(size_type minBytes)
		{
		lassert(minBytes >= 0);
		lassert(mBufferPos >= mBuffer.size());
		lassert(minBytes <= mBufferPreferredSize);

		clearBuffer();

		mBuffer.resize(mBufferPreferredSize);
		size_type bytesRead = mProtocol.read(mBufferPreferredSize, &mBuffer[0], false);

		if (bytesRead >= minBytes)
			{
			//we read enough - just return
			mBuffer.resize(bytesRead);
			}
		else
			{
			//force read to block until minimum number of bytes have been read
			bytesRead += mProtocol.read(minBytes - bytesRead, &mBuffer[bytesRead], true);
			mBuffer.resize(bytesRead);
			}

		lassert_dump(bytesRead >= minBytes, "Needed to get " << minBytes << " but only got "
					<< bytesRead);
		}
Example #4
std::tuple<T> toTuple(boost::python::tuple pyTuple)
	{
	lassert(boost::python::len(pyTuple) == 1);
	boost::python::extract<T> extractor(pyTuple[0]);
	lassert(extractor.check());
	
	return std::make_tuple(extractor());
	}
Example #5
void* MemoryHeap::realloc(void *ptr, size_t size)
	{
	if (mMspace == NULL) 
		return 0;

	auto itr = mPages.find(ptr);

	if (itr != mPages.end())
		{
		alloc_info info = (*itr).second;

		if (info.largeAlloc)
			{
			if (size < mPageSize / 2)
				{
				lassert(size < info.size);

				//the new array is no longer a 'large alloc' and should be placed back in the main
				//pool
				void* newPtr = malloc(size);
				lassert(newPtr);
				memcpy(newPtr, ptr, size);
				free(ptr);
				return newPtr;
				}
			else
				{
				void* newPtr = mMRemapFun(ptr, info.size, size, MREMAP_MAYMOVE);

				if (newPtr == MAP_FAILED)
					return 0;

				mark_unallocated(ptr, info.size);
				mark_allocated(newPtr, size, info.largeAlloc);
				mBytesUsed += (size - info.size);
				return newPtr;
				}
			}
		}

	size_t oldSize = mspace_usable_size(ptr);

	//the new array will be large enough that we should mmap it.
	if (size >= mPageSize)
		{
		void* newPtr = malloc(size);
		lassert(newPtr);
		memcpy(newPtr, ptr, std::min<size_t>(oldSize, size));
		free(ptr);
		return newPtr;
		}

	void* newPtr = mspace_realloc(mMspace, ptr, size);
	if (newPtr != NULL)
		mBytesUsed += mspace_usable_size(newPtr) - oldSize;
	return newPtr;
	}
Example #6
std::tuple<T1, T2> toTuple(boost::python::tuple pyTuple)
	{
	lassert(boost::python::len(pyTuple) == 2);
	boost::python::extract<T1> extractor1(pyTuple[0]);
	lassert(extractor1.check());
	boost::python::extract<T2> extractor2(pyTuple[1]);
	lassert(extractor2.check());
	
	return std::make_tuple(extractor1(), extractor2());
	}
Example #7
void *lmem_calloc(long count, long nbytes, const char *file, int line)
{
	void *ptr;
	
	lassert(count > 0);
	lassert(nbytes > 0);
	ptr = lmem_alloc(count * nbytes, file, line);
	memset(ptr, '\0', count * nbytes);
	return ptr;
}
Example #8
void MemoryHeap::validate() const
	{
	if (mMspace == NULL) return;
	lassert(mBytesUsed < mHeapSize);

	for (auto itr=mPages.begin(); itr!=mPages.end(); ++itr)
		{
		std::pair<void*, alloc_info> info = *itr;

		lassert(info.second.size > 0);
		}
	}
Example #9
MemoryHeap::~MemoryHeap()
	{
	if (mMspace == NULL)
		return;

	size_t used = getHeapSize();
	size_t freed = destroy_mspace(mMspace);
	freed += free_allocated();

	lassert(used == freed);
	lassert(mPages.size() == 0);
	}
Example #10
bool ComputationDependencyGraph::addRootToRootDependency(ComputationId source, ComputationId dest)
	{
	lassert(source.isRoot());
	lassert(dest.isRoot());

	if (mRootToRootDependencies.contains(source,dest))
		return false;

	mRootToRootDependencies.insert(source, dest);
	mDirtyPriorities.insert(dest);
	return true;
	}
Example #11
std::tuple<T1, T2, T3, T4> toTuple(boost::python::tuple pyTuple)
	{
	lassert(boost::python::len(pyTuple) == 4);
	boost::python::extract<T1> extractor1(pyTuple[0]);
	lassert(extractor1.check());
	boost::python::extract<T2> extractor2(pyTuple[1]);
	lassert(extractor2.check());
	boost::python::extract<T3> extractor3(pyTuple[2]);
	lassert(extractor3.check());
	boost::python::extract<T4> extractor4(pyTuple[3]);
	lassert(extractor4.check());
	
	return std::make_tuple(extractor1(), extractor2(), extractor3(), extractor4());
	}
Example #12
void *lmem_resize(void *ptr, long nbytes, const char *file, int line)
{
	struct descriptor *bp;
	void *newptr;

	lassert(ptr);
	lassert(nbytes > 0);
	/* ptr must be aligned, known to the allocator, and not already freed. */
	if (((unsigned long)ptr) % (sizeof (union align)) != 0
		|| (bp = find(ptr)) == NULL || bp->free)
		lexcept_raise(&assert_failed, file, line);
	/* Allocate a new block and copy over as much of the old one as fits. */
	newptr = lmem_alloc(nbytes, file, line);
	memcpy(newptr, ptr, nbytes < bp->size ? nbytes : bp->size);
	lmem_free(ptr, file, line);
	return newptr;
}
Example #13
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}
Example #14
bool VectorRecord::allValuesAreLoaded() const
	{
	if (!dataPtr() || !dataPtr()->pagedAndPageletTreeValueCount())
		return true;

	IntegerSequence curSlice(size(), offset(), stride());

	IntegerSequence restrictedSlice = curSlice.intersect(IntegerSequence(pagedAndPageletTreeValueCount()));

	if (restrictedSlice.size() == 0)
		return true;

	Nullable<long> slotIndex;
	Fora::Interpreter::ExecutionContext* context = Fora::Interpreter::ExecutionContext::currentExecutionContext();

	if (context)
		slotIndex = context->getCurrentBigvecSlotIndex();
	else
		slotIndex = 0;

	lassert(slotIndex);

	if (!dataPtr()->bigvecHandleForSlot(*slotIndex))
		return false;

	bool tr = dataPtr()->
		bigvecHandleForSlot(*slotIndex)->allValuesAreLoadedBetween(
			restrictedSlice.smallestValue(),
			restrictedSlice.largestValue() + 1
			);

	return tr;
	}
Example #15
bool ContinuationElement::recomputeTarget()
	{
	if (mSourceInstructionPtr->isRootInstruction())
		return false;

	lassert(!mIsDestroyed);

	InstructionPtr newTargetInstruction =
		mTargetInstructionPtr->instructionGraph().getInstruction(
			mTargetInstructionPtr->getGraph(),
			mTargetInstructionPtr->getLabel(),
			mTargetJOVs
			);

	if (newTargetInstruction == mTargetInstructionPtr)
		return false;

	mTargetInstructionPtr->dropIncomingContinuationElement(this);
	mTargetInstructionPtr = newTargetInstruction;
	mTargetInstructionPtr->addIncomingContinuationElement(this);

	mTargetInstructionPtr->instructionGraph().onInstructionContinuationsChanged(mSourceInstructionPtr);

	return true;
	}
Example #16
void larena_dispos(larena_t *ap)
{
	lassert(ap && *ap);
	larena_free(*ap);
	free(*ap);
	*ap = NULL;
}
Example #17
void hexStringToBytes(unsigned char* srcData, unsigned char* destData, uint32_t hexDigits)
	{
	lassert(hexDigits % 2 == 0);

	for (long k = 0; k < hexDigits; k += 2)
		destData[k/2] = hexCharValue(srcData[k]) * 16 + hexCharValue(srcData[k+1]);
	}
Example #18
	IFileDescriptorProtocol(
				int fd, 
				size_t alignment, 
				size_t bufsize, 
				CloseOnDestroy closeOnDestroy = CloseOnDestroy::False
				) :
			mFD(fd),
			mPosition(0),
			mCloseOnDestroy(closeOnDestroy),
			mAlignment(alignment),
			mBufferSize(bufsize),
			mBufferBytesUsed(0),
			mBufferBytesConsumed(0),
			mBufPtr(0)
		{
		lassert(mBufferSize % mAlignment == 0);

		mBufferHolder.resize(mAlignment * 2 + mBufferSize);
		uword_t bufptr = (uword_t)&mBufferHolder[0];
		
		//make sure that the buffer is aligned to the alignment as well
		if (bufptr % mAlignment)
			bufptr += mAlignment - bufptr % mAlignment;

		mBufPtr = (char*)bufptr;
		}
Example #19
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}
Example #20
etask* etaskman::getTask(etaskthread& thread)
{
  int tmpi;
  runThreadsMutex.lock();
//  cout << pthread_self() << " [finished] run: " << runningThreads << " pending: " << firstPendingTask << " tasks: " << tasks.size() << endl;
  while (1) {
    if (tasks.size() - firstPendingTask > 0) {
//      cout << pthread_self() << " [running] job: " << firstPendingTask << " run: " << runningThreads << " pending: " << firstPendingTask << " tasks: " << tasks.size() << endl;
      tmpi = firstPendingTask;
      ++firstPendingTask;
      lassert(!tasks[tmpi].isPending());
      tasks[tmpi].setRunning();
      runThreadsMutex.unlock();
      return(&tasks[tmpi]);
    }
    if (runningThreads == 1) {
      runThreadsMutex.unlock();
      onAllDone.call(evararray(*this));
      runThreadsMutex.lock();
      if (tasks.size() - firstPendingTask > 0) continue;
    }
    --runningThreads;
    if (runningThreads == 0)
      finishedThreadsCond.signal();
//    cout << pthread_self() << " [waiting] run: " << runningThreads << " pending: " << firstPendingTask << " tasks: " << tasks.size() << endl;
    runThreadsCond.wait(runThreadsMutex);
//    cout << pthread_self() << " [waking] run: " << runningThreads << " pending: " << firstPendingTask << " tasks: " << tasks.size() << endl;
    ++runningThreads;
  }
//  cout << pthread_self() << " [exiting] run: " << runningThreads << " pending: " << firstPendingTask << " tasks: " << tasks.size() << endl;
  runThreadsMutex.unlock();
  return(0x00);
}
Example #21
	~OFileDescriptorProtocol()
		{
		if (mBufferBytesUsed)
			{
			lassert(mAlignment > 0);

			if (mBufferBytesUsed % mAlignment)
				{
				memset(mBufPtr + mBufferBytesUsed, 0, mAlignment - mBufferBytesUsed % mAlignment);

				mBufferBytesUsed += mAlignment - mBufferBytesUsed % mAlignment;
				}

			try {
				write_(mBufferBytesUsed, mBufPtr);
				}
			catch(std::logic_error& e)
				{
				LOG_CRITICAL << "Exception thrown while flushing an OFileDescriptorProtocol:\n"
					<< e.what();
				}
			catch(...)
				{
				LOG_CRITICAL << "Unknown exception thrown while flushing an OFileDescriptorProtocol\n";
				}
			}

		if (mCloseOnDestroy == CloseOnDestroy::True)
			close(mFD);
		}
Example #22
bool ComputationDependencyGraph::checkInternalState()
	{
	bool isValid = true;

	lassert(!mDirtyPriorities.size());

	for (auto it = mAllPriorities.begin(); it != mAllPriorities.end(); ++it)
		if (it->second != computePriorityFor(it->first))
			{
			LOG_WARN << "ComputationDependencyGraph had " << prettyPrintString(it->second)
				<< " as priority for " << prettyPrintString(it->first) << " instead of "
				<< prettyPrintString(computePriorityFor(it->first));
			isValid = false;
			}

	for (auto it = mRootToRootDependencies.getKeysToValues().begin(); 
									it != mRootToRootDependencies.getKeysToValues().end();++it)
		if (mAllPriorities.find(it->first) == mAllPriorities.end() && 
				!computePriorityFor(it->first).isNull())
			{
			LOG_WARN 
				<< "ComputationDependencyGraph had dependencies for "
				<< prettyPrintString(it->first) << " but no priority."
				;
			isValid = false;
			}

	return isValid;
	}
Example #23
NativeExpression sizeExpression(
					const NativeExpression& arrayPtrE
					)
	{
	lassert(*arrayPtrE.type() == NativeTypeFor<MutableVectorHandle>::get().ptr());

	return arrayPtrE["mSize"].load();
	}
Example #24
	//remove the lowest-indexed copy of 'in' and swap the last value in the
	//vector into its position. This prevents us from having to reindex the entire
	//vector.
	void removeAndSwapLastFor(const T& in)
		{
		const auto& indices = indicesContaining(in);

		lassert(indices.size());

		removeAndSwapLastForIndex(*indices.begin());
		}
Example #25
void llist_map(llist_t list, void apply(void **x, void *cl), void *cl)
{
	lassert(apply);

	for ( ; list; list = list->rest) {
		apply(&list->first, cl);
	}
}
Example #26
void MemoryHeap::initialize()
	{
	mMspace = create_mspace_with_granularity(mPageSize, 0, this);
	
	mspace_track_large_chunks(mMspace, 1);

	lassert(mPageSize == mspace_footprint(mMspace));
	}
Example #27
	virtual void free(uint8_t* inBytes) 
		{
		boost::mutex::scoped_lock lock(mMutex);
		lassert(mByteCount.find(inBytes) != mByteCount.end());
		mByteCount.erase(inBytes);

		::free(inBytes);
		}
Example #28
TypedFora::Abi::ForaValueArraySlice VectorRecord::sliceForOffset(int64_t index) const
	{
	lassert(mDataPtr);

	return mDataPtr->sliceForOffset(index * mStride + mOffset).compose(
		RangeToIntegerSequence(0, size(), offset(), stride())
		);
	}
Example #29
void larena_free(larena_t arena)
{
	lassert(arena);
	while (arena->prev) {
		struct larena_t tmp = *arena->prev;
		if (nfree < THRESHOLD) {
			/* Keep the chunk on the free list for later reuse. */
			arena->prev->prev = freechunks;
			freechunks = arena->prev;
			nfree++;
			freechunks->limit = arena->limit;
		} else {
			free(arena->prev);
		}
		/* Pop back to the arena state saved in the chunk's header. */
		*arena = tmp;
	}
	lassert(arena->limit == NULL);
	lassert(arena->avail == NULL);
}
Example #30
	virtual std::string serialize(boost::shared_ptr<ArbitraryNativeConstant> constant)
		{
		boost::shared_ptr<ArbitraryNativeConstantForString> c =
			boost::dynamic_pointer_cast<ArbitraryNativeConstantForString>(constant);

		lassert(c);

		return c->getString();
		}