Exemplo n.º 1
0
/*
 * Detaches virtual processor vp from this HAT: walks vp's segment list
 * and asks each SegmentHAT to unmap its whole range for that vp.
 * Must be called on the physical processor vp currently runs on, and
 * must not be called for the segment table the hardware is using.
 */
SysStatus
HATDefaultBase<ALLOC>::detachVP(VPNum vp)
{
    SysStatus rc;
    SegmentList<ALLOC>* restart;	// iteration cursor for FindNextSegment
    SegmentHATRef ref;
    HATRef hatRef = getRef();
    uval segmentAddr, segmentEnd;
    VPSet* dummy;			// per-segment ppset out-param, unused here

    byVP[vp].lock.acquire();
    tassertMsg(byVP[vp].pp == Scheduler::GetVP(),
	       "detachVP called on wrong pp\n");
    tassertMsg(byVP[vp].segps != exceptionLocal.currentSegmentTable,
	       "detaching current segment table\n");
    restart = &(byVP[vp].segmentList);
    // range [0, uval(-1)) covers the entire address space
    while (0<=SegmentList<ALLOC>::FindNextSegment(
	0, uval(-1), segmentAddr, segmentEnd, ref, dummy, restart)) {
	rc = DREF(ref)->unmapRange(hatRef, segmentAddr, segmentEnd,
				   0, uval(-1), vp);
	tassertMsg(_SUCCESS(rc), "oops\n");
    }
    byVP[vp].lock.release();
    return 0;
}
Exemplo n.º 2
0
/*
 * Attaches virtual processor vp after it has moved to this physical
 * processor: fixes up vp's segment table, notifies each SegmentHAT of
 * the vp->pp change, then prunes the global per-segment ppsets down to
 * the set of pp's any vp of this HAT still runs on (so unmaps do not
 * keep visiting processors we have migrated away from).
 */
SysStatus
HATDefaultBase<ALLOC>::attachVP(VPNum vp)
{
    SysStatus rc;
    SegmentTable* segp;
    SegmentList<ALLOC>* restart;	// iteration cursor for FindNextSegment
    SegmentHATRef ref;
    uval segmentAddr, segmentEnd;
    VPSet* ppset;

    byVP[vp].lock.acquire();
    rc = getSegmentTable(vp, segp);
    tassertMsg(_SUCCESS(rc), "but this cant fail\n");
    // fix up segment table after move - for example get rid of
    // segment mappings which may now be wrong
    segp->changePP();
    // tell each SegmentHAT that the vp to pp map has changed
    // also, add this pp to the ppset in the global list by looking it up
    restart = &(byVP[vp].segmentList);
    while (0<=SegmentList<ALLOC>::FindNextSegment(
	0, uval(-1), segmentAddr, segmentEnd, ref, ppset, restart)) {
	rc = DREF(ref)->changePP(vp);
	tassertMsg(_SUCCESS(rc),"how can this fail?\n");
	// side effect of findSegmentGlobal is recording pp
	rc = findSegmentGlobal(segmentAddr, ref, 0 /*dont create*/);
	tassertMsg(_SUCCESS(rc),"how can this fail?\n");
    }
    byVP[vp].lock.release();
    /*
     * if we just keep on adding new processors to the global ppset of
     * each segment without removing old ones, we will eventually
     * visit every processor for every unmap.  But just because one vp
     * has migrated off a pp doesn't mean another isn't still there.
     * So we do a brute force check to bound the problem.  We compute
     * a set of pp's we can possibly be interested in and intersect it
     * with each global segment ppset.
     */
    VPSet activeSet;
    activeSet.init();

    /*
     * if more than one attach is happening (can it?) it
     * doesn't matter which one does this first,
     * as long as one of them does it last without
     * interference.
     */
    glock.acquire();
    // NOTE: vp is reused as a loop index from here on; the parameter's
    // original value is no longer needed.
    for(vp = 0; vp<Scheduler::VPLimit;vp++) {
	byVP[vp].lock.acquire();
	activeSet.addVP(byVP[vp].pp);
	byVP[vp].lock.release();
    }
    restart = &segmentList;
    while (0<=SegmentList<ALLOC>::FindNextSegment(
	0, uval(-1), segmentAddr, segmentEnd, ref, ppset, restart)) {
	// drop pp's no vp of this HAT currently occupies
	ppset->intersect(activeSet);
    }
    glock.release();
    return 0;
}
Exemplo n.º 3
0
/*
 * Installs the physical-address entry points for the external-interrupt,
 * decrementer, and performance-monitor exception handlers.  The handler
 * table is consumed with translation off, so each handler's virtual
 * address is converted to a real address first.
 * (vp is currently unused by this routine.)
 */
void
fixupExceptionHandlers(VPNum vp)
{
    exceptionLocal.handlers[EXC_IDX(EXC_EXI)] = (codeAddress)
	PageAllocatorKernPinned::virtToReal(uval(&exc_exi_handler));

    exceptionLocal.handlers[EXC_IDX(EXC_DEC)] = (codeAddress)
	PageAllocatorKernPinned::virtToReal(uval(&exc_dec_handler));

    exceptionLocal.handlers[EXC_IDX(EXC_PERF)] = (codeAddress)
	PageAllocatorKernPinned::virtToReal(uval(&exc_perf_handler));
}
Exemplo n.º 4
0
/*
 * Handles a use-type callback from the file server.  For
 * CALLBACK_REQUEST_SWITCH the object transitions to SHARED use (moving
 * any local buffer into shared mode); for CALLBACK_REQUEST_INFO the
 * locally-cached length/offset are reported.  Either way the values are
 * acknowledged back to the server via _ackUseTypeCallBack.
 */
/* virtual */ SysStatus
FileLinuxFile::processCallBack(uval request)
{
    FLFDEBUG("processCallBack");

    SysStatus rc;

    AutoLock<StreamLockType> al(&streamLock); // locks now, unlocks on return

    /*
     * Initialize both to the "unknown" sentinel.  Previously off (and,
     * in the LAZY_SHARING_SETUP build, flen) could reach the ack below
     * uninitialized when there was no local buffer.
     */
    uval flen, off;
    flen = uval(~0);
    off = uval(~0);

    if (request == CALLBACK_REQUEST_SWITCH) {
	useType = SHARED;
#ifndef LAZY_SHARING_SETUP
	if (buffer) {
	    // FIXME: for now only from exclusive to shared
	    buffer->switchToShared(flen, off);
	}
#else
	// FIXME: for now only from exclusive to shared
	buffer->switchToShared(flen, off);
#endif // #ifndef LAZY_SHARING_SETUP
    } else {
#ifndef LAZY_SHARING_SETUP
	if (buffer) {
	    tassertMsg(request == CALLBACK_REQUEST_INFO, "req is %ld\n",
		       (uval) request);
	    rc = buffer->getLengthOffset(flen, off);
	    tassertMsg(_SUCCESS(rc), "?");
	}
#else
	tassertMsg(request == CALLBACK_REQUEST_INFO, "req is %ld\n",
		   (uval) request);
	rc = buffer->getLengthOffset(flen, off);
	tassertMsg(_SUCCESS(rc), "?");
#endif // #ifndef LAZY_SHARING_SETUP
    }

    tassertMsg(callBackUseTypeRegistered == 1, "how come? it is %ld\n",
	       callBackUseTypeRegistered);
    tassertMsg(stub.getOH().valid(), "stub not valid\n");

#ifdef DEBUG_USE_TYPE
#ifdef HACK_FOR_FR_FILENAMES
    char name[255];
    SysStatusUval rclen = stub._getFileName(name, sizeof(name));
    tassertMsg(_SUCCESS(rclen), "?");
    err_printf("processCallBack(%s) for request %ld returning flen %ld off %ld\n",
	       name, request, flen, off);
#endif // #ifdef HACK_FOR_FR_FILENAMES
#endif // #ifdef DEBUG_USE_TYPE

    rc = stub._ackUseTypeCallBack(request, flen, off);
    tassertMsg(_SUCCESS(rc), "how? ref %p\n", getRef());

    return 0;
}
Exemplo n.º 5
0
Arquivo: FD.C Projeto: BillTheBest/k42
    /*
     * Poll wakeup callback, invoked when the watched file reaches the
     * awaited state.  Atomically claims spo->result so that only one
     * ready() call delivers a result, records the poll outcome, and
     * unblocks the waiting thread.  Always drops this callback object's
     * reference before returning.
     */
    virtual void ready(FileLinuxRef fl, uval state) {

	// Atomically clear spo->result, if it is not NULL.
	volatile SysStatusUval *addr = NULL;
	do {
	    addr = spo->result;
	    if (!addr) break;	// already claimed by another ready()
	} while (!CompareAndStoreVolatile((uval*)&spo->result,
					  uval(addr), 0ULL));

	if (addr && spo->thread != Scheduler::NullThreadID) {
	    ThreadID thr = spo->thread;
	    spo->thread = Scheduler::NullThreadID;

	    // File is being closed, file descriptor not valid
	    if (unlikely(state&FileLinux::DESTROYED)) {
		*addr = _SERROR(2671, 0, EINVAL);
	    } else {
		// Change *addr last; thread may be waiting for it
		// to change
		pfd->revents = state;
		*addr = _SRETUVAL(1);
	    }
	    Scheduler::Unblock(thr);
	}

	release();
    }
Exemplo n.º 6
0
/*
 * Async server->client callback entry for use-type changes; arg selects
 * the request type.  Coordinates with object destruction through
 * destroyAckSyncAddr: the low bit is set while the callback is in
 * progress, and if destruction has already begun (value > 1 before the
 * OR) the callback bails out without touching the object.
 */
SysStatusUval /* __async */
FileLinuxFile::CallBackUseType::_callBack(__in uval arg, __XHANDLE xh)
{
    FLFDEBUG("_callBack for UseType");

    /* FIXME: so far only accepting call backs to go from exclusive to shared.
     * We have only one argument in the interface and we need to deal with
     * two types of requests (CALLBACK_REQUEST_INFO and
     * CALLBACK_REQUEST_SWITCH). Once we use the hot swapping infrastructure
     * we'll eliminate this problem */
    tassertMsg((arg == CALLBACK_REQUEST_INFO ||arg == CALLBACK_REQUEST_SWITCH),
	       "unexpected arg (%ld)\n", arg);

    /* check if destruction is going on and indicate that _callBack ack is
     * needed: set last bit */
    uval64 oldDestroyAck = FetchAndOr64(destroyAckSyncAddr, 1);
    if (oldDestroyAck > 1) {
	// destruction is already going on; nothing to do
#ifdef DILMA_DEBUG_SWITCH
	err_printf("_callBack can't proceed: object being destroyed "
		   "value %lld\n flref %p", oldDestroyAck, flref);
#endif //#ifdef DILMA_DEBUG_SWITCH
	return 0;
    }

    SysStatus rc = DREF(flref)->processCallBack(arg);
    tassertMsg(_SUCCESS(rc), "ops");

    // restore last bit in destroyAckSync to 0
    AtomicAnd64(destroyAckSyncAddr, uval(~1));

    return 0;
}
Exemplo n.º 7
0
// Returns the timer's current value and restarts the timer.
REAL ElTimer::ValAndInit()
{
    const REAL elapsed = uval();	// capture value before resetting
    reinit();
    return elapsed;
}
Exemplo n.º 8
0
Arquivo: FD.C Projeto: BillTheBest/k42
    void release() {
	//Decrement ref count
	uval count = FetchAndAdd(&refCount, uval(-1LL));
	if (count == 1) {
	    //We were last decrementer
	    delete this;
	}
    }
Exemplo n.º 9
0
    void * operator new(size_t size, MemoryMgrPrimitive *pa) {
	uval addr;
	if (pa != NULL) {
	    pa->alloc(addr, size, sizeof(uval));
	} else {
	    addr = uval(allocGlobalPadded(size));
	}
	return (void *) addr;
    }
Exemplo n.º 10
0
/*
 * Machine-dependent process-annex initialization: records the physical
 * address of the dispatcher (the low-level exception path runs with
 * translation off) and computes the initial MSR with external
 * interrupts enabled plus the user- or kernel-mode bit set.
 */
void
ProcessAnnexMachine::init(uval userMode,
			  Dispatcher *disp,
			  SegmentTable *segTable)
{
    /* FIXME -- X86-64 */
    // NOTE(review): segTable is unused here -- presumably consumed by
    // other architectures' versions of this routine; confirm.
    dispatcherPhysAddr = PageAllocatorKernPinned::virtToReal(uval(disp));
    msr = PSL_EE | (userMode ? PSL_USERSET : PSL_KERNELSET);
}
Exemplo n.º 11
0
/*
 * Sets the stream's file position (seek).  Lazily creates the local
 * buffer with the unknown-length sentinel on first use, then delegates
 * to the buffer.
 */
/* virtual */ SysStatusUval
FileLinuxFile::setFilePosition(sval position, At at)
{
    FLFDEBUG("setFilePosition");

    AutoLock<StreamLockType> al(&streamLock); // locks now, unlocks on return

    if (buffer == NULL) {
	locked_initBuffer(uval(~0));
    }

    return buffer->setFilePosition(position, at);
}
Exemplo n.º 12
0
/*
 * Truncates (or extends) the file to the given length.  Lazily creates
 * the local buffer with the unknown-length sentinel on first use, then
 * delegates to the buffer.
 */
/* virtual */ SysStatus
FileLinuxFile::ftruncate(off_t length)
{
    FLFDEBUG("ftruncate");

    AutoLock<StreamLockType> al(&streamLock); // locks now, unlocks on return

    if (buffer == NULL) {
	locked_initBuffer(uval(~0));
    }

    return buffer->ftruncate((uval) length);
}
Exemplo n.º 13
0
/*
 * unmaps the page from the process this region is attached to
 * argument is the FCM offset - which the Region can convert
 * to a virtual address
 * we find the specific vp this offset is in
 */
/*
 * unmaps the page from the process this region is attached to
 * argument is the FCM offset - which the Region can convert
 * to a virtual address
 * we find the specific vp this offset is in
 */
SysStatus
RegionPerProcessorKernel::unmapPage(uval offset)
{
    if (hat) {
	VPNum vp;
	//calculate which vp this offset is "in"
	vp = (offset-fileOffset)/regionSize;
	/* vp should be local vp, although not having the vp is ok as well.
	 * this actually may not be fatal if for some reason we share these
	 */
	VPNum pp;
	tassert(_FAILURE(DREF(proc)->vpnumToPpnum(vp, pp))
		|| (pp == Scheduler::GetVP()),
		err_printf("RPP::unmapPageLocal wrong proc: %ld != %ld\n",
			   uval(pp), uval(Scheduler::GetVP())));
	// translate the FCM offset to this vp's virtual address and unmap
	DREF(hat)->unmapPage((offset-fileOffset)-vp*regionSize+regionVaddr);
    }
    return 0;
}
Exemplo n.º 14
0
/*
 * Disables most exception handling by pointing the decrementer and
 * performance-monitor vectors at the null handler.  External (I/O)
 * interrupt handling is deliberately left intact.
 */
void
killExceptionHandlers()
{
    // both vectors share the null handler's physical address
    uval nullHandlerPhys =
	PageAllocatorKernPinned::virtToReal(uval(&exc_null_handler));

    exceptionLocal.handlers[EXC_IDX(EXC_DEC)] = (codeAddress) nullHandlerPhys;
    exceptionLocal.handlers[EXC_IDX(EXC_PERF)] = (codeAddress) nullHandlerPhys;
}
Exemplo n.º 15
0
void *
MPMsgMgr::operator new(size_t size, MemoryMgrPrimitive *pa)
{
    uval space;
    if (pa != NULL) {
	pa->alloc(space, size, sizeof(uval));
    } else {
	space = uval(allocGlobalPadded(size));
    }
    return (void *) space;
}
Exemplo n.º 16
0
/*
 * Lock-free push of a message onto queue q (LIFO via CAS on the head).
 * If the queue was previously empty, raises the queue's interrupt bit
 * on the target dispatcher so the new message gets noticed.
 */
void
MPMsgMgr::addPendingMsg(MsgHeader *hdr, MsgQueue &q)
{
    SysStatus rc;
    MsgHeader *oldHead;

    do {
	oldHead = q.head;
	hdr->next = oldHead;
	/*
	 * FIXME: really just need a sync here to ensure changes to
	 * msg complete before putting on queue, re-write
	 * machine specific assembly, e.g., an enqueue function
	 */
    } while (!CompareAndStoreSynced((uval *)(&q.head),
				    uval(oldHead), uval(hdr)));
    if (oldHead == NULL) {
	// queue transitioned empty -> non-empty; notify the dispatcher
	rc = DREFGOBJ(TheProcessRef)->sendInterrupt(thisDspID, q.interruptBit);
	tassert(_SUCCESS(rc), err_printf("sendInterrupt failed.\n"));
    }
}
Exemplo n.º 17
0
void
SchedulerTimer::printStatus() {
    SysTime curr = now;
    err_printf("Timer status at [%lx] <- %lx[%lx] -> [%lx] next %lx[%lx]\n",
	       bucketStart(curr), uval(curr), timeIndex(curr), bucketEnd(curr),
	       uval(when), timeIndex(when));

    for (uval i = 0; i<TABLE_SIZE; ++i) {
	TimerEvent *t = (TimerEvent*)table[i].next();
	if (t) {
	    err_printf("Bucket index: %lx\n",i);
	}
	while (t) {
	    err_printf("\tevent: %p when: %lx[%lx] vptr %p\n",
		       t, uval(t->when), timeIndex(t->when), *((uval**)t));
	    t = (TimerEvent*)t->next();

	}
    }

}
Exemplo n.º 18
0
/*
 * Initializes this per-dispatcher message manager: sets up the send and
 * reply queues, allocates the fixed array of message buffers (from the
 * primitive allocator during early boot, else the global allocator),
 * and -- on the first dispatcher only -- creates the shared registry.
 * Registration with the registry is deferred until interrupt handlers
 * are installed.
 */
void
MPMsgMgr::init(DispatcherID dspid, MemoryMgrPrimitive *pa,
	       MPMsgMgrRegistryRef &registry)
{
    SysStatus rc;

    sendQueue.init();
    replyQueue.init();

    allocMsgLock.init();
    thisDspID = dspid;

    // allocate array of buffers
    tassert((sizeof(MsgHolder) == MSG_HOLDER_SIZE), err_printf("oops\n"));
    const uval amt = NUM_MSGS * sizeof(MsgHolder);
    uval space;
    if (pa != NULL) {
	pa->alloc(space, amt, MSG_CHUNK_SIZE);
    } else {
	space = uval(allocGlobalPadded(amt));
    }
    tassert(space != 0, err_printf("couldn't allocate msg buffers\n"));
    msgHolder = (MsgHolder *) space;

    // all message slots start free
    uval i;
    for (i = 0; i < NUM_MSGS; i++) {
	msgHolder[i].manager = this;
	msgHolder[i].busy = 0;
    }
    nextMsgIdx = 0;

    // Create the registry, but don't register ourselves yet because our
    // interrupt handlers haven't been installed.
    if (dspid == SysTypes::DSPID(0,0)) {
	if (registry!=NULL) {
	    // NOTE(review): debugging aid -- dumps any nonzero words on
	    // the page containing 'registry' before the assert below
	    // fires; looks like leftover diagnostics, confirm intent
	    uval* y = (uval*)PAGE_ROUND_UP((uval)&registry);
	    uval* x = (uval*)PAGE_ROUND_DOWN((uval)&registry);
	    while (x < y) {
		if (*x) {
		    err_printf("%p: %lx\n",x,*x);
		}
		++x;
	    }
	}

	passertMsg(registry == NULL,"MPMsgMgr already initialized %p\n",
		   registry);
	rc = MPMsgMgrRegistry::Create(registry, pa);
	tassert(_SUCCESS(rc), err_printf("MPMsgMgrRegistry::Create failed\n"));
    }
    registryRef = registry;
}
Exemplo n.º 19
0
/*
 * Fills in *status with stat information from the server.  For
 * NON_SHARED use, the locally-buffered file length is authoritative and
 * overrides the server's st_size.  In the LAZY_SHARING_SETUP build a
 * LAZY_INIT object is first promoted by registering its use-type
 * callback and constructing the buffer from the cached init data.
 */
/* virtual */SysStatus
FileLinuxFile::getStatus(FileLinux::Stat *status)
{
    FLFDEBUG("getStatus");

    AutoLock<StreamLockType> al(&streamLock); // locks now, unlocks on return
    SysStatus rc;

#ifdef LAZY_SHARING_SETUP
    if (useType == LAZY_INIT) {
	tassertMsg(callBackUseTypeRegistered == 0, "ops");
	rc = locked_registerCallBackUseType();
	tassertMsg(_SUCCESS(rc), "ops");
	tassertMsg(bufferInitData.fileLength != uval(~0), "?");
	buffer = new Buffer(bufferInitData.fileLength,
			    bufferInitData.initialOffset,
			    openFlags, stub.getOH(), useType, uval(~0));
    }
#else
    passertMsg(useType != LAZY_INIT, "?");
#endif // #ifdef LAZY_SHARING_SETUP

    rc = stub._getStatus(*status);
    _IF_FAILURE_RET(rc);

    if (useType == NON_SHARED) {
#ifndef LAZY_SHARING_SETUP
	if (buffer == NULL) locked_initBuffer(uval(~0));
#endif // #ifndef LAZY_SHARING_SETUP
	// local buffer knows the true length; server copy may be stale
	uval flength, dummy;
	rc = buffer->getLengthOffset(flength, dummy);
	status->st_size = flength;
    }

    return rc;
}
Exemplo n.º 20
0
/*
 * Gives memory back to the paging manager according to how tight memory
 * is (memLevelState).  Pinned FCMs refuse; otherwise a fraction of the
 * page list (1/4 at MID, 1/2 at LOW, all at CRITICAL) is written back
 * via locked_giveBack, and the free list is replenished by
 * locked_pageScan.  May signal an empty transition via checkEmpty.
 */
SysStatus
FCMDefault::giveBack(PM::MemLevelState memLevelState)
{
    SysStatus rc;
    uval numPagesStart;

    if (!pageable) {
	return _SRETUVAL(PageAllocatorKernPinned::PINNED);
    }

    // we sweep through the pagelist collecting up to listSize pages to
    // writeback, release locks do write, and start again if there is
    // more left
    lock.acquire();
    numPagesStart = pageList.getNumPagesFree();
    switch(memLevelState) {
    case PM::HIGH:
	rc = 0;				// no pages needed
	numPagesStart = 0;		// no need to checkEmpty
	break;
    case PM::MID:
	rc = locked_giveBack(numPagesStart/4 + 1);
	break;
    case PM::LOW:
	rc = locked_giveBack(numPagesStart/2 + 1);
	break;
    case PM::CRITICAL:
	// NOTE(review): locked_pageScan also runs unconditionally after
	// the switch, so the CRITICAL path scans twice -- confirm intent
	locked_pageScan(memLevelState);
	rc = locked_giveBack(numPagesStart);
	break;
    default:
	passertMsg(0, "Bogus memLevelState %ld in giveBack\n",
		   uval(memLevelState));
	// NOTE(review): -1 is not a structured _SERROR code -- confirm
	// callers only test _FAILURE on this path
	rc = -1;
    }
    locked_pageScan(memLevelState);
    lock.release();

    if (numPagesStart > 0) {
	/* Only checkEmpty if we had frames, since we
	 * don't want to repeatedly signal a transition to
	 * empty when nothing happened.
	 */
	checkEmpty();
    }
    return rc;
}
Exemplo n.º 21
0
/*
 * Reads up to nbytes at absolute position offset into buf without
 * disturbing the stream's file position (pread(2) semantics): the
 * current position is saved, the read is performed through the buffer,
 * and the position is restored.  Returns the byte count read, 0 at end
 * of file, or an error code.
 */
/* virtual */ SysStatusUval
FileLinuxFile::pread(const char *buf, uval nbytes, uval offset)
{
    FLFDEBUG("pread");	// was "readv" -- stale copy/paste trace tag

    SysStatusUval rc, rcret;
    char* buf_read;

    lock();

    // retrieve and save current position
    if (buffer == NULL) locked_initBuffer(uval(~0));
    rc = buffer->setFilePosition(0, FileLinux::RELATIVE);
    if (_FAILURE(rc)) {
	// previously returned via _IF_FAILURE_RET while still holding
	// the lock; release it before bailing out
	unLock();
	return rc;
    }
    uval savedOffset = _SGETUVAL(rc);
    rc = buffer->setFilePosition(offset, FileLinux::ABSOLUTE);
    // tassertMsg only because it didn't fail on the first call, it should
    // work now
    tassertMsg(_SUCCESS(rc), "rc 0x%lx\n", rc);

    rcret = locked_readAlloc(nbytes, buf_read, NULL);

    // restore previous file offset
    rc = buffer->setFilePosition(savedOffset, FileLinux::ABSOLUTE);
    // tassertMsg only because it didn't fail on the first 2 calls, it should
    // work now
    tassertMsg(_SUCCESS(rc), "rc 0x%lx\n", rc);

    // return value from locked_readAlloc
    if (_FAILURE(rcret)) {
	//semantics shift from EOF error to 0 length on EOF
	// (must test rcret, the read result; rc holds the unrelated
	// setFilePosition status -- previous code tested rc here)
	if (_SCLSCD(rcret) == FileLinux::EndOfFile) {
	    rcret = _SRETUVAL(0);
	}
    } else {
	tassertMsg(_SGETUVAL(rcret) > 0, "?");
	memcpy((void*)buf, buf_read, _SGETUVAL(rcret));
	locked_readFree(buf_read);
    }

    unLock();
    return rcret;
}
Exemplo n.º 22
0
int main(int argc, char *argv[])
{
    NativeProcess();

    VPNum numVP;
    SysStatus rc;

    if (argc != 1) {
	cprintf("Usage: %s\n", argv[0]);
	return 8;
    }

    // Figure out how many processors there are:
    numVP = DREFGOBJ(TheProcessRef)->ppCount();
    cprintf("There are %ld processors\n", uval(numVP));

    // Create a virtual processor for each physical processor:
    for (VPNum vp = 1; vp < numVP; vp++) {
	rc = ProgExec::CreateVP(vp);
	passert(_SUCCESS(rc), {});
    }

    SimpleThread *threads[numVP];

    // Create a thread on each processor:
    for (VPNum vp=0; vp < numVP; vp++) {
	cprintf("Spawning thread %ld\n", vp);
	threads[vp] = SimpleThread::Create(child_func, (void *)vp,
					   SysTypes::DSPID(0, vp));
	passert(threads[vp] != 0, err_printf("Thread creation failed\n"));
    }

    // Wait for those threads to finish:
    for (VPNum vp=0; vp < numVP; vp++) {
	SysStatus rc;
	rc = SimpleThread::Join(threads[vp]);
	cprintf("Thread %ld terminated, status = %ld\n", vp, rc);
    }

    return 0;
}
Exemplo n.º 23
0
/*
 * Pushes the vecSize strings of inVec onto a stack region that is
 * mapped locally at localTop but will appear in the target process at
 * remoteTop.  Strings are copied downward, each start aligned down to a
 * WORD boundary; outVec receives the *remote* address of each copy.
 * On success both localTop and remoteTop are lowered by the space used.
 * Fails with ENOMEM if the strings would extend below bottom.
 */
static SysStatus
PushStrings(char *const inVec[], WORD* outVec, uval vecSize,
	    uval &remoteTop, uval &localTop, uval bottom)
{
    uval offset = localTop - remoteTop;	// local-to-remote address delta
    uval top = localTop;
    while (vecSize) {
	--vecSize;
	int len = strlen(inVec[vecSize])+1;	// include NUL terminator
	top -= len;
	top &= ~(sizeof(WORD)-1); //Align down to 8-bytes

	if (top<bottom) return _SERROR(1324, 0, ENOMEM);
	memcpy((char*)top, inVec[vecSize], len);
	//outVec contains translated addresses
	outVec[vecSize] = (WORD)(top - offset);
    }
    remoteTop -= localTop - top;
    localTop = uval(top);
    return 0;
}
Exemplo n.º 24
0
/*
 * Writes up to nbytes at absolute position offset from buf without
 * disturbing the stream's file position (pwrite(2) semantics): the
 * current position is saved, the write is performed through the buffer,
 * and the position is restored.  Returns the byte count written or an
 * error code.
 */
/* virtual */ SysStatusUval
FileLinuxFile::pwrite(const char *buf, uval nbytes, uval offset)
{
    FLFDEBUG("pwrite");	// was "readv" -- stale copy/paste trace tag

    SysStatusUval rc, rcret;
    char* buf_write;

    lock();

    // retrieve and save current position
    if (buffer == NULL) locked_initBuffer(uval(~0));
    rc = buffer->setFilePosition(0, FileLinux::RELATIVE);
    if (_FAILURE(rc)) {
	// previously returned via _IF_FAILURE_RET while still holding
	// the lock; release it before bailing out
	unLock();
	return rc;
    }
    uval savedOffset = _SGETUVAL(rc);
    rc = buffer->setFilePosition(offset, FileLinux::ABSOLUTE);
    // tassertMsg only because it didn't fail on the first call, it should
    // work now
    tassertMsg(_SUCCESS(rc), "rc 0x%lx\n", rc);

    rcret = locked_writeAlloc(nbytes, buf_write, NULL);

    // restore previous file offset
    rc = buffer->setFilePosition(savedOffset, FileLinux::ABSOLUTE);
    // tassertMsg only because it didn't fail on the first 2 calls, it should
    // work now
    tassertMsg(_SUCCESS(rc), "rc 0x%lx\n", rc);

    if (_SUCCESS(rcret)) {
	uval length_write = _SGETUVAL(rcret);
	if (length_write) {
	    memcpy(buf_write, (void*)buf, length_write);
	    locked_writeFree(buf_write);
	}
    }

    unLock();
    return rcret;
}
Exemplo n.º 25
0
// if pathNameTo is provided, lookup file before that
SysStatus
FileSystemK42RamFS::lookup(PathName *pathName, uval pathLen,
			   PathName *pathNameTo, FileToken *retToken)
{
    PathName *currentName = pathName;
    PathName *endName;
    char buf[PATH_MAX+1];
    FileToken rtoken, entryToken;

    if (pathNameTo) {
	endName = pathNameTo;
    } else {
	endName = (PathName*)(uval(pathName) + pathLen);
    }
    tassertMsg((currentName <= endName), "currentName > endName");

    SysStatus rc;

    rtoken = root;

    uval currentNameLen = pathLen;
    while (currentName < endName) {
	currentNameLen = currentName->getCompLen(currentNameLen);
	memcpy(buf, currentName->getCompName(currentNameLen), currentNameLen);
	buf[currentNameLen] = 0;

	rc = lookup(rtoken, buf, currentNameLen, &entryToken);
	_IF_FAILURE_RET(rc);

	rtoken = entryToken;
	currentName = currentName->getNext(currentNameLen);
    }

    *retToken = rtoken;

    return 0;
}
Exemplo n.º 26
0
/*
 * This routine may need to drop locks to handle cases where the page is
 * not in the cache.  To reinforce this, locks should not be held when
 * calling this routine.  Lock is held on return so that operation can
 * complete automatically.  In the future, we may return with only the
 * page individually locked.
 * N.B. lock is NOT held if no page found and can't allocate one
 */
/*
 * This routine may need to drop locks to handle cases where the page is
 * not in the cache.  To reinforce this, locks should not be held when
 * calling this routine.  Lock is held on return so that operation can
 * complete automatically.  In the future, we may return with only the
 * page individually locked.
 * N.B. lock is NOT held if no page found and can't allocate one
 */
/*
 * Looks up fileOffset in the page cache; on a miss, allocates a new
 * page descriptor and frame, setting needsIO so the caller starts the
 * read.  rc reports frame-allocation failure (NULL returned, lock
 * dropped).  With ENABLE_FCM_SWITCHING, lock contention triggers a
 * hot-swap of this FCM to the multi-rep implementation.
 */
PageDesc *
FCMDefault::findOrAllocatePageAndLock(uval fileOffset, SysStatus &rc,
				      uval &needsIO, uval flags)
{
    PageDesc *pg;
    uval paddr;

    needsIO = 0;
    rc = 0;

#ifndef ENABLE_FCM_SWITCHING
    lock.acquire();
#else /* #ifndef ENABLE_FCM_SWITCHING */
    if (!lock.tryAcquire()) {
	if (!KernelInfo::ControlFlagIsSet(KernelInfo::RUN_SILENT)) {
	    //err_printf("Lock contended... initiating switch.\n");
	}
	//breakpoint();

	// contention detected: switch this FCM to the multi-rep
	// (scalable) implementation, then retry the lock
	((FCMDefaultRoot *)myRoot)->switchImplToMultiRep();
	// use above for normal switching below for testing null hot swap times
	//if (performedSwitch == 0) {
	//    TraceOSClustObjSwapStart((uval64)myRoot);
	//    ((FCMDefaultRoot *)myRoot)->
	//	switchImplToSameRep((FCMDefaultRoot *)myRoot);
	//    TraceOSClustObjSwapDone((uval64)myRoot);
	//    performedSwitch = 1;
	//}

	lock.acquire();
    }
#endif /* #ifndef ENABLE_FCM_SWITCHING */

    pg = findPage(fileOffset);

    if (pg != NULL) {

	//err_printf("H");
	TraceOSMemFCMDefFoundPage(fileOffset, pg->paddr);

	if (pg->doingIO) {
	    // page is wanted again: cancel any pending free-after-IO
	    pg->freeAfterIO = PageDesc::CLEAR;
	}

    } else {
	//err_printf("[TID#%lx:]", Scheduler::GetCurThread());
	//err_printf("X");
	// allocate a new page
	pg = addPage(fileOffset, uval(-1), pageSize);
	pg->doingIO = PageDesc::SET;
	needsIO = 1;

	rc = getFrame(paddr,flags);

	if (_FAILURE(rc)) {
	    // must clean up pagedesc we added and wakeup anyone waiting
	    // notify but retain lock
	    notify(pg, 0, 0, 1);
	    pageList.remove(fileOffset);
	    lock.release();
	    return NULL;	// no paging space
	}

	// traceStep8: add the code in a .C/.H file to log the event
	// an example may be found in mem/FCMDeafult.C traceStep8
	TraceOSMemFCMDefGetPage(fileOffset, paddr);

	// set pagedesc paddr
	pg->paddr = paddr;

	// indicate that we have taken ownership of page
	//PageAllocatorKernPinned::initFrameDesc(paddr, getRef(),
	//				       fileOffset, 0);
    }

    return pg;
}
Exemplo n.º 27
0
/*
 * Registers a use-type callback object with the file server so the
 * server can later demand a switch to SHARED use or query cached
 * length/offset.  The server replies with the resolved use type plus
 * the current file length/offset, which are cached in bufferInitData
 * for later (lazy) buffer construction.  Caller must hold streamLock.
 */
SysStatus
FileLinuxFile::locked_registerCallBackUseType()
{
    FLFDEBUG("locked_registerCallBackUseType");

     _ASSERT_HELD(streamLock);

    tassertMsg(callBackUseTypeRegistered==0, "registered already?\n");

#ifdef LAZY_SHARING_SETUP
    tassertMsg((useType == LAZY_INIT),
	       "invalid useType %ld\n", (uval) useType);
#else
    // FIXME: in the near future we should have SHARED objects registering
    // themselves also!
    tassertMsg(useType == NON_SHARED, "invalid useType %ld\n", (uval) useType);
#endif // #ifdef LAZY_SHARING_SETUP

    SysStatus rc;
    ObjectHandle callbackOH;

    rc = CallBackUseType::Create(callBackUseTypeObjRef,
				 (FileLinuxFileRef) getRef(), &destroyAckSync);
    tassertMsg(_SUCCESS(rc), "Create failed\n");

    // give the server an object handle it can invoke the callback on
    rc = DREF(callBackUseTypeObjRef)->giveAccessByServer(callbackOH,
						      stub.getPid());
    tassertMsg(_SUCCESS(rc), "giveAccess Failed\n");

    if (_SUCCESS(rc)) {
	uval flen, offset;
	uval ut = useType;	// in/out: server may resolve the use type
	tassertMsg(stub.getOH().valid(), "stub not valid\n");
	rc = stub._registerCallback(callbackOH, FileLinux::USETYPE_CALL_BACK,
				    ut, flen, offset);
	tassertMsg(_SUCCESS(rc),
		   "error register callback rc=(%ld,%ld,%ld)\n",
		   _SERRCD(rc), _SCLSCD(rc), _SGENCD(rc));
	passertMsg((ut == NON_SHARED || ut == SHARED), "ut %ld", (uval) ut);
	tassertMsg(flen != uval(~0), "flen %ld\n", flen);

	if (ut == SHARED) {
	    tassertMsg(buffer == NULL, "?");
	    bufferInitData.fileLength = flen;
	    // not setting up offset because it'll get from the server anyway?
	} else {
#ifdef LAZY_SHARING_SETUP
	    if (useType == NON_SHARED) {
		// actually this is not possible ...
		passertMsg(0, "impossible\n");
	    } else {
		tassertMsg(useType == LAZY_INIT, "was %ld\n",
			   (uval) useType);
		tassertMsg(buffer == NULL, "?");
		bufferInitData.initialOffset = offset;
		bufferInitData.fileLength = flen;
	    }
#else
	    tassertMsg(buffer == NULL, "?");
	    bufferInitData.initialOffset = offset;
	    bufferInitData.fileLength = flen;
#endif // #ifdef LAZY_SHARING_SETUP
	}
	useType = (UseType) ut;
    } else {
	tassertMsg(0, "????");
    }

    callBackUseTypeRegistered = 1;

    return rc;

}
Exemplo n.º 28
0
/*
 * Scans the page list, unmapping and moving eligible pages (not busy
 * with IO, not established/pinned) to the free list until a target
 * fraction is free: 1/32 of pages normally, 1/8 when memory is LOW,
 * everything when CRITICAL.  The scan resumes from nextOffset across
 * calls so successive scans cycle through the whole list.  Caller must
 * hold the FCM lock.
 */
void
FCMDefault::locked_pageScan(PM::MemLevelState memLevelState)
{
    uval numScan, numFree, numFreeTarget;
    uval i;
    PageDesc *pg, *nextpg;
    uval numToWrite;
    uval gaveBack, unmapped, doingIO, established;

    if (beingDestroyed) {
	return;
    }

    numScan =  pageList.getNumPages();
    numFree = pageList.getNumPagesFree();
    numFreeTarget = numScan>>5;
    if (PM::IsLow((memLevelState))) {
	if (PM::IsCrit(memLevelState)) {
	    numFreeTarget = numScan;
	} else {
	    numFreeTarget = numScan>>3;
	}
    }
    if (numFreeTarget == 0) {
	numFreeTarget = 1;
    }
    if (numFree>=numFreeTarget) {
	return;				// already have enough free pages
    }

#ifdef PAGING_VERBOSE
    err_printf("FCM %p scanning %ld out of %ld pages\n", getRef(),
	       numScan, pageList.getNumPages());
#endif /* #ifdef PAGING_VERBOSE */
    numToWrite = 0;
    gaveBack = unmapped = doingIO = established = 0;
    // resume the clock-style scan where the previous call left off
    nextpg = pageList.getNext(nextOffset);
    for (i = 0; i < numScan; i++) {
	pg = nextpg;
	if (pg == NULL) {
	    // either the end or empty
	    pg = pageList.getNext(uval(-1));
	    if (pg == NULL) break;
	}
	nextpg = pageList.getNext(pg);
	if (nextpg == NULL) {
	    // offset of -1 starts searching from the beginning
	    nextOffset = uval(-1);
	} else {
	    nextOffset = nextpg->fileOffset;
	}
#if 0
	err_printf("FCM %lx got page %lx/%lx, d %ld, io %ld, m %ld\n",
		   getRef(), pg->fileOffset, pg->paddr, pg->dirty,
		   pg->doingIO, pg->mapped);
#endif /* #if 0 */
	if (pg->doingIO) {
	    doingIO++;			// busy with IO; skip
	    continue;
	}
	if (pg->established) {		// pinned pages, just quietly skip
	    established++;
	    continue;
	}
	if (pg->mapped) {
	    tassert(!pg->free, err_printf("oops\n"));
	    unmapPage(pg);
	    tassert(pg->ppset == 0 && !pg->mapped, err_printf("oops\n"));
	    unmapped++;
	}
	tassert(pg->ppset == 0, err_printf("oops\n"));
	if (!pg->free) {
	    pg->free = PageDesc::SET;
	    pageList.enqueueFreeList(pg);
	    gaveBack++;
	    numFree++;
	    // stop when freelist is 1 percent of pages
	    if (numFree>=numFreeTarget) break;
	}
    }

#ifdef PAGING_VERBOSE
    err_printf("FCM %p: g %ld, um %ld, io %ld, est %ld, scan %ld, tot %ld\n",
	       getRef(), gaveBack, unmapped, doingIO, established, i, numFree);
#endif /* #ifdef PAGING_VERBOSE */

    return;
}
Exemplo n.º 29
0
/*
 * Fork support: fetches the page at callerPg->fileOffset on behalf of a
 * forking child FCM.  Returns FRAMETOCOPY with the page locked for IO
 * (returnUval carries the PageDesc* the caller must use to unlock),
 * DOINGIO if the page is being read in (fn will be posted), or a
 * failure code.  The FCM lock is released before returning.
 */
SysStatusUval
FCMDefault::getForkPage(
    PageDesc* callerPg, uval& returnUval, FCMComputationRef& childRef,
    PageFaultNotification *fn, uval copyOnWrite)
{
    SysStatusUval rc;
    PageDesc *pg;
    uval fileOffset;

    fileOffset = callerPg->fileOffset;

    rc = getPageInternal(fileOffset, fn, pg, copyOnWrite);

    /*
     * rc == 0 if page was gotten and FCM was locked
     * rc >0 if io is in progress and fn will be posted
     * rc <0 failure
     */

    if (_SUCCESS(rc) && (rc == 0)) {
	//available
	callerPg->paddr = pg->paddr;

// cant do copyonwrite to we can call back to unmap
#if 0
	/* check for copy on write first.  Thus, a frame that
	 * we could give to the child is instead mapped
	 * copyOnWrite.
	 *
	 * The normal case is a child/parent pair (the shell)
	 * continuously creating a new second child which then
	 * terminates.
	 *
	 * In that case, this order collects all the read only
	 * data pages in the fork parent, and they never are
	 * unmapped in the shell.  Only the written pages
	 * will be moved from the shell child to the parent
	 * at fork.
	 *
	 * The down side is that a read/write sequence on a
	 * page which could be moved up will be more expensive,
	 * particularly if the page is already dirty so we could
	 * have mapped it read/write in the child immediately.
	 *
	 * The alternative is to move this check below the
	 * check for giving the frame to the parent.
	 */

	if (copyOnWrite) {
	    //caller can accept copy on write mapping
	    callerPg->paddr = pg->paddr;
	    pg->ppset |= uval(1) << Scheduler::GetVP();

	    if (pg->cacheSynced == PageDesc::CLEAR) {
		// Machine dependent operation.
		setPFBit(CacheSynced);
		CacheSync(pg);
	    }

	    if (pg->mapped != PageDesc::SET) {
		// mark page mapped
		pg->mapped = PageDesc::SET;
		// also mark framearray to indicate page is now mapped
		// PageAllocatorKernPinned::setAccessed(pg->paddr);
	    }
	    lock.release();
	    return MAPPAGE;
	}
#endif
	// page lock held until unLockPage call
	pg->doingIO = PageDesc::SET;

	// copy our ppset to caller to it can unmap if needed
	// (only happens if copyonwrite logic is enabled)
	callerPg->ppset = pg->ppset;
	callerPg->mapped = pg->mapped;
	returnUval = uval(pg);	// caller needs this to unlock
	lock.release();
	return FRAMETOCOPY;
    }

    // doingIO
    return (rc>0)?DOINGIO:rc;
}
Exemplo n.º 30
0
/*
 * Duplicates this open file: pushes any locally-buffered length/offset
 * back to the server (NON_SHARED case), asks the server for a new
 * object handle via _dup (which also resolves the duplicate's use
 * type), and creates the new FileLinux object over that handle.
 */
/*virtual*/ SysStatus
FileLinuxFile::dup(FileLinuxRef& newfile)
{
    FLFDEBUG("dup");

    ObjectHandle newoh;
    SysStatus rc;
    uval flength, offset;

    streamLock.acquire();

    if (useType == NON_SHARED && buffer) {
	tassertMsg(callBackUseTypeRegistered == 1, "?");
	// file has been actually used locally; need to update server
	rc = buffer->getLengthOffset(flength, offset);
	tassertMsg(_SUCCESS(rc), "?");
	rc = stub._setLengthOffset(flength, offset);
	tassertMsg(_SUCCESS(rc), "?");
	(void) buffer->afterServerUpdate();
    } else { // offset information irrelevant to server
	// NOTE(review): only offset gets the sentinel here; flength is
	// passed to _dup below possibly uninitialized -- confirm the
	// server ignores it on this path
	offset = uval(~0);
    }

    tassertMsg((useType == SHARED || useType == NON_SHARED ||
		useType == FIXED_SHARED || useType == LAZY_INIT),
	       "how come?\n useType is %ld", (uval) useType);
    uval ut, newUseType;
    ut = (uval) useType;

#ifndef LAZY_SHARING_SETUP
    passertMsg(useType != LAZY_INIT, "?");
    streamLock.release();
#endif

#ifndef LAZY_SHARING_SETUP
    // FIXME: if we don't have the lazy thing in, we don't actually need to
    // send our useType ...
#endif // #ifndef LAZY_SHARING_SETUP

    rc = stub._dup(_SGETPID(DREFGOBJ(TheProcessRef)->getPID()), ut,
		   newUseType, flength, offset, newoh);

#ifdef DEBUG_USE_TYPE
    char name[255];
#ifdef HACK_FOR_FR_FILENAMES
    SysStatusUval rclen = stub._getFileName(name, sizeof(name));
    tassertMsg(_SUCCESS(rclen), "?");
#else
    name[0] = '\0';
#endif // #ifdef HACK_FOR_FR_FILENAMES
    err_printf("FileLinuxFile.C::dup, file %s, pid 0x%lx, gotback newUseType %ld,"
	       " rc 0x%lx\n", name,
	       (uval) _SGETPID(DREFGOBJ(TheProcessRef)->getPID()),
	       newUseType, rc);
#endif // #ifdef DEBUG_USE_TYPE

    // NOTE(review): in the LAZY_SHARING_SETUP build streamLock is still
    // held here, so this failure return leaks the lock -- confirm
    _IF_FAILURE_RET(rc);

#ifdef LAZY_SHARING_SETUP
    if (ut == LAZY_INIT) {
	tassertMsg(buffer == NULL, "?");
    }
#endif // #ifdef LAZY_SHARING_SETUP

#ifdef LAZY_SHARING_SETUP
    tassertMsg((newUseType == SHARED || newUseType == NON_SHARED ||
		newUseType == FIXED_SHARED || newUseType == LAZY_INIT),
	       "how come?\n newUseType after _dup is %ld", newUseType);
#else
    tassertMsg(newUseType == SHARED || newUseType == NON_SHARED
	       || newUseType == FIXED_SHARED,
	       "how come?\n newUseType after _dup is %ld (without LAZY_SHARING)",
	       newUseType);
#endif // #ifdef LAZY_SHARING_SETUP

#ifdef LAZY_SHARING_SETUP
    streamLock.release();
#endif // #ifdef LAZY_SHARING_SETUP

    return Create(newfile, newoh, openFlags, newUseType);
}