Example #1
TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

	if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
		{
		// codeseg memory address is globally unique (a common address across all processes)...
		FlagSet(createFlags,EMappingCreateCommonVirtual);
		}

	if(aMemory->iCodeSeg->IsExe())
		{
		// EXE codesegs have already had their virtual address allocated so we must adopt that...
		__NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
		__NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
		iCodeVirtualAllocSize = 0;
		iCodeVirtualAllocAddress = 0;
		FlagSet(createFlags,EMappingCreateAdoptVirtual);
		}

	DMemoryMapping* mapping;
	return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
	}
Example #2
TInt DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	DestroyMappings(client, KMaxTInt);
	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-iTotalBuffers);
		}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	return r;
	}
Example #3
void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
	{
	// Move all queued user-mode callbacks from the source thread to the destination thread, and
	// prevent any more from being queued.  Used by the kernel thread code so that callbacks get
	// cancelled in another thread if the thread they were originally queued on dies.

	// Atomically remove list of callbacks and set pointer to 1
	// The latter ensures any subsequent attempts to add callbacks fail
	TUserModeCallback* sourceListStart =
		(TUserModeCallback*)__e32_atomic_swp_ord_ptr(&aSrcThread->iUserModeCallbacks, (TAny*)1);
	__NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0);  // check this only gets called once per thread

	if (sourceListStart == NULL)
		return;
	
	TUserModeCallback* sourceListEnd = sourceListStart;
	while (sourceListEnd->iNext != NULL)
		sourceListEnd = sourceListEnd->iNext;
	
	NKern::Lock();
	TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
	do
		{
		__NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0);  // dest thread must not die
		sourceListEnd->iNext = destListStart;
		} while (!__e32_atomic_cas_ord_ptr(&aDestThread->iUserModeCallbacks, &destListStart, sourceListStart));
	NKern::Unlock();
	}
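The splice above combines two lock-free idioms: an unconditional atomic swap that claims the whole source list (writing 1, a non-pointer sentinel, so any later queue attempt can detect the claim and fail), and a CAS retry loop that re-links and pushes the claimed chain onto the destination. A minimal sketch of the same pattern in portable C++, using a hypothetical Node type rather than the kernel's TUserModeCallback:

#include <atomic>

struct Node { Node* next; };

// Claim an entire lock-free list, leaving a non-pointer sentinel (1) behind so
// that later attempts to push onto the source can detect the claim and fail.
Node* Claim(std::atomic<Node*>& aHead)
	{
	return aHead.exchange(reinterpret_cast<Node*>(1), std::memory_order_acq_rel);
	}

// Splice a pre-linked chain [aFirst..aLast] onto aHead with a CAS retry loop.
void PushChain(std::atomic<Node*>& aHead, Node* aFirst, Node* aLast)
	{
	Node* oldHead = aHead.load(std::memory_order_relaxed);
	do
		{
		aLast->next = oldHead;	// re-link the tail on every retry, as the kernel loop does
		}
	while (!aHead.compare_exchange_weak(oldHead, aFirst,
	                                    std::memory_order_release,
	                                    std::memory_order_relaxed));
	}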
Example #4
void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
	{
	// Move all queued user-mode callbacks from the source thread to the destination thread, and
	// prevent any more from being queued.  Used by the kernel thread code so that callbacks get
	// cancelled in another thread if the thread they were originally queued on dies.

	NKern::Lock();	
	TUserModeCallback* sourceListStart = aSrcThread->iUserModeCallbacks;
	aSrcThread->iUserModeCallbacks = (TUserModeCallback*)1;
	NKern::Unlock();
	__NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0);  // check this only gets called once per thread

	if (sourceListStart == NULL)
		return;

	TUserModeCallback* sourceListEnd = sourceListStart;
	while (sourceListEnd->iNext != NULL)
		sourceListEnd = sourceListEnd->iNext;

	NKern::Lock();
	TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
	__NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0);
	sourceListEnd->iNext = destListStart;
	aDestThread->iUserModeCallbacks = sourceListStart;
	NKern::Unlock();
	}
Example #5
/**
@return True if segment still exists, false if segment was deleted.
*/
TBool RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TSegment* s = aSegment;
	__NK_ASSERT_DEBUG(s);

	TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
	__NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing

#ifdef _DEBUG
	if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
		{
		// check alloc count is consistent...
		TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
		__NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
		TUint realAllocCount = 0;
		TPhysAddr* p = s->iPages;
		TPhysAddr* pEnd = p+KPageArraySegmentSize;
		do
			{
			if(IsPresent(*p++))
				++realAllocCount;
			}
		while(p<pEnd);
		if(realAllocCount!=allocCount)
			{
			Kern::Printf("TSegment::Unlock alloc count mismatch %u!=%u",realAllocCount,allocCount);
			__NK_ASSERT_DEBUG(0);
			}
		}
#endif

	// NB: the remainder below is a sketch inferred from the @return contract;
	// the original listing was truncated here. If this unlock removed the last
	// lock and no pages remain allocated, both packed counts are now zero.
	if(oldCounts!=aCount)
		return ETrue; // locks or allocated pages remain; segment still exists

	aSegment = Delete(s);
	return EFalse; // segment was deleted
	}
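The arithmetic above relies on iCounts packing two fields into one word: the low bits hold the lock count (selected by KPageArraySegmentLockCountMask) and the high bits hold the allocation count (recovered via KPageArraySegmentAllocCountShift). A sketch of the unpacking, with an illustrative 16/16 split assumed for the real constants:

// Illustrative layout only; the real constants are defined in the page-array headers.
// const TUint32 KPageArraySegmentLockCountMask   = 0x0000ffffu;	// low bits: lock count
// const TUint   KPageArraySegmentAllocCountShift = 16;				// high bits: alloc count
TUint lockCount  = s->iCounts & KPageArraySegmentLockCountMask;
TUint allocCount = s->iCounts >> KPageArraySegmentAllocCountShift;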
Example #6
/** Starts a nanokernel timer in one-shot mode with ISR or DFC callback.
	
	Queues the timer to expire in the specified number of nanokernel ticks. The
	actual wait time will be at least that much and may be up to one tick more.
	For normal timers (constructed with NTimerFn) the expiry handler will be
	called in either ISR context or in the context of the nanokernel timer
	thread (DfcThread1). For mutating timers (constructed with TDfcFn) the
	expiry handler is called in the context of the thread running the relevant
	TDfcQue.

    Note that NKern::TimerTicks() can be used to convert milliseconds to ticks.

	@param	aTime Timeout in nanokernel ticks
	@param	aDfc TRUE if DFC callback required, FALSE if ISR callback required.
			Note that this parameter is ignored for mutating timers.
	
	@return	KErrNone if no error
	@return	KErrInUse if timer is already active.
	@return	KErrDied if tied thread/group has exited
	
	@pre	Any context
	
	@see    NKern::TimerTicks()
 */
EXPORT_C TInt NTimer::OneShot(TInt aTime, TBool aDfc)
	{
	__NK_ASSERT_DEBUG(aTime>=0);
	/** iFn could be set to NULL after an NTimer::OneShot(TInt, TDfc&) call.
	The call-back mechanism cannot be changed during the lifetime of a timer. */
	__NK_ASSERT_DEBUG(iFn!=NULL);

	TInt irq = TheTimerQ.iTimerSpinLock.LockIrqSave();
	if (!IsValid())
		{
		TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
		return KErrDied;
		}
	TUint16 state = i8816.iHState16;
	if (IsNormal())
		state &= 0xFF;
	else
		aDfc = FALSE;	// mutating timers start as ISR completion
	if (state!=EIdle)
		{
		TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
		return KErrInUse;
		}
	mb();	// ensure that if we observe an idle state all accesses to the NTimer have also been observed
	i_NTimer_iCompleteInDfc=TUint8(aDfc?1:0);
	iTriggerTime=TheTimerQ.iMsCount+(TUint32)aTime;
	TheTimerQ.Add(this);
	TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
	return KErrNone;
	}
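A hedged usage sketch tying in the NKern::TimerTicks() note above; the driver class, handler and member names are assumptions, not part of the API:

// iTimer was constructed as NTimer(TimeoutFn, this), TimeoutFn being a static NTimerFn.
void DMyDriver::StartTimeout()
	{
	TInt r = iTimer.OneShot(NKern::TimerTicks(100), EFalse);	// expire in ~100ms, ISR-context callback
	if (r == KErrInUse)
		{
		// already queued; cancel before re-arming
		}
	}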
Example #7
void TScheduler::Remove(NThreadBase* aThread)
	{
	__NK_ASSERT_DEBUG(!aThread->iHeldFastMutex);	// can't block while holding fast mutex
	iMadeUnReadyCounter++;
	aThread->iTime=aThread->iTimeslice;		// thread has blocked so it gets a fresh timeslice for next time
	TPriListBase::Remove(aThread);
	}
Example #8
EXPORT_C void TAsyncRequest::Send(NFastSemaphore* aCompletionSemaphore)
	{
	__NK_ASSERT_DEBUG(!iCompletionObject);
	iCancel = EFalse;
	iCompletionObject = aCompletionSemaphore;
	TDfc::Enque();
	}
Example #9
TInt DMemModelAlignedShBuf::UnMap(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::UnMap()"));

	TInt r = KErrNone;

	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	DShBufMapping* m = NULL;
	TWait wait;

	for(;;)
		{
		iPool->LockPool();
		r = FindMapping(m, pP);

		if (r != KErrNone)
			{
			iPool->UnlockPool();
			return KErrNotFound;
			}

		if (m->iTransitioning)
			{
			wait.Link(m->iTransitions);
			iPool->UnlockPool();
			wait.Wait();
			}
		else
			{
			break;
			}
		}

	m->iTransitioning = ETrue;
	iPool->UnlockPool();

	MM::MappingUnmap(m->iMapping);

	iPool->LockPool();
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);

	TWait* list = m->iTransitions;
	m->iTransitions = NULL;
	m->iObjLink.Deque();
	m->iTransitioning = EFalse;

	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
	pool->ReleaseMapping(m, client);

	if (aProcess == K::TheKernelProcess)
	    iRelAddress = NULL;

	iPool->UnlockPool();

	wait.SignalAll(list);
	return KErrNone;
	}
Example #10
TInt DMemModelAlignedShPool::ReleaseMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping(0x%08x,0x%08x)",aMapping,aClient));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNone;

	if (aClient)
		{
		aClient->iMappingFreeList.AddHead(&aMapping->iObjLink);
		aMapping = NULL;
		}
	else
		{
		// pool has probably been closed, so delete the mapping
		r = KErrNotFound;
		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping delete 0x%08x",aMapping));
		UnlockPool(); // have to release fast lock for MappingDestroy
		MM::MappingDestroy(aMapping->iMapping);
		delete aMapping;
		aMapping = NULL;
		LockPool();
		}

	return r;
	}
Example #11
void DMemModelThread::SetPaging(TUint& aCreateFlags)
	{
	TUint pagingAtt = aCreateFlags & EThreadCreateFlagPagingMask;
	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging))
		{// No paging policy set or no data paging device installed.
		pagingAtt = EThreadCreateFlagUnpaged;
		}
	else if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		{
		pagingAtt = EThreadCreateFlagPaged;
		}
	else if (pagingAtt == EThreadCreateFlagPagingUnspec)
		{// No data paging attribute specified for this chunk so use the process's.
		if (iOwningProcess->iAttributes & DProcess::EDataPaged)
			pagingAtt = EThreadCreateFlagPaged;
		else
			pagingAtt = EThreadCreateFlagUnpaged;
		}
#ifdef _DEBUG
	else
		{
		__NK_ASSERT_DEBUG(	pagingAtt == EThreadCreateFlagPaged || 
							pagingAtt == EThreadCreateFlagUnpaged);
		}
#endif
	// Save the paging attributes for when the stack and heap are created later.
	aCreateFlags &= ~EThreadCreateFlagPagingMask;
	aCreateFlags |= pagingAtt;
	}
Example #12
EXPORT_C void TAsyncRequest::Send(TDfc* aCompletionDfc)
	{
	__NK_ASSERT_DEBUG(!iCompletionObject);
	iCancel = EFalse;
	iCompletionObject = (TAny*)((TLinAddr)aCompletionDfc|1);
	TDfc::Enque();
	}
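Setting the low bit of the stored pointer lets the single iCompletionObject field carry either completion type: NFastSemaphore and TDfc objects are word-aligned, so bit 0 is free to act as a discriminator between the two Send() overloads above. A sketch of how the completing side could decode the field (the dispatch shown is an assumption, not the kernel's actual completion path):

TLinAddr obj = (TLinAddr)iCompletionObject;
if (obj & 1)
	((TDfc*)(obj & ~(TLinAddr)1))->Enque();		// Send(TDfc*) case
else if (obj)
	NKern::FSSignal((NFastSemaphore*)obj);		// Send(NFastSemaphore*) case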
Example #13
TUint8* DMemModelNonAlignedShPool::Base(DProcess* aProcess)
	{
	TUint8 *base = 0;

	LockPool();
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client); // ASSERT because pool must already be opened in the client's address space
	__NK_ASSERT_DEBUG(client->iMapping); // ASSERT because non-aligned buffers are mapped by default in user space

	base = reinterpret_cast<TUint8*>(MM::MappingBase(client->iMapping));

	UnlockPool();

	return base;
	}
Example #14
/**
Determine whether this process should be data paged.

@param aInfo	A reference to the create info for this process.
 */
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
	{
	TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
	// If KImageDataPaged and KImageDataUnpaged flags present then corrupt
	// Check this first to ensure that it is always verified.
	if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
		{
		return KErrCorrupt;
		}

	if (aInfo.iAttr & ECodeSegAttKernel ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging))
		{// Kernel process shouldn't be data paged or no data paging device installed.
		return KErrNone;
		}

	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		{
		iAttributes |= EDataPaged;
		return KErrNone;
		}
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{// No paging allowed so just return.
		return KErrNone;
		}
	if (pagedFlags == TProcessCreateInfo::EDataPaged)
		{
		iAttributes |= EDataPaged;
		return KErrNone;
		}
	if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
		{// No paging set so just return.
		return KErrNone;
		}
	// Neither paged nor unpaged set so use default paging policy.
	// dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or 
	// EKernelConfigDataPagingPolicyDefaultPaged.
	__NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
	__NK_ASSERT_DEBUG(	dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
						dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
	if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
		{
		iAttributes |= EDataPaged;
		}
	return KErrNone;
	}
Example #15
void DMemModelShPool::DestroyClientResources(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DestroyClientResources"));

	TInt r = DestroyAllMappingsAndReservedHandles(aProcess);
	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
	(void)r;		// Silence warnings
	}
Example #16
DMemModelAlignedShBuf::~DMemModelAlignedShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::~DMemModelAlignedShBuf()"));

	__NK_ASSERT_DEBUG(iMappings.IsEmpty());

	MM::MemoryDestroy(iMemoryObject);
	}
Example #17
/** Construct a DFC specifying a DFC queue.

	@param aFunction = function to call
	@param aPtr = parameter to be passed to function
	@param aDfcQ = pointer to DFC queue which this DFC should use
	@param aPriority = priority of DFC within the queue (0-7)
 */
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority)
	: iPtr(aPtr), iFunction(aFunction), iDfcQ(aDfcQ)
	{
	__NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
	iPriority=TUint8(aPriority);
	iSpare1=0;
	iOnFinalQ=FALSE;
	iQueued=FALSE;
	}
Example #18
/** Create a spin lock

	@publishedPartner
	@released
*/
EXPORT_C TRWSpinLock::TRWSpinLock(TUint aOrder)
	{
	(void)aOrder;
	__NK_ASSERT_DEBUG( (aOrder==TSpinLock::EOrderNone) || ((aOrder&0x7f)<0x20) );
	if (aOrder>=0x80 && aOrder!=TSpinLock::EOrderNone)
		aOrder -= 0x60;
	aOrder |= 0xFF00u;
	iLock = TUint64(aOrder)<<48;	// byte 6 = 00-1F for interrupt, 20-3F for preemption
									// byte 7 = FF if not held
	}
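A worked example of the encoding (the order value 0x85 is assumed purely for illustration): the assertion passes because 0x85 & 0x7f = 0x05 < 0x20, and the constructor then folds the order into byte 6 and marks byte 7 as not held.

// aOrder = 0x85: >=0x80 and !=EOrderNone, so aOrder -= 0x60	-> 0x25 (preemption range 20-3F)
// aOrder |= 0xFF00											-> 0xFF25 (byte 7 = FF: not held)
// iLock = TUint64(0xFF25) << 48							-> 0xFF25000000000000
TRWSpinLock lock(0x85);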
Example #19
TAny* RAddressedContainer::Find(TLinAddr aAddress, TUint& aOffset)
	{
	if(iReadLock)
		__NK_ASSERT_DEBUG(iReadLock->HeldByCurrentThread());
	else
		__NK_ASSERT_DEBUG(CheckWriteLock());

	TUint i = FindIndex(aAddress);
	TEntry* entry = iList+i;
	if(i==0)
		return 0;
	--entry;

	aOffset = aAddress-entry->iAddress;

	TAny* result = entry->iObject;
	__NK_ASSERT_DEBUG(result);
	return result;
	}
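Find behaves as a floor lookup: FindIndex locates the first entry whose address lies above aAddress, so stepping back one entry yields the object whose base address most closely precedes it, with aOffset reporting how far into that object the address falls. A hedged usage sketch (the container and address variables are assumptions):

TUint offset;
TAny* obj = iContainer.Find(addr, offset);
if (obj)
	{
	// addr lies 'offset' bytes into the object whose base is addr - offset
	}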
Example #20
/** Queue a DFC (not an IDFC) from an IDFC or thread with preemption disabled.

	This function is the preferred way to queue a DFC from an IDFC. It should not
	be used to queue an IDFC - use TDfc::Add() for this.

	This function does nothing if the DFC is already queued.

	@return	TRUE if DFC was actually queued by this call
			FALSE if DFC was already queued on entry so this call did nothing
	@pre Call only from IDFC or thread with preemption disabled.
	@pre Do not call from ISR or thread with preemption enabled.

	@see TDfc::Add()
	@see TDfc::Enque()
 */
EXPORT_C TBool TDfc::DoEnque()
	{
	__ASSERT_WITH_MESSAGE_DEBUG(  (NKern::CurrentContext()==NKern::EIDFC )||( NKern::CurrentContext()==NKern::EThread  &&  TheScheduler.iKernCSLocked),"Do not call from ISR or thread with preemption enabled","TDfc::DoEnque");
	__NK_ASSERT_DEBUG(!IsIDFC());
	__ASSERT_WITH_MESSAGE_DEBUG(  iDfcQ, "DFC queue not set", "TDfc::Add");

	// Check not already queued and then mark queued to prevent ISRs touching this DFC
	TBool ok = !TestAndSetQueued();
	if (ok)
		DoEnqueFinal();
	return ok;
	}
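A hedged usage sketch of the precondition: call from an IDFC, or from a thread that has disabled preemption with NKern::Lock(); the member DFC and its construction are assumptions:

// iDfc was constructed earlier as TDfc(WorkFn, this, iDfcQ, 1)
NKern::Lock();
TBool queued = iDfc.DoEnque();	// TRUE: queued now; FALSE: already pending, call was a no-op
NKern::Unlock();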
Example #21
RPageArray::TSegment* RPageArray::TSegment::Delete(TSegment* aSegment)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aSegment->iCounts==0);
#ifdef _DEBUG
	TPhysAddr* p = aSegment->iPages;
	TPhysAddr* pEnd = p+KPageArraySegmentSize;
	do
		{
		TPhysAddr a = *p++;
		if(IsPresent(a))
			{
			Kern::Printf("TSegment Delete with allocated pages! [%d]=0x%08x",p-aSegment->iPages-1,a);
			__NK_ASSERT_DEBUG(0);
			}
		}
	while(p<pEnd);
#endif
	PageSegmentAllocator.Free(aSegment);
	return 0;
	}
Example #22
/** Starts a nanokernel timer in one-shot mode, with the callback running in the thread of the DFC queue to which the supplied DFC belongs.
	
	Queues the timer to expire in the specified number of nanokernel ticks. The
	actual wait time will be at least that much and may be up to one tick more.
	On expiry aDfc will be queued in ISR context.

    Note that NKern::TimerTicks() can be used to convert milliseconds to ticks.

	@param	aTime Timeout in nanokernel ticks
	@param	aDfc DFC to be queued when the timer expires.
	
	@return	KErrNone if no error
	@return	KErrInUse if timer is already active.
	@return	KErrDied if tied thread/group has exited
	
	@pre	Any context
	@pre	Must not be a mutating timer (constructed with TDfcFn)
	
	@see    NKern::TimerTicks()
 */
EXPORT_C TInt NTimer::OneShot(TInt aTime, TDfc& aDfc)
	{
	__NK_ASSERT_DEBUG(!IsMutating());
	__NK_ASSERT_DEBUG(aTime>=0);
	TInt irq = TheTimerQ.iTimerSpinLock.LockIrqSave();
	if (iHType != EEventHandlerNTimer)
		{
		TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
		return KErrDied;
		}
	if (i_NTimer_iState!=EIdle)
		{
		TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
		return KErrInUse;
		}
	mb();	// ensure that if we observe an idle state all accesses to the NTimer have also been observed
	i_NTimer_iCompleteInDfc = 0;
	iFn = NULL;
	iPtr = (TAny*) &aDfc;
	iTriggerTime=TheTimerQ.iMsCount+(TUint32)aTime;
	TheTimerQ.Add(this);
	TheTimerQ.iTimerSpinLock.UnlockIrqRestore(irq);
	return KErrNone;
	}
Example #23
/************************************************************************************
 *            DDisplayLdd LDD class implementation
 ************************************************************************************/
DDisplayLdd::DDisplayLdd()		
	{	
	__DEBUG_PRINT("DDisplayLdd::DDisplayLdd()\n");
	// store the pointer to the current thread for request completion
  	iClient = &Kern::CurrentThread();
    __NK_ASSERT_DEBUG(iClient);
	// Open a reference on the client thread so its control block can't disappear until the driver has finished with it.
    iClient->Open();
    iCurrentPostCount   = 0;
    iRequestedPostCount = 0;
    iCompositionBuffIdx = 0;
    iUnit				= -1;
    iThreadOpenCount    = 0;
    iAsyncReqCount		= 0;
    iClientRequestMutex = 0;	
	}
Example #24
TInt DThread::AllocateUserStack(TInt aSize, TBool aPaged)
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DThread::AllocateUserStack %O %x",this,aSize));
	aSize = MM::RoundToPageSize(aSize);
	if(aSize>PP::MaxUserThreadStack)
		return KErrTooBig;

	TMemoryObjectType memoryType = (aPaged)? EMemoryObjectPaged : EMemoryObjectMovable;
	TUint guardSize = PP::UserThreadStackGuard;
	TUint virtualSize = guardSize+aSize;
	// wipe user thread stack with 0x29292929
	TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateDefault | EMemoryCreateUseCustomWipeByte | (0x29 << EMemoryCreateWipeByteShift));
	DMemoryObject* memory;
	TInt r = MM::MemoryNew(memory, memoryType, MM::BytesToPages(virtualSize),flags);
	if(r==KErrNone)
		{
		r = MM::MemoryAlloc(memory,MM::BytesToPages(guardSize),MM::BytesToPages(aSize));
		if(r==KErrNone)
			{
			DMemoryMapping* mapping;
			// Get os asid; no need to open a reference, as this is only invoked where 
			// the current thread is owned by iOwningProcess, iOwningProcess is 
			// the kernel process or this is the first thread of a process that 
			// isn't fully created yet.
			TUint osAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
			r = MM::MappingNew(mapping,memory,EUserReadWrite,osAsid);
			if(r==KErrNone)
				{
				__NK_ASSERT_DEBUG(!((DMemModelThread*)this)->iUserStackMapping);
				((DMemModelThread*)this)->iUserStackMapping = mapping;
				iUserStackSize = aSize;
				iUserStackRunAddress = MM::MappingBase(mapping)+guardSize;
				__KTRACE_OPT(KTHREAD,Kern::Printf("User stack at %x, size %x",iUserStackRunAddress,iUserStackSize));
				}
			}
		if(r!=KErrNone)
			MM::MemoryDestroy(memory);
		else
			{
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsUserStack,memory,&iNThread);
#endif
			}
		}
	return r;
	}
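The guard accounting above is worth making concrete: the memory object reserves guardSize + aSize of virtual space but commits only aSize of it, starting after the guard, so an overflowing stack faults in the uncommitted guard region instead of corrupting memory. A sketch of the resulting layout, with the page and guard sizes assumed for illustration:

// Assumed: guardSize = 0x2000, aSize = 0x4000; base = MM::MappingBase(mapping)
// guard region:     [base,          base + 0x2000)	<- uncommitted, faults on overflow
// committed stack:  [base + 0x2000, base + 0x6000)	<- iUserStackRunAddress = base + guardSize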
Example #25
RPageArray::TSegment* RPageArray::TSegment::New()
	{
	__NK_ASSERT_DEBUG(!MmuLock::IsHeld());

	// allocate segment...
	TSegment* s = PageSegmentAllocator.Alloc();
	if(!s)
		return s;

	// initialise segment...
	s->iCounts = 1; // lock count = 1, alloc count = 0
	TPhysAddr* p = s->iPages;
	TPhysAddr* pEnd = p+KPageArraySegmentSize;
	TPhysAddr nullPage = EEmptyEntry;
	do *p++ = nullPage;
	while(p<pEnd);

	return s;
	}
Example #26
void TScheduler::RotateReadyList(TInt p)
//
// rotate the ready list for priority p
//
	{
	__NK_ASSERT_DEBUG(p>=0 && p<KNumPriorities);
	SDblQueLink* pQ=iQueue[p];
	if (pQ)
		{
		SDblQueLink* pN=pQ->iNext;
		if (pN!=pQ)
			{
			NThread* pT=(NThread*)pQ;
			pT->iTime=pT->iTimeslice;
			iQueue[p]=pN;
			if (pQ==iCurrentThread)
				RescheduleNeeded();
			}
		}
	}
Example #27
TInt DMemModelAlignedShBuf::FindMapping(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(iPool->iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	// Open a reference on aProcess's os asid so that it can't be freed and 
	// reused while searching.
	TInt osAsid = aProcess->TryOpenOsAsid();
	if (osAsid < 0)
		{// aProcess has died and freed its os asid.
		return KErrDied;
		}

	SDblQueLink* pLink = iMappings.First();
	SDblQueLink* end = reinterpret_cast<SDblQueLink*>(&iMappings);
	DShBufMapping* m = NULL;

	while (pLink != end)
		{
		m = _LOFF(pLink, DShBufMapping, iObjLink);

		if (m->iOsAsid == osAsid)
			{
			aMapping = m;
			r = KErrNone;
			break;
			}
		pLink = pLink->iNext;
		}

	// Close the reference on the os asid: if we found a mapping, its lifetime will
	// determine whether the process still owns an os asid.
	aProcess->CloseOsAsid();	
	return r;
	}
Example #28
TInt DThread::AllocateSupervisorStack()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DThread::AllocateSupervisorStack %O %x",this,iSupervisorStackSize));
	iSupervisorStackSize = MM::RoundToPageSize(iSupervisorStackSize);
	if(iThreadType==EThreadInitial)
		return KErrNone;

	TUint guardSize = PP::SupervisorThreadStackGuard;
	TUint virtualSize = guardSize+iSupervisorStackSize;
	DMemoryObject* memory;
	TInt r = MM::MemoryNew(memory,EMemoryObjectUnpaged,MM::BytesToPages(virtualSize));
	if(r==KErrNone)
		{
		r = MM::MemoryAlloc(memory,MM::BytesToPages(guardSize),MM::BytesToPages(iSupervisorStackSize));
		if(r==KErrNone)
			{
			DMemoryMapping* mapping;
			r = MM::MappingNew(mapping,memory,ESupervisorReadWrite,KKernelOsAsid);
			if(r==KErrNone)
				{
				__NK_ASSERT_DEBUG(!((DMemModelThread*)this)->iKernelStackMapping);
				((DMemModelThread*)this)->iKernelStackMapping = mapping;
				iSupervisorStack = (TAny*)(MM::MappingBase(mapping)+guardSize);
				__KTRACE_OPT(KTHREAD,Kern::Printf("Supervisor stack at %x, size %x",iSupervisorStack,iSupervisorStackSize));
				}
			}
		if(r!=KErrNone)
			MM::MemoryDestroy(memory);
		else
			{
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsSupervisorStack,memory,&iNThread);
#endif
			}
		}
	return r;
	}
Example #29
TInt DMemModelAlignedShPool::GetFreeMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GetFreeMapping()"));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	if (aClient)
		{
		if (!aClient->iMappingFreeList.IsEmpty())
			{
			aMapping = _LOFF(aClient->iMappingFreeList.GetFirst(), DShBufMapping, iObjLink);
			r = KErrNone;
			}
		else
			{
			r = KErrNoMemory;
			}
		}

	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::GetFreeMapping(0x%08x, 0x%08x) returns %d", aMapping, aClient, r));
	return r;
	}
Example #30
extern "C" void DebugMsgNKFMWait(int a)
	{
	__NK_ASSERT_DEBUG(!TheScheduler.iCurrentThread->iHeldFastMutex);
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKFMWait %M",a));
	}