Ejemplo n.º 1
0
/* Save-area layout at *sp: [0]=r13 (usr), [1]=r14 (usr), [2..14]=r0-r12, [15]=lr/return PC. */
int updateThreadRegs(q_elem *next, volatile unsigned int *sp[])
{
	#if DEBUG_THREAD_REGS
	kprintf("update thread Regs");
	#endif

	/* Nothing to switch to. */
	if(next==0)
		return -1;

	/* Incoming thread is already the running one and no reload was forced. */
	if(next==thread.current && !force_updateRegs)
	{
		#if DEBUG_THREAD_REGS
		kprintf("next thread is current one\r\n");
		#endif
		return 0;
	}

	thread.current=next;

	/* Patch the saved exception frame so the return restores this thread. */
	volatile unsigned int *frame = *sp;
	frame[0] = thread.current->tcb.regs[13];	/* user-mode sp */
	frame[1] = thread.current->tcb.regs[14];	/* user-mode lr */
	frame[15] = thread.current->tcb.regs[15];	/* return pc */
	#if DEBUG_THREAD_REGS
	kprintf("next PC should be %x\r\n", thread.current->tcb.regs[15]);
	#endif

	int r;
	for(r=0;r<=12;r++)
	{
		frame[r+2] = thread.current->tcb.regs[r];	/* r0-r12 */
	}

	/* A forced register reload is one-shot: consume the flag. */
	if(force_updateRegs)
		force_updateRegs = 0;

	/* Switch address space to the incoming thread's process and flush stale entries. */
	set_ttbr0(get_mmu(next->tcb.pid));
	tlbiall();
	return 1;
}
Ejemplo n.º 2
0
/*
 * mmu2d_set_master - point the GPU MMU at the context's master (MTLB) table.
 *
 * @ctxt: MMU context whose ->physical field holds the preformatted
 *        configuration value for the master table.
 *
 * If the MMU is already enabled, only the master table address is queued
 * through the command buffer. If it is still disabled, the safe zone and
 * the master table address are programmed while the command buffer runs
 * in physical-address mode, the buffer is flushed, and the MMU is then
 * enabled via a direct register write. Per the comment below, once the
 * MMU is enabled only a system reset can disable it again.
 *
 * Returns GCERR_NONE on success, GCERR_MMU_CTXT_BAD for a NULL or
 * uninitialized context, or a grouped error from command-buffer
 * allocation. With MMU_ENABLE == 0 this reduces to argument validation.
 */
enum gcerror mmu2d_set_master(struct mmu2dcontext *ctxt)
{
#if MMU_ENABLE
	enum gcerror gcerror;
	struct gcmommumaster *gcmommumaster;
	struct gcmommuinit *gcmommuinit;
	unsigned int size, status, enabled;
	struct mmu2dprivate *mmu = get_mmu();

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	/* Read the MMU status. */
	status = gc_read_reg(GCREG_MMU_CONTROL_Address);
	enabled = GETFIELD(status, GCREG_MMU_CONTROL, ENABLE);

	/* Is MMU enabled? */
	if (enabled) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is already enabled.\n",
			__func__, __LINE__);

		/* Allocate command buffer space. */
		gcerror = cmdbuf_alloc(sizeof(struct gcmommumaster),
					(void **) &gcmommumaster, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_SET);

		/* Program master table address. */
		gcmommumaster->master_ldst = gcmommumaster_master_ldst;
		gcmommumaster->master = ctxt->physical;
	} else {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is disabled, enabling.\n",
			__func__, __LINE__);

		/* MMU disabled, force physical mode. */
		cmdbuf_physical(true);

		/* Allocate command buffer space. */
		/* NOTE(review): cmdbuf_flush(NULL) appears to return the size
		 * of the flush sequence appended after the init block -
		 * confirm against cmdbuf_flush's contract. */
		size = sizeof(struct gcmommuinit) + cmdbuf_flush(NULL);
		gcerror = cmdbuf_alloc(size, (void **) &gcmommuinit, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_INIT);

		/* Program the safe zone and the master table address. */
		gcmommuinit->safe_ldst = gcmommuinit_safe_ldst;
		gcmommuinit->safe = mmu->safezone.physical;
		gcmommuinit->mtlb = ctxt->physical;

		/* Execute the buffer. */
		cmdbuf_flush(gcmommuinit + 1);

		/* Resume normal mode. */
		cmdbuf_physical(false);

		/*
		* Enable MMU. For security reasons, once it is enabled,
		* the only way to disable is to reset the system.
		*/
		gc_write_reg(
			GCREG_MMU_CONTROL_Address,
			SETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE));
	}

	return GCERR_NONE;
#else
	/* MMU support compiled out: just validate the context. */
	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	return GCERR_NONE;
#endif
}
Ejemplo n.º 3
0
// Initialize a new thread and insert it into the ready queue.
//
// startFun   - entry point of the new thread (receives args/len)
// args       - argument block copied onto the new thread's user stack
// len        - number of 32-bit words in args (clamped to 32 for a new process)
// prio       - priority; must be positive for every thread except the very
//              first one created (the idle thread)
// newProcess - non-zero to allocate a fresh PID and address space
//
// Returns 1 on success, -2 for a non-positive priority, -1 when resources
// are exhausted (no free queue element, no free PID, no free memory block).
int createThread(void (*startFun)(void* args, int len), void *args, int len, int prio, int newProcess)
{
	if(!thread.free_elems)
		return -1;

	// Validate priority BEFORE touching the free list: the original code
	// popped an element first and leaked it on this error path. Only the
	// very first thread (the idle thread) may have a non-positive priority.
	if(thread.thread_count>0 && prio<=0)
	{
		return -2;
	}

	// Pop an element off the free list.
	q_elem *newfree = thread.free_elems;
	thread.free_elems = thread.free_elems->next;
	if(thread.free_elems) // list may now be empty - guard the NULL deref
		thread.free_elems->prev = 0;
	newfree->next = thread.head;
	newfree->priority = prio;

	appendElem(newfree, &thread.head);

	// we're going to copy args to beginning of thread stack
	int argbuffer[32]; // if it is a new process we need to stage the data in
			   // kernel space so it survives the page-table switch
	if(newProcess)
	{
		len = len>32?32:len; // clamp: no buffer overflow ;)
		int i;
		for(i=0;i<len;i++)
		{
			argbuffer[i] = ((int *) args)[i];
		}
		args=argbuffer;

		// Find the first unused PID. Bound is checked BEFORE the array
		// access (the original read threadsPerPidCount[MAX_PIDS] when
		// every PID was in use - out-of-bounds).
		unsigned int newpid;
		for(newpid=0;newpid<MAX_PIDS && threadsPerPidCount[newpid];newpid++);
		if(newpid==MAX_PIDS)
			return -1; // NOTE(review): leaves newfree in the run queue - confirm cleanup path
		int newMemAdr = nextFreeBlock();
		if(newMemAdr==-1)
			return -1; // NOTE(review): same leak as above
		newMemAdr+=0xA;
		mmap(newpid, 0xA, newMemAdr, mmu_readwrite); // data segment - just one per process
		newfree->tcb.pid = newpid;
		set_ttbr0(get_mmu(newpid)); // load new table
		tlbiall(); // flush tlb
	}
	else
	{
		// Same process as the caller: share its address space.
		newfree->tcb.pid = thread.current->tcb.pid;
	}

	threadsPerPidCount[newfree->tcb.pid]++;

	// Stack block: one per thread, mapped at virtual 0xB+id.
	int newMemAdr = nextFreeBlock();
	if(newMemAdr==-1)
		return -1; // NOTE(review): leaks newfree and the pid refcount - confirm
	newMemAdr+=0xA;
	mmap(newfree->tcb.pid, 0xB+newfree->id, newMemAdr, mmu_readwrite); // stack one for each thread

	// r0 = pointer to the args copied to the top of the stack
	newfree->tcb.regs[0] = 0xBffffc + (newfree->id<<20) - (len<<2);
	newfree->tcb.regs[1] = len;
	// SP = USR BASE + Thread Count Offset 64k - args (copied to stack)
	newfree->tcb.regs[13] = newfree->tcb.regs[0]; // sp
	newfree->tcb.regs[14] = (int) &exitThread; // lr: thread returns into exitThread
	newfree->tcb.regs[15] = (int) startFun; // pc
	// init cpsr, user mode enable IRQs, no thumb no FIQ
	newfree->tcb.cpsr = 0b1010000;

	// copy args to the new thread's stack
	int i;
	int curAddr = newfree->tcb.regs[13];
	for(i=0;i<len;i++)
	{
		int arg = ((int *) args)[i];
		write_u32(curAddr,arg);
		curAddr+=4;
	}
	thread.thread_count++;
	thread.threads_ready++;

	if(newProcess && thread.current)
	{
		set_ttbr0(get_mmu(thread.current->tcb.pid)); // restore current table
		tlbiall(); // flush tlb
	}

	// if there was currently just the idle thread ready
	// and we got a new one -> force reschedule!
	if(thread.threads_ready==2)
	{
		#if DEBUG
		kprintf("create force \r\n");
		#endif
		force_reschedule=1;
	}
	return 1;
}
Ejemplo n.º 4
0
/*
 * mmu2d_create_context - initialize an MMU context for 2D operations.
 *
 * @ctxt: caller-provided context structure; zeroed here before setup.
 *
 * With MMU_ENABLE: allocates the master (MTLB) table pages and an array
 * of slave-descriptor pointers, marks every MTLB entry vacant, and
 * precomputes the GCREG_MMU_CONFIGURATION value in ctxt->physical.
 * In all builds: seeds the vacant-arena list covering the entire address
 * space and takes a reference on the shared mmu2dprivate object. The
 * safe zone is lazily allocated once (size == 0) and filled with a
 * recognizable pattern for debugging stray accesses.
 *
 * Returns GCERR_NONE on success or a grouped gcerror; on failure any
 * partially-allocated MTLB/slave storage is released.
 */
enum gcerror mmu2d_create_context(struct mmu2dcontext *ctxt)
{
	enum gcerror gcerror;

#if MMU_ENABLE
	int i;
#endif

	struct mmu2dprivate *mmu = get_mmu();

	if (ctxt == NULL)
		return GCERR_MMU_CTXT_BAD;

	memset(ctxt, 0, sizeof(struct mmu2dcontext));

#if MMU_ENABLE
	/* Allocate MTLB table. */
	gcerror = gc_alloc_pages(&ctxt->master, MMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto fail;
	}

	/* Allocate an array of pointers to slave descriptors. */
	ctxt->slave = kmalloc(MMU_MTLB_SIZE, GFP_KERNEL);
	if (ctxt->slave == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLBIDX_ALLOC);
		goto fail;
	}
	memset(ctxt->slave, 0, MMU_MTLB_SIZE);

	/* Invalidate all entries. */
	for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1)
		ctxt->master.logical[i] = MMU_MTLB_ENTRY_VACANT;

	/* Configure the physical address. */
	/* Build the full MMU configuration word once: address field plus
	 * mode, with both mask bits set so the hardware applies them. */
	ctxt->physical
	= SETFIELD(~0U, GCREG_MMU_CONFIGURATION, ADDRESS,
	  (ctxt->master.physical >> GCREG_MMU_CONFIGURATION_ADDRESS_Start))
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_ADDRESS, ENABLED)
	& SETFIELD(~0U, GCREG_MMU_CONFIGURATION, MODE, MMU_MTLB_MODE)
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_MODE, ENABLED);
#endif

	/* Allocate the first vacant arena. */
	gcerror = mmu2d_get_arena(mmu, &ctxt->vacant);
	if (gcerror != GCERR_NONE)
		goto fail;

	/* Everything is vacant. */
	ctxt->vacant->mtlb  = 0;
	ctxt->vacant->stlb  = 0;
	ctxt->vacant->count = MMU_MTLB_ENTRY_NUM * MMU_STLB_ENTRY_NUM;
	ctxt->vacant->next  = NULL;

	/* Nothing is allocated. */
	ctxt->allocated = NULL;

#if MMU_ENABLE
	/* Allocate the safe zone. */
	/* Lazily allocated once and shared across contexts. */
	if (mmu->safezone.size == 0) {
		gcerror = gc_alloc_pages(&mmu->safezone,
						MMU_SAFE_ZONE_SIZE);
		if (gcerror != GCERR_NONE) {
			gcerror = GCERR_SETGRP(gcerror,
						GCERR_MMU_SAFE_ALLOC);
			goto fail;
		}

		/* Initialize safe zone to a value. */
		for (i = 0; i < MMU_SAFE_ZONE_SIZE / sizeof(u32); i += 1)
			mmu->safezone.logical[i] = 0xDEADC0DE;
	}
#endif

	/* Reference MMU. */
	mmu->refcount += 1;
	ctxt->mmu = mmu;

	return GCERR_NONE;

fail:
#if MMU_ENABLE
	/* NOTE(review): reached even when gc_alloc_pages itself failed -
	 * presumably gc_free_pages tolerates an unallocated/zeroed
	 * descriptor (ctxt was memset above); confirm its contract. */
	gc_free_pages(&ctxt->master);
	if (ctxt->slave != NULL)
		kfree(ctxt->slave);
#endif

	return gcerror;
}