Esempio n. 1
0
struct ko_thread *kt_create_kernel(void *entry, unsigned long para)
{
	struct kt_thread_creating_context ctx = {0};
	
	ctx.para			= para;
	ctx.thread_entry	= entry;
	ctx.flags			= KT_CREATE_RUN;
	return kt_create(kp_get_system(), &ctx);
}
Esempio n. 2
0
struct ko_thread *kt_create_driver_thread(void *ring0_stack, int stack_size, void *entry, unsigned long para)
{
	struct kt_thread_creating_context ctx = {0};

	ctx.thread_entry	= entry;
	ctx.stack0			= ring0_stack;
	ctx.stack0_size		= stack_size;
	ctx.para			= para;
	return kt_create(kp_get_system(), &ctx);
}
Esempio n. 3
0
/**
	@brief Create and install the idle thread for one CPU

	Allocates a thread object, attaches it to the system process, records it
	as both the current and the idle thread of @a cpu, and wakes it up.

	@param[in]  cpu  the per-CPU structure to install the idle thread into
	@param[out] idle receives the new thread, or NULL if allocation failed
*/
static void init_idle_thread(struct kc_cpu *cpu, struct ko_thread **idle)
{
	struct ko_thread *thread = cl_object_create(&thread_type);

	if (thread)
	{
		thread->process = kp_get_system();
		cpu->cur = cpu->idle = thread;
		kt_wakeup(thread);
	}

	/* On failure the caller sees NULL and must handle it. */
	*idle = thread;
}
Esempio n. 4
0
/**
	@brief Write the TLB entry for the context's current virtual address,
	reusing an existing slot when one already maps the same VPN2/ASID

	Probes the TLB for an entry matching (VPN2 of the faulting address, asid);
	if one exists it is overwritten in place (indexed write), otherwise a
	random slot is used. This avoids duplicate translations in the TLB when
	an entry is being refreshed after its R/W flags changed.

	@param[in] ctx   walk context holding the virtual address and mem ctx
	@param[in] asid  ASID to tag the entry with (masked below; ignored bits dropped)
	@param[in] write fault type as passed by do_page_fault; 2 means TLB refill
	@param[in] pos   caller-supplied marker, only used in the diagnostic printk

	@note Must be called with interrupts disabled (see refill_tbl's note);
	      the probe/read/write sequence on CP0 is not reentrant.
	@note NOTE(review): the original EntryHi (oldpid) is read but never
	      restored before returning — presumably the caller's ASID equals
	      the one written here (the sanity check at the end warns otherwise).
*/
static void refill_tbl_to(struct km_walk_ctx * ctx, unsigned int asid, int write, int pos)
{	
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/* Remember the current EntryHi (ASID) for the consistency check below. */
	oldpid = read_c0_entryhi();

	/* Entry HI: VPN2 of the faulting address combined with the masked ASID. */	
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	/* idx < 0 means no matching entry is currently in the TLB. */
	idx = read_c0_index();

	/* Saved only for the diagnostic printk below. */
	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();
	/* Set the G(lobal) flag for kernel mappings: the kernel mem ctx is shared by all processes. */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 (macro fills EntryLo0/EntryLo1 from the walk ctx and G_FLAG). */
	WRITE_LO;

	/* Random write for a new translation; indexed write to replace the matched slot. */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else
	{
		if (write == 2)
		{
			/* A refill fault hit an entry that is already present — log it. */
			printk("Write is forced index for %x, pos %d, idx %d,asid %d, %x %x.\n", ctx->current_virtual_address, pos, idx, asid, oldl1, oldl2);
		}
		
		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/* Sanity: the current ASID must equal the given ASID; the kernel ASID (0) is exempt. */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0/*kernel asid*/)
 		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
Esempio n. 5
0
/**
	@brief Refill the TLB with the entry for the context's current virtual
	address, always using a random slot

	Simpler variant of refill_tbl_to: no probe is performed, so an existing
	entry for the same VPN2/ASID is not detected or replaced — the new
	translation is written to a random TLB slot.

	@param[in] ctx walk context holding the virtual address and mem ctx

	@note Must be called with interrupts disabled.
*/
static void refill_tbl(struct km_walk_ctx * ctx)
{
	unsigned char asid;
	unsigned long entry;
	unsigned long G_FLAG;

	/* Entry HI: VPN2 of the faulting address combined with the masked ASID. */
	asid = ctx->mem->asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);

	/* Set the G(lobal) flag for kernel mappings: the kernel mem ctx is shared by all processes. */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 (macro fills EntryLo0/EntryLo1 from the walk ctx and G_FLAG). */
	WRITE_LO;
	
	/* Commit to a random TLB slot. */
	mtc0_tlbw_hazard();
	tlb_write_random();
	tlbw_use_hazard();
}
Esempio n. 6
0
/**
	@brief MIPS page fault / TLB refill entry point

	Walks the page table for the faulting address, then either refills the
	TLB directly (valid PTE) or asks the recovery path to bring the page in
	before refilling. Unrecoverable faults kill the current thread.

	@param[in] regs thread context at the time of the fault
	@param[in] write 0 is read, 1 is write, 2 is TLB refill
	@param[in] address the faulting address

	@note
		Interrupt must be disabled!
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address)
{
	struct km * mem;
	unsigned long pte;
	struct km_walk_ctx ctx;
	struct ko_process *real;
	bool in_user = kmm_arch_address_in_user(address);

	//trace_page_fault(regs, write, address);

	/*
		The fault is triggered when accessing kernel addressing space
		when the thread is in user mode? Then it is an access violation:
		kill the thread (no mem reference has been taken yet on this path).
	*/
	if (!(regs->cp0_status & ST0_CU0))
	{
		if (in_user == false)
			goto kill;
	} 	

	//TODO: check interrupt status
	
	/* Kernel-space faults use the kernel process's mem ctx, not the current thread's. */
	if (in_user == false)
		real = kp_get_system();
	else
		real = KT_GET_KP(kt_current());
	mem = kp_get_mem(real);
	KM_WALK_INIT(mem, &ctx);
	if (unlikely(km_walk_to(&ctx, address) == false))
	{
		/* "MIPS: cannot locate the page table entry on fault" (message kept verbatim). */
		printk("MIPS 页表异常时无法定位项目.\n");
		goto kill2;
	}

	/* Read out the PTE */
	pte = km_pte_read(&ctx);

	/* Just refill ? */
	if (write == 2)
	{
		/* If is valid entry */
		if (pte & PAGE_FLAG_VALID)
		{
			/* 
				TODO: use the random version; would have to keep interrupts and the
				scheduler off, but the asm stub re-enabled interrupts. Otherwise this
				is treated as REFILL, yet another thread may have filled the entry
				already, which would leave a duplicate translation in the TLB —
				hence refill_tbl_to, which probes and overwrites in place.
			*/
			refill_tbl_to(&ctx, mem->asid/*If in user space, the asid is what we want, if the process in kernel space, refill will add G, asid not used */ , write, 0);
			mem->hw_refill++;
			goto end;
		}
	}
	else
	{
		/* Sanity check */
		/*
			The exception handler may create a new page and write it just in the handler
			before it's updated to TLB. This is forbidden, because the TLB will be refill
			again after the recover
		*/
		if (write == 1)
		{
			/* A write fault on a writable, valid PTE should have been a refill instead. */
			if (pte & PAGE_FLAG_WRITE && pte & PAGE_FLAG_VALID)
			{
				/* "TLB error: PTE is correct and should have triggered REFILL, not WRITE." */
				hal_panic("TLB错误: PTE 项是正确的并应该触发REFILL, 而不是WRITE.\n");
			}
		}
		else
		{
			/* Same reasoning for a read fault on an already-valid PTE. */
			if (pte & (PAGE_FLAG_VALID))
			{
				trace_page_fault(regs, write, address);
				printk("ASID %d.\n", mem->asid);
				/* "TLB error: PTE is correct and should have triggered REFILL, not READ." */
				hal_panic("TLB错误: PTE 项是正确的并应该触发REFILL, 而不是READ.\n");
			}
		}
	}
	/* Drop the mem reference before the (potentially blocking) recovery. */
	kp_put_mem(mem);

	/* The PTE is not Refill nor is valid, we need to recover the page to memory */
	if (unlikely(recover(address, write, pte & PAGE_FLAG_VALID) == false))
		goto kill;
	
	/*
			REFILL exception
			Check the translation hierarchy and write to TLB.
			The refill may be triggered by read or write, but we don't care, just fill the TLB.
			Access violation will be checked if happened in second turn, but in normal case this is the end of the exception except COW.
	*/
	
	/* Re-acquire mem: it was released above and recover() may have slept. */
	mem = kp_get_mem(real);
	refill_tbl_to(&ctx, mem->asid, write, 2);
		
end:
	kp_put_mem(mem);
	return;
kill2:
	kp_put_mem(mem);
kill:
	trace_page_fault(regs, write, address);
	kt_delete_current();
}