Ejemplo n.º 1
0
/*******************************************************************************
 * Name:    new_pgt
 *
 * Desc:    Allocates and initializes a new page table. Runs with interrupts
 *          disabled for the whole operation; the interrupt state is restored
 *          exactly once on every exit path.
 *
 * Params:  None.
 *
 * Returns: Pointer to the newly allocated pgt. NULL on error (the current
 *          process is also killed when no free frame is available).
 ******************************************************************************/
pt_t *
new_pgt(void)
{
    pt_t *new_pt = NULL;
    frm_map_t *new_frame = NULL;
    STATWORD ps;

    disable(ps);
    DTRACE_START;

    new_frame = get_frm(FR_PTBL);
    if (NULL == new_frame) {
        DTRACE("DBG$ %d %s> get-frm() failed\n", currpid, __func__);
        kprintf("\n\n");
        kprintf("FATAL ERROR: Ran out of free frames for page tables!\n");
        kprintf("Process '%s' with PID '%d' will be terminated.\n", \
                P_GET_PNAME(currpid), currpid);
        kprintf("\n\n");
        sleep(9);
        DTRACE("DBG$ %d %s> returning NULL\n", currpid, __func__);
        DTRACE_END;
        restore(ps);
        kill(getpid());
        /* BUG FIX: the old code jumped to a cleanup label here that ran
         * DTRACE_END and restore(ps) a *second* time, after both had already
         * executed above. If kill() of self ever returns, just report the
         * failure directly.
         */
        return NULL;
    }
    new_frame->fr_type = FR_PTBL;

    /* The PT starts at the PA of the frame that we just got. Since the frame
     * is contiguous, init_pgt() can fill in the entries in a looped manner.
     */
    new_pt = (pt_t *) FR_ID_TO_PA(new_frame->fr_id);
    init_pgt(new_pt);

    DTRACE("DBG$ %d %s> returning new pgt at 0x%08x\n", \
            currpid, __func__, new_pt);
    DTRACE_END;
    restore(ps);
    return new_pt;
}
Ejemplo n.º 2
0
/*
 * Create a duplicated process address space from current proc's mm_struct.
 * Also, we copy all needed page mapping into the new process too
 */
mm_struct_t *
mm_struct_dup(void)
{
	addr_t pgt_pa = 0;
	addr_t addr = 0;
	mm_struct_t *mm_p = current->mm;

	mm_struct_t *mm_new = (mm_struct_t *)(get_object( objcache_mm_struct_head ));

	mm_new->code_start = mm_p->code_start;
	mm_new->code_end = mm_p->code_end;
	mm_new->data_start = mm_p->data_start;
	mm_new->data_end = mm_p->data_end;
	mm_new->brk_start = mm_p->brk_start;
	mm_new->brk_end = mm_p->brk_end;
	mm_new->stack_start = mm_p->stack_start;

	/* Duplicate VMAs */
	mm_new->mmap = vma_duplicate(mm_p->mmap);

    /* setup page table */
    mm_new->pgt   = get_zeroed_page( PG_PGT | PG_SUP | PG_OCP );
    pgt_pa      = get_pa_from_va( mm_new->pgt );
    init_pgt( (mm_new->pgt) );

	/* set lv1 page table entry: self-reference entry */
	addr = ((addr_t)(mm_new->pgt)) + (8*PGT_ENTRY_LV1_SELFREF);
	set_pgt_entry( addr, pgt_pa          , PGT_P, PGT_EXE,
			0x0, 0x0, PGT_RW | PGT_SUP );

	/* set lv1 page table entry: kernel page */
	addr = ((addr_t)(mm_new->pgt)) + (8*PGT_ENTRY_LV1_KERNEL );
	set_pgt_entry( addr, def_pgt_paddr_lv2, PGT_P, PGT_EXE,
			0x0, 0x0, PGT_RW | PGT_SUP );

	/* Duplicate user address space (including user stack) */
	dup_upgt_self(mm_new->pgt);

    return mm_new;
}
Ejemplo n.º 3
0
/*
 * Build a new mm_struct for a fresh process image.
 *
 * Creates VMAs for the code segment, the data segment (when non-empty), the
 * bss (when bss_size > 0) and one page of user stack, linked into a list
 * headed by mm_s->mmap whose head->prev points at the current tail. Then
 * allocates a zeroed top-level page table and installs the self-reference
 * and kernel lv1 entries.
 *
 * Params mirror the loaded image layout: code/data start and end VAs, the
 * backing file, the file offsets of the code and data segments, and the
 * bss size appended after the data segment.
 *
 * Returns the new mm_struct, or NULL if the mm_struct allocation fails.
 */
mm_struct_t *
mm_struct_new (
    addr_t      code_start      ,
    addr_t      code_end        ,
    addr_t      data_start      ,
    addr_t      data_end        ,
    addr_t      file            ,
    addr_t      code_ofs        ,
    addr_t      data_ofs        ,
    uint64_t    bss_size
)
{
    addr_t  pgt_pa      = 0;
    addr_t  addr        = 0;
    vma_t   *vma_tmp    = NULL;
    vma_t   *vma_current= NULL;

    mm_struct_t *mm_s   = (mm_struct_t *)(get_object( objcache_mm_struct_head ));
    /* Assumes get_object() returns NULL on exhaustion — confirm. */
    if (!mm_s)
        return NULL;

    mm_s->code_start    = code_start;
    mm_s->code_end      = code_end  ;
    mm_s->data_start    = data_start;
    mm_s->data_end      = data_end  ;
    mm_s->stack_start   = USTACK_TOP - __PAGE_SIZE;
    mm_s->brk_start     = data_end + bss_size;
    mm_s->brk_end       = mm_s->brk_start;

    /* setup vma for code */
    vma_tmp             = (vma_t *)get_object( objcache_vma_head );
    vma_set( vma_tmp, code_start, code_end         , NULL      , NULL,
             0, file, code_ofs, 0, 0 );
    mm_s->mmap          = vma_tmp;
    vma_current         = vma_tmp;

    /* setup vma for data */
    if (data_start < data_end) {
        vma_tmp             = (vma_t *)get_object( objcache_vma_head );
        vma_set( vma_tmp, data_start, data_end         , mm_s->mmap, vma_current,
                0, file, data_ofs, 0, 0 );
        vma_current->next   = vma_tmp;
        mm_s->mmap->prev    = vma_tmp;
        /* BUG FIX: the tail pointer was never advanced, so the next insertion
         * clobbered vma_current->next and orphaned this VMA. */
        vma_current         = vma_tmp;
    }

    /* setup vma for bss  */
    if (bss_size > 0) {
        vma_tmp             = (vma_t *)get_object( objcache_vma_head );
        vma_set( vma_tmp, data_end  , data_end+bss_size, mm_s->mmap, vma_current,
                0, file, data_ofs, 0, 0 );
        vma_current->next   = vma_tmp;
        mm_s->mmap->prev    = vma_tmp;
        /* BUG FIX: advance the tail (see data VMA above). */
        vma_current         = vma_tmp;
    }

    /* XXX: we always provide user mode stack */
    vma_tmp = (vma_t *)get_object(objcache_vma_head);
    vma_set(vma_tmp, mm_s->stack_start, USTACK_TOP, mm_s->mmap, vma_current,
            0, file, data_ofs, 0, 0);
    vma_current->next = vma_tmp;
    mm_s->mmap->prev = vma_tmp;
    vma_current = vma_tmp;

    /* setup page table */
    mm_s->pgt   = get_zeroed_page( PG_PGT | PG_SUP | PG_OCP );
    pgt_pa      = get_pa_from_va( mm_s->pgt );
    init_pgt( (mm_s->pgt) );

    /* set lv1 page table entry: self-reference entry */
    addr = ((addr_t)(mm_s->pgt)) + (8*PGT_ENTRY_LV1_SELFREF);
    set_pgt_entry( addr, pgt_pa          , PGT_P, PGT_EXE,
            0x0, 0x0, PGT_RW | PGT_SUP );

    /* set lv1 page table entry: kernel page */
    addr = ((addr_t)(mm_s->pgt)) + (8*PGT_ENTRY_LV1_KERNEL );
    set_pgt_entry( addr, def_pgt_paddr_lv2, PGT_P, PGT_EXE,
            0x0, 0x0, PGT_RW | PGT_SUP );

    return mm_s;
} /* mm_struct_new() */