struct buf * vr_rclone(struct buf * old_region) { struct buf * new_region; const size_t rsize = old_region->b_bufsize; /* "Lock", ensure that the region is not freed during the operation. */ vrref(old_region); new_region = geteblk(rsize); if (!new_region) { KERROR(KERROR_ERR, "Out of memory"); return 0; } #if configDEBUG >= KERROR_DEBUG { char buf[80]; ksprintf(buf, sizeof(buf), "clone %x -> %x, %u bytes", old_region->b_data, new_region->b_data, rsize); KERROR(KERROR_DEBUG, buf); } #endif /* Copy data */ memcpy((void *)(new_region->b_data), (void *)(old_region->b_data), rsize); /* Copy attributes */ new_region->b_uflags = ~VM_PROT_COW & old_region->b_uflags; new_region->b_mmu.vaddr = old_region->b_mmu.vaddr; /* num_pages already set */ new_region->b_mmu.ap = old_region->b_mmu.ap; new_region->b_mmu.control = old_region->b_mmu.control; /* paddr already set */ new_region->b_mmu.pt = old_region->b_mmu.pt; vm_updateusr_ap(new_region); /* Release "lock". */ brelse(old_region); return new_region; }
/**
 * Allocate an in-core buffer of at least size bytes.
 * The allocation is rounded up to a multiple of MMU_PGSIZE_COARSE and backed
 * by pages reserved from a vregion; kernel space is mapped 1:1 so b_data is
 * set directly to the physical address.
 * @param size is the requested size in bytes (stored as b_bcount).
 * @return Returns a new buffer with refcount 1 and B_BUSY set,
 *         or NULL if out of memory.
 */
struct buf * geteblk(size_t size)
{
    size_t iblock; /* Block index of the allocation */
    const size_t orig_size = size;
    size = memalign_size(size, MMU_PGSIZE_COARSE);
    const size_t pcount = VREG_PCOUNT(size);
    struct vregion * vreg;
    struct buf * retval = NULL;

    if (get_iblocks(&iblock, pcount, &vreg))
        return NULL;

    retval = kcalloc(1, sizeof(struct buf));
    if (!retval) {
        /*
         * Can't allocate a buf struct.
         * NOTE(review): the iblocks reserved by get_iblocks() above are not
         * released on this path — looks like a leak of pcount blocks from
         * vreg; verify against the vregion allocator's release API.
         */
        return NULL;
    }

    mtx_init(&(retval->lock), MTX_TYPE_SPIN | MTX_TYPE_TICKET);

    /* Update target struct */
    retval->b_mmu.paddr = VREG_I2ADDR(vreg, iblock);
    retval->b_mmu.num_pages = pcount;
    retval->b_data = retval->b_mmu.paddr;   /* Currently this way as
                                             * kernel space is 1:1 */
    retval->b_bufsize = VREG_BYTESIZE(pcount);
    retval->b_bcount = orig_size;
    retval->b_flags = B_BUSY;
    retval->refcount = 1;
    retval->allocator_data = vreg;
    retval->vm_ops = &vra_ops;
    retval->b_uflags = VM_PROT_READ | VM_PROT_WRITE;
    vm_updateusr_ap(retval);

    vreg->count += pcount;

    /* Update stats */
    vmem_used += size;
    last_vreg = vreg;

    return retval;
}
/**
 * Create the main thread for the init process.
 * The thread starts in privileged mode at uinit() with rootfs as its
 * argument; uinit_exit() is registered as the thread's deleter.
 * @param stack_addr is the base address of the user stack for the thread.
 * @return Returns the tid from thread_create() (negative on failure —
 *         see the caller's error check).
 */
static pthread_t create_uinit_main(void * stack_addr)
{
    struct _sched_pthread_create_args init_ds = {
        .param.sched_policy = SCHED_OTHER,
        .param.sched_priority = NZERO,
        .stack_addr = stack_addr,
        .stack_size = configUSRINIT_SSIZE,
        .flags = 0,
        .start = uinit, /* We have to first get into user space to use exec
                         * and mount the rootfs. */
        .arg1 = (uintptr_t)rootfs,
        .del_thread = (void (*)(void *))uinit_exit,
    };

    return thread_create(&init_ds, THREAD_MODE_PRIV);
}

/**
 * Map vmstack to proc.
 * Installs vmstack as the process's MM_STACK_REGION, refreshes its user
 * access permissions, and maps it through a page table obtained (or created)
 * from the process's page table list.
 */
static void map_vmstack2proc(struct proc_info * proc, struct buf * vmstack)
{
    struct vm_pt * vpt;

    (*proc->mm.regions)[MM_STACK_REGION] = vmstack;
    vm_updateusr_ap(vmstack);

    /* VM_PT_CREAT: create the page table if it doesn't exist yet. */
    vpt = ptlist_get_pt(&proc->mm, vmstack->b_mmu.vaddr, MMU_PGSIZE_COARSE,
                        VM_PT_CREAT);
    if (vpt == 0)
        panic("Couldn't get vpt for init stack");

    vmstack->b_mmu.pt = &(vpt->pt);
    vm_map_region(vmstack, vpt);
}

/**
 * Create init process.
 * Boot sequence: allocate the init user stack, create the init main thread,
 * fork a process for it, then wire the thread to the process and map the
 * stack and kernel stack into the appropriate page tables.
 * @return Returns 0 on success; any failure panics the kernel.
 */
int __kinit__ kinit(void)
{
    /* Subsystems that must be initialized before init can be created. */
    SUBSYS_DEP(sched_init);
    SUBSYS_DEP(proc_init);
    SUBSYS_DEP(ramfs_init);
    SUBSYS_DEP(sysctl_init);
    SUBSYS_INIT("kinit");

    char strbuf[80]; /* Buffer for panic messages. */
    struct buf * init_vmstack;
    pthread_t tid;
    pid_t pid;
    struct thread_info * init_thread;
    struct proc_info * init_proc;

    /*
     * FIXME Memory allocation, protection or manipulation bug!
     * There is a critical bug causing random crashes in userland. I suspect
     * something is overwriting user space allocation from the kernel space.
     * Allocating some memory before init is executed seems to fix this issue,
     * however naturally this is not the proper way to fix the bug.
     * Without the allocation here the issue is sometimes seen in init or
     * usually after couple of fork + exec + exit cycles. The usual symptom is
     * that the userland app first calls some 0:0 syscalls and then tries to
     * execute undefined instruction, which probably means that either some
     * jump table in the heap or some part of the executable code is modified
     * by a bad access in kernel mode just before this happens.
     */
    /* Deliberate workaround allocation for the bug described above;
     * the buffer is intentionally never used or freed. */
    (void)geteblk(MMU_PGSIZE_COARSE * 10);

    mount_tmp_rootfs();

    /*
     * User stack for init
     */
    init_vmstack = create_vmstack();
    if (!init_vmstack)
        panic("Can't allocate a stack for init");

    /*
     * Create a thread for init
     */
    /* Kernel space is mapped 1:1, so paddr is usable as the stack address
     * here — presumably mirrors b_data; TODO confirm. */
    tid = create_uinit_main((void *)(init_vmstack->b_mmu.paddr));
    if (tid < 0) {
        ksprintf(strbuf, sizeof(strbuf), "Can't create a thread for init. %i",
                 tid);
        panic(strbuf);
    }

    /*
     * pid of init
     */
    pid = proc_fork();
    if (pid <= 0) {
        ksprintf(strbuf, sizeof(strbuf), "Can't fork a process for init. %i",
                 pid);
        panic(strbuf);
    }

    init_thread = thread_lookup(tid);
    if (!init_thread) {
        panic("Can't get thread descriptor of init_thread!");
    }

    init_proc = proc_ref(pid);
    if (!init_proc || (init_proc->state == PROC_STATE_INITIAL)) {
        panic("Failed to get proc struct or invalid struct");
    }

    /* Attach the previously created thread to the forked process. */
    init_thread->pid_owner = pid;
    init_thread->curr_mpt = &init_proc->mm.mpt;

    /*
     * Map the previously created user stack with init process page table.
     */
    map_vmstack2proc(init_proc, init_vmstack);

    /*
     * Map tkstack of init with vm_pagetable_system.
     */
    mmu_map_region(&init_thread->kstack_region->b_mmu);
    init_proc->main_thread = init_thread;

    KERROR_DBG("Init created with pid: %u, tid: %u, stack: %p\n",
               pid, tid, (void *)init_vmstack->b_mmu.vaddr);

    /* Drop the reference taken by proc_ref() above. */
    proc_unref(init_proc);

    return 0;
}