Example #1
int vm_rawmap(int n, size_t vp, size_t pp) {
	vp = GET_PAGE(vp);
	if (vp < VM_KERNEL) return 0;
	pp = GET_PAGE(pp);

	/* Split the virtual address into the four x86-64 paging indices:
	 * one PML4 entry covers 512 GiB, one PDPT entry 1 GiB,
	 * one PD entry 2 MiB, one PT entry 4 KiB. */
	size_t i1 = vp / 0x8000000000; /* PML4 index */
	vp -= i1 * 0x8000000000;
	size_t i2 = vp / 0x40000000;   /* PDPT index */
	vp -= i2 * 0x40000000;
	size_t i3 = vp / 0x200000;     /* PD index */
	vp -= i3 * 0x200000;
	size_t i4 = vp / 0x1000;       /* PT index */

	/* Lazily allocate the top-level table for address space n. */
	if (vi[n].pagetable == NULL) {
		vi[n].pagetable = pmm_alloc();
		if (vi[n].pagetable == NULL) return 0;
		memset(vi[n].pagetable, 0, 4096);
	}

	/* Walk the intermediate levels, allocating missing tables on the way.
	 * The low bits of each entry are flags; |3 sets Present|Writable. */
	u64 *p = (u64 *)GET_PAGE(vi[n].pagetable[i1]), *q;
	if (p == NULL) {
		p = vi[n].pagetable[i1] = pmm_alloc();
		if (p == NULL) return 0;
		vi[n].pagetable[i1] |= 3;
		memset(p, 0, 4096);
	}

	if ((u64 *)p[i2] == NULL) {
		q = p[i2] = pmm_alloc();
		if (q == NULL) return 0;
		p[i2] |= 3;
		p = q;
		memset(p, 0, 4096);
	} else {
		p = (u64 *)GET_PAGE(p[i2]);
	}

	if ((u64 *)p[i3] == NULL) {
		q = p[i3] = pmm_alloc();
		if (q == NULL) return 0;
		p[i3] |= 3;
		p = q;
		memset(p, 0, 4096);
	} else {
		p = (u64 *)GET_PAGE(p[i3]);
	}

	/* Final page-table entry: physical frame plus Present|Writable. */
	p[i4] = pp | 3;
	return 1;
}
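All of these examples assume a physical frame allocator exposing pmm_alloc()/pmm_free(). For reference, a minimal bitmap-based sketch of such an allocator follows; it is a hypothetical illustration, not code from any of the projects here, and FRAME_COUNT, frame_bitmap and the 4 KiB frame size are assumptions.

#include <stddef.h>
#include <stdint.h>

#define FRAME_SIZE  4096u
#define FRAME_COUNT 32768u                       /* assumed: 128 MiB of RAM */

static uint32_t frame_bitmap[FRAME_COUNT / 32];  /* 1 bit per frame, 1 = used */

static void frame_set(uint32_t i)   { frame_bitmap[i / 32] |=  (1u << (i % 32)); }
static void frame_clear(uint32_t i) { frame_bitmap[i / 32] &= ~(1u << (i % 32)); }
static int  frame_test(uint32_t i)  { return frame_bitmap[i / 32] & (1u << (i % 32)); }

/* Return the physical address of a free 4 KiB frame, or NULL when out of
 * memory. Frame 0 is never handed out so that NULL stays unambiguous. */
void *pmm_alloc(void) {
	for (uint32_t i = 1; i < FRAME_COUNT; i++) {
		if (!frame_test(i)) {
			frame_set(i);
			return (void *)(uintptr_t)(i * FRAME_SIZE);
		}
	}
	return NULL;
}

/* Release a frame previously returned by pmm_alloc. */
void pmm_free(void *p) {
	frame_clear((uint32_t)((uintptr_t)p / FRAME_SIZE));
}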
Example #2
proc_t *proc_create(void)
{
  proc_t *proc = malloc(sizeof(*proc));
  if (!proc)
    return 0;

  proc->pml4_table = pmm_alloc();
  if (!proc->pml4_table)
  {
    free(proc);
    return 0;
  }

  if (!vmm_init_pml4(proc->pml4_table))
  {
    pmm_free(proc->pml4_table);
    free(proc);
    return 0;
  }

  proc->vmm_lock = SPIN_UNLOCKED;

  if (!seg_init(&proc->segments))
  {
    pmm_free(proc->pml4_table);
    free(proc);
    return 0;
  }

  proc->state = PROC_RUNNING;
  list_init(&proc->thread_list);
  return proc;
}
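proc_create unwinds its partial state on every failure path; the matching teardown for a fully constructed process would release the same resources in reverse order of acquisition. A hypothetical sketch (proc_destroy and seg_destroy are not part of the original listing; seg_destroy is assumed to be the inverse of seg_init):

void proc_destroy(proc_t *proc)
{
  /* Threads on proc->thread_list are assumed to be reaped already. */
  seg_destroy(&proc->segments);  /* assumed inverse of seg_init */
  pmm_free(proc->pml4_table);
  free(proc);
}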
Example #3
vaddr_t vmm_km_zalloc(size_t size) {
  // Pre-kernel-heap unmanaged (bump) memory allocator.
  // This should only be used before kheap_init has been called.
  static vaddr_t placement_addr = 0;
  if(placement_addr == 0) {
    pmap_virtual_space(NULL, &kernel_vend);
    placement_addr = kernel_vend;
  }

  // Make sure enough memory is left!
  kassert((UINT32_MAX-placement_addr) >= size);

  vaddr_t start = placement_addr;
  vaddr_t end = placement_addr + size;

  // Map new pages if the request runs past the current end of kernel memory
  if(end >= kernel_vend) {
    // Allocate and map pages until we have enough memory to serve the requested size
    // (pmm_alloc failure is not checked here)
    for( ; kernel_vend < end; kernel_vend+=PAGESIZE) {
      paddr_t pa = pmm_alloc();
      pmap_kenter_pa(kernel_vend, pa, VM_PROT_DEFAULT, PMAP_WRITE_BACK);
    }
  }

  // Zero the memory
  memset((void*)placement_addr, 0x0, size);

  placement_addr = end;

  return start;
}
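Since the placement pointer only moves forward and nothing is ever freed, this allocator is only good for carving out long-lived early-boot structures. A hypothetical usage sketch (struct cpu_local and ncpu are illustrative names, not from the listing):

/* Must run before kheap_init(): these allocations are permanent. */
uint32_t *early_bitmap = (uint32_t *)vmm_km_zalloc(4096);
struct cpu_local *cpus = (struct cpu_local *)vmm_km_zalloc(ncpu * sizeof(*cpus));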
Example #4
// TODO: This function shouldn't need to exist. Find another way
vaddr_t vmm_km_heap_extend(size_t size) {
  vregion_t *region = &vmap_kernel()->regions[2];
  kassert((UINT32_MAX - region->vend) > ROUND_PAGE(size));

  vaddr_t prev_vend = region->vend;
  region->vend += ROUND_PAGE(size);

  for(vaddr_t va = prev_vend; va < region->vend; va += PAGESIZE) {
    // Allocate a free page if one should be available else panic
    paddr_t pa = pmm_alloc();
    kassert(pa != UINTPTR_MAX);

    // TODO: Use pmap_enter here instead
    pmap_kenter_pa(va, pa, region->vm_prot, PMAP_WIRED | PMAP_WRITE_COMBINE); 

    // Enter the information into the amap
    region->aref.amap->aslots[(va - region->vstart) / PAGESIZE]->page->vaddr = va;
  }

  // Note: only the first newly mapped page is zeroed here
  memset((void*)prev_vend, 0, PAGESIZE);
  vmap_kernel()->heap_end = region->vend;

  uint32_t new_size = region->vend - region->vstart;
  region->aref.amap->maxslots = region->aref.amap->nslots = new_size / PAGESIZE;

  return prev_vend;
}
Example #5
void pmm_demo() {
	
	kset_color(0x12);
	
	uint32_t* b1 = (uint32_t*)pmm_alloc();
	uint32_t* b2 = (uint32_t*)pmm_allocs(2);
	
	kprintf("b1 allocataed 1 block at 0x%x: \n",b1);
	kprintf("b2 allocataed 2 blocks at 0x%x: \n",b2);

	pmm_dealloc(b1);
	b1 = (uint32_t*)pmm_alloc();
	kprintf("b1 re-allocataed at 0x%x: \n",b1);
	
	pmm_dealloc(b1);
	pmm_deallocs(b2,2);
}
Example #6
void syscall_malloc_pages(uint32_t *ebx, uint32_t *ecx, uint32_t *edx) {
  size_t pages = *ebx;
  
  uintptr_t vaddr_start = (uintptr_t) vmm_find(current_context, pages, VADDR_USER_HEAP_START, VADDR_USER_HEAP_END);
  
  size_t i;
  for(i = 0; i < pages; i++) {
    uintptr_t vaddr = vaddr_start + i*PAGE_SIZE;
    uintptr_t paddr = (uintptr_t) pmm_alloc();  // note: allocation failure is not checked
    vmm_map_page(current_context, vaddr, paddr, VMM_USER_FLAGS);
  }
  
  *ecx = vaddr_start;
  current_proc->used_mem_pages += pages;
}
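A user program would reach this handler through a software interrupt, with the page count in EBX and the mapped address coming back in ECX, as the handler's signature suggests. A hypothetical user-space wrapper (the vector 0x30 and the syscall number in EAX are assumptions, not from the listing):

#define SYS_MALLOC_PAGES 3  /* assumed syscall number */

static void *malloc_pages(size_t pages) {
  uintptr_t addr;
  /* page count in EBX, result address read back from ECX */
  __asm__ volatile("int $0x30"
                   : "=c"(addr)
                   : "a"(SYS_MALLOC_PAGES), "b"(pages)
                   : "edx", "memory");
  return (void *)addr;
}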
Example #7
// Allocates /num_frames/ contiguous physical frames
uint32 pmm_alloc_continuous(uint32 num_frames) {
	if (num_frames < 2)
		return pmm_alloc();

	INTERRUPT_LOCK;

	bool success = false;
	uint32 start = _pmm_first_free_frame(0);

	/*
	 * The idea behind this (naïve but relatively simple) algorithm is:
	 * 1) Find the first free frame
	 * 2) Are (frame+1), (frame+2), (frame+...), (frame + (num_frames - 1)) also free?
	 * 3) If yes: we're done, allocate them and return
	 * 4) If no: find the next free frame; start looking *after* the used one we found in step 2
	 */

	while (!success) {
		success = true; // if set when the for loop breaks, we're done
		if (start + (num_frames - 1) * PAGE_SIZE > mem_end_page)
			panic("pmm_alloc_continuous: no large enough continuous region found");

		for (uint32 i=1; i < num_frames; i++) { // we know that start + 0 is free, so start looking at 1
			if (_pmm_test_frame(start + (i * PAGE_SIZE)) != 0) {
				// We found a non-free frame! D'oh!
				// Start over at the next possibly free address.
				start = start + ((i+1) * PAGE_SIZE);
				success = false;
				break;
			}
		}
		// if the for loop didn't break because of finding a page, success == true and we'll exit
	}

	// Phew! /num_frames/ starting at (and including) /start/ ought to be free now.
	for(uint32 i=0; i < num_frames; i++) {
		_pmm_set_frame(start + i * PAGE_SIZE);
	}

	INTERRUPT_UNLOCK;

	return start;
}
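Physically contiguous frames matter mainly for hardware that does DMA without scatter/gather support. A hypothetical call site (the DMA use case is illustrative, not from the listing):

/* 16 KiB physically contiguous buffer (4 frames of 4 KiB) for a DMA ring.
 * No error check is possible: pmm_alloc_continuous panics on failure. */
uint32 ring_phys = pmm_alloc_continuous(4);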
Example #8
int vmm_init_raw(int n){
	size_t p,i;
	for (p = (size_t)get_phy_kernel_start(), i = 0;
	     p < (size_t)get_phy_kernel_end();
	     p += 4096, i += 4096) {
		if (!vm_rawmap(n, VM_KERNEL_CODE + i, p)) {
			return 0;
		}
	}

	/* Address space 0 builds the physical-memory mappings itself; every
	 * other address space shares the kernel entries of address space 0. */
	if (n == 0) vmm_initphy();
	else {
		for (i = 0; i < VM_KERNEL/0x8000000000; i++)
			vi[n].pagetable[i] = GET_PAGE(vi[0].pagetable[i]) | 3;
	}

	processor_info *pi=pmm_alloc();
	if(pi==NULL)return 0;
	if(!vm_rawmap(n,MP_PROCESSOR_INFO,pi))return 0;
	pi->n=n;

	return 1;
}
Example #9
File: exec.c Project: bajdcc/MiniOS
int exec(char *path, char **argv) {
    int i;
    char *s, *name;
    uint32_t sz, sp, off, argc, pa, ustack[3 + MAX_ARGC + 1];
    pde_t *pgdir, *old_pgdir;
    struct inode *ip;
    struct elf32hdr eh;
    struct proghdr ph;

    pgdir = 0;
    i = off = 0;

    pgdir = (pde_t *)pmm_alloc();
    kvm_init(pgdir);

    // from this point on, any failure must free pgdir (handled at "bad:")
    if ((ip = p2i(path)) == 0) {
        goto bad;
    }
    ilock(ip);

    // read elf header
    if (iread(ip, (char *)&eh, 0, sizeof(eh)) < (int)sizeof(eh)) {
        goto bad;
    }

    if (eh.magic != ELF_MAGIC) {
        goto bad;
    }

    // load program to memory
    sz = USER_BASE;
    for (i = 0, off = eh.phoff; i < eh.phnum; i++, off += sizeof(ph)) {
        if (iread(ip, (char *)&ph, off, sizeof(ph)) != sizeof(ph)) {
            goto bad;
        }
        if (ph.type != ELF_PROG_LOAD) {
            continue;
        }
        if (ph.memsz < ph.filesz) {
            goto bad;
        }
        if ((sz = uvm_alloc(pgdir, sz, ph.vaddr + ph.memsz)) == 0) {
            goto bad;
        }
        if (uvm_load(pgdir, ph.vaddr, ip, ph.off, ph.filesz) < 0) {
            goto bad;
        }
    }

    iunlockput(ip);
    ip = 0;

    /* build user stack */
    sz = PAGE_ALIGN_UP(sz);
    if ((sz = uvm_alloc(pgdir, sz, sz + 2*PAGE_SIZE)) == 0) {
        goto bad;
    }

    /* leave an inaccessible guard page below the user stack */
    if (vmm_get_mapping(pgdir, sz - 2*PAGE_SIZE, &pa) == 0) {  // fails if the page is not mapped
        goto bad;
    }
    vmm_map(pgdir, sz - 2*PAGE_SIZE, pa, PTE_K | PTE_P | PTE_W);

    sp = sz;
    if (vmm_get_mapping(pgdir, sz - PAGE_SIZE, &pa) == 0) {  // fails if the page is not mapped
        goto bad;
    }
    pa += PAGE_SIZE;

    for (argc = 0; argv[argc]; argc++) {
        if (argc > MAX_ARGC) {
            goto bad;
        }
        // "+1" leava room for '\0'  "&~3" align 4
        sp = (sp - (strlen(argv[argc]) + 1)) & ~3;    // sync with pa
        pa = (pa - (strlen(argv[argc]) + 1)) & ~3;    

        strcpy((char *)pa, argv[argc]);
        ustack[3+argc] = sp;  // argv[argc]
    }

    ustack[3+argc] = 0;

    ustack[0] = 0xffffffff; // fake return address
    ustack[1] = argc;   // count of arguments
    ustack[2] = sp - (argc+1)*4;    // pointer of argv[0]

    sp -= (3 + argc + 1)*4;
    pa -= (3 + argc + 1)*4;
    memcpy((void *)pa, ustack, (3 + argc + 1)*4);   // push ret addr, argc, argv pointer and argv[] in one block

    // take the last path component as the process name
    for (name = s = path; *s; s++) {
        if (*s == '/') {
            name = s + 1;
        }
    }

    cli();
    strncpy(proc->name, name, sizeof(proc->name));

    old_pgdir = proc->pgdir;
    proc->pgdir = pgdir;
    proc->size = sz - USER_BASE;
    proc->fm->eip = eh.entry;
    proc->fm->user_esp = sp;
    uvm_switch(proc);

    uvm_free(old_pgdir);
    old_pgdir = 0;
    sti();

    return 0;

bad:
    if (pgdir) {
        uvm_free(pgdir);
    }
    if (ip) {
        iunlockput(ip);
    }
    return -1;
}
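The ustack bookkeeping is the subtle part of exec: the argument strings are copied below the stack top first, then the block of fake return address, argc, argv pointer and the argv[] array is pushed beneath them. A hypothetical host-side sketch that replays the same sp arithmetic for argv = {"ls", "-l"} and prints the resulting layout (it compiles as an ordinary user program; the 0x1000 stack top is an arbitrary stand-in for sz):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_ARGC 16

int main(void) {
    const char *argv[] = { "ls", "-l", NULL };
    uint32_t sp = 0x1000;                 /* stand-in for sz (stack top) */
    uint32_t ustack[3 + MAX_ARGC + 1];
    uint32_t argc;

    for (argc = 0; argv[argc]; argc++) {
        /* same arithmetic as exec(): string plus '\0', 4-byte aligned */
        sp = (sp - (strlen(argv[argc]) + 1)) & ~3u;
        ustack[3 + argc] = sp;            /* user address of argv[argc] */
    }
    ustack[3 + argc] = 0;                 /* argv[] NULL terminator */
    ustack[0] = 0xffffffff;               /* fake return address */
    ustack[1] = argc;
    ustack[2] = sp - (argc + 1) * 4;      /* where argv[] itself will live */
    sp -= (3 + argc + 1) * 4;             /* final user esp */

    printf("argc=%u  esp=0x%x  argv=0x%x  argv[0] string at 0x%x\n",
           argc, sp, ustack[2], ustack[3]);
    return 0;
}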