Example #1
0
/* Identity-map the legacy PC memory ranges required for virtual-8086 mode. */
void vm86_init()
{
    static const struct {
        unsigned addr;   /* physical address, mapped 1:1 */
        unsigned pages;  /* count of 4K pages */
        unsigned flags;  /* L2 page-table entry flags */
    } region[] = {
        { 0xa0000, 32, L2E_V|L2E_W|L2E_U }, /* 128K Video RAM */
        { 0xc0000, 16, L2E_V|      L2E_U }, /*  64K Video ROM BIOS */
        { 0xe0000, 16, L2E_V|L2E_W|L2E_U }, /*  64K UMA (for Qemu) */
        { 0xf0000, 16, L2E_V|      L2E_U }, /*  64K System ROM BIOS */
        { 0x00000,  1, L2E_V|L2E_W|L2E_U }, /*   4K real-mode IVT/BDA page */
    };
    unsigned i;

    for (i = 0; i < sizeof(region) / sizeof(region[0]); ++i)
        page_map(region[i].addr, region[i].addr,
                 region[i].pages, region[i].flags);
}
Example #2
0
File: uvm.c Project: Razbit/razos
/* Free the last page allocated to uvm */
/* Free the last page allocated to uvm */
void uvm_page_free()
{
	/* Virtual address of the most recently allocated heap page. */
	uint32_t last_page = (uint32_t)cur_task->uheap_end - PAGE_SIZE;

	/* Release the backing physical frame, then drop the mapping. */
	frame_free(get_phys(last_page, cur_task->page_dir));
	page_map(last_page, 0, 0, cur_task->page_dir);

	/* Shrink the user heap by one page. */
	cur_task->uheap_end -= PAGE_SIZE;
}
Example #3
0
File: heap.c Project: zrho/Oxygen
/*
 * Grow or shrink the kernel heap by `increase` bytes (page-granular).
 *
 * increase > 0: maps freshly allocated frames at the end of the heap.
 * increase < 0: unmaps the trailing pages and returns their frames.
 * increase = 0: no change.
 *
 * Returns the (virtual) beginning address of the heap.
 */
uint64_t heap_sbrk(intptr_t increase)
{
    // Serialize all heap growth/shrink operations
    spinlock_acquire(&heap_lock);

    if (increase > 0) {
        // Round the request up to whole pages
        increase = mem_align((uintptr_t) increase, 0x1000);

        size_t pages = increase / 0x1000;
        size_t i;

        for (i = 0; i < pages; ++i) {
            // Back the new page with a fresh physical frame
            uintptr_t phys = frame_alloc();

            // Map it at the current end of the heap
            page_map(
                heap_begin + heap_length,
                phys,
                PG_PRESENT | PG_GLOBAL | PG_WRITABLE);

            heap_length += 0x1000;
        }

    } else if (increase < 0) {
        // Round the request up to whole pages
        uintptr_t decrease = mem_align((uintptr_t) (-increase), 0x1000);

        size_t pages = decrease / 0x1000;
        size_t i;

        // Never shrink below an empty heap (would underflow heap_length)
        if (pages > heap_length / 0x1000)
            pages = heap_length / 0x1000;

        for (i = 0; i < pages; ++i) {
            // Begin address of the LAST mapped page.
            // BUG FIX: the previous code used heap_begin + heap_length,
            // which points one page PAST the mapped area (off-by-one).
            uintptr_t virt = heap_begin + heap_length - 0x1000;

            // Resolve the backing frame before tearing down the mapping
            uintptr_t phys = page_get_physical(virt);

            // Unmap the page and return its frame to the allocator.
            // BUG FIX: phys was previously fetched but never freed,
            // leaking one physical frame per unmapped page.
            page_unmap(virt);
            frame_free(phys);

            heap_length -= 0x1000;
        }
    }

    spinlock_release(&heap_lock);

    // Beginning of the heap
    return heap_begin;
}
IMPLEMENT
/*
 * Allocate one kernel page and enter it into the master page table at
 * `address`.
 *
 * address: virtual address the new page should appear at
 * zf:      ZERO_FILL requests the backing page be cleared before mapping
 * mode:    when (mode & User), the User attribute is added to the PTE
 *
 * Returns `address` on success; 0 on allocation failure, if `address`
 * is already mapped, or if the table walk did not reach 4K granularity.
 */
void*
Vmem_alloc::page_alloc (void *address, Zero_fill zf, unsigned mode)
{
  void *vpage = 0;
  Address page;

  // Grab one page of mapped kernel memory to back the new mapping
  vpage = Mapped_allocator::allocator()->alloc(Config::PAGE_SHIFT);

  if (EXPECT_FALSE(!vpage))
    return 0;

  // insert page into master page table
  Pdir::Iter e = Kmem::kdir->walk(Virt_addr(address), 100,
                                  Mapped_allocator::allocator());
  if (EXPECT_FALSE(e.e->valid()))
    {
      kdb_ke("page_alloc: address already mapped");
      goto error;
    }

  // The walk must have reached leaf (page) granularity
  if (e.shift() != Config::PAGE_SHIFT)
    goto error;

  if (zf == ZERO_FILL)
    memset(vpage, 0, Config::PAGE_SIZE);

  // Physical address of the backing page
  page = Mem_layout::pmem_to_phys((Address)vpage);

  // Write the PTE directly: writable, dirty, valid, referenced, global
  *e.e = page | Pt_entry::Writable | Pt_entry::Dirty
    | Pt_entry::Valid | Pt_entry::Referenced | Pt_entry::global();
  // NOTE(review): this page_map() call looks out of place -- the PTE was
  // already written directly above, and the (address, 0, zf, page)
  // argument list does not match any nearby page_map usage. Confirm it
  // belongs here at all.
  page_map (address, 0, zf, page);

  if (mode & User)
    e.e->add_attr(Pt_entry::User);

  return address;

error:
  // Return the backing page to the allocator on any failure path
  Mapped_allocator::allocator()->free(Config::PAGE_SHIFT, vpage); // 2^0 = 1 page
  return 0;
}
Example #5
0
File: uvm.c Project: Razbit/razos
/* Allocate a page for uvm use, return its address */
/* Allocate a page for uvm use, return its address */
void* uvm_page_alloc()
{
	/* Out of user-heap virtual address space? */
	if (cur_task->uheap_end >= (void*)USTACK_BEGIN)
	{
		errno = ENOMEM;
		return NULL;
	}

	/* Map a fresh frame at the current heap end. */
	void* page = page_map((uint32_t)(cur_task->uheap_end), frame_alloc(),
	                      PF_PRES | PF_RW | PF_USER, cur_task->page_dir);
	if (page == NULL)
		return NULL; /* page_map sets errno */

	cur_task->uheap_end += PAGE_SIZE;
	memset(page, 0, PAGE_SIZE);
	return page;
}
Example #6
0
File: elf64.c Project: 8l/Hydrogen
/*
 * Load every PT_LOAD segment of an in-memory ELF64 image: copy the
 * file-backed bytes into a fresh heap buffer, zero the BSS tail, and
 * map the buffer page by page.
 */
void elf64_load(void *binary)
{
    elf64_ehdr_t *ehdr = (elf64_ehdr_t *) binary;

    size_t i;
    for (i = 0; i < ehdr->e_phnum; ++i) {
        /* Program header i (e_phsize is this project's per-entry stride) */
        elf64_phdr_t *phdr = (elf64_phdr_t *) ((uintptr_t) binary + ehdr->e_phoff + i * ehdr->e_phsize);

        if (ELF_PT_LOAD != phdr->p_type)
            continue;

        /* Segment bytes in the image, and a heap buffer sized for memsz */
        uintptr_t source = (uintptr_t) binary + phdr->p_offset;
        uintptr_t target = (uintptr_t) heap_alloc(phdr->p_memsz);

        /* Copy the file-backed part, zero the remainder (BSS) */
        memcpy((void *) target, (void *) source, phdr->p_filesz);
        memset((void *) (target + phdr->p_filesz), 0, phdr->p_memsz - phdr->p_filesz);

        size_t offset;
        for (offset = 0; offset < phdr->p_memsz; offset += 0x1000) {
            /* NOTE(review): arguments here are (target, p_vaddr, flags);
             * other page_map implementations take (virt, phys, ...).
             * Confirm target/p_vaddr are in the intended positions for
             * this project's page_map. */
            page_map(target + offset, phdr->p_vaddr + offset, PAGE_FLAG_WRITABLE | PAGE_FLAG_GLOBAL);
        }
    }
}
Example #7
0
File: elf.c Project: ahixon/papaya
/*
 * Copy one ELF segment directly into the destination address space.
 * For each destination page: map it in dest_as, double-map its frame
 * into SOS's scratch window via a cap copy, memcpy the data through
 * the window, flush caches, then unmap and delete the cap copy.
 *
 * Returns 0 on success, 1 on any failure. Boot-time only: no lazy
 * loading or swapping is attempted here.
 */
static int load_segment_directly_into_vspace(addrspace_t dest_as,
                                    char *src, unsigned long segment_size,
                                    unsigned long file_size, unsigned long dst,
                                    unsigned long permissions) {
    assert(file_size <= segment_size);

    unsigned long pos;

    /* Reserve the destination region up front */
    struct as_region* reg = as_define_region (dest_as, dst, segment_size,
        permissions, REGION_GENERIC);

    if (!reg) {
        return 1;
    }

    /* We work a page at a time in the destination vspace. */
    pos = 0;
    while(pos < segment_size) {
        seL4_CPtr sos_cap, frame_cap;
        seL4_Word vpage, kvpage;

        unsigned long kdst;
        int nbytes;
        int err;

        /* Mirror the destination address into SOS's scratch area */
        kdst   = dst + PROCESS_SCRATCH_START;
        vpage  = PAGE_ALIGN(dst);
        kvpage = PAGE_ALIGN(kdst);
        //kvpage = PROCESS_SCRATCH + 0x1000;

        /* Map the page into the destination address space */
        int status = PAGE_FAILED;
        struct pt_entry* page = page_map (dest_as, reg, vpage, &status,
            NULL, NULL);

        if (!page || status != PAGE_SUCCESS) {
            /* we should really only be using this function at boot time.
             * load_segment_into_vspace will handle lazy loading/swap events
             * for you - early on in the boot we can assume that swapping is NOT
             * an option */
            return 1;
        }

        /* Map the frame into SOS as well so we can copy into it */
        /* FIXME: WOULD BE MUCH NICER(!) if we just used cur_addrspace - 
         * you will need to create a region in main's init function */
        sos_cap = page->cap;
        assert (sos_cap);

        frame_cap = cspace_copy_cap (cur_cspace, cur_cspace, sos_cap,
            seL4_AllRights);

        if (!frame_cap) {
            return 1;
        }
        
        err = map_page (frame_cap, seL4_CapInitThreadPD, kvpage, seL4_AllRights,
            seL4_ARM_Default_VMAttributes);

        if (err) {
            /* NOTE(review): frame_cap is not deleted on this path, so the
             * cap copy leaks -- confirm and clean up on failure. */
            return 1;
        }

        /* Now copy our data into the destination vspace */
        /* nbytes: bytes up to the end of the current destination page */
        nbytes = PAGESIZE - (dst & PAGEMASK);
        if (pos < file_size){
            memcpy((void*)kdst, (void*)src, MIN(nbytes, file_size - pos));
        }

        /* Not observable to I-cache yet so flush the frame */
        seL4_ARM_Page_FlushCaches(frame_cap);

        /* unmap page + delete cap copy */
        err = seL4_ARM_Page_Unmap (frame_cap);
        if (err) {
            /* NOTE(review): frame_cap also leaks on this path -- confirm. */
            return 1;
        }

        cspace_delete_cap (cur_cspace, frame_cap);

        pos += nbytes;
        dst += nbytes;
        src += nbytes;
    }

    return 0;
}
Example #8
0
/*
 * XXX
 * receive could take a task-local port number like a fd and speed lookup and
 * minimize locking.
 */
/*
 * Receive the next message queued on `port` for the current task.
 *
 * Copies the message header to *ipch.  If the message carries a page:
 * with vpagep == NULL the page flip is refused and the page released;
 * otherwise the page is mapped into the receiving task (or directly
 * into the kernel map for kernel tasks) and its virtual address is
 * stored in *vpagep.
 *
 * Returns 0 on success or an error code on failure.
 */
int
ipc_port_receive(ipc_port_t port, struct ipc_header *ipch, void **vpagep)
{
	struct ipc_message *ipcmsg;
	struct ipc_port *ipcp;
	struct task *task;
	vaddr_t vaddr;
	int error, error2;

	task = current_task();

	ASSERT(task != NULL, "Must have a running task.");
	ASSERT(ipch != NULL, "Must be able to copy out header.");

	IPC_PORTS_LOCK();
	ipcp = ipc_port_lookup(port);
	if (ipcp == NULL) {
		IPC_PORTS_UNLOCK();
		return (ERROR_NOT_FOUND);
	}
	IPC_PORTS_UNLOCK();

	if (!ipc_port_right_check(ipcp, task, IPC_PORT_RIGHT_RECEIVE)) {
		IPC_PORT_UNLOCK(ipcp);
		return (ERROR_NO_RIGHT);
	}

	if (TAILQ_EMPTY(&ipcp->ipcp_msgs)) {
		IPC_PORT_UNLOCK(ipcp);
		return (ERROR_AGAIN);
	}

	/* Dequeue the first pending message while still holding the port. */
	ipcmsg = TAILQ_FIRST(&ipcp->ipcp_msgs);
	ASSERT(ipcmsg != NULL, "Queue must not change out from under us.");
	ASSERT(ipcmsg->ipcmsg_header.ipchdr_dst == ipcp->ipcp_port,
	       "Destination must be this port.");
	TAILQ_REMOVE(&ipcp->ipcp_msgs, ipcmsg, ipcmsg_link);
	IPC_PORT_UNLOCK(ipcp);

	/*
	 * Insert any passed rights.
	 */
	if (ipcmsg->ipcmsg_header.ipchdr_right != IPC_PORT_RIGHT_NONE) {
		ipcp = ipc_port_lookup(ipcmsg->ipcmsg_header.ipchdr_src);
		if (ipcp == NULL)
			panic("%s: port disappeared.", __func__);
		error = ipc_port_right_insert(ipcp, task, ipcmsg->ipcmsg_header.ipchdr_right);
		if (error != 0)
			/* BUG FIX: message read "grating" (typo). */
			panic("%s: granting rights failed: %m", __func__,
			      error);
		IPC_PORT_UNLOCK(ipcp);
	}

	if (ipcmsg->ipcmsg_page == NULL) {
		if (vpagep != NULL)
			*vpagep = NULL;
	} else {
		if (vpagep == NULL) {
			/*
			 * A task may refuse a page flip for any number of reasons.
			 */
			page_release(ipcmsg->ipcmsg_page);
		} else {
			/*
			 * Map this page into the receiving task.
			 */
			if ((task->t_flags & TASK_KERNEL) == 0) {
				/*
				 * User task.
				 */
				error = vm_alloc_address(task->t_vm, &vaddr, 1, false);
				if (error != 0) {
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					return (error);
				}

				error = page_map(task->t_vm, vaddr, ipcmsg->ipcmsg_page);
				if (error != 0) {
					error2 = vm_free_address(task->t_vm, vaddr);
					if (error2 != 0)
						/* BUG FIX: panic reported `error`
						 * (page_map's code) instead of the
						 * vm_free_address failure `error2`. */
						panic("%s: vm_free_address failed: %m", __func__, error2);
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					/*
					 * BUG FIX: previously fell through here after
					 * freeing ipcmsg, dereferencing it below
					 * (use-after-free) and returning success
					 * despite the failed mapping.
					 */
					return (error);
				}
			} else {
				/*
				 * Kernel task.
				 */
				error = page_map_direct(&kernel_vm, ipcmsg->ipcmsg_page, &vaddr);
				if (error != 0) {
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					return (error);
				}
			}
			*vpagep = (void *)vaddr;
		}
	}

	/* Hand the header to the caller and release the message. */
	*ipch = ipcmsg->ipcmsg_header;

	free(ipcmsg);

	return (0);
}