Example #1
/** Create a temporary page.
 *
 * The page is mapped read/write to a newly allocated frame of physical memory.
 * The page must be returned to the system by a call to
 * km_temporary_page_put().
 *
 * @param[out] framep	Pointer to a variable which will receive the physical
 *			address of the allocated frame.
 * @param[in] flags	Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
 *			and FRAME_ATOMIC bits are allowed.
 * @return		Virtual address of the allocated frame.
 */
uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags)
{
	uintptr_t frame;
	uintptr_t page;

	ASSERT(THREAD);
	ASSERT(framep);
	ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));

	/*
	 * Allocate a frame, preferably from high memory.
	 */
	frame = (uintptr_t) frame_alloc(ONE_FRAME,
	    FRAME_HIGHMEM | FRAME_ATOMIC | flags); 
	if (frame) {
		page = km_map(frame, PAGE_SIZE,
		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
		ASSERT(page);	// FIXME
	} else {
		frame = (uintptr_t) frame_alloc(ONE_FRAME,
		    FRAME_LOWMEM | flags);
		if (!frame)
			return (uintptr_t) NULL;
		page = PA2KA(frame);
	}

	*framep = frame;
	return page;	
}
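A minimal caller sketch, assuming km_temporary_page_put() takes the virtual
address returned here (its signature is not shown in this listing):

	uintptr_t frame;
	uintptr_t page = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
	if (page) {
		/* Use the mapping as a scratch buffer... */
		memsetb((void *) page, PAGE_SIZE, 0);
		/* ...and hand it back, as the contract above requires. */
		km_temporary_page_put(page);
	}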
Example #2
File: main.c Project: gapry/AOS
static void
frametable_test(uint32_t test_mask) {
    int err;
    // srand(0) will eventually cause ft_test 2 to try to access an invalid
    // kvaddr; srand(1) lasts longer before hitting kvaddr == 0 (which is
    // invalid).
    srand(1);
    if (test_mask & TEST_1) {
        dprintf(3, "Starting test 1...\n");
        dprintf(3, "Allocate %d frames and touch them\n", TEST_N_FRAMES);
        ftc1 = 0;
        err = frame_alloc(0,NULL,PROC_NULL,true,ft_test_1, NULL);
        assert(!err);
    }
    if (test_mask & TEST_2) {
        dprintf(3, "Starting test 2...\n");
        dprintf(3, "Test that frame_alloc runs out of memory after a while\n");
        ftc2 = 0;
        err = frame_alloc(0,NULL,PROC_NULL,true,ft_test_2, NULL);
        assert(!err);
    }
    if (test_mask & TEST_3) {
        dprintf(3, "Starting test 3...\n");
        dprintf(3, "Test that you never run out of memory if you always free frames.\n");
        ftc3 = 0;
        err = frame_alloc(0,NULL,PROC_NULL,true,ft_test_3, NULL);
        assert(!err);
    }
}
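The tests drive the continuation-passing frame_alloc() variant shown in
Examples #6 and #28. A plausible shape for the first test's callback,
inferred from _sos_page_map_2_alloc_cap_pt() below (ft_test_1's real body is
not part of this listing):

static void
ft_test_1(void *token, seL4_Word kvaddr) {
    assert(kvaddr != 0);
    /* Touch the frame so a bad mapping faults immediately */
    *(seL4_Word *) kvaddr = ftc1;
    if (++ftc1 < TEST_N_FRAMES) {
        int err = frame_alloc(0, NULL, PROC_NULL, true, ft_test_1, NULL);
        assert(!err);
    }
}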
Example #3
int paging_init(uint32_t mem_size)
{
    uint64_t mem_end = (uint64_t)mem_size * 1024ULL;
    frames_count = (uint32_t)(mem_end / 0x1000ULL);
    frames = (uint32_t*)static_alloc(BIT_INDEX(frames_count));
    memset(frames, 0, BIT_INDEX(frames_count));
    /* Create kernel page directory.
     */
    kernel_directory = static_alloc(sizeof(*kernel_directory));
    memset(kernel_directory, 0, sizeof(*kernel_directory));
    /* Identity map pages up to heap address. We make the first page non-present so that NULL-pointer dereferences cause
     * a page fault.
     */
    frame_alloc(page_get(0, kernel_directory, true), 0);
    uint32_t i = 0;
    for (i = PAGE_SIZE; i < heap_addr; i += PAGE_SIZE) {
        frame_alloc(page_get(i, kernel_directory, true), PAGE_FLAGS_PRESENT);
    }
    /* Map pages for the heap.
    */
    for (; i < HEAP_ADDRESS + HEAP_SIZE_INIT; i += PAGE_SIZE) {
        page_get(i, kernel_directory, true);
    }
    /* Set page fault handler.
     */
    set_interrupt_handler(ISR_PAGE_FAULT, page_fault_handler);
    page_directory_load(kernel_directory);
    page_enable();
    return 0;
}
Example #4
bool 
spage_table_load (struct spage_table_entry *spte, enum spage_types type)
{
  // P3: reduce race condition with frame eviction process.
  spte->inevictable = true;

  if (spte->in_memory) return false;

  if (type == SWAP)
    {
      uint8_t *f = frame_alloc (PAL_USER, spte);
      if (!f) return false;

      if (!install_page (spte->upage, f, spte->writable))
	{
	  frame_free (f);
	  return false;
	}

      swap_in (spte->swap_slot_id, spte->upage);
      spte->in_memory = true;
    }
  
  if (type == FILE || type == MMAP)
    {
      enum palloc_flags fg = PAL_USER;
      if (spte->file_read_bytes == 0) fg |= PAL_ZERO;

      uint8_t *f = frame_alloc (fg, spte);
      if (!f) return false;

      if (spte->file_read_bytes > 0)
	{
	  lock_acquire (&lock_f);
	  if ((int) spte->file_read_bytes != file_read_at (spte->file, f, spte->file_read_bytes,
							   spte->file_offset))
	    {
	      lock_release (&lock_f);
	      frame_free (f);
	      return false;
	    }
	  lock_release (&lock_f);
	  memset (f + spte->file_read_bytes, 0, spte->file_zero_bytes);
	}
      
      if (!install_page (spte->upage, f, spte->writable))
	{
	  frame_free (f);
	  return false;
	}
      
      spte->in_memory = true;  
    }
  
  return true;
}
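A sketch of how a page-fault handler might drive this loader;
spage_table_find() and kill_process() are stand-in names, and the reset of
inevictable follows from spage_table_load() setting it at entry and never
clearing it:

  struct spage_table_entry *spte = spage_table_find (fault_addr);
  if (spte == NULL || !spage_table_load (spte, spte->spage_type))
    kill_process ();              /* unrecoverable fault */
  else
    spte->inevictable = false;    /* loaded; eligible for eviction again */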
Example #5
void ras_init(void)
{
	uintptr_t frame =
	    frame_alloc(1, FRAME_ATOMIC | FRAME_HIGHMEM, 0);
	if (!frame)
		frame = frame_alloc(1, FRAME_LOWMEM, 0);
	
	ras_page = (uintptr_t *) km_map(frame,
	    PAGE_SIZE, PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_CACHEABLE);
	
	memsetb(ras_page, PAGE_SIZE, 0); 
	ras_page[RAS_START] = 0;
	ras_page[RAS_END] = 0xffffffff;
}
Example #6
static void
_sos_page_map_2_alloc_cap_pt(void* token, seL4_Word kvaddr) {
    dprintf(3, "sos_page_map 2\n");
    sos_page_map_cont_t* cont = (sos_page_map_cont_t*)token;

    if (kvaddr == 0) {
        dprintf(3, "warning: _sos_page_map_2_alloc_cap_pt not enough memory for lvl2 pagetable\n");
        cont->callback(cont->token, ENOMEM);
        free(cont);
        return;
    }

    seL4_Word vpage = PAGE_ALIGN(cont->vpage);
    int x = PT_L1_INDEX(vpage);
    cont->as->as_pd_regs[x] = (pagetable_t)kvaddr;

    /* Allocate memory for the 2nd level pagetable for caps */
    int err = frame_alloc(0, NULL, PROC_NULL, true, _sos_page_map_3, token);
    if (err) {
        frame_free(kvaddr);
        cont->callback(cont->token, EFAULT);
        free(cont);
        return;
    }
}
Example #7
/** Allocate external configuration frames from low memory. */
pfn_t zone_external_conf_alloc(size_t count)
{
	size_t frames = SIZE2FRAMES(zone_conf_size(count));
	
	return ADDR2PFN((uintptr_t)
	    frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
}
Example #8
static void
_sos_page_map_4_alloc_frame(void* token) {
    dprintf(3, "sos_page_map 4\n");
    sos_page_map_cont_t* cont = (sos_page_map_cont_t*)token;

    int x = PT_L1_INDEX(cont->vpage);
    int y = PT_L2_INDEX(cont->vpage);

    if ((cont->as->as_pd_regs[x][y] & PTE_IN_USE_BIT) &&
            !(cont->as->as_pd_regs[x][y] & PTE_SWAPPED)) {
        /* page already mapped */
        cont->callback(cont->token, EINVAL);
        free(cont);
        return;
    }

    /* Allocate memory for the frame */
    int err = frame_alloc(cont->vpage, cont->as, cont->pid, cont->noswap, _sos_page_map_5, token);
    if (err) {
        dprintf(3, "_sos_page_map_4_alloc_frame: failed to allocate frame\n");
        cont->callback(cont->token, EINVAL);
        free(cont);
        return;
    }
}
Example #9
static errval_t alloc_local(void)
{
    errval_t err;

    size_t frame_size = 0;
    if (disp_xeon_phi_id() == 0) {
        frame_size = XPHI_BENCH_FRAME_SIZE_HOST;
    } else {
        frame_size = XPHI_BENCH_FRAME_SIZE_CARD;
    }

    if (!frame_size) {
        frame_size = 4096;
    }

    debug_printf("Allocating a frame of size: %lx\n", frame_size);

    size_t alloced_size = 0;
    err = frame_alloc(&local_frame, frame_size, &alloced_size);
    assert(err_is_ok(err));
    assert(alloced_size >= frame_size);

    struct frame_identity id;
    err = invoke_frame_identify(local_frame, &id);
    assert(err_is_ok(err));
    local_base = id.base;
    local_frame_sz = alloced_size;

    err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL);

    return err;
}
Example #10
static errval_t copy_bios_mem(void) {
    errval_t err = SYS_ERR_OK;

    // Get a copy of the VBE BIOS before ACPI touches it
    struct capref bioscap;

    err = mm_alloc_range(&pci_mm_physaddr, BIOS_BITS, 0,
                       1UL << BIOS_BITS, &bioscap, NULL);
    assert(err_is_ok(err));

    void *origbios;
    struct vregion *origbios_vregion;
    err = vspace_map_one_frame(&origbios, 1 << BIOS_BITS, bioscap,
                               NULL, &origbios_vregion);
    assert(err_is_ok(err));

    err = frame_alloc(&biosmem, 1 << BIOS_BITS, NULL);
    assert(err_is_ok(err));

    void *newbios;
    struct vregion *newbios_vregion;
    err = vspace_map_one_frame(&newbios, 1 << BIOS_BITS, biosmem,
                               NULL, &newbios_vregion);
    assert(err_is_ok(err));

    memcpy(newbios, origbios, 1 << BIOS_BITS);

    // Unmap both vspace regions again
    vregion_destroy(origbios_vregion);
    vregion_destroy(newbios_vregion);

    // TODO: Implement mm_free()

    return err;
}
Example #11
/**
 * \brief allocates a frame on a specific node
 *
 * \param dest      capref to store the frame
 * \param size      size of the frame to allocated
 * \param node      node on which the frame should be allocated
 * \param ret_size  returned size of the frame capability
 *
 * \returns SYS_ERR_OK on SUCCESS
 *          errval on FAILURE
 */
errval_t numa_frame_alloc_on_node(struct capref *dest,
                                  size_t size,
                                  nodeid_t node,
                                  size_t *ret_size)
{
    errval_t err;

    NUMA_DEBUG_ALLOC("allocating frame on node %" PRIuNODEID "\n", node);

    uint64_t min_base, max_limit;
    ram_get_affinity(&min_base, &max_limit);

    if (node >= numa_topology.num_nodes) {
        return NUMA_ERR_NODEID_INVALID;
    }

    uint64_t node_base = numa_node_base(node);
    uint64_t node_limit = node_base + numa_node_size(node, NULL);

    NUMA_DEBUG_ALLOC("setting affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     node_base, node_limit);

    ram_set_affinity(node_base, node_limit);

    err = frame_alloc(dest, size, ret_size);

    ram_set_affinity(min_base, max_limit);

    NUMA_DEBUG_ALLOC("restore affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     min_base, max_limit);

    return err;
}
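A hedged usage sketch; vspace_map_one_frame() and BASE_PAGE_SIZE appear
elsewhere in this listing (Examples #9, #10, #17), and node id 0 is
arbitrary. Note that the affinity save/restore above protects the caller's
later allocations, but another thread allocating concurrently could still
observe the temporary affinity window.

    struct capref frame;
    size_t retsize;
    errval_t err = numa_frame_alloc_on_node(&frame, BASE_PAGE_SIZE, 0, &retsize);
    if (err_is_ok(err)) {
        void *buf;
        err = vspace_map_one_frame(&buf, retsize, frame, NULL, NULL);
    }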
Example #12
/** Create PTL0.
 *
 * PTL0 of 4-level page table will be created for each address space.
 *
 * @param flags Flags can specify whether ptl0 is for the kernel address space.
 *
 * @return New PTL0.
 *
 */
pte_t *ptl0_create(unsigned int flags)
{
	pte_t *dst_ptl0 = (pte_t *)
	    PA2KA(frame_alloc(PTL0_FRAMES, FRAME_LOWMEM, PTL0_SIZE - 1));
	
	if (flags & FLAG_AS_KERNEL)
		memsetb(dst_ptl0, PTL0_SIZE, 0);
	else {
		/*
		 * Copy the kernel address space portion to new PTL0.
		 */
		
		mutex_lock(&AS_KERNEL->lock);
		
		pte_t *src_ptl0 =
		    (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
		
		uintptr_t src = (uintptr_t)
		    &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
		uintptr_t dst = (uintptr_t)
		    &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
		
		memsetb(dst_ptl0, PTL0_SIZE, 0);
		memcpy((void *) dst, (void *) src,
		    PTL0_SIZE - (src - (uintptr_t) src_ptl0));
		
		mutex_unlock(&AS_KERNEL->lock);
	}
	
	return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
}
Example #13
bool 
grow_stack (void *uaddr)
{
  void *upage = pg_round_down (uaddr);
  if((size_t)(PHYS_BASE - upage) > (1 << 23)) return false;

  struct spage_table_entry *spte = malloc (sizeof (struct spage_table_entry));
  if (!spte) return false;
  spte->upage = upage;
  spte->in_memory = true;
  spte->writable = true;
  spte->spage_type = SWAP;
  spte->inevictable = true;

  uint8_t *f = frame_alloc (PAL_USER, spte);
  if (!f)
    {
      free (spte);
      return false;
    }

  if (!install_page (spte->upage, f, spte->writable))
    {
      free (spte);
      frame_free (f);
      return false;
    }

  if (intr_context ()) spte->inevictable = false;

  return hash_insert (&thread_current ()->spage_table, &spte->elem) == NULL;
}
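A sketch of the intended call site in a fault handler. is_user_vaddr() is
standard Pintos; the 32-byte slack below esp (to allow for PUSHA) is the
usual convention, and kill_process() is a stand-in name:

  if (is_user_vaddr (fault_addr)
      && (uint8_t *) fault_addr >= (uint8_t *) esp - 32)
    {
      if (!grow_stack (fault_addr))
        kill_process ();
    }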
Example #14
/** Initializes page tables.
 *
 * 1:1 virtual-physical mapping is created in kernel address space. Mapping
 * for table with exception vectors is also created.
 */
void page_arch_init(void)
{
	int flags = PAGE_CACHEABLE;
	page_mapping_operations = &pt_mapping_operations;

	page_table_lock(AS_KERNEL, true);
	
	uintptr_t cur;

	/* Kernel identity mapping */
	for (cur = PHYSMEM_START_ADDR;
	    cur < min(config.identity_size, config.physmem_end);
	    cur += FRAME_SIZE)
		page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
	
#ifdef HIGH_EXCEPTION_VECTORS
	/* Create mapping for exception table at high offset */
	uintptr_t ev_frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_NONE);
	page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, ev_frame, flags);
#else
#error "Only high exception vector supported now"
#endif

	page_table_unlock(AS_KERNEL, true);
	
	as_switch(NULL, AS_KERNEL);
	
	boot_page_table_free();
}
Example #15
void alloc_local(void)
{
    errval_t err;

#ifndef __k1om__
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(XPHI_BENCH_RAM_MINBASE, XPHI_BENCH_RAM_MAXLIMIT);
#endif
    size_t alloced_size = 0;
    err = frame_alloc(&local_frame, XPHI_BENCH_MSG_FRAME_SIZE, &alloced_size);
    EXPECT_SUCCESS(err, "frame_alloc");

#ifndef __k1om__
    ram_set_affinity(minbase, maxlimit);
#endif

    struct frame_identity id;
    err = invoke_frame_identify(local_frame, &id);
    EXPECT_SUCCESS(err, "invoke_frame_identify");

    local_base = id.base;
    local_frame_sz = alloced_size;

    debug_printf("alloc_local | Frame base: %016lx, size=%lx\n", id.base,
                 1UL << id.bits);

    err =  vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL);
    EXPECT_SUCCESS(err, "vspace_map_one_frame");
}
Example #16
static void expand(addr new_size, struct vmem_heap* heap)
{
	/* Sanity check */
	ASSERT(new_size > heap->end_address - heap->start_address);

	/* Round up to the nearest following page boundary */
	if ((new_size & 0xFFF) != 0)
	{
		new_size &= 0xFFFFF000;
		new_size += 0x1000;
	}

	/* Make sure we are not overreaching ourselves */
	ASSERT(heap->start_address + new_size <= heap->max_address);

	/* This should always be on a page boundary */
	addr old_size = heap->end_address - heap->start_address;
	addr i = old_size;

	while (i < new_size)
	{
		frame_alloc( (struct page*)get_page(heap->start_address+i, 1, kernel_directory),
				(heap->supervisor)?1:0, (heap->readonly)?0:1);
		i += 0x1000; /* page size */
	}

	heap->end_address = heap->start_address+new_size;
}
Example #17
/**
 * \brief Allocate some slabs
 *
 * \param retbuf     Pointer to return the allocated memory
 * \param slab_type  Type of slab the memory is allocated for
 *
 * Since this region is used for backing specific slabs,
 * only those types of slabs can be allocated.
 */
errval_t vspace_pinned_alloc(void **retbuf, enum slab_type slab_type)
{
    errval_t err;
    struct pinned_state *state = get_current_pinned_state();

    // Select slab type
    struct slab_allocator *slab;
    switch(slab_type) {
    case VREGION_LIST:
        slab = &state->vregion_list_slab;
        break;
    case FRAME_LIST:
        slab = &state->frame_list_slab;
        break;
    default:
        return LIB_ERR_VSPACE_PINNED_INVALID_TYPE;
    }

    thread_mutex_lock(&state->mutex);

    // Try allocating
    void *buf = slab_alloc(slab);
    if (buf == NULL) {
        // Out of memory, grow
        struct capref frame;
        err = frame_alloc(&frame, BASE_PAGE_SIZE, NULL);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "frame_alloc in vspace_pinned_alloc");
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }
        err = state->memobj.m.f.fill((struct memobj*)&state->memobj,
                                     state->offset, frame,
                                     BASE_PAGE_SIZE);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&state->mutex);
            DEBUG_ERR(err, "memobj_fill in vspace_pinned_alloc");
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) +
            state->offset;
        void *slab_buf = (void*)vspace_genvaddr_to_lvaddr(gvaddr);
        slab_grow(slab, slab_buf, BASE_PAGE_SIZE);
        state->offset += BASE_PAGE_SIZE;

        // Try again
        buf = slab_alloc(slab);
    }

    thread_mutex_unlock(&state->mutex);

    if (buf == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    } else {
        *retbuf = buf;
        return SYS_ERR_OK;
    }
}
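A minimal caller sketch; DEBUG_ERR is used as in the function body above,
and the element type is illustrative:

    void *buf;
    errval_t err = vspace_pinned_alloc(&buf, VREGION_LIST);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "allocating a pinned vregion list element");
        return err;
    }
    struct vregion_list *elem = buf;   /* illustrative element type */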
Example #18
File: heap.c Project: zrho/Oxygen
uint64_t heap_sbrk(intptr_t increase)
{
    // Acquire lock
    spinlock_acquire(&heap_lock);
    
    // Increase?
    if (increase > 0) {
        // Align
        increase = mem_align((uintptr_t) increase, 0x1000);
        
        // Determine amount of pages
        size_t pages = increase / 0x1000;
        size_t i;
        
        for (i = 0; i < pages; ++i) {
            // Allocate frame
            uintptr_t phys = frame_alloc();
            
            // Map frame
            page_map(
                heap_begin + heap_length,
                phys,
                PG_PRESENT | PG_GLOBAL | PG_WRITABLE);
                
            // Increase length
            heap_length += 0x1000;
        }
        
    // Decrease
    } else if (increase < 0) {
        // Align decrease
        uintptr_t decrease = mem_align((uintptr_t) (-increase), 0x1000);
        
        // Determine amount of pages
        size_t pages = decrease / 0x1000;
        size_t i;
        
        for (i = 0; i < pages; ++i) {
            // Get (virtual) begin address of the last mapped page
            uintptr_t virt = heap_begin + heap_length - 0x1000;
            
            // Get physical address (looked up here, but never released
            // back to a frame allocator)
            uintptr_t phys = page_get_physical(virt);
            (void) phys;
            
            // Unmap page
            page_unmap(virt);
            
            // Decrease length
            heap_length -= 0x1000;
        }
    }
    
    // Release lock
    spinlock_release(&heap_lock);
    
    // Beginning of the heap
    return heap_begin;
}
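A usage sketch for this sbrk-style interface:

    // Grow the heap by two pages. Unlike Unix sbrk(), the return value is
    // the beginning of the heap, not the previous break.
    uint64_t base = heap_sbrk(0x2000);
    // ... place allocator metadata or objects in the new pages ...
    heap_sbrk(-0x2000);   // shrink back, unmapping the last two pages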
Example #19
/* A faulting page walk. May need to alloc various frames for SOS and seL4
   page tables. */
int page_walk(sos_pcb *pcb, seL4_Word proc_vaddr, page_attrs attrs,
              page_table_cb cb, void *cb_data) {
    int err;
    seL4_Word pd_idx;

    dprintf(6, "page_walk: seeking %p\n", proc_vaddr);

    // malloc a structure to store all intermediate data
    page_walk_data *data = malloc(sizeof(struct page_walk_data));
    if (data == NULL) {
        dprintf(1, "sos_page_alloc: Could not allocate callback data (OOM)\n");
        return SOS_PAGE_TABLE_OOM;
    }

    // Stash the attrs and callbacks for later
    data->attrs = attrs;
    data->cb = cb;
    data->cb_data = cb_data;
    data->pcb = pcb;

    // Sanity checks - if these fail, process has been corrupted
    // These are ok - no need to check for abort since synchronous call
    conditional_panic(((void*) pcb->sos_pd == NULL), "No page directory");
    conditional_panic((pcb->cspace == NULL), "Process cspace does not exist");

    // Grab PD and PT indices from the given vaddr
    pd_idx = PAGE_TABLE(proc_vaddr);
    data->pt_idx = PAGE_TABLE_ENTRY(proc_vaddr);

    // Index into page directory, which *must* exist (else process is corrupt)
    // pt stores the address of the pointer to the PT, i.e. (**)

    data->pt = (sos_page_table *) (pcb->sos_pd + pd_idx);

    dprintf(6, "page_walk: setting up alloc or finaliser\n");

    if ((*(data->pt)) == NULL) {
        // PT we want doesn't exist, so we alloc it
        dprintf(6, "page_walk: time for frame_alloc\n");
        err = frame_alloc(&_page_table_frame_alloc_cb, (void*) data);
        // frame_alloc is asynchronous - it will finalise for us
        return err;
    } else {
        // Return to syscall loop, then finalise
        dprintf(6, "page_walk: ready to finalise\n");
        err = sos_task_add_ready(&_sos_page_alloc_finalise, (void *) data,
                                 SOS_TASK_PRIORITY_HIGH);
        if (err) {
            dprintf(1, "sos_page_alloc: Could not finalise (%d)\n", err);
            // XXX Could tear down the process here
            free(data);
        }
        return SOS_PAGE_TABLE_SUCCESS;
    }
}
Example #20
uintptr_t vhpt_set_up(void)
{
	uintptr_t vhpt_frame =
	    frame_alloc(SIZE2FRAMES(VHPT_SIZE), FRAME_ATOMIC, 0);
	if (!vhpt_frame)
		panic("Kernel configured with VHPT but no memory for table.");
	
	vhpt_base = (vhpt_entry_t *) PA2KA(vhpt_frame);
	vhpt_invalidate_all();
	return (uintptr_t) vhpt_base;
}
Example #21
void process_init(void) {
    // Map the thread map
    uintptr_t vaddr = MEMORY_PROCESS_MAP_VADDR;
    uint16_t pflags = PAGE_FLAG_GLOBAL | PAGE_FLAG_WRITEABLE;
    uintptr_t offset;

    for (offset = 0; offset < PROCESS_MAP_SIZE; offset += 0x1000)
        memory_map(vaddr + offset, frame_alloc(), pflags);

    // Clear the thread map
    memset((void *) vaddr, 0, PROCESS_MAP_SIZE);
}
Example #22
static void _process_create_thread_map(uint32_t pid) {
    // Map thread map
    uintptr_t thread_map = MEMORY_THREAD_MAP_VADDR + pid * THREAD_MAP_SIZE;
    uint16_t pflags = PAGE_FLAG_WRITEABLE | PAGE_FLAG_GLOBAL;
    size_t offset;

    for (offset = 0; offset < THREAD_MAP_SIZE; offset += 0x1000)
        memory_map(thread_map + offset, frame_alloc(), pflags);

    // Clear thread map
    memset((void *) thread_map, 0, THREAD_MAP_SIZE);
}
Example #23
File: frame.c Project: eXeC64/piker
size_t frame_alloc_mult(uintptr_t* frames, size_t num)
{
    for(uint32_t i = 0; i < num; ++i) {
        uint32_t result = frame_alloc(&(frames[i]));
        if(result == 0) {
            /* Allocation failed, we're out of frames */
            return i;
        }
    }

    return num;
}
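A caller sketch showing how the partial-success return value might be
handled; frame_free() is assumed to exist alongside frame_alloc() in this
project, which the listing does not show:

    uintptr_t frames[16];
    size_t got = frame_alloc_mult(frames, 16);
    if (got < 16) {
        /* Out of frames: roll back the partial allocation */
        for (size_t i = 0; i < got; ++i)
            frame_free(frames[i]);
    }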
Example #24
void frame_alloc_test(void* arg, struct modtest_result* result) {
	struct sk_buff* skb = frame_alloc(9, 0x0F, true);
	struct xb_frameheader* frm = NULL;

	frm = (struct xb_frameheader*)skb->data;

	FAIL_IF_NOT_EQ(0x7E, skb->data[0]);
	FAIL_IF_NOT_EQ(0x00, skb->data[1]);
	FAIL_IF_NOT_EQ(0x0a, skb->data[2]);
	FAIL_IF_NOT_EQ(0x0F, skb->data[3]);
	TEST_SUCCESS();
}
Example #25
/**
 * \brief initializes a dma descriptor ring and allocates memory for it
 *
 * \param ring  the ring structure to initialize
 * \param size  number of elements in the ring
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
                                      uint16_t size)
{
    errval_t err;

    memset(ring, 0, sizeof(*ring));

    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
    assert(IS_POW2(size));

#ifndef __k1om__
    /*
     * we set the ram affinity to the maximum range mapped by the system memory
     * page tables when being on the host. Otherwise the card cannot access it.
     */
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE);
#endif

    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
    err = frame_alloc(&ring->cap, frame_size, NULL);

#ifndef __k1om__
    ram_set_affinity(minbase, maxlimit);
#endif

    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame_attr(&ring->vbase,
                                    frame_size,
                                    ring->cap,
                                    VREGION_FLAGS_READ_WRITE,
                                    NULL,
                                    NULL);
    if (err_is_fail(err)) {
        cap_destroy(ring->cap);
        return err;
    }

    struct frame_identity id;
    err = invoke_frame_identify(ring->cap, &id);
    assert(err_is_ok(err));
    ring->pbase = id.base;
    ring->size = size;

    memset(ring->vbase, 0, frame_size);

    return SYS_ERR_OK;
}
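A hedged usage sketch; the ring size 64 is an arbitrary power of two within
the asserted bounds:

    struct xdma_ring ring;
    errval_t err = xeon_phi_dma_desc_ring_alloc(&ring, 64);
    if (err_is_fail(err)) {
        return err;
    }
    /* ring.vbase is the zeroed descriptor memory; ring.pbase is the
     * physical base the DMA engine gets programmed with. */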
Example #26
File: pager.c Project: gapry/aos-1
/*
 * This function loads the entire ELF file into physical frames and maps
 * fpages corresponding to virtual addresses in the ELF file to the process.
 */
int load_code_segment_virtual(char *elfFile, L4_ThreadId_t new_tid) {
  uint32_t min[2];
  uint32_t max[2];
  elf_getMemoryBounds(elfFile, 0, (uint64_t*)min, (uint64_t*)max);
  //Now we need to reserve memory between min and max
  L4_Word_t lower_address = ((L4_Word_t) min[1] / PAGESIZE) * PAGESIZE; 
  L4_Word_t upper_address = ((L4_Word_t) max[1] / PAGESIZE) * PAGESIZE;
 
  while(lower_address <= upper_address) {
    L4_Word_t frame = frame_alloc();
    if(!frame) {
      //Oops out of frames
      unmap_process(new_tid);
      return -1;
    } else {
      L4_Fpage_t targetpage = L4_FpageLog2(lower_address,12);
      lower_address += PAGESIZE;
      //Now map fpage
      L4_Set_Rights(&targetpage,L4_FullyAccessible);
      L4_PhysDesc_t phys = L4_PhysDesc(frame, L4_DefaultMemory);
      //Map the frame to root task but enter entries in pagetable with tid since we will update the mappings once elf loading is done
      if (L4_MapFpage(L4_Myself(), targetpage, phys) ) {
	page_table[(frame-new_low)/PAGESIZE].tid = new_tid;
	page_table[(frame-new_low)/PAGESIZE].pinned = 1;
	page_table[(frame-new_low)/PAGESIZE].pageNo = targetpage;
      } else {
	unmap_process(new_tid);
      }
    }
  }
  //Now that the pages are mapped, elf_loadFile should work with the virtual addresses
  if(elf_loadFile(elfFile,0) == 1) {
      //Elffile was successfully loaded
      //Map the fpages which were previously mapped to Myself to the tid
    for(int i=0;i<numPTE;i++) {
      if(L4_ThreadNo(new_tid) == L4_ThreadNo(page_table[i].tid)) {
	//Now remap the pages which were mapped to root task to the new tid
	L4_UnmapFpage(L4_Myself(),page_table[i].pageNo);
	L4_PhysDesc_t phys = L4_PhysDesc(new_low + i * PAGESIZE, L4_DefaultMemory);
	if(!L4_MapFpage(new_tid, page_table[i].pageNo, phys)) {
	  unmap_process(new_tid);
	  return -1;
	}
      }
    }
  } else {
    unmap_process(new_tid);
  }
  //Remove later
  L4_CacheFlushAll();
  return 0;
}
Example #27
static inline void *
__page_get(void)
{
	void *hp = cos_get_vas_page();
	struct frame *f = frame_alloc();

	assert(hp && f);
	frame_ref(f);
	if (cos_mmap_cntl(COS_MMAP_GRANT, 0, cos_spd_id(), (vaddr_t)hp, frame_index(f))) {
		BUG();
	}
	return hp;
}
Example #28
int
sos_page_map(pid_t pid, addrspace_t *as, seL4_Word vaddr, uint32_t permissions,
             sos_page_map_cb_t callback, void* token, bool noswap) {
    dprintf(3, "sos_page_map\n");
    if (as == NULL) {
        return EINVAL;
    }

    if (as->as_pd_caps == NULL || as->as_pd_regs == NULL) {
        /* Did you even call as_create? */
        dprintf(3, "sos_page_map err einval 0\n");
        return EFAULT;
    }

    seL4_Word vpage = PAGE_ALIGN(vaddr);

    sos_page_map_cont_t* cont = malloc(sizeof(sos_page_map_cont_t));
    if(cont == NULL) {
        dprintf(3, "sos_page_map err nomem\n");
        return ENOMEM;
    }
    cont->pid = pid;
    cont->as = as;
    cont->vpage = vpage;
    cont->permissions = permissions;
    cont->callback = callback;
    cont->token = token;
    cont->noswap = noswap;

    int x, err;

    x = PT_L1_INDEX(vpage);

    if (as->as_pd_regs[x] == NULL) {
        /* Create pagetable if needed */

        assert(as->as_pd_caps[x] == NULL);

        /* Allocate memory for the 2nd level pagetable for regs */
        err = frame_alloc(0, NULL, PROC_NULL, true, _sos_page_map_2_alloc_cap_pt, (void*)cont);
        if (err) {
            free(cont);
            return EFAULT;
        }
        return 0;
    }

    _sos_page_map_4_alloc_frame((void*)cont);
    return 0;
}
Example #29
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
{
    errval_t err;

    /* Keep looping till we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Amount of bytes required for #request
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size */
        struct capref cap;
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // If we recurse, we require more slabs than to map a single page
            assert(required_slabs_for_frame > 4);

            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Perform mapping */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Grow the slab */
        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
    }

    return SYS_ERR_OK;
}
Example #30
static void frame_allocate_and_map(void **retbuf, struct capref *retcap, size_t bytes)
{
    errval_t err;
    size_t retbytes;

    err = frame_alloc(retcap, bytes, &retbytes);
    assert(err_is_ok(err));
    assert(retbytes == bytes);


    err = vspace_map_one_frame_attr(retbuf, bytes, *retcap,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    assert(err_is_ok(err));
}
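A short usage sketch. The helper asserts that frame_alloc() returned exactly
the requested size, so callers should pass sizes the allocator can satisfy
exactly; the NOCACHE mapping flag suggests it is intended for device-visible
buffers:

    void *buf;
    struct capref cap;
    frame_allocate_and_map(&buf, &cap, BASE_PAGE_SIZE);
    memset(buf, 0, BASE_PAGE_SIZE);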