Example #1
/**
 * \brief Sets the memory allocation mask.
 *
 * \param nodemask  bitmap representing the allowed nodes
 *
 * The task will only allocate memory from the nodes set in nodemask.
 *
 * An empty mask, or a mask containing no allowed nodes, results in an error.
 */
errval_t numa_set_membind(struct bitmap *nodemask)
{
    assert(numa_alloc_bind_mask);
    assert(numa_alloc_interleave_mask);

    if (!nodemask) {
        return NUMA_ERR_BITMAP_PARSE;
    }

    if (bitmap_get_nbits(nodemask) < NUMA_MAX_NUMNODES) {
        NUMA_WARNING("supplied interleave mask (%p) has to less bits!", nodemask);
        return NUMA_ERR_BITMAP_RANGE;
    }

    /* copy new membind mask and clear out invalid bits */
    bitmap_copy(numa_alloc_bind_mask, nodemask);
    bitmap_clear_range(numa_alloc_bind_mask, numa_num_configured_nodes(),
                       bitmap_get_nbits(numa_alloc_bind_mask));

    if (bitmap_get_weight(numa_alloc_bind_mask) == 0) {
        /* cannot bind to zero nodes; restore the all-nodes mask */
        bitmap_copy(numa_alloc_bind_mask, numa_all_nodes_ptr);
        return NUMA_ERR_NUMA_MEMBIND;
    }

    /* disable interleaving mode */
    bitmap_clear_all(numa_alloc_interleave_mask);

    return SYS_ERR_OK;
}
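
To make the contract above concrete, a minimal caller could look like the following. This is a hedged sketch: numa_allocate_nodemask(), bitmap_set_bit() and bitmap_free() are assumed, libnuma-style helper names, not taken from the example itself.

/* Hedged usage sketch: bind all future allocations to node 0.
 * numa_allocate_nodemask(), bitmap_set_bit() and bitmap_free()
 * are assumed helper names. */
static errval_t bind_to_node_zero(void)
{
    struct bitmap *mask = numa_allocate_nodemask();
    if (mask == NULL) {
        return LIB_ERR_MALLOC_FAIL;   /* assumed error code */
    }

    bitmap_set_bit(mask, 0);          /* allow node 0 only */

    errval_t err = numa_set_membind(mask);
    bitmap_free(mask);
    return err;   /* NUMA_ERR_* if the mask was empty or invalid */
}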
Example #2
/** Create new frame zone.
 *
 * @param zone     Zone to construct.
 * @param start    Physical address of the first frame within the zone.
 * @param count    Count of frames in zone.
 * @param flags    Zone flags.
 * @param confdata Configuration data of the zone.
 *
 */
NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,
    zone_flags_t flags, void *confdata)
{
	zone->base = start;
	zone->count = count;
	zone->flags = flags;
	zone->free_count = count;
	zone->busy_count = 0;
	
	if (flags & ZONE_AVAILABLE) {
		/*
		 * Initialize frame bitmap (located after the array of
		 * frame_t structures in the configuration space).
		 */
		
		bitmap_initialize(&zone->bitmap, count, confdata +
		    (sizeof(frame_t) * count));
		bitmap_clear_range(&zone->bitmap, 0, count);
		
		/*
		 * Initialize the array of frame_t structures.
		 */
		
		zone->frames = (frame_t *) confdata;
		
		for (size_t i = 0; i < count; i++)
			frame_initialize(&zone->frames[i]);
	} else {
		bitmap_initialize(&zone->bitmap, 0, NULL);
		zone->frames = NULL;
	}
}
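
The available-zone branch relies on a fixed confdata layout: the frame_t array first, the frame bitmap immediately after it. Below is a hedged sketch of the configuration-space size this implies, reusing the bitmap_size() helper that appears in the I/O bitmap examples below; zone_conf_size is a hypothetical name.

/* Hedged sketch: configuration space zone_construct expects for an
 * available zone, per the layout above. zone_conf_size is a
 * hypothetical helper name. */
static size_t zone_conf_size(size_t count)
{
	/* frame_t array first, then the frame bitmap right after it */
	return sizeof(frame_t) * count + bitmap_size(count);
}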
Example #3
/** Disable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task	 Task.
 * @param ioaddr Starting I/O space address.
 * @param size	 Size of the disabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 */
int ddi_iospace_disable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
    if (!task->arch.iomap)
        return EINVAL;

    uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
    size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
    bitmap_clear_range(task->arch.iomap, iopage, size / 4);

    return EOK;
}
Example #4
/** Enable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task   Task.
 * @param ioaddr Starting I/O space address.
 * @param size   Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 *
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
	size_t elements = ioaddr + size;
	if (elements > IO_PORTS)
		return ENOENT;
	
	if (task->arch.iomap.elements < elements) {
		/*
		 * The I/O permission bitmap is too small and needs to be grown.
		 */
		
		void *store = malloc(bitmap_size(elements), FRAME_ATOMIC);
		if (!store)
			return ENOMEM;
		
		bitmap_t oldiomap;
		bitmap_initialize(&oldiomap, task->arch.iomap.elements,
		    task->arch.iomap.bits);
		
		bitmap_initialize(&task->arch.iomap, elements, store);
		
		/*
		 * Mark the new range inaccessible.
		 */
		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
		    elements - oldiomap.elements);
		
		/*
		 * If a smaller iomap existed before, copy its contents
		 * and deallocate it.
		 */
		if (oldiomap.bits) {
			bitmap_copy(&task->arch.iomap, &oldiomap,
			    oldiomap.elements);
			
			free(oldiomap.bits);
		}
	}
	
	/*
	 * Enable the range and we are done.
	 */
	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	
	/*
	 * Increment I/O Permission bitmap generation counter.
	 */
	task->arch.iomapver++;
	
	return EOK;
}
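
The polarity here is easy to misread: in the x86 I/O permission bitmap a cleared bit grants access, which is why enabling a range clears bits and why the freshly grown part of the map is set (made inaccessible) before the requested range is enabled. A hedged sketch of the matching access check, reusing the bitmap_get() accessor seen in the zone-merge example below:

/* Hedged sketch: true if the task may access the given I/O port,
 * under the x86 convention above that a zero bit grants access.
 * Ports beyond the bitmap are denied. */
static bool io_port_allowed(task_t *task, uintptr_t port)
{
	if (port >= task->arch.iomap.elements)
		return false;
	
	return bitmap_get(&task->arch.iomap, port) == 0;
}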
Example #5
/** Enable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task	 Task.
 * @param ioaddr Starting I/O space address.
 * @param size	 Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
    if (!task->arch.iomap) {
        task->arch.iomap = malloc(sizeof(bitmap_t), 0);
        if (task->arch.iomap == NULL)
            return ENOMEM;

        void *store = malloc(bitmap_size(IO_MEMMAP_PAGES), 0);
        if (store == NULL) {
            /* roll back so a later call does not see an uninitialized iomap */
            free(task->arch.iomap);
            task->arch.iomap = NULL;
            return ENOMEM;
        }

        bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, store);
        bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
    }

    uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
    size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
    bitmap_set_range(task->arch.iomap, iopage, size / 4);

    return EOK;
}
Example #6
/** Merge two zones.
 *
 * Assumes z1 and z2 are locked and compatible, and that the zones
 * lock is held.
 *
 * @param z1       First zone to merge.
 * @param z2       Second zone to merge.
 * @param old_z1   Original data of the first zone.
 * @param confdata Merged zone configuration data.
 *
 */
NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
    void *confdata)
{
	ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
	ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
	ASSERT(zones.info[z1].flags == zones.info[z2].flags);
	ASSERT(zones.info[z1].base < zones.info[z2].base);
	ASSERT(!overlaps(zones.info[z1].base, zones.info[z1].count,
	    zones.info[z2].base, zones.info[z2].count));
	
	/* Difference between zone bases */
	pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
	
	zones.info[z1].count = base_diff + zones.info[z2].count;
	zones.info[z1].free_count += zones.info[z2].free_count;
	zones.info[z1].busy_count += zones.info[z2].busy_count;
	
	bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
	    confdata + (sizeof(frame_t) * zones.info[z1].count));
	bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
	
	zones.info[z1].frames = (frame_t *) confdata;
	
	/*
	 * Copy frames and bits from both zones to preserve parents, etc.
	 */
	
	for (size_t i = 0; i < old_z1->count; i++) {
		bitmap_set(&zones.info[z1].bitmap, i,
		    bitmap_get(&old_z1->bitmap, i));
		zones.info[z1].frames[i] = old_z1->frames[i];
	}
	
	for (size_t i = 0; i < zones.info[z2].count; i++) {
		bitmap_set(&zones.info[z1].bitmap, base_diff + i,
		    bitmap_get(&zones.info[z2].bitmap, i));
		zones.info[z1].frames[base_diff + i] =
		    zones.info[z2].frames[i];
	}
}
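
A worked instance of the index arithmetic, for two hypothetical adjacent zones:

/*
 * Hypothetical merge, for illustration only:
 *
 *   z1: base = 100, count = 50   (frames [100, 150))
 *   z2: base = 150, count = 30   (frames [150, 180))
 *
 *   base_diff    = 150 - 100 = 50
 *   merged count = 50 + 30   = 80
 *
 * The first loop copies z1's frames and bitmap bits to indices
 * [0, 50); the second copies z2's to [50, 80).
 */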
Example #7
/**
 * \brief Sets the memory interleave mask for the current task to nodemask.
 *
 * \param nodemask bitmask representing the nodes
 *
 * All new memory allocations are page-interleaved over all nodes in the
 * interleave mask. Interleaving can be turned off again by passing an empty
 * mask.
 *
 * This bitmask is considered to be a hint; fallback to other nodes may be
 * possible.
 */
void numa_set_interleave_mask(struct bitmap *nodemask)
{
    assert(numa_alloc_interleave_mask);

    if (!nodemask) {
        bitmap_clear_all(numa_alloc_interleave_mask);
        return;
    }

    if (bitmap_get_nbits(nodemask) < NUMA_MAX_NUMNODES) {
        NUMA_WARNING("supplied interleave mask (%p) has to less bits!", nodemask);
        return;
    }
    bitmap_copy(numa_alloc_interleave_mask, nodemask);

    /* clear out the invalid nodes */
    bitmap_clear_range(numa_alloc_interleave_mask, numa_num_configured_nodes(),
                       bitmap_get_nbits(numa_alloc_interleave_mask));

    /* clear the bind mask as we are using interleaving mode now */
    bitmap_clear_all(numa_alloc_bind_mask);
}
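
Toggling interleaving then reduces to two calls. A hedged sketch, again assuming libnuma-style helpers for mask construction:

/* Hedged usage sketch: interleave over nodes 0 and 1, then turn
 * interleaving off. numa_allocate_nodemask(), bitmap_set_bit() and
 * bitmap_free() are assumed helper names. */
static void toggle_interleaving(void)
{
    struct bitmap *mask = numa_allocate_nodemask();
    assert(mask != NULL);

    bitmap_set_bit(mask, 0);
    bitmap_set_bit(mask, 1);
    numa_set_interleave_mask(mask);   /* also clears the bind mask */

    /* ... page-interleaved allocations happen here ... */

    numa_set_interleave_mask(NULL);   /* turn interleaving off */
    bitmap_free(mask);
}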
Example #8
/**
 * \brief allocates size bytes of memory, page-interleaved across the nodes
 *        specified in nodemask.
 *
 * \param size     size of the memory region in bytes
 * \param pagesize preferred page size to be used
 * \param nodemask subset of nodes to consider for allocation
 *
 * \returns pointer to the mapped memory region, or NULL on error
 *
 * Should only be used for large areas consisting of multiple pages.
 * The memory must be freed with numa_free().
 */
void *numa_alloc_interleaved_subset(size_t size, size_t pagesize,
                                    struct bitmap *nodemask)
{
    errval_t err;

    /* clear out invalid bits */
    bitmap_clear_range(nodemask, numa_num_configured_nodes(),
                       bitmap_get_nbits(nodemask));

    /* get the number of nodes */
    nodeid_t nodes = bitmap_get_weight(nodemask);
    if (nodes == 0) {
        return NULL;
    }

    NUMA_DEBUG_ALLOC("allocating interleaved using %" PRIuNODEID " nodes\n", nodes);

    assert(nodes <= numa_num_configured_nodes());

    vregion_flags_t flags;
    validate_page_size(&pagesize, &flags);
    size_t stride = pagesize;

    size_t node_size = size / nodes;
    node_size = (node_size + pagesize - 1) & ~(pagesize - 1);

    /* update the total size, as it may change due to rounding of node sizes */
    size = nodes * node_size;

    /*
     * XXX: we may want to keep track of numa alloced frames
     */

    struct memobj_numa *memobj = calloc(1, sizeof(struct memobj_numa));
    if (memobj == NULL) {
        return NULL;
    }

    err = memobj_create_numa(memobj, size, 0, numa_num_configured_nodes(), stride);
    if (err_is_fail(err)) {
        free(memobj);
        return NULL;
    }

    bitmap_bit_t node = bitmap_get_first(nodemask);
    nodeid_t node_idx = 0;
    while (node != BITMAP_BIT_NONE) {
        struct capref frame;
        err = numa_frame_alloc_on_node(&frame, node_size, (nodeid_t)node, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "numa_frame_alloc_on_node");
            goto out_err;
        }
        memobj->m.f.fill(&memobj->m, node_idx, frame, 0);
        ++node_idx;
        node = bitmap_get_next(nodemask, node);
    }

    struct vregion *vreg = calloc(1, sizeof(struct vregion));
    if (vreg == NULL) {
        goto out_err;
    }
    err = vregion_map_aligned(vreg, get_current_vspace(), &memobj->m, 0, size,
                        flags, pagesize);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vregion_map_aligned");
        goto out_err;
    }

    err = memobj->m.f.pagefault(&memobj->m, vreg, 0, 0);
    if (err_is_fail(err)) {
        vregion_destroy(vreg);
        free(vreg);
        DEBUG_ERR(err, "memobj.m.f.pagefault");
        goto out_err;
    }

    // XXX - Is this right?
    return (void *)(uintptr_t)vregion_get_base_addr(vreg);

out_err:
    for (nodeid_t i = 0; i < node_idx; ++i) {
        struct capref frame;
        memobj->m.f.unfill(&memobj->m, i, &frame, NULL);
        cap_delete(frame);
    }
    free(memobj);
    return NULL;
}
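
An end-to-end hedged sketch of driving this allocator; the mask helpers are the same assumed names as above, and numa_free() is the deallocator the doc comment prescribes.

/* Hedged usage sketch: allocate 16 MiB interleaved across nodes 0
 * and 1 using 4 KiB pages; the caller later releases the region
 * with numa_free(buf, size). Helper names are assumptions. */
static void *alloc_interleaved_16m(void)
{
    struct bitmap *mask = numa_allocate_nodemask();
    if (mask == NULL) {
        return NULL;
    }

    bitmap_set_bit(mask, 0);
    bitmap_set_bit(mask, 1);

    size_t size = 16UL * 1024 * 1024;
    void *buf = numa_alloc_interleaved_subset(size, 4096, mask);
    bitmap_free(mask);

    return buf;
}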