Example #1
L4_ThreadId_t thread_new(AddrSpace_t *space)
{
    assert (space != NULL);

    L4_Word_t tno;
    L4_ThreadId_t tid;
    L4_ThreadId_t space_spec;
    L4_Word_t utcb_location;
    slab_t *sb;
    list_t *li;
    thread_t *this;

    mutex_lock(&thrlock);
    tno = threadno_find_free(bitmap, MAX_TASKS);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }

    tid = L4_GlobalId(tno, 1);
    utcb_location = UTCB_AREA_LOCATION;

    space_spec = space->tid;
    tno = threadno_find_free(space->threads, MAX_THREADS_PER_TASK);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    utcb_location += tno * UTCB_SIZE;

    sb = slab_alloc(&thrpool);
    if (!sb) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    
    if (FALSE == (L4_ThreadControl(tid, space_spec, tid, space->pager, (void *) utcb_location))) {
        slab_free(&thrpool, sb);
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }

    li = LIST_TYPE(sb->data);
    this = (thread_t *) li->data;
    list_push(&thread_list, li);

    this->tid = tid;
    this->space = space;
    this->index = tno;
    this->creation = L4_SystemClock();

    threadno_alloc(bitmap, L4_ThreadNo(tid));
    threadno_alloc(space->threads, tno);
    mutex_unlock(&thrlock);
    return tid;
}
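The only failure signal thread_new() gives its caller is L4_nilthread, so the result has to be checked explicitly. A minimal, hypothetical usage sketch (the AddrSpace_t setup is assumed to exist elsewhere; L4_IsNilThread() is the usual L4 convenience predicate):

/* Hypothetical caller: `space` was created and registered elsewhere. */
L4_ThreadId_t tid = thread_new(space);
if (L4_IsNilThread(tid)) {
    /* no free thread number, no slab entry, or L4_ThreadControl refused */
}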
Example #2
void
test_append_full_block(void)
{
    struct index_logger_s *il = index_logger_create("data/", "indexlog.bin", false);
    CU_ASSERT(il != NULL);

    struct index_log_item_s *ili = slab_alloc(sizeof *ili);
    ili->index_key = slab_alloc(sizeof *ili->index_key);
    ili->index_value = slab_alloc(sizeof *ili->index_value);

    ili->type = index_append;

    for (int i=0; i<2000; i++) /* enough iterations to fill one log block completely */
    {
        memset(ili->index_key, 0x00, sizeof(*ili->index_key));
        ili->index_key->uint64_array.qword[1] = i;
        ili->index_value->crc32 = i * 2;
        ili->index_value->block_id = i * 3;
        ili->index_value->data_size = i * 4;
        ili->index_value->offset = i * 5;
        CU_ASSERT(index_logger_append(il, ili) == RET_SUCCESS);
    }
    CU_ASSERT(index_logger_close(il) == RET_SUCCESS);
    index_logger_free_item(ili);

    il = index_logger_open("data/", "indexlog.bin");
    CU_ASSERT(il != NULL);

    for (unsigned int i=0; i<2000; i++)
    {
        ili = index_logger_fetch(il);
        CU_ASSERT(ili != NULL);
        CU_ASSERT(ili->type == index_append);
        CU_ASSERT(ili->index_key->uint64_array.qword[1] == i);
        CU_ASSERT(ili->index_value->crc32 == i * 2);
        CU_ASSERT(ili->index_value->block_id == i * 3);
        CU_ASSERT(ili->index_value->data_size == i * 4);
        CU_ASSERT(ili->index_value->offset == i * 5);
        index_logger_free_item(ili);
    }
    CU_ASSERT(index_logger_fetch(il) == NULL);
    CU_ASSERT(index_logger_close(il) == RET_SUCCESS);
}
Example #3
/** Allocate and initialize a call structure.
 *
 * The call is initialized, so that the reply will be directed to
 * TASK->answerbox.
 *
 * @param flags Parameters for slab_alloc (e.g. FRAME_ATOMIC).
 *
 * @return NULL if the allocation fails and the flags permit it,
 *         otherwise an initialized kernel call structure with one
 *         reference held.
 *
 */
call_t *ipc_call_alloc(unsigned int flags)
{
	call_t *call = slab_alloc(ipc_call_slab, flags);
	if (call) {
		_ipc_call_init(call);
		ipc_call_hold(call);
	}
	
	return call;
}
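Since the structure comes back with one reference already held (via ipc_call_hold), the caller is expected to drop that reference when it is done with the call. A minimal sketch of the intended usage, assuming a matching ipc_call_release() exists in the same subsystem (the name is a guess if the actual API differs):

call_t *call = ipc_call_alloc(FRAME_ATOMIC);
if (call == NULL) {
    /* FRAME_ATOMIC permits failure; back off or retry without it */
} else {
    /* ... fill in and send the call ... */
    ipc_call_release(call);  /* drop the reference taken above */
}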
Example #4
struct bio_vec *alloc_bio_vec(dev_t dev, blkcnt_t blkcnt, blksize_t blksize)
{
	struct bio_vec *bio = blkcnt <= SMALL_BIO_MAX
		? slab_alloc(small_bio_cachep)
		: kmalloc(BIO_VEC_SIZE(blkcnt));
	bio->dev = dev;
	bio->blkcnt = blkcnt;
	bio->blksize = blksize;
	return bio;
}
Example #5
struct vm_address_space *create_address_space(void)
{
    struct vm_address_space *space;

    space = slab_alloc(&address_space_slab);
    init_area_map(&space->area_map, PAGE_SIZE, KERNEL_BASE - 1);
    space->translation_map = create_translation_map();
    init_rwlock(&space->mut);

    return space;
}
Example #6
struct schema *schema_create(void) {
    struct schema *schema = slab_alloc(sizeof(struct schema));
    if (!schema) {
        log_error("can't alloc schema");
        return NULL;
    }

    memset(schema, 0, sizeof(struct schema));
    schema->primary_key_index = -1;
    log_notice("created a new schema: %p", schema);
    return schema;
}
Example #7
static bool allocate_iobuf(SBuf *sbuf)
{
	if (sbuf->io == NULL) {
		sbuf->io = slab_alloc(iobuf_cache);
		if (sbuf->io == NULL) {
			sbuf_call_proto(sbuf, SBUF_EV_RECV_FAILED);
			return false;
		}
		iobuf_reset(sbuf->io);
	}
	return true;
}
Example #8
File: slab.c Project: kezhuw/swiff
struct slab *
slab_pool_alloc(struct slab_pool *so, size_t nitem, size_t isize) {
	assert_isize(isize);
	assert_nitem(nitem);
	struct slab *sa = slab_alloc((struct slab *)so);
	slab_init(sa, isize, isize*nitem);

	struct slab *po = (struct slab *)so;
	sa->memctx = po->memctx;
	sa->alloc = po->alloc;
	sa->dealloc = po->dealloc;
	return sa;
}
Example #9
void* slab_cache_alloc(slab_cache_t* cache)
{
	slab_t* slab = cache->slabs_free;
	if (slab == NULL) {
		slab = slab_create(cache);
		if (slab == NULL) {
			return NULL;
		}
	}

	return slab_alloc(slab);
}
Example #10
void thread_init() {
	slab_create(&thread_cache, sizeof(thread_t), page_alloc(1));

	init_thread           = slab_alloc(&thread_cache);

	active_thread         = init_thread;
	active_switch_stack   = init_thread->stack;
	init_thread->status   = THREAD_SCHEDULED;
	init_thread->user     = 0;
	init_thread->parent   = 0;
	init_thread->children = 0;
	init_thread->next     = 0;
}
Example #11
static char *dump_binstrescape(const char *mem, size_t size) {
    char *temp = slab_alloc(size * 4 + 1); // \xde\xad
    char *q = temp;
    for (size_t i = 0; i < size; i++) {
        uint8_t x = *(uint8_t *) &mem[i];
        *q++ = '\\';
        *q++ = 'x';
        *q++ = HEX[x / 16];
        *q++ = HEX[x % 16];
    }
    *q = '\0';
    return temp;
}
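The size * 4 + 1 allocation is what the \xde\xad comment hints at: each input byte expands to four characters (backslash, 'x', two hex digits), plus one terminating NUL. A hypothetical call, assuming HEX is a lowercase hex-digit lookup table:

const uint8_t bytes[2] = { 0xde, 0xad };
char *s = dump_binstrescape((const char *) bytes, sizeof bytes);
/* s now holds the 8-character text \xde\xad inside a 2*4+1 = 9 byte buffer */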
Example #12
static struct request *make_request(int rw, struct buffer *buf)
{
	struct request *req = slab_alloc(request_cachep);
	if (!req)
		panic("No memory for block request");
	// FIXME: block until memory available
	req->rw = rw;
	req->sector = SECTOR(buf);
	req->nr_sectors = NR_SECTORS(buf);
	req->mem = buf->b_data;
	req->buf = buf;
	buf->b_count++;
	return req;
}
Example #13
thr_t* __fastcall create_systhread(addr_t entry_ptr)
{
    static count_t  thr_cnt = 0;
    static count_t  slot = 1;

    thr_t   *thr;
    addr_t   thr_stack;

    DBG("%s\n", __FUNCTION__);

    thr = (thr_t*)slab_alloc(thr_slab,0);
    thr_stack = PA2KA(frame_alloc(2));

    thr_cnt++;

    thr->eax = (thr_cnt<<8)|slot;
    thr->tid = (thr_cnt<<8)|slot;

    thr->slot = slot;

    slot++;

    thr->pdir = KA2PA(&sys_pdbr);

    thr->ebx = 0;

    thr->edi = 0;
    thr->esi = 0;
    thr->ebp = 0;
    thr->edx = 0;
    thr->ecx = 0;

    thr->cs  = sel_srv_code;
    thr->eflags = EFL_IOPL1;
    thr->esp = thr_stack + 8192;
    thr->ss = sel_srv_stack;

    thr->thr_flags    = 0;

    thr->ticks_left   = 8;
    thr->quantum_size = 8;

    thr->eip = entry_ptr;

    //lock_enqueue(thr_ptr);       /* add to scheduling queues */

    return thr;
}
Example #14
int test_slab (int argc, char **argv) {
    // track runtime
    struct timeval t0, t1; 
    
    // malloc version
    gettimeofday (&t0, NULL);
    for (int p = 0; p < PASSES; ++p) {
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = malloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
        for (int i = 0; i < ALLOCS; ++i) {
            free (tsps[i]);
        }
    }
    gettimeofday (&t1, NULL);
    double mdt = ((t1.tv_usec + 1000000 * t1.tv_sec) - (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;
    
    // slab alloc version
    gettimeofday (&t0, NULL);
    slab_init (SLAB_SIZE);
    for (int p = 0; p < PASSES; ++p) {
        slab_free ();
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = slab_alloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
    }    
    gettimeofday (&t1, NULL);
    double sdt = ((t1.tv_usec + 1000000 * t1.tv_sec) - (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;

    /* Check that results are coherent, and collapse the wavefunction. */
    for (int i = 0; i < ALLOCS; ++i) {
        test_s ts = *(tsps[i]);
        if (ts.a != i || ts.b != i || ((i % 2 == 0) != ts.c)) {
            printf ("ERR ptr=%p, i=%d, a=%d, b=%f, c=%s\n", tsps[i], i, ts.a, ts.b, ts.c ? "EVEN" : "ODD");
        }
    }
    
    fprintf (stderr, "%f sec malloc, %f sec slab, speedup %f\n", mdt, sdt, mdt/sdt);
    return 0;
}
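The fixture this benchmark relies on (test_s, tsps, PASSES, ALLOCS, SLAB_SIZE) is not shown above. A plausible definition, with field types and sizes guessed from how they are used (the %f in the mismatch printf suggests b is floating-point):

#include <stdbool.h>

typedef struct {
    int    a;
    double b;   /* printed with %f in the mismatch report */
    bool   c;   /* set to true for even i */
} test_s;

#define PASSES    100                          /* guessed */
#define ALLOCS    10000                        /* guessed */
#define SLAB_SIZE (ALLOCS * sizeof(test_s))    /* guessed: one pass worth of objects */

static test_s *tsps[ALLOCS];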
Example #15
struct thread *spawn_thread(struct vm_translation_map *map,
                            void (*start_function)(void *param),
                            void *param)
{
    struct thread *th = slab_alloc(&thread_slab);
    th->kernel_stack = (unsigned char*) kmalloc(0x2000) + 0x2000;
    th->current_stack = (unsigned char*) th->kernel_stack - 0x840;
    th->map = map;
    ((unsigned int*) th->current_stack)[0x814 / 4] = (unsigned int) thread_start;
    th->start_function = start_function;
    th->param = param;

    acquire_spinlock(&thread_q_lock);
    enqueue_thread(&ready_q, th);
    release_spinlock(&thread_q_lock);
    return th;
}
Example #16
PUBLIC static
void
Fpu_alloc::free_state(Fpu_state *s) 
{
  if (s->_state_buffer) 
    {
      unsigned long sz = Fpu::state_size();
      Ram_quota *q = *((Ram_quota **)((char*)(s->_state_buffer) + sz));
      slab_alloc()->q_free (q, s->_state_buffer);
      s->_state_buffer = 0;

      // transferred FPU state may lead to quotas w/o a task but only FPU
      // contexts allocated
      if (q->current()==0)
	delete q;
    }
}
Example #17
void *__heap_alloc(heap_t * self, size_t size, const char *file, int line)
{
	if (unlikely(self == NULL))
		throw_unexpected(HEAP_NULL);

	size = max(align(size, self->alloc_size), self->alloc_size);

	size_t slab_pos = size / self->alloc_size - 1;

	if (unlikely(self->slab_size < slab_pos))
		throw_unexpected(HEAP_ALLOC);

	if (unlikely(self->slab[slab_pos] == NULL))
		self->slab[slab_pos] = slab_new(size, 0);

	return slab_alloc(self->slab[slab_pos]);
}
Example #18
thread_t *thread_create(int (*func)(void)) {
	thread_t *new_thread = slab_alloc(&thread_cache);

	if(new_thread == 0) {
		return 0;
	}

	new_thread->parent   = active_thread;
	new_thread->next     = 0;
	new_thread->children = 0;


	new_thread->stack    = page_alloc(1);

	if(new_thread->stack == 0) {
		return 0;
	}

	new_thread->stack += (DEFAULT_STACK_SIZE - THREAD_STATE_SIZE);

	new_thread->user   = 0;

	new_thread->status = THREAD_SCHEDULED;


	*(addr_t *) (new_thread->stack + THREAD_ELR_OFFSET) = (addr_t) func;

	*(addr_t *) (new_thread->stack + THREAD_PSR_OFFSET) = THREAD_SYS_PSR;

	*(addr_t *) (new_thread->stack + THREAD_LR_OFFSET)  = (addr_t) &thread_exit;


	if(active_thread->children == 0) {
		active_thread->children = new_thread;
		return new_thread;
	} else {
		for(thread_t *i = active_thread->children; ; i = i->next) {
			if(i->next == 0) {
				i->next = new_thread;
				return new_thread;
			}
		}
	}

	return 0;
}
Example #19
File: pipe.c Project: drewt/Telos
long sys_pipe(int *read_end, int *write_end, int flags)
{
	if (vm_verify(&current->mm, read_end, sizeof(*read_end), VM_WRITE))
		return -EFAULT;
	if (vm_verify(&current->mm, write_end, sizeof(*write_end), VM_WRITE))
		return -EFAULT;

	int read_fd, write_fd;
	struct file *read_file = get_empty_file();
	struct file *write_file = get_empty_file();
	struct pipe_private *pipe = slab_alloc(pipe_cachep);

	if ((read_fd = get_fd(current, 0)) < 0)
		return read_fd;
	current->filp[read_fd] = read_file;
	if ((write_fd = get_fd(current, 0)) < 0) {
		current->filp[read_fd] = NULL;
		return write_fd;
	}
	current->filp[write_fd] = write_file;

	INIT_WAIT_QUEUE(&pipe->read_wait);
	INIT_WAIT_QUEUE(&pipe->write_wait);
	pipe->read_end = read_file;
	pipe->write_end = write_file;
	pipe->buf = flexbuf_alloc(0);

	read_file->f_inode = write_file->f_inode = NULL;
	read_file->f_flags = write_file->f_flags = flags;
	read_file->f_rdev = write_file->f_rdev = 0;

	read_file->f_mode = O_READ;
	read_file->f_op = &pipe_read_operations;
	read_file->f_private = pipe;

	write_file->f_mode = O_WRITE;
	write_file->f_op = &pipe_write_operations;
	write_file->f_private = pipe;

	current->filp[read_fd] = read_file;
	current->filp[write_fd] = write_file;
	*read_end = read_fd;
	*write_end = write_fd;
	return 0;
}
Example #20
static int read_fatsb (struct mountpoint *mp) {
    bpb *bootparam;
    int err = -1;
    int major;
    if(! mp->dev) {
        goto out;
    }
    major = MAJOR(mp->dev->dev);
    bootparam = (void *)slab_alloc (sizeof(bpb), 0);
    do_data_transfer(mp->dev,0,blkdev[major].block_size,(unsigned char *)bootparam, READ);
    printf ("FAT12: SuperBlock read\n");
    mp->fs->fssb.blocksize = bootparam->sec_clu * bootparam->bytes_sec;
    mp->fs->fssb.root_inode = bootparam->no_fats*bootparam->fat_sectors + bootparam->rsvd_sec;
    mp->fs->fssb.root_size = bootparam->no_root;
    mp->fs->fssb.spc_fs = bootparam;
    err = 0;
out:
    return err;
}
Example #21
/*
 * returns a string containing all bucket names
 * returns NULL if it fails
 */
char *bucket_list(void) {
    /* load buckets from filesystem */
    int size;
    int pos = 0;
    char *result;
    struct bucket *bucket;
    int len;

    DIR *dir = opendir(".");
    if (dir) {
        struct dirent *dirent;
        while ((dirent = readdir(dir))) {
            if ((dirent->d_type & DT_DIR) && is_valid_name(dirent->d_name))
                bucket_get(dirent->d_name, CAN_RETURN_NULL);
        }
        closedir(dir);
    }


    pthread_rwlock_rdlock(&rwlock);
    
    size = len = 0;
    for (bucket = buckets; bucket; bucket = (struct bucket *)bucket->hh.next) {
        len = strlen(bucket->name) + 1;
        size += len;
    }

    result = slab_alloc(size + 1); /* +1 for the terminating '\0' added below */
    if (!result) {
        pthread_rwlock_unlock(&rwlock);
        return NULL;
    }

    size = len = 0;
    for (bucket = buckets; bucket; bucket = (struct bucket *)bucket->hh.next) {
        len = strlen(bucket->name) + 1;
        size += len;
        memcpy(result + pos, bucket->name, len);
        pos = size;
    }

    pthread_rwlock_unlock(&rwlock);
    result[pos] = '\0';
    return result;
}
Example #22
/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
static errval_t alloc_vnode(struct pmap_arm *pmap_arm, struct vnode *root,
                            enum objtype type, uint32_t entry,
                            struct vnode **retvnode)
{
    assert(root->is_vnode);
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap_arm->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    newvnode->is_vnode = true;

    // The VNode capability
    err = slot_alloc(&newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    err = vnode_map(root->u.vnode.cap, newvnode->u.vnode.cap, entry,
                    KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, 0, 1);

    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }

    // The VNode meta data
    newvnode->entry            = entry;
    newvnode->next             = root->u.vnode.children;
    root->u.vnode.children     = newvnode;
    newvnode->u.vnode.children = NULL;

    if (retvnode) {
        *retvnode = newvnode;
    }
    return SYS_ERR_OK;
}
Example #23
void thread_create_arch(thread_t *t)
{
	if ((t->uspace) && (!t->arch.uspace_window_buffer)) {
		/*
		 * The thread needs userspace window buffer and the object
		 * returned from the slab allocator doesn't have any.
		 */
		t->arch.uspace_window_buffer = slab_alloc(uwb_cache, 0);
	} else {
		uintptr_t uw_buf = (uintptr_t) t->arch.uspace_window_buffer;

		/*
		 * Mind the possible alignment of the userspace window buffer
		 * belonging to a killed thread.
		 */
		t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
		    UWB_ALIGNMENT);
	}
}
Example #24
int
adapt2_init(void **data, int *level, int nthreads, uint64_t chunksize,
	    int file_version, compress_op_t op)
{
	struct adapt_data *adat = (struct adapt_data *)(*data);
	int rv = 0, lv;

	if (!adat) {
		adat = (struct adapt_data *)slab_alloc(NULL, sizeof (struct adapt_data));
		adat->adapt_mode = 2;
		adat->ppmd_data = NULL;
		adat->bsc_data = NULL;
		lv = *level;
		if (lv > 10) lv = 10;
		rv = ppmd_state_init(&(adat->ppmd_data), level, 0);
		lv = *level;
		if (rv == 0)
			rv = lzma_init(&(adat->lzma_data), &lv, nthreads, chunksize, file_version, op);
		lv = *level;
#ifdef ENABLE_PC_LIBBSC
		if (rv == 0)
			rv = libbsc_init(&(adat->bsc_data), &lv, nthreads, chunksize, file_version, op);
#endif
		/*
		 * LZ4 is used to tackle some embedded archive headers and/or zero paddings in
		 * otherwise incompressible data. So we always use it at the lowest and fastest
		 * compression level.
		 */
		lv = 1;
		if (rv == 0)
			rv = lz4_init(&(adat->lz4_data), &lv, nthreads, chunksize, file_version, op);
		*data = adat;
		if (*level > 9) *level = 9;
	}
	lzma_count = 0;
	bzip2_count = 0;
	ppmd_count = 0;
	bsc_count = 0;
	lz4_count = 0;
	return (rv);
}
Example #25
int
lz_fx_init(void **data, int *level, int nthreads, int64_t chunksize,
	   int file_version, compress_op_t op)
{
	struct lzfx_params *lzdat;
	int lev;

	if (chunksize > UINT_MAX) {
		fprintf(stderr, "Chunk size too big for LZFX.\n");
		return (1);
	}
	lzdat = slab_alloc(NULL, sizeof (struct lzfx_params));

	lev = *level;
	if (lev > 5) lev = 5;
	lzdat->htab_bits = 16 + (lev-1);
	*data = lzdat;

	if (*level > 9) *level = 9;
	return (0);
}
Example #26
struct vm_translation_map *create_translation_map(void)
{
    struct vm_translation_map *map;
    int old_flags;

    map = slab_alloc(&translation_map_slab);
    map->page_dir = page_to_pa(vm_allocate_page());

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    // Copy kernel page tables into new page directory
    memcpy((unsigned int*) PA_TO_VA(map->page_dir) + 768,
           (unsigned int*) PA_TO_VA(kernel_map.page_dir) + 768,
           256 * sizeof(unsigned int));

    map->asid = next_asid++;
    map->lock = 0;

    list_add_tail(&map_list, (struct list_node*) map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    return map;
}
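Assuming a 1024-entry page directory with the kernel mapped in the top quarter of the address space (consistent with the KERNEL_BASE - 1 bound used in create_address_space above), the memcpy of 256 entries starting at index 768 copies the directory slots covering addresses from 0xC0000000 up, so every new translation map shares the kernel's page tables instead of rebuilding them; only the ASID and lock are per-map state.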
Example #27
int
lz4_init(void **data, int *level, int nthreads, uint64_t chunksize,
	 int file_version, compress_op_t op)
{
	struct lz4_params *lzdat;
	int lev;

	if (chunksize > LZ4_MAX_CHUNK) {
		fprintf(stderr, "Max allowed chunk size for LZ4 is: %ld \n",
		    LZ4_MAX_CHUNK);
		return (1);
	}
	lzdat = (struct lz4_params *)slab_alloc(NULL, sizeof (struct lz4_params));

	lev = *level;
	if (lev > 3) lev = 3;
	lzdat->level = lev;
	*data = lzdat;

	if (*level > 9) *level = 9;
	return (0);
}
Example #28
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer = slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}
Example #29
static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr, genvaddr_t vend,
                              struct capref frame, size_t offset, size_t pte_count,
                              vregion_flags_t flags)
{
    // Get the page table
    struct vnode *ptable;
    errval_t err = get_ptable(pmap, vaddr, &ptable);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
    }
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
    // XXX: reassess the following note -SG
    // NOTE: strictly speaking a l2 entry only has 8 bits, but due to the way
    // Barrelfish allocates l1 and l2 tables, we use 10 bits for the tracking
    // index here and in the map syscall
    uintptr_t index = ARM_USER_L2_OFFSET(vaddr);
    // Create user level datastructure for the mapping
    bool has_page = has_vnode(ptable, index, pte_count);
    assert(!has_page);
    struct vnode *page = slab_alloc(&pmap->slab);
    assert(page);
    page->is_vnode = false;
    page->entry = index;
    page->next  = ptable->u.vnode.children;
    ptable->u.vnode.children = page;
    page->u.frame.cap = frame;
    page->u.frame.pte_count = pte_count;

    // Map entry into the page table
    err = vnode_map(ptable->u.vnode.cap, frame, index,
                    pmap_flags, offset, pte_count);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }
    return SYS_ERR_OK;
}
Example #30
/**
 * \brief slot allocator
 *
 * \param ca   Instance of the allocator
 * \param ret  Pointer to return the allocated slot
 */
errval_t multi_alloc(struct slot_allocator *ca, struct capref *ret)
{
    errval_t err = SYS_ERR_OK;
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);
    assert(ca->space != 0);
    ca->space--;

    /* Try allocating from the list of single slot allocators */
    struct slot_allocator_list *walk = mca->head;
    //struct slot_allocator_list *prev = NULL;
    while(walk != NULL) {
        err = walk->a.a.alloc(&walk->a.a, ret);
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_NO_SPACE) {
            break;
        }
        //prev = walk;
        walk = walk->next;
    }
    if (err_is_fail(err)) {
        thread_mutex_unlock(&ca->mutex);
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC);
    }

    /* If no more slots left, grow */
    if (ca->space == 0) {
        ca->space = ca->nslots;
        /* Pull in the reserve */
        mca->reserve->next = mca->head;
        mca->head = mca->reserve;

        /* Setup a new reserve */
        // Cnode
        struct capref cap;
        struct cnoderef cnode;
        err = mca->top->alloc(mca->top, &cap);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SLOT_ALLOC);
        }
        thread_mutex_unlock(&ca->mutex); // cnode_create_raw uses ram_alloc
                                         // which may call slot_alloc
        err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        thread_mutex_lock(&ca->mutex);

        // Buffers
        void *buf = slab_alloc(&mca->slab);
        if (!buf) { /* Grow slab */
            // Allocate slot out of the list
            mca->a.space--;
            struct capref frame;
            err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
            if (err_is_fail(err)) {
                thread_mutex_unlock(&ca->mutex);
                return err_push(err, LIB_ERR_SLOT_ALLOC);
            }

            thread_mutex_unlock(&ca->mutex); // following functions may call
                                             // slot_alloc
            void *slab_buf;
            size_t size;
            err = vspace_mmu_aware_map(&mca->mmu_state, frame,
                                       mca->slab.blocksize, &slab_buf, &size);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
            }

            thread_mutex_lock(&ca->mutex);

            // Grow slab
            slab_grow(&mca->slab, slab_buf, size);

            // Try allocating again
            buf = slab_alloc(&mca->slab);
            if (buf == NULL) {
                thread_mutex_unlock(&ca->mutex);
                return LIB_ERR_SLAB_ALLOC_FAIL;
            }
            }
        }

        mca->reserve = buf;
        buf = (char *)buf + sizeof(struct slot_allocator_list);
        size_t bufsize = mca->slab.blocksize - sizeof(struct slot_allocator_list);

        // Allocator
        err = single_slot_alloc_init_raw(&mca->reserve->a, cap, cnode,
                                         mca->a.nslots, buf, bufsize);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
        }
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}