Example No. 1
    T* allocate(bool* needs_gc) {
      utilities::thread::SpinLock::LockGuard lg(lock_);

      // (uintptr_t)-1 marks an empty free list: grow by one chunk,
      // letting allocate_chunk() flag the caller if a GC is needed.
      if(free_list_ == (uintptr_t)-1) allocate_chunk(needs_gc);

      // Pop the head of the free list and hand the slot out reset.
      T* t = from_index(free_list_);
      free_list_ = t->next();

      t->clear();
      in_use_++;

      return t;
    }
Example No. 2
    uintptr_t allocate_index(bool* needs_gc) {
      thread::SpinLock::LockGuard lg(lock_);
      if(free_list_ == (uintptr_t)-1) allocate_chunk(needs_gc);

      // Same pop as allocate() above, but return the slot's index
      // rather than the object pointer.
      uintptr_t current_index = free_list_;
      T* t = from_index(free_list_);
      free_list_ = t->next();
      t->clear();
      in_use_++;

      return current_index;
    }
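Examples No. 1 and No. 2 are two entry points on the same pool class: allocate() returns the object itself, while allocate_index() returns the slot index it lives at. Below is a minimal self-contained sketch of the underlying pattern, assuming std::mutex in place of the original SpinLock and omitting the needs_gc flag; Node, Pool, and the chunk size of 64 are illustrative names and values, not taken from the original.

// Lock-guarded pool whose free slots are threaded through the objects
// themselves (an intrusive free list). A deque backs the slots so
// growth never moves existing objects.
#include <cstdint>
#include <deque>
#include <mutex>

struct Node {
  uintptr_t next_;                          // index of the next free slot
  uintptr_t next() const { return next_; }
  void set_next(uintptr_t n) { next_ = n; }
  void clear() { next_ = 0; /* reset payload fields here */ }
};

class Pool {
  static const uintptr_t cEmpty = (uintptr_t)-1; // sentinel: free list empty
  std::mutex lock_;                         // stand-in for the SpinLock
  std::deque<Node> slots_;
  uintptr_t free_list_ = cEmpty;
  size_t in_use_ = 0;

  void allocate_chunk() {                   // grow; push new slots on the free list
    for (int i = 0; i < 64; i++) {
      slots_.push_back(Node());
      slots_.back().set_next(free_list_);
      free_list_ = slots_.size() - 1;
    }
  }

public:
  Node* from_index(uintptr_t i) { return &slots_[i]; }

  uintptr_t allocate_index() {              // same shape as Example No. 2
    std::lock_guard<std::mutex> lg(lock_);
    if (free_list_ == cEmpty) allocate_chunk();
    uintptr_t idx = free_list_;
    Node* t = from_index(idx);
    free_list_ = t->next();
    t->clear();
    in_use_++;
    return idx;
  }
};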
Example No. 3
// Before running, execute: "ulimit -s unlimited"
int main(int argc, char* argv[]) {
    parse_opts(argc, argv);

    srand(time(NULL));
    int tick = 0;
    // Heap-allocate the per-tick lists; the stack has some size restrictions.
    struct list **chunks_lifetime = (struct list **)malloc(sizeof(struct list*) * max_ticks);
    for (int i = 0; i < max_ticks; i++) {
        chunks_lifetime[i] = create_list();
    }

    struct memory *mem = create_memory(memory_size);
    printf("Starting\n"); 
    while (1) {
        if (need_trace) {
            printf("Tick %d:\n", tick);
        }
        while (!is_list_empty(chunks_lifetime[tick])) {
            int8_t *ptr = get_and_remove(chunks_lifetime[tick]);
            if (need_trace) {
                printf("   Removing chunk with size %d bytes\n", get_chunk_size(ptr));
            }
            free_chunk(mem, ptr);
        }
        SIZE_TYPE chunk_size = rand_range(min_chunk_size, max_chunk_size) / 4 * 4; /* round down to a multiple of 4 */
        int8_t *ptr = allocate_chunk(mem, chunk_size);

        if (ptr == NULL) {
            printf("Oops. We have no free memory to allocate %d bytes on tick %d. Exiting\n", chunk_size, tick);
            break;
        }
        int chunk_lifetime = rand_range(min_lifetime, max_lifetime);
        if (tick + chunk_lifetime >= max_ticks) {
            printf("The maximum number of ticks(%d) reached. Exiting\n", max_ticks);
            break;
        }
        
        add_to_list(chunks_lifetime[tick + chunk_lifetime], ptr);
        if (need_trace) {
            printf("   Allocating chunk with size %d bytes. Its lifetime is %d ticks\n", (int)chunk_size, chunk_lifetime);
        }
        tick++;
    }

    for (int i = 0; i < max_ticks; i++) {
        free_list(chunks_lifetime[i]);
    }
    free_memory(mem);
    free(chunks_lifetime);
    return 0;
}
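Example No. 3 relies on a rand_range helper that is not part of the listing. A minimal sketch, assuming it returns a uniformly distributed value in the inclusive range [min, max]; the modulo idiom has a slight bias, which is acceptable for a simulator like this:

#include <stdlib.h>

/* Hypothetical helper assumed by Example No. 3: near-uniform value in
 * [min, max], driven by the srand(time(NULL)) seed set in main(). */
static int rand_range(int min, int max) {
    return min + rand() % (max - min + 1);
}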
Example No. 4
WasmResult wasm_init_stack_allocator(WasmStackAllocator* stack_allocator,
                                     WasmAllocator* fallback) {
  WASM_ZERO_MEMORY(*stack_allocator);
  stack_allocator->allocator.alloc = stack_alloc;
  stack_allocator->allocator.realloc = stack_realloc;
  stack_allocator->allocator.free = stack_free;
  stack_allocator->allocator.destroy = stack_destroy;
  stack_allocator->allocator.mark = stack_mark;
  stack_allocator->allocator.reset_to_mark = stack_reset_to_mark;
  stack_allocator->allocator.print_stats = stack_print_stats;
  stack_allocator->allocator.setjmp_handler = stack_setjmp_handler;
  stack_allocator->fallback = fallback;

  WasmStackAllocatorChunk* chunk =
      allocate_chunk(stack_allocator, CHUNK_MAX_AVAIL, __FILE__, __LINE__);
  chunk->prev = NULL;
  stack_allocator->first = stack_allocator->last = chunk;
  return WASM_OK;
}
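The function above wires up mark and reset_to_mark handlers, which is the defining feature of a stack allocator: allocations are bump-pointer cheap, and everything allocated after a mark can be released in one step. Below is a minimal sketch of that idea with a single fixed buffer; all names are illustrative, and the real wabt allocator additionally chains chunks and carries a separate fallback allocator.

#include <stddef.h>

/* Minimal bump/stack allocator sketching the mark/reset pattern. */
typedef struct {
  unsigned char* base;   /* backing buffer */
  size_t size;           /* buffer capacity */
  size_t top;            /* bump pointer */
} StackAlloc;

typedef size_t StackMark;

static void* stack_alloc_sketch(StackAlloc* a, size_t n) {
  n = (n + 7) & ~(size_t)7;              /* keep 8-byte alignment */
  if (a->top + n > a->size)
    return NULL;                         /* the real code would chain a new chunk */
  void* p = a->base + a->top;
  a->top += n;
  return p;
}

static StackMark stack_mark_sketch(StackAlloc* a) {
  return a->top;                         /* remember the current height */
}

static void stack_reset_to_mark_sketch(StackAlloc* a, StackMark m) {
  a->top = m;                            /* drop everything above the mark */
}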
Example No. 5
    //  Adds an element to the back end of the queue.
    inline void push ()
    {
        _back_chunk = _end_chunk;
        _back_pos = _end_pos;

        if (++_end_pos != N)
            return;

        //  Reuse a chunk parked by the consumer side, if available;
        //  otherwise grab a fresh one from allocate_chunk ().
        chunk_t *sc = _spare_chunk.xchg (NULL);
        if (sc) {
            _end_chunk->next = sc;
            sc->prev = _end_chunk;
        } else {
            _end_chunk->next = allocate_chunk ();
            alloc_assert (_end_chunk->next);
            _end_chunk->next->prev = _end_chunk;
        }
        _end_chunk = _end_chunk->next;
        _end_pos = 0;
    }
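push() here is the producer half of a chunked queue (the 0MQ yqueue_t shape): when the tail chunk fills, it first tries to reclaim a chunk the consumer parked in _spare_chunk, and only calls allocate_chunk() when nothing is cached. A sketch of just that handoff, assuming std::atomic in place of the library's atomic_ptr_t; chunk and spare_cache are illustrative names:

#include <atomic>

// One chunk of the queue: a fixed array of values plus links.
template <typename T, int N>
struct chunk {
  T values[N];
  chunk* prev;
  chunk* next;
};

// The spare-chunk handoff: the consumer parks the last drained chunk
// instead of freeing it; the producer claims it with one atomic exchange.
template <typename T, int N>
class spare_cache {
  std::atomic<chunk<T, N>*> spare{nullptr};
public:
  chunk<T, N>* take() {                  // producer side
    return spare.exchange(nullptr);
  }
  void park(chunk<T, N>* c) {            // consumer side
    delete spare.exchange(c);            // free whatever the new chunk displaces
  }
};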
Example No. 6
void* concurrent_growable_pool::allocate_pointer()
{
	void* p = nullptr;

	// check cached last-used chunk (racy but it's only an optimization hint)
	concurrent_pool* pool = last_allocate;
	if (pool)
		p = pool->allocate();

	// search for another chunk
	if (!p)
	{
		// Iterating over the list here is safe: the list only ever grows,
		// and growth happens at the head, so everything past any observed
		// head remains valid for traversal.
		chunk_t* c = chunks.peek();
		while (c)
		{
			p = c->pool.allocate();
			if (p)
			{
				last_allocate = &c->pool; // racy but only an optimization hint
				break;
			}
			c = c->next;
		}

		// still no memory? add another chunk
		if (!p)
		{
			chunk_t* new_chunk = allocate_chunk();
			p = new_chunk->pool.allocate();
			chunks.push(new_chunk);
			last_allocate = &new_chunk->pool; // racy but only an optimization hint
		}
	}

	return p;
}
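The traversal in allocate_pointer() is safe only because the chunk list grows at the head and never shrinks while in use. A minimal sketch of such a grow-only list, assuming std::atomic for the head pointer and a node type with a public next field (the original uses its own concurrent stack type):

#include <atomic>

// Grow-only intrusive list: pushes happen only at the head, so a reader
// that loads the head can walk the next links without further
// synchronization; nodes are never unlinked or freed during traversal.
template <typename Node>
class grow_only_list {
  std::atomic<Node*> head{nullptr};
public:
  void push(Node* n) {
    Node* h = head.load(std::memory_order_relaxed);
    do {
      n->next = h;                       // link on top of the observed head
    } while (!head.compare_exchange_weak(h, n,
                 std::memory_order_release,
                 std::memory_order_relaxed));
  }
  Node* peek() const { return head.load(std::memory_order_acquire); }
};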
Example No. 7
struct chunk *new_chunk (struct atom *atm_ptr, int component_number, double contact_area, double reentrant_area, double accessible_area)
{

	struct chunk *n_chunk;
	int atom_number;

	atom_number = atm_ptr -> number;

	n_chunk = (struct chunk *) allocate_chunk ();
	if (n_chunk == NULL) {
		set_error1 ("new_chunk: ran out of memory");
		return(NULL);
	}

	n_chunk -> atom_number = atom_number;
	strcpy (n_chunk -> labels[0], atm_ptr -> group);
	strcpy (n_chunk -> labels[1], atm_ptr -> sequence);
	strcpy (n_chunk -> labels[2], atm_ptr -> name);
	n_chunk -> component_number = (short) component_number;
	n_chunk -> contact_area = contact_area;
	n_chunk -> reentrant_area = reentrant_area;
	n_chunk -> accessible_area = accessible_area;
	return (n_chunk);
}
Example No. 8
/* Store data in the compact image. The argument 'soft_write' means
 * the store was caused by copy-on-read or prefetching, which need not
 * update metadata immediately. */
static BlockDriverAIOCB *store_data_in_compact_image (FvdAIOCB * acb,
                                                      int soft_write,
                                                      FvdAIOCB * parent_acb,
                                                      BlockDriverState * bs,
                                                      int64_t sector_num,
                                                      QEMUIOVector * orig_qiov,
                                                      const int nb_sectors,
                                                      BlockDriverCompletionFunc
                                                      * cb, void *opaque)
{
    BDRVFvdState *s = bs->opaque;

    const uint32_t first_chunk = sector_num / s->chunk_size;
    const uint32_t last_chunk = (sector_num + nb_sectors - 1) / s->chunk_size;
    int table_dirty = FALSE;
    uint32_t chunk;
    int64_t start_sec;

    /* Check if storage space is allocated. */
    for (chunk = first_chunk; chunk <= last_chunk; chunk++) {
        if (IS_EMPTY (s->table[chunk])) {
            uint32_t id = allocate_chunk (bs);
            if (IS_EMPTY (id)) {
                return NULL;
            }
            id |= DIRTY_TABLE;
            WRITE_TABLE (s->table[chunk], id);

            table_dirty = TRUE;
        } else if (IS_DIRTY (s->table[chunk])) {
            /* This is possible if a previous soft-write allocated the storage
             * space but did not flush the table entry change to the journal
             * and hence did not clean the dirty bit. This is also possible
             * with two concurrent hard-writes. The first hard-write allocated
             * the storage space but has not flushed the table entry change to
             * the journal yet and hence the table entry remains dirty. In
             * this case, the second hard-write will also try to flush this
             * dirty table entry to the journal. The outcome is correct since
             * they store the same metadata change in the journal (although
             * twice). For this race condition, we prefer to have two writes
             * to the journal rather than introducing a locking mechanism,
             * because this happens rarely and those two writes to the journal
             * are likely to be merged by the kernel into a single write since
             * they are likely to update back-to-back sectors in the journal.
             * A locking mechanism would be less efficient, because the large
             * size of chunks would cause unnecessary locking due to ``false
             * sharing'' of a chunk by two writes. */
            table_dirty = TRUE;
        }
    }

    const int update_table = (!soft_write && table_dirty);
    size_t iov_left;
    uint8_t *iov_buf;
    int nb, iov_index, nqiov, niov;
    uint32_t prev;

    if (first_chunk == last_chunk) {
        goto handle_one_continuous_region;
    }

    /* Count the number of qiov and iov needed to cover the continuous regions
     * of the compact image. */
    iov_left = orig_qiov->iov[0].iov_len;
    iov_buf = orig_qiov->iov[0].iov_base;
    iov_index = 0;
    nqiov = 0;
    niov = 0;
    prev = READ_TABLE (s->table[first_chunk]);

    /* Data in the first chunk. */
    nb = s->chunk_size - (sector_num % s->chunk_size);

    for (chunk = first_chunk + 1; chunk <= last_chunk; chunk++) {
        uint32_t current = READ_TABLE (s->table[chunk]);
        int64_t data_size;
        if (chunk < last_chunk) {
            data_size = s->chunk_size;
        } else {
            data_size = (sector_num + nb_sectors) % s->chunk_size;
            if (data_size == 0) {
                data_size = s->chunk_size;
            }
        }

        if (current == prev + 1) {
            nb += data_size;        /* Continue the previous region. */
        } else {
            /* Terminate the previous region. */
            niov +=
                count_iov (orig_qiov->iov, &iov_index, &iov_buf, &iov_left,
                           nb * 512);
            nqiov++;
            nb = data_size;        /* Data in the new region. */
        }
        prev = current;
    }

    if (nqiov == 0) {
      handle_one_continuous_region:
        /* A simple case. All data can be written out in one qiov and no new
         * chunks are allocated. */
        start_sec = READ_TABLE (s->table[first_chunk]) * s->chunk_size +
                                        (sector_num % s->chunk_size);

        if (!update_table && !acb) {
            if (parent_acb) {
                QDEBUG ("STORE: acb%llu-%p  "
                        "store_directly_without_table_update\n",
                        parent_acb->uuid, parent_acb);
            }
            return bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec,
                                    orig_qiov, nb_sectors, cb, opaque);
        }

        if (!acb && !(acb = init_store_acb (soft_write, orig_qiov, bs,
                            sector_num, nb_sectors, parent_acb, cb, opaque))) {
            return NULL;
        }

        QDEBUG ("STORE: acb%llu-%p  store_directly  sector_num=%" PRId64
                " nb_sectors=%d\n", acb->uuid, acb, acb->sector_num,
                acb->nb_sectors);

        acb->store.update_table = update_table;
        acb->store.num_children = 1;
        acb->store.one_child.hd_acb =
            bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, orig_qiov,
                             nb_sectors, finish_store_data_in_compact_image,
                             &acb->store.one_child);
        if (acb->store.one_child.hd_acb) {
            acb->store.one_child.acb = acb;
            return &acb->common;
        } else {
            my_qemu_aio_unref (acb);
            return NULL;
        }
    }

    /* qiov for the last continuous region. */
    niov += count_iov (orig_qiov->iov, &iov_index, &iov_buf,
                       &iov_left, nb * 512);
    nqiov++;
    ASSERT (iov_index == orig_qiov->niov - 1 && iov_left == 0);

    /* Need to submit multiple requests to the lower layer. */
    if (!acb && !(acb = init_store_acb (soft_write, orig_qiov, bs, sector_num,
                                        nb_sectors, parent_acb, cb, opaque))) {
        return NULL;
    }
    acb->store.update_table = update_table;
    acb->store.num_children = nqiov;

    if (!parent_acb) {
        QDEBUG ("STORE: acb%llu-%p  start  sector_num=%" PRId64
                " nb_sectors=%d\n", acb->uuid, acb, acb->sector_num,
                acb->nb_sectors);
    }

    /* Allocate memory and create multiple requests. */
    const size_t metadata_size = nqiov * (sizeof (CompactChildCB) +
                                          sizeof (QEMUIOVector))
                                    + niov * sizeof (struct iovec);
    acb->store.children = (CompactChildCB *) my_qemu_malloc (metadata_size);
    QEMUIOVector *q = (QEMUIOVector *) (acb->store.children + nqiov);
    struct iovec *v = (struct iovec *) (q + nqiov);

    start_sec = READ_TABLE (s->table[first_chunk]) * s->chunk_size +
                                        (sector_num % s->chunk_size);
    nqiov = 0;
    iov_index = 0;
    iov_left = orig_qiov->iov[0].iov_len;
    iov_buf = orig_qiov->iov[0].iov_base;
    prev = READ_TABLE (s->table[first_chunk]);

    /* Data in the first chunk. */
    if (first_chunk == last_chunk) {
        nb = nb_sectors;
    }
    else {
        nb = s->chunk_size - (sector_num % s->chunk_size);
    }

    for (chunk = first_chunk + 1; chunk <= last_chunk; chunk++) {
        uint32_t current = READ_TABLE (s->table[chunk]);
        int64_t data_size;
        if (chunk < last_chunk) {
            data_size = s->chunk_size;
        } else {
            data_size = (sector_num + nb_sectors) % s->chunk_size;
            if (data_size == 0) {
                data_size = s->chunk_size;
            }
        }

        if (current == prev + 1) {
            nb += data_size;        /* Continue the previous region. */
        } else {
            /* Terminate the previous continuous region. */
            niov = setup_iov (orig_qiov->iov, v, &iov_index,
                              &iov_buf, &iov_left, nb * 512);
            qemu_iovec_init_external (q, v, niov);
            QDEBUG ("STORE: acb%llu-%p  create_child %d sector_num=%" PRId64
                    " nb_sectors=%d niov=%d\n", acb->uuid, acb, nqiov,
                    start_sec, q->size / 512, q->niov);
            acb->store.children[nqiov].hd_acb =
                bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, q,
                                 q->size / 512,
                                 finish_store_data_in_compact_image,
                                 &acb->store.children[nqiov]);
            if (!acb->store.children[nqiov].hd_acb) {
                goto fail;
            }
            acb->store.children[nqiov].acb = acb;
            v += niov;
            q++;
            nqiov++;
            start_sec = current * s->chunk_size; /* Begin of the new region. */
            nb = data_size;        /* Data in the new region. */
        }
        prev = current;
    }

    /* Request for the last chunk. */
    niov = setup_iov (orig_qiov->iov, v, &iov_index, &iov_buf,
                      &iov_left, nb * 512);
    ASSERT (iov_index == orig_qiov->niov - 1 && iov_left == 0);
    qemu_iovec_init_external (q, v, niov);

    QDEBUG ("STORE: acb%llu-%p  create_child_last %d sector_num=%" PRId64
            " nb_sectors=%d niov=%d\n", acb->uuid, acb, nqiov, start_sec,
            q->size / 512, q->niov);
    acb->store.children[nqiov].hd_acb =
        bdrv_aio_writev (s->fvd_data, s->data_offset + start_sec, q,
                         q->size / 512, finish_store_data_in_compact_image,
                         &acb->store.children[nqiov]);
    if (acb->store.children[nqiov].hd_acb) {
        acb->store.children[nqiov].acb = acb;
        return &acb->common;
    }

    int i;
  fail:
    QDEBUG ("STORE: acb%llu-%p  failed\n", acb->uuid, acb);
    for (i = 0; i < nqiov; i++) {
        bdrv_aio_cancel (acb->store.children[i].hd_acb);
    }
    my_qemu_free (acb->store.children);
    my_qemu_aio_unref (acb);
    return NULL;
}
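Both passes over the table in this example hinge on one invariant: logical chunks whose table entries satisfy current == prev + 1 occupy adjacent storage in the compact image, so one writev can cover the whole run. A minimal sketch of the counting pass in isolation, with the FVD types and the READ_TABLE macro stripped out; count_write_regions is an illustrative name:

#include <stdint.h>

/* Given the physical chunk id for each logical chunk in [first, last],
 * count how many contiguous runs (separate writes) are needed. */
static int count_write_regions(const uint32_t* table,
                               uint32_t first, uint32_t last) {
    int regions = 1;                 /* the run containing `first` */
    uint32_t prev = table[first];
    for (uint32_t c = first + 1; c <= last; c++) {
        if (table[c] != prev + 1)
            regions++;               /* discontinuity: a new run starts */
        prev = table[c];
    }
    return regions;
}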