Example #1
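// Allocate a list node from kernel memory and set its data/prev/next fields.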
static list_node_t* list_create_node(void* data,
        list_node_t* prev, list_node_t* next) {
    // TODO: use a proper heap (malloc)
    list_node_t* node = vmm_alloc(sizeof(list_node_t), VMM_KERNEL);
    node->data = data;
    node->prev = prev;
    node->next = next;
    return node;
}
Example #2
/**
 * Wrapper over vmm_alloc().
 */
static void *
rxbuf_page_alloc(size_t size)
{
	void *p;

	g_assert(size == rxbuf_pagesize);

	p = vmm_alloc(size);

	if (GNET_PROPERTY(rxbuf_debug) > 2)
		g_debug("RXBUF allocated %uK buffer at %p", (unsigned) size / 1024, p);

	return p;
}
Example #3
/**
 * Setup allocated LRU page cache.
 */
static int
setup_cache(struct lru_cache *cache, long pages, bool wdelay)
{
	cache->arena = vmm_alloc(pages * DBM_PBLKSIZ);
	if (NULL == cache->arena)
		return -1;
	cache->pagnum = htable_create(HASH_KEY_SELF, 0);
	cache->used = hash_list_new(NULL, NULL);
	cache->available = slist_new();
	cache->pages = pages;
	cache->next = 0;
	cache->write_deferred = wdelay;
	cache->dirty = walloc(cache->pages);
	WALLOC_ARRAY(cache->numpag, cache->pages);

	return 0;
}
Example #4
/**
 * Setup allocated LRU page cache.
 */
static int
setup_cache(struct lru_cache *cache, long pages, gboolean wdelay)
{
	cache->arena = vmm_alloc(pages * DBM_PBLKSIZ);
	if (NULL == cache->arena)
		return -1;
	cache->pagnum = g_hash_table_new(NULL, NULL);
	cache->used = hash_list_new(NULL, NULL);
	cache->available = slist_new();
	cache->pages = pages;
	cache->next = 0;
	cache->write_deferred = wdelay;
	cache->dirty = walloc(cache->pages);
	cache->numpag = walloc(cache->pages * sizeof(long));

	return 0;
}
Example #5
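// Load all PT_LOAD segments of an in-memory ELF image into user address space,
// mapping any missing pages, and return the entry point (NULL if the image is invalid).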
void* elf_load (void* image) {
  elf_header_t* header = image;
  elf_program_header_t* ph;
  int i;

  if (header->magic != ELF_MAGIC) {
    ERROR("Invalid ELF magic!");
    return NULL;
  }

  ph = (elf_program_header_t*) (image + header->ph_offset);
  for (i = 0; i < header->ph_entry_count; i++, ph++) {
    void* dest = (void*) ph->virt_addr;
    void* src = image + ph->offset;

    if (ph->type != PT_LOAD) {
      continue;
    }

    if (ph->virt_addr < USER_MEMORY_START ||
        (ph->virt_addr + ph->mem_size) >= USER_MEMORY_END) {
      ERROR("invalid elf file");
      return NULL;
    }

    uintptr_t dest_page = ph->virt_addr & 0xfffff000;
    uintptr_t end = ph->virt_addr + ph->mem_size;

    // Map every page spanned by [virt_addr, virt_addr + mem_size) before
    // zeroing and copying the segment.
    while (dest_page < end) {
      if (!vmm_is_mapped(dest_page)) {
        vmm_alloc(dest_page);
      }
      dest_page += 0x1000;
    }
    memset(dest, 0, ph->mem_size);
    memcpy(dest, src, ph->file_size);
  }

  return (void*) header->entry;
}
Example #6
/**
 * Set the page cache size.
 * @return 0 if OK, -1 on failure with errno set.
 */
int
setcache(DBM *db, long pages)
{
	struct lru_cache *cache = db->cache;
	bool wdelay;

	sdbm_lru_check(cache);

	if (pages <= 0) {
		errno = EINVAL;
		return -1;
	}

	if (NULL == cache)
		return init_cache(db, pages, FALSE);

	/*
	 * Easiest case: the size is identical.
	 */

	if (pages == cache->pages)
		return 0;

	/*
	 * Cache size is changed.
	 *
	 * This means the arena will be reallocated, so we must invalidate the
	 * current db->pagbuf pointer, which lies within the old arena.  It is
	 * sufficient to reset db->pagbno, forcing a reload from the upper layers.
	 * Note that when the cache size is enlarged, the old page is still cached
	 * so reloading will be just a matter of recomputing db->pagbuf.  We could
	 * do so here, but cache size changes should only be infrequent.
	 *
	 * We also reset all the cache statistics, since a different cache size
	 * will imply different hit/miss ratios.
	 */

	db->pagbno = -1;		/* Current page address will become invalid */
	db->pagbuf = NULL;

	if (common_stats) {
		s_info("sdbm: \"%s\" LRU cache size %s from %ld page%s to %ld",
			sdbm_name(db), pages > cache->pages ? "increased" : "decreased",
			cache->pages, plural(cache->pages), pages);
		log_lrustats(db);
	}

	cache->rhits = cache->rmisses = 0;
	cache->whits = cache->wmisses = 0;

	/*
	 * Straightforward: the size is increased.
	 */

	if (pages > cache->pages) {
		char *new_arena = vmm_alloc(pages * DBM_PBLKSIZ);
		if (NULL == new_arena)
			return -1;
		memmove(new_arena, cache->arena, cache->pages * DBM_PBLKSIZ);
		vmm_free(cache->arena, cache->pages * DBM_PBLKSIZ);
		cache->arena = new_arena;
		cache->dirty = wrealloc(cache->dirty, cache->pages, pages);
		cache->numpag = wrealloc(cache->numpag,
			cache->pages * sizeof(long), pages * sizeof(long));
		cache->pages = pages;
		return 0;
	}

	/*
	 * Difficult: the size is decreased.
	 *
	 * The current page buffer could point in a cache area that is going
	 * to disappear, and the internal data structures must forget about
	 * all the old indices that are greater than the new limit.
	 *
	 * We do not try to optimize anything here, as this call should happen
	 * only infrequently: we flush the current cache (in case there are
	 * deferred writes), destroy the LRU cache data structures, recreate a
	 * new one and invalidate the current DB page.
	 */

	wdelay = cache->write_deferred;
	flush_dirtypag(db);
	free_cache(cache);
	return setup_cache(cache, pages, wdelay);
}
Example #7
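// Allocate an empty list from kernel memory; head and tail start out null.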
list_t* list_create() {
    // TODO: use a proper heap (malloc)
    list_t* list = vmm_alloc(sizeof(list_t), VMM_KERNEL);
    list->head = list->tail = 0;
    return list;
}
Example #8
/**
 * Check which of qsort(), xqsort(), xsort() or smsort() is best for sorting
 * aligned arrays with a native item size of OPSIZ.  At identical performance
 * level, we prefer our own sorting algorithms instead of libc's qsort() for
 * memory allocation purposes.
 *
 * @param items		amount of items to use in the sorted array
 * @param idx		index of the virtual routine to update
 * @param verbose	whether to be verbose
 * @param which		either "large" or "small", for logging
 */
static void
vsort_init_items(size_t items, unsigned idx, int verbose, const char *which)
{
	struct vsort_testing tests[] = {
		{ vsort_qsort,	qsort,	0.0, 0, "qsort" },
		{ vsort_xqsort,	xqsort,	0.0, 2, "xqsort" },
		{ vsort_xsort,	xsort,	0.0, 1, "xsort" },
		{ vsort_tqsort,	tqsort,	0.0, 1, "tqsort" },
		{ vsort_smsort,	smsort,	0.0, 1, "smsort" },	/* Only for almost sorted */
	};
	size_t len = items * OPSIZ;
	struct vsort_timing vt;
	size_t loops, highest_loops;
	unsigned i;

	g_assert(uint_is_non_negative(idx));
	g_assert(idx < N_ITEMS(vsort_table));

	vt.data = vmm_alloc(len);
	vt.copy = vmm_alloc(len);
	vt.items = items;
	vt.isize = OPSIZ;
	vt.len = len;
	random_bytes(vt.data, len);

	highest_loops = loops = vsort_loops(items);

	/* The -1 below is to avoid benchmarking smsort() for the general case */

retry_random:
	for (i = 0; i < N_ITEMS(tests) - 1; i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() took %.4f secs for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_random;
		}
	}

	/*
	 * When dealing with a large amount of items, redo the tests twice with
	 * another set of random bytes to make sure we're not hitting a special
	 * ordering case.
	 */

	if (items >= VSORT_ITEMS) {
		unsigned j;

		for (j = 0; j < 2; j++) {
			random_bytes(vt.data, len);

			for (i = 0; i < N_ITEMS(tests) - 1; i++) {
				tests[i].v_elapsed +=
					vsort_timeit(tests[i].v_timer, &vt, &loops);

				if (verbose > 1) {
					s_debug("%s() spent %.6f secs total for %s array",
						tests[i].v_name, tests[i].v_elapsed, which);
				}

				if (loops != highest_loops) {
					highest_loops = loops;
					/* Redo all the tests if the number of loops changes */
					s_info("%s(): restarting %s array tests with %zu loops",
						G_STRFUNC, which, loops);
					goto retry_random;
				}
			}
		}
	}

	xqsort(tests, N_ITEMS(tests) - 1, sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	/*
	 * Now sort the data, then randomly perturb them by swapping a few items
	 * so that the array is almost sorted.
	 */

	xqsort(vt.data, vt.items, vt.isize, vsort_long_cmp);
	vsort_perturb_sorted_array(vt.data, vt.items, vt.isize);

retry_sorted:
	for (i = 0; i < N_ITEMS(tests); i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() on almost-sorted took %.4f secs "
				"for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_sorted;
		}
	}

	xqsort(tests, N_ITEMS(tests), sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort_almost = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort_almost() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	vmm_free(vt.data, len);
	vmm_free(vt.copy, len);
}
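The gtk-gnutella snippets above share one convention worth noting: whatever size is passed to vmm_alloc() must be handed back to vmm_free() when the memory is released (Example #8 frees vt.data and vt.copy with the same len it allocated them with). The fragment below is a minimal sketch of that allocate/check/release pattern, not code from any of the projects above; the "lib/vmm.h" include path, the SCRATCH_LEN constant and the NULL-on-failure check are assumptions modelled on Examples #3, #6 and #8.

#include <string.h>
#include "lib/vmm.h"          /* assumed header path for vmm_alloc()/vmm_free() */

#define SCRATCH_LEN 4096      /* hypothetical buffer size */

static int
scratch_demo(void)
{
	char *buf = vmm_alloc(SCRATCH_LEN);

	if (NULL == buf)              /* Examples #3 and #6 guard against a NULL return */
		return -1;

	memset(buf, 0, SCRATCH_LEN);  /* ... use the buffer ... */

	vmm_free(buf, SCRATCH_LEN);   /* vmm_free() takes the original size, as in Example #8 */
	return 0;
}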