Code example #1
File: poolalloc.c  Project: AG-Dev/wesnoth_ios
// POSIX: return 0 on success or an error code (not errno) on failure, and
// reject alignments that are not a power-of-two multiple of sizeof(void *).
int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	if (alignment % sizeof(void *) != 0 || (alignment & (alignment - 1)) != 0)
		return EINVAL;
	pthread_mutex_lock(&dlmalloc_mutex);
	ptr = dlmemalign(alignment, size);
	pthread_mutex_unlock(&dlmalloc_mutex);
	if (ptr == NULL)
		return ENOMEM;
	*memptr = ptr;
	return 0;
}
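For context, callers of this wrapper rely on the POSIX contract: the result lands in *memptr and the return value is 0 or an error code, never -1/errno. A minimal usage sketch (the 64-byte alignment and 4 KiB size are arbitrary values chosen for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    void *buf;
    /* Request a 64-byte-aligned 4 KiB buffer. */
    int rc = posix_memalign(&buf, 64, 4096);
    if (rc != 0) {
        fprintf(stderr, "posix_memalign failed: %s\n", strerror(rc));
        return 1;
    }
    /* (uintptr_t)buf is now a multiple of 64. */
    free(buf);
    return 0;
}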
Code example #2
File: poolalloc.c  Project: AG-Dev/wesnoth_ios
// Serialize the call: dlmemalign is not thread-safe on its own here.
void* memalign(size_t alignment, size_t size)
{
	pthread_mutex_lock(&dlmalloc_mutex);
	void* result = dlmemalign(alignment, size);
	pthread_mutex_unlock(&dlmalloc_mutex);
	return result;
}
Code example #3
File: symmetric_heap_c.c  Project: jpdoyle/SOS
void *
shmem_align(size_t alignment, size_t size)
{
    void *ret;

    SHMEM_ERR_CHECK_INITIALIZED();

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    ret = dlmemalign(alignment, size);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    shmem_internal_barrier_all();

    return ret;
}
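Note that the barrier makes this allocation a collective operation: every PE must call shmem_align with matching arguments before any of them proceeds. A minimal usage sketch against the standard OpenSHMEM API (the sizes are chosen arbitrarily here):

#include <shmem.h>

int main(void)
{
    shmem_init();
    /* Collective: every PE allocates the same symmetric block. */
    double *buf = shmem_align(64, 1024 * sizeof(double));
    /* ... one-sided puts/gets targeting buf on remote PEs ... */
    shmem_free(buf);      /* freeing is collective as well */
    shmem_finalize();
    return 0;
}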
Code example #4
/* This routine serves as the entry point for 'memalign'.
 * It behaves similarly to qemu_instrumented_malloc.
 */
void* qemu_instrumented_memalign(size_t alignment, size_t bytes) {
    MallocDesc desc;

    if (bytes == 0) {
        // Pass zero-byte allocations straight through to malloc.
        qemu_info_log("::: <libc_pid=%03u, pid=%03u>: memalign(%zx, %zu) redir to malloc",
                 malloc_pid, getpid(), alignment, bytes);
        return qemu_instrumented_malloc(0);
    }

    /* Prefix size for aligned allocation must be equal to the alignment used
     * for allocation in order to ensure proper alignment of the returned
     * pointer, in case that alignment requirement is greater than prefix
     * size. */
    desc.prefix_size = alignment > DEFAULT_PREFIX_SIZE ? alignment :
                                                         DEFAULT_PREFIX_SIZE;
    desc.requested_bytes = bytes;
    desc.suffix_size = DEFAULT_SUFFIX_SIZE;
    desc.ptr = dlmemalign(desc.prefix_size, mallocdesc_alloc_size(&desc));
    if (desc.ptr == NULL) {
        error_log("<libc_pid=%03u, pid=%03u> memalign(%zx, %zu): dlmemalign(%u) failed.",
                  malloc_pid, getpid(), alignment, bytes,
                  mallocdesc_alloc_size(&desc));
        return NULL;
    }
    if (notify_qemu_malloc(&desc)) {
        log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: memalign(%zx, %zu): notify_malloc failed for ",
                  malloc_pid, getpid(), alignment, bytes);
        dlfree(desc.ptr);
        return NULL;
    }

#if TEST_ACCESS_VIOLATIONS
    test_access_violation(&desc);
#endif  // TEST_ACCESS_VIOLATIONS

    log_mdesc(info, &desc, "@@@ <libc_pid=%03u, pid=%03u> memalign(%zx, %zu) -> ",
              malloc_pid, getpid(), alignment, bytes);
    return mallocdesc_user_ptr(&desc);
}
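The guard-region bookkeeping above assumes a [prefix | user bytes | suffix] layout, with the prefix at least as large as the requested alignment so the user pointer stays aligned. A hypothetical sketch of the two helpers used above (the real MallocDesc in the instrumented libc carries more fields; the _sketch names are invented for illustration):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of the MallocDesc fields used above. */
typedef struct {
    void     *ptr;             /* raw pointer returned by dlmemalign */
    uint32_t  prefix_size;     /* guard bytes before the user block */
    uint32_t  requested_bytes; /* size the caller asked for */
    uint32_t  suffix_size;     /* guard bytes after the user block */
} MallocDescSketch;

static size_t mallocdesc_alloc_size_sketch(const MallocDescSketch *d)
{
    return (size_t)d->prefix_size + d->requested_bytes + d->suffix_size;
}

static void *mallocdesc_user_ptr_sketch(const MallocDescSketch *d)
{
    /* d->ptr is aligned to prefix_size, which is at least the requested
     * alignment, so ptr + prefix_size is aligned as well. */
    return (char *)d->ptr + d->prefix_size;
}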
Code example #5
File: stubs.c  Project: BackupTheBerlios/elua-svn
// Newlib reentrant stub for memalign: the reentrancy context 'r' is unused.
void* _memalign_r( struct _reent* r, size_t align, size_t nbytes )
{
  return dlmemalign( align, nbytes );
}
Code example #6
static CodeChunk*
new_codechunk (CodeChunk *last, int dynamic, int size)
{
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize, valloc_granule;
	CodeChunk *chunk;
	void *ptr;

#ifdef FORCE_MALLOC
	flags = CODE_FLAG_MALLOC;
#endif

	pagesize = mono_pagesize ();
	valloc_granule = mono_valloc_granule ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
		if (size < minsize)
			chunk_size = minsize;
		else {
			/* Allocate MIN_ALIGN-1 more than we need so we can still */
			/* guarantee MIN_ALIGN alignment for individual allocs    */
			/* from mono_code_manager_reserve_align.                  */
			size += MIN_ALIGN - 1;
			size &= ~(MIN_ALIGN - 1);
			chunk_size = size;
			chunk_size += valloc_granule - 1;
			chunk_size &= ~(valloc_granule - 1);
		}
	}
#ifdef BIND_ROOM
	if (dynamic)
		/* Reserve more space since there are no other chunks we might use if this one gets full */
		bsize = (chunk_size * 2) / BIND_ROOM;
	else
		bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~(MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		if (!dynamic) {
			chunk_size += valloc_granule - 1;
			chunk_size &= ~(valloc_granule - 1);
		}
	}
#endif

	if (flags == CODE_FLAG_MALLOC) {
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
		if (!ptr)
			return NULL;
	} else {
		/* Try to allocate code chunks next to each other to help the VM */
		ptr = NULL;
		if (last)
			ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
		if (!ptr)
			ptr = codechunk_valloc (NULL, chunk_size);
		if (!ptr)
			return NULL;
	}

	if (flags == CODE_FLAG_MALLOC) {
#ifdef BIND_ROOM
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
#endif
	}

	chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			dlfree (ptr);
		else
			mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = (char *) ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;
	if (code_manager_callbacks.chunk_new)
		code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
	mono_profiler_code_chunk_new ((gpointer)chunk->data, chunk->size);

	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}
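The size computations in new_codechunk repeatedly use the align-up idiom (x + a - 1) & ~(a - 1), which rounds x up to the next multiple of a power-of-two a. A minimal standalone sketch of that idiom (align_up is a name chosen here, not a Mono function):

#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of a; a must be a power of two. */
static size_t align_up(size_t x, size_t a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    assert(align_up(100, 16) == 112);
    assert(align_up(112, 16) == 112);   /* already aligned: unchanged */
    assert(align_up(1, 4096) == 4096);  /* page-granularity rounding */
    return 0;
}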
Code example #7
/**
 * Allocates an aligned block of RAM.
 */
void *heapmm_alloc_alligned(size_t size, uintptr_t alignment)
{
	return dlmemalign((size_t) alignment, size);
}
Code example #8
File: malloc_debug_dummy.c  Project: chiehwen/olibc
void* memalign(size_t alignment, size_t bytes) {
    return dlmemalign(alignment, bytes);
}