Example #1
static void *huge_chunk_alloc(struct thread_cache *cache, size_t size, size_t alignment,
                              struct arena **out_arena) {
    // Pick the arena for this thread cache.
    struct arena *arena = get_arena(cache);

    // Try to reuse a chunk from the arena's recycled list before mapping new memory.
    void *chunk = chunk_recycle(&arena->chunks, NULL, size, alignment);
    if (chunk) {
        // Make sure the recycled range is committed; on failure, give the chunk back.
        if (unlikely(memory_commit(chunk, size))) {
            chunk_free(&arena->chunks, chunk, size);
            return NULL;
        }
    } else {
        // Nothing to recycle: allocate a fresh chunk.
        if (unlikely(!(chunk = chunk_alloc(NULL, size, alignment)))) {
            return NULL;
        }

        // Work around the possibility of holes created by huge_move_expand (see below).
        // The fresh chunk may land in a different arena's address range, so move the
        // held lock over to the arena that actually owns the chunk.
        struct arena *chunk_arena = get_huge_arena(chunk);
        if (chunk_arena != arena) {
            mutex_unlock(&arena->mutex);
            if (chunk_arena) {
                mutex_lock(&chunk_arena->mutex);
            }
            arena = chunk_arena;
        }
    }

    // Report which arena (possibly NULL) ends up owning the chunk.
    *out_arena = arena;
    return chunk;
}
enum gcerror gcmmu_create_context(struct gccorecontext *gccorecontext,
				  struct gcmmucontext *gcmmucontext,
				  pid_t pid)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct gcmmuarena *arena = NULL;
	unsigned int *logical;
	unsigned int i;

	GCENTER(GCZONE_CONTEXT);

	if (gcmmucontext == NULL) {
		gcerror = GCERR_MMU_CTXT_BAD;
		goto exit;
	}

	/* Reset the context. */
	memset(gcmmucontext, 0, sizeof(struct gcmmucontext));

	/* Initialize access lock. */
	GCLOCK_INIT(&gcmmucontext->lock);

	/* Initialize arena lists. */
	INIT_LIST_HEAD(&gcmmucontext->vacant);
	INIT_LIST_HEAD(&gcmmucontext->allocated);

	/* Mark context as dirty. */
	gcmmucontext->dirty = true;

	/* Set PID. */
	gcmmucontext->pid = pid;

	/* Allocate MTLB table. */
	gcerror = gc_alloc_cached(&gcmmucontext->master, GCMMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto exit;
	}

	/* Invalidate MTLB entries. */
	logical = gcmmucontext->master.logical;
	for (i = 0; i < GCMMU_MTLB_ENTRY_NUM; i += 1)
		logical[i] = GCMMU_MTLB_ENTRY_VACANT;

	/* Set MMU table mode. */
	gcmmucontext->mmuconfig.reg.master_mask
		= GCREG_MMU_CONFIGURATION_MASK_MODE_ENABLED;
	gcmmucontext->mmuconfig.reg.master = GCMMU_MTLB_MODE;

	/* Set the table address. */
	gcmmucontext->mmuconfig.reg.address_mask
		= GCREG_MMU_CONFIGURATION_MASK_ADDRESS_ENABLED;
	gcmmucontext->mmuconfig.reg.address
		= GCGETFIELD(gcmmucontext->master.physical,
			     GCREG_MMU_CONFIGURATION, ADDRESS);

	/* Allocate the first vacant arena. */
	gcerror = get_arena(gcmmu, &arena);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Entire range is currently vacant. */
	arena->start.absolute = 0;
	arena->end.absolute =
	arena->count = GCMMU_MTLB_ENTRY_NUM * GCMMU_STLB_ENTRY_NUM;
	list_add(&arena->link, &gcmmucontext->vacant);
	GCDUMPARENA(GCZONE_ARENA, "initial vacant arena", arena);

	/* Map the command queue. */
	gcerror = gcqueue_map(gccorecontext, gcmmucontext);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Reference MMU. */
	gcmmu->refcount += 1;

	GCEXIT(GCZONE_CONTEXT);
	return GCERR_NONE;

exit:
	gcmmu_destroy_context(gccorecontext, gcmmucontext);

	GCEXITARG(GCZONE_CONTEXT, "gcerror = 0x%08X\n", gcerror);
	return gcerror;
}