enum gcerror gcmmu_destroy_context(struct gccorecontext *gccorecontext,
				   struct gcmmucontext *gcmmucontext)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct list_head *head;
	struct gcmmuarena *arena;
	struct gcmmustlbblock *nextblock;

	GCENTER(GCZONE_CONTEXT);

	if (gcmmucontext == NULL) {
		gcerror = GCERR_MMU_CTXT_BAD;
		goto exit;
	}

	/* Unmap the command queue. */
	gcerror = gcqueue_unmap(gccorecontext, gcmmucontext);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Free allocated arenas. */
	while (!list_empty(&gcmmucontext->allocated)) {
		head = gcmmucontext->allocated.next;
		arena = list_entry(head, struct gcmmuarena, link);
		release_physical_pages(arena);
		list_move(head, &gcmmucontext->vacant);
	}

	/* Free slave tables. */
	while (gcmmucontext->slavealloc != NULL) {
		gc_free_cached(&gcmmucontext->slavealloc->pages);
		nextblock = gcmmucontext->slavealloc->next;
		kfree(gcmmucontext->slavealloc);
		gcmmucontext->slavealloc = nextblock;
	}

	/* Free the master table. */
	gc_free_cached(&gcmmucontext->master);

	/* Free arenas. */
	GCLOCK(&gcmmu->lock);
	list_splice_init(&gcmmucontext->vacant, &gcmmu->vacarena);
	GCUNLOCK(&gcmmu->lock);

	/* Dereference. */
	gcmmu->refcount -= 1;

	GCEXIT(GCZONE_CONTEXT);
	return GCERR_NONE;

exit:
	GCEXITARG(GCZONE_CONTEXT, "gcerror = 0x%08X\n", gcerror);
	return gcerror;
}
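
A note on the pattern: the teardown above releases the physical pages of every arena still on the per-context allocated list and parks those arenas on the vacant list, then splices the whole vacant list back into the driver-wide pool under the gcmmu lock before dropping the context's reference. A minimal userspace sketch of that pool-return idea, with illustrative names (struct arena, release_pages, splice_init are stand-ins for this sketch, not driver API):

#include <stdio.h>

/* Illustrative stand-in for struct gcmmuarena; the driver links arenas
 * through kernel list_head nodes rather than a bare next pointer. */
struct arena {
	unsigned int start, count;
	struct arena *next;
};

/* Stand-in for release_physical_pages(): drop per-arena page state. */
static void release_pages(struct arena *a)
{
	(void)a;	/* nothing to release in this sketch */
}

/* Rough equivalent of list_splice_init(): move all of *src onto *dst. */
static void splice_init(struct arena **dst, struct arena **src)
{
	while (*src) {
		struct arena *node = *src;
		*src = node->next;
		node->next = *dst;
		*dst = node;
	}
}

int main(void)
{
	static struct arena a1 = { 0,  16, NULL };
	static struct arena a2 = { 16, 48, NULL };
	struct arena *allocated = &a1;	/* still-mapped arenas */
	struct arena *vacant = &a2;	/* per-context free arenas */
	struct arena *pool = NULL;	/* driver-wide vacant pool */

	/* Step 1: release every allocated arena and park it on 'vacant'. */
	while (allocated) {
		struct arena *node = allocated;
		allocated = node->next;
		release_pages(node);
		node->next = vacant;
		vacant = node;
	}

	/* Step 2: splice the per-context vacant list into the global pool
	 * (done under gcmmu->lock in the driver). */
	splice_init(&pool, &vacant);

	for (struct arena *p = pool; p; p = p->next)
		printf("pool arena: start=%u count=%u\n", p->start, p->count);
	return 0;
}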
Example #2
enum gcerror mmu2d_unmap(struct mmu2dcontext *ctxt, struct mmu2darena *mapped)
{
	enum gcerror gcerror = GCERR_NONE;
	struct mmu2darena *prev, *allocated, *vacant;
#if MMU_ENABLE
	struct mmu2dstlb *stlb;
#endif
	u32 mtlb_idx, stlb_idx;
	u32 next_mtlb_idx, next_stlb_idx;
#if MMU_ENABLE
	u32 i, j, count, available;
	u32 *stlb_logical;
#else
	u32 i, count, available;
#endif

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	/*
	 * Find the arena.
	 */

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"unmapping arena 0x%08X\n",
		__func__, __LINE__, (unsigned int) mapped);

	prev = NULL;
	allocated = ctxt->allocated;

	while (allocated != NULL) {
		if (allocated == mapped)
			break;
		prev = allocated;
		allocated = allocated->next;
	}

	/* The allocation is not listed. */
	if (allocated == NULL) {
		gcerror = GCERR_MMU_ARG;
		goto fail;
	}

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"found allocated arena:\n",
		__func__, __LINE__);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  mtlb=%d\n",
		__func__, __LINE__, allocated->mtlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  stlb=%d\n",
		__func__, __LINE__, allocated->stlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  count=%d\n",
		__func__, __LINE__, allocated->count);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  address=0x%08X\n",
		__func__, __LINE__, allocated->address);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  logical=0x%08X\n",
		__func__, __LINE__, (unsigned int) allocated->logical);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  pages=0x%08X\n",
		__func__, __LINE__, (unsigned int) allocated->pages);

	mtlb_idx = allocated->mtlb;
	stlb_idx = allocated->stlb;

	/*
	 * Free slave tables.
	 */

	count = allocated->count;

	for (i = 0; count > 0; i += 1) {
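		/* Entries left in the current slave table; capped below to the
		 * number of entries still being unmapped. */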
		available = MMU_STLB_ENTRY_NUM - stlb_idx;

		if (available > count) {
			available = count;
			next_mtlb_idx = mtlb_idx;
			next_stlb_idx = stlb_idx + count;
		} else {
			next_mtlb_idx = mtlb_idx + 1;
			next_stlb_idx = 0;
		}

#if MMU_ENABLE
		stlb = ctxt->slave[mtlb_idx];
		if (stlb == NULL) {
			gcerror = GCERR_MMU_ARG;
			goto fail;
		}

		if (stlb->count < available) {
			gcerror = GCERR_MMU_ARG;
			goto fail;
		}

		stlb_logical = &stlb->pages.logical[stlb_idx];
		for (j = 0; j < available; j += 1)
			stlb_logical[j] = MMU_STLB_ENTRY_VACANT;

		stlb->count -= available;
#endif

		count -= available;
		mtlb_idx = next_mtlb_idx;
		stlb_idx = next_stlb_idx;
	}

	/*
	 * Remove from allocated arenas.
	 */

	if (prev == NULL)
		ctxt->allocated = allocated->next;
	else
		prev->next = allocated->next;

	release_physical_pages(allocated);

	/*
	 * Find point of insertion for the arena.
	 */

	prev = NULL;
	vacant = ctxt->vacant;

	while (vacant != NULL) {
		if ((vacant->mtlb > allocated->mtlb) ||
			((vacant->mtlb == allocated->mtlb) &&
			 (vacant->stlb  > allocated->stlb)))
			break;
		prev = vacant;
		vacant = vacant->next;
	}

	/* Insert between the previous and the next vacant arenas. */
	if (mmu2d_siblings(prev, allocated)) {
		if (mmu2d_siblings(allocated, vacant)) {
			prev->count += allocated->count;
			prev->count += vacant->count;
			prev->next   = vacant->next;
			mmu2d_free_arena(ctxt->mmu, allocated);
			mmu2d_free_arena(ctxt->mmu, vacant);
		} else {
			prev->count += allocated->count;
			mmu2d_free_arena(ctxt->mmu, allocated);
		}
	} else if (mmu2d_siblings(allocated, vacant)) {
		vacant->mtlb   = allocated->mtlb;
		vacant->stlb   = allocated->stlb;
		vacant->count += allocated->count;
		mmu2d_free_arena(ctxt->mmu, allocated);
	} else {
		allocated->next = vacant;
		if (prev == NULL)
			ctxt->vacant = allocated;
		else
			prev->next = allocated;
	}

fail:
	return gcerror;
}
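
The tail of mmu2d_unmap is a classic free-list coalescing insert: the vacant list is kept sorted by (mtlb, stlb), and the released arena is merged with its previous and/or next neighbour when they are adjacent (the mmu2d_siblings() test, not shown in this listing, presumably checks that one arena ends exactly where the other begins). A self-contained sketch of the same pattern over a flat page index, with illustrative names only:

#include <stdio.h>
#include <stdlib.h>

struct arena {
	unsigned int start;	/* first page index covered */
	unsigned int count;	/* number of pages */
	struct arena *next;
};

/* True when 'a' ends exactly where 'b' begins (the role played by
 * mmu2d_siblings() in the driver, here over a flat index). */
static int siblings(const struct arena *a, const struct arena *b)
{
	return a && b && (a->start + a->count == b->start);
}

/* Insert 'freed' into the sorted vacant list, merging with adjacent
 * neighbours so the list never holds two touching arenas. */
static void insert_coalesce(struct arena **vacant, struct arena *freed)
{
	struct arena *prev = NULL, *next = *vacant;

	while (next && next->start < freed->start) {
		prev = next;
		next = next->next;
	}

	if (siblings(prev, freed)) {
		prev->count += freed->count;
		free(freed);
		if (siblings(prev, next)) {	/* bridge prev and next */
			prev->count += next->count;
			prev->next = next->next;
			free(next);
		}
	} else if (siblings(freed, next)) {
		next->start = freed->start;
		next->count += freed->count;
		free(freed);
	} else {
		freed->next = next;
		if (prev)
			prev->next = freed;
		else
			*vacant = freed;
	}
}

int main(void)
{
	struct arena *vacant = NULL;
	unsigned int starts[] = { 32, 0, 16 };	/* 0..47 merges into one arena */

	for (int i = 0; i < 3; i++) {
		struct arena *a = malloc(sizeof(*a));
		a->start = starts[i];
		a->count = 16;
		a->next = NULL;
		insert_coalesce(&vacant, a);
	}

	for (struct arena *p = vacant; p; p = p->next)
		printf("vacant: start=%u count=%u\n", p->start, p->count);
	return 0;
}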
Example #3
enum gcerror mmu2d_map(struct mmu2dcontext *ctxt, struct mmu2dphysmem *mem,
			struct mmu2darena **mapped)
{
	enum gcerror gcerror = GCERR_NONE;
	struct mmu2darena *prev, *vacant, *split;
#if MMU_ENABLE
	struct mmu2dstlb *stlb = NULL;
	struct mmu2dstlb **stlb_array;
	u32 *mtlb_logical, *stlb_logical;
#endif
	u32 mtlb_idx, stlb_idx, next_idx;
#if MMU_ENABLE
	u32 i, j, count, available;
#else
	u32 i, count, available;
#endif
	pte_t *parray_alloc = NULL;
	pte_t *parray;

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	if ((mem == NULL) || (mem->count <= 0) || (mapped == NULL) ||
		((mem->pagesize != 0) && (mem->pagesize != MMU_PAGE_SIZE)))
		return GCERR_MMU_ARG;

	/*
	 * Find available sufficient arena.
	 */

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"mapping (%d) pages\n",
		__func__, __LINE__, mem->count);

	prev = NULL;
	vacant = ctxt->vacant;

	while (vacant != NULL) {
		if (vacant->count >= mem->count)
			break;
		prev = vacant;
		vacant = vacant->next;
	}

	if (vacant == NULL) {
		gcerror = GCERR_MMU_OOM;
		goto fail;
	}

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"found vacant arena:\n",
		__func__, __LINE__);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  mtlb=%d\n",
		__func__, __LINE__, vacant->mtlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  stlb=%d\n",
		__func__, __LINE__, vacant->stlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  count=%d (needed %d)\n",
		__func__, __LINE__, vacant->count, mem->count);

	/*
	 * Create page array.
	 */

	/* Reset page array. */
	vacant->pages = NULL;

	/* No page array given? */
	if (mem->pages == NULL) {
		/* Allocate physical address array. */
		parray_alloc = kmalloc(mem->count * sizeof(pte_t),
					GFP_KERNEL);
		if (parray_alloc == NULL) {
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_MMU_PHYS_ALLOC);
			goto fail;
		}

		/* Fetch page addresses. */
		gcerror = get_physical_pages(mem, parray_alloc, vacant);
		if (gcerror != GCERR_NONE)
			goto fail;

		parray = parray_alloc;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"physical page array allocated (0x%08X)\n",
			__func__, __LINE__, (unsigned int) parray);
	} else {
		parray = mem->pages;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"physical page array provided (0x%08X)\n",
			__func__, __LINE__, (unsigned int) parray);
	}

	/*
	 * Allocate slave tables as necessary.
	 */

	mtlb_idx = vacant->mtlb;
	stlb_idx = vacant->stlb;
	count = mem->count;

#if MMU_ENABLE
	mtlb_logical = &ctxt->master.logical[mtlb_idx];
	stlb_array = &ctxt->slave[mtlb_idx];
#endif

	for (i = 0; count > 0; i += 1) {
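		/* One pass per slave (second-level) table spanned by the
		 * mapping; allocate the table if its master entry is vacant. */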
#if MMU_ENABLE
		if (mtlb_logical[i] == MMU_MTLB_ENTRY_VACANT) {
			gcerror = mmu2d_allocate_slave(ctxt, &stlb);
			if (gcerror != GCERR_NONE)
				goto fail;

			mtlb_logical[i]
				= (stlb->pages.physical & MMU_MTLB_SLAVE_MASK)
				| MMU_MTLB_4K_PAGE
				| MMU_MTLB_EXCEPTION
				| MMU_MTLB_PRESENT;

			stlb_array[i] = stlb;
		}
#endif

		available = MMU_STLB_ENTRY_NUM - stlb_idx;

		if (available > count) {
			available = count;
			next_idx = stlb_idx + count;
		} else {
			mtlb_idx += 1;
			next_idx = 0;
		}

#if MMU_ENABLE
		stlb_logical = &stlb_array[i]->pages.logical[stlb_idx];
		stlb_array[i]->count += available;

		for (j = 0; j < available; j += 1) {
			stlb_logical[j]
				= (*parray & MMU_STLB_ADDRESS_MASK)
				| MMU_STLB_PRESENT
				| MMU_STLB_EXCEPTION
				| MMU_STLB_WRITEABLE;

			parray += 1;
		}
#endif

		count -= available;
		stlb_idx = next_idx;
	}

	/*
	 * Claim arena.
	 */

	mem->pagesize = MMU_PAGE_SIZE;

	if (vacant->count != mem->count) {
		gcerror = mmu2d_get_arena(ctxt->mmu, &split);
		if (gcerror != GCERR_NONE)
			goto fail;

		split->mtlb  = mtlb_idx;
		split->stlb  = stlb_idx;
		split->count = vacant->count - mem->count;
		split->next  = vacant->next;
		vacant->next = split;
		vacant->count = mem->count;
	}

	if (prev == NULL)
		ctxt->vacant = vacant->next;
	else
		prev->next = vacant->next;

	vacant->next = ctxt->allocated;
	ctxt->allocated = vacant;

	*mapped = vacant;

#if MMU_ENABLE
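	/* Pack the GPU virtual address from the master/slave table indices
	 * and the page offset. */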
	vacant->address
		= ((vacant->mtlb << MMU_MTLB_SHIFT) & MMU_MTLB_MASK)
		| ((vacant->stlb << MMU_STLB_SHIFT) & MMU_STLB_MASK)
		| (mem->offset & MMU_OFFSET_MASK);
#else
	vacant->address = mem->offset + ((parray_alloc == NULL)
		? *mem->pages : *parray_alloc);
#endif

	vacant->size = mem->count * MMU_PAGE_SIZE - mem->offset;

fail:
	if (parray_alloc != NULL) {
		kfree(parray_alloc);

		if (gcerror != GCERR_NONE)
			release_physical_pages(vacant);
	}

	return gcerror;
}
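
The "Claim arena" step above is a first-fit allocation with a split: the first vacant arena large enough is taken, a new arena is carved out for the unused tail, and the claimed arena is moved to the head of the allocated list. A minimal sketch of that policy over a flat page index (illustrative names, not the driver API):

#include <stdio.h>
#include <stdlib.h>

struct arena {
	unsigned int start, count;
	struct arena *next;
};

/* First-fit: claim 'want' pages from the sorted vacant list, splitting
 * the chosen arena when it is larger than needed.  Returns the claimed
 * arena (now on the allocated list) or NULL when nothing fits. */
static struct arena *claim(struct arena **vacant, struct arena **allocated,
			   unsigned int want)
{
	struct arena *prev = NULL, *found = *vacant;

	while (found && found->count < want) {
		prev = found;
		found = found->next;
	}
	if (!found)
		return NULL;

	if (found->count != want) {
		/* Carve the unused tail into a new vacant arena. */
		struct arena *split = malloc(sizeof(*split));
		split->start = found->start + want;
		split->count = found->count - want;
		split->next = found->next;
		found->next = split;
		found->count = want;
	}

	/* Unlink from the vacant list... */
	if (prev)
		prev->next = found->next;
	else
		*vacant = found->next;

	/* ...and push onto the allocated list. */
	found->next = *allocated;
	*allocated = found;
	return found;
}

int main(void)
{
	struct arena *allocated = NULL;
	struct arena whole = { 0, 1024, NULL };	/* one big vacant arena */
	struct arena *vacant = &whole;

	struct arena *m = claim(&vacant, &allocated, 40);
	if (m)
		printf("mapped start=%u count=%u, vacant left=%u\n",
		       m->start, m->count, vacant ? vacant->count : 0);
	return 0;
}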