Example #1
static enum gcerror alloc_fixup(struct gcfixup **gcfixup)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gcfixup *temp;

	GCLOCK(&g_fixuplock);

	if (list_empty(&g_fixupvac)) {
		temp = kmalloc(sizeof(struct gcfixup), GFP_KERNEL);
		if (temp == NULL) {
			GCERR("out of memory.\n");
			gcerror = GCERR_SETGRP(GCERR_OODM,
					       GCERR_IOCTL_FIXUP_ALLOC);
			goto exit;
		}
	} else {
		struct list_head *head;
		head = g_fixupvac.next;
		temp = list_entry(head, struct gcfixup, link);
		list_del(head);
	}

	INIT_LIST_HEAD(&temp->link);
	*gcfixup = temp;

exit:
	/* Release the lock on the success and the error path alike. */
	GCUNLOCK(&g_fixuplock);
	return gcerror;
}
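Example #1 shows a recycle-or-allocate idiom the driver uses throughout: take a lock, pop a record off a vacant list if one is available, fall back to kmalloc() otherwise, and release the lock on every exit path. Below is a minimal userspace sketch of the same idiom, using plain C and pthreads instead of the kernel's list_head and GCLOCK machinery; all names are illustrative, not part of the driver.

#include <stdlib.h>
#include <pthread.h>

struct record {
	struct record *next;	/* vacant-list linkage */
	int payload;
};

static struct record *vacant;	/* head of the vacant list */
static pthread_mutex_t vacant_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop a recycled record if one exists, otherwise allocate a fresh one. */
static struct record *record_get(void)
{
	struct record *temp;

	pthread_mutex_lock(&vacant_lock);
	temp = vacant;
	if (temp != NULL)
		vacant = temp->next;
	pthread_mutex_unlock(&vacant_lock);

	return (temp != NULL) ? temp : malloc(sizeof(struct record));
}

/* Return a record to the vacant list instead of freeing it. */
static void record_put(struct record *temp)
{
	pthread_mutex_lock(&vacant_lock);
	temp->next = vacant;
	vacant = temp;
	pthread_mutex_unlock(&vacant_lock);
}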
Example #2
static enum gcerror mmu2d_get_arena(struct mmu2dprivate *mmu,
					struct mmu2darena **arena)
{
	int i;
	struct mmu2darenablock *block;
	struct mmu2darena *temp;

	if (mmu->arena_recs == NULL) {
		block = kmalloc(ARENA_PREALLOC_SIZE, GFP_KERNEL);
		if (block == NULL)
			return GCERR_SETGRP(GCERR_OODM, GCERR_MMU_ARENA_ALLOC);

		block->next = mmu->arena_blocks;
		mmu->arena_blocks = block;

		temp = (struct mmu2darena *)(block + 1);
		for (i = 0; i < ARENA_PREALLOC_COUNT; i += 1) {
			temp->next = mmu->arena_recs;
			mmu->arena_recs = temp;
			temp += 1;
		}
	}

	*arena = mmu->arena_recs;
	mmu->arena_recs = mmu->arena_recs->next;

	return GCERR_NONE;
}
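mmu2d_get_arena refills its freelist in batches: a single kmalloc() of ARENA_PREALLOC_SIZE yields one block header followed by ARENA_PREALLOC_COUNT arena records, and the `block + 1` pointer arithmetic lands on the first record just past the header. A hedged userspace sketch of that carving step follows; the names and the count of 16 are illustrative assumptions.

#include <stdlib.h>

#define PREALLOC_COUNT 16

struct arena {
	struct arena *next;
};

struct arena_block {
	struct arena_block *next;	/* chain of raw allocations */
};

/* One allocation provides the block header plus PREALLOC_COUNT records. */
#define PREALLOC_SIZE \
	(sizeof(struct arena_block) + PREALLOC_COUNT * sizeof(struct arena))

static int refill(struct arena_block **blocks, struct arena **freelist)
{
	struct arena_block *block;
	struct arena *temp;
	int i;

	block = malloc(PREALLOC_SIZE);
	if (block == NULL)
		return -1;

	/* Keep the raw block on a chain so it can be freed later. */
	block->next = *blocks;
	*blocks = block;

	/* The records start immediately after the header. */
	temp = (struct arena *)(block + 1);
	for (i = 0; i < PREALLOC_COUNT; i += 1) {
		temp->next = *freelist;
		*freelist = temp;
		temp += 1;
	}

	return 0;
}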
Example #3
static enum gcerror get_arena(struct gcmmu *gcmmu, struct gcmmuarena **arena)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gcmmuarena *temp;

	GCENTER(GCZONE_ARENA);

	GCLOCK(&gcmmu->lock);

	if (list_empty(&gcmmu->vacarena)) {
		temp = kmalloc(sizeof(struct gcmmuarena), GFP_KERNEL);
		if (temp == NULL) {
			GCERR("arena entry allocation failed.\n");
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_MMU_ARENA_ALLOC);
			goto exit;
		}
	} else {
		struct list_head *head;
		head = gcmmu->vacarena.next;
		temp = list_entry(head, struct gcmmuarena, link);
		list_del(head);
	}

	*arena = temp;

exit:
	GCUNLOCK(&gcmmu->lock);

	GCEXITARG(GCZONE_ARENA, "gc%s = 0x%08X\n",
		(gcerror == GCERR_NONE) ? "result" : "error", gcerror);
	return gcerror;
}
Example #4
static enum gcerror get_fixup(struct gcfixup **gcfixup)
{
	enum gcerror gcerror;
	int bufferlocked = 0;
	struct gcfixup *temp;

	/* Acquire fixup access mutex. */
	gcerror = gc_acquire_mutex(&g_bufferlock, GC_INFINITE);
	if (gcerror != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to acquire mutex (0x%08X).\n",
			__func__, __LINE__, gcerror);
		gcerror = GCERR_SETGRP(gcerror, GCERR_IOCTL_FIXUP_ALLOC);
		goto exit;
	}
	bufferlocked = 1;

	if (g_fixupvacant == NULL) {
		temp = kmalloc(sizeof(struct gcfixup), GFP_KERNEL);
		if (temp == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"out of memory.\n",
				__func__, __LINE__);
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_IOCTL_FIXUP_ALLOC);
			goto exit;
		}
	} else {
		temp = g_fixupvacant;
		g_fixupvacant = g_fixupvacant->next;
	}

	*gcfixup = temp;

exit:
	if (bufferlocked)
		mutex_unlock(&g_bufferlock);

	return gcerror;
}
Example #5
enum gcerror gcmmu_init(struct gccorecontext *gccorecontext)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;

	GCENTER(GCZONE_INIT);

	/* Initialize access lock. */
	GCLOCK_INIT(&gcmmu->lock);

	/* Allocate one page. */
	gcerror = gc_alloc_noncached(&gcmmu->gcpage, PAGE_SIZE);
	if (gcerror != GCERR_NONE) {
		GCERR("failed to allocate MMU management buffer.\n");
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_SAFE_ALLOC);
		goto exit;
	}

	/* Determine the location of the physical command buffer. */
	gcmmu->cmdbufphys = gcmmu->gcpage.physical;
	gcmmu->cmdbuflog = gcmmu->gcpage.logical;
	gcmmu->cmdbufsize = PAGE_SIZE - GCMMU_SAFE_ZONE_SIZE;

	/* Determine the location of the safe zone. */
	gcmmu->safezonephys = gcmmu->gcpage.physical + gcmmu->cmdbufsize;
	gcmmu->safezonelog = (unsigned int *) ((unsigned char *)
		gcmmu->gcpage.logical + gcmmu->cmdbufsize);
	gcmmu->safezonesize = GCMMU_SAFE_ZONE_SIZE;

	/* Reset the master table. */
	gcmmu->master = ~0U;

	/* Initialize the list of vacant arenas. */
	INIT_LIST_HEAD(&gcmmu->vacarena);

exit:
	GCEXITARG(GCZONE_INIT, "gc%s = 0x%08X\n",
		(gcerror == GCERR_NONE) ? "result" : "error", gcerror);
	return gcerror;
}
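gcmmu_init splits one non-cached page into two regions: the command buffer fills the front, and the last GCMMU_SAFE_ZONE_SIZE bytes form the safe zone that faulting MMU accesses are redirected to. A sketch of the layout arithmetic, assuming a 4096-byte page and a 64-byte safe zone (the real values come from the driver headers, not from this snippet):

#include <stdint.h>

/* Assumed sizes; the driver takes these from its headers. */
#define PAGE_SIZE		4096u
#define GCMMU_SAFE_ZONE_SIZE	64u

struct pagelayout {
	uint32_t cmdbufphys;	/* command buffer, front of the page */
	uint32_t cmdbufsize;
	uint32_t safezonephys;	/* safe zone, tail of the page */
	uint32_t safezonesize;
};

static void layout_page(uint32_t pagephys, struct pagelayout *l)
{
	l->cmdbufphys   = pagephys;
	l->cmdbufsize   = PAGE_SIZE - GCMMU_SAFE_ZONE_SIZE;	/* 4032 */
	l->safezonephys = pagephys + l->cmdbufsize;
	l->safezonesize = GCMMU_SAFE_ZONE_SIZE;
}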
Example #6
static enum gcerror put_buffer_tree(struct gcbuffer *gcbuffer)
{
	enum gcerror gcerror;
	int bufferlocked = 0;
	struct gcbuffer *prev;
	struct gcbuffer *curr;

	/* Acquire buffer access mutex. */
	gcerror = gc_acquire_mutex(&g_bufferlock, GC_INFINITE);
	if (gcerror != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to acquire mutex (0x%08X).\n",
			__func__, __LINE__, gcerror);
		gcerror = GCERR_SETGRP(gcerror, GCERR_IOCTL_BUF_ALLOC);
		goto exit;
	}
	bufferlocked = 1;

	/* Return all fixups and buffers in the chain to the vacant lists. */
	prev = NULL;
	curr = gcbuffer;
	while (curr != NULL) {
		if (curr->fixuphead != NULL) {
			curr->fixuptail->next = g_fixupvacant;
			g_fixupvacant = curr->fixuphead;
		}

		prev = curr;
		curr = curr->next;
	}

	prev->next = g_buffervacant;
	g_buffervacant = gcbuffer;

exit:
	if (bufferlocked)
		mutex_unlock(&g_bufferlock);

	return gcerror;
}
Example #7
enum gcerror gcmmu_create_context(struct gccorecontext *gccorecontext,
				  struct gcmmucontext *gcmmucontext,
				  pid_t pid)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct gcmmuarena *arena = NULL;
	unsigned int *logical;
	unsigned int i;

	GCENTER(GCZONE_CONTEXT);

	if (gcmmucontext == NULL) {
		gcerror = GCERR_MMU_CTXT_BAD;
		goto exit;
	}

	/* Reset the context. */
	memset(gcmmucontext, 0, sizeof(struct gcmmucontext));

	/* Initialize access lock. */
	GCLOCK_INIT(&gcmmucontext->lock);

	/* Initialize arena lists. */
	INIT_LIST_HEAD(&gcmmucontext->vacant);
	INIT_LIST_HEAD(&gcmmucontext->allocated);

	/* Mark context as dirty. */
	gcmmucontext->dirty = true;

	/* Set PID. */
	gcmmucontext->pid = pid;

	/* Allocate MTLB table. */
	gcerror = gc_alloc_cached(&gcmmucontext->master, GCMMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto exit;
	}

	/* Invalidate MTLB entries. */
	logical = gcmmucontext->master.logical;
	for (i = 0; i < GCMMU_MTLB_ENTRY_NUM; i += 1)
		logical[i] = GCMMU_MTLB_ENTRY_VACANT;

	/* Set MMU table mode. */
	gcmmucontext->mmuconfig.reg.master_mask
		= GCREG_MMU_CONFIGURATION_MASK_MODE_ENABLED;
	gcmmucontext->mmuconfig.reg.master = GCMMU_MTLB_MODE;

	/* Set the table address. */
	gcmmucontext->mmuconfig.reg.address_mask
		= GCREG_MMU_CONFIGURATION_MASK_ADDRESS_ENABLED;
	gcmmucontext->mmuconfig.reg.address
		= GCGETFIELD(gcmmucontext->master.physical,
			     GCREG_MMU_CONFIGURATION, ADDRESS);

	/* Allocate the first vacant arena. */
	gcerror = get_arena(gcmmu, &arena);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Entire range is currently vacant. */
	arena->start.absolute = 0;
	arena->end.absolute =
	arena->count = GCMMU_MTLB_ENTRY_NUM * GCMMU_STLB_ENTRY_NUM;
	list_add(&arena->link, &gcmmucontext->vacant);
	GCDUMPARENA(GCZONE_ARENA, "initial vacant arena", arena);

	/* Map the command queue. */
	gcerror = gcqueue_map(gccorecontext, gcmmucontext);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Reference MMU. */
	gcmmu->refcount += 1;

	GCEXIT(GCZONE_CONTEXT);
	return GCERR_NONE;

exit:
	gcmmu_destroy_context(gccorecontext, gcmmucontext);

	GCEXITARG(GCZONE_CONTEXT, "gcerror = 0x%08X\n", gcerror);
	return gcerror;
}
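The initial vacant arena spans every translatable page. Assuming the usual two-level layout of a 1024-entry master table whose entries each point to a 1024-entry slave table over 4 KB pages (an assumption about this hardware, not stated in the snippet), GCMMU_MTLB_ENTRY_NUM * GCMMU_STLB_ENTRY_NUM comes to 1,048,576 pages, i.e. the full 4 GB address space. A quick self-check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define GCMMU_MTLB_ENTRY_NUM	1024u	/* assumed */
#define GCMMU_STLB_ENTRY_NUM	1024u	/* assumed */
#define GCMMU_PAGE_SIZE		4096u	/* assumed */

int main(void)
{
	uint64_t pages = (uint64_t)GCMMU_MTLB_ENTRY_NUM * GCMMU_STLB_ENTRY_NUM;

	/* 1024 * 1024 pages of 4 KB each cover the full 32-bit space. */
	assert(pages * GCMMU_PAGE_SIZE == (1ull << 32));
	return 0;
}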
Example #8
static enum gcerror get_physical_pages(struct gcmmuphysmem *mem,
					pte_t *parray,
					struct gcmmuarena *arena)
{
	enum gcerror gcerror = GCERR_NONE;
	struct vm_area_struct *vma;
	struct page **pages = NULL;
	unsigned int base, write;
	int i, count = 0;

	/* Reset page descriptor array. */
	arena->pages = NULL;

	/* Get base address shortcut. */
	base = mem->base;

	/* Store the logical pointer. */
	arena->logical = (void *) base;

	/*
	 * Important Note: base is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here.
	 */

	vma = find_vma(current->mm, base + (mem->count << PAGE_SHIFT) - 1);
	if ((vma == NULL) || (base < vma->vm_start)) {
		gcerror = GCERR_MMU_BUFFER_BAD;
		goto exit;
	}

	/* Allocate page descriptor array. */
	pages = kmalloc(mem->count * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_DESC_ALLOC);
		goto exit;
	}

	/* Query page descriptors. */
	write = ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) != 0) ? 1 : 0;
	count = get_user_pages(current, current->mm, base, mem->count,
				write, 1, pages, NULL);

	if (count < 0) {
		/* Kernel allocated buffer. */
		for (i = 0; i < mem->count; i += 1) {
			gcerror = virt2phys(base, &parray[i]);
			if (gcerror != GCERR_NONE)
				goto exit;

			base += mem->pagesize;
		}
	} else if (count == mem->count) {
		/* User allocated buffer. */
		for (i = 0; i < mem->count; i += 1) {
			parray[i] = page_to_phys(pages[i]);
			if (phys_to_page(parray[i]) != pages[i]) {
				gcerror = GCERR_MMU_PAGE_BAD;
				goto exit;
			}
		}

		/* Set page descriptor array. */
		arena->pages = pages;
	} else {
		gcerror = GCERR_MMU_BUFFER_BAD;
		goto exit;
	}

exit:
	if (arena->pages == NULL) {
		for (i = 0; i < count; i += 1)
			page_cache_release(pages[i]);

		kfree(pages);
	}

	return gcerror;
}
Example #9
static enum gcerror allocate_slave(struct gcmmucontext *gcmmucontext,
				   union gcmmuloc index)
{
	enum gcerror gcerror;
	struct gcmmustlbblock *block = NULL;
	struct gcmmustlb *slave;
	unsigned int *mtlblogical;
	unsigned int prealloccount;
	unsigned int preallocsize;
	unsigned int preallocentries;
	unsigned int physical;
	unsigned int *logical;
	unsigned int i;

	GCENTER(GCZONE_MAPPING);

	/* Allocate a new prealloc block wrapper. */
	block = kmalloc(sizeof(struct gcmmustlbblock), GFP_KERNEL);
	if (block == NULL) {
		GCERR("failed to allocate slave page table wrapper\n");
		gcerror = GCERR_SETGRP(GCERR_OODM,
				       GCERR_MMU_STLB_ALLOC);
		goto exit;
	}

	/* Determine the number and the size of tables to allocate. */
	prealloccount = min(GCMMU_STLB_PREALLOC_COUNT,
			    GCMMU_MTLB_ENTRY_NUM - index.loc.mtlb);

	preallocsize = prealloccount * GCMMU_STLB_SIZE;
	preallocentries = prealloccount * GCMMU_STLB_ENTRY_NUM;

	GCDBG(GCZONE_MAPPING, "preallocating %d slave tables.\n",
	      prealloccount);

	/* Allocate slave table pool. */
	gcerror = gc_alloc_cached(&block->pages, preallocsize);
	if (gcerror != GCERR_NONE) {
		GCERR("failed to allocate slave page table\n");
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_STLB_ALLOC);
		goto exit;
	}

	/* Add the block to the list. */
	block->next = gcmmucontext->slavealloc;
	gcmmucontext->slavealloc = block;

	/* Get shortcuts to the pointers. */
	physical = block->pages.physical;
	logical = block->pages.logical;

	/* Invalidate all slave entries. */
	for (i = 0; i < preallocentries; i += 1)
		logical[i] = GCMMU_STLB_ENTRY_VACANT;

	/* Init the slaves. */
	slave = &gcmmucontext->slave[index.loc.mtlb];
	mtlblogical = &gcmmucontext->master.logical[index.loc.mtlb];

	for (i = 0; i < prealloccount; i += 1) {
		mtlblogical[i]
			= (physical & GCMMU_MTLB_SLAVE_MASK)
			| GCMMU_MTLB_4K_PAGE
			| GCMMU_MTLB_EXCEPTION
			| GCMMU_MTLB_PRESENT;

		slave[i].physical = physical;
		slave[i].logical = logical;

		physical += GCMMU_STLB_SIZE;
		logical = (unsigned int *)
			((unsigned char *) logical + GCMMU_STLB_SIZE);
	}

	/* Flush CPU cache. */
	gc_flush_region(gcmmucontext->master.physical,
			gcmmucontext->master.logical,
			index.loc.mtlb * sizeof(unsigned int),
			prealloccount * sizeof(unsigned int));

	GCEXIT(GCZONE_MAPPING);
	return GCERR_NONE;

exit:
	kfree(block);

	GCEXITARG(GCZONE_MAPPING, "gc%s = 0x%08X\n",
		(gcerror == GCERR_NONE) ? "result" : "error", gcerror);
	return gcerror;
}
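The gc_flush_region() call at the end of allocate_slave is deliberately narrow: only the master-table entries just rewritten are flushed from the CPU cache, so both the offset and the size are counted in sizeof(unsigned int) units. A small sketch of that byte-range computation; the gc_flush_region prototype below is inferred from the call site above, and the wrapper itself is hypothetical.

/* Prototype inferred from the call in allocate_slave. */
extern void gc_flush_region(unsigned int physical, void *logical,
			    unsigned int offset, unsigned int size);

/* Hypothetical helper: flush only MTLB entries [index, index + count). */
static void flush_mtlb_entries(unsigned int physical, unsigned int *logical,
			       unsigned int index, unsigned int count)
{
	gc_flush_region(physical, logical,
			index * sizeof(unsigned int),
			count * sizeof(unsigned int));
}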
Example #10
static enum gcerror find_context(struct gccontextmap **context, int create)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gccontextmap *prev;
	struct gccontextmap *curr;
	pid_t pid;

	/* Get current PID. */
	pid = current->tgid;

	/* Search the list. */
	prev = NULL;
	curr = g_map;

	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"scanning existing records for pid %d.\n",
		__func__, __LINE__, pid);

	/* Try to locate the record. */
	while (curr != NULL) {
		/* Found the record? */
		if (curr->pid == pid) {
			/* Move to the top of the list. */
			if (prev != NULL) {
				prev->next = curr->next;
				if (curr->next != NULL)
					curr->next->prev = prev;

				curr->prev = NULL;
				curr->next = g_map;
				g_map->prev = curr;
				g_map = curr;
			}

			/* Success. */
			GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
				"record is found @ 0x%08X\n",
				__func__, __LINE__, (unsigned int) curr);

			*context = curr;
			goto exit;
		}

		/* Get the next record. */
		prev = curr;
		curr = curr->next;
	}

	/* Not found, do we need to create a new one? */
	if (!create) {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, exiting.\n",
			__func__, __LINE__);
		gcerror = GCERR_NOT_FOUND;
		goto exit;
	}

	/* Get new record. */
	if (g_mapvacant == NULL) {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, allocating.\n",
			__func__, __LINE__);

		curr = kmalloc(sizeof(struct gccontextmap), GFP_KERNEL);
		if (curr == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"out of memory.\n",
				__func__, __LINE__);
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_IOCTL_CTX_ALLOC);
			goto exit;
		}

		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"allocated @ 0x%08X\n",
			__func__, __LINE__, (unsigned int) curr);
	} else {
		GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
			"not found, reusing record @ 0x%08X\n",
			__func__, __LINE__, (unsigned int) g_mapvacant);

		curr = g_mapvacant;
		g_mapvacant = g_mapvacant->next;
	}

	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"creating new context.\n",
		__func__, __LINE__);

	curr->context = kzalloc(sizeof(*curr->context), GFP_KERNEL);
	if (curr->context == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_CTX_ALLOC);
		goto exit;
	}

	gcerror = mmu2d_create_context(&curr->context->mmu);
	if (gcerror != GCERR_NONE)
		goto free_map_ctx;

#if MMU_ENABLE
	gcerror = cmdbuf_map(&curr->context->mmu);
	if (gcerror != GCERR_NONE)
		goto free_2d_ctx;
#endif

	curr->context->mmu_dirty = true;

	g_clientref += 1;

	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"new context created @ 0x%08X\n",
		__func__, __LINE__, (unsigned int) curr->context);

	/* Set the PID. */
	curr->pid = pid;

	/* Add to the list. */
	curr->prev = NULL;
	curr->next = g_map;
	if (g_map != NULL)
		g_map->prev = curr;
	g_map = curr;

	/* Set return value. */
	*context = curr;
	goto exit;

free_2d_ctx:
	mmu2d_destroy_context(&curr->context->mmu);
free_map_ctx:
	GCPRINT(GCDBGFILTER, GCZONE_CONTEXT, GC_MOD_PREFIX
		"failed to create a context.\n",
		__func__, __LINE__);

	kfree(curr->context);

	/* Return the unused record to the vacant list. */
	curr->next = g_mapvacant;
	g_mapvacant = curr;
exit:
	return gcerror;
}
Example #11
enum gcerror mmu2d_map(struct mmu2dcontext *ctxt, struct mmu2dphysmem *mem,
			struct mmu2darena **mapped)
{
	enum gcerror gcerror = GCERR_NONE;
	struct mmu2darena *prev, *vacant, *split;
#if MMU_ENABLE
	struct mmu2dstlb *stlb = NULL;
	struct mmu2dstlb **stlb_array;
	u32 *mtlb_logical, *stlb_logical;
#endif
	u32 mtlb_idx, stlb_idx, next_idx;
#if MMU_ENABLE
	u32 i, j, count, available;
#else
	u32 i, count, available;
#endif
	pte_t *parray_alloc = NULL;
	pte_t *parray;

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	if ((mem == NULL) || (mem->count <= 0) || (mapped == NULL) ||
		((mem->pagesize != 0) && (mem->pagesize != MMU_PAGE_SIZE)))
		return GCERR_MMU_ARG;

	/*
	 * Find available sufficient arena.
	 */

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"mapping (%d) pages\n",
		__func__, __LINE__, mem->count);

	prev = NULL;
	vacant = ctxt->vacant;

	while (vacant != NULL) {
		if (vacant->count >= mem->count)
			break;
		prev = vacant;
		vacant = vacant->next;
	}

	if (vacant == NULL) {
		gcerror = GCERR_MMU_OOM;
		goto fail;
	}

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"found vacant arena:\n",
		__func__, __LINE__);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  mtlb=%d\n",
		__func__, __LINE__, vacant->mtlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  stlb=%d\n",
		__func__, __LINE__, vacant->stlb);
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
		"  count=%d (needed %d)\n",
		__func__, __LINE__, vacant->count, mem->count);

	/*
	 * Create page array.
	 */

	/* Reset page array. */
	vacant->pages = NULL;

	/* No page array given? */
	if (mem->pages == NULL) {
		/* Allocate physical address array. */
		parray_alloc = kmalloc(mem->count * sizeof(pte_t),
					GFP_KERNEL);
		if (parray_alloc == NULL) {
			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_MMU_PHYS_ALLOC);
			goto fail;
		}

		/* Fetch page addresses. */
		gcerror = get_physical_pages(mem, parray_alloc, vacant);
		if (gcerror != GCERR_NONE)
			goto fail;

		parray = parray_alloc;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"physical page array allocated (0x%08X)\n",
			__func__, __LINE__, (unsigned int) parray);
	} else {
		parray = mem->pages;

		GCPRINT(GCDBGFILTER, GCZONE_MAPPING, GC_MOD_PREFIX
			"physical page array provided (0x%08X)\n",
			__func__, __LINE__, (unsigned int) parray);
	}

	/*
	 * Allocate slave tables as necessary.
	 */

	mtlb_idx = vacant->mtlb;
	stlb_idx = vacant->stlb;
	count = mem->count;

#if MMU_ENABLE
	mtlb_logical = &ctxt->master.logical[mtlb_idx];
	stlb_array = &ctxt->slave[mtlb_idx];
#endif

	for (i = 0; count > 0; i += 1) {
#if MMU_ENABLE
		if (mtlb_logical[i] == MMU_MTLB_ENTRY_VACANT) {
			gcerror = mmu2d_allocate_slave(ctxt, &stlb);
			if (gcerror != GCERR_NONE)
				goto fail;

			mtlb_logical[i]
				= (stlb->pages.physical & MMU_MTLB_SLAVE_MASK)
				| MMU_MTLB_4K_PAGE
				| MMU_MTLB_EXCEPTION
				| MMU_MTLB_PRESENT;

			stlb_array[i] = stlb;
		}
#endif

		available = MMU_STLB_ENTRY_NUM - stlb_idx;

		if (available > count) {
			available = count;
			next_idx = stlb_idx + count;
		} else {
			mtlb_idx += 1;
			next_idx = 0;
		}

#if MMU_ENABLE
		stlb_logical = &stlb_array[i]->pages.logical[stlb_idx];
		stlb_array[i]->count += available;

		for (j = 0; j < available; j += 1) {
			stlb_logical[j]
				= (*parray & MMU_STLB_ADDRESS_MASK)
				| MMU_STLB_PRESENT
				| MMU_STLB_EXCEPTION
				| MMU_STLB_WRITEABLE;

			parray += 1;
		}
#endif

		count -= available;
		stlb_idx = next_idx;
	}

	/*
	 * Claim arena.
	 */

	mem->pagesize = MMU_PAGE_SIZE;

	if (vacant->count != mem->count) {
		gcerror = mmu2d_get_arena(ctxt->mmu, &split);
		if (gcerror != GCERR_NONE)
			goto fail;

		split->mtlb  = mtlb_idx;
		split->stlb  = stlb_idx;
		split->count = vacant->count - mem->count;
		split->next  = vacant->next;
		vacant->next = split;
		vacant->count = mem->count;
	}

	if (prev == NULL)
		ctxt->vacant = vacant->next;
	else
		prev->next = vacant->next;

	vacant->next = ctxt->allocated;
	ctxt->allocated = vacant;

	*mapped = vacant;

#if MMU_ENABLE
	vacant->address
		= ((vacant->mtlb << MMU_MTLB_SHIFT) & MMU_MTLB_MASK)
		| ((vacant->stlb << MMU_STLB_SHIFT) & MMU_STLB_MASK)
		| (mem->offset & MMU_OFFSET_MASK);
#else
	vacant->address = mem->offset + ((parray_alloc == NULL)
		? *mem->pages : *parray_alloc);
#endif

	vacant->size = mem->count * MMU_PAGE_SIZE - mem->offset;

fail:
	if (parray_alloc != NULL) {
		kfree(parray_alloc);

		if (gcerror != GCERR_NONE)
			release_physical_pages(vacant);
	}

	return gcerror;
}
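With MMU_ENABLE set, mmu2d_map composes the returned GPU virtual address from the arena's position: the master-table index in the high bits, the slave-table index in the middle, and the byte offset within the first page in the low bits. A sketch of the packing, assuming 4 KB pages and 1024-entry tables so that MMU_STLB_SHIFT is 12 and MMU_MTLB_SHIFT is 22 (the actual constants live in the driver headers):

#include <stdint.h>

/* Assumed layout: 4 KB pages, 1024-entry slave tables. */
#define MMU_STLB_SHIFT	12
#define MMU_MTLB_SHIFT	22
#define MMU_OFFSET_MASK	0x00000FFFu
#define MMU_STLB_MASK	0x003FF000u
#define MMU_MTLB_MASK	0xFFC00000u

static uint32_t pack_address(uint32_t mtlb, uint32_t stlb, uint32_t offset)
{
	return ((mtlb << MMU_MTLB_SHIFT) & MMU_MTLB_MASK)
	     | ((stlb << MMU_STLB_SHIFT) & MMU_STLB_MASK)
	     | (offset & MMU_OFFSET_MASK);
}

/* Example: mtlb=1, stlb=2, offset=0x30 packs to 0x00402030. */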
Example #12
enum gcerror mmu2d_set_master(struct mmu2dcontext *ctxt)
{
#if MMU_ENABLE
	enum gcerror gcerror;
	struct gcmommumaster *gcmommumaster;
	struct gcmommuinit *gcmommuinit;
	unsigned int size, status, enabled;
	struct mmu2dprivate *mmu = get_mmu();

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	/* Read the MMU status. */
	status = gc_read_reg(GCREG_MMU_CONTROL_Address);
	enabled = GETFIELD(status, GCREG_MMU_CONTROL, ENABLE);

	/* Is MMU enabled? */
	if (enabled) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is already enabled.\n",
			__func__, __LINE__);

		/* Allocate command buffer space. */
		gcerror = cmdbuf_alloc(sizeof(struct gcmommumaster),
					(void **) &gcmommumaster, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_SET);

		/* Program master table address. */
		gcmommumaster->master_ldst = gcmommumaster_master_ldst;
		gcmommumaster->master = ctxt->physical;
	} else {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is disabled, enabling.\n",
			__func__, __LINE__);

		/* MMU disabled, force physical mode. */
		cmdbuf_physical(true);

		/* Allocate command buffer space. */
		size = sizeof(struct gcmommuinit) + cmdbuf_flush(NULL);
		gcerror = cmdbuf_alloc(size, (void **) &gcmommuinit, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_INIT);

		/* Program the safe zone and the master table address. */
		gcmommuinit->safe_ldst = gcmommuinit_safe_ldst;
		gcmommuinit->safe = mmu->safezone.physical;
		gcmommuinit->mtlb = ctxt->physical;

		/* Execute the buffer. */
		cmdbuf_flush(gcmommuinit + 1);

		/* Resume normal mode. */
		cmdbuf_physical(false);

		/*
		 * Enable MMU. For security reasons, once it is enabled,
		 * the only way to disable it is to reset the system.
		 */
		gc_write_reg(
			GCREG_MMU_CONTROL_Address,
			SETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE));
	}

	return GCERR_NONE;
#else
	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	return GCERR_NONE;
#endif
}
Example #13
enum gcerror mmu2d_create_context(struct mmu2dcontext *ctxt)
{
	enum gcerror gcerror;

#if MMU_ENABLE
	int i;
#endif

	struct mmu2dprivate *mmu = get_mmu();

	if (ctxt == NULL)
		return GCERR_MMU_CTXT_BAD;

	memset(ctxt, 0, sizeof(struct mmu2dcontext));

#if MMU_ENABLE
	/* Allocate MTLB table. */
	gcerror = gc_alloc_pages(&ctxt->master, MMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto fail;
	}

	/* Allocate an array of pointers to slave descriptors. */
	ctxt->slave = kmalloc(MMU_MTLB_SIZE, GFP_KERNEL);
	if (ctxt->slave == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLBIDX_ALLOC);
		goto fail;
	}
	memset(ctxt->slave, 0, MMU_MTLB_SIZE);

	/* Invalidate all entries. */
	for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1)
		ctxt->master.logical[i] = MMU_MTLB_ENTRY_VACANT;

	/* Configure the physical address. */
	ctxt->physical
	= SETFIELD(~0U, GCREG_MMU_CONFIGURATION, ADDRESS,
	  (ctxt->master.physical >> GCREG_MMU_CONFIGURATION_ADDRESS_Start))
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_ADDRESS, ENABLED)
	& SETFIELD(~0U, GCREG_MMU_CONFIGURATION, MODE, MMU_MTLB_MODE)
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_MODE, ENABLED);
#endif

	/* Allocate the first vacant arena. */
	gcerror = mmu2d_get_arena(mmu, &ctxt->vacant);
	if (gcerror != GCERR_NONE)
		goto fail;

	/* Everything is vacant. */
	ctxt->vacant->mtlb  = 0;
	ctxt->vacant->stlb  = 0;
	ctxt->vacant->count = MMU_MTLB_ENTRY_NUM * MMU_STLB_ENTRY_NUM;
	ctxt->vacant->next  = NULL;

	/* Nothing is allocated. */
	ctxt->allocated = NULL;

#if MMU_ENABLE
	/* Allocate the safe zone. */
	if (mmu->safezone.size == 0) {
		gcerror = gc_alloc_pages(&mmu->safezone,
						MMU_SAFE_ZONE_SIZE);
		if (gcerror != GCERR_NONE) {
			gcerror = GCERR_SETGRP(gcerror,
						GCERR_MMU_SAFE_ALLOC);
			goto fail;
		}

		/* Initialize safe zone to a value. */
		for (i = 0; i < MMU_SAFE_ZONE_SIZE / sizeof(u32); i += 1)
			mmu->safezone.logical[i] = 0xDEADC0DE;
	}
#endif

	/* Reference MMU. */
	mmu->refcount += 1;
	ctxt->mmu = mmu;

	return GCERR_NONE;

fail:
#if MMU_ENABLE
	gc_free_pages(&ctxt->master);
	kfree(ctxt->slave);
#endif

	return gcerror;
}
Example #14
static enum gcerror mmu2d_allocate_slave(struct mmu2dcontext *ctxt,
						struct mmu2dstlb **stlb)
{
	enum gcerror gcerror;
	int i;
	struct mmu2dstlbblock *block;
	struct mmu2dstlb *temp;

	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	if (ctxt->slave_recs == NULL) {
		block = kmalloc(STLB_PREALLOC_SIZE, GFP_KERNEL);
		if (block == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"failed to allocate slave page table wrapper\n",
				__func__, __LINE__);

			gcerror = GCERR_SETGRP(GCERR_OODM,
						GCERR_MMU_STLB_ALLOC);
			goto exit;
		}

		block->next = ctxt->slave_blocks;
		ctxt->slave_blocks = block;

		temp = (struct mmu2dstlb *)(block + 1);
		for (i = 0; i < STLB_PREALLOC_COUNT; i += 1) {
			temp->next = ctxt->slave_recs;
			ctxt->slave_recs = temp;
			temp += 1;
		}
	}

	gcerror = gc_alloc_pages(&ctxt->slave_recs->pages, MMU_STLB_SIZE);
	if (gcerror != GCERR_NONE) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to allocate slave page table\n",
			__func__, __LINE__);

		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_STLB_ALLOC);
		goto exit;
	}

	/* Remove from the list of available records. */
	temp = ctxt->slave_recs;
	ctxt->slave_recs = ctxt->slave_recs->next;

	/* Invalidate all entries. */
	for (i = 0; i < MMU_STLB_ENTRY_NUM; i += 1)
		temp->pages.logical[i] = MMU_STLB_ENTRY_VACANT;

	/* Reset allocated entry count. */
	temp->count = 0;
	*stlb = temp;

exit:
	GCPRINT(GCDBGFILTER, GCZONE_MAPPING, "--" GC_MOD_PREFIX
		"gc%s = 0x%08X\n", __func__, __LINE__,
		(gcerror == GCERR_NONE) ? "result" : "error", gcerror);

	return gcerror;
}