Example #1
enum gcerror gc_alloc_pages(struct gcpage *p, unsigned int size)
{
	enum gcerror gcerror;
	int order;

	p->pages = NULL;
	p->logical = NULL;
	p->physical = ~0UL;

	/* get_order() returns the smallest order for which
	 * (1 << order) * PAGE_SIZE covers the requested size, so the
	 * allocation is rounded up to a power-of-two page count. */
	order = get_order(size);

	p->order = order;
	p->size = (1 << order) * PAGE_SIZE;

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"requested size=%d\n", __func__, __LINE__, size);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"aligned size=%d\n", __func__, __LINE__, p->size);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"order=%d\n", __func__, __LINE__, order);

	/* Allocate a physically contiguous, cache-coherent buffer;
	 * p->physical receives its DMA address. Passing a NULL device
	 * relies on legacy dma_alloc_coherent() behavior. */
	p->logical = dma_alloc_coherent(NULL, p->size, &p->physical,
					GFP_KERNEL);
	if (!p->logical) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"failed to allocate memory\n",
			__func__, __LINE__);

		gcerror = GCERR_OOPM;
		goto fail;
	}

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"container = 0x%08X\n",
		__func__, __LINE__, (unsigned int) p);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"page array=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->pages);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"logical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->logical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"physical=0x%08X\n",
		__func__, __LINE__, (unsigned int) p->physical);

	GCPRINT(GCDBGFILTER, GCZONE_PAGE, GC_MOD_PREFIX
		"size=%d\n",
		__func__, __LINE__, p->size);

	return GCERR_NONE;

fail:
	gc_free_pages(p);
	return gcerror;
}
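
A minimal caller sketch for the allocator above. It is hypothetical: example_alloc_and_clear() is an illustrative name, PAGE_SIZE is an arbitrary request length, and it assumes gc_free_pages() releases a descriptor populated by gc_alloc_pages(), as the fail path above already relies on.

static enum gcerror example_alloc_and_clear(void)
{
	struct gcpage page;
	enum gcerror gcerror;

	/* The request is rounded up to whole pages internally. */
	gcerror = gc_alloc_pages(&page, PAGE_SIZE);
	if (gcerror != GCERR_NONE)
		return gcerror;

	/* page.logical is the kernel virtual address of the buffer;
	 * page.physical is the matching DMA address for the hardware. */
	memset(page.logical, 0, page.size);

	gc_free_pages(&page);
	return GCERR_NONE;
}
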
Example #2
enum gcerror mmu2d_destroy_context(struct mmu2dcontext *ctxt)
{
	int i;
	struct mmu2dstlbblock *nextblock;
	struct mmu2darena *nextarena;

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	if (ctxt->slave != NULL) {
		for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1) {
			if (ctxt->slave[i] != NULL) {
				gc_free_pages(&ctxt->slave[i]->pages);
				ctxt->slave[i] = NULL;
			}
		}
		kfree(ctxt->slave);
		ctxt->slave = NULL;
	}

	gc_free_pages(&ctxt->master);

	/* Free the raw blocks backing the slave records, saving each
	 * next pointer before its node is freed. */
	while (ctxt->slave_blocks != NULL) {
		nextblock = ctxt->slave_blocks->next;
		kfree(ctxt->slave_blocks);
		ctxt->slave_blocks = nextblock;
	}

	ctxt->slave_recs = NULL;

	/* Release both arena lists. The next link is read before the
	 * node is passed to mmu2d_free_arena(), which may recycle it
	 * and overwrite the link. */
	while (ctxt->allocated != NULL) {
		nextarena = ctxt->allocated->next;
		mmu2d_free_arena(ctxt->mmu, ctxt->allocated);
		ctxt->allocated = nextarena;
	}

	while (ctxt->vacant != NULL) {
		nextarena = ctxt->vacant->next;
		mmu2d_free_arena(ctxt->mmu, ctxt->vacant);
		ctxt->vacant = nextarena;
	}

	ctxt->mmu->refcount -= 1;
	ctxt->mmu = NULL;

	return GCERR_NONE;
}
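
The two arena loops above save the next link before releasing each node. A sketch of why, under an assumption about mmu2d_free_arena(): the helper is not shown in these examples, and the body below, including the arena_recs field name, is guessed. It models the arena being pushed onto a per-MMU recycle list, which overwrites the node's next pointer.

/* Hypothetical sketch of the release helper: return an arena
 * record to a recycle list for mmu2d_get_arena() to reuse. The
 * first assignment clobbers arena->next, which is why callers
 * must save the link before calling this. */
static void mmu2d_free_arena(struct mmu2dprivate *mmu,
				struct mmu2darena *arena)
{
	arena->next = mmu->arena_recs;	/* assumed field name */
	mmu->arena_recs = arena;
}
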
Example #3
enum gcerror mmu2d_create_context(struct mmu2dcontext *ctxt)
{
	enum gcerror gcerror;

#if MMU_ENABLE
	int i;
#endif

	struct mmu2dprivate *mmu = get_mmu();

	if (ctxt == NULL)
		return GCERR_MMU_CTXT_BAD;

	memset(ctxt, 0, sizeof(struct mmu2dcontext));

#if MMU_ENABLE
	/* Allocate MTLB table. */
	gcerror = gc_alloc_pages(&ctxt->master, MMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto fail;
	}

	/* Allocate and zero an array of pointers to slave descriptors.
	 * MMU_MTLB_SIZE is used as the byte count, which assumes one
	 * 4-byte pointer per 4-byte MTLB entry (a 32-bit target). */
	ctxt->slave = kzalloc(MMU_MTLB_SIZE, GFP_KERNEL);
	if (ctxt->slave == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLBIDX_ALLOC);
		goto fail;
	}

	/* Invalidate all entries. */
	for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1)
		ctxt->master.logical[i] = MMU_MTLB_ENTRY_VACANT;

	/* Precompute the GCREG_MMU_CONFIGURATION value that points the
	 * MMU at the master table's physical address. Each SETFIELD()
	 * starts from all ones and programs one field, so ANDing the
	 * results merges the programmed fields into a single value
	 * (bits outside the fields remain set). */
	ctxt->physical
	= SETFIELD(~0U, GCREG_MMU_CONFIGURATION, ADDRESS,
	  (ctxt->master.physical >> GCREG_MMU_CONFIGURATION_ADDRESS_Start))
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_ADDRESS, ENABLED)
	& SETFIELD(~0U, GCREG_MMU_CONFIGURATION, MODE, MMU_MTLB_MODE)
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_MODE, ENABLED);
#endif

	/* Allocate the first vacant arena. */
	gcerror = mmu2d_get_arena(mmu, &ctxt->vacant);
	if (gcerror != GCERR_NONE)
		goto fail;

	/* Everything is vacant. */
	ctxt->vacant->mtlb  = 0;
	ctxt->vacant->stlb  = 0;
	ctxt->vacant->count = MMU_MTLB_ENTRY_NUM * MMU_STLB_ENTRY_NUM;
	ctxt->vacant->next  = NULL;

	/* Nothing is allocated. */
	ctxt->allocated = NULL;

#if MMU_ENABLE
	/* Allocate the safe zone. */
	if (mmu->safezone.size == 0) {
		gcerror = gc_alloc_pages(&mmu->safezone,
						MMU_SAFE_ZONE_SIZE);
		if (gcerror != GCERR_NONE) {
			gcerror = GCERR_SETGRP(gcerror,
						GCERR_MMU_SAFE_ALLOC);
			goto fail;
		}

		/* Fill the safe zone with a recognizable marker so any
		 * stray access redirected there is easy to spot. */
		for (i = 0; i < MMU_SAFE_ZONE_SIZE / sizeof(u32); i += 1)
			mmu->safezone.logical[i] = 0xDEADC0DE;
	}
#endif

	/* Reference MMU. */
	mmu->refcount += 1;
	ctxt->mmu = mmu;

	return GCERR_NONE;

fail:
#if MMU_ENABLE
	gc_free_pages(&ctxt->master);
	kfree(ctxt->slave);	/* kfree(NULL) is a harmless no-op */
#endif

	return gcerror;
}
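
Taken together, the two routines give the context its lifecycle. A hypothetical caller sketch: example_context_lifecycle() is an illustrative name, and only the two entry points shown above are assumed.

static enum gcerror example_context_lifecycle(void)
{
	struct mmu2dcontext ctxt;
	enum gcerror gcerror;

	/* Sets up the MTLB, the slave index and the initial vacant
	 * arena, and takes a reference on the shared MMU object. */
	gcerror = mmu2d_create_context(&ctxt);
	if (gcerror != GCERR_NONE)
		return gcerror;

	/* ... map and unmap buffers through ctxt here ... */

	/* Releases every table, arena and record, and drops the MMU
	 * reference taken by mmu2d_create_context(). */
	return mmu2d_destroy_context(&ctxt);
}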