/* Example #1 */
/*
 * mmu2d_set_master() - point the GPU MMU at this context's master
 * (MTLB) table.
 *
 * If the MMU is already enabled, only the master table address is
 * reprogrammed via a command-stream load.  If it is still disabled,
 * the full init block (safe zone address + master table address) is
 * emitted in physical addressing mode, flushed to the hardware, and
 * the MMU is then enabled through the control register.
 *
 * @ctxt: context whose master table should become active; must be
 *        non-NULL and already attached to an MMU (ctxt->mmu != NULL).
 *
 * Returns GCERR_NONE on success, GCERR_MMU_CTXT_BAD for an invalid
 * context, or a command-buffer allocation error wrapped with
 * GCERR_SETGRP().  When MMU_ENABLE is 0 this is a validation-only stub.
 */
enum gcerror mmu2d_set_master(struct mmu2dcontext *ctxt)
{
#if MMU_ENABLE
	enum gcerror gcerror;
	struct gcmommumaster *gcmommumaster;
	struct gcmommuinit *gcmommuinit;
	unsigned int size, status, enabled;
	struct mmu2dprivate *mmu = get_mmu();

	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	/* Read the MMU status. */
	status = gc_read_reg(GCREG_MMU_CONTROL_Address);
	enabled = GETFIELD(status, GCREG_MMU_CONTROL, ENABLE);

	/* Is MMU enabled? */
	if (enabled) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is already enabled.\n",
			__func__, __LINE__);

		/* Allocate command buffer space. */
		gcerror = cmdbuf_alloc(sizeof(struct gcmommumaster),
					(void **) &gcmommumaster, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_SET);

		/* Program master table address. */
		gcmommumaster->master_ldst = gcmommumaster_master_ldst;
		gcmommumaster->master = ctxt->physical;
	} else {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"gcx: mmu is disabled, enabling.\n",
			__func__, __LINE__);

		/*
		 * MMU disabled, force physical mode: the init block must
		 * reach the hardware through physical addresses since no
		 * translation is active yet.
		 */
		cmdbuf_physical(true);

		/*
		 * Allocate command buffer space.
		 * NOTE(review): cmdbuf_flush(NULL) is presumably a size
		 * query returning the byte count of the trailing flush
		 * block appended below — confirm against cmdbuf_flush().
		 */
		size = sizeof(struct gcmommuinit) + cmdbuf_flush(NULL);
		gcerror = cmdbuf_alloc(size, (void **) &gcmommuinit, NULL);
		if (gcerror != GCERR_NONE)
			return GCERR_SETGRP(gcerror, GCERR_MMU_INIT);

		/* Program the safe zone and the master table address. */
		gcmommuinit->safe_ldst = gcmommuinit_safe_ldst;
		gcmommuinit->safe = mmu->safezone.physical;
		gcmommuinit->mtlb = ctxt->physical;

		/* Execute the buffer (flush block starts right after
		 * the init structure). */
		cmdbuf_flush(gcmommuinit + 1);

		/* Resume normal mode. */
		cmdbuf_physical(false);

		/*
		* Enable MMU. For security reasons, once it is enabled,
		* the only way to disable is to reset the system.
		*/
		gc_write_reg(
			GCREG_MMU_CONTROL_Address,
			SETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE));
	}

	return GCERR_NONE;
#else
	/* MMU support compiled out: validate arguments only. */
	if ((ctxt == NULL) || (ctxt->mmu == NULL))
		return GCERR_MMU_CTXT_BAD;

	return GCERR_NONE;
#endif
}
/* Example #2 */
/*
 * cmdbuf_flush() - submit the accumulated command buffer to the GPU
 * front end and wait for it to finish.
 *
 * Appends an EVENT + END sequence to the buffer, kicks off the FE DMA
 * via the address/control registers, blocks until the completion
 * interrupt fires (or polls when ENABLE_POLLING is set), then resets
 * the buffer bookkeeping so it can be refilled from the start.
 *
 * Returns 0 on success or the cmdbuf_alloc() error code on failure.
 */
int cmdbuf_flush(void)
{
	int ret;
	u32 *buffer;
	u32 base, physical;
	u32 count;

#if ENABLE_POLLING
	u32 retry;
#endif

	/*
	 * Reserve 4 words at the tail of the command buffer.
	 * NOTE(review): only buffer[0..2] are written below; the fourth
	 * word is presumably 64-bit alignment padding required by the
	 * FE — confirm against the hardware command format.
	 */
	ret = cmdbuf_alloc(4 * sizeof(u32), &buffer, &physical);
	if (ret != 0)
		goto fail;

	/* Append EVENT(Event, destination). */
	buffer[0]
		= SETFIELDVAL(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE,
								 LOAD_STATE)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS,
								AQEventRegAddrs)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1);

	/* Event payload: signal from the pixel engine, event id 16. */
	buffer[1]
		= SETFIELDVAL(0, AQ_EVENT, PE_SRC, ENABLE)
		| SETFIELD(0, AQ_EVENT, EVENT_ID, 16);

	/* Stop FE. */
	buffer[2]
		= SETFIELDVAL(0, AQ_COMMAND_END_COMMAND, OPCODE, END);

#if ENABLE_CMD_DEBUG
	/* Dump command buffer. */
	cmdbuf_dump();
#endif

	/* Determine the command buffer base address (mapped-through-MMU
	 * address when a mapping exists, raw page address otherwise). */
	base = cmdbuf.mapped ? cmdbuf.mapped_physical : cmdbuf.page.physical;

	/* Compute the data count: FE prefetch is in 64-bit units, so
	 * round the byte size up to 8 and divide. */
	count = (cmdbuf.data_size + 7) >> 3;

#if ENABLE_POLLING
	int_data = 0;
#endif

	CMDBUFPRINT("starting DMA at 0x%08X with count of %d\n", base, count);

#if ENABLE_CMD_DEBUG || ENABLE_GPU_COUNTERS
	/* Reset hardware counters. */
	hw_write_reg(GC_RESET_MEM_COUNTERS_Address, 1);
#endif

	/* Enable all events. */
	hw_write_reg(AQ_INTR_ENBL_Address, ~0U);

	/* Write address register. */
	hw_write_reg(AQ_CMD_BUFFER_ADDR_Address, base);

	/* Write control register. This starts the FE DMA, so it must be
	 * the last register written. */
	hw_write_reg(AQ_CMD_BUFFER_CTRL_Address,
		SETFIELDVAL(0, AQ_CMD_BUFFER_CTRL, ENABLE, ENABLE) |
		SETFIELD(0, AQ_CMD_BUFFER_CTRL, PREFETCH, count)
		);

	/* Wait for the interrupt. */
#if ENABLE_POLLING
	/* Poll int_data (set by the ISR); dump GPU status every ~2.5s
	 * so a hang is diagnosable. */
	retry = 0;
	while (1) {
		if (int_data != 0)
			break;

		msleep(500);
		retry += 1;

		if ((retry % 5) == 0)
			gpu_status((char *) __func__, __LINE__, 0);
	}
#else
	/*
	 * NOTE(review): the return value is ignored, so a signal
	 * interrupting the wait falls through as if the GPU completed —
	 * consider checking for -ERESTARTSYS.
	 */
	wait_event_interruptible(gc_event, done == true);
#endif

#if ENABLE_CMD_DEBUG
	gpu_status((char *) __func__, __LINE__, 0);
#endif

	/* Reset the buffer. */
	cmdbuf.logical  = cmdbuf.page.logical;
	cmdbuf.physical = base;

	cmdbuf.available = cmdbuf.page.size;
	cmdbuf.data_size = 0;

fail:
	return ret;
}
/* Example #3 */
/*
 * mmu2d_create_context() - initialize a 2D MMU context.
 *
 * Zeroes the context, allocates and invalidates the master (MTLB)
 * table plus the slave-descriptor pointer array, precomputes the
 * GCREG_MMU_CONFIGURATION value for the master table, sets up a single
 * all-vacant arena, lazily allocates the shared safe zone, and takes a
 * reference on the MMU object.
 *
 * @ctxt: caller-provided context storage; must be non-NULL.
 *
 * Returns GCERR_NONE on success or an error wrapped with
 * GCERR_SETGRP() identifying the failing stage; on failure all
 * allocations made here are released.
 */
enum gcerror mmu2d_create_context(struct mmu2dcontext *ctxt)
{
	enum gcerror gcerror;

#if MMU_ENABLE
	int i;
#endif

	struct mmu2dprivate *mmu = get_mmu();

	if (ctxt == NULL)
		return GCERR_MMU_CTXT_BAD;

	memset(ctxt, 0, sizeof(struct mmu2dcontext));

#if MMU_ENABLE
	/* Allocate MTLB table. */
	gcerror = gc_alloc_pages(&ctxt->master, MMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto fail;
	}

	/* Allocate a zeroed array of pointers to slave descriptors
	 * (kzalloc replaces the kmalloc+memset pair). */
	ctxt->slave = kzalloc(MMU_MTLB_SIZE, GFP_KERNEL);
	if (ctxt->slave == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLBIDX_ALLOC);
		goto fail;
	}

	/* Invalidate all entries. */
	for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1)
		ctxt->master.logical[i] = MMU_MTLB_ENTRY_VACANT;

	/* Precompute the MMU configuration register value: master table
	 * address + mode, with both mask fields enabled so the hardware
	 * latches them. */
	ctxt->physical
	= SETFIELD(~0U, GCREG_MMU_CONFIGURATION, ADDRESS,
	  (ctxt->master.physical >> GCREG_MMU_CONFIGURATION_ADDRESS_Start))
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_ADDRESS, ENABLED)
	& SETFIELD(~0U, GCREG_MMU_CONFIGURATION, MODE, MMU_MTLB_MODE)
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_MODE, ENABLED);
#endif

	/* Allocate the first vacant arena. */
	gcerror = mmu2d_get_arena(mmu, &ctxt->vacant);
	if (gcerror != GCERR_NONE)
		goto fail;

	/* Everything is vacant. */
	ctxt->vacant->mtlb  = 0;
	ctxt->vacant->stlb  = 0;
	ctxt->vacant->count = MMU_MTLB_ENTRY_NUM * MMU_STLB_ENTRY_NUM;
	ctxt->vacant->next  = NULL;

	/* Nothing is allocated. */
	ctxt->allocated = NULL;

#if MMU_ENABLE
	/* Allocate the safe zone once; it is shared by all contexts. */
	if (mmu->safezone.size == 0) {
		gcerror = gc_alloc_pages(&mmu->safezone,
						MMU_SAFE_ZONE_SIZE);
		if (gcerror != GCERR_NONE) {
			gcerror = GCERR_SETGRP(gcerror,
						GCERR_MMU_SAFE_ALLOC);
			goto fail;
		}

		/* Initialize safe zone to a recognizable marker value. */
		for (i = 0; i < MMU_SAFE_ZONE_SIZE / sizeof(u32); i += 1)
			mmu->safezone.logical[i] = 0xDEADC0DE;
	}
#endif

	/* Reference MMU. */
	mmu->refcount += 1;
	ctxt->mmu = mmu;

	return GCERR_NONE;

fail:
#if MMU_ENABLE
	gc_free_pages(&ctxt->master);
	/* kfree(NULL) is a no-op, so no guard is needed; clear the
	 * pointer so a later cleanup cannot double-free it. */
	kfree(ctxt->slave);
	ctxt->slave = NULL;
#endif

	return gcerror;
}