/*
 * gc_commit() - submit a chain of command buffers to the 2D core.
 * @gccommit: commit descriptor; gccommit->buffer is the head of a linked
 *            list of struct gcbuffer command buffers and
 *            gccommit->gcerror receives the result code.
 * @fromuser: non-zero when gcbuffer->head points into userspace and must
 *            be fetched with copy_from_user(); zero for kernel buffers.
 *
 * Serialized by the global mutex 'mtx'.  The chip is powered up for the
 * duration of the commit and dropped back to GCPWR_LOW (or GCPWR_OFF when
 * 'gforceoff' is set) on every exit path, success or failure.
 */
void gc_commit(struct gccommit *gccommit, int fromuser)
{
	struct gcbuffer *gcbuffer;
	unsigned int cmdflushsize;
	unsigned int mmuflushsize;
	unsigned int buffersize;
	unsigned int allocsize;
	unsigned int *logical;
	unsigned int address;
	struct gcmopipesel *gcmopipesel;
	struct gccontextmap *context;

	GCPRINT(GCDBGFILTER, GCZONE_COMMIT, "++" GC_MOD_PREFIX
		"\n", __func__, __LINE__);

	mutex_lock(&mtx);

	/* Enable power to the chip. */
	gc_set_power(GCPWR_ON);

	/* Locate the client entry. */
	gccommit->gcerror = find_context(&context, true);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	/* Force an MMU flush in front of the first buffer of this commit. */
	context->context->mmu_dirty = true;

	/* Set the client's master table. */
	gccommit->gcerror = mmu2d_set_master(&context->context->mmu);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	/* Set 2D pipe. */
	gccommit->gcerror = cmdbuf_alloc(sizeof(struct gcmopipesel),
					(void **) &gcmopipesel, NULL);
	if (gccommit->gcerror != GCERR_NONE)
		goto exit;

	gcmopipesel->pipesel_ldst = gcmopipesel_pipesel_ldst;
	gcmopipesel->pipesel.reg = gcregpipeselect_2D;

	/*
	 * Determine command buffer flush size.
	 * NOTE(review): cmdbuf_flush() is used here in two-mode form
	 * (NULL = query size, pointer = execute), but a cmdbuf_flush(void)
	 * definition also appears in this file -- confirm which prototype
	 * is authoritative.
	 */
	cmdflushsize = cmdbuf_flush(NULL);

	/* Go through all buffers one at a time. */
	gcbuffer = gccommit->buffer;
	while (gcbuffer != NULL) {
		GCPRINT(GCDBGFILTER, GCZONE_COMMIT, GC_MOD_PREFIX
			"gcbuffer = 0x%08X\n",
			__func__, __LINE__, gcbuffer);

		/* Compute the size of the command buffer. */
		buffersize
			= (unsigned char *) gcbuffer->tail
			- (unsigned char *) gcbuffer->head;

		GCPRINT(GCDBGFILTER, GCZONE_COMMIT, GC_MOD_PREFIX
			"buffersize = %d\n",
			__func__, __LINE__, buffersize);

		/* Determine MMU flush size (zero once the first buffer has
		 * cleared the dirty flag below). */
		mmuflushsize = context->context->mmu_dirty
			? mmu2d_flush(NULL, 0, 0) : 0;

		/* Reserve command buffer space. */
		allocsize = mmuflushsize + buffersize + cmdflushsize;
		gccommit->gcerror = cmdbuf_alloc(allocsize,
						(void **) &logical, &address);
		if (gccommit->gcerror != GCERR_NONE)
			goto exit;

		/* Append MMU flush. */
		if (context->context->mmu_dirty) {
			mmu2d_flush(logical, address, allocsize);

			/* Skip MMU flush. */
			logical = (unsigned int *)
				((unsigned char *) logical + mmuflushsize);

			/* Validate MMU state. */
			context->context->mmu_dirty = false;
		}

		if (fromuser) {
			/* Copy command buffer. */
			if (copy_from_user(logical, gcbuffer->head,
						buffersize)) {
				GCPRINT(NULL, 0, GC_MOD_PREFIX
					"failed to read data.\n",
					__func__, __LINE__);
				gccommit->gcerror = GCERR_USER_READ;
				goto exit;
			}
		} else {
			memcpy(logical, gcbuffer->head, buffersize);
		}

		/* Process fixups (patch addresses inside the copied buffer
		 * against the MMU mapping). */
		gccommit->gcerror = mmu2d_fixup(gcbuffer->fixuphead, logical);
		if (gccommit->gcerror != GCERR_NONE)
			goto exit;

		/* Skip the command buffer. */
		logical = (unsigned int *)
			((unsigned char *) logical + buffersize);

		/* Execute the current command buffer. */
		cmdbuf_flush(logical);

		/* Get the next buffer. */
		gcbuffer = gcbuffer->next;
	}

exit:
	/* Always drop power back down; off entirely when forced. */
	gc_set_power(GCPWR_LOW);
	if (gforceoff)
		gc_set_power(GCPWR_OFF);

	mutex_unlock(&mtx);

	GCPRINT(GCDBGFILTER, GCZONE_COMMIT, "--" GC_MOD_PREFIX
		"gc%s = 0x%08X\n", __func__, __LINE__,
		(gccommit->gcerror == GCERR_NONE) ? "result" : "error",
		gccommit->gcerror);
}
/*
 * cmdbuf_flush() - terminate the accumulated command buffer and run it.
 *
 * Appends an EVENT (id 16, PE source) followed by an END command, programs
 * the FE command-buffer address/control registers to start DMA, then waits
 * for completion: either polling 'int_data' (ENABLE_POLLING) or sleeping on
 * 'gc_event' until the interrupt handler sets 'done'.  On completion the
 * ring buffer is rewound to the start of its backing page.
 *
 * Returns 0 on success or the cmdbuf_alloc() error code.
 *
 * NOTE(review): other functions in this file invoke cmdbuf_flush() with a
 * pointer argument (NULL to query the flush size, non-NULL to execute);
 * this definition takes void -- confirm which prototype is current.
 */
int cmdbuf_flush(void)
{
	int ret;
	u32 *buffer;
	u32 base, physical;
	u32 count;
#if ENABLE_POLLING
	u32 retry;
#endif

	/* Reserve space for the trailing event/end commands.
	 * NOTE(review): 4 words reserved, 3 written -- presumably for
	 * 64-bit alignment of the FE fetch; confirm. */
	ret = cmdbuf_alloc(4 * sizeof(u32), &buffer, &physical);
	if (ret != 0)
		goto fail;

	/* Append EVENT(Event, destination). */
	buffer[0]
		= SETFIELDVAL(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE,
				LOAD_STATE)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS,
				AQEventRegAddrs)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1);

	buffer[1]
		= SETFIELDVAL(0, AQ_EVENT, PE_SRC, ENABLE)
		| SETFIELD(0, AQ_EVENT, EVENT_ID, 16);

	/* Stop FE. */
	buffer[2]
		= SETFIELDVAL(0, AQ_COMMAND_END_COMMAND, OPCODE, END);

#if ENABLE_CMD_DEBUG
	/* Dump command buffer. */
	cmdbuf_dump();
#endif

	/* Determine the command buffer base address. */
	base = cmdbuf.mapped
		? cmdbuf.mapped_physical : cmdbuf.page.physical;

	/* Compute the data count (FE prefetch is in 64-bit units,
	 * rounded up). */
	count = (cmdbuf.data_size + 7) >> 3;

#if ENABLE_POLLING
	int_data = 0;
#endif

	CMDBUFPRINT("starting DMA at 0x%08X with count of %d\n",
			base, count);

#if ENABLE_CMD_DEBUG || ENABLE_GPU_COUNTERS
	/* Reset hardware counters. */
	hw_write_reg(GC_RESET_MEM_COUNTERS_Address, 1);
#endif

	/* Enable all events. */
	hw_write_reg(AQ_INTR_ENBL_Address, ~0U);

	/* Write address register. */
	hw_write_reg(AQ_CMD_BUFFER_ADDR_Address, base);

	/* Write control register; this kicks off the DMA. */
	hw_write_reg(AQ_CMD_BUFFER_CTRL_Address,
		SETFIELDVAL(0, AQ_CMD_BUFFER_CTRL, ENABLE, ENABLE) |
		SETFIELD(0, AQ_CMD_BUFFER_CTRL, PREFETCH, count)
		);

	/* Wait for the interrupt. */
#if ENABLE_POLLING
	retry = 0;
	while (1) {
		if (int_data != 0)
			break;

		msleep(500);
		retry += 1;

		/* Periodically dump GPU status while waiting. */
		if ((retry % 5) == 0)
			gpu_status((char *) __func__, __LINE__, 0);
	}
#else
	wait_event_interruptible(gc_event, done == true);
#endif

#if ENABLE_CMD_DEBUG
	gpu_status((char *) __func__, __LINE__, 0);
#endif

	/* Reset the buffer (rewind the ring to the start of the page). */
	cmdbuf.logical = cmdbuf.page.logical;
	cmdbuf.physical = base;
	cmdbuf.available = cmdbuf.page.size;
	cmdbuf.data_size = 0;

fail:
	return ret;
}
enum gcerror mmu2d_set_master(struct mmu2dcontext *ctxt) { #if MMU_ENABLE enum gcerror gcerror; struct gcmommumaster *gcmommumaster; struct gcmommuinit *gcmommuinit; unsigned int size, status, enabled; struct mmu2dprivate *mmu = get_mmu(); if ((ctxt == NULL) || (ctxt->mmu == NULL)) return GCERR_MMU_CTXT_BAD; /* Read the MMU status. */ status = gc_read_reg(GCREG_MMU_CONTROL_Address); enabled = GETFIELD(status, GCREG_MMU_CONTROL, ENABLE); /* Is MMU enabled? */ if (enabled) { GCPRINT(NULL, 0, GC_MOD_PREFIX "gcx: mmu is already enabled.\n", __func__, __LINE__); /* Allocate command buffer space. */ gcerror = cmdbuf_alloc(sizeof(struct gcmommumaster), (void **) &gcmommumaster, NULL); if (gcerror != GCERR_NONE) return GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_SET); /* Program master table address. */ gcmommumaster->master_ldst = gcmommumaster_master_ldst; gcmommumaster->master = ctxt->physical; } else { GCPRINT(NULL, 0, GC_MOD_PREFIX "gcx: mmu is disabled, enabling.\n", __func__, __LINE__); /* MMU disabled, force physical mode. */ cmdbuf_physical(true); /* Allocate command buffer space. */ size = sizeof(struct gcmommuinit) + cmdbuf_flush(NULL); gcerror = cmdbuf_alloc(size, (void **) &gcmommuinit, NULL); if (gcerror != GCERR_NONE) return GCERR_SETGRP(gcerror, GCERR_MMU_INIT); /* Program the safe zone and the master table address. */ gcmommuinit->safe_ldst = gcmommuinit_safe_ldst; gcmommuinit->safe = mmu->safezone.physical; gcmommuinit->mtlb = ctxt->physical; /* Execute the buffer. */ cmdbuf_flush(gcmommuinit + 1); /* Resume normal mode. */ cmdbuf_physical(false); /* * Enable MMU. For security reasons, once it is enabled, * the only way to disable is to reset the system. */ gc_write_reg( GCREG_MMU_CONTROL_Address, SETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE)); } return GCERR_NONE; #else if ((ctxt == NULL) || (ctxt->mmu == NULL)) return GCERR_MMU_CTXT_BAD; return GCERR_NONE; #endif }