enum gcerror gcpwr_enable_clock(enum gcpower prevstate) { bool ctxlost = g_gcxplat->was_context_lost(gcdevice.dev); if (!g_clockenabled) { /* Enable the clock. */ pm_runtime_get_sync(gcdevice.dev); /* Signal software not idle. */ gc_write_reg(GC_GP_OUT0_Address, 0); /* Clock enabled. */ g_clockenabled = true; } else if (ctxlost) { u32 reg; dev_info(gcdevice.dev, "unexpected context\n"); reg = gc_read_reg(GC_GP_OUT0_Address); if (reg) { dev_info(gcdevice.dev, "reset gchold\n"); gc_write_reg(GC_GP_OUT0_Address, 0); } } GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX "clock %s.\n", __func__, __LINE__, g_clockenabled ? "enabled" : "disabled"); if (ctxlost || prevstate == GCPWR_UNKNOWN) gc_reset_gpu(); return GCERR_NONE; }
/*
 * GPU interrupt handler.
 *
 * Reads and acknowledges the interrupt status in one register access.
 * A zero acknowledge value means the interrupt was not ours. Otherwise
 * the status is cached for debugging, published in g_gccoredata, and
 * the waiter blocked on g_gccoreint is released.
 */
static irqreturn_t gc_irq(int irq, void *p)
{
	unsigned int ack;

	/* Reading gcregIntrAcknowledge also acknowledges the interrupt. */
	ack = gc_read_reg(GCREG_INTR_ACKNOWLEDGE_Address);
	if (ack == 0)
		return IRQ_NONE;

	gc_debug_cache_gpu_status_from_irq(ack);

	/* Publish the status and wake the waiting thread. */
	g_gccoredata = ack;
	complete(&g_gccoreint);

	return IRQ_HANDLED;
}
/*
 * Snapshot the GPU identification registers into g_gcGpuId.
 *
 * The registers are read exactly once per boot; subsequent calls are
 * no-ops once the cache is marked valid.
 */
void gc_debug_cache_gpu_id(void)
{
	/* Already captured? Nothing to do. */
	if (g_gcGpuId.valid)
		return;

	g_gcGpuId.chipModel         = gc_read_reg(GC_CHIP_ID_Address);
	g_gcGpuId.chipRevision      = gc_read_reg(GC_CHIP_REV_Address);
	g_gcGpuId.chipDate          = gc_read_reg(GC_CHIP_DATE_Address);
	g_gcGpuId.chipTime          = gc_read_reg(GC_CHIP_TIME_Address);
	g_gcGpuId.chipFeatures      = gc_read_reg(GC_FEATURES_Address);
	g_gcGpuId.chipMinorFeatures = gc_read_reg(GC_MINOR_FEATURES0_Address);

	g_gcGpuId.valid = 1;
}
/*
 * Enable the GPU MMU if the hardware reports it disabled.
 *
 * Builds a small command buffer (MMU init + EVENT signal + END) in the
 * MMU's dedicated log buffer and queues it at the HEAD of the active
 * queue so it executes before any pending work. The master-table address
 * is taken from the MMU context of the first queued command buffer.
 *
 * NOTE(review): this chunk is truncated — the `fail:` label targeted by
 * the goto statements, the cleanup path, and the function's return are
 * not visible here.
 */
enum gcerror gcmmu_enable(struct gccorecontext *gccorecontext,
			  struct gcqueue *gcqueue)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct list_head *head;
	struct gccmdbuf *headcmdbuf;
	struct gccmdbuf *gccmdbuf = NULL;
	struct gcevent *gcevent = NULL;
	struct gcmommuinit *gcmommuinit;
	struct gcmosignal *gcmosignal;
	struct gccmdend *gccmdend;
	unsigned int status, enabled;

	GCENTER(GCZONE_INIT);

	/* Read the MMU status. */
	status = gc_read_reg(GCREG_MMU_CONTROL_Address);
	enabled = GCGETFIELD(status, GCREG_MMU_CONTROL, ENABLE);

	/* Is MMU enabled? */
	if (!enabled) {
		GCDBG(GCZONE_MASTER, "enabling MMU.\n");

		/* Queue cannot be empty. */
		if (list_empty(&gcqueue->queue)) {
			GCERR("queue is empty.");
			gcerror = GCERR_MMU_INIT;
			goto fail;
		}

		/* Get the first entry from the active queue. */
		head = gcqueue->queue.next;
		headcmdbuf = list_entry(head, struct gccmdbuf, link);

		/* Allocate command init buffer. */
		gcerror = gcqueue_alloc_cmdbuf(gcqueue, &gccmdbuf);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Add event for the current command buffer. */
		gcerror = gcqueue_alloc_event(gcqueue, &gcevent);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Get free interrupt. */
		gcerror = gcqueue_alloc_int(gcqueue, &gccmdbuf->interrupt);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Initialize the event and add to the list. */
		gcevent->handler = event_enable_mmu;

		/* Attach records.  list_add (not list_add_tail) puts the
		 * MMU-init buffer at the head so it runs first. */
		list_add_tail(&gcevent->link, &gccmdbuf->events);
		list_add(&gccmdbuf->link, &gcqueue->queue);

		/* Program the safe zone and the master table address. */
		gcmommuinit = (struct gcmommuinit *) gcmmu->cmdbuflog;
		gcmommuinit->safe_ldst = gcmommuinit_safe_ldst;
		gcmommuinit->safe = gcmmu->safezonephys;
		gcmommuinit->mtlb = headcmdbuf->gcmmucontext->mmuconfig.raw;

		/* Configure EVENT command. */
		gcmosignal = (struct gcmosignal *) (gcmommuinit + 1);
		gcmosignal->signal_ldst = gcmosignal_signal_ldst;
		gcmosignal->signal.raw = 0;
		gcmosignal->signal.reg.id = gccmdbuf->interrupt;
		gcmosignal->signal.reg.pe = GCREG_EVENT_PE_SRC_ENABLE;

		/* Configure the END command. */
		gccmdend = (struct gccmdend *) (gcmosignal + 1);
		gccmdend->cmd.raw = gccmdend_const.cmd.raw;

		/* Initialize the command buffer.  size is in bytes; count
		 * is in 64-bit (8-byte) units, rounded up. */
		gccmdbuf->gcmmucontext = headcmdbuf->gcmmucontext;
		gccmdbuf->logical = (unsigned char *) gcmmu->cmdbuflog;
		gccmdbuf->physical = gcmmu->cmdbufphys;
		gccmdbuf->size = sizeof(struct gcmommuinit)
			       + sizeof(struct gcmosignal)
			       + sizeof(struct gccmdend);
		gccmdbuf->count = (gccmdbuf->size + 7) >> 3;
		gccmdbuf->gcmoterminator = NULL;

		GCDUMPBUFFER(GCZONE_INIT, gccmdbuf->logical,
			     gccmdbuf->physical, gccmdbuf->size);
	}
	/* NOTE(review): remainder of gcmmu_enable (fail: cleanup and the
	 * return statement) lies outside this chunk. */
void gc_reset_gpu(void) { union gcclockcontrol gcclockcontrol; union gcidle gcidle; /* Read current clock control value. */ gcclockcontrol.raw = gc_read_reg(GCREG_HI_CLOCK_CONTROL_Address); while (true) { /* Isolate the GPU. */ gcclockcontrol.reg.isolate = 1; gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw); /* Set soft reset. */ gcclockcontrol.reg.reset = 1; gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw); /* Wait for reset. */ mdelay(1); /* Reset soft reset bit. */ gcclockcontrol.reg.reset = 0; gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw); /* Reset GPU isolation. */ gcclockcontrol.reg.isolate = 0; gc_write_reg(GCREG_HI_CLOCK_CONTROL_Address, gcclockcontrol.raw); /* Read idle register. */ gcidle.raw = gc_read_reg(GCREG_HI_IDLE_Address); /* Try resetting again if FE not idle. */ if (!gcidle.reg.fe) { GCPRINT(NULL, 0, GC_MOD_PREFIX " FE NOT IDLE\n", __func__, __LINE__); continue; } /* Read reset register. */ gcclockcontrol.raw = gc_read_reg(GCREG_HI_CLOCK_CONTROL_Address); /* Try resetting again if 2D is not idle. */ if (!gcclockcontrol.reg.idle2d) { GCPRINT(NULL, 0, GC_MOD_PREFIX " 2D NOT IDLE\n", __func__, __LINE__); continue; } /* GPU is idle. */ break; } /* Pulse skipping disabled. */ g_pulseskipping = false; GCPRINT(GCDBGFILTER, GCZONE_POWER, GC_MOD_PREFIX "gpu reset.\n", __func__, __LINE__); }
enum gcerror mmu2d_set_master(struct mmu2dcontext *ctxt) { #if MMU_ENABLE enum gcerror gcerror; struct gcmommumaster *gcmommumaster; struct gcmommuinit *gcmommuinit; unsigned int size, status, enabled; struct mmu2dprivate *mmu = get_mmu(); if ((ctxt == NULL) || (ctxt->mmu == NULL)) return GCERR_MMU_CTXT_BAD; /* Read the MMU status. */ status = gc_read_reg(GCREG_MMU_CONTROL_Address); enabled = GETFIELD(status, GCREG_MMU_CONTROL, ENABLE); /* Is MMU enabled? */ if (enabled) { GCPRINT(NULL, 0, GC_MOD_PREFIX "gcx: mmu is already enabled.\n", __func__, __LINE__); /* Allocate command buffer space. */ gcerror = cmdbuf_alloc(sizeof(struct gcmommumaster), (void **) &gcmommumaster, NULL); if (gcerror != GCERR_NONE) return GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_SET); /* Program master table address. */ gcmommumaster->master_ldst = gcmommumaster_master_ldst; gcmommumaster->master = ctxt->physical; } else { GCPRINT(NULL, 0, GC_MOD_PREFIX "gcx: mmu is disabled, enabling.\n", __func__, __LINE__); /* MMU disabled, force physical mode. */ cmdbuf_physical(true); /* Allocate command buffer space. */ size = sizeof(struct gcmommuinit) + cmdbuf_flush(NULL); gcerror = cmdbuf_alloc(size, (void **) &gcmommuinit, NULL); if (gcerror != GCERR_NONE) return GCERR_SETGRP(gcerror, GCERR_MMU_INIT); /* Program the safe zone and the master table address. */ gcmommuinit->safe_ldst = gcmommuinit_safe_ldst; gcmommuinit->safe = mmu->safezone.physical; gcmommuinit->mtlb = ctxt->physical; /* Execute the buffer. */ cmdbuf_flush(gcmommuinit + 1); /* Resume normal mode. */ cmdbuf_physical(false); /* * Enable MMU. For security reasons, once it is enabled, * the only way to disable is to reset the system. */ gc_write_reg( GCREG_MMU_CONTROL_Address, SETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE)); } return GCERR_NONE; #else if ((ctxt == NULL) || (ctxt->mmu == NULL)) return GCERR_MMU_CTXT_BAD; return GCERR_NONE; #endif }