static enum gcerror alloc_fixup(struct gcfixup **gcfixup) { enum gcerror gcerror = GCERR_NONE; struct gcfixup *temp; GCLOCK(&g_fixuplock); if (list_empty(&g_fixupvac)) { temp = kmalloc(sizeof(struct gcfixup), GFP_KERNEL); if (temp == NULL) { GCERR("out of memory.\n"); gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_IOCTL_FIXUP_ALLOC); goto exit; } } else { struct list_head *head; head = g_fixupvac.next; temp = list_entry(head, struct gcfixup, link); list_del(head); } GCUNLOCK(&g_fixuplock); INIT_LIST_HEAD(&temp->link); *gcfixup = temp; exit: return gcerror; }
static enum gcerror get_arena(struct gcmmu *gcmmu, struct gcmmuarena **arena) { enum gcerror gcerror = GCERR_NONE; struct gcmmuarena *temp; GCENTER(GCZONE_ARENA); GCLOCK(&gcmmu->lock); if (list_empty(&gcmmu->vacarena)) { temp = kmalloc(sizeof(struct gcmmuarena), GFP_KERNEL); if (temp == NULL) { GCERR("arena entry allocation failed.\n"); gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_ARENA_ALLOC); goto exit; } } else { struct list_head *head; head = gcmmu->vacarena.next; temp = list_entry(head, struct gcmmuarena, link); list_del(head); } *arena = temp; exit: GCUNLOCK(&gcmmu->lock); GCEXITARG(GCZONE_ARENA, "gc%s = 0x%08X\n", (gcerror == GCERR_NONE) ? "result" : "error", gcerror); return gcerror; }
static void event_enable_mmu(struct gcevent *gcevent, unsigned int *flags) { GCENTER(GCZONE_INIT); /* * Enable MMU. For security reasons, once it is enabled, * the only way to disable is to reset the system. */ gc_write_reg( GCREG_MMU_CONTROL_Address, GCSETFIELDVAL(0, GCREG_MMU_CONTROL, ENABLE, ENABLE)); /* After MMU command buffer is processed, FE will stop. * Let the control thread know that FE needs to be restarted. */ if (flags == NULL) GCERR("flags are not set.\n"); else *flags |= GC_CMDBUF_START_FE; GCEXIT(GCZONE_INIT); }
enum gcerror gcmmu_init(struct gccorecontext *gccorecontext) { enum gcerror gcerror; struct gcmmu *gcmmu = &gccorecontext->gcmmu; GCENTER(GCZONE_INIT); /* Initialize access lock. */ GCLOCK_INIT(&gcmmu->lock); /* Allocate one page. */ gcerror = gc_alloc_noncached(&gcmmu->gcpage, PAGE_SIZE); if (gcerror != GCERR_NONE) { GCERR("failed to allocate MMU management buffer.\n"); gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_SAFE_ALLOC); goto exit; } /* Determine the location of the physical command buffer. */ gcmmu->cmdbufphys = gcmmu->gcpage.physical; gcmmu->cmdbuflog = gcmmu->gcpage.logical; gcmmu->cmdbufsize = PAGE_SIZE - GCMMU_SAFE_ZONE_SIZE; /* Determine the location of the safe zone. */ gcmmu->safezonephys = gcmmu->gcpage.physical + gcmmu->cmdbufsize; gcmmu->safezonelog = (unsigned int *) ((unsigned char *) gcmmu->gcpage.logical + gcmmu->cmdbufsize); gcmmu->safezonesize = GCMMU_SAFE_ZONE_SIZE; /* Reset the master table. */ gcmmu->master = ~0U; /* Initialize the list of vacant arenas. */ INIT_LIST_HEAD(&gcmmu->vacarena); exit: GCEXITARG(GCZONE_INIT, "gc%s = 0x%08X\n", (gcerror == GCERR_NONE) ? "result" : "error", gcerror); return gcerror; }
/* If the hardware MMU is not yet enabled, build and queue a small command
 * buffer (MMU init + EVENT + END) that programs the safe zone / master
 * table and raises an interrupt handled by event_enable_mmu().
 * NOTE(review): this function is truncated in this view — the 'fail:'
 * label targeted by the error paths lies outside the visible source. */
enum gcerror gcmmu_enable(struct gccorecontext *gccorecontext,
			  struct gcqueue *gcqueue)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct list_head *head;
	struct gccmdbuf *headcmdbuf;
	struct gccmdbuf *gccmdbuf = NULL;
	struct gcevent *gcevent = NULL;
	struct gcmommuinit *gcmommuinit;
	struct gcmosignal *gcmosignal;
	struct gccmdend *gccmdend;
	unsigned int status, enabled;

	GCENTER(GCZONE_INIT);

	/* Read the MMU status. */
	status = gc_read_reg(GCREG_MMU_CONTROL_Address);
	enabled = GCGETFIELD(status, GCREG_MMU_CONTROL, ENABLE);

	/* Is MMU enabled? Nothing to do if it already is. */
	if (!enabled) {
		GCDBG(GCZONE_MASTER, "enabling MMU.\n");

		/* Queue cannot be empty: the head entry supplies the
		 * MMU context whose master table we will program. */
		if (list_empty(&gcqueue->queue)) {
			GCERR("queue is empty.");
			gcerror = GCERR_MMU_INIT;
			goto fail;
		}

		/* Get the first entry from the active queue. */
		head = gcqueue->queue.next;
		headcmdbuf = list_entry(head, struct gccmdbuf, link);

		/* Allocate command init buffer. */
		gcerror = gcqueue_alloc_cmdbuf(gcqueue, &gccmdbuf);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Add event for the current command buffer. */
		gcerror = gcqueue_alloc_event(gcqueue, &gcevent);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Get free interrupt. */
		gcerror = gcqueue_alloc_int(gcqueue, &gccmdbuf->interrupt);
		if (gcerror != GCERR_NONE)
			goto fail;

		/* Initialize the event and add to the list; the handler
		 * flips the MMU enable bit when the interrupt fires. */
		gcevent->handler = event_enable_mmu;

		/* Attach records. The init buffer is inserted at the HEAD
		 * of the queue so it runs before any pending work. */
		list_add_tail(&gcevent->link, &gccmdbuf->events);
		list_add(&gccmdbuf->link, &gcqueue->queue);

		/* Program the safe zone and the master table address. */
		gcmommuinit = (struct gcmommuinit *) gcmmu->cmdbuflog;
		gcmommuinit->safe_ldst = gcmommuinit_safe_ldst;
		gcmommuinit->safe = gcmmu->safezonephys;
		gcmommuinit->mtlb = headcmdbuf->gcmmucontext->mmuconfig.raw;

		/* Configure EVENT command. */
		gcmosignal = (struct gcmosignal *) (gcmommuinit + 1);
		gcmosignal->signal_ldst = gcmosignal_signal_ldst;
		gcmosignal->signal.raw = 0;
		gcmosignal->signal.reg.id = gccmdbuf->interrupt;
		gcmosignal->signal.reg.pe = GCREG_EVENT_PE_SRC_ENABLE;

		/* Configure the END command. */
		gccmdend = (struct gccmdend *) (gcmosignal + 1);
		gccmdend->cmd.raw = gccmdend_const.cmd.raw;

		/* Initialize the command buffer. */
		gccmdbuf->gcmmucontext = headcmdbuf->gcmmucontext;
		gccmdbuf->logical = (unsigned char *) gcmmu->cmdbuflog;
		gccmdbuf->physical = gcmmu->cmdbufphys;
		gccmdbuf->size = sizeof(struct gcmommuinit)
			       + sizeof(struct gcmosignal)
			       + sizeof(struct gccmdend);
		/* Size in 64-bit FE words, rounded up. */
		gccmdbuf->count = (gccmdbuf->size + 7) >> 3;
		gccmdbuf->gcmoterminator = NULL;

		GCDUMPBUFFER(GCZONE_INIT, gccmdbuf->logical,
			     gccmdbuf->physical, gccmdbuf->size);
	}
static enum gcerror allocate_slave(struct gcmmucontext *gcmmucontext, union gcmmuloc index) { enum gcerror gcerror; struct gcmmustlbblock *block = NULL; struct gcmmustlb *slave; unsigned int *mtlblogical; unsigned int prealloccount; unsigned int preallocsize; unsigned int preallocentries; unsigned int physical; unsigned int *logical; unsigned int i; GCENTER(GCZONE_MAPPING); /* Allocate a new prealloc block wrapper. */ block = kmalloc(sizeof(struct gcmmustlbblock), GFP_KERNEL); if (block == NULL) { GCERR("failed to allocate slave page table wrapper\n"); gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLB_ALLOC); goto exit; } /* Determine the number and the size of tables to allocate. */ prealloccount = min(GCMMU_STLB_PREALLOC_COUNT, GCMMU_MTLB_ENTRY_NUM - index.loc.mtlb); preallocsize = prealloccount * GCMMU_STLB_SIZE; preallocentries = prealloccount * GCMMU_STLB_ENTRY_NUM; GCDBG(GCZONE_MAPPING, "preallocating %d slave tables.\n", prealloccount); /* Allocate slave table pool. */ gcerror = gc_alloc_cached(&block->pages, preallocsize); if (gcerror != GCERR_NONE) { GCERR("failed to allocate slave page table\n"); gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_STLB_ALLOC); goto exit; } /* Add the block to the list. */ block->next = gcmmucontext->slavealloc; gcmmucontext->slavealloc = block; /* Get shortcuts to the pointers. */ physical = block->pages.physical; logical = block->pages.logical; /* Invalidate all slave entries. */ for (i = 0; i < preallocentries; i += 1) logical[i] = GCMMU_STLB_ENTRY_VACANT; /* Init the slaves. */ slave = &gcmmucontext->slave[index.loc.mtlb]; mtlblogical = &gcmmucontext->master.logical[index.loc.mtlb]; for (i = 0; i < prealloccount; i += 1) { mtlblogical[i] = (physical & GCMMU_MTLB_SLAVE_MASK) | GCMMU_MTLB_4K_PAGE | GCMMU_MTLB_EXCEPTION | GCMMU_MTLB_PRESENT; slave[i].physical = physical; slave[i].logical = logical; physical += GCMMU_STLB_SIZE; logical = (unsigned int *) ((unsigned char *) logical + GCMMU_STLB_SIZE); } /* Flush CPU cache. 
*/ gc_flush_region(gcmmucontext->master.physical, gcmmucontext->master.logical, index.loc.mtlb * sizeof(unsigned int), prealloccount * sizeof(unsigned int)); GCEXIT(GCZONE_MAPPING); return GCERR_NONE; exit: if (block != NULL) kfree(block); GCEXITARG(GCZONE_MAPPING, "gc%s = 0x%08X\n", (gcerror == GCERR_NONE) ? "result" : "error", gcerror); return gcerror; }