/* Allocate a fixup record, reusing a cached one from the vacant list
 * when available. */
static enum gcerror alloc_fixup(struct gcfixup **gcfixup)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gcfixup *temp;

	GCLOCK(&g_fixuplock);

	if (list_empty(&g_fixupvac)) {
		temp = kmalloc(sizeof(struct gcfixup), GFP_KERNEL);
		if (temp == NULL) {
			GCERR("out of memory.\n");
			gcerror = GCERR_SETGRP(GCERR_OODM,
					       GCERR_IOCTL_FIXUP_ALLOC);
			goto exit;
		}
	} else {
		struct list_head *head;

		head = g_fixupvac.next;
		temp = list_entry(head, struct gcfixup, link);
		list_del(head);
	}

	INIT_LIST_HEAD(&temp->link);
	*gcfixup = temp;

exit:
	/* Release the fixup lock on both the success and the error path;
	 * previously the error path returned with the lock still held. */
	GCUNLOCK(&g_fixuplock);
	return gcerror;
}
static enum gcerror get_arena(struct gcmmu *gcmmu, struct gcmmuarena **arena)
{
	enum gcerror gcerror = GCERR_NONE;
	struct gcmmuarena *temp;

	GCENTER(GCZONE_ARENA);

	GCLOCK(&gcmmu->lock);

	if (list_empty(&gcmmu->vacarena)) {
		temp = kmalloc(sizeof(struct gcmmuarena), GFP_KERNEL);
		if (temp == NULL) {
			GCERR("arena entry allocation failed.\n");
			gcerror = GCERR_SETGRP(GCERR_OODM,
					       GCERR_MMU_ARENA_ALLOC);
			goto exit;
		}
	} else {
		struct list_head *head;

		head = gcmmu->vacarena.next;
		temp = list_entry(head, struct gcmmuarena, link);
		list_del(head);
	}

	*arena = temp;

exit:
	GCUNLOCK(&gcmmu->lock);

	GCEXITARG(GCZONE_ARENA, "gc%s = 0x%08X\n",
		  (gcerror == GCERR_NONE) ? "result" : "error", gcerror);
	return gcerror;
}
static void free_buffer(struct gcbuffer *gcbuffer)
{
	/* Free fixups. */
	free_fixup_list(&gcbuffer->fixup);

	/* Free the buffer. */
	GCLOCK(&g_bufferlock);
	list_move(&gcbuffer->link, &g_buffervac);
	GCUNLOCK(&g_bufferlock);
}
enum gcerror gcmmu_destroy_context(struct gccorecontext *gccorecontext,
				   struct gcmmucontext *gcmmucontext)
{
	enum gcerror gcerror;
	struct gcmmu *gcmmu = &gccorecontext->gcmmu;
	struct list_head *head;
	struct gcmmuarena *arena;
	struct gcmmustlbblock *nextblock;

	GCENTER(GCZONE_CONTEXT);

	if (gcmmucontext == NULL) {
		gcerror = GCERR_MMU_CTXT_BAD;
		goto exit;
	}

	/* Unmap the command queue. */
	gcerror = gcqueue_unmap(gccorecontext, gcmmucontext);
	if (gcerror != GCERR_NONE)
		goto exit;

	/* Free allocated arenas. */
	while (!list_empty(&gcmmucontext->allocated)) {
		head = gcmmucontext->allocated.next;
		arena = list_entry(head, struct gcmmuarena, link);
		release_physical_pages(arena);
		list_move(head, &gcmmucontext->vacant);
	}

	/* Free slave tables. */
	while (gcmmucontext->slavealloc != NULL) {
		gc_free_cached(&gcmmucontext->slavealloc->pages);
		nextblock = gcmmucontext->slavealloc->next;
		kfree(gcmmucontext->slavealloc);
		gcmmucontext->slavealloc = nextblock;
	}

	/* Free the master table. */
	gc_free_cached(&gcmmucontext->master);

	/* Free arenas. */
	GCLOCK(&gcmmu->lock);
	list_splice_init(&gcmmucontext->vacant, &gcmmu->vacarena);
	GCUNLOCK(&gcmmu->lock);

	/* Dereference. */
	gcmmu->refcount -= 1;

	GCEXIT(GCZONE_CONTEXT);
	return GCERR_NONE;

exit:
	GCEXITARG(GCZONE_CONTEXT, "gcerror = 0x%08X\n", gcerror);
	return gcerror;
}
/* Free all records cached on the vacant unmap list. */
static void free_vacant_unmap(void)
{
	struct list_head *head;
	struct gcschedunmap *gcschedunmap;

	GCLOCK(&g_unmaplock);

	while (!list_empty(&g_unmapvac)) {
		head = g_unmapvac.next;
		gcschedunmap = list_entry(head, struct gcschedunmap, link);
		list_del(head);
		kfree(gcschedunmap);
	}

	GCUNLOCK(&g_unmaplock);
}
/* Free all records cached on the vacant buffer list. */
static void free_vacant_buffers(void)
{
	struct list_head *head;
	struct gcbuffer *gcbuffer;

	GCLOCK(&g_bufferlock);

	while (!list_empty(&g_buffervac)) {
		head = g_buffervac.next;
		gcbuffer = list_entry(head, struct gcbuffer, link);
		list_del(head);
		kfree(gcbuffer);
	}

	GCUNLOCK(&g_bufferlock);
}
/* Free all records cached on the vacant fixup list. */
static void free_vacant_fixups(void)
{
	struct list_head *head;
	struct gcfixup *gcfixup;

	GCLOCK(&g_fixuplock);

	while (!list_empty(&g_fixupvac)) {
		head = g_fixupvac.next;
		gcfixup = list_entry(head, struct gcfixup, link);
		list_del(head);
		kfree(gcfixup);
	}

	GCUNLOCK(&g_fixuplock);
}
enum bverror do_map(struct bvbuffdesc *bvbuffdesc,
		    struct gcbatch *batch,
		    struct bvbuffmap **map)
{
	static const int mapsize
		= sizeof(struct bvbuffmap)
		+ sizeof(struct bvbuffmapinfo);

	enum bverror bverror;
	struct gccontext *gccontext = get_context();
	struct bvbuffmap *bvbuffmap;
	struct bvbuffmapinfo *bvbuffmapinfo;
	struct bvphysdesc *bvphysdesc;
	bool mappedbyothers;
	struct gcimap gcimap;
	struct gcschedunmap *gcschedunmap;

	GCENTERARG(GCZONE_MAPPING, "bvbuffdesc = 0x%08X\n",
		   (unsigned int) bvbuffdesc);

	/* Lock access to the mapping list. */
	GCLOCK(&gccontext->maplock);

	/* Try to find existing mapping. */
	bvbuffmap = bvbuffdesc->map;
	while (bvbuffmap != NULL) {
		if (bvbuffmap->bv_unmap == bv_unmap)
			break;
		bvbuffmap = bvbuffmap->nextmap;
	}

	/* Not mapped yet? */
	if (bvbuffmap == NULL) {
		/* New mapping, allocate a record. */
		if (gccontext->buffmapvac == NULL) {
			bvbuffmap = gcalloc(struct bvbuffmap, mapsize);
			if (bvbuffmap == NULL) {
				BVSETERROR(BVERR_OOM,
					   "failed to allocate mapping record");
				goto fail;
			}

			bvbuffmap->structsize = sizeof(struct bvbuffmap);
			bvbuffmap->bv_unmap = bv_unmap;
			bvbuffmap->handle = (unsigned long) (bvbuffmap + 1);
		} else {
enum bverror allocate_batch(struct bvbltparams *bvbltparams,
			    struct gcbatch **gcbatch)
{
	enum bverror bverror;
	struct gccontext *gccontext = get_context();
	struct gcbatch *temp;
	struct gcbuffer *gcbuffer;

	GCENTER(GCZONE_BATCH_ALLOC);

	/* Lock access to batch management. */
	GCLOCK(&gccontext->batchlock);

	if (list_empty(&gccontext->batchvac)) {
		temp = gcalloc(struct gcbatch, sizeof(struct gcbatch));
		if (temp == NULL) {
			BVSETBLTERROR(BVERR_OOM,
				      "batch header allocation failed");
			goto exit;
		}

		GCDBG(GCZONE_BATCH_ALLOC, "allocated new batch = 0x%08X\n",
		      (unsigned int) temp);
	} else {
static void free_fixup(struct gcfixup *gcfixup)
{
	GCLOCK(&g_fixuplock);
	list_move(&gcfixup->link, &g_fixupvac);
	GCUNLOCK(&g_fixuplock);
}
static void free_schedunmap_list(struct list_head *schedunmaplist)
{
	GCLOCK(&g_unmaplock);
	list_splice_init(schedunmaplist, &g_unmapvac);
	GCUNLOCK(&g_unmaplock);
}
static void free_schedunmap(struct gcschedunmap *gcschedunmap)
{
	GCLOCK(&g_unmaplock);
	list_move(&gcschedunmap->link, &g_unmapvac);
	GCUNLOCK(&g_unmaplock);
}
static void free_fixup_list(struct list_head *fixuplist)
{
	GCLOCK(&g_fixuplock);
	list_splice_init(fixuplist, &g_fixupvac);
	GCUNLOCK(&g_fixuplock);
}