/******************************************************************************* ** ** gckVIDMEM_Destroy ** ** Destroy a gckVIDMEM object. ** ** INPUT: ** ** gckVIDMEM Memory ** Pointer to a gckVIDMEM object to destroy. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckVIDMEM_Destroy( IN gckVIDMEM Memory ) { gcuVIDMEM_NODE_PTR node, next; gctINT i; gcmkHEADER_ARG("Memory=0x%x", Memory); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM); /* Walk all sentinels. */ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i) { /* Bail out if the heap is not used. */ if (Memory->sentinel[i].VidMem.next == gcvNULL) { break; } /* Walk all the nodes until we reach the sentinel. */ for (node = Memory->sentinel[i].VidMem.next; node->VidMem.bytes != 0; node = next) { /* Save pointer to the next node. */ next = node->VidMem.next; /* Free the node. */ gcmkVERIFY_OK(gckOS_Free(Memory->os, node)); } } /* Free the mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex)); /* Mark the object as unknown. */ Memory->object.type = gcvOBJ_UNKNOWN; /* Free the gckVIDMEM object. */ gcmkVERIFY_OK(gckOS_Free(Memory->os, Memory)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
/******************************************************************************* ** ** gckVGMMU_Destroy ** ** Destroy a gckVGMMU object. ** ** INPUT: ** ** gckVGMMU Mmu ** Pointer to a gckVGMMU object. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckVGMMU_Destroy( IN gckVGMMU Mmu ) { gcmkHEADER_ARG("Mmu=0x%x", Mmu); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); /* Free the page table. */ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os, Mmu->pageTablePhysical, Mmu->pageTableLogical, Mmu->pageTableSize)); /* Delete the mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex)); /* Mark the gckVGMMU object as unknown. */ Mmu->object.type = gcvOBJ_UNKNOWN; /* Free the gckVGMMU object. */ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu)); gcmkFOOTER_NO(); /* Success. */ return gcvSTATUS_OK; }
/******************************************************************************* ** ** _Merge ** ** Merge two adjacent nodes together. ** ** INPUT: ** ** gckOS Os ** Pointer to an gckOS object. ** ** gcuVIDMEM_NODE_PTR Node ** Pointer to the first of the two nodes to merge. ** ** OUTPUT: ** ** Nothing. ** */ static gceSTATUS _Merge( IN gckOS Os, IN gcuVIDMEM_NODE_PTR Node ) { gcuVIDMEM_NODE_PTR node; /* Save pointer to next node. */ node = Node->VidMem.next; /* This is a good time to make sure the heap is not corrupted. */ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset) { /* Corrupted heap. */ gcmkASSERT( Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset); return gcvSTATUS_HEAP_CORRUPTED; } /* Adjust byte count. */ Node->VidMem.bytes += node->VidMem.bytes; /* Unlink next node from linked list. */ Node->VidMem.next = node->VidMem.next; Node->VidMem.nextFree = node->VidMem.nextFree; Node->VidMem.next->VidMem.prev = Node->VidMem.nextFree->VidMem.prevFree = Node; /* Free next node. */ return gckOS_Free(Os, node); }
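/* Illustrative only: a hedged sketch of how a free path might use _Merge to
** coalesce a just-freed node with its neighbours. It assumes that a node is
** considered free when its nextFree pointer is non-NULL and that sentinel
** nodes carry bytes == 0 (as set up in gckVIDMEM_Construct below); the helper
** name _CoalesceSketch is hypothetical and not part of the driver. */
static gceSTATUS
_CoalesceSketch(
    IN gckOS Os,
    IN gcuVIDMEM_NODE_PTR Node
    )
{
    gceSTATUS status;

    /* Merge with the next node if it is free and not the sentinel. */
    if ((Node->VidMem.next->VidMem.bytes != 0)
    &&  (Node->VidMem.next->VidMem.nextFree != gcvNULL)
    )
    {
        gcmkONERROR(_Merge(Os, Node));
    }

    /* Merge with the previous node if it is free and not the sentinel. */
    if ((Node->VidMem.prev->VidMem.bytes != 0)
    &&  (Node->VidMem.prev->VidMem.nextFree != gcvNULL)
    )
    {
        gcmkONERROR(_Merge(Os, Node->VidMem.prev));
    }

    /* Success. */
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    return status;
}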
/******************************************************************************* ** ** gckVGKERNEL_Destroy ** ** Destroy a gckVGKERNEL object. ** ** INPUT: ** ** gckVGKERNEL Kernel ** Pointer to a gckVGKERNEL object to destroy. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckVGKERNEL_Destroy( IN gckVGKERNEL Kernel ) { gceSTATUS status; gcmkHEADER_ARG("Kernel=0x%x", Kernel); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); do { /* Destroy the gckVGMMU object. */ if (Kernel->mmu != gcvNULL) { gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu)); Kernel->mmu = gcvNULL; } /* Destroy the gckVGCOMMAND object. */ if (Kernel->command != gcvNULL) { gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command)); Kernel->command = gcvNULL; } /* Destroy the gckVGINTERRUPT object. */ if (Kernel->interrupt != gcvNULL) { gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt)); Kernel->interrupt = gcvNULL; } /* Destroy the gckVGHARDWARE object. */ if (Kernel->hardware != gcvNULL) { gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware)); Kernel->hardware = gcvNULL; } /* Mark the gckVGKERNEL object as unknown. */ Kernel->object.type = gcvOBJ_UNKNOWN; /* Free the gckVGKERNEL object. */ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel)); } while (gcvFALSE); gcmkFOOTER(); /* Return status. */ return status; }
/******************************************************************************* ** ** gckKERNEL_Destroy ** ** Destroy a gckKERNEL object. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object to destroy. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckKERNEL_Destroy( IN gckKERNEL Kernel ) { gcmkHEADER_ARG("Kernel=0x%x", Kernel); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); /* Destroy the gckMMU object. */ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu)); /* Destroy the gckEVENT object. */ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->event)); /* Destroy the gckCOMMAND object. */ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command)); /* Destroy the gckHARDWARE object. */ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware)); /* Destroy the client atom. */ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients)); /* Mark the gckKERNEL object as unknown. */ Kernel->object.type = gcvOBJ_UNKNOWN; #if MRVL_LOW_POWER_MODE_DEBUG gcmkVERIFY_OK( gckOS_Free(Kernel->os, Kernel->kernelMSG)); #endif /* Free the gckKERNEL object. */ gcmkVERIFY_OK(gckOS_Free(Kernel->os, Kernel)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
static gceSTATUS _AddMap( IN gckOS Os, IN gctPOINTER Source, IN gctSIZE_T Bytes, OUT gctPOINTER * Destination, IN OUT gcsMAPPED_PTR * Stack ) { gcsMAPPED_PTR map = gcvNULL; gceSTATUS status; /* Don't try to map NULL pointers. */ if (Source == gcvNULL) { *Destination = gcvNULL; return gcvSTATUS_OK; } /* Allocate the gcsMAPPED structure. */ gcmkONERROR( gckOS_Allocate(Os, gcmSIZEOF(*map), (gctPOINTER *) &map)); /* Map the user pointer into kernel addressing space. */ gcmkONERROR( gckOS_MapUserPointer(Os, Source, Bytes, Destination)); /* Save mapping. */ map->pointer = Source; map->kernelPointer = *Destination; map->bytes = Bytes; /* Push structure on top of the stack. */ map->next = *Stack; *Stack = map; /* Success. */ return gcvSTATUS_OK; OnError: if (gcmIS_ERROR(status) && (map != gcvNULL)) { /* Roll back on error. */ gcmkVERIFY_OK(gckOS_Free(Os, map)); } /* Return the status. */ return status; }
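/* Illustrative only: the inverse of _AddMap. gckCOMMAND_Commit below unwinds
** its mapping stack with exactly this pop/unmap/free pattern; the helper name
** _RemoveMapsSketch is hypothetical and not part of the driver. */
static void
_RemoveMapsSketch(
    IN gckOS Os,
    IN OUT gcsMAPPED_PTR * Stack
    )
{
    while (*Stack != gcvNULL)
    {
        /* Pop the top structure off the stack. */
        gcsMAPPED_PTR map = *Stack;
        *Stack = map->next;

        /* Unmap the user pointer from the kernel address space. */
        gcmkVERIFY_OK(
            gckOS_UnmapUserPointer(Os, map->pointer, map->bytes, map->kernelPointer));

        /* Free the gcsMAPPED structure. */
        gcmkVERIFY_OK(gckOS_Free(Os, map));
    }
}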
/******************************************************************************* ** ** gckVIDMEM_DestroyVirtual ** ** Destroy a gcuVIDMEM_NODE union for virtual memory. ** ** INPUT: ** ** gcuVIDMEM_NODE_PTR Node ** Pointer to a gcuVIDMEM_NODE union. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckVIDMEM_DestroyVirtual( IN gcuVIDMEM_NODE_PTR Node ) { gckOS os; gcmkHEADER_ARG("Node=0x%x", Node); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL); /* Extract the gckOS object pointer. */ os = Node->Virtual.kernel->os; gcmkVERIFY_OBJECT(os, gcvOBJ_OS); #ifdef __QNXNTO__ /* Unregister. */ gcmkVERIFY_OK( gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node)); /* Free virtual memory. */ gcmkVERIFY_OK( gckOS_FreePagedMemory(os, Node->Virtual.physical, Node->Virtual.bytes)); #endif /* Delete the mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex)); if (Node->Virtual.pageTable != gcvNULL) { /* Free the pages. */ gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.kernel->mmu, Node->Virtual.pageTable, Node->Virtual.pageCount)); } /* Delete the gcuVIDMEM_NODE union. */ gcmkVERIFY_OK(gckOS_Free(os, Node)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
/******************************************************************************* ** ** gckCOMMAND_Destroy ** ** Destroy an gckCOMMAND object. ** ** INPUT: ** ** gckCOMMAND Command ** Pointer to an gckCOMMAND object to destroy. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckCOMMAND_Destroy( IN gckCOMMAND Command ) { gctINT i; gcmkHEADER_ARG("Command=0x%x", Command); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND); /* Stop the command queue. */ gcmkVERIFY_OK(gckCOMMAND_Stop(Command)); for (i = 0; i < gcdCOMMAND_QUEUES; ++i) { gcmkASSERT(Command->queues[i].signal != gcvNULL); gcmkVERIFY_OK( gckOS_DestroySignal(Command->os, Command->queues[i].signal)); gcmkASSERT(Command->queues[i].logical != gcvNULL); gcmkVERIFY_OK( gckOS_FreeNonPagedMemory(Command->os, Command->pageSize, Command->queues[i].physical, Command->queues[i].logical)); } /* Delete the context switching mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext)); /* Delete the command queue mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue)); /* Mark object as unknown. */ Command->object.type = gcvOBJ_UNKNOWN; /* Free the gckCOMMAND object. */ gcmkVERIFY_OK(gckOS_Free(Command->os, Command)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
/******************************************************************************* ** ** gckVGMMU_Construct ** ** Construct a new gckVGMMU object. ** ** INPUT: ** ** gckVGKERNEL Kernel ** Pointer to an gckVGKERNEL object. ** ** gctSIZE_T MmuSize ** Number of bytes for the page table. ** ** OUTPUT: ** ** gckVGMMU * Mmu ** Pointer to a variable that receives the gckVGMMU object pointer. */ gceSTATUS gckVGMMU_Construct( IN gckVGKERNEL Kernel, IN gctSIZE_T MmuSize, OUT gckVGMMU * Mmu ) { gckOS os; gckVGHARDWARE hardware; gceSTATUS status; gckVGMMU mmu; gctUINT32 * pageTable; gctUINT32 i; gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); gcmkVERIFY_ARGUMENT(MmuSize > 0); gcmkVERIFY_ARGUMENT(Mmu != gcvNULL); /* Extract the gckOS object pointer. */ os = Kernel->os; gcmkVERIFY_OBJECT(os, gcvOBJ_OS); /* Extract the gckVGHARDWARE object pointer. */ hardware = Kernel->hardware; gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); /* Allocate memory for the gckVGMMU object. */ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu); if (status < 0) { /* Error. */ gcmkFATAL( "%s(%d): could not allocate gckVGMMU object.", __FUNCTION__, __LINE__ ); gcmkFOOTER(); return status; } /* Initialize the gckVGMMU object. */ mmu->object.type = gcvOBJ_MMU; mmu->os = os; mmu->hardware = hardware; /* Create the mutex. */ status = gckOS_CreateMutex(os, &mmu->mutex); if (status < 0) { /* Roll back. */ mmu->object.type = gcvOBJ_UNKNOWN; gcmkVERIFY_OK(gckOS_Free(os, mmu)); gcmkFOOTER(); /* Error. */ return status; } /* Allocate the page table. */ mmu->pageTableSize = MmuSize; status = gckOS_AllocateContiguous(os, gcvFALSE, &mmu->pageTableSize, &mmu->pageTablePhysical, &mmu->pageTableLogical); if (status < 0) { /* Roll back. */ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex)); mmu->object.type = gcvOBJ_UNKNOWN; gcmkVERIFY_OK(gckOS_Free(os, mmu)); /* Error. */ gcmkFATAL( "%s(%d): could not allocate page table.", __FUNCTION__, __LINE__ ); gcmkFOOTER(); return status; } /* Compute number of entries in page table. */ mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32); mmu->entry = 0; /* Mark the entire page table as available. */ pageTable = (gctUINT32 *) mmu->pageTableLogical; for (i = 0; i < mmu->entryCount; i++) { pageTable[i] = (gctUINT32)~0; } /* Set page table address. */ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical); if (status < 0) { /* Free the page table. */ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os, mmu->pageTablePhysical, mmu->pageTableLogical, mmu->pageTableSize)); /* Roll back. */ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex)); mmu->object.type = gcvOBJ_UNKNOWN; gcmkVERIFY_OK(gckOS_Free(os, mmu)); /* Error. */ gcmkFATAL( "%s(%d): could not program page table.", __FUNCTION__, __LINE__ ); gcmkFOOTER(); return status; } /* Return the gckVGMMU object pointer. */ *Mmu = mmu; gcmkTRACE_ZONE( gcvLEVEL_INFO, gcvZONE_MMU, "%s(%d): %u entries at %p.(0x%08X)\n", __FUNCTION__, __LINE__, mmu->entryCount, mmu->pageTableLogical, mmu->pageTablePhysical ); gcmkFOOTER_NO(); /* Success. */ return gcvSTATUS_OK; }
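/* Worked example (illustrative): gckVGKERNEL_Construct below requests a page
** table of gcmKB2BYTES(32) == 32768 bytes. Assuming gckOS_AllocateContiguous
** does not round that size up, gckVGMMU_Construct ends up with
** 32768 / sizeof(gctUINT32) == 8192 page table entries, each initialized to
** ~0 (unmapped). */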
/******************************************************************************* ** ** gckVIDMEM_Construct ** ** Construct a new gckVIDMEM object. ** ** INPUT: ** ** gckOS Os ** Pointer to a gckOS object. ** ** gctUINT32 BaseAddress ** Base address for the video memory heap. ** ** gctSIZE_T Bytes ** Number of bytes in the video memory heap. ** ** gctSIZE_T Threshold ** Minimum number of bytes beyond an allocation before the node is ** split. Can be used as a minimum alignment requirement. ** ** gctSIZE_T BankSize ** Number of bytes per physical memory bank. Used by bank ** optimization. ** ** OUTPUT: ** ** gckVIDMEM * Memory ** Pointer to a variable that will hold the pointer to the gckVIDMEM ** object. */ gceSTATUS gckVIDMEM_Construct( IN gckOS Os, IN gctUINT32 BaseAddress, IN gctSIZE_T Bytes, IN gctSIZE_T Threshold, IN gctSIZE_T BankSize, OUT gckVIDMEM * Memory ) { gckVIDMEM memory = gcvNULL; gceSTATUS status; gcuVIDMEM_NODE_PTR node; gctINT i, banks = 0; gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu " "BankSize=%lu", Os, BaseAddress, Bytes, Threshold, BankSize); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); gcmkVERIFY_ARGUMENT(Bytes > 0); gcmkVERIFY_ARGUMENT(Memory != gcvNULL); /* Allocate the gckVIDMEM object. */ gcmkONERROR( gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), (gctPOINTER *) &memory)); /* Initialize the gckVIDMEM object. */ memory->object.type = gcvOBJ_VIDMEM; memory->os = Os; /* Set video memory heap information. */ memory->baseAddress = BaseAddress; memory->bytes = Bytes; memory->freeBytes = Bytes; memory->threshold = Threshold; memory->mutex = gcvNULL; BaseAddress = 0; /* Walk all possible banks. */ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i) { gctSIZE_T bytes; if (BankSize == 0) { /* Use all bytes for the first bank. */ bytes = Bytes; } else { /* Compute number of bytes for this bank. */ bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress; if (bytes > Bytes) { /* Make sure we don't exceed the total number of bytes. */ bytes = Bytes; } } if (bytes == 0) { /* Mark the heap as unused. */ memory->sentinel[i].VidMem.next = memory->sentinel[i].VidMem.prev = memory->sentinel[i].VidMem.nextFree = memory->sentinel[i].VidMem.prevFree = gcvNULL; continue; } /* Allocate one gcuVIDMEM_NODE union. */ gcmkONERROR( gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER *) &node)); /* Initialize gcuVIDMEM_NODE union. */ node->VidMem.memory = memory; node->VidMem.next = node->VidMem.prev = node->VidMem.nextFree = node->VidMem.prevFree = &memory->sentinel[i]; node->VidMem.offset = BaseAddress; node->VidMem.bytes = bytes; node->VidMem.alignment = 0; node->VidMem.physical = 0; node->VidMem.pool = gcvPOOL_UNKNOWN; node->VidMem.locked = 0; #ifdef __QNXNTO__ node->VidMem.logical = gcvNULL; node->VidMem.handle = 0; #endif /* Initialize the linked list of nodes. */ memory->sentinel[i].VidMem.next = memory->sentinel[i].VidMem.prev = memory->sentinel[i].VidMem.nextFree = memory->sentinel[i].VidMem.prevFree = node; /* Mark sentinel. */ memory->sentinel[i].VidMem.bytes = 0; /* Adjust address for next bank. */ BaseAddress += bytes; Bytes -= bytes; banks ++; } /* Assign all the bank mappings. 
*/ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1; memory->mapping[gcvSURF_BITMAP] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_DEPTH] = banks - 1; memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_TEXTURE] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_VERTEX] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_INDEX] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_TILE_STATUS] = banks - 1; if (banks > 1) --banks; memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0; gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] INDEX: bank %d", memory->mapping[gcvSURF_INDEX]); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] VERTEX: bank %d", memory->mapping[gcvSURF_VERTEX]); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] TEXTURE: bank %d", memory->mapping[gcvSURF_TEXTURE]); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] RENDER_TARGET: bank %d", memory->mapping[gcvSURF_RENDER_TARGET]); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] DEPTH: bank %d", memory->mapping[gcvSURF_DEPTH]); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "[GALCORE] TILE_STATUS: bank %d", memory->mapping[gcvSURF_TILE_STATUS]); /* Allocate the mutex. */ gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex)); /* Return pointer to the gckVIDMEM object. */ *Memory = memory; /* Success. */ gcmkFOOTER_ARG("*Memory=0x%x", *Memory); return gcvSTATUS_OK; OnError: /* Roll back. */ if (memory != gcvNULL) { if (memory->mutex != gcvNULL) { /* Delete the mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex)); } for (i = 0; i < banks; ++i) { /* Free the heap. */ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL); gcmkVERIFY_OK(gckOS_Free(Os, memory->sentinel[i].VidMem.next)); } /* Free the object. */ gcmkVERIFY_OK(gckOS_Free(Os, memory)); } /* Return the status. */ gcmkFOOTER(); return status; }
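/* Worked example (illustrative): the mapping table above resolves as follows.
** With BankSize == 0 only one bank is created (banks == 1), so every surface
** type maps to bank 0. With, say, four banks, RENDER_TARGET and BITMAP land in
** bank 3, DEPTH and HIERARCHICAL_DEPTH in bank 2, TEXTURE in bank 1, and
** VERTEX, INDEX and TILE_STATUS all in bank 0, because the bank index stops
** decrementing once it reaches 1. */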
/******************************************************************************* ** ** gckVIDMEM_ConstructVirtual ** ** Construct a new gcuVIDMEM_NODE union for virtual memory. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** gctBOOL Contiguous ** Whether the underlying pages need to be physically contiguous. ** ** gctSIZE_T Bytes ** Number of bytes to allocate. ** ** OUTPUT: ** ** gcuVIDMEM_NODE_PTR * Node ** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer. */ gceSTATUS gckVIDMEM_ConstructVirtual( IN gckKERNEL Kernel, IN gctBOOL Contiguous, IN gctSIZE_T Bytes, #ifdef __QNXNTO__ IN gctHANDLE Handle, #endif OUT gcuVIDMEM_NODE_PTR * Node ) { gckOS os; gceSTATUS status; gcuVIDMEM_NODE_PTR node = gcvNULL; gcmkHEADER_ARG("Kernel=0x%x Bytes=%lu", Kernel, Bytes); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); gcmkVERIFY_ARGUMENT(Bytes > 0); gcmkVERIFY_ARGUMENT(Node != gcvNULL); #ifdef __QNXNTO__ gcmkVERIFY_ARGUMENT(Handle != gcvNULL); #endif /* Extract the gckOS object pointer. */ os = Kernel->os; gcmkVERIFY_OBJECT(os, gcvOBJ_OS); /* Allocate a gcuVIDMEM_NODE union. */ gcmkONERROR( gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER *) &node)); /* Initialize gcuVIDMEM_NODE union for virtual memory. */ node->Virtual.kernel = Kernel; node->Virtual.contiguous = Contiguous; node->Virtual.locked = 0; node->Virtual.logical = gcvNULL; node->Virtual.pageTable = gcvNULL; node->Virtual.mutex = gcvNULL; #ifdef __QNXNTO__ node->Virtual.next = gcvNULL; node->Virtual.unlockPending = gcvFALSE; node->Virtual.freePending = gcvFALSE; node->Virtual.handle = Handle; #else node->Virtual.pending = gcvFALSE; #endif /* Create the mutex. */ gcmkONERROR( gckOS_CreateMutex(os, &node->Virtual.mutex)); /* Allocate the virtual memory. */ gcmkONERROR( gckOS_AllocatePagedMemoryEx(os, node->Virtual.contiguous, node->Virtual.bytes = Bytes, &node->Virtual.physical)); #ifdef __QNXNTO__ /* Register. */ gckMMU_InsertNode(Kernel->mmu, node); #endif /* Return pointer to the gcuVIDMEM_NODE union. */ *Node = node; gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "Created virtual node 0x%x for %u bytes @ 0x%x", node, Bytes, node->Virtual.physical); /* Success. */ gcmkFOOTER_ARG("*Node=0x%x", *Node); return gcvSTATUS_OK; OnError: /* Roll back. */ if (node != gcvNULL) { if (node->Virtual.mutex != gcvNULL) { /* Destroy the mutex. */ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex)); } /* Free the structure. */ gcmkVERIFY_OK(gckOS_Free(os, node)); } /* Return the status. */ gcmkFOOTER(); return status; }
/******************************************************************************* ** ** gckVGKERNEL_Construct ** ** Construct a new gckVGKERNEL object. ** ** INPUT: ** ** gckOS Os ** Pointer to a gckOS object. ** ** gctPOINTER Context ** Pointer to a driver defined context. ** ** gckKERNEL inKernel ** Pointer to the gckKERNEL object associated with this VG kernel. ** ** OUTPUT: ** ** gckVGKERNEL * Kernel ** Pointer to a variable that will hold the pointer to the gckVGKERNEL ** object. */ gceSTATUS gckVGKERNEL_Construct( IN gckOS Os, IN gctPOINTER Context, IN gckKERNEL inKernel, OUT gckVGKERNEL * Kernel ) { gceSTATUS status; gckVGKERNEL kernel = gcvNULL; gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); gcmkVERIFY_ARGUMENT(Kernel != gcvNULL); do { /* Allocate the gckVGKERNEL object. */ gcmkERR_BREAK(gckOS_Allocate( Os, sizeof(struct _gckVGKERNEL), (gctPOINTER *) &kernel )); /* Initialize the gckVGKERNEL object. */ kernel->object.type = gcvOBJ_KERNEL; kernel->os = Os; kernel->context = Context; kernel->hardware = gcvNULL; kernel->interrupt = gcvNULL; kernel->command = gcvNULL; kernel->mmu = gcvNULL; kernel->kernel = inKernel; /* Construct the gckVGHARDWARE object. */ gcmkERR_BREAK(gckVGHARDWARE_Construct( Os, &kernel->hardware )); /* Set pointer to gckVGKERNEL object in gckVGHARDWARE object. */ kernel->hardware->kernel = kernel; /* Construct the gckVGINTERRUPT object. */ gcmkERR_BREAK(gckVGINTERRUPT_Construct( kernel, &kernel->interrupt )); /* Construct the gckVGCOMMAND object. */ gcmkERR_BREAK(gckVGCOMMAND_Construct( kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command )); /* Construct the gckVGMMU object. */ gcmkERR_BREAK(gckVGMMU_Construct( kernel, gcmKB2BYTES(32), &kernel->mmu )); /* Return pointer to the gckVGKERNEL object. */ *Kernel = kernel; gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel); /* Success. */ return gcvSTATUS_OK; } while (gcvFALSE); /* Roll back. */ if (kernel != gcvNULL) { if (kernel->mmu != gcvNULL) { gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu)); } if (kernel->command != gcvNULL) { gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command)); } if (kernel->interrupt != gcvNULL) { gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt)); } if (kernel->hardware != gcvNULL) { gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware)); } gcmkVERIFY_OK(gckOS_Free(Os, kernel)); } gcmkFOOTER(); /* Return status. */ return status; }
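/******************************************************************************* ** ** gckKERNEL_Construct ** ** Construct a new gckKERNEL object. ** ** INPUT: ** ** gckOS Os ** Pointer to a gckOS object. ** ** gctPOINTER Context ** Pointer to a driver defined context. ** ** OUTPUT: ** ** gckKERNEL * Kernel ** Pointer to a variable that will hold the pointer to the gckKERNEL ** object. */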
gceSTATUS gckKERNEL_Construct( IN gckOS Os, IN gctPOINTER Context, OUT gckKERNEL * Kernel ) { gckKERNEL kernel = gcvNULL; gceSTATUS status; gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); gcmkVERIFY_ARGUMENT(Kernel != gcvNULL); /* Allocate the gckKERNEL object. */ gcmkONERROR( gckOS_Allocate(Os, gcmSIZEOF(struct _gckKERNEL), (gctPOINTER *) &kernel)); #if MRVL_LOW_POWER_MODE_DEBUG gcmERR_RETURN( gckOS_Allocate(Os, 0x1000000, (gctPOINTER *) &kernel->kernelMSG)); kernel->msgLen = 0; #endif /* Zero the object pointers. */ kernel->hardware = gcvNULL; kernel->command = gcvNULL; kernel->event = gcvNULL; kernel->mmu = gcvNULL; /* Initialize the gckKERNEL object. */ kernel->object.type = gcvOBJ_KERNEL; kernel->os = Os; /* Save context. */ kernel->context = Context; /* Construct atom holding number of clients. */ kernel->atomClients = gcvNULL; gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients)); #if gcdSECURE_USER kernel->cacheSlots = 0; kernel->cacheTimeStamp = 0; #endif /* Construct the gckHARDWARE object. */ gcmkONERROR( gckHARDWARE_Construct(Os, &kernel->hardware)); /* Set pointer to gckKERNEL object in gckHARDWARE object. */ kernel->hardware->kernel = kernel; /* Initialize the hardware. */ gcmkONERROR( gckHARDWARE_InitializeHardware(kernel->hardware)); /* Construct the gckCOMMAND object. */ gcmkONERROR( gckCOMMAND_Construct(kernel, &kernel->command)); /* Construct the gckEVENT object. */ gcmkONERROR( gckEVENT_Construct(kernel, &kernel->event)); /* Construct the gckMMU object. */ gcmkONERROR( gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu)); kernel->notifyIdle = gcvTRUE; /* gcvFALSE; */ #if VIVANTE_PROFILER /* Initialize profile setting */ #if defined ANDROID kernel->profileEnable = gcvFALSE; #else kernel->profileEnable = gcvTRUE; #endif gcmkVERIFY_OK( gckOS_MemCopy(kernel->profileFileName, DEFAULT_PROFILE_FILE_NAME, gcmSIZEOF(DEFAULT_PROFILE_FILE_NAME) + 1)); #endif /* Record the kernel driver version. */ kernel->version = _GC_VERSION_STRING_; /* Return pointer to the gckKERNEL object. */ *Kernel = kernel; /* Success. */ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel); return gcvSTATUS_OK; OnError: if (kernel != gcvNULL) { if (kernel->event != gcvNULL) { gcmkVERIFY_OK(gckEVENT_Destroy(kernel->event)); } if (kernel->command != gcvNULL) { gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command)); } if (kernel->hardware != gcvNULL) { gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware)); } if (kernel->atomClients != gcvNULL) { gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients)); } #if MRVL_LOW_POWER_MODE_DEBUG gcmkVERIFY_OK( gckOS_Free(Os, kernel->kernelMSG)); #endif gcmkVERIFY_OK(gckOS_Free(Os, kernel)); } /* Return the error. */ gcmkFOOTER(); return status; }
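/* Illustrative only: a minimal sketch of pairing gckKERNEL_Construct with
** gckKERNEL_Destroy during device bring-up and teardown, assuming a valid
** gckOS handle and an opaque driver-defined context; the function name
** _KernelBringUpSketch is hypothetical and error handling is reduced to the
** bare minimum. */
static gceSTATUS
_KernelBringUpSketch(
    IN gckOS Os,
    IN gctPOINTER Context
    )
{
    gceSTATUS status;
    gckKERNEL kernel;

    /* Construct the kernel object (hardware, command queue, event, MMU). */
    gcmkONERROR(gckKERNEL_Construct(Os, Context, &kernel));

    /* ... the device is now ready to accept command buffer commits ... */

    /* Tear everything down again. */
    status = gckKERNEL_Destroy(kernel);

OnError:
    /* Return the status. */
    return status;
}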
/******************************************************************************* ** ** gckCOMMAND_Commit ** ** Commit a command buffer to the command queue. ** ** INPUT: ** ** gckCOMMAND Command ** Pointer to an gckCOMMAND object. ** ** gcoCMDBUF CommandBuffer ** Pointer to an gcoCMDBUF object. ** ** gcoCONTEXT Context ** Pointer to an gcoCONTEXT object. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckCOMMAND_Commit( IN gckCOMMAND Command, IN gcoCMDBUF CommandBuffer, IN gcoCONTEXT Context, IN gctHANDLE Process ) { gcoCMDBUF commandBuffer; gcoCONTEXT context; gckHARDWARE hardware; gceSTATUS status; gctPOINTER initialLink, link; gctSIZE_T bytes, initialSize, lastRun; gcoCMDBUF buffer; gctPOINTER wait; gctSIZE_T waitSize; gctUINT32 offset; gctPOINTER fetchAddress; gctSIZE_T fetchSize; gctUINT8_PTR logical; gcsMAPPED_PTR stack = gcvNULL; gctINT acquired = 0; #if gcdSECURE_USER gctUINT32_PTR hint; #endif #if gcdDUMP_COMMAND gctPOINTER dataPointer; gctSIZE_T dataBytes; #endif gctPOINTER flushPointer; gctSIZE_T flushSize; gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x Context=0x%x", Command, CommandBuffer, Context); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND); #if gcdNULL_DRIVER == 2 /* Do nothing with infinite hardware. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; #endif gcmkONERROR( _AddMap(Command->os, CommandBuffer, gcmSIZEOF(struct _gcoCMDBUF), (gctPOINTER *) &commandBuffer, &stack)); gcmkVERIFY_OBJECT(commandBuffer, gcvOBJ_COMMANDBUFFER); gcmkONERROR( _AddMap(Command->os, Context, gcmSIZEOF(struct _gcoCONTEXT), (gctPOINTER *) &context, &stack)); gcmkVERIFY_OBJECT(context, gcvOBJ_CONTEXT); /* Extract the gckHARDWARE and gckEVENT objects. */ hardware = Command->kernel->hardware; gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); /* Acquire the context switching mutex. */ gcmkONERROR( gckOS_AcquireMutex(Command->os, Command->mutexContext, gcvINFINITE)); ++acquired; /* Reserved slot in the context or command buffer. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, gcvNULL, 0, &bytes)); /* Test if we need to switch to this context. */ if ((context->id != 0) && (context->id != Command->currentContext) ) { /* Map the context buffer.*/ gcmkONERROR( _AddMap(Command->os, context->logical, context->bufferSize, (gctPOINTER *) &logical, &stack)); #if gcdSECURE_USER /* Map the hint array.*/ gcmkONERROR( _AddMap(Command->os, context->hintArray, context->hintCount * gcmSIZEOF(gctUINT32), (gctPOINTER *) &hint, &stack)); /* Loop while we have valid hints. */ while (*hint != 0) { /* Map handle into physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif /* See if we have to check pipes. */ if (context->pipe2DIndex != 0) { /* See if we are in the correct pipe. */ if (context->initialPipe == Command->pipeSelect) { gctUINT32 reserved = bytes; gctUINT8_PTR nop = logical; /* Already in the correct pipe, fill context buffer with NOP. */ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } else { /* Switch to the correct pipe. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, logical, context->initialPipe, &bytes)); } } /* Save initial link pointer. */ initialLink = logical; initialSize = context->bufferSize; #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, initialLink, initialSize, gcvTRUE, gcvFALSE ); #endif /* Save initial buffer to flush. 
*/ flushPointer = initialLink; flushSize = initialSize; /* Save pointer to next link. */ gcmkONERROR( _AddMap(Command->os, context->link, 8, &link, &stack)); /* Start parsing CommandBuffer. */ buffer = commandBuffer; /* Mark context buffer as used. */ if (context->inUse != gcvNULL) { gctBOOL_PTR inUse; gcmkONERROR( _AddMap(Command->os, (gctPOINTER) context->inUse, gcmSIZEOF(gctBOOL), (gctPOINTER *) &inUse, &stack)); *inUse = gcvTRUE; } } else { /* Test if this is a new context. */ if (context->id == 0) { /* Generate unique ID for the context buffer. */ context->id = ++ Command->contextCounter; if (context->id == 0) { /* Context counter overflow (wow!) */ gcmkONERROR(gcvSTATUS_TOO_COMPLEX); } } /* Map the command buffer. */ gcmkONERROR( _AddMap(Command->os, commandBuffer->logical, commandBuffer->offset, (gctPOINTER *) &logical, &stack)); #if gcdSECURE_USER /* Map the hint table. */ gcmkONERROR( _AddMap(Command->os, commandBuffer->hintCommit, commandBuffer->offset - commandBuffer->startOffset, (gctPOINTER *) &hint, &stack)); /* Walk while we have valid hints. */ while (*hint != 0) { /* Map the handle to a physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif if (context->entryPipe == Command->pipeSelect) { gctUINT32 reserved = Command->reservedHead; gctUINT8_PTR nop = logical + commandBuffer->startOffset; /* Already in the correct pipe, fill context buffer with NOP. */ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } else { /* Switch to the correct pipe. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, logical + commandBuffer->startOffset, context->entryPipe, &bytes)); } /* Save initial link pointer. */ initialLink = logical + commandBuffer->startOffset; initialSize = commandBuffer->offset - commandBuffer->startOffset + Command->reservedTail; #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, initialLink, initialSize, gcvFALSE, gcvFALSE ); #endif /* Save initial buffer to flush. */ flushPointer = initialLink; flushSize = initialSize; /* Save pointer to next link. */ link = logical + commandBuffer->offset; /* No more data. */ buffer = gcvNULL; } #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, Command->wait, initialLink); #endif #if gcdDUMP_COMMAND dataPointer = initialLink; dataBytes = initialSize; #endif /* Loop through all remaining command buffers. */ if (buffer != gcvNULL) { /* Map the command buffer. */ gcmkONERROR( _AddMap(Command->os, buffer->logical, buffer->offset + Command->reservedTail, (gctPOINTER *) &logical, &stack)); #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, (gctUINT32_PTR)logical, buffer->offset + Command->reservedTail, gcvFALSE, gcvFALSE ); #endif #if gcdSECURE_USER /* Map the hint table. */ gcmkONERROR( _AddMap(Command->os, buffer->hintCommit, buffer->offset - buffer->startOffset, (gctPOINTER *) &hint, &stack)); /* Walk while we have valid hints. */ while (*hint != 0) { /* Map the handle to a physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif /* First slot becomes a NOP. */ { gctUINT32 reserved = Command->reservedHead; gctUINT8_PTR nop = logical + buffer->startOffset; /* Already in the correct pipe, fill context buffer with NOP. 
*/ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } /* Generate the LINK to this command buffer. */ gcmkONERROR( gckHARDWARE_Link(hardware, link, logical + buffer->startOffset, buffer->offset - buffer->startOffset + Command->reservedTail, &bytes)); #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, link, (gctUINT32_PTR)logical); #endif /* Flush the initial buffer. */ gcmkONERROR(gckOS_CacheFlush(Command->os, Process, flushPointer, flushSize)); /* Save new flush pointer. */ flushPointer = logical + buffer->startOffset; flushSize = buffer->offset - buffer->startOffset + Command->reservedTail; #if gcdDUMP_COMMAND _DumpCommand(Command, dataPointer, dataBytes); dataPointer = logical + buffer->startOffset; dataBytes = buffer->offset - buffer->startOffset + Command->reservedTail; #endif /* Save pointer to next link. */ link = logical + buffer->offset; } /* Compute number of bytes required for WAIT/LINK. */ gcmkONERROR( gckHARDWARE_WaitLink(hardware, gcvNULL, Command->offset, &bytes, gcvNULL, gcvNULL)); lastRun = bytes; /* Grab the command queue mutex. */ gcmkONERROR( gckOS_AcquireMutex(Command->os, Command->mutexQueue, gcvINFINITE)); ++acquired; if (Command->kernel->notifyIdle) { /* Increase the commit stamp */ Command->commitStamp++; /* Set busy if idle */ if (Command->idle) { Command->idle = gcvFALSE; gcmkVERIFY_OK(gckOS_NotifyIdle(Command->os, gcvFALSE)); } } /* Compute number of bytes left in current command queue. */ bytes = Command->pageSize - Command->offset; if (bytes < lastRun) { /* Create a new command queue. */ gcmkONERROR(_NewQueue(Command, gcvTRUE)); /* Adjust run size with any extra commands inserted. */ lastRun += Command->offset; } /* Get current offset. */ offset = Command->offset; /* Append WAIT/LINK in command queue. */ bytes = Command->pageSize - offset; gcmkONERROR( gckHARDWARE_WaitLink(hardware, (gctUINT8 *) Command->logical + offset, offset, &bytes, &wait, &waitSize)); /* Flush the cache for the wait/link. */ gcmkONERROR(gckOS_CacheFlush(Command->os, gcvNULL, (gctUINT8 *) Command->logical + offset, bytes)); #if gcdDUMP_COMMAND _DumpCommand(Command, (gctUINT8 *) Command->logical + offset, bytes); #endif /* Adjust offset. */ offset += bytes; if (Command->newQueue) { /* Compute fetch location and size for a new command queue. */ fetchAddress = Command->logical; fetchSize = offset; } else { /* Compute fetch location and size for an existing command queue. */ fetchAddress = (gctUINT8 *) Command->logical + Command->offset; fetchSize = offset - Command->offset; } bytes = 8; /* Link in WAIT/LINK. */ gcmkONERROR( gckHARDWARE_Link(hardware, link, fetchAddress, fetchSize, &bytes)); #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, link, fetchAddress); #endif /* Flush the cache for the command buffer. */ gcmkONERROR(gckOS_CacheFlush(Command->os, Process, flushPointer, flushSize)); #if gcdDUMP_COMMAND _DumpCommand(Command, dataPointer, dataBytes); #endif /* Execute the entire sequence. */ gcmkONERROR( gckHARDWARE_Link(hardware, Command->wait, initialLink, initialSize, &Command->waitSize)); /* Flush the cache for the link. */ gcmkONERROR(gckOS_CacheFlush(Command->os, gcvNULL, Command->wait, Command->waitSize)); #if gcdDUMP_COMMAND _DumpCommand(Command, Command->wait, Command->waitSize); #endif /* Update command queue offset. */ Command->offset = offset; Command->newQueue = gcvFALSE; /* Update address of last WAIT. 
*/ Command->wait = wait; Command->waitSize = waitSize; /* Update context and pipe select. */ Command->currentContext = context->id; Command->pipeSelect = context->currentPipe; /* Update queue tail pointer. */ gcmkONERROR( gckHARDWARE_UpdateQueueTail(hardware, Command->logical, Command->offset)); #if gcdDUMP_COMMAND gcmkPRINT("@[kernel.commit]"); #endif /* Release the command queue mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue)); --acquired; /* Release the context switching mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext)); --acquired; /* Submit events if asked for. */ if (Command->submit) { /* Submit events. */ status = gckEVENT_Submit(Command->kernel->event, gcvFALSE, gcvFALSE); if (gcmIS_SUCCESS(status)) { /* Success. */ Command->submit = gcvFALSE; } else { gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_COMMAND, "gckEVENT_Submit returned %d", status); } } /* Success. */ status = gcvSTATUS_OK; OnError: if (acquired > 1) { /* Release the command queue mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Command->os, Command->mutexQueue)); } if (acquired > 0) { /* Release the context switching mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Command->os, Command->mutexContext)); } /* Unmap all mapped pointers. */ while (stack != gcvNULL) { gcsMAPPED_PTR map = stack; stack = map->next; gcmkVERIFY_OK( gckOS_UnmapUserPointer(Command->os, map->pointer, map->bytes, map->kernelPointer)); gcmkVERIFY_OK( gckOS_Free(Command->os, map)); } /* Return status. */ gcmkFOOTER(); return status; }
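/* Note on ordering (as the commit path above reads): gckCOMMAND_Commit first
** writes a fresh WAIT/LINK at the tail of the command queue, then links the
** last committed buffer to that fetch location and flushes the caches, and
** only then rewrites the previous WAIT (Command->wait) into a LINK to the
** first committed buffer. The front end therefore only picks up the new work
** once everything it will fetch is already in memory; Command->wait and
** Command->waitSize are then updated to the new WAIT for the next commit. */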
/******************************************************************************* ** ** gckCOMMAND_Construct ** ** Construct a new gckCOMMAND object. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** OUTPUT: ** ** gckCOMMAND * Command ** Pointer to a variable that will hold the pointer to the gckCOMMAND ** object. */ gceSTATUS gckCOMMAND_Construct( IN gckKERNEL Kernel, OUT gckCOMMAND * Command ) { gckOS os; gckCOMMAND command = gcvNULL; gceSTATUS status; gctINT i; gcmkHEADER_ARG("Kernel=0x%x", Kernel); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); gcmkVERIFY_ARGUMENT(Command != gcvNULL); /* Extract the gckOS object. */ os = Kernel->os; /* Allocate the gckCOMMAND structure. */ gcmkONERROR( gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), (gctPOINTER *) &command)); /* Initialize the gckCOMMAND object. */ command->object.type = gcvOBJ_COMMAND; command->kernel = Kernel; command->os = os; command->mutexQueue = gcvNULL; command->mutexContext = gcvNULL; /* No command queues created yet. */ command->index = 0; for (i = 0; i < gcdCOMMAND_QUEUES; ++i) { command->queues[i].signal = gcvNULL; command->queues[i].logical = gcvNULL; } /* Get the command buffer requirements. */ gcmkONERROR( gckHARDWARE_QueryCommandBuffer(Kernel->hardware, &command->alignment, &command->reservedHead, &command->reservedTail)); /* No contexts available yet. */ command->contextCounter = command->currentContext = 0; /* Create the command queue mutex. */ gcmkONERROR( gckOS_CreateMutex(os, &command->mutexQueue)); /* Create the context switching mutex. */ gcmkONERROR( gckOS_CreateMutex(os, &command->mutexContext)); /* Get the page size from the OS. */ gcmkONERROR( gckOS_GetPageSize(os, &command->pageSize)); /* Set hardware to pipe 0. */ command->pipeSelect = 0; /* Pre-allocate the command queues. */ for (i = 0; i < gcdCOMMAND_QUEUES; ++i) { gcmkONERROR( gckOS_AllocateNonPagedMemory(os, gcvFALSE, &command->pageSize, &command->queues[i].physical, &command->queues[i].logical)); gcmkONERROR( gckOS_CreateSignal(os, gcvFALSE, &command->queues[i].signal)); gcmkONERROR( gckOS_Signal(os, command->queues[i].signal, gcvTRUE)); } /* No command queue in use yet. */ command->index = -1; command->logical = gcvNULL; command->newQueue = gcvFALSE; command->submit = gcvFALSE; /* Command is not yet running. */ command->running = gcvFALSE; /* Command queue is idle. */ command->idle = gcvTRUE; /* Commit stamp is zero. */ command->commitStamp = 0; /* Return pointer to the gckCOMMAND object. */ *Command = command; /* Success. */ gcmkFOOTER_ARG("*Command=0x%x", *Command); return gcvSTATUS_OK; OnError: /* Roll back. */ if (command != gcvNULL) { if (command->mutexContext != gcvNULL) { gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext)); } if (command->mutexQueue != gcvNULL) { gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue)); } for (i = 0; i < gcdCOMMAND_QUEUES; ++i) { if (command->queues[i].signal != gcvNULL) { gcmkVERIFY_OK( gckOS_DestroySignal(os, command->queues[i].signal)); } if (command->queues[i].logical != gcvNULL) { gcmkVERIFY_OK( gckOS_FreeNonPagedMemory(os, command->pageSize, command->queues[i].physical, command->queues[i].logical)); } } gcmkVERIFY_OK(gckOS_Free(os, command)); } /* Return the status. */ gcmkFOOTER(); return status; }