gceSTATUS
_power_off_gc(gckGALDEVICE device, gctBOOL early_suspend)
{
    /* Turn off the GC core. */
    if (device->kernel->hardware->chipPowerState != gcvPOWER_OFF)
    {
        gceSTATUS status;
        gckCOMMAND command;

        command = device->kernel->command;

        printk("[%s]\t@%d\tC:0x%p\tQ:0x%p\n",
               __func__, __LINE__,
               command->mutexContext, command->mutexQueue);

        /* Stall. */
        {
            /* Acquire the context switching mutex so nothing else can be
            ** committed. */
#if MUTEX_CONTEXT
            gcmkONERROR(
                gckOS_AcquireMutex(device->kernel->hardware->os,
                                   command->mutexContext,
                                   gcvINFINITE));
#endif

            if (gcvTRUE == early_suspend)
            {
                gcmkONERROR(gckCOMMAND_Stall(command));
            }
        }

        /* Stop. */
        {
            /* Stop the command parser. */
            gcmkONERROR(gckCOMMAND_Stop(command));

#if MUTEX_QUEUE
            /* Grab the command queue mutex so nothing can get access to the
            ** command queue. */
            gcmkONERROR(
                gckOS_AcquireMutex(device->kernel->hardware->os,
                                   command->mutexQueue,
                                   gcvINFINITE));
#endif
        }

        /* Disable IRQ and clock. */
        {
            gckOS_SuspendInterrupt(device->os);
            gckOS_ClockOff();
        }

        device->kernel->hardware->chipPowerState = gcvPOWER_OFF;
    }

    return gcvSTATUS_OK;

OnError:
    printk("ERROR: %s failed\n", __func__);
    return gcvSTATUS_OK;
}
gceSTATUS gckKERNEL_DumpProcessDB( IN gckKERNEL Kernel ) { gcsDATABASE_PTR database; gctINT i, pid; gctUINT8 name[24]; gcmkHEADER_ARG("Kernel=0x%x", Kernel); /* Acquire the database mutex. */ gcmkVERIFY_OK( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); gcmkPRINT("**************************\n"); gcmkPRINT("*** PROCESS DB DUMP ***\n"); gcmkPRINT("**************************\n"); gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME"); /* Walk the databases. */ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i) { for (database = Kernel->db->db[i]; database != gcvNULL; database = database->next) { pid = database->processID; gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name))); gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name)); gcmkPRINT_N(8, "%-8d%s\n", pid, name); } } /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
/******************************************************************************* ** ** gckHEAP_Free ** ** Free allocated memory from the heap. ** ** INPUT: ** ** gckHEAP Heap ** Pointer to a gckHEAP object. ** ** IN gctPOINTER Memory ** Pointer to memory to free. ** ** OUTPUT: ** ** NOTHING. */ gceSTATUS gckHEAP_Free( IN gckHEAP Heap, IN gctPOINTER Memory ) { gcskNODE_PTR node; gceSTATUS status; gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP); gcmkVERIFY_ARGUMENT(Memory != gcvNULL); /* Acquire the mutex. */ gcmkONERROR( gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE)); /* Pointer to structure. */ node = (gcskNODE_PTR) Memory - 1; /* Mark the node as freed. */ node->next = gcvNULL; #if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) /* Update profile counters. */ Heap->allocBytes -= node->bytes; #endif /* Release the mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Heap->os, Heap->mutex)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; OnError: /* Return the status. */ gcmkFOOTER(); return status; }
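/* The expression "(gcskNODE_PTR) Memory - 1" above works because gckHEAP hands
** out the address immediately past the gcskNODE header (gckHEAP_Allocate does
** "*Memory = used + 1"). Below is a minimal, hypothetical plain-C sketch of
** that header-before-payload layout; it is an illustration only, not driver
** code, and the example_* names are made up. */
#include <stdlib.h>
#include <stddef.h>

typedef struct example_node
{
    struct example_node *   next;
    size_t                  bytes;
}
example_node_t;

static void * example_alloc(size_t bytes)
{
    /* Allocate the header plus the payload in one block. */
    example_node_t * node = malloc(sizeof(example_node_t) + bytes);

    if (node == NULL)
    {
        return NULL;
    }

    node->next  = NULL;
    node->bytes = bytes;

    /* Hand out the address just past the header. */
    return node + 1;
}

static void example_free(void * memory)
{
    /* Step back over the header, exactly like "(gcskNODE_PTR) Memory - 1". */
    example_node_t * node = (example_node_t *) memory - 1;

    free(node);
}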
/* ** PM Thread Routine **/ static int _threadRoutinePM(gckGALDEVICE Device, gckHARDWARE Hardware) { gceCHIPPOWERSTATE state; for(;;) { /* wait for idle */ gcmkVERIFY_OK( gckOS_AcquireMutex(Device->os, Hardware->powerOffSema, gcvINFINITE)); /* We try to power off every 200 ms, until GPU is not idle */ do { if (Device->killThread == gcvTRUE) { /* The daemon exits. */ while (!kthread_should_stop()) { gckOS_Delay(Device->os, 1); } return 0; } gcmkVERIFY_OK( gckHARDWARE_SetPowerManagementState( Hardware, gcvPOWER_OFF_TIMEOUT)); /* relax cpu 200 ms before retry */ gckOS_Delay(Device->os, 200); gcmkVERIFY_OK( gckHARDWARE_QueryPowerManagementState(Hardware, &state)); } while (state == gcvPOWER_IDLE); } }
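/* Hedged sketch: one way the PM routine above could be started and stopped
** from the Linux driver. The pmThread field and the "galcore_pm" thread name
** are illustrative assumptions; killThread, powerOffSema and
** kthread_should_stop() are the hooks the routine itself relies on. */
#include <linux/kthread.h>

static int _pm_thread_entry(void * data)
{
    gckGALDEVICE device = data;

    /* Run the power-management loop until killThread is raised. */
    return _threadRoutinePM(device, device->kernel->hardware);
}

/* Start-up (probe path):
**     device->pmThread = kthread_run(_pm_thread_entry, device, "galcore_pm");
*/

static void _stop_pm_thread(gckGALDEVICE device)
{
    /* Ask the routine to exit. */
    device->killThread = gcvTRUE;

    /* Wake it out of gckOS_AcquireMutex(powerOffSema). */
    gckOS_ReleaseMutex(device->os, device->kernel->hardware->powerOffSema);

    /* Reap the kernel thread; the routine spins in kthread_should_stop()
    ** before returning. pmThread is a hypothetical task_struct pointer. */
    kthread_stop(device->pmThread);
}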
/*******************************************************************************
**
**  gckVIDMEM_Unlock
**
**  Unlock a video memory node.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a locked gcuVIDMEM_NODE union.
**
**      gceSURF_TYPE Type
**          Type of surface to unlock.
**
**      gctBOOL * Asynchroneous
**          Pointer to a variable specifying whether the surface may be
**          unlocked asynchronously.  If gcvNULL, the video memory is
**          unlocked synchronously.
**
**  OUTPUT:
**
**      gctBOOL * Asynchroneous
**          Pointer to a variable receiving gcvTRUE if the unlock has been
**          scheduled to happen asynchronously.
*/
gceSTATUS
gckVIDMEM_Unlock(
    IN gcuVIDMEM_NODE_PTR Node,
    IN gceSURF_TYPE Type,
    IN OUT gctBOOL * Asynchroneous
    )
{
    gceSTATUS status;
    gckKERNEL kernel;
    gckHARDWARE hardware;
    gctPOINTER buffer;
    gctSIZE_T requested, bufferSize;
    gckCOMMAND command = gcvNULL;
    gceKERNEL_FLUSH flush;
    gckOS os = gcvNULL;
    gctBOOL acquired = gcvFALSE;
    gctBOOL needRelease = gcvFALSE;
    gctBOOL pendingUnlock = gcvFALSE;

    gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
                   Node, Type, gcmOPT_VALUE(Asynchroneous));

    /* Verify the arguments. */
    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        if (Node->VidMem.locked <= 0)
        {
            /* The surface was not locked. */
            gcmkONERROR(gcvSTATUS_MEMORY_UNLOCKED);
        }

        /* Decrement the lock count. */
        Node->VidMem.locked --;

        if (Asynchroneous != gcvNULL)
        {
            /* No need for any events. */
            *Asynchroneous = gcvFALSE;
        }
    }

    /*************************** Virtual Memory *******************************/

    else
    {
        /* Verify the gckKERNEL object pointer. */
        kernel = Node->Virtual.kernel;
        gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);

        /* Verify the gckHARDWARE object pointer. */
        hardware = Node->Virtual.kernel->hardware;
        gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

        /* Verify the gckCOMMAND object pointer. */
        command = Node->Virtual.kernel->command;
        gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);

        if (Asynchroneous == gcvNULL)
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "gckVIDMEM_Unlock: Unlocking virtual node 0x%x (%d)",
                           Node, Node->Virtual.locked);

            /* Get the gckOS object pointer. */
            os = kernel->os;
            gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

            /* Grab the mutex. */
            gcmkONERROR(
                gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
            acquired = gcvTRUE;

            /* If we need to unlock a node from virtual memory we have to be
            ** very careful.  If the node is still inside the caches we
            ** might get a bus error later if the cache line needs to be
            ** replaced.  So - we have to flush the caches before we do
            ** anything.  We also need to stall to make sure the flush has
            ** happened.  However - when we get to this point we are inside
            ** the interrupt handler and we cannot just gckCOMMAND_Wait
            ** because it will wait forever.  So - what we do here is we
            ** verify the type of the surface, flush the appropriate cache,
            ** mark the node as flushed, and issue another unlock to unmap
            ** the MMU. */
            if (!Node->Virtual.contiguous
            &&  (Node->Virtual.locked == 1)
#ifdef __QNXNTO__
            &&  !Node->Virtual.unlockPending
#else
            &&  !Node->Virtual.pending
#endif
            )
            {
                if (Type == gcvSURF_BITMAP)
                {
                    /* Flush 2D cache. */
                    flush = gcvFLUSH_2D;
                }
                else if (Type == gcvSURF_RENDER_TARGET)
                {
                    /* Flush color cache. */
                    flush = gcvFLUSH_COLOR;
                }
                else if (Type == gcvSURF_DEPTH)
                {
                    /* Flush depth cache. */
                    flush = gcvFLUSH_DEPTH;
                }
                else
                {
                    /* No flush required. */
                    flush = (gceKERNEL_FLUSH) 0;
                }

                gcmkONERROR(
                    gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));

                if (requested != 0)
                {
                    gcmkONERROR(
                        gckCOMMAND_Reserve(command,
                                           requested,
                                           &buffer,
                                           &bufferSize));

                    needRelease = gcvTRUE;

                    gcmkONERROR(gckHARDWARE_Flush(hardware,
                                                  flush,
                                                  buffer,
                                                  &bufferSize));

                    gcmkONERROR(
                        gckEVENT_Unlock(Node->Virtual.kernel->event,
                                        gcvKERNEL_PIXEL,
                                        Node,
                                        Type));

                    /* Mark node as pending. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvTRUE;
#else
                    Node->Virtual.pending = gcvTRUE;
#endif

                    needRelease = gcvFALSE;

                    gcmkONERROR(gckCOMMAND_Execute(command, requested));

                    pendingUnlock = gcvTRUE;
                }
            }

            if (!pendingUnlock)
            {
                if (Node->Virtual.locked == 0)
                {
                    status = gcvSTATUS_MEMORY_UNLOCKED;
                    goto OnError;
                }

                /* Decrement lock count. */
                -- Node->Virtual.locked;

                /* See if we can unlock the resources. */
                if (Node->Virtual.locked == 0)
                {
                    /* Unlock the pages. */
#ifdef __QNXNTO__
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.userPID,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#else
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#endif

                    /* Free the page table. */
                    if (Node->Virtual.pageTable != gcvNULL)
                    {
                        gcmkONERROR(
                            gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                             Node->Virtual.pageTable,
                                             Node->Virtual.pageCount));

                        /* Mark page table as freed. */
                        Node->Virtual.pageTable = gcvNULL;
                    }

                    /* Mark node as unlocked. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvFALSE;
#else
                    Node->Virtual.pending = gcvFALSE;
#endif
                }

                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                               "Unmapped virtual node 0x%x from 0x%08X",
                               Node, Node->Virtual.address);
            }

            /* Release the mutex. */
            gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
            acquired = gcvFALSE;
        }
        else
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Scheduled unlock for virtual node 0x%x",
                           Node);

            /* Schedule the surface to be unlocked. */
            *Asynchroneous = gcvTRUE;
        }
    }

    /* Success. */
    gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
    return gcvSTATUS_OK;

OnError:
    if (needRelease)
    {
        gcmkVERIFY_OK(gckCOMMAND_Release(command));
    }

    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/******************************************************************************* ** ** gckVIDMEM_Lock ** ** Lock a video memory node and return it's hardware specific address. ** ** INPUT: ** ** gcuVIDMEM_NODE_PTR Node ** Pointer to a gcuVIDMEM_NODE union. ** ** OUTPUT: ** ** gctUINT32 * Address ** Pointer to a variable that will hold the hardware specific address. */ gceSTATUS gckVIDMEM_Lock( IN gcuVIDMEM_NODE_PTR Node, OUT gctUINT32 * Address ) { gceSTATUS status; gctBOOL acquired = gcvFALSE; gctBOOL locked = gcvFALSE; gckOS os = gcvNULL; gcmkHEADER_ARG("Node=0x%x", Node); /* Verify the arguments. */ gcmkVERIFY_ARGUMENT(Address != gcvNULL); if ((Node == gcvNULL) || (Node->VidMem.memory == gcvNULL) ) { /* Invalid object. */ gcmkONERROR(gcvSTATUS_INVALID_OBJECT); } /**************************** Video Memory ********************************/ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM) { /* Increment the lock count. */ Node->VidMem.locked ++; /* Return the address of the node. */ *Address = Node->VidMem.memory->baseAddress + Node->VidMem.offset + Node->VidMem.alignment; gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "Locked node 0x%x (%d) @ 0x%08X", Node, Node->VidMem.locked, *Address); } /*************************** Virtual Memory *******************************/ else { /* Verify the gckKERNEL object pointer. */ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL); /* Extract the gckOS object pointer. */ os = Node->Virtual.kernel->os; gcmkVERIFY_OBJECT(os, gcvOBJ_OS); /* Grab the mutex. */ gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE)); acquired = gcvTRUE; /* Increment the lock count. */ if (Node->Virtual.locked ++ == 0) { /* Is this node pending for a final unlock? */ #ifdef __QNXNTO__ if (!Node->Virtual.contiguous && Node->Virtual.unlockPending) #else if (!Node->Virtual.contiguous && Node->Virtual.pending) #endif { /* Make sure we have a page table. */ gcmkASSERT(Node->Virtual.pageTable != gcvNULL); /* Remove pending unlock. */ #ifdef __QNXNTO__ Node->Virtual.unlockPending = gcvFALSE; #else Node->Virtual.pending = gcvFALSE; #endif } /* First lock - create a page table. */ gcmkASSERT(Node->Virtual.pageTable == gcvNULL); /* Make sure we mark our node as not flushed. */ #ifdef __QNXNTO__ Node->Virtual.unlockPending = gcvFALSE; #else Node->Virtual.pending = gcvFALSE; #endif /* Lock the allocated pages. */ #ifdef __QNXNTO__ gcmkONERROR( gckOS_LockPages(os, Node->Virtual.physical, Node->Virtual.bytes, Node->Virtual.userPID, &Node->Virtual.logical, &Node->Virtual.pageCount)); #else gcmkONERROR( gckOS_LockPages(os, Node->Virtual.physical, Node->Virtual.bytes, &Node->Virtual.logical, &Node->Virtual.pageCount)); #endif locked = gcvTRUE; if (Node->Virtual.contiguous) { /* Get physical address directly */ gcmkONERROR(gckOS_GetPhysicalAddress(os, Node->Virtual.logical, &Node->Virtual.address)); } else { /* Allocate pages inside the MMU. */ gcmkONERROR( gckMMU_AllocatePages(Node->Virtual.kernel->mmu, Node->Virtual.pageCount, &Node->Virtual.pageTable, &Node->Virtual.address)); /* Map the pages. */ #ifdef __QNXNTO__ gcmkONERROR( gckOS_MapPages(os, Node->Virtual.physical, Node->Virtual.logical, Node->Virtual.pageCount, Node->Virtual.pageTable)); #else gcmkONERROR( gckOS_MapPages(os, Node->Virtual.physical, Node->Virtual.pageCount, Node->Virtual.pageTable)); #endif gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "Mapped virtual node 0x%x to 0x%08X", Node, Node->Virtual.address); } } /* Return hardware address. */ *Address = Node->Virtual.address; /* Release the mutex. 
*/ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex)); } /* Success. */ gcmkFOOTER_ARG("*Address=%08x", *Address); return gcvSTATUS_OK; OnError: if (locked) { if (Node->Virtual.pageTable != gcvNULL) { /* Free the pages from the MMU. */ gcmkVERIFY_OK( gckMMU_FreePages(Node->Virtual.kernel->mmu, Node->Virtual.pageTable, Node->Virtual.pageCount)); Node->Virtual.pageTable = gcvNULL; } /* Unlock the pages. */ #ifdef __QNXNTO__ gcmkVERIFY_OK( gckOS_UnlockPages(os, Node->Virtual.physical, Node->Virtual.userPID, Node->Virtual.bytes, Node->Virtual.logical)); #else gcmkVERIFY_OK( gckOS_UnlockPages(os, Node->Virtual.physical, Node->Virtual.bytes, Node->Virtual.logical)); #endif } if (acquired) { /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
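/* Hedged usage sketch: the typical lock/unlock pairing for the two functions
** above, as a dispatcher-style caller might write it. Assumes the usual
** gc_hal_kernel.h declarations are in scope. If the unlock comes back
** asynchronous, the final unlock is pushed through the event queue with
** gckEVENT_Unlock, matching the scheduling done inside gckVIDMEM_Unlock. */
static gceSTATUS _lock_use_unlock(
    gckKERNEL Kernel,
    gcuVIDMEM_NODE_PTR Node,
    gceSURF_TYPE Type
    )
{
    gceSTATUS status;
    gctUINT32 address;
    gctBOOL asynchroneous = gcvFALSE;

    /* Pin the node and obtain its hardware address. */
    gcmkONERROR(gckVIDMEM_Lock(Node, &address));

    /* ... program 'address' into the hardware ... */

    /* Unlock; for virtual nodes this may only schedule the unlock. */
    gcmkONERROR(gckVIDMEM_Unlock(Node, Type, &asynchroneous));

    if (asynchroneous)
    {
        /* Perform the real unlock once the GPU is done with the surface. */
        gcmkONERROR(gckEVENT_Unlock(Kernel->event, gcvKERNEL_PIXEL, Node, Type));
    }

    return gcvSTATUS_OK;

OnError:
    return status;
}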
/*******************************************************************************
**
**  gckVIDMEM_FreeHandleMemory
**
**  Free all allocated video memory nodes for a handle.
**
**  INPUT:
**
**      gckVIDMEM Memory
**          Pointer to a gckVIDMEM object.
**
**      gctHANDLE Handle
**          Handle owning the nodes to free.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_FreeHandleMemory(
    IN gckVIDMEM Memory,
    IN gctHANDLE Handle
    )
{
    gceSTATUS status;
    gctBOOL mutex = gcvFALSE;
    gcuVIDMEM_NODE_PTR node;
    gctINT i;
    gctUINT32 nodeCount = 0, byteCount = 0;
    gctBOOL again;

    gcmkHEADER_ARG("Memory=0x%x Handle=0x%x", Memory, Handle);

    gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);

    gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
    mutex = gcvTRUE;

    /* Walk all sentinels. */
    for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
    {
        /* Bail out of the heap if it is not used. */
        if (Memory->sentinel[i].VidMem.next == gcvNULL)
        {
            break;
        }

        do
        {
            again = gcvFALSE;

            /* Walk all the nodes until we reach the sentinel. */
            for (node = Memory->sentinel[i].VidMem.next;
                 node->VidMem.bytes != 0;
                 node = node->VidMem.next)
            {
                /* Free the node if it was allocated by Handle. */
                if (node->VidMem.handle == Handle)
                {
                    /* Unlock video memory. */
                    while (gckVIDMEM_Unlock(node, gcvSURF_TYPE_UNKNOWN, gcvNULL)
                           != gcvSTATUS_MEMORY_UNLOCKED)
                        ;

                    nodeCount++;
                    byteCount += node->VidMem.bytes;

                    /* Free video memory. */
                    gcmkVERIFY_OK(gckVIDMEM_Free(node));

                    /* Freeing may cause a merge which will invalidate our
                    ** iteration.  Don't be clever, just restart. */
                    again = gcvTRUE;

                    break;
                }
            }
        }
        while (again);
    }

    gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (mutex)
    {
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
    }

    gcmkFOOTER();
    return status;
}
/******************************************************************************* ** gckKERNEL_FindRecord ** ** Find a database record from the database. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** gcsDATABASE_PTR Database ** Pointer to a database structure. ** ** gceDATABASE_TYPE Type ** Type of the record to remove. ** ** gctPOINTER Data ** Data of the record to remove. ** ** OUTPUT: ** ** gctSIZE_T_PTR Bytes ** Pointer to a variable that receives the size of the record deleted. ** Can be gcvNULL if the size is not required. */ static gceSTATUS gckKERNEL_FindRecord( IN gckKERNEL Kernel, IN gcsDATABASE_PTR Database, IN gceDATABASE_TYPE Type, IN gctPOINTER Data, OUT gcsDATABASE_RECORD_PTR Record ) { gceSTATUS status; gctBOOL acquired = gcvFALSE; gcsDATABASE_RECORD_PTR record; gctUINT32 slot = _GetSlot(Database, Data); gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x", Kernel, Database, Type, Data); /* Acquire the database mutex. */ gcmkONERROR( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; /* Scan the database for this record. */ for (record = Database->list[slot]; record != gcvNULL; record = record->next ) { if ((record->type == Type) && (record->data == Data) ) { /* Found it! */ break; } } if (record == gcvNULL) { /* Ouch! This record is not found? */ gcmkONERROR(gcvSTATUS_INVALID_DATA); } if (Record != gcvNULL) { /* Return information of record. */ gcmkONERROR( gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD))); } /* Release the database mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Success. */ gcmkFOOTER_ARG("Record=0x%x", Record); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
/******************************************************************************* ** ** gckCOMMAND_Commit ** ** Commit a command buffer to the command queue. ** ** INPUT: ** ** gckCOMMAND Command ** Pointer to an gckCOMMAND object. ** ** gcoCMDBUF CommandBuffer ** Pointer to an gcoCMDBUF object. ** ** gcoCONTEXT Context ** Pointer to an gcoCONTEXT object. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckCOMMAND_Commit( IN gckCOMMAND Command, IN gcoCMDBUF CommandBuffer, IN gcoCONTEXT Context, IN gctHANDLE Process ) { gcoCMDBUF commandBuffer; gcoCONTEXT context; gckHARDWARE hardware; gceSTATUS status; gctPOINTER initialLink, link; gctSIZE_T bytes, initialSize, lastRun; gcoCMDBUF buffer; gctPOINTER wait; gctSIZE_T waitSize; gctUINT32 offset; gctPOINTER fetchAddress; gctSIZE_T fetchSize; gctUINT8_PTR logical; gcsMAPPED_PTR stack = gcvNULL; gctINT acquired = 0; #if gcdSECURE_USER gctUINT32_PTR hint; #endif #if gcdDUMP_COMMAND gctPOINTER dataPointer; gctSIZE_T dataBytes; #endif gctPOINTER flushPointer; gctSIZE_T flushSize; gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x Context=0x%x", Command, CommandBuffer, Context); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND); #if gcdNULL_DRIVER == 2 /* Do nothing with infinite hardware. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; #endif gcmkONERROR( _AddMap(Command->os, CommandBuffer, gcmSIZEOF(struct _gcoCMDBUF), (gctPOINTER *) &commandBuffer, &stack)); gcmkVERIFY_OBJECT(commandBuffer, gcvOBJ_COMMANDBUFFER); gcmkONERROR( _AddMap(Command->os, Context, gcmSIZEOF(struct _gcoCONTEXT), (gctPOINTER *) &context, &stack)); gcmkVERIFY_OBJECT(context, gcvOBJ_CONTEXT); /* Extract the gckHARDWARE and gckEVENT objects. */ hardware = Command->kernel->hardware; gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE); /* Acquire the context switching mutex. */ gcmkONERROR( gckOS_AcquireMutex(Command->os, Command->mutexContext, gcvINFINITE)); ++acquired; /* Reserved slot in the context or command buffer. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, gcvNULL, 0, &bytes)); /* Test if we need to switch to this context. */ if ((context->id != 0) && (context->id != Command->currentContext) ) { /* Map the context buffer.*/ gcmkONERROR( _AddMap(Command->os, context->logical, context->bufferSize, (gctPOINTER *) &logical, &stack)); #if gcdSECURE_USER /* Map the hint array.*/ gcmkONERROR( _AddMap(Command->os, context->hintArray, context->hintCount * gcmSIZEOF(gctUINT32), (gctPOINTER *) &hint, &stack)); /* Loop while we have valid hints. */ while (*hint != 0) { /* Map handle into physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif /* See if we have to check pipes. */ if (context->pipe2DIndex != 0) { /* See if we are in the correct pipe. */ if (context->initialPipe == Command->pipeSelect) { gctUINT32 reserved = bytes; gctUINT8_PTR nop = logical; /* Already in the correct pipe, fill context buffer with NOP. */ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } else { /* Switch to the correct pipe. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, logical, context->initialPipe, &bytes)); } } /* Save initial link pointer. */ initialLink = logical; initialSize = context->bufferSize; #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, initialLink, initialSize, gcvTRUE, gcvFALSE ); #endif /* Save initial buffer to flush. 
*/ flushPointer = initialLink; flushSize = initialSize; /* Save pointer to next link. */ gcmkONERROR( _AddMap(Command->os, context->link, 8, &link, &stack)); /* Start parsing CommandBuffer. */ buffer = commandBuffer; /* Mark context buffer as used. */ if (context->inUse != gcvNULL) { gctBOOL_PTR inUse; gcmkONERROR( _AddMap(Command->os, (gctPOINTER) context->inUse, gcmSIZEOF(gctBOOL), (gctPOINTER *) &inUse, &stack)); *inUse = gcvTRUE; } } else { /* Test if this is a new context. */ if (context->id == 0) { /* Generate unique ID for the context buffer. */ context->id = ++ Command->contextCounter; if (context->id == 0) { /* Context counter overflow (wow!) */ gcmkONERROR(gcvSTATUS_TOO_COMPLEX); } } /* Map the command buffer. */ gcmkONERROR( _AddMap(Command->os, commandBuffer->logical, commandBuffer->offset, (gctPOINTER *) &logical, &stack)); #if gcdSECURE_USER /* Map the hint table. */ gcmkONERROR( _AddMap(Command->os, commandBuffer->hintCommit, commandBuffer->offset - commandBuffer->startOffset, (gctPOINTER *) &hint, &stack)); /* Walk while we have valid hints. */ while (*hint != 0) { /* Map the handle to a physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif if (context->entryPipe == Command->pipeSelect) { gctUINT32 reserved = Command->reservedHead; gctUINT8_PTR nop = logical + commandBuffer->startOffset; /* Already in the correct pipe, fill context buffer with NOP. */ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } else { /* Switch to the correct pipe. */ gcmkONERROR( gckHARDWARE_PipeSelect(hardware, logical + commandBuffer->startOffset, context->entryPipe, &bytes)); } /* Save initial link pointer. */ initialLink = logical + commandBuffer->startOffset; initialSize = commandBuffer->offset - commandBuffer->startOffset + Command->reservedTail; #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, initialLink, initialSize, gcvFALSE, gcvFALSE ); #endif /* Save initial buffer to flush. */ flushPointer = initialLink; flushSize = initialSize; /* Save pointer to next link. */ link = logical + commandBuffer->offset; /* No more data. */ buffer = gcvNULL; } #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, Command->wait, initialLink); #endif #if gcdDUMP_COMMAND dataPointer = initialLink; dataBytes = initialSize; #endif /* Loop through all remaining command buffers. */ if (buffer != gcvNULL) { /* Map the command buffer. */ gcmkONERROR( _AddMap(Command->os, buffer->logical, buffer->offset + Command->reservedTail, (gctPOINTER *) &logical, &stack)); #if MRVL_PRINT_CMD_BUFFER _AddCmdBuffer( Command, (gctUINT32_PTR)logical, buffer->offset + Command->reservedTail, gcvFALSE, gcvFALSE ); #endif #if gcdSECURE_USER /* Map the hint table. */ gcmkONERROR( _AddMap(Command->os, buffer->hintCommit, buffer->offset - buffer->startOffset, (gctPOINTER *) &hint, &stack)); /* Walk while we have valid hints. */ while (*hint != 0) { /* Map the handle to a physical address. */ gcmkONERROR( gckKERNEL_MapLogicalToPhysical( Command->kernel, Process, (gctPOINTER *) (logical + *hint))); /* Next hint. */ ++hint; } #endif /* First slot becomes a NOP. */ { gctUINT32 reserved = Command->reservedHead; gctUINT8_PTR nop = logical + buffer->startOffset; /* Already in the correct pipe, fill context buffer with NOP. 
*/ while (reserved > 0) { bytes = reserved; gcmkONERROR( gckHARDWARE_Nop(hardware, nop, &bytes)); gcmkASSERT(reserved >= bytes); reserved -= bytes; nop += bytes; } } /* Generate the LINK to this command buffer. */ gcmkONERROR( gckHARDWARE_Link(hardware, link, logical + buffer->startOffset, buffer->offset - buffer->startOffset + Command->reservedTail, &bytes)); #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, link, (gctUINT32_PTR)logical); #endif /* Flush the initial buffer. */ gcmkONERROR(gckOS_CacheFlush(Command->os, Process, flushPointer, flushSize)); /* Save new flush pointer. */ flushPointer = logical + buffer->startOffset; flushSize = buffer->offset - buffer->startOffset + Command->reservedTail; #if gcdDUMP_COMMAND _DumpCommand(Command, dataPointer, dataBytes); dataPointer = logical + buffer->startOffset; dataBytes = buffer->offset - buffer->startOffset + Command->reservedTail; #endif /* Save pointer to next link. */ link = logical + buffer->offset; } /* Compute number of bytes required for WAIT/LINK. */ gcmkONERROR( gckHARDWARE_WaitLink(hardware, gcvNULL, Command->offset, &bytes, gcvNULL, gcvNULL)); lastRun = bytes; /* Grab the command queue mutex. */ gcmkONERROR( gckOS_AcquireMutex(Command->os, Command->mutexQueue, gcvINFINITE)); ++acquired; if (Command->kernel->notifyIdle) { /* Increase the commit stamp */ Command->commitStamp++; /* Set busy if idle */ if (Command->idle) { Command->idle = gcvFALSE; gcmkVERIFY_OK(gckOS_NotifyIdle(Command->os, gcvFALSE)); } } /* Compute number of bytes left in current command queue. */ bytes = Command->pageSize - Command->offset; if (bytes < lastRun) { /* Create a new command queue. */ gcmkONERROR(_NewQueue(Command, gcvTRUE)); /* Adjust run size with any extra commands inserted. */ lastRun += Command->offset; } /* Get current offset. */ offset = Command->offset; /* Append WAIT/LINK in command queue. */ bytes = Command->pageSize - offset; gcmkONERROR( gckHARDWARE_WaitLink(hardware, (gctUINT8 *) Command->logical + offset, offset, &bytes, &wait, &waitSize)); /* Flush the cache for the wait/link. */ gcmkONERROR(gckOS_CacheFlush(Command->os, gcvNULL, (gctUINT8 *) Command->logical + offset, bytes)); #if gcdDUMP_COMMAND _DumpCommand(Command, (gctUINT8 *) Command->logical + offset, bytes); #endif /* Adjust offset. */ offset += bytes; if (Command->newQueue) { /* Compute fetch location and size for a new command queue. */ fetchAddress = Command->logical; fetchSize = offset; } else { /* Compute fetch location and size for an existing command queue. */ fetchAddress = (gctUINT8 *) Command->logical + Command->offset; fetchSize = offset - Command->offset; } bytes = 8; /* Link in WAIT/LINK. */ gcmkONERROR( gckHARDWARE_Link(hardware, link, fetchAddress, fetchSize, &bytes)); #if MRVL_PRINT_CMD_BUFFER _AddLink(Command, link, fetchAddress); #endif /* Flush the cache for the command buffer. */ gcmkONERROR(gckOS_CacheFlush(Command->os, Process, flushPointer, flushSize)); #if gcdDUMP_COMMAND _DumpCommand(Command, dataPointer, dataBytes); #endif /* Execute the entire sequence. */ gcmkONERROR( gckHARDWARE_Link(hardware, Command->wait, initialLink, initialSize, &Command->waitSize)); /* Flush the cache for the link. */ gcmkONERROR(gckOS_CacheFlush(Command->os, gcvNULL, Command->wait, Command->waitSize)); #if gcdDUMP_COMMAND _DumpCommand(Command, Command->wait, Command->waitSize); #endif /* Update command queue offset. */ Command->offset = offset; Command->newQueue = gcvFALSE; /* Update address of last WAIT. 
*/ Command->wait = wait; Command->waitSize = waitSize; /* Update context and pipe select. */ Command->currentContext = context->id; Command->pipeSelect = context->currentPipe; /* Update queue tail pointer. */ gcmkONERROR( gckHARDWARE_UpdateQueueTail(hardware, Command->logical, Command->offset)); #if gcdDUMP_COMMAND gcmkPRINT("@[kernel.commit]"); #endif /* Release the command queue mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue)); --acquired; /* Release the context switching mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext)); --acquired; /* Submit events if asked for. */ if (Command->submit) { /* Submit events. */ status = gckEVENT_Submit(Command->kernel->event, gcvFALSE, gcvFALSE); if (gcmIS_SUCCESS(status)) { /* Success. */ Command->submit = gcvFALSE; } else { gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_COMMAND, "gckEVENT_Submit returned %d", status); } } /* Success. */ status = gcvSTATUS_OK; OnError: if (acquired > 1) { /* Release the command queue mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Command->os, Command->mutexQueue)); } if (acquired > 0) { /* Release the context switching mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Command->os, Command->mutexContext)); } /* Unmap all mapped pointers. */ while (stack != gcvNULL) { gcsMAPPED_PTR map = stack; stack = map->next; gcmkVERIFY_OK( gckOS_UnmapUserPointer(Command->os, map->pointer, map->bytes, map->kernelPointer)); gcmkVERIFY_OK( gckOS_Free(Command->os, map)); } /* Return status. */ gcmkFOOTER(); return status; }
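/* Hedged sketch of the cleanup idiom gckCOMMAND_Commit uses above: count each
** mutex as it is taken so a single OnError label can release exactly what is
** still held, in reverse order of acquisition. The helper name is made up. */
static gceSTATUS _commit_locking_idiom(
    gckCOMMAND Command
    )
{
    gceSTATUS status;
    gctINT acquired = 0;

    gcmkONERROR(
        gckOS_AcquireMutex(Command->os, Command->mutexContext, gcvINFINITE));
    ++acquired;

    gcmkONERROR(
        gckOS_AcquireMutex(Command->os, Command->mutexQueue, gcvINFINITE));
    ++acquired;

    /* ... work that may fail through gcmkONERROR ... */

    gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
    --acquired;

    gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
    --acquired;

    return gcvSTATUS_OK;

OnError:
    if (acquired > 1)
    {
        /* Queue mutex is still held. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
    }

    if (acquired > 0)
    {
        /* Context mutex is still held. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
    }

    return status;
}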
/******************************************************************************* ** ** gckHEAP_Allocate ** ** Allocate data from the heap. ** ** INPUT: ** ** gckHEAP Heap ** Pointer to a gckHEAP object. ** ** IN gctSIZE_T Bytes ** Number of byte to allocate. ** ** OUTPUT: ** ** gctPOINTER * Memory ** Pointer to a variable that will hold the address of the allocated ** memory. */ gceSTATUS gckHEAP_Allocate( IN gckHEAP Heap, IN gctSIZE_T Bytes, OUT gctPOINTER * Memory ) { gctBOOL acquired = gcvFALSE; gcskHEAP_PTR heap; gceSTATUS status; gctSIZE_T bytes; gcskNODE_PTR node, used, prevFree = gcvNULL; gctPOINTER memory = gcvNULL; gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP); gcmkVERIFY_ARGUMENT(Bytes > 0); gcmkVERIFY_ARGUMENT(Memory != gcvNULL); /* Determine number of bytes required for a node. */ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8); /* Acquire the mutex. */ gcmkONERROR( gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE)); acquired = gcvTRUE; /* Check if this allocation is bigger than the default allocation size. */ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE)) { /* Adjust allocation size. */ Heap->allocationSize = bytes * 2; } else if (Heap->heap != gcvNULL) { gctINT i; /* 2 retries, since we might need to compact. */ for (i = 0; i < 2; ++i) { /* Walk all the heaps. */ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next) { /* Check if this heap has enough bytes to hold the request. */ if (bytes <= heap->size - gcmSIZEOF(gcskNODE)) { prevFree = gcvNULL; /* Walk the chain of free nodes. */ for (node = heap->freeList; node != gcvNULL; node = node->next ) { gcmkASSERT(node->next != gcdIN_USE); /* Check if this free node has enough bytes. */ if (node->bytes >= bytes) { /* Use the node. */ goto UseNode; } /* Save current free node for linked list management. */ prevFree = node; } } } if (i == 0) { /* Compact the heap. */ gcmkVERIFY_OK(_CompactKernelHeap(Heap)); #if gcmIS_DEBUG(gcdDEBUG_CODE) gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "===== KERNEL HEAP ====="); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Number of allocations : %12u", Heap->allocCount); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Number of bytes allocated : %12llu", Heap->allocBytes); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Maximum allocation size : %12llu", Heap->allocBytesMax); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Total number of bytes allocated : %12llu", Heap->allocBytesTotal); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Number of heaps : %12u", Heap->heapCount); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Heap memory in bytes : %12llu", Heap->heapMemory); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Maximum number of heaps : %12u", Heap->heapCountMax); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP, "Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax); #endif } } } /* Release the mutex. */ gcmkONERROR( gckOS_ReleaseMutex(Heap->os, Heap->mutex)); acquired = gcvFALSE; /* Allocate a new heap. */ gcmkONERROR( gckOS_AllocateMemory(Heap->os, Heap->allocationSize, &memory)); gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP, "Allocated heap 0x%x (%lu bytes)", memory, Heap->allocationSize); /* Acquire the mutex. */ gcmkONERROR( gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE)); acquired = gcvTRUE; /* Use the allocated memory as the heap. */ heap = (gcskHEAP_PTR) memory; /* Insert this heap to the head of the chain. 
*/ heap->next = Heap->heap; heap->prev = gcvNULL; heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP); if (heap->next != gcvNULL) { heap->next->prev = heap; } Heap->heap = heap; /* Mark the end of the heap. */ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap + Heap->allocationSize - gcmSIZEOF(gcskNODE) ); node->bytes = 0; node->next = gcvNULL; /* Create a free list. */ node = (gcskNODE_PTR) (heap + 1); heap->freeList = node; /* Initialize the free list. */ node->bytes = heap->size - gcmSIZEOF(gcskNODE); node->next = gcvNULL; /* No previous free. */ prevFree = gcvNULL; #if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) /* Update profiling. */ Heap->heapCount += 1; Heap->heapMemory += Heap->allocationSize; if (Heap->heapCount > Heap->heapCountMax) { Heap->heapCountMax = Heap->heapCount; } if (Heap->heapMemory > Heap->heapMemoryMax) { Heap->heapMemoryMax = Heap->heapMemory; } #endif UseNode: /* Verify some stuff. */ gcmkASSERT(heap != gcvNULL); gcmkASSERT(node != gcvNULL); gcmkASSERT(node->bytes >= bytes); if (heap->prev != gcvNULL) { /* Unlink the heap from the linked list. */ heap->prev->next = heap->next; if (heap->next != gcvNULL) { heap->next->prev = heap->prev; } /* Move the heap to the front of the list. */ heap->next = Heap->heap; heap->prev = gcvNULL; Heap->heap = heap; heap->next->prev = heap; } /* Check if there is enough free space left after usage for another free ** node. */ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE)) { /* Allocated used space from the back of the free list. */ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes); /* Adjust the number of free bytes. */ node->bytes -= bytes; gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE)); } else { /* Remove this free list from the chain. */ if (prevFree == gcvNULL) { heap->freeList = node->next; } else { prevFree->next = node->next; } /* Consume the entire free node. */ used = (gcskNODE_PTR) node; bytes = node->bytes; } /* Mark node as used. */ used->bytes = bytes; used->next = gcdIN_USE; #if gcmIS_DEBUG(gcdDEBUG_CODE) used->timeStamp = ++Heap->timeStamp; #endif #if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) /* Update profile counters. */ Heap->allocCount += 1; Heap->allocBytes += bytes; Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax); Heap->allocBytesTotal += bytes; #endif /* Release the mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Heap->os, Heap->mutex)); /* Return pointer to memory. */ *Memory = used + 1; /* Success. */ gcmkFOOTER_ARG("*Memory=0x%x", *Memory); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the mutex. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Heap->os, Heap->mutex)); } if (memory != gcvNULL) { /* Free the heap memory. */ gckOS_FreeMemory(Heap->os, memory); } /* Return the status. */ gcmkFOOTER(); return status; }
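/* Hedged usage sketch: a round trip through the kernel heap, pairing
** gckHEAP_Allocate with gckHEAP_Free. The 64-byte request size is
** illustrative; requests are padded with a gcskNODE header and rounded up to
** 8 bytes internally (see the gcmALIGN above). */
static gceSTATUS _heap_roundtrip(
    gckHEAP Heap
    )
{
    gceSTATUS status;
    gctPOINTER pointer = gcvNULL;

    /* Carve 64 bytes out of the heap. */
    gcmkONERROR(gckHEAP_Allocate(Heap, 64, &pointer));

    /* ... use the memory ... */

    /* Mark the node free again; compaction happens lazily on a later
    ** allocation that cannot be satisfied. */
    gcmkONERROR(gckHEAP_Free(Heap, pointer));

    return gcvSTATUS_OK;

OnError:
    return status;
}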
/******************************************************************************* ** ** gckVGMMU_AllocatePages ** ** Allocate pages inside the page table. ** ** INPUT: ** ** gckVGMMU Mmu ** Pointer to an gckVGMMU object. ** ** gctSIZE_T PageCount ** Number of pages to allocate. ** ** OUTPUT: ** ** gctPOINTER * PageTable ** Pointer to a variable that receives the base address of the page ** table. ** ** gctUINT32 * Address ** Pointer to a variable that receives the hardware specific address. */ gceSTATUS gckVGMMU_AllocatePages( IN gckVGMMU Mmu, IN gctSIZE_T PageCount, OUT gctPOINTER * PageTable, OUT gctUINT32 * Address ) { gceSTATUS status; gctUINT32 tail, index, i; gctUINT32 * table; gctBOOL allocated = gcvFALSE; gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x", Mmu, PageCount, PageTable, Address); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); gcmkVERIFY_ARGUMENT(PageCount > 0); gcmkVERIFY_ARGUMENT(PageTable != gcvNULL); gcmkVERIFY_ARGUMENT(Address != gcvNULL); gcmkTRACE_ZONE( gcvLEVEL_INFO, gcvZONE_MMU, "%s(%d): %u pages.\n", __FUNCTION__, __LINE__, PageCount ); if (PageCount > Mmu->entryCount) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): page table too small for %u pages.\n", __FUNCTION__, __LINE__, PageCount ); gcmkFOOTER_NO(); /* Not enough pages avaiable. */ return gcvSTATUS_OUT_OF_RESOURCES; } /* Grab the mutex. */ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE); if (status < 0) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): could not acquire mutex.\n" ,__FUNCTION__, __LINE__ ); gcmkFOOTER(); /* Error. */ return status; } /* Compute the tail for this allocation. */ tail = Mmu->entryCount - PageCount; /* Walk all entries until we find enough slots. */ for (index = Mmu->entry; index <= tail;) { /* Access page table. */ table = (gctUINT32 *) Mmu->pageTableLogical + index; /* See if all slots are available. */ for (i = 0; i < PageCount; i++, table++) { if (*table != ~0) { /* Start from next slot. */ index += i + 1; break; } } if (i == PageCount) { /* Bail out if we have enough page entries. */ allocated = gcvTRUE; break; } } if (!allocated) { if (status >= 0) { /* Walk all entries until we find enough slots. */ for (index = 0; index <= tail;) { /* Access page table. */ table = (gctUINT32 *) Mmu->pageTableLogical + index; /* See if all slots are available. */ for (i = 0; i < PageCount; i++, table++) { if (*table != ~0) { /* Start from next slot. */ index += i + 1; break; } } if (i == PageCount) { /* Bail out if we have enough page entries. */ allocated = gcvTRUE; break; } } } } if (!allocated && (status >= 0)) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): not enough free pages for %u pages.\n", __FUNCTION__, __LINE__, PageCount ); /* Not enough empty slots available. */ status = gcvSTATUS_OUT_OF_RESOURCES; } if (status >= 0) { /* Build virtual address. */ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, Address); if (status >= 0) { /* Update current entry into page table. */ Mmu->entry = index + PageCount; /* Return pointer to page table. */ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index; gcmkTRACE_ZONE( gcvLEVEL_INFO, gcvZONE_MMU, "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n", __FUNCTION__, __LINE__, PageCount, index, *Address, *PageTable ); } } /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex)); gcmkFOOTER(); /* Return status. */ return status; }
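/* Standalone plain-C illustration (not driver code) of the first-fit scan used
** by gckVGMMU_AllocatePages above: a page-table entry equal to ~0 is free, and
** the scan restarts just past the first busy entry inside a candidate window.
** The example_* name is made up. */
#include <stdint.h>
#include <stddef.h>

static long example_find_free_run(
    const uint32_t * table,
    size_t entryCount,
    size_t pageCount
    )
{
    size_t index, i;

    if ((pageCount == 0) || (pageCount > entryCount))
    {
        return -1;
    }

    for (index = 0; index <= entryCount - pageCount;)
    {
        /* See if all slots in the window are available. */
        for (i = 0; i < pageCount; i++)
        {
            if (table[index + i] != ~0u)
            {
                /* Busy entry: start again from the next slot. */
                index += i + 1;
                break;
            }
        }

        if (i == pageCount)
        {
            /* Found a run of pageCount free entries. */
            return (long) index;
        }
    }

    /* No run large enough. */
    return -1;
}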
/******************************************************************************* ** gckKERNEL_DeleteDatabase ** ** Remove a database from the hash list and delete its structure. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** gcsDATABASE_PTR Database ** Pointer to the database structure to remove. ** ** OUTPUT: ** ** Nothing. */ static gceSTATUS gckKERNEL_DeleteDatabase( IN gckKERNEL Kernel, IN gcsDATABASE_PTR Database ) { gceSTATUS status; gctBOOL acquired = gcvFALSE; gcsDATABASE_PTR database; gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database); /* Acquire the database mutex. */ gcmkONERROR( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; /* Check slot value. */ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db)); if (Database->slot < gcmCOUNTOF(Kernel->db->db)) { /* Check if database if the head of the hash list. */ if (Kernel->db->db[Database->slot] == Database) { /* Remove the database from the hash list. */ Kernel->db->db[Database->slot] = Database->next; } else { /* Walk the has list to find the database. */ for (database = Kernel->db->db[Database->slot]; database != gcvNULL; database = database->next ) { /* Check if the next list entry is this database. */ if (database->next == Database) { /* Remove the database from the hash list. */ database->next = Database->next; break; } } if (database == gcvNULL) { /* Ouch! Something got corrupted. */ gcmkONERROR(gcvSTATUS_INVALID_DATA); } } } if (Kernel->db->lastDatabase != gcvNULL) { /* Insert database to the free list. */ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase; Kernel->db->freeDatabase = Kernel->db->lastDatabase; } /* Keep database as the last database. */ Kernel->db->lastDatabase = Database; /* Release the database mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
/******************************************************************************* ** gckKERNEL_FindDatabase ** ** Find a database identified by a process ID and move it to the head of the ** hash list. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** gctUINT32 ProcessID ** ProcessID that identifies the database. ** ** gctBOOL LastProcessID ** gcvTRUE if searching for the last known process ID. gcvFALSE if ** we need to search for the process ID specified by the ProcessID ** argument. ** ** OUTPUT: ** ** gcsDATABASE_PTR * Database ** Pointer to a variable receiving the database structure pointer on ** success. */ static gceSTATUS gckKERNEL_FindDatabase( IN gckKERNEL Kernel, IN gctUINT32 ProcessID, IN gctBOOL LastProcessID, OUT gcsDATABASE_PTR * Database ) { gceSTATUS status; gcsDATABASE_PTR database, previous; gctSIZE_T slot; gctBOOL acquired = gcvFALSE; gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d", Kernel, ProcessID, LastProcessID); /* Compute the hash for the database. */ slot = ProcessID % gcmCOUNTOF(Kernel->db->db); /* Acquire the database mutex. */ gcmkONERROR( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; /* Check whether we are getting the last known database. */ if (LastProcessID) { /* Use last database. */ database = Kernel->db->lastDatabase; if (database == gcvNULL) { /* Database not found. */ gcmkONERROR(gcvSTATUS_INVALID_DATA); } } else { /* Walk the hash list. */ for (previous = gcvNULL, database = Kernel->db->db[slot]; database != gcvNULL; database = database->next) { if (database->processID == ProcessID) { /* Found it! */ break; } previous = database; } if (database == gcvNULL) { /* Database not found. */ gcmkONERROR(gcvSTATUS_INVALID_DATA); } if (previous != gcvNULL) { /* Move database to the head of the hash list. */ previous->next = database->next; database->next = Kernel->db->db[slot]; Kernel->db->db[slot] = database; } } /* Release the database mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Return the database. */ *Database = database; /* Success. */ gcmkFOOTER_ARG("*Database=0x%x", *Database); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
/******************************************************************************* ** gckKERNEL_NewDatabase ** ** Create a new database structure and insert it to the head of the hash list. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** u32 ProcessID ** ProcessID that identifies the database. ** ** OUTPUT: ** ** gcsDATABASE_PTR * Database ** Pointer to a variable receiving the database structure pointer on ** success. */ static gceSTATUS gckKERNEL_NewDatabase( IN gckKERNEL Kernel, IN u32 ProcessID, OUT gcsDATABASE_PTR * Database ) { gceSTATUS status; gcsDATABASE_PTR database; int acquired = gcvFALSE; size_t slot; gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID); /* Acquire the database mutex. */ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; if (Kernel->db->freeDatabase != NULL) { /* Allocate a database from the free list. */ database = Kernel->db->freeDatabase; Kernel->db->freeDatabase = database->next; } else { void *pointer = NULL; /* Allocate a new database from the heap. */ gcmkONERROR(gckOS_Allocate(Kernel->os, sizeof(gcsDATABASE), &pointer)); database = pointer; } /* Compute the hash for the database. */ slot = ProcessID % ARRAY_SIZE(Kernel->db->db); /* Insert the database into the hash. */ database->next = Kernel->db->db[slot]; Kernel->db->db[slot] = database; /* Save the hash slot. */ database->slot = slot; /* Release the database mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Return the database. */ *Database = database; /* Success. */ gcmkFOOTER_ARG("*Database=0x%x", *Database); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
IN gcsDATABASE_PTR Database, IN gceDATABASE_TYPE Type, IN void *Data, OUT size_t *Bytes OPTIONAL ) { gceSTATUS status; int acquired = gcvFALSE; gcsDATABASE_RECORD_PTR record, previous; gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x", Kernel, Database, Type, Data); /* Acquire the database mutex. */ gcmkONERROR( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; /* Scan the database for this record. */ for (record = Database->list, previous = NULL; record != NULL; record = record->next ) { if ((record->type == Type) && (record->data == Data) ) { /* Found it! */ break; }
/******************************************************************************* ** ** gckVIDMEM_AllocateLinear ** ** Allocate linear memory from the gckVIDMEM object. ** ** INPUT: ** ** gckVIDMEM Memory ** Pointer to an gckVIDMEM object. ** ** gctSIZE_T Bytes ** Number of bytes to allocate. ** ** gctUINT32 Alignment ** Byte alignment for allocation. ** ** gceSURF_TYPE Type ** Type of surface to allocate (use by bank optimization). ** ** OUTPUT: ** ** gcuVIDMEM_NODE_PTR * Node ** Pointer to a variable that will hold the allocated memory node. */ gceSTATUS gckVIDMEM_AllocateLinear( IN gckVIDMEM Memory, IN gctSIZE_T Bytes, IN gctUINT32 Alignment, IN gceSURF_TYPE Type, #ifdef __QNXNTO__ IN gctHANDLE Handle, #endif OUT gcuVIDMEM_NODE_PTR * Node ) { gceSTATUS status; gcuVIDMEM_NODE_PTR node; gctUINT32 alignment; gctINT bank, i; gctBOOL acquired = gcvFALSE; gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d", Memory, Bytes, Alignment, Type); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM); gcmkVERIFY_ARGUMENT(Bytes > 0); gcmkVERIFY_ARGUMENT(Node != gcvNULL); #ifdef __QNXNTO__ gcmkVERIFY_ARGUMENT(Handle != gcvNULL); #endif /* Acquire the mutex. */ gcmkONERROR( gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE)); acquired = gcvTRUE; if (Bytes > Memory->freeBytes) { /* Not enough memory. */ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); } /* Find the default bank for this surface type. */ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping)); bank = Memory->mapping[Type]; alignment = Alignment; /* Find a free node in the default bank. */ node = _FindNode(Memory, bank, Bytes, &alignment); /* Out of memory? */ if (node == gcvNULL) { /* Walk all lower banks. */ for (i = bank - 1; i >= 0; --i) { /* Find a free node inside the current bank. */ node = _FindNode(Memory, i, Bytes, &alignment); if (node != gcvNULL) { break; } } } if (node == gcvNULL) { /* Walk all upper banks. */ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i) { if (Memory->sentinel[i].VidMem.nextFree == gcvNULL) { /* Abort when we reach unused banks. */ break; } /* Find a free node inside the current bank. */ node = _FindNode(Memory, i, Bytes, &alignment); if (node != gcvNULL) { break; } } } if (node == gcvNULL) { /* Out of memory. */ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); } /* Do we have an alignment? */ if (alignment > 0) { /* Split the node so it is aligned. */ if (_Split(Memory->os, node, alignment)) { /* Successful split, move to aligned node. */ node = node->VidMem.next; /* Remove alignment. */ alignment = 0; } } /* Do we have enough memory after the allocation to split it? */ if (node->VidMem.bytes - Bytes > Memory->threshold) { /* Adjust the node size. */ _Split(Memory->os, node, Bytes); } /* Remove the node from the free list. */ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree; node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree; node->VidMem.nextFree = node->VidMem.prevFree = gcvNULL; /* Fill in the information. */ node->VidMem.alignment = alignment; node->VidMem.memory = Memory; #ifdef __QNXNTO__ node->VidMem.logical = gcvNULL; node->VidMem.handle = Handle; #endif /* Adjust the number of free bytes. */ Memory->freeBytes -= node->VidMem.bytes; /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex)); /* Return the pointer to the node. */ *Node = node; gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "Allocated %u bytes @ 0x%x [0x%08X]", node->VidMem.bytes, node, node->VidMem.offset); /* Success. 
*/ gcmkFOOTER_ARG("*Node=0x%x", *Node); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
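/* Hedged usage sketch: the typical lifetime of a linear node from this pool,
** using the non-QNX signature of gckVIDMEM_AllocateLinear together with the
** lock, unlock and free routines in this file. Size, alignment and surface
** type are illustrative. */
static gceSTATUS _vidmem_lifetime(
    gckVIDMEM Memory
    )
{
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node = gcvNULL;
    gctUINT32 address;
    gctBOOL asynchroneous = gcvFALSE;

    /* Carve a 64-byte aligned, 4 KB surface out of the bitmap bank. */
    gcmkONERROR(
        gckVIDMEM_AllocateLinear(Memory, 4096, 64, gcvSURF_BITMAP, &node));

    /* Pin it and fetch the GPU address. */
    gcmkONERROR(gckVIDMEM_Lock(node, &address));

    /* ... hardware uses 'address' ... */

    /* For a node carved from the VidMem pool the unlock is synchronous and
    ** asynchroneous comes back gcvFALSE. */
    gcmkONERROR(gckVIDMEM_Unlock(node, gcvSURF_BITMAP, &asynchroneous));

    /* Return the node to the free list; it merges with free neighbours. */
    gcmkONERROR(gckVIDMEM_Free(node));

    return gcvSTATUS_OK;

OnError:
    return status;
}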
/******************************************************************************* ** ** gckVIDMEM_Free ** ** Free an allocated video memory node. ** ** INPUT: ** ** gcuVIDMEM_NODE_PTR Node ** Pointer to a gcuVIDMEM_NODE object. ** ** OUTPUT: ** ** Nothing. */ gceSTATUS gckVIDMEM_Free( IN gcuVIDMEM_NODE_PTR Node ) { gckVIDMEM memory = gcvNULL; gcuVIDMEM_NODE_PTR node; gceSTATUS status; gctBOOL acquired = gcvFALSE; gcmkHEADER_ARG("Node=0x%x", Node); /* Verify the arguments. */ if ((Node == gcvNULL) || (Node->VidMem.memory == gcvNULL) ) { /* Invalid object. */ gcmkONERROR(gcvSTATUS_INVALID_OBJECT); } /**************************** Video Memory ********************************/ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM) { if (Node->VidMem.locked > 0) { gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM, "Node 0x%x is locked (%d)", Node, Node->VidMem.locked); /* Force unlock. */ Node->VidMem.locked = 0; } /* Extract pointer to gckVIDMEM object owning the node. */ memory = Node->VidMem.memory; /* Acquire the mutex. */ gcmkONERROR( gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE)); acquired = gcvTRUE; #ifdef __QNXNTO__ /* Reset handle to 0. */ Node->VidMem.logical = gcvNULL; Node->VidMem.handle = 0; /* Don't try to a re-free an already freed node. */ if ((Node->VidMem.nextFree == gcvNULL) && (Node->VidMem.prevFree == gcvNULL) ) #endif { /* Update the number of free bytes. */ memory->freeBytes += Node->VidMem.bytes; /* Find the next free node. */ for (node = Node->VidMem.next; node->VidMem.nextFree == gcvNULL; node = node->VidMem.next) ; /* Insert this node in the free list. */ Node->VidMem.nextFree = node; Node->VidMem.prevFree = node->VidMem.prevFree; Node->VidMem.prevFree->VidMem.nextFree = node->VidMem.prevFree = Node; /* Is the next node a free node and not the sentinel? */ if ((Node->VidMem.next == Node->VidMem.nextFree) && (Node->VidMem.next->VidMem.bytes != 0) ) { /* Merge this node with the next node. */ gcmkONERROR(_Merge(memory->os, node = Node)); gcmkASSERT(node->VidMem.nextFree != node); gcmkASSERT(node->VidMem.prevFree != node); } /* Is the previous node a free node and not the sentinel? */ if ((Node->VidMem.prev == Node->VidMem.prevFree) && (Node->VidMem.prev->VidMem.bytes != 0) ) { /* Merge this node with the previous node. */ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev)); gcmkASSERT(node->VidMem.nextFree != node); gcmkASSERT(node->VidMem.prevFree != node); } } /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex)); /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; } /*************************** Virtual Memory *******************************/ /* Verify the gckKERNEL object pointer. */ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL); #ifdef __QNXNTO__ if (!Node->Virtual.unlockPending && (Node->Virtual.locked > 0)) #else if (!Node->Virtual.pending && (Node->Virtual.locked > 0)) #endif { gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM, "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)", Node, Node->Virtual.locked); /* Force unlock. */ Node->Virtual.locked = 0; } #ifdef __QNXNTO__ if (!Node->Virtual.freePending) { if (Node->Virtual.unlockPending) #else if (Node->Virtual.pending) #endif { gcmkASSERT(Node->Virtual.locked == 1); /* Schedule the node to be freed. */ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM, "gckVIDMEM_Free: Scheduling node 0x%x to be freed later", Node); /* Schedule the video memory to be freed again. 
*/ gcmkONERROR(gckEVENT_FreeVideoMemory(Node->Virtual.kernel->event, Node, gcvKERNEL_PIXEL)); #ifdef __QNXNTO__ Node->Virtual.freePending = gcvTRUE; } #endif /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_SKIP; } else { /* Free the virtual memory. */ gcmkVERIFY_OK(gckOS_FreePagedMemory(Node->Virtual.kernel->os, Node->Virtual.physical, Node->Virtual.bytes)); /* Destroy the gcuVIDMEM_NODE union. */ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node)); } /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex)); } /* Return the status. */ gcmkFOOTER(); return status; }
/******************************************************************************* ** gckKERNEL_NewRecord ** ** Create a new database record structure and insert it to the head of the ** database. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to a gckKERNEL object. ** ** gcsDATABASE_PTR Database ** Pointer to a database structure. ** ** OUTPUT: ** ** gcsDATABASE_RECORD_PTR * Record ** Pointer to a variable receiving the database record structure ** pointer on success. */ static gceSTATUS gckKERNEL_NewRecord( IN gckKERNEL Kernel, IN gcsDATABASE_PTR Database, IN gctUINT32 Slot, OUT gcsDATABASE_RECORD_PTR * Record ) { gceSTATUS status; gctBOOL acquired = gcvFALSE; gcsDATABASE_RECORD_PTR record = gcvNULL; gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database); /* Acquire the database mutex. */ gcmkONERROR( gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE)); acquired = gcvTRUE; if (Kernel->db->freeRecord != gcvNULL) { /* Allocate the record from the free list. */ record = Kernel->db->freeRecord; Kernel->db->freeRecord = record->next; } else { gctPOINTER pointer = gcvNULL; /* Allocate the record from the heap. */ gcmkONERROR(gckOS_Allocate(Kernel->os, gcmSIZEOF(gcsDATABASE_RECORD), &pointer)); record = pointer; } /* Insert the record in the database. */ record->next = Database->list[Slot]; Database->list[Slot] = record; /* Release the database mutex. */ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); /* Return the record. */ *Record = record; /* Success. */ gcmkFOOTER_ARG("*Record=0x%x", *Record); return gcvSTATUS_OK; OnError: if (acquired) { /* Release the database mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex)); } if (record != gcvNULL) { gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record)); } /* Return the status. */ gcmkFOOTER(); return status; }
static gceSTATUS _CompactKernelHeap( IN gckHEAP Heap ) { gcskHEAP_PTR heap, next; gctPOINTER p; gcskHEAP_PTR freeList = gcvNULL; gcmkHEADER_ARG("Heap=0x%x", Heap); /* Walk all the heaps. */ for (heap = Heap->heap; heap != gcvNULL; heap = next) { gcskNODE_PTR lastFree = gcvNULL; /* Zero out the free list. */ heap->freeList = gcvNULL; /* Start at the first node. */ for (p = (gctUINT8_PTR) (heap + 1);;) { /* Convert the pointer. */ gcskNODE_PTR node = (gcskNODE_PTR) p; gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size)); /* Test if this node not used. */ if (node->next != gcdIN_USE) { /* Test if this is the end of the heap. */ if (node->bytes == 0) { break; } /* Test of this is the first free node. */ else if (lastFree == gcvNULL) { /* Initialzie the free list. */ heap->freeList = node; lastFree = node; } else { /* Test if this free node is contiguous with the previous ** free node. */ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p) { /* Just increase the size of the previous free node. */ lastFree->bytes += node->bytes; } else { /* Add to linked list. */ lastFree->next = node; lastFree = node; } } } /* Move to next node. */ p = (gctUINT8_PTR) node + node->bytes; } /* Mark the end of the chain. */ if (lastFree != gcvNULL) { lastFree->next = gcvNULL; } /* Get next heap. */ next = heap->next; /* Check if the entire heap is free. */ if ((heap->freeList != gcvNULL) && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE)) ) { /* Remove the heap from the linked list. */ if (heap->prev == gcvNULL) { Heap->heap = next; } else { heap->prev->next = next; } if (heap->next != gcvNULL) { heap->next->prev = heap->prev; } #if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) /* Update profiling. */ Heap->heapCount -= 1; Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP); #endif /* Add this heap to the list of heaps that need to be freed. */ heap->next = freeList; freeList = heap; } } if (freeList != gcvNULL) { /* Release the mutex, remove any chance for a dead lock. */ gcmkVERIFY_OK( gckOS_ReleaseMutex(Heap->os, Heap->mutex)); /* Free all heaps in the free list. */ for (heap = freeList; heap != gcvNULL; heap = next) { /* Get pointer to the next heap. */ next = heap->next; /* Free the heap. */ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP, "Freeing heap 0x%x (%lu bytes)", heap, heap->size + gcmSIZEOF(gcskHEAP)); gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap)); } /* Acquire the mutex again. */ gcmkVERIFY_OK( gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE)); } /* Success. */ gcmkFOOTER_NO(); return gcvSTATUS_OK; }
/*******************************************************************************
** gckKERNEL_DeleteRecord
**
** Remove a database record from the database and delete its structure.
**
** INPUT:
**
**      gckKERNEL Kernel
**          Pointer to a gckKERNEL object.
**
**      gcsDATABASE_PTR Database
**          Pointer to a database structure.
**
**      gceDATABASE_TYPE Type
**          Type of the record to remove.
**
**      gctPOINTER Data
**          Data of the record to remove.
**
** OUTPUT:
**
**      gctSIZE_T_PTR Bytes
**          Pointer to a variable that receives the size of the record deleted.
**          Can be gcvNULL if the size is not required.
*/
static gceSTATUS
gckKERNEL_DeleteRecord(
    IN gckKERNEL Kernel,
    IN gcsDATABASE_PTR Database,
    IN gceDATABASE_TYPE Type,
    IN gctPOINTER Data,
    OUT gctSIZE_T_PTR Bytes OPTIONAL
    )
{
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;
    gcsDATABASE_RECORD_PTR record, previous;
    gctUINT32 slot = _GetSlot(Database, Data);

    gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
                   Kernel, Database, Type, Data);

    /* Acquire the database mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
    acquired = gcvTRUE;

    /* Scan the database for this record. */
    for (record = Database->list[slot], previous = gcvNULL;
         record != gcvNULL;
         record = record->next
    )
    {
        if ((record->type == Type)
        &&  (record->data == Data)
        )
        {
            /* Found it! */
            break;
        }

        previous = record;
    }

    if (record == gcvNULL)
    {
        /* Ouch! This record is not found? */
        gcmkONERROR(gcvSTATUS_INVALID_DATA);
    }

    if (Bytes != gcvNULL)
    {
        /* Return size of record. */
        *Bytes = record->bytes;
    }

    /* Remove record from database. */
    if (previous == gcvNULL)
    {
        Database->list[slot] = record->next;
    }
    else
    {
        previous->next = record->next;
    }

    /* Insert record in free list. */
    record->next           = Kernel->db->freeRecord;
    Kernel->db->freeRecord = record;

    /* Release the database mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));

    /* Success. */
    gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the database mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
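/* gckKERNEL_DeleteRecord unlinks a record from its hash chain by remembering
** the previous node during the scan, then parks the record on the free list
** so gckKERNEL_NewRecord can reuse it. A standalone sketch of that unlink
** step, with hypothetical names and no locking (illustration only): */
#if 0
#include <stddef.h>

struct sketch_record
{
    struct sketch_record * next;
    void *                 data;
};

/* Remove the record carrying Data from *Chain and push it onto *FreeList.
** Returns 0 on success, -1 if the record is not in the chain. */
static int
sketch_delete_record(struct sketch_record ** Chain,
                     struct sketch_record ** FreeList,
                     void *                  Data)
{
    struct sketch_record * record   = *Chain;
    struct sketch_record * previous = NULL;

    while (record != NULL && record->data != Data)
    {
        previous = record;
        record   = record->next;
    }

    if (record == NULL)
    {
        return -1;                       /* Not found. */
    }

    if (previous == NULL)
    {
        *Chain = record->next;           /* Record was the chain head. */
    }
    else
    {
        previous->next = record->next;   /* Bypass the record. */
    }

    record->next = *FreeList;            /* Recycle instead of freeing. */
    *FreeList    = record;
    return 0;
}
#endif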
/*******************************************************************************
** gckKERNEL_NewDatabase
**
** Create a new database structure and insert it at the head of the hash list.
**
** INPUT:
**
**      gckKERNEL Kernel
**          Pointer to a gckKERNEL object.
**
**      gctUINT32 ProcessID
**          ProcessID that identifies the database.
**
** OUTPUT:
**
**      gcsDATABASE_PTR * Database
**          Pointer to a variable receiving the database structure pointer on
**          success.
*/
static gceSTATUS
gckKERNEL_NewDatabase(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    OUT gcsDATABASE_PTR * Database
    )
{
    gceSTATUS status;
    gcsDATABASE_PTR database;
    gctBOOL acquired = gcvFALSE;
    gctSIZE_T slot;
    gcsDATABASE_PTR existingDatabase;

    gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);

    /* Acquire the database mutex. */
    gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
    acquired = gcvTRUE;

    /* Compute the hash for the database. */
    slot = ProcessID % gcmCOUNTOF(Kernel->db->db);

    /* Walk the hash list. */
    for (existingDatabase = Kernel->db->db[slot];
         existingDatabase != gcvNULL;
         existingDatabase = existingDatabase->next)
    {
        if (existingDatabase->processID == ProcessID)
        {
            /* One process can't be added twice. */
            gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
        }
    }

    if (Kernel->db->freeDatabase != gcvNULL)
    {
        /* Allocate a database from the free list. */
        database                 = Kernel->db->freeDatabase;
        Kernel->db->freeDatabase = database->next;
    }
    else
    {
        gctPOINTER pointer = gcvNULL;

        /* Allocate a new database from the heap. */
        gcmkONERROR(gckOS_Allocate(Kernel->os,
                                   gcmSIZEOF(gcsDATABASE),
                                   &pointer));

        database = pointer;
    }

    /* Insert the database into the hash. */
    database->next       = Kernel->db->db[slot];
    Kernel->db->db[slot] = database;

    /* Save the hash slot. */
    database->slot = slot;

    /* Release the database mutex. */
    gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));

    /* Return the database. */
    *Database = database;

    /* Success. */
    gcmkFOOTER_ARG("*Database=0x%x", *Database);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the database mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
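/* Per-process databases live in a small hash table indexed by
** ProcessID % gcmCOUNTOF(Kernel->db->db), and a process may appear in its
** bucket at most once; gckKERNEL_NewDatabase rejects duplicates before
** allocating. A standalone sketch of that bucket lookup, using a hypothetical
** fixed-size table (illustration only, not driver code): */
#if 0
#include <stddef.h>
#include <stdint.h>

#define SKETCH_BUCKETS 32

struct sketch_database
{
    uint32_t                 processID;
    struct sketch_database * next;
};

static struct sketch_database * sketch_table[SKETCH_BUCKETS];

/* Return the existing database for Pid, or NULL if its bucket has none. */
static struct sketch_database *
sketch_find_database(uint32_t Pid)
{
    struct sketch_database * db;

    for (db = sketch_table[Pid % SKETCH_BUCKETS]; db != NULL; db = db->next)
    {
        if (db->processID == Pid)
        {
            return db;    /* Already present: adding it again would be rejected. */
        }
    }

    return NULL;          /* Not present: a new database may be inserted here. */
}
#endif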
/*******************************************************************************
**
**  gckCOMMAND_Reserve
**
**  Reserve space in the command queue. Also acquire the command queue mutex.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to a gckCOMMAND object.
**
**      gctSIZE_T RequestedBytes
**          Number of bytes to reserve.
**
**      gctBOOL Locking
**          gcvTRUE if the caller already holds the command queue mutex, in
**          which case it will not be acquired again.
**
**  OUTPUT:
**
**      gctPOINTER * Buffer
**          Pointer to a variable that will receive the address of the reserved
**          space.
**
**      gctSIZE_T * BufferSize
**          Pointer to a variable that will receive the number of bytes
**          available in the command queue.
*/
gceSTATUS
gckCOMMAND_Reserve(
    IN gckCOMMAND Command,
    IN gctSIZE_T RequestedBytes,
    IN gctBOOL Locking,
    OUT gctPOINTER * Buffer,
    OUT gctSIZE_T * BufferSize
    )
{
    gceSTATUS status;
    gctSIZE_T requiredBytes, bytes;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu Locking=%d",
                   Command, RequestedBytes, Locking);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    if (!Locking)
    {
        /* Grab the command queue mutex. */
        gcmkONERROR(
            gckOS_AcquireMutex(Command->os,
                               Command->mutexQueue,
                               gcvINFINITE));
        acquired = gcvTRUE;
    }

    /* Compute number of bytes required for WAIT/LINK. */
    gcmkONERROR(
        gckHARDWARE_WaitLink(Command->kernel->hardware,
                             gcvNULL,
                             Command->offset + gcmALIGN(RequestedBytes,
                                                        Command->alignment),
                             &requiredBytes,
                             gcvNULL,
                             gcvNULL));

    /* Compute total number of bytes required. */
    requiredBytes += gcmALIGN(RequestedBytes, Command->alignment);

    /* Compute number of bytes available in command queue. */
    bytes = Command->pageSize - Command->offset;

    if (bytes < requiredBytes)
    {
        /* Create a new command queue. */
        gcmkONERROR(_NewQueue(Command, gcvTRUE));

        /* Recompute number of bytes available in command queue. */
        bytes = Command->pageSize - Command->offset;

        if (bytes < requiredBytes)
        {
            /* Rare case, not enough room in command queue. */
            gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
        }
    }

    /* Return pointer to the empty slot in the command queue. */
    *Buffer = (gctUINT8 *) Command->logical + Command->offset;

    /* Return number of bytes left in command queue. */
    *BufferSize = bytes;

    /* Success. */
    gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release command queue mutex on error. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
    }

    /* Return status. */
    gcmkFOOTER();
    return status;
}
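/* gckCOMMAND_Reserve only hands out space when the aligned request plus the
** trailing WAIT/LINK commands fit in what remains of the current command
** page; otherwise it rolls over to a new queue and checks again. A standalone
** arithmetic sketch of that fit test (hypothetical names; in the driver the
** WAIT/LINK size comes from gckHARDWARE_WaitLink and the alignment macro is
** gcmALIGN): */
#if 0
#include <stddef.h>

/* Round n up to the next multiple of a power-of-two alignment. */
#define SKETCH_ALIGN(n, align) (((n) + (align) - 1) & ~((size_t) (align) - 1))

/* Return nonzero if the request fits in the remaining page space. */
static int
sketch_fits(size_t PageSize, size_t Offset,
            size_t RequestedBytes, size_t Alignment, size_t WaitLinkBytes)
{
    size_t aligned   = SKETCH_ALIGN(RequestedBytes, Alignment);
    size_t required  = aligned + WaitLinkBytes;   /* Request plus WAIT/LINK trailer. */
    size_t available = PageSize - Offset;         /* Space left in the command page. */

    return available >= required;
}
#endif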