/* Driver exit path: tear down the char device, stop and destroy the GAL
** device, shut down debugfs, then gate GPU power/clocks. */
static void drv_exit(void)
#endif /* NOTE(review): closes an #if/#else begun before this chunk
       ** (presumably selecting the exit prototype) — confirm against
       ** the lines above this view. */
{
    gcmkHEADER();

    /* The device class must have been created by drv_init. */
    gcmkASSERT(gpuClass != gcvNULL);

    /* Remove the device node and class, then release the major number. */
    device_destroy(gpuClass, MKDEV(major, 0));
    class_destroy(gpuClass);
    unregister_chrdev(major, DRV_NAME);

    /* Stop the hardware before destroying the device object. */
    gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
    gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));

    /* Tear down the debug file system, if it was brought up. */
    if(gckDebugFileSystemIsEnabled())
    {
        gckDebugFileSystemTerminate();
    }

#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
    {
#if MRVL_PLATFORM_PXA1088
        /* Power down the 2D core first on PXA1088. */
        gcmkVERIFY_OK(gckOS_GpuPowerDisable(galDevice->os, gcvCORE_2D, gcvTRUE, gcvFALSE));
#endif
        /* Power down the major (3D) core last. */
        gcmkVERIFY_OK(gckOS_GpuPowerDisable(galDevice->os, gcvCORE_MAJOR, gcvTRUE, gcvTRUE));
    }
#endif

    gcmkFOOTER_NO();
}
/*******************************************************************************
**
**  gckVGMMU_Destroy
**
**  Destroy a gckVGMMU object.
**
**  INPUT:
**
**      gckVGMMU Mmu
**          Pointer to an gckVGMMU object.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS gckVGMMU_Destroy(
    IN gckVGMMU Mmu
    )
{
    gcmkHEADER_ARG("Mmu=0x%x", Mmu);

    /* Validate the incoming object. */
    gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);

    /* Release the contiguous page table memory. */
    gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
                                       Mmu->pageTablePhysical,
                                       Mmu->pageTableLogical,
                                       Mmu->pageTableSize));

    /* Dispose of the access mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));

    /* Invalidate the object signature before freeing. */
    Mmu->object.type = gcvOBJ_UNKNOWN;

    /* Release the object storage itself. */
    gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));

    gcmkFOOTER_NO();

    /* Success. */
    return gcvSTATUS_OK;
}
/* Driver exit path: tear down the char device, stop and destroy the GAL
** device, shut down debugfs, then disable the GPU clock. */
static void drv_exit(void)
#endif /* NOTE(review): closes an #if/#else begun before this chunk
       ** (presumably selecting the exit prototype) — confirm. */
{
    gcmkHEADER();

    /* The device class must have been created by drv_init. */
    gcmkASSERT(gpuClass != gcvNULL);

    /* Remove the device node and class, then release the major number. */
    device_destroy(gpuClass, MKDEV(major, 0));
    class_destroy(gpuClass);
    unregister_chrdev(major, DEVICE_NAME);

    /* Stop the hardware before destroying the device object. */
    gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
    gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));

    /* Tear down the debug file system, if enabled. */
    if(gckDEBUGFS_IsEnabled())
    {
        gckDEBUGFS_Terminate();
    }

#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
    {
        struct clk * clk = NULL;

#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
        /* Power down via the DVFM helper before gating the clock. */
        gc_pwr(0);
#endif
        /* Gate the GC clock. NOTE(review): clk_get() result is not
        ** IS_ERR-checked before clk_disable — confirm "GCCLK" always
        ** exists on supported platforms. */
        clk = clk_get(NULL, "GCCLK");
        clk_disable(clk);
    }
#endif

    gcmkFOOTER_NO();
}
/*******************************************************************************
**
**  gckVIDMEM_Destroy
**
**  Destroy an gckVIDMEM object.
**
**  INPUT:
**
**      gckVIDMEM Memory
**          Pointer to an gckVIDMEM object to destroy.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_Destroy(
    IN gckVIDMEM Memory
    )
{
    gcuVIDMEM_NODE_PTR node, next;
    gctINT i;

    gcmkHEADER_ARG("Memory=0x%x", Memory);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);

    /* Walk all sentinels; each sentinel heads a circular node list that
    ** terminates back at a node whose bytes count is 0. */
    for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
    {
        /* Bail out if the heap is not used (list never initialized). */
        if (Memory->sentinel[i].VidMem.next == gcvNULL)
        {
            break;
        }

        /* Walk all the nodes until we reach the sentinel (bytes == 0). */
        for (node = Memory->sentinel[i].VidMem.next;
             node->VidMem.bytes != 0;
             node = next)
        {
            /* Save pointer to the next node before freeing this one. */
            next = node->VidMem.next;

            /* Free the node. */
            gcmkVERIFY_OK(gckOS_Free(Memory->os, node));
        }
    }

    /* Free the mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));

    /* Mark the object as unknown. */
    Memory->object.type = gcvOBJ_UNKNOWN;

    /* Free the gckVIDMEM object. */
    gcmkVERIFY_OK(gckOS_Free(Memory->os, Memory));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/* cat /proc/driver/gc will print gc related msg.
**
** Formats the GPU idle and clock-control registers (plus DVFM mode flags
** when CONFIG_PXA_DVFM is set) into a local buffer and copies the
** requested window of it to userspace.
**
** Fix: removed the unreachable "return 0;" that followed the
** simple_read_from_buffer() return (dead code). */
static ssize_t gc_proc_read(struct file *file,
    char __user *buffer, size_t count, loff_t *offset)
{
    ssize_t len = 0;
    char buf[1000];
    gctUINT32 idle;
    gctUINT32 clockControl;

    /* Snapshot the idle register without waiting. */
    gcmkVERIFY_OK(gckHARDWARE_GetIdle(galDevice->kernel->hardware,
        gcvFALSE, &idle));
    len += sprintf(buf+len, "idle register: 0x%02x\n", idle);

    /* Register 0x00000 is the clock control register. */
    gckOS_ReadRegister(galDevice->os, 0x00000, &clockControl);
    len += sprintf(buf+len, "clockControl register: 0x%02x\n", clockControl);

#ifdef CONFIG_PXA_DVFM
    /* Report the low-power/debug mode flags. */
    len += sprintf(buf+len,
        "mode:\tDOCS(%d)D1(%d)D2(%d)CG(%d)\n\tDebug(%d)Pid(%d)Reset(%d)\n",
        galDevice->enableD0CS,
        galDevice->enableD1,
        galDevice->enableD2,
        galDevice->enableCG,
        galDevice->needD2DebugInfo,
        galDevice->printPID,
        galDevice->needResetAfterD2);
#endif

    /* Hand the formatted snapshot to userspace with offset handling. */
    return simple_read_from_buffer(buffer, count, offset, buf, len);
}
static ssize_t show_poweroff_timeout (struct device *dev, struct device_attribute *attr, char * buf) { gctUINT32 timeout, i; ssize_t len = 0; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (galDevice->kernels[i] != gcvNULL) { gcmkVERIFY_OK(gckHARDWARE_QueryPowerOffTimeout( galDevice->kernels[i]->hardware, &timeout)); len += sprintf(buf+len, "[%s] power_off_timeout = %d ms\n", _core_desc[i], timeout); } } len += sprintf(buf+len, "\n* Usage:\n" " $ cat /sys/devices/.../poweroff_timeout\n" " $ echo [core],[timeout] > /sys/devices/.../poweroff_timeout\n" ); return len; }
/* sysfs 'store' handler: query register-access statistics.
**
** Input format: "<type> 0x<offset>", where <type> contains "default" or
** "offset". Prints a warning when the queried registers are unreadable
** because a clock is gated.
**
** Fix: sscanf used an unbounded "%s" into a 10-byte buffer, allowing a
** stack overflow from sysfs input — bounded to "%9s" (9 chars + NUL). */
static ssize_t store_register_stats(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
    gctCHAR type[10];
    gctUINT32 offset, clkState = 0;
    gctINT t = ~0;

    /* Bound the string conversion to the buffer size minus the NUL. */
    SYSFS_VERIFY_INPUT(sscanf(buf, "%9s 0x%x", type, &offset), 2);
    SYSFS_VERIFY_INPUT_RANGE(offset, 0, 0x30001);

    /* Map the type keyword to the query mode. */
    if(strstr(type, "default"))
    {
        t = 0;
    }
    else if(strstr(type, "offset"))
    {
        t = 1;
    }
    else
    {
        gcmkPRINT("Invalid Command~");
        return count;
    }

    gcmkVERIFY_OK(gckOS_QueryRegisterStats(
                  galDevice->os,
                  t,
                  offset,
                  &clkState));

    /* For offset queries, explain why a read may have failed. */
    if(t && clkState)
    {
        gcmkPRINT("Some Registers can't be read because of %s disabled",
                  (clkState&0x11)?("External/Internal clk"):
                  ((clkState&0x01)?"External":"Internal"));
    }

    return count;
}
/*******************************************************************************
**
**  gckVIDMEM_DestroyVirtual
**
**  Destroy an gcuVIDMEM_NODE union for virtual memory.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a gcuVIDMEM_NODE union.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_DestroyVirtual(
    IN gcuVIDMEM_NODE_PTR Node
    )
{
    gckOS os;

    gcmkHEADER_ARG("Node=0x%x", Node);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);

    /* Extract the gckOS object pointer. */
    os = Node->Virtual.kernel->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

#ifdef __QNXNTO__
    /* QNX only: the node tracks its own paged memory, so unregister it
    ** from the MMU and free the pages here. */
    gcmkVERIFY_OK(
        gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));

    /* Free virtual memory. */
    gcmkVERIFY_OK(
        gckOS_FreePagedMemory(os,
                              Node->Virtual.physical,
                              Node->Virtual.bytes));
#endif

    /* Delete the mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex));

    /* Release the MMU page table pages, if any were mapped. */
    if (Node->Virtual.pageTable != gcvNULL)
    {
        /* Free the pages. */
        gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                       Node->Virtual.pageTable,
                                       Node->Virtual.pageCount));
    }

    /* Delete the gcuVIDMEM_NODE union. */
    gcmkVERIFY_OK(gckOS_Free(os, Node));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/* Dump every process database entry (PID and process name) to the kernel
** log, under the database mutex. Always returns gcvSTATUS_OK. */
gceSTATUS
gckKERNEL_DumpProcessDB(
    IN gckKERNEL Kernel
    )
{
    gcsDATABASE_PTR database;
    gctINT i, pid;
    gctUINT8 name[24];

    gcmkHEADER_ARG("Kernel=0x%x", Kernel);

    /* Acquire the database mutex so the hash table cannot change under us. */
    gcmkVERIFY_OK(
        gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));

    gcmkPRINT("**************************\n");
    gcmkPRINT("***  PROCESS DB DUMP   ***\n");
    gcmkPRINT("**************************\n");
    gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");

    /* Walk the databases: each db[] slot heads a singly-linked chain. */
    for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
    {
        for (database = Kernel->db->db[i];
             database != gcvNULL;
             database = database->next)
        {
            pid = database->processID;

            /* Zero the name buffer so the resolved name is NUL-padded. */
            gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));

            gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));

            gcmkPRINT_N(8, "%-8d%s\n", pid, name);
        }
    }

    /* Release the database mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/* sysfs 'show' handler: per-core register/clock status dump.
** For each present core, prints the idle register (with a busy/idle
** verdict) and the core clock rate; for the major core it also prints
** the raw clock register and, when the chip has a shader clock, the
** shader clock rate. A usage hint is appended at the end. */
static ssize_t show_register_stats(struct device *dev,
                                   struct device_attribute *attr,
                                   char * buf)
{
    gckKERNEL kernel = gcvNULL;
    gctUINT32 clockControl, clockRate, idle, len = 0, i = 0;
    gctBOOL isIdle;

    for(; i< gcdMAX_GPU_COUNT; i++)
    {
        kernel = galDevice->kernels[i];
        if(kernel != gcvNULL)
        {
            if(i == gcvCORE_MAJOR)
            {
                /* Raw read of register 0x00000 (clock control), bypassing
                ** the normal register-access path. */
                gcmkVERIFY_OK(gckOS_DirectReadRegister(galDevice->os,
                                    gcvCORE_MAJOR, 0x00000, &clockControl));
                len += sprintf(buf+len, "clock register: [0x%02x]\n",
                               clockControl);

                if(has_feat_gc_shader())
                {
                    gctUINT32 shClkRate;
                    gcmkVERIFY_OK(gckOS_QueryShClkRate(galDevice->os,
                                                       &shClkRate));
                    /* Convert Hz to MHz for display. */
                    len += sprintf(buf+len, "shader clock rate: [%d] MHz\n",
                                   (gctUINT32)shClkRate/1000/1000);
                }
            }

            len += sprintf(buf+len, "[%s]\n", _core_desc[i]);

            gcmkVERIFY_OK(gckHARDWARE_QueryIdleEx(kernel->hardware,
                                                  &idle, &isIdle));
            gcmkVERIFY_OK(gckOS_QueryClkRate(galDevice->os, i, &clockRate));

            len += sprintf(buf+len, "    idle register: [0x%02x][%s]\n",
                           idle, (gcvTRUE == isIdle)?"idle":"busy");
            /* Convert Hz to MHz for display. */
            len += sprintf(buf+len, "    clock rate: [%d] MHz\n",
                           (gctUINT32)clockRate/1000/1000);
        }
    }

    /* Append usage help; the += keeps the final byte count in len. */
    return len +=sprintf(buf+len, "options:\n"
                     "  echo Core 0xAddr > register_stats\n"
                     "  e.g: echo 0 0x664 > register_stats\n"
                     "       # 0 means core 0\n"
                     "       # 0x664 means register address\n");
}
/* Construct a gckDVFS (dynamic voltage/frequency scaling) manager for a
** hardware core: allocates the object, creates the polling timer, and
** prepares the frequency/voltage adjustment helper.
**
** On failure, rolls back the timer and the allocation and returns the
** error status; on success stores the new object in *Dvfs. */
gceSTATUS
gckDVFS_Construct(
    IN gckHARDWARE Hardware,
    OUT gckDVFS * Dvfs
    )
{
    gceSTATUS status = gcvSTATUS_OK;
    gctPOINTER pointer;
    gckDVFS dvfs = gcvNULL;
    gckOS os = Hardware->os;

    gcmkHEADER_ARG("Hardware=0x%X", Hardware);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
    gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);

    /* Allocate a gckDVFS manager. */
    gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
    gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
    dvfs = pointer;

    /* Initialization. currentScale 64 = full speed (scale is 1..64). */
    dvfs->hardware = Hardware;
    dvfs->pollingTime = gcdDVFS_POLLING_TIME;
    dvfs->os = Hardware->os;
    dvfs->currentScale = 64;

    /* Create a polling timer; _TimerFunction receives the dvfs object. */
    gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));

    /* Initialize frequency and voltage adjustment helper. */
    gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));

    /* Return result. */
    *Dvfs = dvfs;

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Roll back in reverse order of construction. */
    if (dvfs)
    {
        if (dvfs->timer)
        {
            gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
        }

        gcmkOS_SAFE_FREE(os, dvfs);
    }

    gcmkFOOTER();
    return status;
}
/* Destroy a gckDVFS manager: releases the frequency helper, destroys the
** polling timer, and frees the object. Always returns gcvSTATUS_OK. */
gceSTATUS gckDVFS_Destroy(
    IN gckDVFS Dvfs
    )
{
    gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
    gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);

    /* Deinitialize the frequency adjustment helper. */
    gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));

    /* Destroy the polling timer. */
    gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));

    /* Release the object storage and clear the pointer. */
    gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
// ------------------------------------------------- static ssize_t show_mem_stats (struct device *dev, struct device_attribute *attr, char * buf) { gctUINT32 len = 0; gcmkVERIFY_OK(gckOS_ShowVidMemUsage(galDevice->os, buf, &len)); return (ssize_t)len; }
/* Driver exit path: unregister memory-pressure hooks, tear down the char
** device, stop and destroy the GAL device, and shut down debugfs. */
static void drv_exit(void)
#endif /* NOTE(review): closes an #if/#else begun before this chunk
       ** (presumably selecting the exit prototype) — confirm. */
{
    gcmkHEADER();

#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
    /* Stop receiving task-free notifications. */
    task_free_unregister(&task_nb);
#endif

#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
    /* Stop accounting GPU reserved memory. */
    unregister_reserved_memory_account(&viv_gpu_resmem_handler);
#endif

    /* The device class must have been created by drv_init. */
    gcmkASSERT(gpuClass != gcvNULL);

    /* Remove the device node and class, then release the major number. */
    device_destroy(gpuClass, MKDEV(major, 0));
    class_destroy(gpuClass);
    unregister_chrdev(major, DRV_NAME);

    /* Stop the hardware before destroying the device object. */
    gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
    gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));

    /* Tear down the debug file system, if it was brought up. */
    if(gckDebugFileSystemIsEnabled())
    {
        gckDebugFileSystemTerminate();
    }

#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
    {
        /* Legacy clock-gating sequence, disabled via "#if 0" — kept for
        ** reference; the clock is presumably managed elsewhere now. */
#  if 0
        struct clk * clk = NULL;

#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
        gc_pwr(0);
#endif

        clk = clk_get(NULL, "GCCLK");
        clk_disable(clk);
#  endif
    }
#endif

    gcmkFOOTER_NO();
}
/* DVFS polling timer callback.
**
** Reads the hardware load counter, derives a target frequency scale via
** _Policy, applies it, records the resulting real frequency, and then
** re-arms the timer (compensating for the time this pass consumed) as
** long as the DVFS manager has not been stopped.
**
** Data
**     The gckDVFS object registered with the timer at construction. */
static void
_TimerFunction(
    gctPOINTER Data
    )
{
    gceSTATUS status = gcvSTATUS_OK;
    gckDVFS dvfs = (gckDVFS) Data;
    gckHARDWARE hardware = dvfs->hardware;
    gctUINT32 value;
    gctUINT32 frequency;
    gctUINT8 scale;
    gctUINT32 t1, t2, consumed;

    /* Timestamp the start of this pass. */
    gckOS_GetTicks(&t1);

    gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));

    /* Determine target scale. */
    _Policy(dvfs, value, &scale);

    /* Set frequency and voltage. */
    gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));

    /* Query real frequency. */
    gcmkONERROR(
        gckOS_QueryGPUFrequency(hardware->os,
                                hardware->core,
                                &frequency,
                                &dvfs->currentScale));

    _RecordFrequencyHistory(dvfs, frequency);

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
                   "Current frequency = %d",
                   frequency);

    /* Set period. */
    gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));

OnError:
    /* Determine next querying time.
    ** NOTE(review): gcmMIN caps the measured elapsed time at 5 ticks, so a
    ** longer pass is only partially compensated and pollingTime - consumed
    ** cannot underflow — looks intentional, but confirm the intent. */
    gckOS_GetTicks(&t2);

    consumed = gcmMIN(((long)t2 - (long)t1), 5);

    /* Re-arm the timer unless the DVFS manager is shutting down. */
    if (dvfs->stop == gcvFALSE)
    {
        gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
                                       dvfs->timer,
                                       dvfs->pollingTime - consumed));
    }

    return;
}
static void drv_exit(void) #endif { gcmkHEADER(); gcmkASSERT(gpuClass != gcvNULL); device_destroy(gpuClass, MKDEV(major, 0)); class_destroy(gpuClass); unregister_chrdev(major, DEVICE_NAME); gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice)); gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice)); if(gckDEBUGFS_IsEnabled()) { gckDEBUGFS_Terminate(); } gcmkFOOTER_NO(); }
/*******************************************************************************
**
**  gckHEAP_Destroy
**
**  Destroy a gckHEAP object.
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object to destroy.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckHEAP_Destroy(
    IN gckHEAP Heap
    )
{
    gcskHEAP_PTR heap;
#if gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Total bytes found still allocated (debug builds only). */
    gctSIZE_T leaked = 0;
#endif

    gcmkHEADER_ARG("Heap=0x%x", Heap);

    /* Pop arenas off the head of the list until it is empty. */
    for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
    {
        /* Unlink heap from linked list. */
        Heap->heap = heap->next;

#if gcmIS_DEBUG(gcdDEBUG_CODE)
        /* Check for leaked memory. */
        leaked += _DumpHeap(heap);
#endif

        /* Free the heap. */
        gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
    }

    /* Free the mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));

    /* Free the heap structure. */
    gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));

    /* Success. */
#if gcmIS_DEBUG(gcdDEBUG_CODE)
    gcmkFOOTER_ARG("leaked=%lu", leaked);
#else
    gcmkFOOTER_NO();
#endif
    return gcvSTATUS_OK;
}
/*
** PM Thread Routine
**
** Kernel-thread body for power management of the major core. Blocks on
** powerOffSignal until the GPU is reported idle, then repeatedly requests
** gcvPOWER_OFF_TIMEOUT every 200 ms for as long as the hardware stays in
** gcvPOWER_IDLE. Exits (after waiting for kthread_should_stop) when
** device->killThread is set.
**/
static int threadRoutinePM(void *ctxt)
{
    gckGALDEVICE device = (gckGALDEVICE) ctxt;
    gckHARDWARE hardware = device->kernels[gcvCORE_MAJOR]->hardware;
    gceCHIPPOWERSTATE state;

    for(;;)
    {
        /* Wait for an idle notification. */
        gcmkVERIFY_OK(
            gckOS_WaitSignal(device->os,
                             hardware->powerOffSignal,
                             gcvINFINITE));

        /* We try to power off every 200 ms, until GPU is not idle. */
        do
        {
            if (device->killThread == gcvTRUE)
            {
                /* The daemon exits: spin politely until kthread_stop()
                ** is issued for this thread. */
                while (!kthread_should_stop())
                {
                    gckOS_Delay(device->os, 1);
                }

                return 0;
            }

            /* Request the timed power-off transition. */
            gcmkVERIFY_OK(
                gckHARDWARE_SetPowerManagementState(
                    hardware,
                    gcvPOWER_OFF_TIMEOUT));

            /* Relax cpu 200 ms before retry. */
            gckOS_Delay(device->os, 200);

            gcmkVERIFY_OK(
                gckHARDWARE_QueryPowerManagementState(hardware, &state));
        }
        while (state == gcvPOWER_IDLE);
    }
}
/*
** PM Thread Routine
**
** Per-core power-management loop. Blocks on the core's powerOffSema
** (acquired as a mutex) until woken, then repeatedly requests
** gcvPOWER_OFF_TIMEOUT every 200 ms for as long as the hardware stays in
** gcvPOWER_IDLE. Exits (after waiting for kthread_should_stop) when
** Device->killThread is set.
**/
static int _threadRoutinePM(gckGALDEVICE Device, gckHARDWARE Hardware)
{
    gceCHIPPOWERSTATE state;

    for(;;)
    {
        /* Wait for an idle wake-up; the semaphore is released by the
        ** power-management code when the GPU becomes idle — presumably,
        ** verify against the signaling site. */
        gcmkVERIFY_OK(
            gckOS_AcquireMutex(Device->os,
                               Hardware->powerOffSema,
                               gcvINFINITE));

        /* We try to power off every 200 ms, until GPU is not idle. */
        do
        {
            if (Device->killThread == gcvTRUE)
            {
                /* The daemon exits: spin politely until kthread_stop()
                ** is issued for this thread. */
                while (!kthread_should_stop())
                {
                    gckOS_Delay(Device->os, 1);
                }

                return 0;
            }

            /* Request the timed power-off transition. */
            gcmkVERIFY_OK(
                gckHARDWARE_SetPowerManagementState(
                    Hardware,
                    gcvPOWER_OFF_TIMEOUT));

            /* Relax cpu 200 ms before retry. */
            gckOS_Delay(Device->os, 200);

            gcmkVERIFY_OK(
                gckHARDWARE_QueryPowerManagementState(Hardware, &state));
        }
        while (state == gcvPOWER_IDLE);
    }
}
/* Map a user pointer into kernel address space and push the mapping onto
** the caller's stack of mappings so it can be unwound later.
**
** Source       user-space pointer (may be gcvNULL, in which case
**              *Destination is set to gcvNULL and nothing is mapped).
** Bytes        size of the region to map.
** Destination  receives the kernel-space pointer.
** Stack        head of a singly-linked list of gcsMAPPED records; the new
**              record is pushed on top.
*/
static gceSTATUS
_AddMap(
    IN gckOS Os,
    IN gctPOINTER Source,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER * Destination,
    IN OUT gcsMAPPED_PTR * Stack
    )
{
    gcsMAPPED_PTR map = gcvNULL;
    gceSTATUS status;

    /* Don't try to map NULL pointers. */
    if (Source == gcvNULL)
    {
        *Destination = gcvNULL;
        return gcvSTATUS_OK;
    }

    /* Allocate the gcsMAPPED structure. */
    gcmkONERROR(
        gckOS_Allocate(Os, gcmSIZEOF(*map), (gctPOINTER *) &map));

    /* Map the user pointer into kernel addressing space. */
    gcmkONERROR(
        gckOS_MapUserPointer(Os, Source, Bytes, Destination));

    /* Save mapping so it can be undone during unwind. */
    map->pointer = Source;
    map->kernelPointer = *Destination;
    map->bytes = Bytes;

    /* Push structure on top of the stack. */
    map->next = *Stack;
    *Stack = map;

    /* Success. */
    return gcvSTATUS_OK;

OnError:
    /* Roll back: free the record if it was allocated before the failure. */
    if (gcmIS_ERROR(status) && (map != gcvNULL))
    {
        gcmkVERIFY_OK(gckOS_Free(Os, map));
    }

    /* Return the status. */
    return status;
}
/* sysfs 'store' handler: read one GPU register and print its value.
**
** Input format: "<core> 0x<offset>", e.g. "0 0x664".
**
** Fix: sscanf used "%d" (signed int conversion) with gctUINT32
** arguments — a format/argument type mismatch that is undefined
** behavior; changed to "%u" to match the unsigned operands. */
static ssize_t store_register_stats(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
    gctUINT32 core, offset, value;

    /* Parse and validate the core index and register offset. */
    SYSFS_VERIFY_INPUT(sscanf(buf, "%u 0x%x", &core, &offset), 2);
    SYSFS_VERIFY_INPUT_RANGE(core, 0, gcdMAX_GPU_COUNT - 1);
    SYSFS_VERIFY_INPUT_RANGE(offset, 0, 0x30001);

    gcmkVERIFY_OK(gckOS_ReadRegisterEx(galDevice->os, core, offset, &value));

    gcmkPRINT("Core(%d) Register[0x%x] value is 0x%08x\n", core, offset, value);

    return count;
}
/* Mark the VG MMU page table as dirty so the hardware flushes its TLB
** before the next use. Always returns gcvSTATUS_OK. */
gceSTATUS gckVGMMU_Flush(
    IN gckVGMMU Mmu
    )
{
    gckVGHARDWARE hw;

    gcmkHEADER_ARG("Mmu=0x%x", Mmu);

    hw = Mmu->hardware;

    /* Raise the dirty flag atomically; the flush happens lazily. */
    gcmkVERIFY_OK(
        gckOS_AtomSet(hw->os, hw->pageTableDirty, 1));

    gcmkFOOTER_NO();

    /* Success. */
    return gcvSTATUS_OK;
}
/*******************************************************************************
**
**  gckHEAP_Free
**
**  Free allocated memory from the heap.
**
**  Note: the node is only marked as free here (next = gcvNULL); the bytes
**  are presumably reclaimed later by heap compaction rather than being
**  returned to the free list immediately — confirm against
**  _CompactKernelHeap.
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object.
**
**      IN gctPOINTER Memory
**          Pointer to memory to free.
**
**  OUTPUT:
**
**      NOTHING.
*/
gceSTATUS
gckHEAP_Free(
    IN gckHEAP Heap,
    IN gctPOINTER Memory
    )
{
    gcskNODE_PTR node;
    gceSTATUS status;

    gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    /* Pointer to node header, which sits just before the user memory. */
    node = (gcskNODE_PTR) Memory - 1;

    /* Mark the node as freed. */
    node->next = gcvNULL;

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profile counters. */
    Heap->allocBytes -= node->bytes;
#endif

    /* Release the mutex. */
    gcmkVERIFY_OK(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/* sysfs 'store' handler: set the power-off timeout of one core.
**
** Input format: "<core>,<timeout_ms>".
**
** Fix: the original dereferenced galDevice->kernels[core]->hardware
** without a NULL check. The range check uses the count of present cores,
** but a sparse kernels[] array (or core index pointing at an absent
** slot) would oops; the sibling store_fscale handler already guards
** against this, so the same guard is applied here for consistency. */
static ssize_t store_poweroff_timeout(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
    int core, timeout, i, gpu_count;

    /* Count present core numbers. */
    for (i = 0, gpu_count = 0; i < gcdMAX_GPU_COUNT; i++)
        if (galDevice->kernels[i] != gcvNULL)
            gpu_count++;

    /* Read input and verify. */
    SYSFS_VERIFY_INPUT(sscanf(buf, "%d,%d", &core, &timeout), 2);
    SYSFS_VERIFY_INPUT_RANGE(core, 0, (gpu_count-1));
    SYSFS_VERIFY_INPUT_RANGE(timeout, 0, 3600000);

    /* Guard against an absent core slot before dereferencing. */
    if (galDevice->kernels[core] != gcvNULL)
        gcmkVERIFY_OK(gckHARDWARE_SetPowerOffTimeout(
                      galDevice->kernels[core]->hardware,
                      timeout));

    return count;
}
/*******************************************************************************
**
**  gckKERNEL_Destroy
**
**  Destroy an gckKERNEL object, tearing down its sub-objects in the
**  reverse of their construction order (MMU, event, command, hardware,
**  client atom), then freeing the object itself.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object to destroy.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckKERNEL_Destroy(
    IN gckKERNEL Kernel
    )
{
    gcmkHEADER_ARG("Kernel=0x%x", Kernel);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);

    /* Destroy the gckMMU object. */
    gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));

    /* Destroy the gckEVENT object. */
    gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->event));

    /* Destroy the gckCOMMAND object. */
    gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));

    /* Destroy the gckHARDWARE object. */
    gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));

    /* Destroy the client atom. */
    gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));

    /* Mark the gckKERNEL object as unknown. */
    Kernel->object.type = gcvOBJ_UNKNOWN;

#if MRVL_LOW_POWER_MODE_DEBUG
    /* Release the low-power debug message buffer. */
    gcmkVERIFY_OK(
        gckOS_Free(Kernel->os, Kernel->kernelMSG));
#endif

    /* Free the gckKERNEL object. */
    gcmkVERIFY_OK(gckOS_Free(Kernel->os, Kernel));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
static ssize_t show_fscale (struct device *dev, struct device_attribute *attr, char * buf) { int len = 0, i; gctUINT32 fscale = 64, minscale = 1, maxscale = 64; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (galDevice->kernels[i] != gcvNULL) { #if gcdENABLE_FSCALE_VAL_ADJUST gcmkVERIFY_OK(gckHARDWARE_GetFscaleValue( galDevice->kernels[i]->hardware, &fscale, &minscale, &maxscale)); #endif } len += sprintf(buf+len, "[%s] internal fscale value = %d\n", _core_desc[i], fscale); } return len; }
/*******************************************************************************
**
**  gckCOMMAND_Destroy
**
**  Destroy an gckCOMMAND object: stops the queue, releases each command
**  queue's signal and non-paged memory, deletes both mutexes, and frees
**  the object.
**
**  INPUT:
**
**      gckCOMMAND Command
**          Pointer to an gckCOMMAND object to destroy.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckCOMMAND_Destroy(
    IN gckCOMMAND Command
    )
{
    gctINT i;

    gcmkHEADER_ARG("Command=0x%x", Command);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);

    /* Stop the command queue before releasing its resources. */
    gcmkVERIFY_OK(gckCOMMAND_Stop(Command));

    for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
    {
        /* Every queue is expected to be fully constructed at this point. */
        gcmkASSERT(Command->queues[i].signal != gcvNULL);
        gcmkVERIFY_OK(
            gckOS_DestroySignal(Command->os, Command->queues[i].signal));

        gcmkASSERT(Command->queues[i].logical != gcvNULL);
        gcmkVERIFY_OK(
            gckOS_FreeNonPagedMemory(Command->os,
                                     Command->pageSize,
                                     Command->queues[i].physical,
                                     Command->queues[i].logical));
    }

    /* Delete the context switching mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));

    /* Delete the command queue mutex. */
    gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));

    /* Mark object as unknown. */
    Command->object.type = gcvOBJ_UNKNOWN;

    /* Free the gckCOMMAND object. */
    gcmkVERIFY_OK(gckOS_Free(Command->os, Command));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/* sysfs 'store' handler: set the internal frequency-scale value of one
** core. Input format: "<core>,<fscale>", fscale in 1..64. */
static ssize_t store_fscale(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
    int core, fscale, idx, present;

    /* Count how many cores actually exist. */
    for (idx = 0, present = 0; idx < gcdMAX_GPU_COUNT; idx++)
        if (galDevice->kernels[idx] != gcvNULL)
            present++;

    /* Parse and validate the input pair. */
    SYSFS_VERIFY_INPUT(sscanf(buf, "%d,%d", &core, &fscale), 2);
    SYSFS_VERIFY_INPUT_RANGE(core, 0, (present-1));
    SYSFS_VERIFY_INPUT_RANGE(fscale, 1, 64);

#if gcdENABLE_FSCALE_VAL_ADJUST
    /* Apply only when the target core is present. */
    if(galDevice->kernels[core] != gcvNULL)
        gcmkVERIFY_OK(gckHARDWARE_SetFscaleValue(
                      galDevice->kernels[core]->hardware, fscale));
#endif

    return count;
}
/*******************************************************************************
**
**  gckHEAP_Allocate
**
**  Allocate data from the heap.
**
**  Strategy: round the request up to an aligned node size, then search the
**  free lists of all existing arenas (retrying once after compaction); if
**  no fit is found, allocate a brand-new arena sized Heap->allocationSize
**  and carve the request from it. Used nodes are carved from the BACK of a
**  free node; the arena that served the request is moved to the front of
**  the arena list (MRU ordering).
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object.
**
**      IN gctSIZE_T Bytes
**          Number of byte to allocate.
**
**  OUTPUT:
**
**      gctPOINTER * Memory
**          Pointer to a variable that will hold the address of the allocated
**          memory.
*/
gceSTATUS
gckHEAP_Allocate(
    IN gckHEAP Heap,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER * Memory
    )
{
    gctBOOL acquired = gcvFALSE;
    gcskHEAP_PTR heap;
    gceSTATUS status;
    gctSIZE_T bytes;
    gcskNODE_PTR node, used, prevFree = gcvNULL;
    gctPOINTER memory = gcvNULL;

    gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Determine number of bytes required for a node (header included,
    ** 8-byte aligned). */
    bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Check if this allocation is bigger than the default allocation size. */
    if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
    {
        /* Adjust allocation size so the new arena can hold the request. */
        Heap->allocationSize = bytes * 2;
    }
    else if (Heap->heap != gcvNULL)
    {
        gctINT i;

        /* 2 retries, since we might need to compact. */
        for (i = 0; i < 2; ++i)
        {
            /* Walk all the heaps. */
            for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
            {
                /* Check if this heap has enough bytes to hold the request. */
                if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
                {
                    prevFree = gcvNULL;

                    /* Walk the chain of free nodes (first-fit). */
                    for (node = heap->freeList;
                         node != gcvNULL;
                         node = node->next
                    )
                    {
                        gcmkASSERT(node->next != gcdIN_USE);

                        /* Check if this free node has enough bytes. */
                        if (node->bytes >= bytes)
                        {
                            /* Use the node. */
                            goto UseNode;
                        }

                        /* Save current free node for linked list management. */
                        prevFree = node;
                    }
                }
            }

            if (i == 0)
            {
                /* Compact the heap and retry the search once. */
                gcmkVERIFY_OK(_CompactKernelHeap(Heap));

#if gcmIS_DEBUG(gcdDEBUG_CODE)
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "===== KERNEL HEAP =====");
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of allocations           : %12u",
                               Heap->allocCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of bytes allocated       : %12llu",
                               Heap->allocBytes);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum allocation size         : %12llu",
                               Heap->allocBytesMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Total number of bytes allocated : %12llu",
                               Heap->allocBytesTotal);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of heaps                 : %12u",
                               Heap->heapCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Heap memory in bytes            : %12llu",
                               Heap->heapMemory);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum number of heaps         : %12u",
                               Heap->heapCountMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum heap memory in bytes    : %12llu",
                               Heap->heapMemoryMax);
#endif
            }
        }
    }

    /* No existing arena can serve the request: release the mutex while
    ** allocating a new arena from the OS. */
    gcmkONERROR(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    acquired = gcvFALSE;

    /* Allocate a new heap. */
    gcmkONERROR(
        gckOS_AllocateMemory(Heap->os,
                             Heap->allocationSize,
                             &memory));

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                   "Allocated heap 0x%x (%lu bytes)",
                   memory, Heap->allocationSize);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Use the allocated memory as the heap. */
    heap = (gcskHEAP_PTR) memory;

    /* Insert this heap to the head of the chain. */
    heap->next = Heap->heap;
    heap->prev = gcvNULL;
    heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);

    if (heap->next != gcvNULL)
    {
        heap->next->prev = heap;
    }
    Heap->heap = heap;

    /* Mark the end of the heap with a zero-length sentinel node. */
    node = (gcskNODE_PTR)
        ( (gctUINT8_PTR) heap
        + Heap->allocationSize
        - gcmSIZEOF(gcskNODE)
        );
    node->bytes = 0;
    node->next = gcvNULL;

    /* Create a free list covering the entire arena body. */
    node = (gcskNODE_PTR) (heap + 1);
    heap->freeList = node;

    /* Initialize the free list. */
    node->bytes = heap->size - gcmSIZEOF(gcskNODE);
    node->next = gcvNULL;

    /* No previous free. */
    prevFree = gcvNULL;

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profiling. */
    Heap->heapCount += 1;
    Heap->heapMemory += Heap->allocationSize;

    if (Heap->heapCount > Heap->heapCountMax)
    {
        Heap->heapCountMax = Heap->heapCount;
    }
    if (Heap->heapMemory > Heap->heapMemoryMax)
    {
        Heap->heapMemoryMax = Heap->heapMemory;
    }
#endif

UseNode:
    /* Verify some stuff: a fitting node in some arena was found. */
    gcmkASSERT(heap != gcvNULL);
    gcmkASSERT(node != gcvNULL);
    gcmkASSERT(node->bytes >= bytes);

    if (heap->prev != gcvNULL)
    {
        /* Unlink the heap from the linked list. */
        heap->prev->next = heap->next;
        if (heap->next != gcvNULL)
        {
            heap->next->prev = heap->prev;
        }

        /* Move the heap to the front of the list (MRU). */
        heap->next = Heap->heap;
        heap->prev = gcvNULL;
        Heap->heap = heap;
        heap->next->prev = heap;
    }

    /* Check if there is enough free space left after usage for another free
    ** node. */
    if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
    {
        /* Allocate used space from the back of the free node. */
        used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);

        /* Adjust the number of free bytes. */
        node->bytes -= bytes;
        gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
    }
    else
    {
        /* Remove this free node from the chain. */
        if (prevFree == gcvNULL)
        {
            heap->freeList = node->next;
        }
        else
        {
            prevFree->next = node->next;
        }

        /* Consume the entire free node. */
        used = (gcskNODE_PTR) node;
        bytes = node->bytes;
    }

    /* Mark node as used. */
    used->bytes = bytes;
    used->next = gcdIN_USE;

#if gcmIS_DEBUG(gcdDEBUG_CODE)
    used->timeStamp = ++Heap->timeStamp;
#endif

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profile counters. */
    Heap->allocCount += 1;
    Heap->allocBytes += bytes;
    Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
    Heap->allocBytesTotal += bytes;
#endif

    /* Release the mutex. */
    gcmkVERIFY_OK(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    /* Return pointer to memory, just past the node header. */
    *Memory = used + 1;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));
    }

    if (memory != gcvNULL)
    {
        /* Free the heap memory. */
        gckOS_FreeMemory(Heap->os, memory);
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/******************************************************************************* ** ** gckHEAP_Construct ** ** Construct a new gckHEAP object. ** ** INPUT: ** ** gckOS Os ** Pointer to a gckOS object. ** ** gctSIZE_T AllocationSize ** Minimum size per arena. ** ** OUTPUT: ** ** gckHEAP * Heap ** Pointer to a variable that will hold the pointer to the gckHEAP ** object. */ gceSTATUS gckHEAP_Construct( IN gckOS Os, IN gctSIZE_T AllocationSize, OUT gckHEAP * Heap ) { gceSTATUS status; gckHEAP heap = gcvNULL; gctPOINTER pointer = gcvNULL; gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS); gcmkVERIFY_ARGUMENT(Heap != gcvNULL); /* Allocate the gckHEAP object. */ gcmkONERROR(gckOS_AllocateMemory(Os, gcmSIZEOF(struct _gckHEAP), &pointer)); heap = pointer; /* Initialize the gckHEAP object. */ heap->object.type = gcvOBJ_HEAP; heap->os = Os; heap->allocationSize = AllocationSize; heap->heap = gcvNULL; #if gcmIS_DEBUG(gcdDEBUG_CODE) heap->timeStamp = 0; #endif #if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE) /* Zero the counters. */ heap->allocCount = 0; heap->allocBytes = 0; heap->allocBytesMax = 0; heap->allocBytesTotal = 0; heap->heapCount = 0; heap->heapCountMax = 0; heap->heapMemory = 0; heap->heapMemoryMax = 0; #endif /* Create the mutex. */ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex)); /* Return the pointer to the gckHEAP object. */ *Heap = heap; /* Success. */ gcmkFOOTER_ARG("*Heap=0x%x", *Heap); return gcvSTATUS_OK; OnError: /* Roll back. */ if (heap != gcvNULL) { /* Free the heap structure. */ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap)); } /* Return the status. */ gcmkFOOTER(); return status; }