/* Construct a gckIOMMU wrapper: allocate the object, create an IOMMU domain
** on the platform bus, install the driver's fault handler, and attach the
** GPU's struct device to the domain.  On success a 1:1 ("flat") mapping of
** the low 2 GB is programmed via _FlatMapping.
**
** INPUT:  Os    - gckOS object (provides the platform device).
** OUTPUT: Iommu - receives the constructed gckIOMMU object.
**
** Any failure rolls back through gckIOMMU_Destory (sic - the misspelled name
** is the project-wide API; do not "fix" it here alone).
*/
gceSTATUS
gckIOMMU_Construct(
    IN gckOS Os,
    OUT gckIOMMU * Iommu
    )
{
    gceSTATUS status;
    gckIOMMU iommu = gcvNULL;
    struct device *dev;
    int ret;

    gcmkHEADER();

    /* The GPU's platform device supplies the struct device to attach. */
    dev = &Os->device->platform->device->dev;

    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));

    /* Zero-fill so the rollback path can safely test unset members. */
    gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));

    iommu->domain = iommu_domain_alloc(&platform_bus_type);

    if (!iommu->domain)
    {
        /* No IOMMU support on this platform bus. */
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    /* Route translation faults to the driver's handler. */
    iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);

    ret = iommu_attach_device(iommu->domain, dev);

    if (ret)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    iommu->device = dev;

    /* NOTE(review): the return value of _FlatMapping is ignored; a partial
    ** mapping failure would go unnoticed here - confirm this is intended. */
    _FlatMapping(iommu);

    *Iommu = iommu;

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Rollback handles a NULL or partially-constructed iommu. */
    gckIOMMU_Destory(Os, iommu);

    gcmkFOOTER();
    return status;
}
static int threadRoutineVG(void *ctxt) { gckGALDEVICE device = (gckGALDEVICE) ctxt; gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER, "Starting isr Thread with extension=%p", device); for (;;) { static int down; down = down_interruptible(&device->semas[gcvCORE_VG]); if (down); /*To make gcc 4.6 happye*/ device->dataReadys[gcvCORE_VG] = gcvFALSE; if (device->killThread == gcvTRUE) { /* The daemon exits. */ while (!kthread_should_stop()) { gckOS_Delay(device->os, 1); } return 0; } gckKERNEL_Notify(device->kernels[gcvCORE_VG], gcvNOTIFY_INTERRUPT, gcvFALSE); } }
/*******************************************************************************
**
** gckKERNEL_QueryVideoMemory
**
** Fill in an interface structure with the sizes and physical addresses of
** the internal, external and contiguous video memory pools, as reported by
** the GCHAL stored in the kernel context.
**
** INPUT:
**
**     gckKERNEL Kernel
**         Pointer to an gckKERNEL object.
**
** OUTPUT:
**
**     gcsHAL_INTERFACE * Interface
**         Pointer to an gcsHAL_INTERFACE structure whose QueryVideoMemory
**         union member receives the memory information.
*/
gceSTATUS gckKERNEL_QueryVideoMemory(
    IN gckKERNEL Kernel,
    OUT gcsHAL_INTERFACE * Interface
    )
{
    GCHAL * hal;

    gcmkHEADER_ARG("Kernel=%p", Kernel);

    gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
                   "[ENTER] gckKERNEL_QueryVideoMemory");

    /* Validate inputs before touching them. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Interface != gcvNULL);

    /* The kernel context holds the CExtension (GCHAL) instance. */
    hal = (GCHAL *) Kernel->context;

    /* Internal pool. */
    Interface->u.QueryVideoMemory.internalSize     = hal->GetInternalSize();
    Interface->u.QueryVideoMemory.internalPhysical = hal->GetInternalPhysical();

    /* External pool. */
    Interface->u.QueryVideoMemory.externalSize     = hal->GetExternalSize();
    Interface->u.QueryVideoMemory.externalPhysical = hal->GetExternalPhysical();

    /* Contiguous pool. */
    Interface->u.QueryVideoMemory.contiguousSize     = hal->GetContiguousSize();
    Interface->u.QueryVideoMemory.contiguousPhysical = hal->GetContiguousPhysical();

    gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
                   "[LEAVE] gckKERNEL_QueryVideoMemory(%u)", gcvSTATUS_OK);

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/*******************************************************************************
**
** gckGALDEVICE_Setup_ISR
**
** Hook the major-core interrupt handler onto the device's IRQ line and mark
** the ISR as initialized.
**
** INPUT:
**
**     gckGALDEVICE Device
**         Pointer to an gckGALDEVICE object.
**
** OUTPUT:
**
**     Nothing.
**
** RETURNS:
**
**     gcvSTATUS_OK
**         Setup successfully.
**     gcvSTATUS_GENERIC_IO
**         Setup failed (no IRQ line, or registration rejected).
*/
gceSTATUS gckGALDEVICE_Setup_ISR(
    IN gckGALDEVICE Device
    )
{
    gceSTATUS status;
    gctINT result;

    gcmkHEADER_ARG("Device=0x%x", Device);

    gcmkVERIFY_ARGUMENT(Device != NULL);

    /* A negative IRQ line means no interrupt was assigned to this core. */
    if (Device->irqLines[gcvCORE_MAJOR] < 0)
    {
        gcmkONERROR(gcvSTATUS_GENERIC_IO);
    }

    /* Hook up the isr based on the irq line. */
#ifdef FLAREON
    gc500_handle.dev_name  = "galcore interrupt service";
    gc500_handle.dev_id    = Device;
    gc500_handle.handler   = isrRoutine;
    gc500_handle.intr_gen  = GPIO_INTR_LEVEL_TRIGGER;
    gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;

    result = dove_gpio_request(
        DOVE_GPIO0_7, &gc500_handle
        );
#else
    result = request_irq(
        Device->irqLines[gcvCORE_MAJOR], isrRoutine, IRQF_DISABLED,
        "galcore interrupt service", Device
        );
#endif

    if (result != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): Could not register irq line %d (error=%d)\n",
            __FUNCTION__, __LINE__,
            Device->irqLines[gcvCORE_MAJOR], result
            );

        gcmkONERROR(gcvSTATUS_GENERIC_IO);
    }

    /* Remember that the ISR is live so teardown knows to free it. */
    Device->isrInitializeds[gcvCORE_MAJOR] = gcvTRUE;

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    gcmkFOOTER();
    return status;
}
/* Periodic DVFS timer callback: sample GPU load, pick a frequency scale via
** the policy, apply it, record the resulting frequency, and re-arm the timer
** unless the DVFS machinery is being stopped.
**
** INPUT: Data - the gckDVFS instance (cast from the timer's opaque pointer).
**
** All hardware calls funnel through gcmkONERROR into the shared OnError
** label, which still re-arms the timer so a transient failure does not kill
** periodic sampling.
*/
static void
_TimerFunction(
    gctPOINTER Data
    )
{
    gceSTATUS status = gcvSTATUS_OK;
    gckDVFS dvfs = (gckDVFS) Data;
    gckHARDWARE hardware = dvfs->hardware;
    gctUINT32 value;
    gctUINT32 frequency;
    gctUINT8 scale;
    gctUINT32 t1, t2, consumed;

    /* Timestamp the start so the work's duration can be charged against
    ** the next polling interval. */
    gckOS_GetTicks(&t1);

    gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));

    /* Determine target scale. */
    _Policy(dvfs, value, &scale);

    /* Set frequency and voltage. */
    gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));

    /* Query real frequency. */
    gcmkONERROR(
        gckOS_QueryGPUFrequency(hardware->os,
                                hardware->core,
                                &frequency,
                                &dvfs->currentScale));

    _RecordFrequencyHistory(dvfs, frequency);

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
                   "Current frequency = %d",
                   frequency);

    /* Set period.  (SetDVFSPeroid is the project-wide spelling.) */
    gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));

OnError:
    /* Determine next querying time. */
    gckOS_GetTicks(&t2);

    /* NOTE(review): gcmMIN caps the measured elapsed time at 5 ticks rather
    ** than clamping it to pollingTime; if the intent was to avoid a negative
    ** interval, gcmMIN(consumed, pollingTime) seems more natural - confirm. */
    consumed = gcmMIN(((long)t2 - (long)t1), 5);

    if (dvfs->stop == gcvFALSE)
    {
        gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
                                       dvfs->timer,
                                       dvfs->pollingTime - consumed));
    }

    return;
}
/* Walk every node of a heap and report nodes still marked in-use (leaks).
**
** INPUT:  Heap - heap whose node chain starts immediately after the header.
** RETURN: total number of leaked bytes found.
*/
static gctSIZE_T
_DumpHeap(
    IN gcskHEAP_PTR Heap
    )
{
    gctSIZE_T leaked = 0;

    /* The first node sits directly behind the heap header. */
    gcskNODE_PTR node = (gcskNODE_PTR) (Heap + 1);

    while (gcvTRUE)
    {
        /* An in-use node at dump time is a leak - report it. */
        if (node->next == gcdIN_USE)
        {
            gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
                           "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
                           "(%08X %c%c%c%c)",
                           node, node->bytes, node->timeStamp,
                           ((gctUINT32_PTR) (node + 1))[0],
                           gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
                           gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
                           gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
                           gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));

            leaked += node->bytes;
        }

        /* A zero-byte node terminates the heap. */
        if (node->bytes == 0)
        {
            break;
        }

        /* Advance by the node's byte count to reach the next node. */
        node = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes);
    }

    return leaked;
}
/*******************************************************************************
**
** gckVGMMU_FreePages
**
** Return a run of page-table entries to the free pool by rewriting each
** entry with the all-ones "available" marker.
**
** INPUT:
**
**     gckVGMMU Mmu
**         Pointer to an gckVGMMU object.
**
**     gctPOINTER PageTable
**         Base address of the page table to free.
**
**     gctSIZE_T PageCount
**         Number of pages to free.
**
** OUTPUT:
**
**     Nothing.
*/
gceSTATUS gckVGMMU_FreePages(
    IN gckVGMMU Mmu,
    IN gctPOINTER PageTable,
    IN gctSIZE_T PageCount
    )
{
    gctUINT32 * entry;
    gctSIZE_T index;

    gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
                   Mmu, PageTable, PageCount);

    /* Validate inputs. */
    gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
    gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
    gcmkVERIFY_ARGUMENT(PageCount > 0);

    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_MMU,
        "%s(%d): freeing %u pages at index %u @ %p.\n",
        __FUNCTION__, __LINE__,
        PageCount,
        ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
        PageTable
        );

    entry = (gctUINT32 *) PageTable;

    /* All-ones marks an entry as available. */
    for (index = 0; index < PageCount; index += 1)
    {
        entry[index] = (gctUINT32)~0;
    }

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/* Program a 1:1 (identity) IOMMU mapping for the low 2 GB of physical
** address space, one page at a time.
**
** INPUT:  Iommu - constructed gckIOMMU whose domain receives the mappings.
** RETURN: gcvSTATUS_OK, or the first mapping failure's status.
*/
static int
_FlatMapping(
    IN gckIOMMU Iommu
    )
{
    gceSTATUS status;
    gctUINT32 address = 0;

    /* Identity-map [0, 0x80000000) page by page. */
    while (address < 0x80000000)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_INFO, gcvZONE_OS,
            "Map %x => %x bytes = %d",
            address, address, PAGE_SIZE
            );

        gcmkONERROR(gckIOMMU_Map(Iommu, address, address, PAGE_SIZE));

        address += PAGE_SIZE;
    }

    return gcvSTATUS_OK;

OnError:
    /* Propagate the failing map's status; already-created mappings remain. */
    return status;
}
/*******************************************************************************
**
**  gckVIDMEM_Free
**
**  Free an allocated video memory node.
**
**  For pool-managed (gcvOBJ_VIDMEM) nodes the node is spliced back into the
**  bank's free list and merged with free neighbors under the pool mutex.
**  For virtual nodes the backing pages are released immediately, unless an
**  unlock is still pending in the command stream, in which case the free is
**  re-scheduled through the event queue and gcvSTATUS_SKIP is returned.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a gcuVIDMEM_NODE object.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_Free(
    IN gcuVIDMEM_NODE_PTR Node
    )
{
    gckVIDMEM memory = gcvNULL;
    gcuVIDMEM_NODE_PTR node;
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Node=0x%x", Node);

    /* Verify the arguments. */
    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        if (Node->VidMem.locked > 0)
        {
            gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
                           "Node 0x%x is locked (%d)",
                           Node, Node->VidMem.locked);

            /* Force unlock. */
            Node->VidMem.locked = 0;
        }

        /* Extract pointer to gckVIDMEM object owning the node. */
        memory = Node->VidMem.memory;

        /* Acquire the mutex. */
        gcmkONERROR(
            gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));

        acquired = gcvTRUE;

#ifdef __QNXNTO__
        /* Reset handle to 0. */
        Node->VidMem.logical = gcvNULL;
        Node->VidMem.handle = 0;

        /* Don't try to a re-free an already freed node.
        ** (On QNX the block below is conditional; elsewhere it always runs.) */
        if ((Node->VidMem.nextFree == gcvNULL)
        &&  (Node->VidMem.prevFree == gcvNULL)
        )
#endif
        {
            /* Update the number of free bytes. */
            memory->freeBytes += Node->VidMem.bytes;

            /* Find the next free node (the ring guarantees the sentinel, whose
            ** nextFree is non-NULL, terminates this scan). */
            for (node = Node->VidMem.next;
                 node->VidMem.nextFree == gcvNULL;
                 node = node->VidMem.next) ;

            /* Insert this node in the free list. */
            Node->VidMem.nextFree = node;
            Node->VidMem.prevFree = node->VidMem.prevFree;

            Node->VidMem.prevFree->VidMem.nextFree =
            node->VidMem.prevFree = Node;

            /* Is the next node a free node and not the sentinel? */
            if ((Node->VidMem.next == Node->VidMem.nextFree)
            &&  (Node->VidMem.next->VidMem.bytes != 0)
            )
            {
                /* Merge this node with the next node. */
                gcmkONERROR(_Merge(memory->os, node = Node));
                gcmkASSERT(node->VidMem.nextFree != node);
                gcmkASSERT(node->VidMem.prevFree != node);
            }

            /* Is the previous node a free node and not the sentinel? */
            if ((Node->VidMem.prev == Node->VidMem.prevFree)
            &&  (Node->VidMem.prev->VidMem.bytes != 0)
            )
            {
                /* Merge this node with the previous node. */
                gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
                gcmkASSERT(node->VidMem.nextFree != node);
                gcmkASSERT(node->VidMem.prevFree != node);
            }
        }

        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));

        /* Success. */
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /*************************** Virtual Memory *******************************/

    /* Verify the gckKERNEL object pointer. */
    gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);

#ifdef __QNXNTO__
    if (!Node->Virtual.unlockPending && (Node->Virtual.locked > 0))
#else
    if (!Node->Virtual.pending && (Node->Virtual.locked > 0))
#endif
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
                       "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
                       Node, Node->Virtual.locked);

        /* Force unlock. */
        Node->Virtual.locked = 0;
    }

    /* NOTE: the braces below are deliberately unbalanced across the #ifdef
    ** pairs - on QNX an extra `if (!freePending) {` wraps the block and is
    ** closed inside the second #ifdef.  Preserve this structure exactly. */
#ifdef __QNXNTO__
    if (!Node->Virtual.freePending) { if (Node->Virtual.unlockPending)
#else
    if (Node->Virtual.pending)
#endif
    {
        gcmkASSERT(Node->Virtual.locked == 1);

        /* Schedule the node to be freed. */
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                       "gckVIDMEM_Free: Scheduling node 0x%x to be freed later",
                       Node);

        /* Schedule the video memory to be freed again. */
        gcmkONERROR(gckEVENT_FreeVideoMemory(Node->Virtual.kernel->event,
                                             Node,
                                             gcvKERNEL_PIXEL));

#ifdef __QNXNTO__
        Node->Virtual.freePending = gcvTRUE; }
#endif

        /* Success. */
        gcmkFOOTER_NO();
        return gcvSTATUS_SKIP;
    }
    else
    {
        /* Free the virtual memory. */
        gcmkVERIFY_OK(gckOS_FreePagedMemory(Node->Virtual.kernel->os,
                                            Node->Virtual.physical,
                                            Node->Virtual.bytes));

        /* Destroy the gcuVIDMEM_NODE union. */
        gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
    }

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVIDMEM_Construct
**
**  Construct a new gckVIDMEM object.
**
**  INPUT:
**
**      gckOS Os
**          Pointer to an gckOS object.
**
**      gctUINT32 BaseAddress
**          Base address for the video memory heap.
**
**      gctSIZE_T Bytes
**          Number of bytes in the video memory heap.
**
**      gctSIZE_T Threshold
**          Minimum number of bytes beyond am allocation before the node is
**          split. Can be used as a minimum alignment requirement.
**
**      gctSIZE_T BankSize
**          Number of bytes per physical memory bank.  Used by bank
**          optimization.  Zero puts the entire heap into a single bank.
**
**  OUTPUT:
**
**      gckVIDMEM * Memory
**          Pointer to a variable that will hold the pointer to the gckVIDMEM
**          object.
*/
gceSTATUS
gckVIDMEM_Construct(
    IN gckOS Os,
    IN gctUINT32 BaseAddress,
    IN gctSIZE_T Bytes,
    IN gctSIZE_T Threshold,
    IN gctSIZE_T BankSize,
    OUT gckVIDMEM * Memory
    )
{
    gckVIDMEM memory = gcvNULL;
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node;
    gctINT i, banks = 0;

    gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
                   "BankSize=%lu",
                   Os, BaseAddress, Bytes, Threshold, BankSize);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Allocate the gckVIDMEM object. */
    gcmkONERROR(
        gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), (gctPOINTER *) &memory));

    /* Initialize the gckVIDMEM object. */
    memory->object.type = gcvOBJ_VIDMEM;
    memory->os          = Os;

    /* Set video memory heap information. */
    memory->baseAddress = BaseAddress;
    memory->bytes       = Bytes;
    memory->freeBytes   = Bytes;
    memory->threshold   = Threshold;
    memory->mutex       = gcvNULL;

    /* From here on BaseAddress is reused as a running bank-relative offset. */
    BaseAddress = 0;

    /* Walk all possible banks.  `Bytes` is consumed as banks are carved out,
    ** so once it hits zero every remaining bank is marked unused (NULL links)
    ** - i.e. allocated banks always form a prefix of the sentinel array. */
    for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
    {
        gctSIZE_T bytes;

        if (BankSize == 0)
        {
            /* Use all bytes for the first bank. */
            bytes = Bytes;
        }
        else
        {
            /* Compute number of bytes for this bank. */
            bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;

            if (bytes > Bytes)
            {
                /* Make sure we don't exceed the total number of bytes. */
                bytes = Bytes;
            }
        }

        if (bytes == 0)
        {
            /* Mark heap is not used. */
            memory->sentinel[i].VidMem.next     =
            memory->sentinel[i].VidMem.prev     =
            memory->sentinel[i].VidMem.nextFree =
            memory->sentinel[i].VidMem.prevFree = gcvNULL;
            continue;
        }

        /* Allocate one gcuVIDMEM_NODE union. */
        gcmkONERROR(
            gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER *) &node));

        /* Initialize gcuVIDMEM_NODE union: a two-element ring of sentinel
        ** and node, doubly linked on both the all-nodes and free lists. */
        node->VidMem.memory    = memory;

        node->VidMem.next      =
        node->VidMem.prev      =
        node->VidMem.nextFree  =
        node->VidMem.prevFree  = &memory->sentinel[i];

        node->VidMem.offset    = BaseAddress;
        node->VidMem.bytes     = bytes;
        node->VidMem.alignment = 0;
        node->VidMem.physical  = 0;
        node->VidMem.pool      = gcvPOOL_UNKNOWN;

        node->VidMem.locked    = 0;

#ifdef __QNXNTO__
        node->VidMem.logical   = gcvNULL;
        node->VidMem.handle    = 0;
#endif

        /* Initialize the linked list of nodes. */
        memory->sentinel[i].VidMem.next     =
        memory->sentinel[i].VidMem.prev     =
        memory->sentinel[i].VidMem.nextFree =
        memory->sentinel[i].VidMem.prevFree = node;

        /* Mark sentinel (zero byte count identifies the sentinel). */
        memory->sentinel[i].VidMem.bytes = 0;

        /* Adjust address for next bank. */
        BaseAddress += bytes;
        Bytes       -= bytes;

        banks ++;
    }

    /* Assign all the bank mappings.  `banks` is consumed downward so the
    ** surface types listed first land in the highest banks. */
    memory->mapping[gcvSURF_RENDER_TARGET]      = banks - 1;
    memory->mapping[gcvSURF_BITMAP]             = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_DEPTH]              = banks - 1;
    memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TEXTURE]            = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_VERTEX]             = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_INDEX]              = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TILE_STATUS]        = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TYPE_UNKNOWN]       = 0;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] INDEX:         bank %d",
                   memory->mapping[gcvSURF_INDEX]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] VERTEX:        bank %d",
                   memory->mapping[gcvSURF_VERTEX]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] TEXTURE:       bank %d",
                   memory->mapping[gcvSURF_TEXTURE]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] RENDER_TARGET: bank %d",
                   memory->mapping[gcvSURF_RENDER_TARGET]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] DEPTH:         bank %d",
                   memory->mapping[gcvSURF_DEPTH]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "[GALCORE] TILE_STATUS:   bank %d",
                   memory->mapping[gcvSURF_TILE_STATUS]);

    /* Allocate the mutex. */
    gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));

    /* Return pointer to the gckVIDMEM object. */
    *Memory = memory;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (memory != gcvNULL)
    {
        if (memory->mutex != gcvNULL)
        {
            /* Delete the mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
        }

        /* Free the first `banks` heaps; valid because allocated banks are a
        ** prefix of the sentinel array (see loop comment above). */
        for (i = 0; i < banks; ++i)
        {
            /* Free the heap. */
            gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
            gcmkVERIFY_OK(gckOS_Free(Os, memory->sentinel[i].VidMem.next));
        }

        /* Free the object. */
        gcmkVERIFY_OK(gckOS_Free(Os, memory));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVIDMEM_AllocateLinear
**
**  Allocate linear memory from the gckVIDMEM object.
**
**  Searches the surface type's preferred bank first, then lower banks, then
**  upper banks, splitting the found node for alignment and trimming any
**  surplus beyond the pool threshold before unlinking it from the free list.
**
**  INPUT:
**
**      gckVIDMEM Memory
**          Pointer to an gckVIDMEM object.
**
**      gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**      gctUINT32 Alignment
**          Byte alignment for allocation.
**
**      gceSURF_TYPE Type
**          Type of surface to allocate (use by bank optimization).
**
**  OUTPUT:
**
**      gcuVIDMEM_NODE_PTR * Node
**          Pointer to a variable that will hold the allocated memory node.
*/
gceSTATUS
gckVIDMEM_AllocateLinear(
    IN gckVIDMEM Memory,
    IN gctSIZE_T Bytes,
    IN gctUINT32 Alignment,
    IN gceSURF_TYPE Type,
#ifdef __QNXNTO__
    IN gctHANDLE Handle,
#endif
    OUT gcuVIDMEM_NODE_PTR * Node
    )
{
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node;
    gctUINT32 alignment;
    gctINT bank, i;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
                   Memory, Bytes, Alignment, Type);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
#ifdef __QNXNTO__
    gcmkVERIFY_ARGUMENT(Handle != gcvNULL);
#endif

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    if (Bytes > Memory->freeBytes)
    {
        /* Not enough memory. */
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    /* Find the default bank for this surface type. */
    gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
    bank      = Memory->mapping[Type];
    alignment = Alignment;

    /* Find a free node in the default bank.  _FindNode may adjust
    ** `alignment` to the residual alignment still needed. */
    node = _FindNode(Memory, bank, Bytes, &alignment);

    /* Out of memory? */
    if (node == gcvNULL)
    {
        /* Walk all lower banks. */
        for (i = bank - 1; i >= 0; --i)
        {
            /* Find a free node inside the current bank. */
            node = _FindNode(Memory, i, Bytes, &alignment);
            if (node != gcvNULL)
            {
                break;
            }
        }
    }

    if (node == gcvNULL)
    {
        /* Walk all upper banks. */
        for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
        {
            if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
            {
                /* Abort when we reach unused banks. */
                break;
            }

            /* Find a free node inside the current bank. */
            node = _FindNode(Memory, i, Bytes, &alignment);
            if (node != gcvNULL)
            {
                break;
            }
        }
    }

    if (node == gcvNULL)
    {
        /* Out of memory. */
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    /* Do we have an alignment? */
    if (alignment > 0)
    {
        /* Split the node so it is aligned. */
        if (_Split(Memory->os, node, alignment))
        {
            /* Successful split, move to aligned node. */
            node = node->VidMem.next;

            /* Remove alignment. */
            alignment = 0;
        }
    }

    /* Do we have enough memory after the allocation to split it? */
    if (node->VidMem.bytes - Bytes > Memory->threshold)
    {
        /* Adjust the node size. */
        _Split(Memory->os, node, Bytes);
    }

    /* Remove the node from the free list. */
    node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
    node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
    node->VidMem.nextFree                  =
    node->VidMem.prevFree                  = gcvNULL;

    /* Fill in the information. */
    node->VidMem.alignment = alignment;
    node->VidMem.memory    = Memory;
#ifdef __QNXNTO__
    node->VidMem.logical   = gcvNULL;
    node->VidMem.handle    = Handle;
#endif

    /* Adjust the number of free bytes. */
    Memory->freeBytes -= node->VidMem.bytes;

    /* Release the mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));

    /* Return the pointer to the node. */
    *Node = node;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "Allocated %u bytes @ 0x%x [0x%08X]",
                   node->VidMem.bytes, node, node->VidMem.offset);

    /* Success. */
    gcmkFOOTER_ARG("*Node=0x%x", *Node);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVIDMEM_Unlock
**
**  Unlock a video memory node.
**
**  Pool-managed nodes simply have their lock count decremented.  Virtual
**  nodes, when unlocked synchronously (Asynchroneous == gcvNULL), may first
**  need a cache flush scheduled through the command buffer; in that case the
**  node is marked pending and the real unmap happens on a later call.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a locked gcuVIDMEM_NODE union.
**
**      gceSURF_TYPE Type
**          Type of surface to unlock.
**
**      gctBOOL * Asynchroneous
**          Pointer to a variable specifying whether the surface should be
**          unlocked asynchroneously or not.  If gcvNULL, there is no command
**          buffer and the video memory is unlocked synchronously.
**
**  OUTPUT:
**
**      gctBOOL * Asynchroneous
**          Set to gcvTRUE when the unlock has been scheduled for later
**          processing instead of being performed immediately.
*/
gceSTATUS
gckVIDMEM_Unlock(
    IN gcuVIDMEM_NODE_PTR Node,
    IN gceSURF_TYPE Type,
    IN OUT gctBOOL * Asynchroneous
    )
{
    gceSTATUS status;
    gckKERNEL kernel;
    gckHARDWARE hardware;
    gctPOINTER buffer;
    gctSIZE_T requested, bufferSize;
    gckCOMMAND command = gcvNULL;
    gceKERNEL_FLUSH flush;
    gckOS os = gcvNULL;
    gctBOOL acquired = gcvFALSE;
    gctBOOL needRelease = gcvFALSE;
    gctBOOL pendingUnlock = gcvFALSE;

    gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
                   Node, Type, gcmOPT_VALUE(Asynchroneous));

    /* Verify the arguments. */
    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        if (Node->VidMem.locked <= 0)
        {
            /* The surface was not locked. */
            gcmkONERROR(gcvSTATUS_MEMORY_UNLOCKED);
        }

        /* Decrement the lock count. */
        Node->VidMem.locked --;

        if (Asynchroneous != gcvNULL)
        {
            /* No need for any events. */
            *Asynchroneous = gcvFALSE;
        }
    }

    /*************************** Virtual Memory *******************************/

    else
    {
        /* Verify the gckKERNEL object pointer. */
        kernel = Node->Virtual.kernel;
        gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);

        /* Verify the gckHARDWARE object pointer. */
        hardware = Node->Virtual.kernel->hardware;
        gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

        /* Verify the gckCOMMAND object pointer. */
        command = Node->Virtual.kernel->command;
        gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);

        if (Asynchroneous == gcvNULL)
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "gckVIDMEM_Unlock: Unlocking virtual node 0x%x (%d)",
                           Node, Node->Virtual.locked);

            /* Get the gckOS object pointer. */
            os = kernel->os;
            gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

            /* Grab the mutex. */
            gcmkONERROR(
                gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));

            /* FIX: record that the mutex is held so the OnError path
            ** releases it; previously `acquired` was never set and any
            ** failure below leaked the mutex. */
            acquired = gcvTRUE;

            /* If we need to unlock a node from virtual memory we have to be
            ** very carefull.  If the node is still inside the caches we
            ** might get a bus error later if the cache line needs to be
            ** replaced.  So - we have to flush the caches before we do
            ** anything.  We also need to stall to make sure the flush has
            ** happened.  However - when we get to this point we are inside
            ** the interrupt handler and we cannot just gckCOMMAND_Wait
            ** because it will wait forever.  So - what we do here is we
            ** verify the type of the surface, flush the appropriate cache,
            ** mark the node as flushed, and issue another unlock to unmap
            ** the MMU. */
            if (!Node->Virtual.contiguous
            &&  (Node->Virtual.locked == 1)
            /* FIX: was `#ifdef __QNXTO__` (misspelled), so the QNX branch
            ** was dead code on QNX builds. */
#ifdef __QNXNTO__
            &&  !Node->Virtual.unlockPending
#else
            &&  !Node->Virtual.pending
#endif
            )
            {
                if (Type == gcvSURF_BITMAP)
                {
                    /* Flush 2D cache. */
                    flush = gcvFLUSH_2D;
                }
                else if (Type == gcvSURF_RENDER_TARGET)
                {
                    /* Flush color cache. */
                    flush = gcvFLUSH_COLOR;
                }
                else if (Type == gcvSURF_DEPTH)
                {
                    /* Flush depth cache. */
                    flush = gcvFLUSH_DEPTH;
                }
                else
                {
                    /* No flush required. */
                    flush = (gceKERNEL_FLUSH) 0;
                }

                /* First query the number of bytes the flush will need. */
                gcmkONERROR(
                    gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));

                if (requested != 0)
                {
                    gcmkONERROR(
                        gckCOMMAND_Reserve(command,
                                           requested,
                                           &buffer,
                                           &bufferSize));

                    needRelease = gcvTRUE;

                    gcmkONERROR(gckHARDWARE_Flush(hardware,
                                                  flush,
                                                  buffer,
                                                  &bufferSize));

                    /* Queue the real unlock to run after the flush lands. */
                    gcmkONERROR(
                        gckEVENT_Unlock(Node->Virtual.kernel->event,
                                        gcvKERNEL_PIXEL,
                                        Node,
                                        Type));

                    /* Mark node as pending. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvTRUE;
#else
                    Node->Virtual.pending = gcvTRUE;
#endif

                    /* Execute consumes the reservation; no release needed. */
                    needRelease = gcvFALSE;

                    gcmkONERROR(gckCOMMAND_Execute(command, requested));

                    pendingUnlock = gcvTRUE;
                }
            }

            if (!pendingUnlock)
            {
                if (Node->Virtual.locked == 0)
                {
                    status = gcvSTATUS_MEMORY_UNLOCKED;
                    goto OnError;
                }

                /* Decrement lock count. */
                -- Node->Virtual.locked;

                /* See if we can unlock the resources. */
                if (Node->Virtual.locked == 0)
                {
                    /* Unlock the pages. */
#ifdef __QNXNTO__
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.userPID,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#else
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#endif

                    /* Free the page table. */
                    if (Node->Virtual.pageTable != gcvNULL)
                    {
                        gcmkONERROR(
                            gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                             Node->Virtual.pageTable,
                                             Node->Virtual.pageCount));

                        /* Mark page table as freed. */
                        Node->Virtual.pageTable = gcvNULL;
                    }

                    /* Mark node as unlocked. */
                    /* FIX: was `#ifdef __QNXTO` (missing 'N' and trailing
                    ** underscores), so unlockPending was never cleared on
                    ** QNX builds. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvFALSE;
#else
                    Node->Virtual.pending = gcvFALSE;
#endif
                }

                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                               "Unmapped virtual node 0x%x from 0x%08X",
                               Node, Node->Virtual.address);
            }

            /* Release the mutex. */
            gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
            acquired = gcvFALSE;
        }

        else
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Scheduled unlock for virtual node 0x%x",
                           Node);

            /* Schedule the surface to be unlocked. */
            *Asynchroneous = gcvTRUE;
        }
    }

    /* Success. */
    gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
    return gcvSTATUS_OK;

OnError:
    if (needRelease)
    {
        gcmkVERIFY_OK(gckCOMMAND_Release(command));
    }

    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVIDMEM_ConstructVirtual
**
**  Construct a new gcuVIDMEM_NODE union for virtual memory.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctBOOL Contiguous
**          gcvTRUE to request physically contiguous backing pages.
**
**      gctSIZE_T Bytes
**          Number of byte to allocate.
**
**      gctHANDLE Handle (QNX only)
**          Owning process handle recorded on the node.
**
**  OUTPUT:
**
**      gcuVIDMEM_NODE_PTR * Node
**          Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
*/
gceSTATUS
gckVIDMEM_ConstructVirtual(
    IN gckKERNEL Kernel,
    IN gctBOOL Contiguous,
    IN gctSIZE_T Bytes,
#ifdef __QNXNTO__
    IN gctHANDLE Handle,
#endif
    OUT gcuVIDMEM_NODE_PTR * Node
    )
{
    gckOS os;
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node = gcvNULL;

    gcmkHEADER_ARG("Kernel=0x%x Bytes=%lu", Kernel, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
#ifdef __QNXNTO__
    gcmkVERIFY_ARGUMENT(Handle != gcvNULL);
#endif

    /* Extract the gckOS object pointer. */
    os = Kernel->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Allocate an gcuVIDMEM_NODE union. */
    gcmkONERROR(
        gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER *) &node));

    /* Initialize gcuVIDMEM_NODE union for virtual memory.  Mutex is set to
    ** NULL first so the rollback below can tell whether it was created. */
    node->Virtual.kernel        = Kernel;
    node->Virtual.contiguous    = Contiguous;
    node->Virtual.locked        = 0;
    node->Virtual.logical       = gcvNULL;
    node->Virtual.pageTable     = gcvNULL;
    node->Virtual.mutex         = gcvNULL;

#ifdef __QNXNTO__
    node->Virtual.next          = gcvNULL;
    node->Virtual.unlockPending = gcvFALSE;
    node->Virtual.freePending   = gcvFALSE;
    node->Virtual.handle        = Handle;
#else
    node->Virtual.pending       = gcvFALSE;
#endif

    /* Create the mutex. */
    gcmkONERROR(
        gckOS_CreateMutex(os, &node->Virtual.mutex));

    /* Allocate the virtual memory (bytes recorded on the node inline). */
    gcmkONERROR(
        gckOS_AllocatePagedMemoryEx(os,
                                    node->Virtual.contiguous,
                                    node->Virtual.bytes = Bytes,
                                    &node->Virtual.physical));

#ifdef __QNXNTO__
    /* Register. */
    gckMMU_InsertNode(Kernel->mmu, node);
#endif

    /* Return pointer to the gcuVIDMEM_NODE union. */
    *Node = node;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "Created virtual node 0x%x for %u bytes @ 0x%x",
                   node, Bytes, node->Virtual.physical);

    /* Success. */
    gcmkFOOTER_ARG("*Node=0x%x", *Node);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (node != gcvNULL)
    {
        if (node->Virtual.mutex != gcvNULL)
        {
            /* Destroy the mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
        }

        /* Free the structure. */
        gcmkVERIFY_OK(gckOS_Free(os, node));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVGMMU_Construct
**
**  Construct a new gckVGMMU object: allocate the object and its contiguous
**  page table, mark every entry available, and program the table into the
**  VG hardware.  Each failure point rolls back everything created so far.
**
**  INPUT:
**
**      gckVGKERNEL Kernel
**          Pointer to an gckVGKERNEL object.
**
**      gctSIZE_T MmuSize
**          Number of bytes for the page table.
**
**  OUTPUT:
**
**      gckVGMMU * Mmu
**          Pointer to a variable that receives the gckVGMMU object pointer.
*/
gceSTATUS gckVGMMU_Construct(
    IN gckVGKERNEL Kernel,
    IN gctSIZE_T MmuSize,
    OUT gckVGMMU * Mmu
    )
{
    gckOS os;
    gckVGHARDWARE hardware;
    gceSTATUS status;
    gckVGMMU mmu;
    gctUINT32 * pageTable;
    gctUINT32 i;

    gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x",
                   Kernel, MmuSize, Mmu);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(MmuSize > 0);
    gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);

    /* Extract the gckOS object pointer. */
    os = Kernel->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Extract the gckVGHARDWARE object pointer. */
    hardware = Kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    /* Allocate memory for the gckVGMMU object. */
    status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);

    if (status < 0)
    {
        /* Error. */
        gcmkFATAL(
            "%s(%d): could not allocate gckVGMMU object.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Initialize the gckVGMMU object. */
    mmu->object.type = gcvOBJ_MMU;
    mmu->os = os;
    mmu->hardware = hardware;

    /* Create the mutex. */
    status = gckOS_CreateMutex(os, &mmu->mutex);

    if (status < 0)
    {
        /* Roll back: invalidate the object type before freeing. */
        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        gcmkFOOTER();
        /* Error. */
        return status;
    }

    /* Allocate the page table (size may be rounded up by the OS layer). */
    mmu->pageTableSize = MmuSize;
    status = gckOS_AllocateContiguous(os,
                                      gcvFALSE,
                                      &mmu->pageTableSize,
                                      &mmu->pageTablePhysical,
                                      &mmu->pageTableLogical);

    if (status < 0)
    {
        /* Roll back. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));

        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        /* Error. */
        gcmkFATAL(
            "%s(%d): could not allocate page table.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Compute number of entries in page table. */
    mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32);
    mmu->entry = 0;

    /* Mark the entire page table as available (all-ones = free entry). */
    pageTable = (gctUINT32 *) mmu->pageTableLogical;

    for (i = 0; i < mmu->entryCount; i++)
    {
        pageTable[i] = (gctUINT32)~0;
    }

    /* Set page table address. */
    status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);

    if (status < 0)
    {
        /* Free the page table. */
        gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
                                           mmu->pageTablePhysical,
                                           mmu->pageTableLogical,
                                           mmu->pageTableSize));

        /* Roll back. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));

        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        /* Error. */
        gcmkFATAL(
            "%s(%d): could not program page table.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Return the gckVGMMU object pointer. */
    *Mmu = mmu;

    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_MMU,
        "%s(%d): %u entries at %p.(0x%08X)\n",
        __FUNCTION__, __LINE__,
        mmu->entryCount,
        mmu->pageTableLogical,
        mmu->pageTablePhysical
        );

    gcmkFOOTER_NO();
    /* Success. */
    return gcvSTATUS_OK;
}
static int drv_init(void)
#endif
{
    int ret;
    int result = -EINVAL;
    gceSTATUS status;
    gckGALDEVICE device = gcvNULL;
    struct class* device_class = gcvNULL;

    /* Bundle the module parameters that gckGALDEVICE_Construct consumes
    ** as a single argument structure. */
    gcsDEVICE_CONSTRUCT_ARGS args = {
        .recovery           = recovery,
        .stuckDump          = stuckDump,
        .gpu3DMinClock      = gpu3DMinClock,
        .contiguousRequested = contiguousRequested,
        .platform           = &platform,
        .mmu                = mmu,
    };

    gcmkHEADER();

    printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
        gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);

#if !VIVANTE_PROFILER_PM
    /* when enable gpu profiler, we need to turn off gpu powerMangement */
    if (gpuProfiler)
    {
        powerManagement = 0;
    }
#endif

    if (showArgs)
    {
        gckOS_DumpParam();
    }

    if (logFileSize != 0)
    {
        gckDEBUGFS_Initialize();
    }

    /* Create the GAL device. */
    status = gckGALDEVICE_Construct(
#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
        irqLine3D0,
        registerMemBase3D0,
        registerMemSize3D0,
        irqLine3D1,
        registerMemBase3D1,
        registerMemSize3D1,
#else
        irqLine,
        registerMemBase,
        registerMemSize,
#endif
        irqLine2D,
        registerMemBase2D,
        registerMemSize2D,
        irqLineVG,
        registerMemBaseVG,
        registerMemSizeVG,
        contiguousBase,
        contiguousSize,
        bankSize,
        fastClear,
        compression,
        baseAddress,
        physSize,
        signal,
        logFileSize,
        powerManagement,
        gpuProfiler,
        &args,
        &device
    );

    if (gcmIS_ERROR(status))
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
                       "%s(%d): Failed to create the GAL device: status=%d\n",
                       __FUNCTION__, __LINE__, status);

        goto OnError;
    }

    /* Start the GAL device. */
    gcmkONERROR(gckGALDEVICE_Start(device));

    if ((physSize != 0)
       && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
       && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
    {
        /* Reset the base address: with a new-style MMU the GPU address
        ** space no longer needs the physical base offset. */
        device->baseAddress = 0;
    }

    /* Register the character device.  major==0 asks the kernel to
    ** allocate a major number dynamically. */
    ret = register_chrdev(major, DEVICE_NAME, &driver_fops);

    if (ret < 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): Could not allocate major number for mmap.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    if (major == 0)
    {
        major = ret;
    }

    /* Create the device class. */
    device_class = class_create(THIS_MODULE, "graphics_class");

    if (IS_ERR(device_class))
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): Failed to create the class.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
#else
    device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
#endif

    /* Publish the device and class through the module-level globals. */
    galDevice = device;
    gpuClass  = device_class;

#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_DRIVER,
        "%s(%d): irqLine3D0=%d, contiguousSize=%lu, memBase3D0=0x%lX\n",
        __FUNCTION__, __LINE__,
        irqLine3D0, contiguousSize, registerMemBase3D0
        );
#else
    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_DRIVER,
        "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
        __FUNCTION__, __LINE__,
        irqLine, contiguousSize, registerMemBase
        );
#endif

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    /* Roll back. */
    if (device_class != gcvNULL)
    {
        device_destroy(device_class, MKDEV(major, 0));
        class_destroy(device_class);
    }

    if (device != gcvNULL)
    {
        gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
        gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
    }

    gcmkFOOTER();
    /* NOTE(review): result stays -EINVAL regardless of which step failed;
    ** callers only see a generic failure code. */
    return result;
}
/*******************************************************************************
**
**  gckKERNEL_MapVideoMemory
**
**  Get the logical address for a hardware specific memory address for the
**  current process.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctBOOL InUserSpace
**          gcvTRUE to map the memory into the user space.
**
**      gctUINT32 Address
**          Hardware specific memory address.
**
**  OUTPUT:
**
**      gctPOINTER * Logical
**          Pointer to a variable that will hold the logical address of the
**          specified memory address.
*/
gceSTATUS gckKERNEL_MapVideoMemoryEx(
    IN gckKERNEL Kernel,
    IN gceCORE Core,
    IN gctBOOL InUserSpace,
    IN gctUINT32 Address,
    OUT gctPOINTER * Logical
    )
{
    GCHAL * gchal;
    gcePOOL pool;
    gctUINT32 offset, base;
    gceSTATUS status;
    gctPOINTER logical;

    gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
                   Kernel, InUserSpace, Address);

    gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
                  "[ENTER] gckKERNEL_MapVideoMemory");

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Logical != gcvNULL);

    /* Extract the pointer to the GCHAL class. */
    gchal = (GCHAL *) Kernel->context;

    do
    {
#if gcdENABLE_VG
        if (Core == gcvCORE_VG)
        {
            /* Split the memory address into a pool type and offset.
            ** gcmkERR_BREAK assigns 'status' and breaks out on error. */
            gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
                                                  Address,
                                                  &pool,
                                                  &offset));
        }
        else
#endif
        {
            /* Split the memory address into a pool type and offset. */
            gcmkERR_BREAK(gckHARDWARE_SplitMemory(Kernel->hardware,
                                                  Address,
                                                  &pool,
                                                  &offset));
        }

        /* Dispatch on pool. */
        switch (pool)
        {
        case gcvPOOL_LOCAL_INTERNAL:
            /* Internal memory. */
            logical = gchal->GetInternalLogical();
            break;

        case gcvPOOL_LOCAL_EXTERNAL:
            /* External memory. */
            logical = gchal->GetExternalLogical();
            break;

        case gcvPOOL_SYSTEM:
            /* System memory. */
#if UNDER_CE >= 600
            if (InUserSpace)
            {
                logical = gchal->GetProcessContiguousLogical();
            }
            else
            {
                logical = gchal->GetContiguousLogical();
            }
#else
            logical = gchal->GetContiguousLogical();
#endif

            /* Rebase the offset against the start of the contiguous heap. */
#if gcdENABLE_VG
            if (Core == gcvCORE_VG)
            {
                gcmkVERIFY_OK(gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
                                        gchal->GetContiguousHeap()->baseAddress,
                                        &pool,
                                        &base));
            }
            else
#endif
            {
                gcmkVERIFY_OK(gckHARDWARE_SplitMemory(Kernel->hardware,
                                        gchal->GetContiguousHeap()->baseAddress,
                                        &pool,
                                        &base));
            }

            offset -= base;
            break;

        default:
            /* Invalid memory pool. */
            /* NOTE(review): this path returns without gcmkFOOTER(), so the
            ** gcmkHEADER_ARG trace above is left unbalanced — confirm
            ** whether a footer should be emitted here. */
            gcmkFATAL("Unknown memory pool: %u", pool);
            return gcvSTATUS_INVALID_ARGUMENT;
        }

        /* Build logical address of specified address. */
        *Logical = reinterpret_cast<gctPOINTER>
            (static_cast<gctUINT8 *>(logical) + offset);
    }
    while (gcvFALSE);

    if (gcmIS_SUCCESS(status))
    {
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
                      "gckKERNEL_MapVideoMemory: Address 0x%08X maps to %p",
                      Address, *Logical);
    }

    gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
                  "[LEAVE] gckKERNEL_MapVideoMemory(%u)", status);

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
static int drv_init(void)
#endif
{
    int ret;
    int result = -EINVAL;
    gceSTATUS status;
    gckGALDEVICE device = gcvNULL;
    struct class* device_class = gcvNULL;

    /* Construction arguments forwarded to gckGALDEVICE_Construct. */
    gcsDEVICE_CONSTRUCT_ARGS args = {
        .recovery  = recovery,
        .stuckDump = stuckDump,
    };

    gcmkHEADER();

#if ENABLE_GPU_CLOCK_BY_DRIVER && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
    {
        struct clk * clk;

        clk = clk_get(NULL, "GCCLK");

        if (IS_ERR(clk))
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): clk get error: %d\n",
                __FUNCTION__, __LINE__,
                PTR_ERR(clk)
                );

            result = -ENODEV;
            gcmkONERROR(gcvSTATUS_GENERIC_IO);
        }

        /*
         * APMU_GC_156M, APMU_GC_312M, APMU_GC_PLL2, APMU_GC_PLL2_DIV2 currently.
         * Use the 2X clock.
         */
        if (clk_set_rate(clk, coreClock * 2))
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Failed to set core clock.\n",
                __FUNCTION__, __LINE__
                );

            result = -EAGAIN;
            gcmkONERROR(gcvSTATUS_GENERIC_IO);
        }

        clk_enable(clk);

#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
        gc_pwr(1);
# endif
    }
#endif

    printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
        gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);

    /* when enable gpu profiler, we need to turn off gpu powerMangement */
    if(gpuProfiler)
        powerManagement = 0;

    if (showArgs)
    {
        gckOS_DumpParam();
    }

    if(logFileSize != 0)
    {
        gckDEBUGFS_Initialize();
    }

    /* Create the GAL device. */
    status = gckGALDEVICE_Construct(
#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
        irqLine3D0,
        registerMemBase3D0,
        registerMemSize3D0,
        irqLine3D1,
        registerMemBase3D1,
        registerMemSize3D1,
#else
        irqLine,
        registerMemBase,
        registerMemSize,
#endif
        irqLine2D,
        registerMemBase2D,
        registerMemSize2D,
        irqLineVG,
        registerMemBaseVG,
        registerMemSizeVG,
        contiguousBase,
        contiguousSize,
        bankSize,
        fastClear,
        compression,
        baseAddress,
        physSize,
        signal,
        logFileSize,
        powerManagement,
        gpuProfiler,
        &args,
        &device
    );

    if (gcmIS_ERROR(status))
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
                       "%s(%d): Failed to create the GAL device: status=%d\n",
                       __FUNCTION__, __LINE__, status);

        goto OnError;
    }

    /* Start the GAL device. */
    gcmkONERROR(gckGALDEVICE_Start(device));

    if ((physSize != 0)
       && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
       && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
    {
#if !gcdSECURITY
        /* Enable the new-style MMU on every core that supports it.
        ** NOTE(review): gckMMU_Enable failures are only traced, not
        ** propagated — presumably intentional best-effort; confirm. */
        status = gckMMU_Enable(device->kernels[gcvCORE_MAJOR]->mmu, baseAddress, physSize);

        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
            "Enable new MMU: status=%d\n", status);

#if gcdMULTI_GPU_AFFINITY
        status = gckMMU_Enable(device->kernels[gcvCORE_OCL]->mmu, baseAddress, physSize);

        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
            "Enable new MMU: status=%d\n", status);
#endif

        if ((device->kernels[gcvCORE_2D] != gcvNULL)
            && (device->kernels[gcvCORE_2D]->hardware->mmuVersion != 0))
        {
            status = gckMMU_Enable(device->kernels[gcvCORE_2D]->mmu, baseAddress, physSize);

            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
                "Enable new MMU for 2D: status=%d\n", status);
        }
#endif

        /* Reset the base address */
        device->baseAddress = 0;
    }

    /* Register the character device. */
    ret = register_chrdev(major, DEVICE_NAME, &driver_fops);

    if (ret < 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): Could not allocate major number for mmap.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    if (major == 0)
    {
        major = ret;
    }

    /* Create the device class. */
    /*####modified for marvell-bg2*/
    device_class = class_create(THIS_MODULE, "graphics_3d_class");
    /*####end for marvell-bg2*/

    if (IS_ERR(device_class))
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): Failed to create the class.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
#else
    device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
#endif

    /* Publish the device and class through the module-level globals. */
    galDevice = device;
    gpuClass  = device_class;

#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_DRIVER,
        "%s(%d): irqLine3D0=%d, contiguousSize=%lu, memBase3D0=0x%lX\n",
        __FUNCTION__, __LINE__,
        irqLine3D0, contiguousSize, registerMemBase3D0
        );
#else
    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_DRIVER,
        "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
        __FUNCTION__, __LINE__,
        irqLine, contiguousSize, registerMemBase
        );
#endif

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    /* Roll back. */
    if (device_class != gcvNULL)
    {
        device_destroy(device_class, MKDEV(major, 0));
        class_destroy(device_class);
    }

    if (device != gcvNULL)
    {
        gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
        gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
    }

    gcmkFOOTER();
    return result;
}
int drv_release( struct inode* inode, struct file* filp ) { gceSTATUS status; gcsHAL_PRIVATE_DATA_PTR data; gckGALDEVICE device; gctINT i; gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp); if (filp == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): filp is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } data = filp->private_data; if (data == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): private_data is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } device = data->device; if (device == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): device is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } if (!device->contiguousMapped) { if (data->contiguousLogical != gcvNULL) { gcmkONERROR(gckOS_UnmapMemoryEx( galDevice->os, galDevice->contiguousPhysical, galDevice->contiguousSize, data->contiguousLogical, data->pidOpen )); data->contiguousLogical = gcvNULL; } } /* A process gets detached. */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (galDevice->kernels[i] != gcvNULL) { gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen)); } } kfree(data); filp->private_data = NULL; /* Success. */ gcmkFOOTER_NO(); return 0; OnError: gcmkFOOTER(); return -ENOTTY; }
/* Walk every heap owned by Heap, rebuild each heap's free list by merging
** adjacent free nodes, and release any heap that has become entirely free.
** Caller must hold Heap->mutex; it is temporarily dropped while fully-free
** heaps are returned to the OS. */
static gceSTATUS _CompactKernelHeap(
    IN gckHEAP Heap
    )
{
    gcskHEAP_PTR heap, next;
    gctPOINTER p;
    gcskHEAP_PTR freeList = gcvNULL;

    gcmkHEADER_ARG("Heap=0x%x", Heap);

    /* Walk all the heaps. */
    for (heap = Heap->heap; heap != gcvNULL; heap = next)
    {
        gcskNODE_PTR lastFree = gcvNULL;

        /* Zero out the free list. */
        heap->freeList = gcvNULL;

        /* Start at the first node (nodes are laid out immediately after
        ** the gcskHEAP header). */
        for (p = (gctUINT8_PTR) (heap + 1);;)
        {
            /* Convert the pointer. */
            gcskNODE_PTR node = (gcskNODE_PTR) p;

            gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));

            /* Test if this node not used. */
            if (node->next != gcdIN_USE)
            {
                /* Test if this is the end of the heap (sentinel node). */
                if (node->bytes == 0)
                {
                    break;
                }

                /* Test of this is the first free node. */
                else if (lastFree == gcvNULL)
                {
                    /* Initialize the free list. */
                    heap->freeList = node;
                    lastFree = node;
                }

                else
                {
                    /* Test if this free node is contiguous with the previous
                    ** free node. */
                    if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
                    {
                        /* Just increase the size of the previous free node
                        ** (coalesce). */
                        lastFree->bytes += node->bytes;
                    }
                    else
                    {
                        /* Add to linked list. */
                        lastFree->next = node;
                        lastFree = node;
                    }
                }
            }

            /* Move to next node. */
            p = (gctUINT8_PTR) node + node->bytes;
        }

        /* Mark the end of the chain. */
        if (lastFree != gcvNULL)
        {
            lastFree->next = gcvNULL;
        }

        /* Get next heap. */
        next = heap->next;

        /* Check if the entire heap is free (one free node spanning all of
        ** the heap's usable space). */
        if ((heap->freeList != gcvNULL)
        &&  (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
        )
        {
            /* Remove the heap from the linked list. */
            if (heap->prev == gcvNULL)
            {
                Heap->heap = next;
            }
            else
            {
                heap->prev->next = next;
            }

            if (heap->next != gcvNULL)
            {
                heap->next->prev = heap->prev;
            }

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
            /* Update profiling. */
            Heap->heapCount  -= 1;
            Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
#endif

            /* Add this heap to the list of heaps that need to be freed. */
            heap->next = freeList;
            freeList   = heap;
        }
    }

    if (freeList != gcvNULL)
    {
        /* Release the mutex, remove any chance for a dead lock. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));

        /* Free all heaps in the free list.  These heaps are already
        ** unlinked, so dropping the mutex is safe. */
        for (heap = freeList; heap != gcvNULL; heap = next)
        {
            /* Get pointer to the next heap. */
            next = heap->next;

            /* Free the heap. */
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                           "Freeing heap 0x%x (%lu bytes)",
                           heap, heap->size + gcmSIZEOF(gcskHEAP));
            gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
        }

        /* Acquire the mutex again. */
        gcmkVERIFY_OK(
            gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
    }

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
int drv_open( struct inode* inode, struct file* filp ) { gceSTATUS status; gctBOOL attached = gcvFALSE; gcsHAL_PRIVATE_DATA_PTR data = gcvNULL; gctINT i; gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp); if (filp == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): filp is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN); if (data == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): private_data is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); } data->device = galDevice; data->mappedMemory = gcvNULL; data->contiguousLogical = gcvNULL; gcmkONERROR(gckOS_GetProcessID(&data->pidOpen)); /* Attached the process. */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (galDevice->kernels[i] != gcvNULL) { gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE)); } } attached = gcvTRUE; if (!galDevice->contiguousMapped) { if (galDevice->contiguousPhysical != gcvNULL) { gcmkONERROR(gckOS_MapMemory( galDevice->os, galDevice->contiguousPhysical, galDevice->contiguousSize, &data->contiguousLogical )); } } filp->private_data = data; /* Success. */ gcmkFOOTER_NO(); return 0; OnError: if (data != gcvNULL) { if (data->contiguousLogical != gcvNULL) { gcmkVERIFY_OK(gckOS_UnmapMemory( galDevice->os, galDevice->contiguousPhysical, galDevice->contiguousSize, data->contiguousLogical )); } kfree(data); } if (attached) { for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (galDevice->kernels[i] != gcvNULL) { gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE)); } } } gcmkFOOTER(); return -ENOTTY; }
/*******************************************************************************
**
**  gckKERNEL_Dispatch
**
**  Dispatch a command received from the user HAL layer.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctBOOL FromUser
**          whether the call is from the user space.
**
**      gcsHAL_INTERFACE * Interface
**          Pointer to a gcsHAL_INTERFACE structure that defines the command to
**          be dispatched.
**
**  OUTPUT:
**
**      gcsHAL_INTERFACE * Interface
**          Pointer to a gcsHAL_INTERFACE structure that receives any data to be
**          returned.
*/
gceSTATUS gckKERNEL_Dispatch(
    IN gckKERNEL Kernel,
    IN gctBOOL FromUser,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gctUINT32 bitsPerPixel;
    gctSIZE_T bytes;
    gcuVIDMEM_NODE_PTR node;
    gctBOOL locked = gcvFALSE;     /* Tracks a lock that must be rolled back on error. */
    gctPHYS_ADDR physical;
    gctUINT32 address;

    gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
                   Kernel, FromUser, Interface);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Interface != gcvNULL);

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
                  "Dispatching command %d", Interface->command);

    /* Dispatch on command. */
    switch (Interface->command)
    {
    case gcvHAL_GET_BASE_ADDRESS:
        /* Get base address. */
        gcmkONERROR(
            gckOS_GetBaseAddress(Kernel->os,
                                 &Interface->u.GetBaseAddress.baseAddress));
        break;

    case gcvHAL_QUERY_VIDEO_MEMORY:
        /* Query video memory size. */
        gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
        break;

    case gcvHAL_QUERY_CHIP_IDENTITY:
        /* Query chip identity. */
        gcmkONERROR(
            gckHARDWARE_QueryChipIdentity(
                Kernel->hardware,
                &Interface->u.QueryChipIdentity.chipModel,
                &Interface->u.QueryChipIdentity.chipRevision,
                &Interface->u.QueryChipIdentity.chipFeatures,
                &Interface->u.QueryChipIdentity.chipMinorFeatures,
                &Interface->u.QueryChipIdentity.chipMinorFeatures1));

        /* Query chip specifications. */
        gcmkONERROR(
            gckHARDWARE_QueryChipSpecs(
                Kernel->hardware,
                &Interface->u.QueryChipIdentity.streamCount,
                &Interface->u.QueryChipIdentity.registerMax,
                &Interface->u.QueryChipIdentity.threadCount,
                &Interface->u.QueryChipIdentity.shaderCoreCount,
                &Interface->u.QueryChipIdentity.vertexCacheSize,
                &Interface->u.QueryChipIdentity.vertexOutputBufferSize));
        break;

    case gcvHAL_MAP_MEMORY:
        physical = Interface->u.MapMemory.physical;

        /* Map memory. */
        gcmkONERROR(
            gckKERNEL_MapMemory(Kernel,
                                physical,
                                Interface->u.MapMemory.bytes,
                                &Interface->u.MapMemory.logical));
        break;

    case gcvHAL_UNMAP_MEMORY:
        physical = Interface->u.UnmapMemory.physical;

        /* Unmap memory. */
        gcmkONERROR(
            gckKERNEL_UnmapMemory(Kernel,
                                  physical,
                                  Interface->u.UnmapMemory.bytes,
                                  Interface->u.UnmapMemory.logical));
        break;

    case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
        /* Allocate non-paged memory. */
#ifdef __QNXNTO__
        if (FromUser)
        {
            gcmkONERROR(
                gckOS_AllocateNonPagedMemoryShmPool(
                    Kernel->os,
                    FromUser,
                    Interface->pid,
                    Interface->handle,
                    &Interface->u.AllocateNonPagedMemory.bytes,
                    &Interface->u.AllocateNonPagedMemory.physical,
                    &Interface->u.AllocateNonPagedMemory.logical));
            break;
        }
#endif
        gcmkONERROR(
            gckOS_AllocateNonPagedMemory(
                Kernel->os,
                FromUser,
                &Interface->u.AllocateNonPagedMemory.bytes,
                &Interface->u.AllocateNonPagedMemory.physical,
                &Interface->u.AllocateNonPagedMemory.logical));
        break;

    case gcvHAL_FREE_NON_PAGED_MEMORY:
        physical = Interface->u.FreeNonPagedMemory.physical;

        /* Free non-paged memory. */
        gcmkONERROR(
            gckOS_FreeNonPagedMemory(Kernel->os,
                                     Interface->u.FreeNonPagedMemory.bytes,
                                     physical,
                                     Interface->u.FreeNonPagedMemory.logical));
        break;

    case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
        /* Allocate contiguous memory. */
#ifdef __QNXNTO__
        if (FromUser)
        {
            gcmkONERROR(
                gckOS_AllocateNonPagedMemoryShmPool(
                    Kernel->os,
                    FromUser,
                    Interface->pid,
                    Interface->handle,
                    &Interface->u.AllocateNonPagedMemory.bytes,
                    &Interface->u.AllocateNonPagedMemory.physical,
                    &Interface->u.AllocateNonPagedMemory.logical));
            break;
        }
#endif
        gcmkONERROR(
            gckOS_AllocateContiguous(
                Kernel->os,
                FromUser,
                &Interface->u.AllocateContiguousMemory.bytes,
                &Interface->u.AllocateContiguousMemory.physical,
                &Interface->u.AllocateContiguousMemory.logical));
        break;

    case gcvHAL_FREE_CONTIGUOUS_MEMORY:
        physical = Interface->u.FreeContiguousMemory.physical;

        /* Free contiguous memory. */
        gcmkONERROR(
            gckOS_FreeContiguous(Kernel->os,
                                 physical,
                                 Interface->u.FreeContiguousMemory.logical,
                                 Interface->u.FreeContiguousMemory.bytes));
        break;

    case gcvHAL_ALLOCATE_VIDEO_MEMORY:
        /* Align width and height to tiles. */
        gcmkONERROR(
            gckHARDWARE_AlignToTile(Kernel->hardware,
                                    Interface->u.AllocateVideoMemory.type,
                                    &Interface->u.AllocateVideoMemory.width,
                                    &Interface->u.AllocateVideoMemory.height,
                                    gcvNULL));

        /* Convert format into bytes per pixel and bytes per tile. */
        gcmkONERROR(
            gckHARDWARE_ConvertFormat(Kernel->hardware,
                                      Interface->u.AllocateVideoMemory.format,
                                      &bitsPerPixel,
                                      gcvNULL));

        /* Compute number of bytes for the allocation. */
        bytes = Interface->u.AllocateVideoMemory.width * bitsPerPixel
              * Interface->u.AllocateVideoMemory.height
              * Interface->u.AllocateVideoMemory.depth / 8;

        /* Allocate memory. */
#ifdef __QNXNTO__
        gcmkONERROR(
            _AllocateMemory(Kernel,
                            &Interface->u.AllocateVideoMemory.pool,
                            bytes,
                            64,
                            Interface->u.AllocateVideoMemory.type,
                            Interface->handle,
                            &Interface->u.AllocateVideoMemory.node));
#else
        gcmkONERROR(
            _AllocateMemory(Kernel,
                            &Interface->u.AllocateVideoMemory.pool,
                            bytes,
                            64,
                            Interface->u.AllocateVideoMemory.type,
                            &Interface->u.AllocateVideoMemory.node));
#endif
        break;

    case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
        /* Allocate memory. */
#ifdef __QNXNTO__
        gcmkONERROR(
            _AllocateMemory(Kernel,
                            &Interface->u.AllocateLinearVideoMemory.pool,
                            Interface->u.AllocateLinearVideoMemory.bytes,
                            Interface->u.AllocateLinearVideoMemory.alignment,
                            Interface->u.AllocateLinearVideoMemory.type,
                            Interface->handle,
                            &Interface->u.AllocateLinearVideoMemory.node));

        /* Set the current user pid in the node,
         * which is used while locking memory. */
        gcmkVERIFY_OK(gckVIDMEM_SetPID(
                Interface->u.AllocateLinearVideoMemory.node,
                Interface->pid));
#else
        gcmkONERROR(
            _AllocateMemory(Kernel,
                            &Interface->u.AllocateLinearVideoMemory.pool,
                            Interface->u.AllocateLinearVideoMemory.bytes,
                            Interface->u.AllocateLinearVideoMemory.alignment,
                            Interface->u.AllocateLinearVideoMemory.type,
                            &Interface->u.AllocateLinearVideoMemory.node));
#endif
        break;

    case gcvHAL_FREE_VIDEO_MEMORY:
#ifdef __QNXNTO__
        /* Unmap the video memory (QNX maps it per-process at lock time). */
        node = Interface->u.FreeVideoMemory.node;

        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM
            && node->VidMem.logical != gcvNULL)
        {
            gcmkONERROR(
                gckKERNEL_UnmapVideoMemory(Kernel,
                                           node->VidMem.logical,
                                           Interface->pid,
                                           node->VidMem.bytes));
            node->VidMem.logical = gcvNULL;
        }
#endif
        /* Free video memory. */
        gcmkONERROR(
            gckVIDMEM_Free(Interface->u.FreeVideoMemory.node));
        break;

    case gcvHAL_LOCK_VIDEO_MEMORY:
        /* Lock video memory. */
        gcmkONERROR(
            gckVIDMEM_Lock(Interface->u.LockVideoMemory.node,
                           &Interface->u.LockVideoMemory.address));

        /* Remember the lock so a later failure can roll it back. */
        locked = gcvTRUE;

        node = Interface->u.LockVideoMemory.node;
        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        {
            /* Map video memory address into user space. */
#ifdef __QNXNTO__
            if (node->VidMem.logical == gcvNULL)
            {
                gcmkONERROR(
                    gckKERNEL_MapVideoMemory(Kernel,
                                             FromUser,
                                             Interface->u.LockVideoMemory.address,
                                             Interface->pid,
                                             node->VidMem.bytes,
                                             &node->VidMem.logical));
            }

            Interface->u.LockVideoMemory.memory = node->VidMem.logical;
#else
            gcmkONERROR(
                gckKERNEL_MapVideoMemory(Kernel,
                                         FromUser,
                                         Interface->u.LockVideoMemory.address,
                                         &Interface->u.LockVideoMemory.memory));
#endif

#ifdef __QNXNTO__
            /* Add more information to node, which is used while unmapping. */
            gcmkVERIFY_OK(gckVIDMEM_SetPID(
                    Interface->u.LockVideoMemory.node,
                    Interface->pid));
#endif
        }
        else
        {
            /* Copy logical memory for virtual memory. */
            Interface->u.LockVideoMemory.memory = node->Virtual.logical;

            /* Success. */
            status = gcvSTATUS_OK;
        }

#if gcdSECURE_USER
        /* Return logical address as physical address. */
        Interface->u.LockVideoMemory.address =
            gcmPTR2INT(Interface->u.LockVideoMemory.memory);
#endif
        break;

    case gcvHAL_UNLOCK_VIDEO_MEMORY:
        /* Unlock video memory. */
        node = Interface->u.UnlockVideoMemory.node;

        /* Unlock video memory. */
        gcmkONERROR(
            gckVIDMEM_Unlock(node,
                             Interface->u.UnlockVideoMemory.type,
                             &Interface->u.UnlockVideoMemory.asynchroneous));
        break;

    case gcvHAL_EVENT_COMMIT:
        /* Commit an event queue. */
        gcmkONERROR(
            gckEVENT_Commit(Kernel->event, Interface->u.Event.queue));
        break;

    case gcvHAL_COMMIT:
        /* Commit a command and context buffer. */
        gcmkONERROR(
            gckCOMMAND_Commit(Kernel->command,
                              Interface->u.Commit.commandBuffer,
                              Interface->u.Commit.contextBuffer,
                              Interface->u.Commit.process));
        break;

    case gcvHAL_STALL:
        /* Stall the command queue. */
        gcmkONERROR(gckCOMMAND_Stall(Kernel->command));
        break;

    case gcvHAL_MAP_USER_MEMORY:
        /* Map user memory to DMA. */
        gcmkONERROR(
            gckOS_MapUserMemory(Kernel->os,
                                Interface->u.MapUserMemory.memory,
                                Interface->u.MapUserMemory.size,
                                &Interface->u.MapUserMemory.info,
                                &Interface->u.MapUserMemory.address));
        break;

    case gcvHAL_UNMAP_USER_MEMORY:
        /* NOTE(review): reads u.MapUserMemory.address while all other
        ** fields come from u.UnmapUserMemory — presumably the two union
        ** members share layout; confirm against gcsHAL_INTERFACE. */
        address = Interface->u.MapUserMemory.address;

        /* Unmap user memory. */
        gcmkONERROR(
            gckOS_UnmapUserMemory(Kernel->os,
                                  Interface->u.UnmapUserMemory.memory,
                                  Interface->u.UnmapUserMemory.size,
                                  Interface->u.UnmapUserMemory.info,
                                  address));
        break;

#if !USE_NEW_LINUX_SIGNAL
    case gcvHAL_USER_SIGNAL:
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
                      "Dispatching gcvHAL_USER_SIGNAL %d",
                      Interface->u.UserSignal.command);

        /* Dispatch depends on the user signal subcommands. */
        switch(Interface->u.UserSignal.command)
        {
        case gcvUSER_SIGNAL_CREATE:
            /* Create a signal used in the user space. */
            gcmkONERROR(
                gckOS_CreateUserSignal(Kernel->os,
                                       Interface->u.UserSignal.manualReset,
                                       Interface->u.UserSignal.signalType,
                                       &Interface->u.UserSignal.id));
            break;

        case gcvUSER_SIGNAL_DESTROY:
            /* Destroy the signal. */
            gcmkONERROR(
                gckOS_DestroyUserSignal(Kernel->os,
                                        Interface->u.UserSignal.id));
            break;

        case gcvUSER_SIGNAL_SIGNAL:
            /* Signal the signal. */
            gcmkONERROR(
                gckOS_SignalUserSignal(Kernel->os,
                                       Interface->u.UserSignal.id,
                                       Interface->u.UserSignal.state));
            break;

        case gcvUSER_SIGNAL_WAIT:
            /* Wait on the signal.  Timeout is not an error, so the status
            ** is propagated directly instead of via gcmkONERROR. */
            status = gckOS_WaitUserSignal(Kernel->os,
                                          Interface->u.UserSignal.id,
                                          Interface->u.UserSignal.wait);
            break;

        default:
            /* Invalid user signal command. */
            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }
        break;
#endif

    case gcvHAL_SET_POWER_MANAGEMENT_STATE:
        /* Set the power management state. */
        gcmkONERROR(
            gckHARDWARE_SetPowerManagementState(
                Kernel->hardware,
                Interface->u.SetPowerManagement.state));
        break;

    case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
        /* Chip is not idle. */
        Interface->u.QueryPowerManagement.isIdle = gcvFALSE;

        /* Query the power management state. */
        gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
            Kernel->hardware,
            &Interface->u.QueryPowerManagement.state));

        /* Query the idle state. */
        gcmkONERROR(
            gckHARDWARE_QueryIdle(Kernel->hardware,
                                  &Interface->u.QueryPowerManagement.isIdle));
        break;

    case gcvHAL_READ_REGISTER:
#if gcdREGISTER_ACCESS_FROM_USER
        /* Read a register. */
        gcmkONERROR(
            gckOS_ReadRegister(Kernel->os,
                               Interface->u.ReadRegisterData.address,
                               &Interface->u.ReadRegisterData.data));
#else
        /* No access from user land to read registers. */
        Interface->u.ReadRegisterData.data = 0;
        status = gcvSTATUS_NOT_SUPPORTED;
#endif
        break;

    case gcvHAL_WRITE_REGISTER:
#if gcdREGISTER_ACCESS_FROM_USER
        /* Write a register. */
        gcmkONERROR(
            gckOS_WriteRegister(Kernel->os,
                                Interface->u.WriteRegisterData.address,
                                Interface->u.WriteRegisterData.data));
#else
        /* No access from user land to write registers. */
        status = gcvSTATUS_NOT_SUPPORTED;
#endif
        break;

    case gcvHAL_READ_ALL_PROFILE_REGISTERS:
#if VIVANTE_PROFILER
        /* Read all 3D profile registers. */
        gcmkONERROR(
            gckHARDWARE_QueryProfileRegisters(
                Kernel->hardware,
                &Interface->u.RegisterProfileData.counters));
#else
        status = gcvSTATUS_OK;
#endif
        break;

    case gcvHAL_PROFILE_REGISTERS_2D:
#if VIVANTE_PROFILER
        /* Read all 2D profile registers. */
        gcmkONERROR(
            gckHARDWARE_ProfileEngine2D(
                Kernel->hardware,
                Interface->u.RegisterProfileData2D.hwProfile2D));
#else
        status = gcvSTATUS_OK;
#endif
        break;

    case gcvHAL_GET_PROFILE_SETTING:
#if VIVANTE_PROFILER
        /* Get profile setting */
        Interface->u.GetProfileSetting.enable = Kernel->profileEnable;

        gcmkVERIFY_OK(
            gckOS_MemCopy(Interface->u.GetProfileSetting.fileName,
                          Kernel->profileFileName,
                          gcdMAX_PROFILE_FILE_NAME));
#endif

        status = gcvSTATUS_OK;
        break;

    case gcvHAL_SET_PROFILE_SETTING:
#if VIVANTE_PROFILER
        /* Set profile setting */
        Kernel->profileEnable = Interface->u.SetProfileSetting.enable;

        gcmkVERIFY_OK(
            gckOS_MemCopy(Kernel->profileFileName,
                          Interface->u.SetProfileSetting.fileName,
                          gcdMAX_PROFILE_FILE_NAME));
#endif

        status = gcvSTATUS_OK;
        break;

    case gcvHAL_QUERY_KERNEL_SETTINGS:
        /* Get kernel settings. */
        gcmkONERROR(
            gckKERNEL_QuerySettings(Kernel,
                                    &Interface->u.QueryKernelSettings.settings));
        break;

    case gcvHAL_RESET:
        /* Reset the hardware. */
        gcmkONERROR(
            gckHARDWARE_Reset(Kernel->hardware));
        break;

    case gcvHAL_DEBUG:
        /* Set debug level and zones. */
        if (Interface->u.Debug.set)
        {
            gckOS_SetDebugLevel(Interface->u.Debug.level);
            gckOS_SetDebugZones(Interface->u.Debug.zones,
                                Interface->u.Debug.enable);
        }

        if (Interface->u.Debug.message[0] != '\0')
        {
            /* Print a message to the debugger.  The message comes from
            ** user space, so it must never be used as the format string
            ** (format-string vulnerability, CERT FIO30-C). */
            gcmkPRINT("%s", Interface->u.Debug.message);
        }
        status = gcvSTATUS_OK;
        break;

    case gcvHAL_CACHE:
        if (Interface->u.Cache.invalidate)
        {
            /* Flush and invalidate the cache. */
            status = gckOS_CacheInvalidate(Kernel->os,
                                           Interface->u.Cache.process,
                                           Interface->u.Cache.logical,
                                           Interface->u.Cache.bytes);
        }
        else
        {
            /* Flush the cache. */
            status = gckOS_CacheFlush(Kernel->os,
                                      Interface->u.Cache.process,
                                      Interface->u.Cache.logical,
                                      Interface->u.Cache.bytes);
        }
        break;

    default:
        /* Invalid command. */
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

OnError:
    /* Save status so user space sees the per-command result. */
    Interface->status = status;

    if (gcmIS_ERROR(status))
    {
        if (locked)
        {
            /* Roll back the lock. */
            gcmkVERIFY_OK(
                gckVIDMEM_Unlock(Interface->u.LockVideoMemory.node,
                                 gcvSURF_TYPE_UNKNOWN,
                                 gcvNULL));
        }
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  _AllocateMemory
**
**  Private function to walk all required memory pools to allocate the requested
**  amount of video memory.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gcePOOL * Pool
**          Pointer to the requested pool.  gcvPOOL_DEFAULT, gcvPOOL_LOCAL and
**          gcvPOOL_UNIFIED are multi-selection pools that trigger a fallback
**          walk over several pools.
**
**      gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**      gctSIZE_T Alignment
**          Required alignment of the allocation.
**
**      gceSURF_TYPE Type
**          Surface type of the allocation.
**
**  OUTPUT:
**
**      gcePOOL * Pool
**          Receives the pool the memory was actually allocated from.
**
**      gcuVIDMEM_NODE_PTR * Node
**          Pointer to a variable that receives the allocated video memory node.
*/
static gceSTATUS _AllocateMemory(
    IN gckKERNEL Kernel,
    IN OUT gcePOOL * Pool,
    IN gctSIZE_T Bytes,
    IN gctSIZE_T Alignment,
    IN gceSURF_TYPE Type,
#ifdef __QNXNTO__
    IN gctHANDLE Handle,
#endif
    OUT gcuVIDMEM_NODE_PTR * Node
    )
{
    gcePOOL pool;
    gceSTATUS status;
    gckVIDMEM videoMemory;

    gcmkVERIFY_ARGUMENT(Pool != gcvNULL);

    /* Get initial pool.  Multi-selection pools are mapped to the first
    ** concrete pool in their fallback order. */
    switch (pool = *Pool)
    {
    case gcvPOOL_DEFAULT:
    case gcvPOOL_LOCAL:
        pool = gcvPOOL_LOCAL_INTERNAL;
        break;

    case gcvPOOL_UNIFIED:
        pool = gcvPOOL_SYSTEM;
        break;

    default:
        break;
    }

    do
    {
        /* Verify the number of bytes to allocate. */
        if (Bytes == 0)
        {
            gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
        }

        if (pool == gcvPOOL_VIRTUAL)
        {
            /* Create a gcuVIDMEM_NODE for virtual memory. */
#ifdef __QNXNTO__
            gcmkERR_BREAK(
                gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Handle, Node));
#else
            gcmkERR_BREAK(
                gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
#endif

            /* Success. */
            break;
        }

        else if (pool == gcvPOOL_CONTIGUOUS)
        {
            /* Create a gcuVIDMEM_NODE for contiguous memory. */
#ifdef __QNXNTO__
            status = gckVIDMEM_ConstructVirtual(Kernel, gcvTRUE, Bytes, Handle, Node);
#else
            status = gckVIDMEM_ConstructVirtual(Kernel, gcvTRUE, Bytes, Node);
#endif
            if (gcmIS_SUCCESS(status))
            {
                /* Memory allocated. */
                break;
            }
        }

        else
        {
            /* Get pointer to gckVIDMEM object for pool. */
            status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);

            if (gcmIS_SUCCESS(status))
            {
                /* Allocate memory. */
                status = gckVIDMEM_AllocateLinear(videoMemory,
                                                  Bytes,
                                                  Alignment,
                                                  Type,
#ifdef __QNXNTO__
                                                  Handle,
#endif
                                                  Node);

                if (gcmIS_SUCCESS(status))
                {
                    /* Memory allocated. */
                    (*Node)->VidMem.pool = pool;
                    break;
                }
            }
        }

        /* The current pool failed: advance along the fallback chain
        ** internal -> external -> system -> contiguous -> virtual. */
        if (pool == gcvPOOL_LOCAL_INTERNAL)
        {
            /* Advance to external memory. */
            pool = gcvPOOL_LOCAL_EXTERNAL;
        }

        else if (pool == gcvPOOL_LOCAL_EXTERNAL)
        {
            /* Advance to contiguous system memory. */
            pool = gcvPOOL_SYSTEM;
        }

        else if (pool == gcvPOOL_SYSTEM)
        {
            /* Advance to contiguous memory. */
            pool = gcvPOOL_CONTIGUOUS;
        }

        else if ((pool == gcvPOOL_CONTIGUOUS)
             &&  (Type != gcvSURF_TILE_STATUS)
        )
        {
            /* One-shot trace flag: log only the first virtual fallback. */
            static int count = 1;

            /* Advance to virtual memory. */
            if (count == 1)
            {
                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
                               "Try to allocate virtual memory!\n");
                count = 0;
            }
            pool = gcvPOOL_VIRTUAL;
        }

        else
        {
            /* Out of pools. */
            break;
        }
    }
    /* Loop only for multiple selection pools. */
    while ((*Pool == gcvPOOL_DEFAULT)
    ||     (*Pool == gcvPOOL_LOCAL)
    ||     (*Pool == gcvPOOL_UNIFIED)
    );

    if (gcmIS_SUCCESS(status))
    {
        /* Return pool used for allocation. */
        *Pool = pool;
    }

    /* Return status. */
    return status;
}
/******************************************************************************* ** ** gckVGMMU_AllocatePages ** ** Allocate pages inside the page table. ** ** INPUT: ** ** gckVGMMU Mmu ** Pointer to an gckVGMMU object. ** ** gctSIZE_T PageCount ** Number of pages to allocate. ** ** OUTPUT: ** ** gctPOINTER * PageTable ** Pointer to a variable that receives the base address of the page ** table. ** ** gctUINT32 * Address ** Pointer to a variable that receives the hardware specific address. */ gceSTATUS gckVGMMU_AllocatePages( IN gckVGMMU Mmu, IN gctSIZE_T PageCount, OUT gctPOINTER * PageTable, OUT gctUINT32 * Address ) { gceSTATUS status; gctUINT32 tail, index, i; gctUINT32 * table; gctBOOL allocated = gcvFALSE; gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x", Mmu, PageCount, PageTable, Address); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU); gcmkVERIFY_ARGUMENT(PageCount > 0); gcmkVERIFY_ARGUMENT(PageTable != gcvNULL); gcmkVERIFY_ARGUMENT(Address != gcvNULL); gcmkTRACE_ZONE( gcvLEVEL_INFO, gcvZONE_MMU, "%s(%d): %u pages.\n", __FUNCTION__, __LINE__, PageCount ); if (PageCount > Mmu->entryCount) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): page table too small for %u pages.\n", __FUNCTION__, __LINE__, PageCount ); gcmkFOOTER_NO(); /* Not enough pages avaiable. */ return gcvSTATUS_OUT_OF_RESOURCES; } /* Grab the mutex. */ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE); if (status < 0) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): could not acquire mutex.\n" ,__FUNCTION__, __LINE__ ); gcmkFOOTER(); /* Error. */ return status; } /* Compute the tail for this allocation. */ tail = Mmu->entryCount - PageCount; /* Walk all entries until we find enough slots. */ for (index = Mmu->entry; index <= tail;) { /* Access page table. */ table = (gctUINT32 *) Mmu->pageTableLogical + index; /* See if all slots are available. 
*/ for (i = 0; i < PageCount; i++, table++) { if (*table != ~0) { /* Start from next slot. */ index += i + 1; break; } } if (i == PageCount) { /* Bail out if we have enough page entries. */ allocated = gcvTRUE; break; } } if (!allocated) { if (status >= 0) { /* Walk all entries until we find enough slots. */ for (index = 0; index <= tail;) { /* Access page table. */ table = (gctUINT32 *) Mmu->pageTableLogical + index; /* See if all slots are available. */ for (i = 0; i < PageCount; i++, table++) { if (*table != ~0) { /* Start from next slot. */ index += i + 1; break; } } if (i == PageCount) { /* Bail out if we have enough page entries. */ allocated = gcvTRUE; break; } } } } if (!allocated && (status >= 0)) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_MMU, "%s(%d): not enough free pages for %u pages.\n", __FUNCTION__, __LINE__, PageCount ); /* Not enough empty slots available. */ status = gcvSTATUS_OUT_OF_RESOURCES; } if (status >= 0) { /* Build virtual address. */ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, Address); if (status >= 0) { /* Update current entry into page table. */ Mmu->entry = index + PageCount; /* Return pointer to page table. */ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index; gcmkTRACE_ZONE( gcvLEVEL_INFO, gcvZONE_MMU, "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n", __FUNCTION__, __LINE__, PageCount, index, *Address, *PageTable ); } } /* Release the mutex. */ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex)); gcmkFOOTER(); /* Return status. */ return status; }
/* Acquire power-related resources (runtime PM, resets, regulator, clocks) for
** the i.MX GPU platform.  Missing clocks are tolerated: the corresponding
** core is simply disabled (its clk pointer is left NULL).  Always returns
** gcvSTATUS_OK unless the mandatory GPU regulator is missing. */
gceSTATUS
_GetPower(
    IN gckPLATFORM Platform
    )
{
    struct device* pdev = &Platform->device->dev;
    struct imx_priv *priv = Platform->priv;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    struct reset_control *rstc;
#endif

#ifdef CONFIG_PM
    /*Init runtime pm for gpu*/
    pm_runtime_enable(pdev);
    priv->pmdev = pdev;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    /* Optional reset lines per core; NULL when the DT does not provide one. */
    rstc = devm_reset_control_get(pdev, "gpu3d");
    priv->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
    rstc = devm_reset_control_get(pdev, "gpu2d");
    priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
    rstc = devm_reset_control_get(pdev, "gpuvg");
    priv->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    /*get gpu regulator*/
    priv->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    priv->gpu_regulator = devm_regulator_get(pdev, "pu");
#endif
    /* NOTE(review): for kernels in [3.5, 3.10) neither branch above assigns
    ** gpu_regulator; the IS_ERR check below is correspondingly compiled out
    ** for that range. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    if (IS_ERR(priv->gpu_regulator)) {
       gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
               "%s(%d): Failed to get gpu regulator \n",
               __FUNCTION__, __LINE__);
       return gcvSTATUS_NOT_FOUND;
    }
#endif
#endif

    /*Initialize the clock structure*/
    priv->clk_3d_core = clk_get(pdev, "gpu3d_clk");
    if (!IS_ERR(priv->clk_3d_core)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
        if (cpu_is_mx6q()) {
            priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
            if (IS_ERR(priv->clk_3d_shader)) {
                /* 3D needs both core and shader clocks; drop the core clock
                ** reference and disable 3D entirely. */
                clk_put(priv->clk_3d_core);
                priv->clk_3d_core = NULL;
                priv->clk_3d_shader = NULL;
                gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
            }
        }
#else
        /* NOTE(review): clk_3d_axi is not checked with IS_ERR here, unlike the
        ** other clocks - confirm a failed gpu3d_axi_clk lookup is harmless. */
        priv->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
        priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
        if (IS_ERR(priv->clk_3d_shader)) {
            clk_put(priv->clk_3d_core);
            priv->clk_3d_core = NULL;
            priv->clk_3d_shader = NULL;
            gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
        }
#endif
    } else {
        priv->clk_3d_core = NULL;
        gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
    }

    priv->clk_2d_core = clk_get(pdev, "gpu2d_clk");
    if (IS_ERR(priv->clk_2d_core)) {
        priv->clk_2d_core = NULL;
        gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
    } else {
        priv->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
        if (IS_ERR(priv->clk_2d_axi)) {
            priv->clk_2d_axi = NULL;
            gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
        }

        priv->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
        if (IS_ERR(priv->clk_vg_axi)) {
            priv->clk_vg_axi = NULL;
            gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
        }
    }

#if gcdENABLE_FSCALE_VAL_ADJUST
    /* 'pdevice' is a file-scope global used by the thermal notifier. */
    pdevice = Platform->device;
    REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
    {
        int ret = 0;
        ret = driver_create_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
        if(ret)
            dev_err(&pdevice->dev, "create gpu3DMinClock attr failed (%d)\n", ret);
    }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
    imx6sx_optimize_qosc_for_GPU(Platform);
#endif
    return gcvSTATUS_OK;
}
/******************************************************************************* ** ** gckKERNEL_GetVideoMemoryPool ** ** Get the gckVIDMEM object belonging to the specified pool. ** ** INPUT: ** ** gckKERNEL Kernel ** Pointer to an gckKERNEL object. ** ** gcePOOL Pool ** Pool to query gckVIDMEM object for. ** ** OUTPUT: ** ** gckVIDMEM * VideoMemory ** Pointer to a variable that will hold the pointer to the gckVIDMEM ** object belonging to the requested pool. */ gceSTATUS gckKERNEL_GetVideoMemoryPool( IN gckKERNEL Kernel, IN gcePOOL Pool, OUT gckVIDMEM * VideoMemory ) { GCHAL * gchal; gckVIDMEM videoMemory; gceSTATUS status; gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool); gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL, "[ENTER] gckHARDWARE_GetVideoMemoryPool"); /* Verify the arguments. */ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL); gcmkVERIFY_ARGUMENT(VideoMemory != gcvNULL); /* Extract the pointer to the GCHAL class. */ gchal = (GCHAL *) Kernel->context; /* Dispatch on pool. */ switch (Pool) { case gcvPOOL_LOCAL_INTERNAL: /* Internal memory. */ videoMemory = gchal->GetInternalHeap(); break; case gcvPOOL_LOCAL_EXTERNAL: /* External memory. */ videoMemory = gchal->GetExternalHeap(); break; case gcvPOOL_SYSTEM: /* System memory. */ videoMemory = gchal->GetContiguousHeap(); break; default: /* Unknown pool. */ videoMemory = gcvNULL; gcmkFATAL("Unknown memory pool: %u", Pool); } /* Return pointer to the gckVIDMEM object. */ *VideoMemory = videoMemory; /* Determine the status. */ status = (videoMemory == gcvNULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK; if (gcmIS_SUCCESS(status)) { gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL, "gckHARDWARE_GetVideoMemoryPool: Pool %u starts at %p", Pool, videoMemory); } gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL, "[LEAVE] gckHARDWARE_GetVideoMemoryPool(%u)", status); /* Return the status. */ gcmkFOOTER_ARG("status=%d, *VideoMemory=%p", status, gcmOPT_VALUE(VideoMemory)); return status; }
/*******************************************************************************
**
**  gckVIDMEM_Lock
**
**  Lock a video memory node and return it's hardware specific address.
**
**  For nodes in a real video memory heap this only bumps a reference count
**  and computes the address.  For virtual nodes the first lock pins the
**  pages and (for non-contiguous memory) maps them through the GPU MMU.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a gcuVIDMEM_NODE union.
**
**  OUTPUT:
**
**      gctUINT32 * Address
**          Pointer to a variable that will hold the hardware specific address.
*/
gceSTATUS
gckVIDMEM_Lock(
    IN gcuVIDMEM_NODE_PTR Node,
    OUT gctUINT32 * Address
    )
{
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;   /* mutex held - needs release on error */
    gctBOOL locked = gcvFALSE;     /* pages locked - needs unlock on error */
    gckOS os = gcvNULL;

    gcmkHEADER_ARG("Node=0x%x", Node);

    /* Verify the arguments. */
    gcmkVERIFY_ARGUMENT(Address != gcvNULL);

    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        /* Increment the lock count. */
        Node->VidMem.locked ++;

        /* Return the address of the node. */
        *Address = Node->VidMem.memory->baseAddress
                 + Node->VidMem.offset
                 + Node->VidMem.alignment;

        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                      "Locked node 0x%x (%d) @ 0x%08X",
                      Node,
                      Node->VidMem.locked,
                      *Address);
    }

    /*************************** Virtual Memory *******************************/

    else
    {
        /* Verify the gckKERNEL object pointer. */
        gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);

        /* Extract the gckOS object pointer. */
        os = Node->Virtual.kernel->os;
        gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

        /* Grab the mutex; lock count and page table are shared state. */
        gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
        acquired = gcvTRUE;

        /* Increment the lock count.  Only the first lock does real work. */
        if (Node->Virtual.locked ++ == 0)
        {
            /* Is this node pending for a final unlock? */
#ifdef __QNXNTO__
            if (!Node->Virtual.contiguous && Node->Virtual.unlockPending)
#else
            if (!Node->Virtual.contiguous && Node->Virtual.pending)
#endif
            {
                /* Make sure we have a page table. */
                gcmkASSERT(Node->Virtual.pageTable != gcvNULL);

                /* Remove pending unlock. */
#ifdef __QNXNTO__
                Node->Virtual.unlockPending = gcvFALSE;
#else
                Node->Virtual.pending = gcvFALSE;
#endif
            }

            /* First lock - create a page table. */
            /* NOTE(review): if the pending-unlock branch above was taken,
            ** pageTable is non-NULL here and this assert would fire in debug
            ** builds; confirm the intended control flow (an early-out or an
            ** 'else' may be missing upstream). */
            gcmkASSERT(Node->Virtual.pageTable == gcvNULL);

            /* Make sure we mark our node as not flushed. */
#ifdef __QNXNTO__
            Node->Virtual.unlockPending = gcvFALSE;
#else
            Node->Virtual.pending = gcvFALSE;
#endif

            /* Lock the allocated pages. */
#ifdef __QNXNTO__
            gcmkONERROR(
                gckOS_LockPages(os,
                                Node->Virtual.physical,
                                Node->Virtual.bytes,
                                Node->Virtual.userPID,
                                &Node->Virtual.logical,
                                &Node->Virtual.pageCount));
#else
            gcmkONERROR(
                gckOS_LockPages(os,
                                Node->Virtual.physical,
                                Node->Virtual.bytes,
                                &Node->Virtual.logical,
                                &Node->Virtual.pageCount));
#endif
            locked = gcvTRUE;

            if (Node->Virtual.contiguous)
            {
                /* Get physical address directly */
                gcmkONERROR(gckOS_GetPhysicalAddress(os,
                            Node->Virtual.logical,
                            &Node->Virtual.address));
            }
            else
            {
                /* Allocate pages inside the MMU. */
                gcmkONERROR(
                    gckMMU_AllocatePages(Node->Virtual.kernel->mmu,
                                         Node->Virtual.pageCount,
                                         &Node->Virtual.pageTable,
                                         &Node->Virtual.address));

                /* Map the pages. */
#ifdef __QNXNTO__
                gcmkONERROR(
                    gckOS_MapPages(os,
                                   Node->Virtual.physical,
                                   Node->Virtual.logical,
                                   Node->Virtual.pageCount,
                                   Node->Virtual.pageTable));
#else
                gcmkONERROR(
                    gckOS_MapPages(os,
                                   Node->Virtual.physical,
                                   Node->Virtual.pageCount,
                                   Node->Virtual.pageTable));
#endif

                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                              "Mapped virtual node 0x%x to 0x%08X",
                              Node,
                              Node->Virtual.address);
            }
        }

        /* Return hardware address. */
        *Address = Node->Virtual.address;

        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Success. */
    gcmkFOOTER_ARG("*Address=%08x", *Address);
    return gcvSTATUS_OK;

OnError:
    /* Roll back in reverse order of acquisition. */
    if (locked)
    {
        if (Node->Virtual.pageTable != gcvNULL)
        {
            /* Free the pages from the MMU. */
            gcmkVERIFY_OK(
                gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                 Node->Virtual.pageTable,
                                 Node->Virtual.pageCount));
            Node->Virtual.pageTable = gcvNULL;
        }

        /* Unlock the pages. */
#ifdef __QNXNTO__
        gcmkVERIFY_OK(
            gckOS_UnlockPages(os,
                              Node->Virtual.physical,
                              Node->Virtual.userPID,
                              Node->Virtual.bytes,
                              Node->Virtual.logical));
#else
        gcmkVERIFY_OK(
            gckOS_UnlockPages(os,
                              Node->Virtual.physical,
                              Node->Virtual.bytes,
                              Node->Virtual.logical));
#endif
    }

    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/* Main ioctl entry point for the galcore character device.
**
** Copies a DRIVER_ARGS descriptor from user space, then the embedded
** gcsHAL_INTERFACE command, dispatches it to the proper kernel/core, and
** copies the (possibly updated) interface back to user space.
**
** Returns 0 on success, -ERESTARTSYS when interrupted by a signal, and
** -ENOTTY on any other failure.
** NOTE(review): collapsing every error (bad args, copy faults, dispatch
** failures) to -ENOTTY hides the cause from user space - confirm intended. */
long drv_ioctl(
    struct file* filp,
    unsigned int ioctlCode,
    unsigned long arg
    )
{
    gceSTATUS status;
    gcsHAL_INTERFACE iface;
    gctUINT32 copyLen;
    DRIVER_ARGS drvArgs;
    gckGALDEVICE device;
    gcsHAL_PRIVATE_DATA_PTR data;
    gctINT32 i, count;
    gckVIDMEM_NODE nodeObject;

    /* NOTE(review): %08X used for pointer-sized values in the trace below;
    ** truncates on 64-bit. Trace-only, no functional impact. */
    gcmkHEADER_ARG(
        "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
        filp, ioctlCode, arg
        );

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Per-open private data established by drv_open. */
    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Only the two HAL interface ioctls are supported. */
    if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
    &&  (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): unknown command %d\n",
            __FUNCTION__, __LINE__,
            ioctlCode
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Get the drvArgs.  copy_from_user returns the number of bytes NOT
    ** copied, so non-zero means a fault. */
    copyLen = copy_from_user(
        &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of the input arguments.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Now bring in the gcsHAL_INTERFACE structure.  Reject any size mismatch
    ** (user/kernel structure version skew). */
    if ((drvArgs.InputBufferSize  != sizeof(gcsHAL_INTERFACE))
    ||  (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): input or/and output structures are invalid.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    copyLen = copy_from_user(
        &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of input HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    if (iface.command == gcvHAL_CHIP_INFO)
    {
        /* CHIP_INFO is answered here directly: enumerate present cores. */
        count = 0;
        for (i = 0; i < gcdMAX_GPU_COUNT; i++)
        {
            if (device->kernels[i] != gcvNULL)
            {
#if gcdENABLE_VG
                if (i == gcvCORE_VG)
                {
                    iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
                }
                else
#endif
                {
                    gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
                                                      &iface.u.ChipInfo.types[count]));
                }
                count++;
            }
        }

        iface.u.ChipInfo.count = count;
        iface.status = status = gcvSTATUS_OK;
    }
    else
    {
        /* Bounds-check the user-supplied hardware type before indexing
        ** coreMapping (8 entries). */
        if (iface.hardwareType > 7)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): unknown hardwareType %d\n",
                __FUNCTION__, __LINE__,
                iface.hardwareType
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

#if gcdENABLE_VG
        if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
        {
            status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
                                        (ioctlCode == IOCTL_GCHAL_INTERFACE),
                                        &iface);
        }
        else
#endif
        {
            status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
                                        (ioctlCode == IOCTL_GCHAL_INTERFACE),
                                        &iface);
        }
    }

    /* Redo system call after pending signal is handled. */
    if (status == gcvSTATUS_INTERRUPTED)
    {
        gcmkFOOTER();
        return -ERESTARTSYS;
    }

    if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
    {
        gcuVIDMEM_NODE_PTR node;
        gctUINT32 processID;

        gckOS_GetProcessID(&processID);

        gcmkONERROR(gckVIDMEM_HANDLE_Lookup(device->kernels[device->coreMapping[iface.hardwareType]],
                                processID,
                                (gctUINT32)iface.u.LockVideoMemory.node,
                                &nodeObject));
        node = nodeObject->node;

        /* Special case for mapped memory: rewrite the kernel logical address
        ** into the equivalent offset inside the user's mmap of the contiguous
        ** region. */
        if ((data->mappedMemory != gcvNULL)
        &&  (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        )
        {
            /* Compute offset into mapped memory. */
            gctUINT32 offset
                = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
                - (gctUINT8 *) device->contiguousBase;

            /* Compute offset into user-mapped region. */
            iface.u.LockVideoMemory.memory =
                gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
        }
    }

    /* Copy data back to the user. */
    copyLen = copy_to_user(
        gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of output HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    gcmkFOOTER();
    return -ENOTTY;
}
/******************************************************************************* ** ** gckGALDEVICE_Construct ** ** Constructor. ** ** INPUT: ** ** OUTPUT: ** ** gckGALDEVICE * Device ** Pointer to a variable receiving the gckGALDEVICE object pointer on ** success. */ gceSTATUS gckGALDEVICE_Construct( IN gctINT IrqLine, IN gctUINT32 RegisterMemBase, IN gctSIZE_T RegisterMemSize, IN gctINT IrqLine2D, IN gctUINT32 RegisterMemBase2D, IN gctSIZE_T RegisterMemSize2D, IN gctINT IrqLineVG, IN gctUINT32 RegisterMemBaseVG, IN gctSIZE_T RegisterMemSizeVG, IN gctUINT32 ContiguousBase, IN gctSIZE_T ContiguousSize, IN gctSIZE_T BankSize, IN gctINT FastClear, IN gctINT Compression, IN gctUINT32 PhysBaseAddr, IN gctUINT32 PhysSize, IN gctINT Signal, IN gctUINT LogFileSize, IN struct device *pdev, IN gctINT PowerManagement, OUT gckGALDEVICE *Device ) { gctUINT32 internalBaseAddress = 0, internalAlignment = 0; gctUINT32 externalBaseAddress = 0, externalAlignment = 0; gctUINT32 horizontalTileSize, verticalTileSize; struct resource* mem_region; gctUINT32 physAddr; gctUINT32 physical; gckGALDEVICE device; gceSTATUS status; gctINT32 i; gceHARDWARE_TYPE type; gckDB sharedDB = gcvNULL; gckKERNEL kernel = gcvNULL; gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u " "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u " "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u " "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu " "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d", IrqLine, RegisterMemBase, RegisterMemSize, IrqLine2D, RegisterMemBase2D, RegisterMemSize2D, IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG, ContiguousBase, ContiguousSize, BankSize, FastClear, Compression, PhysBaseAddr, PhysSize, Signal); /* Allocate device structure. 
*/ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN); if (!device) { gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); } memset(device, 0, sizeof(struct _gckGALDEVICE)); device->dbgnode = gcvNULL; if(LogFileSize != 0) { if(gckDebugFileSystemCreateNode(LogFileSize,PARENT_FILE,DEBUG_FILE,&(device->dbgnode)) != 0) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to create the debug file system %s/%s \n", __FUNCTION__, __LINE__, PARENT_FILE, DEBUG_FILE ); } else { /*Everything is OK*/ gckDebugFileSystemSetCurrentNode(device->dbgnode); } } #ifdef CONFIG_PM /*Init runtime pm for gpu*/ pm_runtime_enable(pdev); device->pmdev = pdev; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) /*get gpu regulator*/ device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu"); if (IS_ERR(device->gpu_regulator)) { gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to get gpu regulator %s/%s \n", __FUNCTION__, __LINE__, PARENT_FILE, DEBUG_FILE); gcmkONERROR(gcvSTATUS_NOT_FOUND); } #endif /*Initialize the clock structure*/ if (IrqLine != -1) { device->clk_3d_core = clk_get(pdev, "gpu3d_clk"); if (!IS_ERR(device->clk_3d_core)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) if (cpu_is_mx6q()) { device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk"); if (IS_ERR(device->clk_3d_shader)) { IrqLine = -1; clk_put(device->clk_3d_core); device->clk_3d_core = NULL; device->clk_3d_shader = NULL; gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n"); } } #else device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk"); device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk"); if (IS_ERR(device->clk_3d_shader)) { IrqLine = -1; clk_put(device->clk_3d_core); device->clk_3d_core = NULL; device->clk_3d_shader = NULL; gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n"); } #endif } else { IrqLine = -1; device->clk_3d_core = NULL; gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n"); } } if ((IrqLine2D != -1) || (IrqLineVG 
!= -1)) { device->clk_2d_core = clk_get(pdev, "gpu2d_clk"); if (IS_ERR(device->clk_2d_core)) { IrqLine2D = -1; IrqLineVG = -1; device->clk_2d_core = NULL; gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n"); } else { if (IrqLine2D != -1) { device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk"); if (IS_ERR(device->clk_2d_axi)) { device->clk_2d_axi = NULL; IrqLine2D = -1; gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n"); } } if (IrqLineVG != -1) { device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk"); if (IS_ERR(device->clk_vg_axi)) { IrqLineVG = -1; device->clk_vg_axi = NULL; gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n"); } } } } if (IrqLine != -1) { device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase; device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize; } if (IrqLine2D != -1) { device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D; device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D; } if (IrqLineVG != -1) { device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG; device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG; } device->requestedContiguousBase = 0; device->requestedContiguousSize = 0; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { physical = device->requestedRegisterMemBases[i]; /* Set up register memory region. 
*/ if (physical != 0) { mem_region = request_mem_region( physical, device->requestedRegisterMemSizes[i], "galcore register region" ); if (mem_region == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to claim %lu bytes @ 0x%08X\n", __FUNCTION__, __LINE__, physical, device->requestedRegisterMemSizes[i] ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->registerBases[i] = (gctPOINTER) ioremap_nocache( physical, device->requestedRegisterMemSizes[i]); if (device->registerBases[i] == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Unable to map %ld bytes @ 0x%08X\n", __FUNCTION__, __LINE__, physical, device->requestedRegisterMemSizes[i] ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } physical += device->requestedRegisterMemSizes[i]; } else { device->registerBases[i] = gcvNULL; } } /* Set the base address */ device->baseAddress = PhysBaseAddr; /* Construct the gckOS object. */ gcmkONERROR(gckOS_Construct(device, &device->os)); if (IrqLine != -1) { /* Construct the gckKERNEL object. */ gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_MAJOR, device, gcvNULL, &device->kernels[gcvCORE_MAJOR])); sharedDB = device->kernels[gcvCORE_MAJOR]->db; /* Initialize core mapping */ for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_MAJOR; } /* Setup the ISR manager. */ gcmkONERROR(gckHARDWARE_SetIsrManager( device->kernels[gcvCORE_MAJOR]->hardware, (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR, (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR, device )); gcmkONERROR(gckHARDWARE_SetFastClear( device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression )); gcmkONERROR(gckHARDWARE_SetPowerManagement( device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement )); #if COMMAND_PROCESSOR_VERSION == 1 /* Start the command queue. 
*/ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command)); #endif } else { device->kernels[gcvCORE_MAJOR] = gcvNULL; } if (IrqLine2D != -1) { gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_2D, device, sharedDB, &device->kernels[gcvCORE_2D])); if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db; /* Verify the hardware type */ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type)); if (type != gcvHARDWARE_2D) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Unexpected hardware type: %d\n", __FUNCTION__, __LINE__, type ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } /* Initialize core mapping */ if (device->kernels[gcvCORE_MAJOR] == gcvNULL) { for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_2D; } } else { device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D; } /* Setup the ISR manager. */ gcmkONERROR(gckHARDWARE_SetIsrManager( device->kernels[gcvCORE_2D]->hardware, (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D, (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D, device )); gcmkONERROR(gckHARDWARE_SetPowerManagement( device->kernels[gcvCORE_2D]->hardware, PowerManagement )); #if COMMAND_PROCESSOR_VERSION == 1 /* Start the command queue. */ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command)); #endif } else { device->kernels[gcvCORE_2D] = gcvNULL; } if (IrqLineVG != -1) { #if gcdENABLE_VG gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_VG, device, sharedDB, &device->kernels[gcvCORE_VG])); /* Initialize core mapping */ if (device->kernels[gcvCORE_MAJOR] == gcvNULL && device->kernels[gcvCORE_2D] == gcvNULL ) { for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_VG; } } else { device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG; } gcmkONERROR(gckVGHARDWARE_SetPowerManagement( device->kernels[gcvCORE_VG]->vg->hardware, PowerManagement )); #endif } else { device->kernels[gcvCORE_VG] = gcvNULL; } /* Initialize the ISR. 
*/ device->irqLines[gcvCORE_MAJOR] = IrqLine; device->irqLines[gcvCORE_2D] = IrqLine2D; device->irqLines[gcvCORE_VG] = IrqLineVG; /* Initialize the kernel thread semaphores. */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0); } device->signal = Signal; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->kernels[i] != gcvNULL) break; } if (i == gcdMAX_GPU_COUNT) { gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } #if gcdENABLE_VG if (i == gcvCORE_VG) { /* Query the ceiling of the system memory. */ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory( device->kernels[i]->vg->hardware, &device->systemMemorySize, &device->systemMemoryBaseAddress )); /* query the amount of video memory */ gcmkONERROR(gckVGHARDWARE_QueryMemory( device->kernels[i]->vg->hardware, &device->internalSize, &internalBaseAddress, &internalAlignment, &device->externalSize, &externalBaseAddress, &externalAlignment, &horizontalTileSize, &verticalTileSize )); } else #endif { /* Query the ceiling of the system memory. */ gcmkONERROR(gckHARDWARE_QuerySystemMemory( device->kernels[i]->hardware, &device->systemMemorySize, &device->systemMemoryBaseAddress )); /* query the amount of video memory */ gcmkONERROR(gckHARDWARE_QueryMemory( device->kernels[i]->hardware, &device->internalSize, &internalBaseAddress, &internalAlignment, &device->externalSize, &externalBaseAddress, &externalAlignment, &horizontalTileSize, &verticalTileSize )); } /* Grab the first availiable kernel */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->irqLines[i] != -1) { kernel = device->kernels[i]; break; } } /* Set up the internal memory region. */ if (device->internalSize > 0) { status = gckVIDMEM_Construct( device->os, internalBaseAddress, device->internalSize, internalAlignment, 0, &device->internalVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable internal heap. */ device->internalSize = 0; } else { /* Map internal memory. 
*/ device->internalLogical = (gctPOINTER) ioremap_nocache(physical, device->internalSize); if (device->internalLogical == gcvNULL) { gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical; device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical); physical += device->internalSize; } } if (device->externalSize > 0) { /* create the external memory heap */ status = gckVIDMEM_Construct( device->os, externalBaseAddress, device->externalSize, externalAlignment, 0, &device->externalVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable internal heap. */ device->externalSize = 0; } else { /* Map external memory. */ device->externalLogical = (gctPOINTER) ioremap_nocache(physical, device->externalSize); if (device->externalLogical == gcvNULL) { gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical; device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical); physical += device->externalSize; } } /* set up the contiguous memory */ device->contiguousSize = ContiguousSize; if (ContiguousSize > 0) { if (ContiguousBase == 0) { while (device->contiguousSize > 0) { /* Allocate contiguous memory. 
*/ status = _AllocateMemory( device, device->contiguousSize, &device->contiguousBase, &device->contiguousPhysical, &physAddr ); if (gcmIS_SUCCESS(status)) { device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical); status = gckVIDMEM_Construct( device->os, physAddr | device->systemMemoryBaseAddress, device->contiguousSize, 64, BankSize, &device->contiguousVidMem ); if (gcmIS_SUCCESS(status)) { break; } gcmkONERROR(_FreeMemory( device, device->contiguousBase, device->contiguousPhysical )); gcmRELEASE_NAME(device->contiguousPhysicalName); device->contiguousBase = gcvNULL; device->contiguousPhysical = gcvNULL; } if (device->contiguousSize <= (4 << 20)) { device->contiguousSize = 0; } else { device->contiguousSize -= (4 << 20); } } } else { /* Create the contiguous memory heap. */ status = gckVIDMEM_Construct( device->os, ContiguousBase | device->systemMemoryBaseAddress, ContiguousSize, 64, BankSize, &device->contiguousVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable contiguous memory pool. 
*/ device->contiguousVidMem = gcvNULL; device->contiguousSize = 0; } else { mem_region = request_mem_region( ContiguousBase, ContiguousSize, "galcore managed memory" ); if (mem_region == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to claim %ld bytes @ 0x%08X\n", __FUNCTION__, __LINE__, ContiguousSize, ContiguousBase ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->requestedContiguousBase = ContiguousBase; device->requestedContiguousSize = ContiguousSize; #if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG if (gcmIS_CORE_PRESENT(device, gcvCORE_VG)) { device->contiguousBase #if gcdPAGED_MEMORY_CACHEABLE = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize); #else = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize); #endif if (device->contiguousBase == gcvNULL) { device->contiguousVidMem = gcvNULL; device->contiguousSize = 0; gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } } #endif device->contiguousPhysical = gcvNULL; device->contiguousPhysicalName = 0; device->contiguousSize = ContiguousSize; device->contiguousMapped = gcvTRUE; } } }
/*******************************************************************************
**
**  gckHEAP_Allocate
**
**  Allocate data from the heap.
**
**  The allocator keeps a chain of heaps (gcskHEAP), each holding a singly
**  linked free list of gcskNODE nodes.  A request is first satisfied from an
**  existing free node; if none fits, the heap chain is compacted once and
**  rescanned; if that still fails, a brand-new heap is allocated from the OS
**  and the request is carved from it.  The internal size `bytes` always
**  includes the gcskNODE header, so a free node of `node->bytes` can hold a
**  request of up to `node->bytes` (header included) and the caller receives
**  the memory immediately after the header (`used + 1`).
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object.
**
**      IN gctSIZE_T Bytes
**          Number of byte to allocate.
**
**  OUTPUT:
**
**      gctPOINTER * Memory
**          Pointer to a variable that will hold the address of the allocated
**          memory.
*/
gceSTATUS
gckHEAP_Allocate(
    IN gckHEAP Heap,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER * Memory
    )
{
    gctBOOL acquired = gcvFALSE;     /* Tracks mutex ownership for OnError cleanup. */
    gcskHEAP_PTR heap;
    gceSTATUS status;
    gctSIZE_T bytes;                 /* Request size incl. gcskNODE header, 8-aligned. */
    gcskNODE_PTR node, used, prevFree = gcvNULL;
    gctPOINTER memory = gcvNULL;     /* Non-NULL only while a new heap is un-linked. */

    gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Determine number of bytes required for a node: the caller's request
    ** plus the gcskNODE header, rounded up to an 8-byte boundary. */
    bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Check if this allocation is bigger than the default allocation size. */
    if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
    {
        /* Adjust allocation size so the new heap (allocated below) is big
        ** enough; existing heaps cannot hold this request by definition. */
        Heap->allocationSize = bytes * 2;
    }
    else if (Heap->heap != gcvNULL)
    {
        gctINT i;

        /* 2 retries, since we might need to compact. */
        for (i = 0; i < 2; ++i)
        {
            /* Walk all the heaps. */
            for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
            {
                /* Check if this heap has enough bytes to hold the request. */
                if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
                {
                    prevFree = gcvNULL;

                    /* Walk the chain of free nodes. */
                    for (node = heap->freeList;
                         node != gcvNULL;
                         node = node->next
                    )
                    {
                        /* A node on the free list must never carry the
                        ** in-use sentinel. */
                        gcmkASSERT(node->next != gcdIN_USE);

                        /* Check if this free node has enough bytes. */
                        if (node->bytes >= bytes)
                        {
                            /* Use the node; jumps past the new-heap setup
                            ** with `heap`, `node` and `prevFree` valid. */
                            goto UseNode;
                        }

                        /* Save current free node for linked list management. */
                        prevFree = node;
                    }
                }
            }

            if (i == 0)
            {
                /* Nothing fit on the first pass: compact the heap chain and
                ** scan once more. */
                gcmkVERIFY_OK(_CompactKernelHeap(Heap));

#if gcmIS_DEBUG(gcdDEBUG_CODE)
                /* Debug-only heap statistics dump after compaction. */
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "===== KERNEL HEAP =====");
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of allocations : %12u",
                               Heap->allocCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of bytes allocated : %12llu",
                               Heap->allocBytes);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum allocation size : %12llu",
                               Heap->allocBytesMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Total number of bytes allocated : %12llu",
                               Heap->allocBytesTotal);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of heaps : %12u",
                               Heap->heapCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Heap memory in bytes : %12llu",
                               Heap->heapMemory);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum number of heaps : %12u",
                               Heap->heapCountMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum heap memory in bytes : %12llu",
                               Heap->heapMemoryMax);
#endif
            }
        }
    }

    /* No existing heap can satisfy the request: allocate a new heap.
    ** Release the mutex first so the (possibly blocking) OS allocation is
    ** not performed under the lock.  NOTE(review): another thread may add a
    ** heap in this window; this path does not re-scan, it simply uses the
    ** freshly allocated heap — presumably an accepted trade-off, confirm. */
    gcmkONERROR(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    acquired = gcvFALSE;

    /* Allocate a new heap. */
    gcmkONERROR(
        gckOS_AllocateMemory(Heap->os, Heap->allocationSize, &memory));

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                   "Allocated heap 0x%x (%lu bytes)",
                   memory, Heap->allocationSize);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Use the allocated memory as the heap. */
    heap = (gcskHEAP_PTR) memory;

    /* Insert this heap to the head of the chain. */
    heap->next = Heap->heap;
    heap->prev = gcvNULL;
    /* Usable size excludes the gcskHEAP header itself. */
    heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);

    if (heap->next != gcvNULL)
    {
        heap->next->prev = heap;
    }
    Heap->heap = heap;

    /* Mark the end of the heap: a zero-size terminator node at the very end
    ** of the allocation. */
    node = (gcskNODE_PTR)
        (
            (gctUINT8_PTR) heap + Heap->allocationSize - gcmSIZEOF(gcskNODE)
        );

    node->bytes = 0;
    node->next = gcvNULL;

    /* Create a free list: a single free node right after the heap header. */
    node = (gcskNODE_PTR) (heap + 1);
    heap->freeList = node;

    /* Initialize the free list.  The node covers everything between the heap
    ** header and the terminator node. */
    node->bytes = heap->size - gcmSIZEOF(gcskNODE);
    node->next = gcvNULL;

    /* No previous free (node is the head of the free list). */
    prevFree = gcvNULL;

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profiling. */
    Heap->heapCount += 1;
    Heap->heapMemory += Heap->allocationSize;

    if (Heap->heapCount > Heap->heapCountMax)
    {
        Heap->heapCountMax = Heap->heapCount;
    }

    if (Heap->heapMemory > Heap->heapMemoryMax)
    {
        Heap->heapMemoryMax = Heap->heapMemory;
    }
#endif

UseNode:
    /* Common path: `heap` holds the target heap, `node` a free node with at
    ** least `bytes` bytes, `prevFree` the node before it (or gcvNULL). */
    gcmkASSERT(heap != gcvNULL);
    gcmkASSERT(node != gcvNULL);
    gcmkASSERT(node->bytes >= bytes);

    if (heap->prev != gcvNULL)
    {
        /* Unlink the heap from the linked list. */
        heap->prev->next = heap->next;

        if (heap->next != gcvNULL)
        {
            heap->next->prev = heap->prev;
        }

        /* Move the heap to the front of the list (MRU ordering so the next
        ** allocation scans this heap first). */
        heap->next = Heap->heap;
        heap->prev = gcvNULL;
        Heap->heap = heap;
        heap->next->prev = heap;
    }

    /* Check if there is enough free space left after usage for another free
    ** node. */
    if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
    {
        /* Allocated used space from the back of the free list, so the free
        ** node itself stays in place and no list relinking is needed. */
        used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);

        /* Adjust the number of free bytes. */
        node->bytes -= bytes;

        gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
    }
    else
    {
        /* Remove this free list from the chain. */
        if (prevFree == gcvNULL)
        {
            heap->freeList = node->next;
        }
        else
        {
            prevFree->next = node->next;
        }

        /* Consume the entire free node (including any slack too small to
        ** host another node header). */
        used = (gcskNODE_PTR) node;
        bytes = node->bytes;
    }

    /* Mark node as used; gcdIN_USE in `next` doubles as the in-use flag. */
    used->bytes = bytes;
    used->next = gcdIN_USE;

#if gcmIS_DEBUG(gcdDEBUG_CODE)
    used->timeStamp = ++Heap->timeStamp;
#endif

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profile counters. */
    Heap->allocCount += 1;
    Heap->allocBytes += bytes;
    Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
    Heap->allocBytesTotal += bytes;
#endif

    /* Release the mutex. */
    gcmkVERIFY_OK(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    /* Return pointer to memory (just past the node header). */
    *Memory = used + 1;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));
    }

    if (memory != gcvNULL)
    {
        /* Free the heap memory that was never linked into the chain. */
        gckOS_FreeMemory(Heap->os, memory);
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
static int drv_mmap( struct file* filp, struct vm_area_struct* vma ) { gceSTATUS status = gcvSTATUS_OK; gcsHAL_PRIVATE_DATA_PTR data; gckGALDEVICE device; gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma); if (filp == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): filp is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } data = filp->private_data; if (data == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): private_data is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } device = data->device; if (device == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): device is NULL\n", __FUNCTION__, __LINE__ ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } #if !gcdPAGED_MEMORY_CACHEABLE vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_flags |= gcdVM_FLAGS; #endif vma->vm_pgoff = 0; if (device->contiguousMapped) { unsigned long size = vma->vm_end - vma->vm_start; int ret = 0; /*####modified for marvell-bg2*/ if (size > device->contiguousSize) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Invalid mapping size. size (%d) too large >= %d\n", __FUNCTION__, __LINE__, size, device->contiguousSize ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } /*####end for marvell-bg2*/ ret = io_remap_pfn_range( vma, vma->vm_start, device->requestedContiguousBase >> PAGE_SHIFT, size, vma->vm_page_prot ); if (ret != 0) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): io_remap_pfn_range failed %d\n", __FUNCTION__, __LINE__, ret ); data->mappedMemory = gcvNULL; gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } data->mappedMemory = (gctPOINTER) vma->vm_start; /* Success. */ gcmkFOOTER_NO(); return 0; }