/*******************************************************************************
**
**  gckKERNEL_FindProcessDB
**
**  Find a record from a process database.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to a gckKERNEL object.
**
**      gctUINT32 ProcessID
**          Process ID used to identify the database.
**
**      gctUINT32 ThreadID
**          Thread ID of the calling thread.
**
**      gceDATABASE_TYPE Type
**          Type of the record to find.
**
**      gctPOINTER Pointer
**          Data of the record to find.
**
**  OUTPUT:
**
**      gcsDATABASE_RECORD_PTR Record
**          Copy of record.
*/
gceSTATUS
gckKERNEL_FindProcessDB(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gctUINT32 ThreadID,
    IN gceDATABASE_TYPE Type,
    IN gctPOINTER Pointer,
    OUT gcsDATABASE_RECORD_PTR Record
    )
{
    gceSTATUS status;
    gcsDATABASE_PTR database;

    gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
                   Kernel, ProcessID, ThreadID, Type, Pointer);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);

    /* Find the database. */
    gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));

    /* Find the record. */
    gcmkONERROR(
        gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}
gceSTATUS
gckHEAP_ProfileStart(
    IN gckHEAP Heap
    )
{
    gcmkHEADER_ARG("Heap=0x%x", Heap);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);

    /* Zero the counters. */
    Heap->allocCount      = 0;
    Heap->allocBytes      = 0;
    Heap->allocBytesMax   = 0;
    Heap->allocBytesTotal = 0;
    Heap->heapCount       = 0;
    Heap->heapCountMax    = 0;
    Heap->heapMemory      = 0;
    Heap->heapMemoryMax   = 0;

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
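/* Illustrative sketch (not part of the driver): one way an allocation path
** could maintain the profiling counters that gckHEAP_ProfileStart clears
** above.  The structure and function names below are hypothetical
** simplifications; the real counters live in the gckHEAP object. */
struct heapProfileSketch
{
    unsigned long allocCount;      /* Number of allocations made.          */
    unsigned long allocBytes;      /* Bytes currently allocated.           */
    unsigned long allocBytesMax;   /* High-water mark of allocated bytes.  */
    unsigned long allocBytesTotal; /* Cumulative bytes ever allocated.     */
};

static void
heapProfileSketch_OnAllocate(struct heapProfileSketch * Profile, unsigned long Bytes)
{
    Profile->allocCount      += 1;
    Profile->allocBytes      += Bytes;
    Profile->allocBytesTotal += Bytes;

    if (Profile->allocBytes > Profile->allocBytesMax)
    {
        /* Record the peak so a later profile dump can report it. */
        Profile->allocBytesMax = Profile->allocBytes;
    }
}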
static gceSTATUS
_AllocateMemory(
    IN gckGALDEVICE Device,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER *Logical,
    OUT gctPHYS_ADDR *Physical,
    OUT gctUINT32 *PhysAddr
    )
{
    gceSTATUS status;

    gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);

    gcmkVERIFY_ARGUMENT(Device != NULL);
    gcmkVERIFY_ARGUMENT(Logical != NULL);
    gcmkVERIFY_ARGUMENT(Physical != NULL);
    gcmkVERIFY_ARGUMENT(PhysAddr != NULL);

    gcmkONERROR(gckOS_AllocateContiguous(
        Device->os, gcvFALSE, &Bytes, Physical, Logical
        ));

    *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle - Device->baseAddress;

    /* Success. */
    gcmkFOOTER_ARG(
        "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
        *Logical, *Physical, *PhysAddr
        );

    return gcvSTATUS_OK;

OnError:
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckHEAP_Construct
**
**  Construct a new gckHEAP object.
**
**  INPUT:
**
**      gckOS Os
**          Pointer to a gckOS object.
**
**      gctSIZE_T AllocationSize
**          Minimum size per arena.
**
**  OUTPUT:
**
**      gckHEAP * Heap
**          Pointer to a variable that will hold the pointer to the gckHEAP
**          object.
*/
gceSTATUS
gckHEAP_Construct(
    IN gckOS Os,
    IN gctSIZE_T AllocationSize,
    OUT gckHEAP * Heap
    )
{
    gceSTATUS status;
    gckHEAP heap = gcvNULL;
    gctPOINTER pointer = gcvNULL;

    gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Heap != gcvNULL);

    /* Allocate the gckHEAP object. */
    gcmkONERROR(gckOS_AllocateMemory(Os,
                                     gcmSIZEOF(struct _gckHEAP),
                                     &pointer));

    heap = pointer;

    /* Initialize the gckHEAP object. */
    heap->object.type    = gcvOBJ_HEAP;
    heap->os             = Os;
    heap->allocationSize = AllocationSize;
    heap->heap           = gcvNULL;
#if gcmIS_DEBUG(gcdDEBUG_CODE)
    heap->timeStamp      = 0;
#endif

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Zero the counters. */
    heap->allocCount      = 0;
    heap->allocBytes      = 0;
    heap->allocBytesMax   = 0;
    heap->allocBytesTotal = 0;
    heap->heapCount       = 0;
    heap->heapCountMax    = 0;
    heap->heapMemory      = 0;
    heap->heapMemoryMax   = 0;
#endif

    /* Create the mutex. */
    gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));

    /* Return the pointer to the gckHEAP object. */
    *Heap = heap;

    /* Success. */
    gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (heap != gcvNULL)
    {
        /* Free the heap structure. */
        gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
**  gckVGMMU_Construct
**
**  Construct a new gckVGMMU object.
**
**  INPUT:
**
**      gckVGKERNEL Kernel
**          Pointer to a gckVGKERNEL object.
**
**      gctSIZE_T MmuSize
**          Number of bytes for the page table.
**
**  OUTPUT:
**
**      gckVGMMU * Mmu
**          Pointer to a variable that receives the gckVGMMU object pointer.
*/
gceSTATUS gckVGMMU_Construct(
    IN gckVGKERNEL Kernel,
    IN gctSIZE_T MmuSize,
    OUT gckVGMMU * Mmu
    )
{
    gckOS os;
    gckVGHARDWARE hardware;
    gceSTATUS status;
    gckVGMMU mmu;
    gctUINT32 * pageTable;
    gctUINT32 i;

    gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(MmuSize > 0);
    gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);

    /* Extract the gckOS object pointer. */
    os = Kernel->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Extract the gckVGHARDWARE object pointer. */
    hardware = Kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    /* Allocate memory for the gckVGMMU object. */
    status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);

    if (status < 0)
    {
        /* Error. */
        gcmkFATAL(
            "%s(%d): could not allocate gckVGMMU object.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Initialize the gckVGMMU object. */
    mmu->object.type = gcvOBJ_MMU;
    mmu->os = os;
    mmu->hardware = hardware;

    /* Create the mutex. */
    status = gckOS_CreateMutex(os, &mmu->mutex);

    if (status < 0)
    {
        /* Roll back. */
        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        gcmkFOOTER();
        /* Error. */
        return status;
    }

    /* Allocate the page table. */
    mmu->pageTableSize = MmuSize;
    status = gckOS_AllocateContiguous(os,
                                      gcvFALSE,
                                      &mmu->pageTableSize,
                                      &mmu->pageTablePhysical,
                                      &mmu->pageTableLogical);

    if (status < 0)
    {
        /* Roll back. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));

        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        /* Error. */
        gcmkFATAL(
            "%s(%d): could not allocate page table.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Compute number of entries in page table. */
    mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32);
    mmu->entry = 0;

    /* Mark the entire page table as available. */
    pageTable = (gctUINT32 *) mmu->pageTableLogical;
    for (i = 0; i < mmu->entryCount; i++)
    {
        pageTable[i] = (gctUINT32)~0;
    }

    /* Set page table address. */
    status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);

    if (status < 0)
    {
        /* Free the page table. */
        gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
                                      mmu->pageTablePhysical,
                                      mmu->pageTableLogical,
                                      mmu->pageTableSize));

        /* Roll back. */
        gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));

        mmu->object.type = gcvOBJ_UNKNOWN;
        gcmkVERIFY_OK(gckOS_Free(os, mmu));

        /* Error. */
        gcmkFATAL(
            "%s(%d): could not program page table.",
            __FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        return status;
    }

    /* Return the gckVGMMU object pointer. */
    *Mmu = mmu;

    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_MMU,
        "%s(%d): %u entries at %p.(0x%08X)\n",
        __FUNCTION__, __LINE__,
        mmu->entryCount,
        mmu->pageTableLogical,
        mmu->pageTablePhysical
        );

    gcmkFOOTER_NO();
    /* Success. */
    return gcvSTATUS_OK;
}
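/* Illustrative sketch (not part of the driver): gckVGMMU_Construct marks every
** page table entry as ~0 to mean "available".  A hypothetical allocator could
** then claim the first available entry as shown below; the function name and
** return convention are assumptions made for this sketch only. */
static unsigned int
mmuSketch_ClaimEntry(unsigned int * PageTable, unsigned int EntryCount, unsigned int Physical)
{
    unsigned int i;

    for (i = 0; i < EntryCount; i++)
    {
        if (PageTable[i] == (unsigned int)~0)
        {
            /* Entry is free; program it with the physical page address. */
            PageTable[i] = Physical;
            return i;
        }
    }

    /* No free entry available. */
    return EntryCount;
}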
/*******************************************************************************
**
**  gckVIDMEM_Free
**
**  Free an allocated video memory node.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a gcuVIDMEM_NODE object.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_Free(
    IN gcuVIDMEM_NODE_PTR Node
    )
{
    gckVIDMEM memory = gcvNULL;
    gcuVIDMEM_NODE_PTR node;
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Node=0x%x", Node);

    /* Verify the arguments. */
    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        if (Node->VidMem.locked > 0)
        {
            gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
                           "Node 0x%x is locked (%d)",
                           Node, Node->VidMem.locked);

            /* Force unlock. */
            Node->VidMem.locked = 0;
        }

        /* Extract pointer to gckVIDMEM object owning the node. */
        memory = Node->VidMem.memory;

        /* Acquire the mutex. */
        gcmkONERROR(
            gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));

        acquired = gcvTRUE;

#ifdef __QNXNTO__
        /* Reset handle to 0. */
        Node->VidMem.logical = gcvNULL;
        Node->VidMem.handle = 0;

        /* Don't try to re-free an already freed node. */
        if ((Node->VidMem.nextFree == gcvNULL)
        &&  (Node->VidMem.prevFree == gcvNULL)
        )
#endif
        {
            /* Update the number of free bytes. */
            memory->freeBytes += Node->VidMem.bytes;

            /* Find the next free node. */
            for (node = Node->VidMem.next;
                 node->VidMem.nextFree == gcvNULL;
                 node = node->VidMem.next) ;

            /* Insert this node in the free list. */
            Node->VidMem.nextFree = node;
            Node->VidMem.prevFree = node->VidMem.prevFree;

            Node->VidMem.prevFree->VidMem.nextFree =
            node->VidMem.prevFree                  = Node;

            /* Is the next node a free node and not the sentinel? */
            if ((Node->VidMem.next == Node->VidMem.nextFree)
            &&  (Node->VidMem.next->VidMem.bytes != 0)
            )
            {
                /* Merge this node with the next node. */
                gcmkONERROR(_Merge(memory->os, node = Node));
                gcmkASSERT(node->VidMem.nextFree != node);
                gcmkASSERT(node->VidMem.prevFree != node);
            }

            /* Is the previous node a free node and not the sentinel? */
            if ((Node->VidMem.prev == Node->VidMem.prevFree)
            &&  (Node->VidMem.prev->VidMem.bytes != 0)
            )
            {
                /* Merge this node with the previous node. */
                gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
                gcmkASSERT(node->VidMem.nextFree != node);
                gcmkASSERT(node->VidMem.prevFree != node);
            }
        }

        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));

        /* Success. */
        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }

    /*************************** Virtual Memory *******************************/

    /* Verify the gckKERNEL object pointer. */
    gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);

#ifdef __QNXNTO__
    if (!Node->Virtual.unlockPending && (Node->Virtual.locked > 0))
#else
    if (!Node->Virtual.pending && (Node->Virtual.locked > 0))
#endif
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
                       "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
                       Node, Node->Virtual.locked);

        /* Force unlock. */
        Node->Virtual.locked = 0;
    }

#ifdef __QNXNTO__
    if (!Node->Virtual.freePending) { if (Node->Virtual.unlockPending)
#else
    if (Node->Virtual.pending)
#endif
    {
        gcmkASSERT(Node->Virtual.locked == 1);

        /* Schedule the node to be freed. */
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                       "gckVIDMEM_Free: Scheduling node 0x%x to be freed later",
                       Node);

        /* Schedule the video memory to be freed again. */
        gcmkONERROR(gckEVENT_FreeVideoMemory(Node->Virtual.kernel->event,
                                             Node,
                                             gcvKERNEL_PIXEL));

#ifdef __QNXNTO__
        Node->Virtual.freePending = gcvTRUE; }
#endif

        /* Success. */
        gcmkFOOTER_NO();
        return gcvSTATUS_SKIP;
    }

    else
    {
        /* Free the virtual memory. */
        gcmkVERIFY_OK(gckOS_FreePagedMemory(Node->Virtual.kernel->os,
                                            Node->Virtual.physical,
                                            Node->Virtual.bytes));

        /* Destroy the gcuVIDMEM_NODE union. */
        gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
    }

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
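/* Illustrative sketch (not part of the driver): the free-list handling in
** gckVIDMEM_Free relies on a circular, doubly linked list whose sentinel node
** has bytes == 0.  The simplified node and insert helper below model only the
** nextFree/prevFree links; all names are hypothetical. */
struct freeListSketch
{
    struct freeListSketch * nextFree;
    struct freeListSketch * prevFree;
    unsigned long           bytes;    /* 0 marks the sentinel. */
};

static void
freeListSketch_InsertBefore(struct freeListSketch * Next, struct freeListSketch * Node)
{
    /* Link Node into the circular free list just before Next.  In the driver,
    ** Next is found by walking the address-ordered neighbours until a node
    ** that is already on the free list (or the sentinel) is reached. */
    Node->nextFree           = Next;
    Node->prevFree           = Next->prevFree;
    Next->prevFree->nextFree = Node;
    Next->prevFree           = Node;
}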
/*******************************************************************************
**
**  gckVIDMEM_Construct
**
**  Construct a new gckVIDMEM object.
**
**  INPUT:
**
**      gckOS Os
**          Pointer to a gckOS object.
**
**      gctUINT32 BaseAddress
**          Base address for the video memory heap.
**
**      gctSIZE_T Bytes
**          Number of bytes in the video memory heap.
**
**      gctSIZE_T Threshold
**          Minimum number of bytes beyond an allocation before the node is
**          split.  Can be used as a minimum alignment requirement.
**
**      gctSIZE_T BankSize
**          Number of bytes per physical memory bank.  Used by bank
**          optimization.
**
**  OUTPUT:
**
**      gckVIDMEM * Memory
**          Pointer to a variable that will hold the pointer to the gckVIDMEM
**          object.
*/
gceSTATUS
gckVIDMEM_Construct(
    IN gckOS Os,
    IN gctUINT32 BaseAddress,
    IN gctSIZE_T Bytes,
    IN gctSIZE_T Threshold,
    IN gctSIZE_T BankSize,
    OUT gckVIDMEM * Memory
    )
{
    gckVIDMEM memory = gcvNULL;
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node;
    gctINT i, banks = 0;

    gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
                   "BankSize=%lu",
                   Os, BaseAddress, Bytes, Threshold, BankSize);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Allocate the gckVIDMEM object. */
    gcmkONERROR(
        gckOS_Allocate(Os,
                       gcmSIZEOF(struct _gckVIDMEM),
                       (gctPOINTER *) &memory));

    /* Initialize the gckVIDMEM object. */
    memory->object.type = gcvOBJ_VIDMEM;
    memory->os          = Os;

    /* Set video memory heap information. */
    memory->baseAddress = BaseAddress;
    memory->bytes       = Bytes;
    memory->freeBytes   = Bytes;
    memory->threshold   = Threshold;
    memory->mutex       = gcvNULL;

    BaseAddress = 0;

    /* Walk all possible banks. */
    for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
    {
        gctSIZE_T bytes;

        if (BankSize == 0)
        {
            /* Use all bytes for the first bank. */
            bytes = Bytes;
        }
        else
        {
            /* Compute number of bytes for this bank. */
            bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;

            if (bytes > Bytes)
            {
                /* Make sure we don't exceed the total number of bytes. */
                bytes = Bytes;
            }
        }

        if (bytes == 0)
        {
            /* Mark heap is not used. */
            memory->sentinel[i].VidMem.next     =
            memory->sentinel[i].VidMem.prev     =
            memory->sentinel[i].VidMem.nextFree =
            memory->sentinel[i].VidMem.prevFree = gcvNULL;
            continue;
        }

        /* Allocate one gcuVIDMEM_NODE union. */
        gcmkONERROR(
            gckOS_Allocate(Os,
                           gcmSIZEOF(gcuVIDMEM_NODE),
                           (gctPOINTER *) &node));

        /* Initialize gcuVIDMEM_NODE union. */
        node->VidMem.memory    = memory;

        node->VidMem.next      =
        node->VidMem.prev      =
        node->VidMem.nextFree  =
        node->VidMem.prevFree  = &memory->sentinel[i];

        node->VidMem.offset    = BaseAddress;
        node->VidMem.bytes     = bytes;
        node->VidMem.alignment = 0;
        node->VidMem.physical  = 0;
        node->VidMem.pool      = gcvPOOL_UNKNOWN;

        node->VidMem.locked    = 0;

#ifdef __QNXNTO__
        node->VidMem.logical   = gcvNULL;
        node->VidMem.handle    = 0;
#endif

        /* Initialize the linked list of nodes. */
        memory->sentinel[i].VidMem.next     =
        memory->sentinel[i].VidMem.prev     =
        memory->sentinel[i].VidMem.nextFree =
        memory->sentinel[i].VidMem.prevFree = node;

        /* Mark sentinel. */
        memory->sentinel[i].VidMem.bytes = 0;

        /* Adjust address for next bank. */
        BaseAddress += bytes;
        Bytes       -= bytes;
        banks       ++;
    }

    /* Assign all the bank mappings. */
    memory->mapping[gcvSURF_RENDER_TARGET]      = banks - 1;
    memory->mapping[gcvSURF_BITMAP]             = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_DEPTH]              = banks - 1;
    memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TEXTURE]            = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_VERTEX]             = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_INDEX]              = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TILE_STATUS]        = banks - 1;
    if (banks > 1) --banks;
    memory->mapping[gcvSURF_TYPE_UNKNOWN]       = 0;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] INDEX:         bank %d",
                  memory->mapping[gcvSURF_INDEX]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] VERTEX:        bank %d",
                  memory->mapping[gcvSURF_VERTEX]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] TEXTURE:       bank %d",
                  memory->mapping[gcvSURF_TEXTURE]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] RENDER_TARGET: bank %d",
                  memory->mapping[gcvSURF_RENDER_TARGET]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] DEPTH:         bank %d",
                  memory->mapping[gcvSURF_DEPTH]);
    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                  "[GALCORE] TILE_STATUS:   bank %d",
                  memory->mapping[gcvSURF_TILE_STATUS]);

    /* Allocate the mutex. */
    gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));

    /* Return pointer to the gckVIDMEM object. */
    *Memory = memory;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (memory != gcvNULL)
    {
        if (memory->mutex != gcvNULL)
        {
            /* Delete the mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
        }

        for (i = 0; i < banks; ++i)
        {
            /* Free the heap. */
            gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
            gcmkVERIFY_OK(gckOS_Free(Os, memory->sentinel[i].VidMem.next));
        }

        /* Free the object. */
        gcmkVERIFY_OK(gckOS_Free(Os, memory));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
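/* Illustrative sketch (not part of the driver): the bank walk above sizes each
** bank as gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress, so the first bank
** runs from BaseAddress up to the next BankSize boundary and later banks are
** exactly BankSize bytes.  The standalone arithmetic below assumes BankSize is
** a power of two, as gcmALIGN does; names are hypothetical. */
#include <stdio.h>

#define ALIGN_SKETCH(n, align) (((n) + (align) - 1) & ~((align) - 1))

static void
bankSplitSketch(unsigned int Base, unsigned int Total, unsigned int BankSize)
{
    while (Total > 0)
    {
        unsigned int bytes = (BankSize == 0)
            ? Total
            : ALIGN_SKETCH(Base + 1, BankSize) - Base;

        if (bytes > Total)
        {
            /* Never exceed the total number of bytes in the heap. */
            bytes = Total;
        }

        printf("bank at 0x%08X: %u bytes\n", Base, bytes);

        Base  += bytes;
        Total -= bytes;
    }
}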
/*******************************************************************************
**
**  gckVIDMEM_Unlock
**
**  Unlock a video memory node.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a locked gcuVIDMEM_NODE union.
**
**      gceSURF_TYPE Type
**          Type of surface to unlock.
**
**      gctBOOL * Asynchroneous
**          Pointer to a variable specifying whether the surface should be
**          unlocked asynchronously.  If gcvNULL, the video memory is unlocked
**          synchronously.
**
**  OUTPUT:
**
**      gctBOOL * Asynchroneous
**          Pointer to a variable receiving gcvTRUE if the unlock must be
**          scheduled asynchronously, or gcvFALSE if it completed immediately.
*/
gceSTATUS
gckVIDMEM_Unlock(
    IN gcuVIDMEM_NODE_PTR Node,
    IN gceSURF_TYPE Type,
    IN OUT gctBOOL * Asynchroneous
    )
{
    gceSTATUS status;
    gckKERNEL kernel;
    gckHARDWARE hardware;
    gctPOINTER buffer;
    gctSIZE_T requested, bufferSize;
    gckCOMMAND command = gcvNULL;
    gceKERNEL_FLUSH flush;
    gckOS os = gcvNULL;
    gctBOOL acquired = gcvFALSE;
    gctBOOL needRelease = gcvFALSE;
    gctBOOL pendingUnlock = gcvFALSE;

    gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
                   Node, Type, gcmOPT_VALUE(Asynchroneous));

    /* Verify the arguments. */
    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        if (Node->VidMem.locked <= 0)
        {
            /* The surface was not locked. */
            gcmkONERROR(gcvSTATUS_MEMORY_UNLOCKED);
        }

        /* Decrement the lock count. */
        Node->VidMem.locked --;

        if (Asynchroneous != gcvNULL)
        {
            /* No need for any events. */
            *Asynchroneous = gcvFALSE;
        }
    }

    /*************************** Virtual Memory *******************************/

    else
    {
        /* Verify the gckKERNEL object pointer. */
        kernel = Node->Virtual.kernel;
        gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);

        /* Verify the gckHARDWARE object pointer. */
        hardware = Node->Virtual.kernel->hardware;
        gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

        /* Verify the gckCOMMAND object pointer. */
        command = Node->Virtual.kernel->command;
        gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);

        if (Asynchroneous == gcvNULL)
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "gckVIDMEM_Unlock: Unlocking virtual node 0x%x (%d)",
                           Node,
                           Node->Virtual.locked);

            /* Get the gckOS object pointer. */
            os = kernel->os;
            gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

            /* Grab the mutex. */
            gcmkONERROR(
                gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));

            /* If we need to unlock a node from virtual memory we have to be
            ** very careful.  If the node is still inside the caches we
            ** might get a bus error later if the cache line needs to be
            ** replaced.  So - we have to flush the caches before we do
            ** anything.  We also need to stall to make sure the flush has
            ** happened.  However - when we get to this point we are inside
            ** the interrupt handler and we cannot just gckCOMMAND_Wait
            ** because it will wait forever.  So - what we do here is we
            ** verify the type of the surface, flush the appropriate cache,
            ** mark the node as flushed, and issue another unlock to unmap
            ** the MMU. */
            if (!Node->Virtual.contiguous
            &&  (Node->Virtual.locked == 1)
#ifdef __QNXNTO__
            &&  !Node->Virtual.unlockPending
#else
            &&  !Node->Virtual.pending
#endif
            )
            {
                if (Type == gcvSURF_BITMAP)
                {
                    /* Flush 2D cache. */
                    flush = gcvFLUSH_2D;
                }
                else if (Type == gcvSURF_RENDER_TARGET)
                {
                    /* Flush color cache. */
                    flush = gcvFLUSH_COLOR;
                }
                else if (Type == gcvSURF_DEPTH)
                {
                    /* Flush depth cache. */
                    flush = gcvFLUSH_DEPTH;
                }
                else
                {
                    /* No flush required. */
                    flush = (gceKERNEL_FLUSH) 0;
                }

                gcmkONERROR(
                    gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));

                if (requested != 0)
                {
                    gcmkONERROR(
                        gckCOMMAND_Reserve(command,
                                           requested,
                                           &buffer,
                                           &bufferSize));

                    needRelease = gcvTRUE;

                    gcmkONERROR(gckHARDWARE_Flush(hardware,
                                                  flush,
                                                  buffer,
                                                  &bufferSize));

                    gcmkONERROR(
                        gckEVENT_Unlock(Node->Virtual.kernel->event,
                                        gcvKERNEL_PIXEL,
                                        Node,
                                        Type));

                    /* Mark node as pending. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvTRUE;
#else
                    Node->Virtual.pending = gcvTRUE;
#endif

                    needRelease = gcvFALSE;

                    gcmkONERROR(gckCOMMAND_Execute(command, requested));

                    pendingUnlock = gcvTRUE;
                }
            }

            if (!pendingUnlock)
            {
                if (Node->Virtual.locked == 0)
                {
                    status = gcvSTATUS_MEMORY_UNLOCKED;
                    goto OnError;
                }

                /* Decrement lock count. */
                -- Node->Virtual.locked;

                /* See if we can unlock the resources. */
                if (Node->Virtual.locked == 0)
                {
                    /* Unlock the pages. */
#ifdef __QNXNTO__
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.userPID,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#else
                    gcmkONERROR(
                        gckOS_UnlockPages(os,
                                          Node->Virtual.physical,
                                          Node->Virtual.bytes,
                                          Node->Virtual.logical));
#endif

                    /* Free the page table. */
                    if (Node->Virtual.pageTable != gcvNULL)
                    {
                        gcmkONERROR(
                            gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                             Node->Virtual.pageTable,
                                             Node->Virtual.pageCount));

                        /* Mark page table as freed. */
                        Node->Virtual.pageTable = gcvNULL;
                    }

                    /* Mark node as unlocked. */
#ifdef __QNXNTO__
                    Node->Virtual.unlockPending = gcvFALSE;
#else
                    Node->Virtual.pending = gcvFALSE;
#endif
                }

                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                               "Unmapped virtual node 0x%x from 0x%08X",
                               Node, Node->Virtual.address);
            }

            /* Release the mutex. */
            gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
        }

        else
        {
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                           "Scheduled unlock for virtual node 0x%x",
                           Node);

            /* Schedule the surface to be unlocked. */
            *Asynchroneous = gcvTRUE;
        }
    }

    /* Success. */
    gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
    return gcvSTATUS_OK;

OnError:
    if (needRelease)
    {
        gcmkVERIFY_OK(gckCOMMAND_Release(command));
    }

    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
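/* Illustrative sketch (not part of the driver): for virtual nodes the code
** above performs a two-phase unlock.  The first synchronous call schedules a
** cache flush plus an unlock event and marks the node pending; when the event
** fires, unlock is called again, sees the pending flag, and only then drops
** the lock and unmaps the pages.  A much-simplified model of that state
** machine (all names hypothetical): */
struct virtualNodeSketch
{
    int locked;   /* Lock count.                                    */
    int pending;  /* Set once the flush/unlock event is scheduled.  */
};

static int
virtualNodeSketch_Unlock(struct virtualNodeSketch * Node)
{
    if (!Node->pending)
    {
        /* Phase 1: queue the cache flush and the unlock event; keep the
        ** lock so the pages stay mapped until the flush has completed. */
        Node->pending = 1;
        return 0;
    }

    /* Phase 2 (entered when the scheduled event runs): actually release. */
    Node->pending = 0;
    Node->locked -= 1;
    return 1;
}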
/*******************************************************************************
**
**  gckVIDMEM_FreeHandleMemory
**
**  Free all allocated video memory nodes for a handle.
**
**  INPUT:
**
**      gckVIDMEM Memory
**          Pointer to a gckVIDMEM object.
**
**      gctHANDLE Handle
**          Handle whose video memory nodes should be freed.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckVIDMEM_FreeHandleMemory(
    IN gckVIDMEM Memory,
    IN gctHANDLE Handle
    )
{
    gceSTATUS status;
    gctBOOL mutex = gcvFALSE;
    gcuVIDMEM_NODE_PTR node;
    gctINT i;
    gctUINT32 nodeCount = 0, byteCount = 0;
    gctBOOL again;

    gcmkHEADER_ARG("Memory=0x%x Handle=0x%x", Memory, Handle);

    gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);

    gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
    mutex = gcvTRUE;

    /* Walk all sentinels. */
    for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
    {
        /* Bail out of the heap if it is not used. */
        if (Memory->sentinel[i].VidMem.next == gcvNULL)
        {
            break;
        }

        do
        {
            again = gcvFALSE;

            /* Walk all the nodes until we reach the sentinel. */
            for (node = Memory->sentinel[i].VidMem.next;
                 node->VidMem.bytes != 0;
                 node = node->VidMem.next)
            {
                /* Free the node if it was allocated by Handle. */
                if (node->VidMem.handle == Handle)
                {
                    /* Unlock video memory. */
                    while (gckVIDMEM_Unlock(node, gcvSURF_TYPE_UNKNOWN, gcvNULL)
                        != gcvSTATUS_MEMORY_UNLOCKED)
                        ;

                    nodeCount++;
                    byteCount += node->VidMem.bytes;

                    /* Free video memory. */
                    gcmkVERIFY_OK(gckVIDMEM_Free(node));

                    /*
                     * Freeing may cause a merge which will invalidate our iteration.
                     * Don't be clever, just restart.
                     */
                    again = gcvTRUE;

                    break;
                }
            }
        }
        while (again);
    }

    gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (mutex)
    {
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
    }

    gcmkFOOTER();
    return status;
}
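/* Illustrative sketch (not part of the driver): gckVIDMEM_FreeHandleMemory
** restarts its walk after every free because freeing a node can merge it with
** its neighbours and invalidate the iterator.  The same restart-on-mutation
** pattern in isolation (names hypothetical): */
struct ownedNodeSketch
{
    struct ownedNodeSketch * next;
    unsigned long            bytes;  /* 0 marks the sentinel. */
    void *                   owner;
};

static void
ownedNodeSketch_FreeAll(struct ownedNodeSketch * Sentinel, void * Owner,
                        void (* FreeNode)(struct ownedNodeSketch *))
{
    int again;

    do
    {
        struct ownedNodeSketch * node;

        again = 0;

        for (node = Sentinel->next; node->bytes != 0; node = node->next)
        {
            if (node->owner == Owner)
            {
                /* Freeing may merge nodes and break the walk, so restart. */
                FreeNode(node);
                again = 1;
                break;
            }
        }
    }
    while (again);
}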
gceSTATUS
gckKERNEL_MapLogicalToPhysical(
    IN gckKERNEL Kernel,
    IN gctHANDLE Process,
    IN OUT gctPOINTER * Data
    )
{
    gctUINT i, oldest;
    gceSTATUS status;
    gctUINT32 baseAddress;

    gcmkHEADER_ARG("Kernel=0x%x Process=0x%x *Data=0x%x",
                   Kernel, Process, gcmOPT_POINTER(Data));

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Data != gcvNULL);

    /* Get base address. */
    gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));

    /* Walk all used cache slots. */
    for (i = oldest = 0; i < Kernel->cacheSlots; ++i)
    {
        if ((Kernel->cache[i].logical == *Data)
        &&  (Kernel->cache[i].process == Process)
        )
        {
            /* Bail out. */
            break;
        }

        if (i == 0)
        {
            /* First slot. */
            oldest = i;
        }

        /* Test if this cache slot is older. */
        else if (Kernel->cache[i].stamp < Kernel->cache[oldest].stamp)
        {
            oldest = i;
        }
    }

    /* See if we had a match. */
    if (i == Kernel->cacheSlots)
    {
        /* See if there is room in the cache. */
        if (i < gcmCOUNTOF(Kernel->cache))
        {
            /* Just append to the cache. */
            i = Kernel->cacheSlots++;
        }

        else
        {
            /* Evict the oldest cache line. */
            i = oldest;
        }

        /* Initialize the cache line. */
        Kernel->cache[i].logical = *Data;
        Kernel->cache[i].process = Process;

        /* Map the logical address to a DMA address. */
        gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os,
                                             *Data,
                                             &Kernel->cache[i].dma));

        if (baseAddress != 0)
        {
            gctBOOL needBase;

            /* Does this state load need a base address? */
            gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
                                                    ((gctUINT32_PTR) Data)[-1],
                                                    &needBase));

            if (needBase)
            {
                /* Add the base address. */
                Kernel->cache[i].dma += baseAddress;
            }
        }
    }

    /* Increment time stamp of the cache slot. */
    Kernel->cache[i].stamp = Kernel->cacheTimeStamp++;

    /* Return DMA address. */
    *Data = gcmINT2PTR(Kernel->cache[i].dma);

    /* Success. */
    gcmkFOOTER_ARG("*Data=0x%08x", *Data);
    return gcvSTATUS_OK;

OnError:
    gcmkLOG_ERROR_STATUS();
    /* Return the status. */
    gcmkFOOTER();
    return status;
}
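/* Illustrative sketch (not part of the driver): gckKERNEL_MapLogicalToPhysical
** keeps logical-to-DMA translations in a small array and, when the array is
** full, evicts the slot with the oldest time stamp.  A standalone version of
** that lookup/evict policy; the structure and constants are hypothetical. */
#define CACHE_SLOTS_SKETCH 8

struct mapCacheEntrySketch
{
    const void *  logical;
    unsigned long dma;
    unsigned long stamp;
};

struct mapCacheSketch
{
    struct mapCacheEntrySketch entry[CACHE_SLOTS_SKETCH];
    unsigned int               used;
    unsigned long              timeStamp;
};

static struct mapCacheEntrySketch *
mapCacheSketch_Lookup(struct mapCacheSketch * Cache, const void * Logical)
{
    unsigned int i, oldest = 0;

    for (i = 0; i < Cache->used; i++)
    {
        if (Cache->entry[i].logical == Logical)
        {
            /* Hit. */
            break;
        }

        if (Cache->entry[i].stamp < Cache->entry[oldest].stamp)
        {
            oldest = i;
        }
    }

    if (i == Cache->used)
    {
        /* Miss: append while there is room, otherwise evict the oldest slot.
        ** The caller is expected to fill in the real DMA address afterwards. */
        i = (Cache->used < CACHE_SLOTS_SKETCH) ? Cache->used++ : oldest;
        Cache->entry[i].logical = Logical;
        Cache->entry[i].dma     = 0;
    }

    /* Refresh the time stamp on every hit or insert. */
    Cache->entry[i].stamp = Cache->timeStamp++;
    return &Cache->entry[i];
}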
/*******************************************************************************
**
**  gckGALDEVICE_Construct
**
**  Constructor.
**
**  INPUT:
**
**  OUTPUT:
**
**      gckGALDEVICE * Device
**          Pointer to a variable receiving the gckGALDEVICE object pointer on
**          success.
*/
gceSTATUS
gckGALDEVICE_Construct(
    IN gctINT IrqLine,
    IN gctUINT32 RegisterMemBase,
    IN gctSIZE_T RegisterMemSize,
    IN gctINT IrqLine2D,
    IN gctUINT32 RegisterMemBase2D,
    IN gctSIZE_T RegisterMemSize2D,
    IN gctINT IrqLineVG,
    IN gctUINT32 RegisterMemBaseVG,
    IN gctSIZE_T RegisterMemSizeVG,
    IN gctUINT32 ContiguousBase,
    IN gctSIZE_T ContiguousSize,
    IN gctSIZE_T BankSize,
    IN gctINT FastClear,
    IN gctINT Compression,
    IN gctUINT32 PhysBaseAddr,
    IN gctUINT32 PhysSize,
    IN gctINT Signal,
    IN gctUINT LogFileSize,
    IN struct device *pdev,
    IN gctINT PowerManagement,
    OUT gckGALDEVICE *Device
    )
{
    gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
    gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
    gctUINT32 horizontalTileSize, verticalTileSize;
    struct resource* mem_region;
    gctUINT32 physAddr;
    gctUINT32 physical;
    gckGALDEVICE device;
    gceSTATUS status;
    gctINT32 i;
    gceHARDWARE_TYPE type;
    gckDB sharedDB = gcvNULL;
    gckKERNEL kernel = gcvNULL;

    gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
                   "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
                   "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
                   "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
                   "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
                   IrqLine, RegisterMemBase, RegisterMemSize,
                   IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
                   IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
                   ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
                   PhysBaseAddr, PhysSize, Signal);

    /* Allocate device structure. */
    device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);

    if (!device)
    {
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    memset(device, 0, sizeof(struct _gckGALDEVICE));

    device->dbgnode = gcvNULL;

    if (LogFileSize != 0)
    {
        if (gckDebugFileSystemCreateNode(LogFileSize, PARENT_FILE, DEBUG_FILE, &(device->dbgnode)) != 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Failed to create the debug file system %s/%s\n",
                __FUNCTION__, __LINE__,
                PARENT_FILE, DEBUG_FILE
                );
        }
        else
        {
            /* Everything is OK. */
            gckDebugFileSystemSetCurrentNode(device->dbgnode);
        }
    }
#ifdef CONFIG_PM
    /* Initialize runtime power management for the GPU. */
    pm_runtime_enable(pdev);
    device->pmdev = pdev;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    /* Get the GPU regulator. */
    device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
    if (IS_ERR(device->gpu_regulator)) {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
                       "%s(%d): Failed to get the GPU regulator %s/%s\n",
                       __FUNCTION__, __LINE__,
                       PARENT_FILE, DEBUG_FILE);
        gcmkONERROR(gcvSTATUS_NOT_FOUND);
    }
#endif
    /* Initialize the clock structure. */
    if (IrqLine != -1) {
        device->clk_3d_core = clk_get(pdev, "gpu3d_clk");
        if (!IS_ERR(device->clk_3d_core)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
            if (cpu_is_mx6q()) {
                device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
                if (IS_ERR(device->clk_3d_shader)) {
                    IrqLine = -1;
                    clk_put(device->clk_3d_core);
                    device->clk_3d_core = NULL;
                    device->clk_3d_shader = NULL;
                    gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
                }
            }
#else
            device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
            device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
            if (IS_ERR(device->clk_3d_shader)) {
                IrqLine = -1;
                clk_put(device->clk_3d_core);
                device->clk_3d_core = NULL;
                device->clk_3d_shader = NULL;
                gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
            }
#endif
        } else {
            IrqLine = -1;
            device->clk_3d_core = NULL;
            gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
        }
    }
    if ((IrqLine2D != -1) || (IrqLineVG != -1)) {
        device->clk_2d_core = clk_get(pdev, "gpu2d_clk");
        if (IS_ERR(device->clk_2d_core)) {
            IrqLine2D = -1;
            IrqLineVG = -1;
            device->clk_2d_core = NULL;
            gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
        } else {
            if (IrqLine2D != -1) {
                device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
                if (IS_ERR(device->clk_2d_axi)) {
                    device->clk_2d_axi = NULL;
                    IrqLine2D = -1;
                    gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
                }
            }
            if (IrqLineVG != -1) {
                device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
                if (IS_ERR(device->clk_vg_axi)) {
                    IrqLineVG = -1;
                    device->clk_vg_axi = NULL;
                    gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
                }
            }
        }
    }

    if (IrqLine != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_MAJOR]    = RegisterMemBase;
        device->requestedRegisterMemSizes[gcvCORE_MAJOR]    = RegisterMemSize;
    }

    if (IrqLine2D != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_2D]       = RegisterMemBase2D;
        device->requestedRegisterMemSizes[gcvCORE_2D]       = RegisterMemSize2D;
    }

    if (IrqLineVG != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_VG]       = RegisterMemBaseVG;
        device->requestedRegisterMemSizes[gcvCORE_VG]       = RegisterMemSizeVG;
    }

    device->requestedContiguousBase  = 0;
    device->requestedContiguousSize  = 0;


    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        physical = device->requestedRegisterMemBases[i];

        /* Set up register memory region. */
        if (physical != 0)
        {
            mem_region = request_mem_region(
                physical, device->requestedRegisterMemSizes[i], "galcore register region"
                );

            if (mem_region == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    device->requestedRegisterMemSizes[i], physical
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->registerBases[i] = (gctPOINTER) ioremap_nocache(
                physical, device->requestedRegisterMemSizes[i]);

            if (device->registerBases[i] == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    device->requestedRegisterMemSizes[i], physical
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            physical += device->requestedRegisterMemSizes[i];
        }
        else
        {
            device->registerBases[i] = gcvNULL;
        }
    }

    /* Set the base address */
    device->baseAddress = PhysBaseAddr;

    /* Construct the gckOS object. */
    gcmkONERROR(gckOS_Construct(device, &device->os));

    if (IrqLine != -1)
    {
        /* Construct the gckKERNEL object. */
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_MAJOR, device,
            gcvNULL, &device->kernels[gcvCORE_MAJOR]));

        sharedDB = device->kernels[gcvCORE_MAJOR]->db;

        /* Initialize core mapping */
        for (i = 0; i < 8; i++)
        {
            device->coreMapping[i] = gcvCORE_MAJOR;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_MAJOR]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetFastClear(
            device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
            ));

        gcmkONERROR(gckHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_MAJOR] = gcvNULL;
    }

    if (IrqLine2D != -1)
    {
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_2D, device,
            sharedDB, &device->kernels[gcvCORE_2D]));

        if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;

        /* Verify the hardware type */
        gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));

        if (type != gcvHARDWARE_2D)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Unexpected hardware type: %d\n",
                __FUNCTION__, __LINE__,
                type
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_2D;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_2D]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_2D]->hardware, PowerManagement
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_2D] = gcvNULL;
    }

    if (IrqLineVG != -1)
    {
#if gcdENABLE_VG
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_VG, device,
            sharedDB, &device->kernels[gcvCORE_VG]));
        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL
            && device->kernels[gcvCORE_2D] == gcvNULL
            )
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_VG;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
        }


        gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_VG]->vg->hardware,
            PowerManagement
            ));
#endif
    }
    else
    {
        device->kernels[gcvCORE_VG] = gcvNULL;
    }

    /* Initialize the ISR. */
    device->irqLines[gcvCORE_MAJOR] = IrqLine;
    device->irqLines[gcvCORE_2D]    = IrqLine2D;
    device->irqLines[gcvCORE_VG]    = IrqLineVG;

    /* Initialize the kernel thread semaphores. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
    }

    device->signal = Signal;

    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL) break;
    }

    if (i == gcdMAX_GPU_COUNT)
    {
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

#if gcdENABLE_VG
    if (i == gcvCORE_VG)
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
                device->kernels[i]->vg->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));
        /* Query the amount of video memory. */
        gcmkONERROR(gckVGHARDWARE_QueryMemory(
            device->kernels[i]->vg->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }
    else
#endif
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckHARDWARE_QuerySystemMemory(
                device->kernels[i]->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));

        /* Query the amount of video memory. */
        gcmkONERROR(gckHARDWARE_QueryMemory(
            device->kernels[i]->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }


    /* Grab the first available kernel. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->irqLines[i] != -1)
        {
            kernel = device->kernels[i];
            break;
        }
    }

    /* Set up the internal memory region. */
    if (device->internalSize > 0)
    {
        status = gckVIDMEM_Construct(
            device->os,
            internalBaseAddress, device->internalSize, internalAlignment,
            0, &device->internalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->internalSize = 0;
        }
        else
        {
            /* Map internal memory. */
            device->internalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->internalSize);

            if (device->internalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
            device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
            physical += device->internalSize;
        }
    }

    if (device->externalSize > 0)
    {
        /* create the external memory heap */
        status = gckVIDMEM_Construct(
            device->os,
            externalBaseAddress, device->externalSize, externalAlignment,
            0, &device->externalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable external heap. */
            device->externalSize = 0;
        }
        else
        {
            /* Map external memory. */
            device->externalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->externalSize);

            if (device->externalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
            device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
            physical += device->externalSize;
        }
    }

    /* set up the contiguous memory */
    device->contiguousSize = ContiguousSize;

    if (ContiguousSize > 0)
    {
        if (ContiguousBase == 0)
        {
            while (device->contiguousSize > 0)
            {
                /* Allocate contiguous memory. */
                status = _AllocateMemory(
                    device,
                    device->contiguousSize,
                    &device->contiguousBase,
                    &device->contiguousPhysical,
                    &physAddr
                    );

                if (gcmIS_SUCCESS(status))
                {
                    device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
                    status = gckVIDMEM_Construct(
                        device->os,
                        physAddr | device->systemMemoryBaseAddress,
                        device->contiguousSize,
                        64,
                        BankSize,
                        &device->contiguousVidMem
                        );

                    if (gcmIS_SUCCESS(status))
                    {
                        break;
                    }

                    gcmkONERROR(_FreeMemory(
                        device,
                        device->contiguousBase,
                        device->contiguousPhysical
                        ));

                    gcmRELEASE_NAME(device->contiguousPhysicalName);
                    device->contiguousBase     = gcvNULL;
                    device->contiguousPhysical = gcvNULL;
                }

                if (device->contiguousSize <= (4 << 20))
                {
                    device->contiguousSize = 0;
                }
                else
                {
                    device->contiguousSize -= (4 << 20);
                }
            }
        }
        else
        {
            /* Create the contiguous memory heap. */
            status = gckVIDMEM_Construct(
                device->os,
                ContiguousBase | device->systemMemoryBaseAddress,
                ContiguousSize,
                64, BankSize,
                &device->contiguousVidMem
                );

            if (gcmIS_ERROR(status))
            {
                /* Error, disable contiguous memory pool. */
                device->contiguousVidMem = gcvNULL;
                device->contiguousSize   = 0;
            }
            else
            {
                mem_region = request_mem_region(
                    ContiguousBase, ContiguousSize, "galcore managed memory"
                    );

                if (mem_region == gcvNULL)
                {
                    gcmkTRACE_ZONE(
                        gcvLEVEL_ERROR, gcvZONE_DRIVER,
                        "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
                        __FUNCTION__, __LINE__,
                        ContiguousSize, ContiguousBase
                        );

                    gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                }

                device->requestedContiguousBase  = ContiguousBase;
                device->requestedContiguousSize  = ContiguousSize;

#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
                if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
                {
                    device->contiguousBase
#if gcdPAGED_MEMORY_CACHEABLE
                        = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
#else
                        = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
#endif
                    if (device->contiguousBase == gcvNULL)
                    {
                        device->contiguousVidMem = gcvNULL;
                        device->contiguousSize = 0;

                        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                    }
                }
#endif

                device->contiguousPhysical = gcvNULL;
                device->contiguousPhysicalName = 0;
                device->contiguousSize     = ContiguousSize;
                device->contiguousMapped   = gcvTRUE;
            }
        }
    }
/*******************************************************************************
**
**  gckKERNEL_MapVideoMemoryEx
**
**  Get the logical address for a hardware specific memory address for the
**  current process.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to a gckKERNEL object.
**
**      gceCORE Core
**          Core on which the address is used; selects the hardware object
**          used to split the address.
**
**      gctBOOL InUserSpace
**          gcvTRUE to map the memory into the user space.
**
**      gctUINT32 Address
**          Hardware specific memory address.
**
**  OUTPUT:
**
**      gctPOINTER * Logical
**          Pointer to a variable that will hold the logical address of the
**          specified memory address.
*/
gceSTATUS gckKERNEL_MapVideoMemoryEx(
    IN gckKERNEL Kernel,
    IN gceCORE Core,
    IN gctBOOL InUserSpace,
    IN gctUINT32 Address,
    OUT gctPOINTER * Logical
    )
{
	GCHAL * gchal;
	gcePOOL pool;
	gctUINT32 offset, base;
	gceSTATUS status;
	gctPOINTER logical;

    gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
                   Kernel, InUserSpace, Address);

	gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
			   "[ENTER] gckKERNEL_MapVideoMemory");

	/* Verify the arguments. */
	gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
	gcmkVERIFY_ARGUMENT(Logical != gcvNULL);

	/* Extract the pointer to the GCHAL class. */
	gchal = (GCHAL *) Kernel->context;

	do
	{
#if gcdENABLE_VG
		if (Core == gcvCORE_VG)
		{
			/* Split the memory address into a pool type and offset. */
			gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
											 Address,
											 &pool,
											 &offset));
		}
		else
#endif
		{
			/* Split the memory address into a pool type and offset. */
			gcmkERR_BREAK(gckHARDWARE_SplitMemory(Kernel->hardware,
											 Address,
											 &pool,
											 &offset));
		}
		/* Dispatch on pool. */
		switch (pool)
		{
		case gcvPOOL_LOCAL_INTERNAL:
			/* Internal memory. */
			logical = gchal->GetInternalLogical();
			break;

		case gcvPOOL_LOCAL_EXTERNAL:
			/* External memory. */
			logical = gchal->GetExternalLogical();
			break;

		case gcvPOOL_SYSTEM:
			/* System memory. */
#if UNDER_CE >= 600
			if (InUserSpace)
			{
				logical = gchal->GetProcessContiguousLogical();
			}
			else
			{
				logical = gchal->GetContiguousLogical();
			}
#else
			logical = gchal->GetContiguousLogical();
#endif

#if gcdENABLE_VG
			if (Core == gcvCORE_VG)
			{
				gcmkVERIFY_OK(gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
												 gchal->GetContiguousHeap()->baseAddress,
												 &pool,
												 &base));
			}
			else
#endif
			{
				gcmkVERIFY_OK(gckHARDWARE_SplitMemory(Kernel->hardware,
												 gchal->GetContiguousHeap()->baseAddress,
												 &pool,
												 &base));
			}
			offset -= base;
			break;

		default:
			/* Invalid memory pool. */
			gcmkFATAL("Unknown memory pool: %u", pool);
			gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
			return gcvSTATUS_INVALID_ARGUMENT;
		}

		/* Build logical address of specified address. */
		*Logical = reinterpret_cast<gctPOINTER>
			(static_cast<gctUINT8 *>(logical) + offset);
	}
	while (gcvFALSE);

	if (gcmIS_SUCCESS(status))
	{
		gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
				   "gckKERNEL_MapVideoMemory: Address 0x%08X maps to %p",
				   Address, *Logical);
	}

	gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
			   "[LEAVE] gckKERNEL_MapVideoMemory(%u)",
			   status);

	/* Return the status. */
    gcmkFOOTER();
	return status;
}
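/*
** Usage sketch (editor's addition, not part of the original driver): resolve
** a hardware address returned by gckVIDMEM_Lock into a CPU-visible pointer
** for the current process, mirroring the gcvHAL_LOCK_VIDEO_MEMORY path in
** the dispatcher further below.  The header name "gc_hal_kernel.h" is an
** assumption.
*/
#include "gc_hal_kernel.h"

static gceSTATUS
_MapLockedNodeSketch(
    IN gckKERNEL Kernel,
    IN gctUINT32 HardwareAddress,
    OUT gctPOINTER * CpuPointer
    )
{
    /* Map in kernel space (InUserSpace = gcvFALSE) on the major core;
    ** user-space callers would pass gcvTRUE instead. */
    return gckKERNEL_MapVideoMemoryEx(
        Kernel, gcvCORE_MAJOR, gcvFALSE, HardwareAddress, CpuPointer
        );
}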
/*******************************************************************************
**
**	gckKERNEL_GetVideoMemoryPool
**
**	Get the gckVIDMEM object belonging to the specified pool.
**
**	INPUT:
**
**		gckKERNEL Kernel
**			Pointer to an gckKERNEL object.
**
**		gcePOOL Pool
**			Pool to query gckVIDMEM object for.
**
**	OUTPUT:
**
**		gckVIDMEM * VideoMemory
**			Pointer to a variable that will hold the pointer to the gckVIDMEM
**			object belonging to the requested pool.
*/
gceSTATUS gckKERNEL_GetVideoMemoryPool(
	IN gckKERNEL Kernel,
	IN gcePOOL Pool,
	OUT gckVIDMEM * VideoMemory
	)
{
	GCHAL * gchal;
	gckVIDMEM videoMemory;
	gceSTATUS status;

    gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);

	gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
			   "[ENTER] gckHARDWARE_GetVideoMemoryPool");

	/* Verify the arguments. */
	gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
	gcmkVERIFY_ARGUMENT(VideoMemory != gcvNULL);

    /* Extract the pointer to the GCHAL class. */
    gchal = (GCHAL *) Kernel->context;

	/* Dispatch on pool. */
	switch (Pool)
	{
	case gcvPOOL_LOCAL_INTERNAL:
		/* Internal memory. */
		videoMemory = gchal->GetInternalHeap();
		break;

	case gcvPOOL_LOCAL_EXTERNAL:
		/* External memory. */
		videoMemory = gchal->GetExternalHeap();
		break;

	case gcvPOOL_SYSTEM:
		/* System memory. */
		videoMemory = gchal->GetContiguousHeap();
		break;

	default:
		/* Unknown pool. */
		videoMemory = gcvNULL;
		gcmkFATAL("Unknown memory pool: %u", Pool);
	}

	/* Return pointer to the gckVIDMEM object. */
	*VideoMemory = videoMemory;

	/* Determine the status. */
	status = (videoMemory == gcvNULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;

	if (gcmIS_SUCCESS(status))
	{
		gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
				   "gckHARDWARE_GetVideoMemoryPool: Pool %u starts at %p",
				   Pool, videoMemory);
	}

	gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_KERNEL,
			   "[LEAVE] gckHARDWARE_GetVideoMemoryPool(%u)",
			   status);

	/* Return the status. */
    gcmkFOOTER_ARG("status=%d, *VideoMemory=%p", status, gcmOPT_VALUE(VideoMemory));
	return status;
}
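/*
** Usage sketch (editor's addition, not part of the original driver): look up
** the gckVIDMEM heap backing the system pool before carving an allocation
** out of it.  Error handling follows the gcmkONERROR convention used
** throughout this file.
*/
static gceSTATUS
_GetSystemHeapSketch(
    IN gckKERNEL Kernel,
    OUT gckVIDMEM * Heap
    )
{
    gceSTATUS status;

    /* Fails with gcvSTATUS_OUT_OF_MEMORY if the pool was never constructed. */
    gcmkONERROR(gckKERNEL_GetVideoMemoryPool(Kernel, gcvPOOL_SYSTEM, Heap));

    return gcvSTATUS_OK;

OnError:
    return status;
}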
Example #14
/*******************************************************************************
**
**  gckGALDEVICE_Construct
**
**  Constructor.
**
**  INPUT:
**
**  OUTPUT:
**
**      gckGALDEVICE * Device
**          Pointer to a variable receiving the gckGALDEVICE object pointer on
**          success.
*/
gceSTATUS
gckGALDEVICE_Construct(
    IN gctINT IrqLine,
    IN gctUINT32 RegisterMemBase,
    IN gctSIZE_T RegisterMemSize,
    IN gctINT IrqLine2D,
    IN gctUINT32 RegisterMemBase2D,
    IN gctSIZE_T RegisterMemSize2D,
    IN gctINT IrqLineVG,
    IN gctUINT32 RegisterMemBaseVG,
    IN gctSIZE_T RegisterMemSizeVG,
    IN gctUINT32 ContiguousBase,
    IN gctSIZE_T ContiguousSize,
    IN gctSIZE_T BankSize,
    IN gctINT FastClear,
    IN gctINT Compression,
    IN gctUINT32 PhysBaseAddr,
    IN gctUINT32 PhysSize,
    IN gctINT Signal,
    OUT gckGALDEVICE *Device
    )
{
    gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
    gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
    gctUINT32 horizontalTileSize, verticalTileSize;
    struct resource* mem_region;
    gctUINT32 physAddr;
    gctUINT32 physical;
    gckGALDEVICE device;
    gceSTATUS status;
    gctINT32 i;
    gceHARDWARE_TYPE type;
    gckDB sharedDB = gcvNULL;

    gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
                   "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
                   "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
                   "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
                   "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
                   IrqLine, RegisterMemBase, RegisterMemSize,
                   IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
                   IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
                   ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
                   PhysBaseAddr, PhysSize, Signal);

    /* Allocate device structure. */
    device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);

    if (!device)
    {
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    memset(device, 0, sizeof(struct _gckGALDEVICE));

    if (IrqLine != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_MAJOR]    = RegisterMemBase;
        device->requestedRegisterMemSizes[gcvCORE_MAJOR]    = RegisterMemSize;
    }

    if (IrqLine2D != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_2D]       = RegisterMemBase2D;
        device->requestedRegisterMemSizes[gcvCORE_2D]       = RegisterMemSize2D;
    }

    if (IrqLineVG != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_VG]       = RegisterMemBaseVG;
        device->requestedRegisterMemSizes[gcvCORE_VG]       = RegisterMemSizeVG;
    }

    device->requestedContiguousBase  = 0;
    device->requestedContiguousSize  = 0;


    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        physical = device->requestedRegisterMemBases[i];

        /* Set up register memory region. */
        if (physical != 0)
        {
            mem_region = request_mem_region(
                physical, device->requestedRegisterMemSizes[i], "galcore register region"
                );

#if 0
            if (mem_region == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    device->requestedRegisterMemSizes[i], physical
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }
#endif

            device->registerBases[i] = (gctPOINTER) ioremap_nocache(
                physical, device->requestedRegisterMemSizes[i]);

            if (device->registerBases[i] == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    physical, device->requestedRegisterMemSizes[i]
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            physical += device->requestedRegisterMemSizes[i];
        }
        else
        {
            device->registerBases[i] = gcvNULL;
        }
    }

    /* Set the base address */
    device->baseAddress = PhysBaseAddr;

    /* Construct the gckOS object. */
    gcmkONERROR(gckOS_Construct(device, &device->os));

    if (IrqLine != -1)
    {
        /* Construct the gckKERNEL object. */
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_MAJOR, device,
            gcvNULL, &device->kernels[gcvCORE_MAJOR]));

        sharedDB = device->kernels[gcvCORE_MAJOR]->db;

        /* Initialize core mapping */
        for (i = 0; i < 8; i++)
        {
            device->coreMapping[i] = gcvCORE_MAJOR;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_MAJOR]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetFastClear(
            device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
            ));


#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_MAJOR] = gcvNULL;
    }

    if (IrqLine2D != -1)
    {
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_2D, device,
            sharedDB, &device->kernels[gcvCORE_2D]));

        if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;

        /* Verify the hardware type */
        gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));

        if (type != gcvHARDWARE_2D)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Unexpected hardware type: %d\n",
                __FUNCTION__, __LINE__,
                type
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_2D;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_2D]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
            device
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_2D] = gcvNULL;
    }

    if (IrqLineVG != -1)
    {
#if gcdENABLE_VG
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_VG, device,
            sharedDB, &device->kernels[gcvCORE_VG]));
        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL
            && device->kernels[gcvCORE_2D] == gcvNULL
            )
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_VG;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
        }

#endif
    }
    else
    {
        device->kernels[gcvCORE_VG] = gcvNULL;
    }

    /* Initialize the ISR. */
    device->irqLines[gcvCORE_MAJOR] = IrqLine;
    device->irqLines[gcvCORE_2D]    = IrqLine2D;
    device->irqLines[gcvCORE_VG]    = IrqLineVG;

    /* Initialize the kernel thread semaphores. */
    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
    }

    device->signal = Signal;

    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL) break;
    }

    if (i == gcdCORE_COUNT) gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);

#if gcdENABLE_VG
    if (i == gcvCORE_VG)
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
                device->kernels[i]->vg->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));
            /* query the amount of video memory */
        gcmkONERROR(gckVGHARDWARE_QueryMemory(
            device->kernels[i]->vg->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }
    else
#endif
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckHARDWARE_QuerySystemMemory(
                device->kernels[i]->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));

            /* query the amount of video memory */
        gcmkONERROR(gckHARDWARE_QueryMemory(
            device->kernels[i]->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }


    /* Set up the internal memory region. */
    if (device->internalSize > 0)
    {
        status = gckVIDMEM_Construct(
            device->os,
            internalBaseAddress, device->internalSize, internalAlignment,
            0, &device->internalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->internalSize = 0;
        }
        else
        {
            /* Map internal memory. */
            device->internalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->internalSize);

            if (device->internalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->internalPhysical = (gctPHYS_ADDR) physical;
            physical += device->internalSize;
        }
    }

    if (device->externalSize > 0)
    {
        /* create the external memory heap */
        status = gckVIDMEM_Construct(
            device->os,
            externalBaseAddress, device->externalSize, externalAlignment,
            0, &device->externalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable external heap. */
            device->externalSize = 0;
        }
        else
        {
            /* Map external memory. */
            device->externalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->externalSize);

            if (device->externalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->externalPhysical = (gctPHYS_ADDR) physical;
            physical += device->externalSize;
        }
    }

    /* set up the contiguous memory */
    device->contiguousSize = ContiguousSize;

    if (ContiguousSize > 0)
    {
        if (ContiguousBase == 0)
        {
            while (device->contiguousSize > 0)
            {
                /* Allocate contiguous memory. */
                status = _AllocateMemory(
                    device,
                    device->contiguousSize,
                    &device->contiguousBase,
                    &device->contiguousPhysical,
                    &physAddr
                    );

                if (gcmIS_SUCCESS(status))
                {
                    status = gckVIDMEM_Construct(
                        device->os,
                        physAddr | device->systemMemoryBaseAddress,
                        device->contiguousSize,
                        64,
                        BankSize,
                        &device->contiguousVidMem
                        );

                    if (gcmIS_SUCCESS(status))
                    {
                        break;
                    }

                    gcmkONERROR(_FreeMemory(
                        device,
                        device->contiguousBase,
                        device->contiguousPhysical
                        ));

                    device->contiguousBase     = gcvNULL;
                    device->contiguousPhysical = gcvNULL;
                }

                if (device->contiguousSize <= (4 << 20))
                {
                    device->contiguousSize = 0;
                }
                else
                {
                    device->contiguousSize -= (4 << 20);
                }
            }
        }
        else
        {
            /* Create the contiguous memory heap. */
            status = gckVIDMEM_Construct(
                device->os,
                (ContiguousBase - device->baseAddress) | device->systemMemoryBaseAddress,
                 ContiguousSize,
                64, BankSize,
                &device->contiguousVidMem
                );

            if (gcmIS_ERROR(status))
            {
                /* Error, disable contiguous memory pool. */
                device->contiguousVidMem = gcvNULL;
                device->contiguousSize   = 0;
            }
            else
            {
                mem_region = request_mem_region(
                    ContiguousBase, ContiguousSize, "galcore managed memory"
                    );

#if 0
                if (mem_region == gcvNULL)
                {
                    gcmkTRACE_ZONE(
                        gcvLEVEL_ERROR, gcvZONE_DRIVER,
                        "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
                        __FUNCTION__, __LINE__,
                        ContiguousSize, ContiguousBase
                        );

                    gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                }
#endif

                device->requestedContiguousBase  = ContiguousBase;
                device->requestedContiguousSize  = ContiguousSize;

#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
                if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
                {
                    device->contiguousBase
#if gcdPAGED_MEMORY_CACHEABLE
                        = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
#else
                        = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
#endif
                    if (device->contiguousBase == gcvNULL)
                    {
                        device->contiguousVidMem = gcvNULL;
                        device->contiguousSize = 0;

                        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                    }
                }
#endif

                device->contiguousPhysical = (gctPHYS_ADDR) ContiguousBase;
                device->contiguousSize     = ContiguousSize;
                device->contiguousMapped   = gcvTRUE;
            }
        }
    }
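/*
** Usage sketch (editor's addition, not part of the original driver): how the
** platform glue might call the constructor during probe.  All literal values
** below are placeholders; real drivers take them from module parameters or
** the board file.
*/
static gceSTATUS _ProbeSketch(gckGALDEVICE * Device)
{
    return gckGALDEVICE_Construct(
        48,          /* IrqLine           */
        0xF1840000,  /* RegisterMemBase   */
        0x4000,      /* RegisterMemSize   */
        -1, 0, 0,    /* no 2D core        */
        -1, 0, 0,    /* no VG core        */
        0x78000000,  /* ContiguousBase    */
        0x04000000,  /* ContiguousSize    */
        0,           /* BankSize          */
        1,           /* FastClear         */
        -1,          /* Compression       */
        0,           /* PhysBaseAddr      */
        0x80000000,  /* PhysSize          */
        48,          /* Signal            */
        Device
        );
}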
Example #15
static int drv_mmap(
    struct file* filp,
    struct vm_area_struct* vma
    )
{
    gceSTATUS status;
    gcsHAL_PRIVATE_DATA_PTR data;
    gckGALDEVICE device;

    gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

#if !gcdPAGED_MEMORY_CACHEABLE
    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    vma->vm_flags    |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND;
#endif
    vma->vm_pgoff     = 0;

    if (device->contiguousMapped)
    {
        unsigned long size = vma->vm_end - vma->vm_start;
//####modified for marvell-bg2
        int ret;

        if (size > device->contiguousSize) {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): size (%d) too large >= %d\n",
                __FUNCTION__, __LINE__,
                size, device->contiguousSize
                );
            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        ret = io_remap_pfn_range(
            vma,
            vma->vm_start,
            (gctUINT32) device->contiguousPhysical >> PAGE_SHIFT,
            size,
            vma->vm_page_prot
            );
//####end for marvell-bg2

        if (ret != 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): io_remap_pfn_range failed %d\n",
                __FUNCTION__, __LINE__,
                ret
                );

            data->mappedMemory = gcvNULL;

            gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
        }

        data->mappedMemory = (gctPOINTER) vma->vm_start;
    }
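/*
** Usage sketch (editor's addition, not part of the original driver): the
** user-space counterpart of drv_mmap above.  The device node name
** "/dev/galcore" and mapping the whole pool at offset 0 are assumptions for
** illustration; the user HAL normally performs this mapping internally.
*/
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>

static void *
_MapContiguousPoolSketch(
    size_t PoolSize,
    int * Fd
    )
{
    void * logical;

    /* The descriptor must stay open for the lifetime of the mapping. */
    *Fd = open("/dev/galcore", O_RDWR);
    if (*Fd < 0)
    {
        return NULL;
    }

    /* drv_mmap rejects lengths larger than device->contiguousSize. */
    logical = mmap(NULL, PoolSize, PROT_READ | PROT_WRITE, MAP_SHARED, *Fd, 0);

    return (logical == MAP_FAILED) ? NULL : logical;
}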
/*******************************************************************************
**
**  gckVGKERNEL_Dispatch
**
**  Dispatch a command received from the user HAL layer.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctBOOL FromUser
**          Whether the call originates from user space.
**
**      gcsHAL_INTERFACE * Interface
**          Pointer to a gcsHAL_INTERFACE structure that defines the command to
**          be dispatched.
**
**  OUTPUT:
**
**      gcsHAL_INTERFACE * Interface
**          Pointer to a gcsHAL_INTERFACE structure that receives any data to be
**          returned.
*/
gceSTATUS gckVGKERNEL_Dispatch(
    IN gckKERNEL Kernel,
    IN gctBOOL FromUser,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gcsHAL_INTERFACE * kernelInterface = Interface;
    gcuVIDMEM_NODE_PTR node;
    gctUINT32 processID;

    gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Interface != gcvNULL);

    gcmkONERROR(gckOS_GetProcessID(&processID));

    /* Dispatch on command. */
    switch (Interface->command)
    {
    case gcvHAL_QUERY_VIDEO_MEMORY:
        /* Query video memory size. */
        gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
            Kernel, kernelInterface
            ));
        break;

    case gcvHAL_QUERY_CHIP_IDENTITY:
        /* Query chip identity. */
        gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
            Kernel->vg->hardware,
            &kernelInterface->u.QueryChipIdentity.chipModel,
            &kernelInterface->u.QueryChipIdentity.chipRevision,
            &kernelInterface->u.QueryChipIdentity.chipFeatures,
            &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
            &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
            ));
        break;

    case gcvHAL_QUERY_COMMAND_BUFFER:
        /* Query command buffer information. */
        gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
            Kernel,
            &kernelInterface->u.QueryCommandBuffer.information
            ));
        break;
    case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
        /* Allocate non-paged memory. */
        gcmkERR_BREAK(gckOS_AllocateContiguous(
            Kernel->os,
            gcvTRUE,
            &kernelInterface->u.AllocateNonPagedMemory.bytes,
            &kernelInterface->u.AllocateNonPagedMemory.physical,
            &kernelInterface->u.AllocateNonPagedMemory.logical
            ));
        break;

    case gcvHAL_FREE_NON_PAGED_MEMORY:
        /* Unmap user logical out of physical memory first. */
        gcmkERR_BREAK(gckOS_UnmapUserLogical(
            Kernel->os,
            kernelInterface->u.AllocateNonPagedMemory.physical,
            kernelInterface->u.AllocateNonPagedMemory.bytes,
            kernelInterface->u.AllocateNonPagedMemory.logical
            ));

        /* Free non-paged memory. */
        gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
            Kernel->os,
            kernelInterface->u.AllocateNonPagedMemory.bytes,
            kernelInterface->u.AllocateNonPagedMemory.physical,
            kernelInterface->u.AllocateNonPagedMemory.logical
            ));
        break;

    case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
        /* Allocate contiguous memory. */
        gcmkERR_BREAK(gckOS_AllocateContiguous(
            Kernel->os,
            gcvTRUE,
            &kernelInterface->u.AllocateNonPagedMemory.bytes,
            &kernelInterface->u.AllocateNonPagedMemory.physical,
            &kernelInterface->u.AllocateNonPagedMemory.logical
            ));
        break;

    case gcvHAL_FREE_CONTIGUOUS_MEMORY:
        /* Unmap user logical out of physical memory first. */
        gcmkERR_BREAK(gckOS_UnmapUserLogical(
            Kernel->os,
            kernelInterface->u.AllocateNonPagedMemory.physical,
            kernelInterface->u.AllocateNonPagedMemory.bytes,
            kernelInterface->u.AllocateNonPagedMemory.logical
            ));

        /* Free contiguous memory. */
        gcmkERR_BREAK(gckOS_FreeContiguous(
            Kernel->os,
            kernelInterface->u.AllocateNonPagedMemory.physical,
            kernelInterface->u.AllocateNonPagedMemory.logical,
            kernelInterface->u.AllocateNonPagedMemory.bytes
            ));
        break;

    case gcvHAL_ALLOCATE_VIDEO_MEMORY:
        {
            gctSIZE_T bytes;
            gctUINT32 bitsPerPixel;
            gctUINT32 bits;

            /* Align width and height to tiles. */
            gcmkERR_BREAK(gckVGHARDWARE_AlignToTile(
                Kernel->vg->hardware,
                kernelInterface->u.AllocateVideoMemory.type,
                &kernelInterface->u.AllocateVideoMemory.width,
                &kernelInterface->u.AllocateVideoMemory.height
                ));

            /* Convert format into bytes per pixel and bytes per tile. */
            gcmkERR_BREAK(gckVGHARDWARE_ConvertFormat(
                Kernel->vg->hardware,
                kernelInterface->u.AllocateVideoMemory.format,
                &bitsPerPixel,
                gcvNULL
                ));

            /* Compute number of bits for the allocation. */
            bits
                = kernelInterface->u.AllocateVideoMemory.width
                * kernelInterface->u.AllocateVideoMemory.height
                * kernelInterface->u.AllocateVideoMemory.depth
                * bitsPerPixel;

            /* Compute number of bytes for the allocation. */
            bytes = gcmALIGN(bits, 8) / 8;

            /* Allocate memory. */
            gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
                Kernel,
                &kernelInterface->u.AllocateVideoMemory.pool,
                bytes,
                64,
                kernelInterface->u.AllocateVideoMemory.type,
                &kernelInterface->u.AllocateVideoMemory.node
                ));
        }
        break;

    case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
        /* Allocate memory. */
        gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
            Kernel,
            &kernelInterface->u.AllocateLinearVideoMemory.pool,
            kernelInterface->u.AllocateLinearVideoMemory.bytes,
            kernelInterface->u.AllocateLinearVideoMemory.alignment,
            kernelInterface->u.AllocateLinearVideoMemory.type,
            &kernelInterface->u.AllocateLinearVideoMemory.node
            ));

        gcmkERR_BREAK(gckKERNEL_AddProcessDB(Kernel,
           processID, gcvDB_VIDEO_MEMORY,
           Interface->u.AllocateLinearVideoMemory.node,
           gcvNULL,
           kernelInterface->u.AllocateLinearVideoMemory.bytes
           ));

        break;

    case gcvHAL_FREE_VIDEO_MEMORY:
#ifdef __QNXNTO__
        /* Unmap the video memory */
        node = Interface->u.FreeVideoMemory.node;

        if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM) &&
            (node->VidMem.logical != gcvNULL))
        {
            gckKERNEL_UnmapVideoMemory(Kernel,
                                       node->VidMem.logical,
                                       processID,
                                       node->VidMem.bytes);
            node->VidMem.logical = gcvNULL;
        }
#endif /* __QNXNTO__ */

        /* Free video memory. */
        gcmkERR_BREAK(gckVIDMEM_Free(
            Interface->u.FreeVideoMemory.node
            ));

        gcmkERR_BREAK(gckKERNEL_RemoveProcessDB(
            Kernel,
            processID, gcvDB_VIDEO_MEMORY,
            Interface->u.FreeVideoMemory.node
            ));

        break;

    case gcvHAL_MAP_MEMORY:
        /* Map memory. */
        gcmkERR_BREAK(gckKERNEL_MapMemory(
            Kernel,
            kernelInterface->u.MapMemory.physical,
            kernelInterface->u.MapMemory.bytes,
            &kernelInterface->u.MapMemory.logical
            ));
        break;

    case gcvHAL_UNMAP_MEMORY:
        /* Unmap memory. */
        gcmkERR_BREAK(gckKERNEL_UnmapMemory(
            Kernel,
            kernelInterface->u.MapMemory.physical,
            kernelInterface->u.MapMemory.bytes,
            kernelInterface->u.MapMemory.logical
            ));
        break;

    case gcvHAL_MAP_USER_MEMORY:
        /* Map user memory to DMA. */
        gcmkERR_BREAK(gckOS_MapUserMemory(
            Kernel->os,
            gcvCORE_VG,
            kernelInterface->u.MapUserMemory.memory,
            kernelInterface->u.MapUserMemory.physical,
            kernelInterface->u.MapUserMemory.size,
            &kernelInterface->u.MapUserMemory.info,
            &kernelInterface->u.MapUserMemory.address
            ));
        break;

    case gcvHAL_UNMAP_USER_MEMORY:
        /* Unmap user memory. */
        gcmkERR_BREAK(gckOS_UnmapUserMemory(
            Kernel->os,
            gcvCORE_VG,
            kernelInterface->u.UnmapUserMemory.memory,
            kernelInterface->u.UnmapUserMemory.size,
            kernelInterface->u.UnmapUserMemory.info,
            kernelInterface->u.UnmapUserMemory.address
            ));
        break;
    case gcvHAL_LOCK_VIDEO_MEMORY:
        /* Lock video memory. */
        gcmkERR_BREAK(
            gckVIDMEM_Lock(Kernel,
                           Interface->u.LockVideoMemory.node,
                           gcvFALSE,
                           &Interface->u.LockVideoMemory.address));

        node = Interface->u.LockVideoMemory.node;
        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        {
            /* Map video memory address into user space. */
#ifdef __QNXNTO__
        if (node->VidMem.logical == gcvNULL)
        {
            gcmkONERROR(
                gckKERNEL_MapVideoMemory(Kernel,
                                         FromUser,
                                         Interface->u.LockVideoMemory.address,
                                         processID,
                                         node->VidMem.bytes,
                                         &node->VidMem.logical));
        }

        Interface->u.LockVideoMemory.memory = node->VidMem.logical;
#else
            gcmkERR_BREAK(
                gckKERNEL_MapVideoMemoryEx(Kernel,
                                         gcvCORE_VG,
                                         FromUser,
                                         Interface->u.LockVideoMemory.address,
                                         &Interface->u.LockVideoMemory.memory));
#endif
        }
        else
        {
            Interface->u.LockVideoMemory.memory = node->Virtual.logical;

            /* Success. */
            status = gcvSTATUS_OK;
        }

#if gcdSECURE_USER
        /* Return logical address as physical address. */
        Interface->u.LockVideoMemory.address =
            gcmPTR2INT(Interface->u.LockVideoMemory.memory);
#endif
        gcmkERR_BREAK(
            gckKERNEL_AddProcessDB(Kernel,
                                   processID, gcvDB_VIDEO_MEMORY_LOCKED,
                                   Interface->u.LockVideoMemory.node,
                                   gcvNULL,
                                   0));
        break;

    case gcvHAL_UNLOCK_VIDEO_MEMORY:
        /* Unlock video memory. */
        node = Interface->u.UnlockVideoMemory.node;

#if gcdSECURE_USER
        /* Save node information before it disappears. */
        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        {
            logical = gcvNULL;
            bytes   = 0;
        }
        else
        {
            logical = node->Virtual.logical;
            bytes   = node->Virtual.bytes;
        }
#endif

        /* Unlock video memory. */
        gcmkERR_BREAK(
            gckVIDMEM_Unlock(Kernel,
                             node,
                             Interface->u.UnlockVideoMemory.type,
                             &Interface->u.UnlockVideoMemory.asynchroneous,
                             gcvFALSE));

#if gcdSECURE_USER
        /* Flush the translation cache for virtual surfaces. */
        if (logical != gcvNULL)
        {
            gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
                                                          cache,
                                                          logical,
                                                          bytes));
        }
#endif

        if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
        {
            /* There isn't an event to unlock this node; remove the record now. */
            gcmkERR_BREAK(
                    gckKERNEL_RemoveProcessDB(Kernel,
                        processID, gcvDB_VIDEO_MEMORY_LOCKED,
                        Interface->u.UnlockVideoMemory.node));
        }

        break;
    case gcvHAL_USER_SIGNAL:
#if !USE_NEW_LINUX_SIGNAL
        /* Dispatch depends on the user signal subcommands. */
        switch(Interface->u.UserSignal.command)
        {
        case gcvUSER_SIGNAL_CREATE:
            /* Create a signal used in the user space. */
            gcmkERR_BREAK(
                gckOS_CreateUserSignal(Kernel->os,
                                       Interface->u.UserSignal.manualReset,
                                       &Interface->u.UserSignal.id));

            gcmkVERIFY_OK(
                gckKERNEL_AddProcessDB(Kernel,
                                       processID, gcvDB_SIGNAL,
                                       gcmINT2PTR(Interface->u.UserSignal.id),
                                       gcvNULL,
                                       0));
            break;

        case gcvUSER_SIGNAL_DESTROY:
            /* Destroy the signal. */
            gcmkERR_BREAK(
                gckOS_DestroyUserSignal(Kernel->os,
                                        Interface->u.UserSignal.id));

            gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
                Kernel,
                processID, gcvDB_SIGNAL,
                gcmINT2PTR(Interface->u.UserSignal.id)));
            break;

        case gcvUSER_SIGNAL_SIGNAL:
            /* Signal the signal. */
            gcmkERR_BREAK(
                gckOS_SignalUserSignal(Kernel->os,
                                       Interface->u.UserSignal.id,
                                       Interface->u.UserSignal.state));
            break;

        case gcvUSER_SIGNAL_WAIT:
            /* Wait on the signal. */
            status = gckOS_WaitUserSignal(Kernel->os,
                                          Interface->u.UserSignal.id,
                                          Interface->u.UserSignal.wait);
            break;

        default:
            /* Invalid user signal command. */
            gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
        }
#endif
        break;

    case gcvHAL_COMMIT:
        /* Commit a command and context buffer. */
        gcmkERR_BREAK(gckVGCOMMAND_Commit(
            Kernel->vg->command,
            kernelInterface->u.VGCommit.context,
            kernelInterface->u.VGCommit.queue,
            kernelInterface->u.VGCommit.entryCount,
            kernelInterface->u.VGCommit.taskTable
            ));
        break;
    case gcvHAL_VERSION:
        kernelInterface->u.Version.major = gcvVERSION_MAJOR;
        kernelInterface->u.Version.minor = gcvVERSION_MINOR;
        kernelInterface->u.Version.patch = gcvVERSION_PATCH;
        kernelInterface->u.Version.build = gcvVERSION_BUILD;
        status = gcvSTATUS_OK;
        break;

    case gcvHAL_GET_BASE_ADDRESS:
        /* Get base address. */
        gcmkERR_BREAK(
            gckOS_GetBaseAddress(Kernel->os,
                                 &kernelInterface->u.GetBaseAddress.baseAddress));
        break;
    default:
        /* Invalid command. */
        status = gcvSTATUS_INVALID_ARGUMENT;
    }

OnError:
    /* Save status. */
    kernelInterface->status = status;

    gcmkFOOTER();

    /* Return the status. */
    return status;
}
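/*
** Usage sketch (editor's addition, not part of the original driver): build a
** gcsHAL_INTERFACE request in kernel space and hand it to the VG dispatcher,
** here querying the driver version (the gcvHAL_VERSION case above).
*/
static gceSTATUS
_QueryVersionSketch(
    IN gckKERNEL Kernel
    )
{
    gceSTATUS status;
    gcsHAL_INTERFACE iface;

    iface.command = gcvHAL_VERSION;

    /* FromUser = gcvFALSE: the request originates in kernel space. */
    gcmkONERROR(gckVGKERNEL_Dispatch(Kernel, gcvFALSE, &iface));

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
                   "galcore version %d.%d.%d.%d",
                   iface.u.Version.major, iface.u.Version.minor,
                   iface.u.Version.patch, iface.u.Version.build);

    return gcvSTATUS_OK;

OnError:
    return status;
}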
/*******************************************************************************
**
**  gckVGKERNEL_Construct
**
**  Construct a new gckVGKERNEL object.
**
**  INPUT:
**
**      gckOS Os
**          Pointer to an gckOS object.
**
**      gctPOINTER Context
**          Pointer to a driver defined context.
**
**      gckKERNEL inKernel
**          Pointer to the parent gckKERNEL object.
**
**  OUTPUT:
**
**      gckVGKERNEL * Kernel
**          Pointer to a variable that will hold the pointer to the gckVGKERNEL
**          object.
*/
gceSTATUS gckVGKERNEL_Construct(
    IN gckOS Os,
    IN gctPOINTER Context,
    IN gckKERNEL  inKernel,
    OUT gckVGKERNEL * Kernel
    )
{
    gceSTATUS status;
    gckVGKERNEL kernel = gcvNULL;

    gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);

    do
    {
        /* Allocate the gckVGKERNEL object. */
        gcmkERR_BREAK(gckOS_Allocate(
            Os,
            sizeof(struct _gckVGKERNEL),
            (gctPOINTER *) &kernel
            ));

        /* Initialize the gckVGKERNEL object. */
        kernel->object.type = gcvOBJ_KERNEL;
        kernel->os          = Os;
        kernel->context     = Context;
        kernel->hardware    = gcvNULL;
        kernel->interrupt   = gcvNULL;
        kernel->command     = gcvNULL;
        kernel->mmu         = gcvNULL;
        kernel->kernel      = inKernel;

        /* Construct the gckVGHARDWARE object. */
        gcmkERR_BREAK(gckVGHARDWARE_Construct(
            Os, &kernel->hardware
            ));

        /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
        kernel->hardware->kernel = kernel;

        /* Construct the gckVGINTERRUPT object. */
        gcmkERR_BREAK(gckVGINTERRUPT_Construct(
            kernel, &kernel->interrupt
            ));

        /* Construct the gckVGCOMMAND object. */
        gcmkERR_BREAK(gckVGCOMMAND_Construct(
            kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
            ));

        /* Construct the gckVGMMU object. */
        gcmkERR_BREAK(gckVGMMU_Construct(
            kernel, gcmKB2BYTES(32), &kernel->mmu
            ));

        /* Return pointer to the gckVGKERNEL object. */
        *Kernel = kernel;

        gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
        /* Success. */
        return gcvSTATUS_OK;
    }
    while (gcvFALSE);

    /* Roll back. */
    if (kernel != gcvNULL)
    {
        if (kernel->mmu != gcvNULL)
        {
            gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
        }

        if (kernel->command != gcvNULL)
        {
            gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
        }

        if (kernel->interrupt != gcvNULL)
        {
            gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
        }

        if (kernel->hardware != gcvNULL)
        {
            gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
        }

        gcmkVERIFY_OK(gckOS_Free(Os, kernel));
    }

    gcmkFOOTER();
    /* Return status. */
    return status;
}
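/*
** Usage sketch (editor's addition, not part of the original driver): create
** the VG sub-kernel on top of an existing gckOS/gckKERNEL pair and publish it
** through Kernel->vg, which is how the dispatcher above reaches
** Kernel->vg->hardware and Kernel->vg->command.
*/
static gceSTATUS
_ConstructVgSketch(
    IN gckOS Os,
    IN gctPOINTER Context,
    IN gckKERNEL Parent
    )
{
    gceSTATUS status;
    gckVGKERNEL vg = gcvNULL;

    gcmkONERROR(gckVGKERNEL_Construct(Os, Context, Parent, &vg));

    /* Keep a pointer to the wrapper on the parent kernel object. */
    Parent->vg = vg;

    return gcvSTATUS_OK;

OnError:
    return status;
}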
/*******************************************************************************
**
**  gckKERNEL_Recovery
**
**  Try to recover the GPU from a fatal error.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**  OUTPUT:
**
**      Nothing.
*/
gceSTATUS
gckKERNEL_Recovery(
    IN gckKERNEL Kernel
    )
{
    gceSTATUS status;
    gckEVENT event;
    gckHARDWARE hardware;
#if gcdSECURE_USER
    gctUINT32 processID;
#endif

    gcmkHEADER_ARG("Kernel=0x%x", Kernel);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);

    /* Grab gckEVENT object. */
    event = Kernel->event;
    gcmkVERIFY_OBJECT(event, gcvOBJ_EVENT);

    /* Grab gckHARDWARE object. */
    hardware = Kernel->hardware;
    gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);

    /* Handle all outstanding events now. */
    event->pending = ~0U;
    gcmkONERROR(gckEVENT_Notify(event, 1));

    /* Again in case more events got submitted. */
    event->pending = ~0U;
    gcmkONERROR(gckEVENT_Notify(event, 2));

#if gcdSECURE_USER
    /* Flush the secure mapping cache. */
    gcmkONERROR(gckOS_GetProcessID(&processID));
    gcmkONERROR(gckKERNEL_MapLogicalToPhysical(Kernel, processID, gcvNULL));
#endif

    /* Try issuing a soft reset for the GPU. */
    status = gckHARDWARE_Reset(hardware);
    if (status == gcvSTATUS_NOT_SUPPORTED)
    {
        /* Switch to OFF power.  The next submit should return the GPU to ON
        ** state. */
        gcmkONERROR(
            gckHARDWARE_SetPowerManagementState(hardware,
                                                gcvPOWER_OFF));
    }
    else
    {
        /* Bail out on a reset error; a successful reset falls through. */
        gcmkONERROR(status);
    }

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}
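/*
** Usage sketch (editor's addition, not part of the original driver): call the
** recovery path from a timeout handler once a commit stops signalling events.
** The surrounding stall detection is assumed to exist elsewhere.
*/
static void
_HandleGpuTimeoutSketch(
    IN gckKERNEL Kernel
    )
{
    /* Flush pending events, then soft-reset or power-cycle the core. */
    gceSTATUS status = gckKERNEL_Recovery(Kernel);

    if (gcmIS_ERROR(status))
    {
        gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_KERNEL,
                       "GPU recovery failed: status=%d", status);
    }
}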
Example #19
/*******************************************************************************
**
**  gckVIDMEM_Lock
**
**  Lock a video memory node and return its hardware specific address.
**
**  INPUT:
**
**      gcuVIDMEM_NODE_PTR Node
**          Pointer to a gcuVIDMEM_NODE union.
**
**  OUTPUT:
**
**      gctUINT32 * Address
**          Pointer to a variable that will hold the hardware specific address.
*/
gceSTATUS
gckVIDMEM_Lock(
    IN gcuVIDMEM_NODE_PTR Node,
    OUT gctUINT32 * Address
    )
{
    gceSTATUS status;
    gctBOOL acquired = gcvFALSE;
    gctBOOL locked = gcvFALSE;
    gckOS os = gcvNULL;

    gcmkHEADER_ARG("Node=0x%x", Node);

    /* Verify the arguments. */
    gcmkVERIFY_ARGUMENT(Address != gcvNULL);

    if ((Node == gcvNULL)
    ||  (Node->VidMem.memory == gcvNULL)
    )
    {
        /* Invalid object. */
        gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
    }

    /**************************** Video Memory ********************************/

    if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
    {
        /* Increment the lock count. */
        Node->VidMem.locked ++;

        /* Return the address of the node. */
        *Address = Node->VidMem.memory->baseAddress
                 + Node->VidMem.offset
                 + Node->VidMem.alignment;

        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                      "Locked node 0x%x (%d) @ 0x%08X",
                      Node,
                      Node->VidMem.locked,
                      *Address);
    }

    /*************************** Virtual Memory *******************************/

    else
    {
        /* Verify the gckKERNEL object pointer. */
        gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);

        /* Extract the gckOS object pointer. */
        os = Node->Virtual.kernel->os;
        gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

        /* Grab the mutex. */
        gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
        acquired = gcvTRUE;

        /* Increment the lock count. */
        if (Node->Virtual.locked ++ == 0)
        {
            /* Is this node pending for a final unlock? */
#ifdef __QNXNTO__
            if (!Node->Virtual.contiguous && Node->Virtual.unlockPending)
#else
            if (!Node->Virtual.contiguous && Node->Virtual.pending)
#endif
            {
                /* Make sure we have a page table. */
                gcmkASSERT(Node->Virtual.pageTable != gcvNULL);

                /* Remove pending unlock. */
#ifdef __QNXNTO__
                Node->Virtual.unlockPending = gcvFALSE;
#else
                Node->Virtual.pending = gcvFALSE;
#endif
            }

            /* First lock - create a page table. */
            gcmkASSERT(Node->Virtual.pageTable == gcvNULL);

            /* Make sure we mark our node as not flushed. */
#ifdef __QNXNTO__
            Node->Virtual.unlockPending = gcvFALSE;
#else
            Node->Virtual.pending = gcvFALSE;
#endif

            /* Lock the allocated pages. */
#ifdef __QNXNTO__
            gcmkONERROR(
                gckOS_LockPages(os,
                                Node->Virtual.physical,
                                Node->Virtual.bytes,
                                Node->Virtual.userPID,
                                &Node->Virtual.logical,
                                &Node->Virtual.pageCount));
#else
            gcmkONERROR(
                gckOS_LockPages(os,
                                Node->Virtual.physical,
                                Node->Virtual.bytes,
                                &Node->Virtual.logical,
                                &Node->Virtual.pageCount));
#endif

            locked = gcvTRUE;

            if (Node->Virtual.contiguous)
            {
                /* Get physical address directly */
                gcmkONERROR(gckOS_GetPhysicalAddress(os,
                                    Node->Virtual.logical,
                                    &Node->Virtual.address));
            }
            else
            {
                /* Allocate pages inside the MMU. */
                gcmkONERROR(
                    gckMMU_AllocatePages(Node->Virtual.kernel->mmu,
                                         Node->Virtual.pageCount,
                                         &Node->Virtual.pageTable,
                                         &Node->Virtual.address));

                /* Map the pages. */
#ifdef __QNXNTO__
                gcmkONERROR(
                    gckOS_MapPages(os,
                                   Node->Virtual.physical,
                                   Node->Virtual.logical,
                                   Node->Virtual.pageCount,
                                   Node->Virtual.pageTable));
#else
                gcmkONERROR(
                    gckOS_MapPages(os,
                                   Node->Virtual.physical,
                                   Node->Virtual.pageCount,
                                   Node->Virtual.pageTable));
#endif

                gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                               "Mapped virtual node 0x%x to 0x%08X",
                               Node,
                               Node->Virtual.address);
            }
        }

        /* Return hardware address. */
        *Address = Node->Virtual.address;

        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Success. */
    gcmkFOOTER_ARG("*Address=%08x", *Address);
    return gcvSTATUS_OK;

OnError:
    if (locked)
    {
        if (Node->Virtual.pageTable != gcvNULL)
        {
            /* Free the pages from the MMU. */
            gcmkVERIFY_OK(
                gckMMU_FreePages(Node->Virtual.kernel->mmu,
                                 Node->Virtual.pageTable,
                                 Node->Virtual.pageCount));

            Node->Virtual.pageTable = gcvNULL;
        }

        /* Unlock the pages. */
#ifdef __QNXNTO__
        gcmkVERIFY_OK(
            gckOS_UnlockPages(os,
                              Node->Virtual.physical,
                              Node->Virtual.userPID,
                              Node->Virtual.bytes,
                              Node->Virtual.logical));
#else
        gcmkVERIFY_OK(
            gckOS_UnlockPages(os,
                              Node->Virtual.physical,
                              Node->Virtual.bytes,
                              Node->Virtual.logical));
#endif
    }

    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
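/*
** Usage sketch (editor's addition, not part of the original driver): pin a
** node, program the hardware with its address, then unlock it.  This pairs
** the two-argument gckVIDMEM_Lock above with the three-argument
** gckVIDMEM_Unlock used by gckKERNEL_Dispatch below; the gceSURF_TYPE type of
** the unlock argument is an assumption.
*/
static gceSTATUS
_UseNodeSketch(
    IN gcuVIDMEM_NODE_PTR Node,
    IN gceSURF_TYPE Type
    )
{
    gceSTATUS status;
    gctUINT32 address;
    gctBOOL asynchroneous;

    /* Pin the node and obtain its hardware address. */
    gcmkONERROR(gckVIDMEM_Lock(Node, &address));

    /* ... program command buffers with 'address' here ... */

    /* Drop the lock; the driver may defer the real unlock to an event. */
    gcmkONERROR(gckVIDMEM_Unlock(Node, Type, &asynchroneous));

    return gcvSTATUS_OK;

OnError:
    return status;
}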
/*******************************************************************************
**
**	gckKERNEL_Dispatch
**
**	Dispatch a command received from the user HAL layer.
**
**	INPUT:
**
**		gckKERNEL Kernel
**			Pointer to an gckKERNEL object.
**
**		gctBOOL FromUser
**			whether the call is from the user space.
**
**		gcsHAL_INTERFACE * Interface
**			Pointer to a gcsHAL_INTERFACE structure that defines the command to
**			be dispatched.
**
**	OUTPUT:
**
**		gcsHAL_INTERFACE * Interface
**			Pointer to a gcsHAL_INTERFACE structure that receives any data to be
**			returned.
*/
gceSTATUS
gckKERNEL_Dispatch(
	IN gckKERNEL Kernel,
	IN gctBOOL FromUser,
	IN OUT gcsHAL_INTERFACE * Interface
	)
{
	gceSTATUS status;
	gctUINT32 bitsPerPixel;
	gctSIZE_T bytes;
	gcuVIDMEM_NODE_PTR node;
	gctBOOL locked = gcvFALSE;
	gctPHYS_ADDR physical;
	gctUINT32 address;

	gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
				   Kernel, FromUser, Interface);

	/* Verify the arguments. */
	gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
	gcmkVERIFY_ARGUMENT(Interface != gcvNULL);

	gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
				   "Dispatching command %d", Interface->command);

	/* Dispatch on command. */
	switch (Interface->command)
	{
	case gcvHAL_GET_BASE_ADDRESS:
		/* Get base address. */
		gcmkONERROR(
			gckOS_GetBaseAddress(Kernel->os,
								 &Interface->u.GetBaseAddress.baseAddress));
		break;

    case gcvHAL_QUERY_VIDEO_MEMORY:
        /* Query video memory size. */
        gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
		break;

	case gcvHAL_QUERY_CHIP_IDENTITY:
		/* Query chip identity. */
		gcmkONERROR(
			gckHARDWARE_QueryChipIdentity(
				Kernel->hardware,
				&Interface->u.QueryChipIdentity.chipModel,
				&Interface->u.QueryChipIdentity.chipRevision,
				&Interface->u.QueryChipIdentity.chipFeatures,
				&Interface->u.QueryChipIdentity.chipMinorFeatures,
				&Interface->u.QueryChipIdentity.chipMinorFeatures1));

		/* Query chip specifications. */
		gcmkONERROR(
			gckHARDWARE_QueryChipSpecs(
				Kernel->hardware,
				&Interface->u.QueryChipIdentity.streamCount,
				&Interface->u.QueryChipIdentity.registerMax,
				&Interface->u.QueryChipIdentity.threadCount,
				&Interface->u.QueryChipIdentity.shaderCoreCount,
				&Interface->u.QueryChipIdentity.vertexCacheSize,
				&Interface->u.QueryChipIdentity.vertexOutputBufferSize));
		break;

	case gcvHAL_MAP_MEMORY:
		physical = Interface->u.MapMemory.physical;

		/* Map memory. */
		gcmkONERROR(
			gckKERNEL_MapMemory(Kernel,
								physical,
								Interface->u.MapMemory.bytes,
								&Interface->u.MapMemory.logical));
		break;

	case gcvHAL_UNMAP_MEMORY:
		physical = Interface->u.UnmapMemory.physical;

		/* Unmap memory. */
		gcmkONERROR(
			gckKERNEL_UnmapMemory(Kernel,
								  physical,
								  Interface->u.UnmapMemory.bytes,
								  Interface->u.UnmapMemory.logical));
		break;

	case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
		/* Allocate non-paged memory. */
#ifdef __QNXNTO__
		if (FromUser)
		{
			gcmkONERROR(
				gckOS_AllocateNonPagedMemoryShmPool(
				Kernel->os,
				FromUser,
				Interface->pid,
				Interface->handle,
				&Interface->u.AllocateNonPagedMemory.bytes,
				&Interface->u.AllocateNonPagedMemory.physical,
				&Interface->u.AllocateNonPagedMemory.logical));
			break;
		}
#endif
		gcmkONERROR(
			gckOS_AllocateNonPagedMemory(
				Kernel->os,
				FromUser,
				&Interface->u.AllocateNonPagedMemory.bytes,
				&Interface->u.AllocateNonPagedMemory.physical,
				&Interface->u.AllocateNonPagedMemory.logical));
		break;

	case gcvHAL_FREE_NON_PAGED_MEMORY:
		physical = Interface->u.FreeNonPagedMemory.physical;

		/* Free non-paged memory. */
		gcmkONERROR(
			gckOS_FreeNonPagedMemory(Kernel->os,
									 Interface->u.FreeNonPagedMemory.bytes,
									 physical,
									 Interface->u.FreeNonPagedMemory.logical));
		break;

	case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
		/* Allocate contiguous memory. */
#ifdef __QNXNTO__
		if (FromUser)
		{
			gcmkONERROR(
				gckOS_AllocateNonPagedMemoryShmPool(
				Kernel->os,
				FromUser,
				Interface->pid,
				Interface->handle,
				&Interface->u.AllocateNonPagedMemory.bytes,
				&Interface->u.AllocateNonPagedMemory.physical,
				&Interface->u.AllocateNonPagedMemory.logical));
			break;
		}
#endif
		gcmkONERROR(
			gckOS_AllocateContiguous(
				Kernel->os,
				FromUser,
				&Interface->u.AllocateContiguousMemory.bytes,
				&Interface->u.AllocateContiguousMemory.physical,
				&Interface->u.AllocateContiguousMemory.logical));

		break;

	case gcvHAL_FREE_CONTIGUOUS_MEMORY:
		physical = Interface->u.FreeContiguousMemory.physical;

       /* Free contiguous memory. */
        gcmkONERROR(
            gckOS_FreeContiguous(Kernel->os,
                                 physical,
                                 Interface->u.FreeContiguousMemory.logical,
                                 Interface->u.FreeContiguousMemory.bytes));
        break;

	case gcvHAL_ALLOCATE_VIDEO_MEMORY:
		/* Align width and height to tiles. */
		gcmkONERROR(
			gckHARDWARE_AlignToTile(Kernel->hardware,
									Interface->u.AllocateVideoMemory.type,
									&Interface->u.AllocateVideoMemory.width,
									&Interface->u.AllocateVideoMemory.height,
									gcvNULL));

		/* Convert format into bytes per pixel and bytes per tile. */
		gcmkONERROR(
			gckHARDWARE_ConvertFormat(Kernel->hardware,
									  Interface->u.AllocateVideoMemory.format,
									  &bitsPerPixel,
									  gcvNULL));

		/* Compute number of bytes for the allocation. */
		bytes = Interface->u.AllocateVideoMemory.width * bitsPerPixel
			  * Interface->u.AllocateVideoMemory.height
			  * Interface->u.AllocateVideoMemory.depth / 8;

		/* Allocate memory. */
#ifdef __QNXNTO__
		gcmkONERROR(
			_AllocateMemory(Kernel,
							&Interface->u.AllocateVideoMemory.pool,
							bytes,
							64,
							Interface->u.AllocateVideoMemory.type,
							Interface->handle,
							&Interface->u.AllocateVideoMemory.node));
#else
		gcmkONERROR(
			_AllocateMemory(Kernel,
							&Interface->u.AllocateVideoMemory.pool,
							bytes,
							64,
							Interface->u.AllocateVideoMemory.type,
							&Interface->u.AllocateVideoMemory.node));
#endif
		break;

	case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
		/* Allocate memory. */
#ifdef __QNXNTO__
		gcmkONERROR(
			_AllocateMemory(Kernel,
							&Interface->u.AllocateLinearVideoMemory.pool,
							Interface->u.AllocateLinearVideoMemory.bytes,
							Interface->u.AllocateLinearVideoMemory.alignment,
							Interface->u.AllocateLinearVideoMemory.type,
							Interface->handle,
							&Interface->u.AllocateLinearVideoMemory.node));

		/* Set the current user pid in the node,
		 * which is used while locking memory. */
		gcmkVERIFY_OK(gckVIDMEM_SetPID(
				Interface->u.AllocateLinearVideoMemory.node,
				Interface->pid));
#else
		gcmkONERROR(
			_AllocateMemory(Kernel,
							&Interface->u.AllocateLinearVideoMemory.pool,
							Interface->u.AllocateLinearVideoMemory.bytes,
							Interface->u.AllocateLinearVideoMemory.alignment,
							Interface->u.AllocateLinearVideoMemory.type,
							&Interface->u.AllocateLinearVideoMemory.node));
#endif
		break;

    case gcvHAL_FREE_VIDEO_MEMORY:
#ifdef __QNXNTO__
        node = Interface->u.FreeVideoMemory.node;
        if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM
         && node->VidMem.logical != gcvNULL)
        {
            gcmkONERROR(
                    gckKERNEL_UnmapVideoMemory(Kernel,
                                               node->VidMem.logical,
                                               Interface->pid,
                                               node->VidMem.bytes));
            node->VidMem.logical = gcvNULL;
        }
#endif
        /* Free video memory. */
        gcmkONERROR(
            gckVIDMEM_Free(Interface->u.FreeVideoMemory.node));
        break;

	case gcvHAL_LOCK_VIDEO_MEMORY:
		/* Lock video memory. */
		gcmkONERROR(
			gckVIDMEM_Lock(Interface->u.LockVideoMemory.node,
						   &Interface->u.LockVideoMemory.address));

		locked = gcvTRUE;

		node = Interface->u.LockVideoMemory.node;
		if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
		{
			/* Map video memory address into user space. */
#ifdef __QNXNTO__
        if (node->VidMem.logical == gcvNULL)
        {
			gcmkONERROR(
				gckKERNEL_MapVideoMemory(Kernel,
										 FromUser,
										 Interface->u.LockVideoMemory.address,
										 Interface->pid,
										 node->VidMem.bytes,
										 &node->VidMem.logical));
        }
		Interface->u.LockVideoMemory.memory = node->VidMem.logical;
#else
			gcmkONERROR(
				gckKERNEL_MapVideoMemory(Kernel,
										 FromUser,
										 Interface->u.LockVideoMemory.address,
										 &Interface->u.LockVideoMemory.memory));
#endif

#ifdef __QNXNTO__
			/* Add more information to node, which is used while unmapping. */
			gcmkVERIFY_OK(gckVIDMEM_SetPID(
					Interface->u.LockVideoMemory.node,
					Interface->pid));
#endif
		}

		else
		{
			/* Copy logical memory for virtual memory. */
			Interface->u.LockVideoMemory.memory = node->Virtual.logical;

            /* Success. */
            status = gcvSTATUS_OK;
        }

#if gcdSECURE_USER
        /* Return logical address as physical address. */
        Interface->u.LockVideoMemory.address =
            gcmPTR2INT(Interface->u.LockVideoMemory.memory);
#endif
        break;

	case gcvHAL_UNLOCK_VIDEO_MEMORY:
		node = Interface->u.UnlockVideoMemory.node;

        /* Unlock video memory. */
        gcmkONERROR(
            gckVIDMEM_Unlock(node,
                             Interface->u.UnlockVideoMemory.type,
                             &Interface->u.UnlockVideoMemory.asynchroneous));
        break;

	case gcvHAL_EVENT_COMMIT:
		/* Commit an event queue. */
		gcmkONERROR(
			gckEVENT_Commit(Kernel->event,
						    Interface->u.Event.queue));
        break;

    case gcvHAL_COMMIT:
        /* Commit a command and context buffer. */
        gcmkONERROR(
            gckCOMMAND_Commit(Kernel->command,
                              Interface->u.Commit.commandBuffer,
                              Interface->u.Commit.contextBuffer,
                              Interface->u.Commit.process));
        break;

    case gcvHAL_STALL:
        /* Stall the command queue. */
        gcmkONERROR(gckCOMMAND_Stall(Kernel->command));
        break;

	case gcvHAL_MAP_USER_MEMORY:
		/* Map user memory to DMA. */
		gcmkONERROR(
			gckOS_MapUserMemory(Kernel->os,
								Interface->u.MapUserMemory.memory,
								Interface->u.MapUserMemory.size,
								&Interface->u.MapUserMemory.info,
								&Interface->u.MapUserMemory.address));
		break;

	case gcvHAL_UNMAP_USER_MEMORY:
		address = Interface->u.MapUserMemory.address;

		/* Unmap user memory. */
		gcmkONERROR(
			gckOS_UnmapUserMemory(Kernel->os,
								  Interface->u.UnmapUserMemory.memory,
								  Interface->u.UnmapUserMemory.size,
								  Interface->u.UnmapUserMemory.info,
								  address));
		break;

#if !USE_NEW_LINUX_SIGNAL
	case gcvHAL_USER_SIGNAL:
		gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
					   "Dispatching gcvHAL_USER_SIGNAL %d",
					   Interface->u.UserSignal.command);

		/* Dispatch depends on the user signal subcommand. */
		switch (Interface->u.UserSignal.command)
		{
		case gcvUSER_SIGNAL_CREATE:
			/* Create a signal used in the user space. */
			gcmkONERROR(
				gckOS_CreateUserSignal(Kernel->os,
									   Interface->u.UserSignal.manualReset,
                                       Interface->u.UserSignal.signalType,
									   &Interface->u.UserSignal.id));
			break;

		case gcvUSER_SIGNAL_DESTROY:
			/* Destroy the signal. */
			gcmkONERROR(
				gckOS_DestroyUserSignal(Kernel->os,
										Interface->u.UserSignal.id));
			break;

		case gcvUSER_SIGNAL_SIGNAL:
			/* Signal the signal. */
			gcmkONERROR(
				gckOS_SignalUserSignal(Kernel->os,
									   Interface->u.UserSignal.id,
									   Interface->u.UserSignal.state));
			break;

		case gcvUSER_SIGNAL_WAIT:
			/* Wait on the signal. */
			status = gckOS_WaitUserSignal(Kernel->os,
										  Interface->u.UserSignal.id,
										  Interface->u.UserSignal.wait);
			break;

		default:
			/* Invalid user signal command. */
			gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
		}
        break;
#endif

    case gcvHAL_SET_POWER_MANAGEMENT_STATE:
		/* Set the power management state. */
		gcmkONERROR(
			gckHARDWARE_SetPowerManagementState(
				Kernel->hardware,
				Interface->u.SetPowerManagement.state));
		break;

    case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
        /* Chip is not idle. */
        Interface->u.QueryPowerManagement.isIdle = gcvFALSE;

		/* Query the power management state. */
        gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
            Kernel->hardware,
            &Interface->u.QueryPowerManagement.state));

        /* Query the idle state. */
        gcmkONERROR(
            gckHARDWARE_QueryIdle(Kernel->hardware,
                                  &Interface->u.QueryPowerManagement.isIdle));
        break;

    case gcvHAL_READ_REGISTER:
#if gcdREGISTER_ACCESS_FROM_USER
        /* Read a register. */
        gcmkONERROR(
            gckOS_ReadRegister(Kernel->os,
                               Interface->u.ReadRegisterData.address,
                               &Interface->u.ReadRegisterData.data));
#else
		/* No access from user land to read registers. */
		Interface->u.ReadRegisterData.data = 0;
		status = gcvSTATUS_NOT_SUPPORTED;
#endif
        break;

    case gcvHAL_WRITE_REGISTER:
#if gcdREGISTER_ACCESS_FROM_USER
        /* Write a register. */
        gcmkONERROR(
            gckOS_WriteRegister(Kernel->os,
                                Interface->u.WriteRegisterData.address,
                                Interface->u.WriteRegisterData.data));
#else
		/* No access from user land to write registers. */
		status = gcvSTATUS_NOT_SUPPORTED;
#endif
        break;

    case gcvHAL_READ_ALL_PROFILE_REGISTERS:
#if VIVANTE_PROFILER
		/* Read all 3D profile registers. */
		gcmkONERROR(
			gckHARDWARE_QueryProfileRegisters(
				Kernel->hardware,
				&Interface->u.RegisterProfileData.counters));
#else
        status = gcvSTATUS_OK;
#endif
        break;

    case gcvHAL_PROFILE_REGISTERS_2D:
#if VIVANTE_PROFILER
		/* Read all 2D profile registers. */
		gcmkONERROR(
			gckHARDWARE_ProfileEngine2D(
				Kernel->hardware,
				Interface->u.RegisterProfileData2D.hwProfile2D));
#else
        status = gcvSTATUS_OK;
#endif
        break;

	case gcvHAL_GET_PROFILE_SETTING:
#if VIVANTE_PROFILER
		/* Get profile setting */
		Interface->u.GetProfileSetting.enable = Kernel->profileEnable;

		gcmkVERIFY_OK(
			gckOS_MemCopy(Interface->u.GetProfileSetting.fileName,
						  Kernel->profileFileName,
						  gcdMAX_PROFILE_FILE_NAME));
#endif

		status = gcvSTATUS_OK;
        break;

	case gcvHAL_SET_PROFILE_SETTING:
#if VIVANTE_PROFILER
		/* Set profile setting */
		Kernel->profileEnable = Interface->u.SetProfileSetting.enable;

		gcmkVERIFY_OK(
			gckOS_MemCopy(Kernel->profileFileName,
						  Interface->u.SetProfileSetting.fileName,
						  gcdMAX_PROFILE_FILE_NAME));
#endif

        status = gcvSTATUS_OK;
		break;

	case gcvHAL_QUERY_KERNEL_SETTINGS:
		/* Get kernel settings. */
		gcmkONERROR(
			gckKERNEL_QuerySettings(Kernel,
									&Interface->u.QueryKernelSettings.settings));
		break;

	case gcvHAL_RESET:
		/* Reset the hardware. */
		gcmkONERROR(
			gckHARDWARE_Reset(Kernel->hardware));
		break;

    case gcvHAL_DEBUG:
        /* Set debug level and zones. */
        if (Interface->u.Debug.set)
        {
            gckOS_SetDebugLevel(Interface->u.Debug.level);
            gckOS_SetDebugZones(Interface->u.Debug.zones,
                                Interface->u.Debug.enable);
        }

        if (Interface->u.Debug.message[0] != '\0')
        {
            /* Print a message to the debugger. */
            gcmkPRINT(Interface->u.Debug.message);
        }
        status = gcvSTATUS_OK;
        break;

    case gcvHAL_CACHE:
        if (Interface->u.Cache.invalidate)
        {
            /* Flush and invalidate the cache. */
            status = gckOS_CacheInvalidate(Kernel->os,
                                           Interface->u.Cache.process,
                                           Interface->u.Cache.logical,
                                           Interface->u.Cache.bytes);
        }
        else
        {
            /* Flush the cache. */
            status = gckOS_CacheFlush(Kernel->os,
                                      Interface->u.Cache.process,
                                      Interface->u.Cache.logical,
                                      Interface->u.Cache.bytes);
        }
		break;

	default:
		/* Invalid command. */
		gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
	}

OnError:
	/* Save status. */
	Interface->status = status;

    if (gcmIS_ERROR(status))
    {
        if (locked)
        {
            /* Roll back the lock. */
            gcmkVERIFY_OK(
                gckVIDMEM_Unlock(Interface->u.LockVideoMemory.node,
                                 gcvSURF_TYPE_UNKNOWN,
                                 gcvNULL));
        }
    }

	/* Return the status. */
	gcmkFOOTER();
	return status;
}
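The dispatcher above (presumably gckKERNEL_Dispatch, as invoked from drv_ioctl later in this listing) is driven with a filled-in gcsHAL_INTERFACE. A minimal kernel-side sketch, assuming the usual gc_hal kernel headers are in scope and Kernel is a constructed gckKERNEL object; the helper name is illustrative:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleStallGpu(
    IN gckKERNEL Kernel
    )
{
    gcsHAL_INTERFACE iface;

    /* Request a command queue stall; no extra arguments are required. */
    iface.command = gcvHAL_STALL;

    /* FromUser is gcvFALSE because the interface buffer lives in kernel space. */
    gcmkVERIFY_OK(gckKERNEL_Dispatch(Kernel, gcvFALSE, &iface));

    /* The dispatcher also mirrors its result into iface.status. */
    return iface.status;
}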
Example #21
/*******************************************************************************
**
**  gckVIDMEM_ConstructVirtual
**
**  Construct a new gcuVIDMEM_NODE union for virtual memory.
**
**  INPUT:
**
**      gckKERNEL Kernel
**          Pointer to an gckKERNEL object.
**
**      gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**  OUTPUT:
**
**      gcuVIDMEM_NODE_PTR * Node
**          Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
*/
gceSTATUS
gckVIDMEM_ConstructVirtual(
    IN gckKERNEL Kernel,
    IN gctBOOL Contiguous,
    IN gctSIZE_T Bytes,
#ifdef __QNXNTO__
    IN gctHANDLE Handle,
#endif
    OUT gcuVIDMEM_NODE_PTR * Node
    )
{
    gckOS os;
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node = gcvNULL;

    gcmkHEADER_ARG("Kernel=0x%x Bytes=%lu", Kernel, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
#ifdef __QNXNTO__
    gcmkVERIFY_ARGUMENT(Handle != gcvNULL);
#endif

    /* Extract the gckOS object pointer. */
    os = Kernel->os;
    gcmkVERIFY_OBJECT(os, gcvOBJ_OS);

    /* Allocate an gcuVIDMEM_NODE union. */
    gcmkONERROR(
        gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), (gctPOINTER *) &node));

    /* Initialize gcuVIDMEM_NODE union for virtual memory. */
    node->Virtual.kernel        = Kernel;
    node->Virtual.contiguous    = Contiguous;
    node->Virtual.locked        = 0;
    node->Virtual.logical       = gcvNULL;
    node->Virtual.pageTable     = gcvNULL;
    node->Virtual.mutex         = gcvNULL;
#ifdef __QNXNTO__
    node->Virtual.next          = gcvNULL;
    node->Virtual.unlockPending = gcvFALSE;
    node->Virtual.freePending   = gcvFALSE;
    node->Virtual.handle        = Handle;
#else
    node->Virtual.pending       = gcvFALSE;
#endif

    /* Create the mutex. */
    gcmkONERROR(
        gckOS_CreateMutex(os, &node->Virtual.mutex));

    /* Allocate the virtual memory. */
    gcmkONERROR(
        gckOS_AllocatePagedMemoryEx(os,
                                    node->Virtual.contiguous,
                                    node->Virtual.bytes = Bytes,
                                    &node->Virtual.physical));

#ifdef __QNXNTO__
    /* Register. */
    gckMMU_InsertNode(Kernel->mmu, node);
#endif

    /* Return pointer to the gcuVIDMEM_NODE union. */
    *Node = node;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "Created virtual node 0x%x for %u bytes @ 0x%x",
                   node, Bytes, node->Virtual.physical);

    /* Success. */
    gcmkFOOTER_ARG("*Node=0x%x", *Node);
    return gcvSTATUS_OK;

OnError:
    /* Roll back. */
    if (node != gcvNULL)
    {
        if (node->Virtual.mutex != gcvNULL)
        {
            /* Destroy the mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
        }

        /* Free the structure. */
        gcmkVERIFY_OK(gckOS_Free(os, node));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
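A minimal sketch of a caller of gckVIDMEM_ConstructVirtual, assuming a non-QNX build (no Handle parameter) and a valid gckKERNEL object; the helper name is illustrative, and the teardown pairs the node with gckVIDMEM_Free as used elsewhere in this listing:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleVirtualAlloc(
    IN gckKERNEL Kernel
    )
{
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node = gcvNULL;

    /* Ask for 64 KB of (not necessarily contiguous) virtual memory. */
    gcmkONERROR(
        gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, 64 << 10, &node));

    /* ... lock, map and use the node here ... */

    /* Return the node to the allocator. */
    gcmkONERROR(gckVIDMEM_Free(node));

    return gcvSTATUS_OK;

OnError:
    return status;
}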
gceSTATUS
gckKERNEL_Construct(
	IN gckOS Os,
	IN gctPOINTER Context,
	OUT gckKERNEL * Kernel
	)
{
	gckKERNEL kernel = gcvNULL;
	gceSTATUS status;

	gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);

	/* Verify the arguments. */
	gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
	gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);

	/* Allocate the gckKERNEL object. */
	gcmkONERROR(
		gckOS_Allocate(Os,
					   gcmSIZEOF(struct _gckKERNEL),
					   (gctPOINTER *) &kernel));
#if MRVL_LOW_POWER_MODE_DEBUG
    gcmERR_RETURN(
		gckOS_Allocate(Os, 0x1000000, (gctPOINTER *) &kernel->kernelMSG));

    kernel->msgLen = 0;
#endif

	/* Zero the object pointers. */
	kernel->hardware = gcvNULL;
	kernel->command  = gcvNULL;
	kernel->event    = gcvNULL;
	kernel->mmu      = gcvNULL;

	/* Initialize the gckKERNEL object. */
	kernel->object.type = gcvOBJ_KERNEL;
	kernel->os          = Os;

	/* Save context. */
	kernel->context = Context;

    /* Construct atom holding number of clients. */
    kernel->atomClients = gcvNULL;
    gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
#if gcdSECURE_USER
    kernel->cacheSlots     = 0;
    kernel->cacheTimeStamp = 0;
#endif

	/* Construct the gckHARDWARE object. */
	gcmkONERROR(
		gckHARDWARE_Construct(Os, &kernel->hardware));

	/* Set pointer to gckKERNEL object in gckHARDWARE object. */
	kernel->hardware->kernel = kernel;

	/* Initialize the hardware. */
	gcmkONERROR(
		gckHARDWARE_InitializeHardware(kernel->hardware));

	/* Construct the gckCOMMAND object. */
	gcmkONERROR(
		gckCOMMAND_Construct(kernel, &kernel->command));

	/* Construct the gckEVENT object. */
	gcmkONERROR(
		gckEVENT_Construct(kernel, &kernel->event));

	/* Construct the gckMMU object. */
	gcmkONERROR(
		gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
	kernel->notifyIdle = gcvTRUE; /* gcvFALSE; */

#if VIVANTE_PROFILER
	/* Initialize profile setting */
#if defined ANDROID
	kernel->profileEnable = gcvFALSE;
#else
	kernel->profileEnable = gcvTRUE;
#endif

	gcmkVERIFY_OK(
		gckOS_MemCopy(kernel->profileFileName,
					  DEFAULT_PROFILE_FILE_NAME,
					  gcmSIZEOF(DEFAULT_PROFILE_FILE_NAME) + 1));
#endif

	/* Return pointer to the gckKERNEL object. */
	*Kernel = kernel;

	/* Success. */
	gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
	return gcvSTATUS_OK;

OnError:
	if (kernel != gcvNULL)
	{
		if (kernel->event != gcvNULL)
		{
			gcmkVERIFY_OK(gckEVENT_Destroy(kernel->event));
		}

		if (kernel->command != gcvNULL)
		{
			gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
		}

		if (kernel->hardware != gcvNULL)
		{
			gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
		}

		if (kernel->atomClients != gcvNULL)
		{
			gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
		}

		kernel->version = _GC_VERSION_STRING_;

#if MRVL_LOW_POWER_MODE_DEBUG
		gcmkVERIFY_OK(
			gckOS_Free(Os, kernel->kernelMSG));
#endif

		gcmkVERIFY_OK(gckOS_Free(Os, kernel));
	}

	/* Return the error. */
	gcmkFOOTER();
	return status;
}
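A minimal bring-up sketch for gckKERNEL_Construct; gckKERNEL_Destroy is assumed to exist as the matching destructor (it is not shown in this listing), and the Context argument is driver specific, so NULL is used here for brevity:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleBringUpKernel(
    IN gckOS Os
    )
{
    gceSTATUS status;
    gckKERNEL kernel = gcvNULL;

    /* Build the kernel object with a NULL context. */
    gcmkONERROR(gckKERNEL_Construct(Os, gcvNULL, &kernel));

    /* ... attach processes, dispatch requests, etc. ... */

    /* Assumed counterpart of gckKERNEL_Construct (not shown in this listing). */
    gcmkONERROR(gckKERNEL_Destroy(kernel));

    return gcvSTATUS_OK;

OnError:
    return status;
}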
Example #23
/*******************************************************************************
**
**  gckVIDMEM_AllocateLinear
**
**  Allocate linear memory from the gckVIDMEM object.
**
**  INPUT:
**
**      gckVIDMEM Memory
**          Pointer to an gckVIDMEM object.
**
**      gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**      gctUINT32 Alignment
**          Byte alignment for allocation.
**
**      gceSURF_TYPE Type
**          Type of surface to allocate (used by bank optimization).
**
**  OUTPUT:
**
**      gcuVIDMEM_NODE_PTR * Node
**          Pointer to a variable that will hold the allocated memory node.
*/
gceSTATUS
gckVIDMEM_AllocateLinear(
    IN gckVIDMEM Memory,
    IN gctSIZE_T Bytes,
    IN gctUINT32 Alignment,
    IN gceSURF_TYPE Type,
#ifdef __QNXNTO__
    IN gctHANDLE Handle,
#endif
    OUT gcuVIDMEM_NODE_PTR * Node
    )
{
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node;
    gctUINT32 alignment;
    gctINT bank, i;
    gctBOOL acquired = gcvFALSE;

    gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
                   Memory, Bytes, Alignment, Type);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Node != gcvNULL);
#ifdef __QNXNTO__
    gcmkVERIFY_ARGUMENT(Handle != gcvNULL);
#endif

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    if (Bytes > Memory->freeBytes)
    {
        /* Not enough memory. */
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    /* Find the default bank for this surface type. */
    gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
    bank      = Memory->mapping[Type];
    alignment = Alignment;

    /* Find a free node in the default bank. */
    node = _FindNode(Memory, bank, Bytes, &alignment);

    /* Out of memory? */
    if (node == gcvNULL)
    {
        /* Walk all lower banks. */
        for (i = bank - 1; i >= 0; --i)
        {
            /* Find a free node inside the current bank. */
            node = _FindNode(Memory, i, Bytes, &alignment);
            if (node != gcvNULL)
            {
                break;
            }
        }
    }

    if (node == gcvNULL)
    {
        /* Walk all upper banks. */
        for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
        {
            if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
            {
                /* Abort when we reach unused banks. */
                break;
            }

            /* Find a free node inside the current bank. */
            node = _FindNode(Memory, i, Bytes, &alignment);
            if (node != gcvNULL)
            {
                break;
            }
        }
    }

    if (node == gcvNULL)
    {
        /* Out of memory. */
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    /* Do we have an alignment? */
    if (alignment > 0)
    {
        /* Split the node so it is aligned. */
        if (_Split(Memory->os, node, alignment))
        {
            /* Successful split, move to aligned node. */
            node = node->VidMem.next;

            /* Remove alignment. */
            alignment = 0;
        }
    }

    /* Do we have enough memory after the allocation to split it? */
    if (node->VidMem.bytes - Bytes > Memory->threshold)
    {
        /* Adjust the node size. */
        _Split(Memory->os, node, Bytes);
    }

    /* Remove the node from the free list. */
    node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
    node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
    node->VidMem.nextFree                  =
    node->VidMem.prevFree                  = gcvNULL;

    /* Fill in the information. */
    node->VidMem.alignment = alignment;
    node->VidMem.memory    = Memory;
#ifdef __QNXNTO__
    node->VidMem.logical   = gcvNULL;
    node->VidMem.handle    = Handle;
#endif

    /* Adjust the number of free bytes. */
    Memory->freeBytes -= node->VidMem.bytes;

    /* Release the mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));

    /* Return the pointer to the node. */
    *Node = node;

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
                   "Allocated %u bytes @ 0x%x [0x%08X]",
                   node->VidMem.bytes, node, node->VidMem.offset);

    /* Success. */
    gcmkFOOTER_ARG("*Node=0x%x", *Node);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
     /* Release the mutex. */
        gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
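A minimal allocate/lock/unlock/free cycle against a gckVIDMEM heap, using the non-QNX signatures from this listing. gcvSURF_BITMAP is assumed to be one of the gceSURF_TYPE values, and a real caller would route an asynchronous unlock through the event queue rather than freeing immediately:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleLinearSurface(
    IN gckVIDMEM VideoMemory
    )
{
    gceSTATUS status;
    gcuVIDMEM_NODE_PTR node = gcvNULL;
    gctUINT32 address;
    gctBOOL asynchroneous;

    /* 1 MB, 64-byte aligned, banked as a bitmap surface. */
    gcmkONERROR(
        gckVIDMEM_AllocateLinear(VideoMemory,
                                 1 << 20,
                                 64,
                                 gcvSURF_BITMAP,
                                 &node));

    /* Resolve the hardware address of the node. */
    gcmkONERROR(gckVIDMEM_Lock(node, &address));

    /* ... program the GPU with `address` here ... */

    /* Simplified teardown; asynchronous unlocks are not handled here. */
    gcmkONERROR(gckVIDMEM_Unlock(node, gcvSURF_BITMAP, &asynchroneous));
    gcmkONERROR(gckVIDMEM_Free(node));

    return gcvSTATUS_OK;

OnError:
    return status;
}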
int drv_open(
    struct inode* inode,
    struct file* filp
    )
{
    gceSTATUS status;
    gctBOOL attached = gcvFALSE;
    gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
    gctINT i;

    gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN);

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    data->device             = galDevice;
    data->mappedMemory       = gcvNULL;
    data->contiguousLogical  = gcvNULL;
    gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));

    /* Attach the process. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (galDevice->kernels[i] != gcvNULL)
        {
            gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
        }
    }
    attached = gcvTRUE;

    if (!galDevice->contiguousMapped)
    {
        if (galDevice->contiguousPhysical != gcvNULL)
        {
            gcmkONERROR(gckOS_MapMemory(
                galDevice->os,
                galDevice->contiguousPhysical,
                galDevice->contiguousSize,
                &data->contiguousLogical
                ));
        }
    }

    filp->private_data = data;

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    if (data != gcvNULL)
    {
        if (data->contiguousLogical != gcvNULL)
        {
            gcmkVERIFY_OK(gckOS_UnmapMemory(
                galDevice->os,
                galDevice->contiguousPhysical,
                galDevice->contiguousSize,
                data->contiguousLogical
                ));
        }

        kfree(data);
    }

    if (attached)
    {
        for (i = 0; i < gcdMAX_GPU_COUNT; i++)
        {
            if (galDevice->kernels[i] != gcvNULL)
            {
                gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
            }
        }
    }

    gcmkFOOTER();
    return -ENOTTY;
}
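drv_open, drv_release, drv_ioctl and drv_mmap are the character device callbacks for the galcore node. A sketch of how they are typically wired together on a Linux build; the table name and exact field set are assumptions:

/* Illustrative sketch only -- not part of the original listing. */
static struct file_operations galcore_fops =
{
    .owner          = THIS_MODULE,
    .open           = drv_open,
    .release        = drv_release,
    .unlocked_ioctl = drv_ioctl,
    .mmap           = drv_mmap,
};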
Example #25
/*******************************************************************************
**
**  gckVGMMU_AllocatePages
**
**  Allocate pages inside the page table.
**
**  INPUT:
**
**      gckVGMMU Mmu
**          Pointer to an gckVGMMU object.
**
**      gctSIZE_T PageCount
**          Number of pages to allocate.
**
**  OUTPUT:
**
**      gctPOINTER * PageTable
**          Pointer to a variable that receives the base address of the page
**          table.
**
**      gctUINT32 * Address
**          Pointer to a variable that receives the hardware specific address.
*/
gceSTATUS gckVGMMU_AllocatePages(
    IN gckVGMMU Mmu,
    IN gctSIZE_T PageCount,
    OUT gctPOINTER * PageTable,
    OUT gctUINT32 * Address
    )
{
    gceSTATUS status;
    gctUINT32 tail, index, i;
    gctUINT32 * table;
    gctBOOL allocated = gcvFALSE;

    gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
        Mmu, PageCount, PageTable, Address);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
    gcmkVERIFY_ARGUMENT(PageCount > 0);
    gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
    gcmkVERIFY_ARGUMENT(Address != gcvNULL);

    gcmkTRACE_ZONE(
        gcvLEVEL_INFO, gcvZONE_MMU,
        "%s(%d): %u pages.\n",
        __FUNCTION__, __LINE__,
        PageCount
        );

    if (PageCount > Mmu->entryCount)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_MMU,
            "%s(%d): page table too small for %u pages.\n",
            __FUNCTION__, __LINE__,
            PageCount
            );

        gcmkFOOTER_NO();
        /* Not enough pages available. */
        return gcvSTATUS_OUT_OF_RESOURCES;
    }

    /* Grab the mutex. */
    status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);

    if (status < 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_MMU,
            "%s(%d): could not acquire mutex.\n"
            ,__FUNCTION__, __LINE__
            );

        gcmkFOOTER();
        /* Error. */
        return status;
    }

    /* Compute the tail for this allocation. */
    tail = Mmu->entryCount - PageCount;

    /* Walk all entries until we find enough slots. */
    for (index = Mmu->entry; index <= tail;)
    {
        /* Access page table. */
        table = (gctUINT32 *) Mmu->pageTableLogical + index;

        /* See if all slots are available. */
        for (i = 0; i < PageCount; i++, table++)
        {
            if (*table != ~0)
            {
                /* Start from next slot. */
                index += i + 1;
                break;
            }
        }

        if (i == PageCount)
        {
            /* Bail out if we have enough page entries. */
            allocated = gcvTRUE;
            break;
        }
    }

    if (!allocated)
    {
        if (status >= 0)
        {
            /* Walk all entries until we find enough slots. */
            for (index = 0; index <= tail;)
            {
                /* Access page table. */
                table = (gctUINT32 *) Mmu->pageTableLogical + index;

                /* See if all slots are available. */
                for (i = 0; i < PageCount; i++, table++)
                {
                    if (*table != ~0)
                    {
                        /* Start from next slot. */
                        index += i + 1;
                        break;
                    }
                }

                if (i == PageCount)
                {
                    /* Bail out if we have enough page entries. */
                    allocated = gcvTRUE;
                    break;
                }
            }
        }
    }

    if (!allocated && (status >= 0))
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_MMU,
            "%s(%d): not enough free pages for %u pages.\n",
            __FUNCTION__, __LINE__,
            PageCount
            );

        /* Not enough empty slots available. */
        status = gcvSTATUS_OUT_OF_RESOURCES;
    }

    if (status >= 0)
    {
        /* Build virtual address. */
        status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
                                                 index,
                                                 0,
                                                 Address);

        if (status >= 0)
        {
            /* Update current entry into page table. */
            Mmu->entry = index + PageCount;

            /* Return pointer to page table. */
            *PageTable = (gctUINT32 *)  Mmu->pageTableLogical + index;

            gcmkTRACE_ZONE(
                gcvLEVEL_INFO, gcvZONE_MMU,
                "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
                __FUNCTION__, __LINE__,
                PageCount,
                index,
                *Address,
                *PageTable
                );
        }
    }

    /* Release the mutex. */
    gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
    gcmkFOOTER();

    /* Return status. */
    return status;
}
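A minimal sketch of a caller of gckVGMMU_AllocatePages: reserve a run of entries, then fill each returned slot. The allocator treats ~0 as a free entry, so writing anything else marks a slot as in use; the entry values written below are placeholders:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleMapPages(
    IN gckVGMMU Mmu,
    IN gctSIZE_T PageCount
    )
{
    gceSTATUS status;
    gctUINT32 * pageTable = gcvNULL;
    gctUINT32 address = 0;
    gctSIZE_T i;

    /* Reserve PageCount consecutive page table entries. */
    gcmkONERROR(
        gckVGMMU_AllocatePages(Mmu,
                               PageCount,
                               (gctPOINTER *) &pageTable,
                               &address));

    /* Each entry would normally receive the physical address of one page. */
    for (i = 0; i < PageCount; i++)
    {
        pageTable[i] = 0;
    }

    return gcvSTATUS_OK;

OnError:
    return status;
}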
int drv_release(
    struct inode* inode,
    struct file* filp
    )
{
    gceSTATUS status;
    gcsHAL_PRIVATE_DATA_PTR data;
    gckGALDEVICE device;
    gctINT i;

    gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    if (!device->contiguousMapped)
    {
        if (data->contiguousLogical != gcvNULL)
        {
            gcmkONERROR(gckOS_UnmapMemoryEx(
                galDevice->os,
                galDevice->contiguousPhysical,
                galDevice->contiguousSize,
                data->contiguousLogical,
                data->pidOpen
                ));

            data->contiguousLogical = gcvNULL;
        }
    }

    /* Detach the process. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (galDevice->kernels[i] != gcvNULL)
        {
            gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
        }
    }

    kfree(data);
    filp->private_data = NULL;

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    gcmkFOOTER();
    return -ENOTTY;
}
static gceSTATUS
_CompactKernelHeap(
    IN gckHEAP Heap
    )
{
    gcskHEAP_PTR heap, next;
    gctPOINTER p;
    gcskHEAP_PTR freeList = gcvNULL;

    gcmkHEADER_ARG("Heap=0x%x", Heap);

    /* Walk all the heaps. */
    for (heap = Heap->heap; heap != gcvNULL; heap = next)
    {
        gcskNODE_PTR lastFree = gcvNULL;

        /* Zero out the free list. */
        heap->freeList = gcvNULL;

        /* Start at the first node. */
        for (p = (gctUINT8_PTR) (heap + 1);;)
        {
            /* Convert the pointer. */
            gcskNODE_PTR node = (gcskNODE_PTR) p;

            gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));

            /* Test if this node is not used. */
            if (node->next != gcdIN_USE)
            {
                /* Test if this is the end of the heap. */
                if (node->bytes == 0)
                {
                    break;
                }

                /* Test if this is the first free node. */
                else if (lastFree == gcvNULL)
                {
                    /* Initialize the free list. */
                    heap->freeList = node;
                    lastFree       = node;
                }

                else
                {
                    /* Test if this free node is contiguous with the previous
                    ** free node. */
                    if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
                    {
                        /* Just increase the size of the previous free node. */
                        lastFree->bytes += node->bytes;
                    }
                    else
                    {
                        /* Add to linked list. */
                        lastFree->next = node;
                        lastFree       = node;
                    }
                }
            }

            /* Move to next node. */
            p = (gctUINT8_PTR) node + node->bytes;
        }

        /* Mark the end of the chain. */
        if (lastFree != gcvNULL)
        {
            lastFree->next = gcvNULL;
        }

        /* Get next heap. */
        next = heap->next;

        /* Check if the entire heap is free. */
        if ((heap->freeList != gcvNULL)
        &&  (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
        )
        {
            /* Remove the heap from the linked list. */
            if (heap->prev == gcvNULL)
            {
                Heap->heap = next;
            }
            else
            {
                heap->prev->next = next;
            }

            if (heap->next != gcvNULL)
            {
                heap->next->prev = heap->prev;
            }

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
            /* Update profiling. */
            Heap->heapCount  -= 1;
            Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
#endif

            /* Add this heap to the list of heaps that need to be freed. */
            heap->next = freeList;
            freeList   = heap;
        }
    }

    if (freeList != gcvNULL)
    {
        /* Release the mutex to remove any chance of a deadlock. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));

        /* Free all heaps in the free list. */
        for (heap = freeList; heap != gcvNULL; heap = next)
        {
            /* Get pointer to the next heap. */
            next = heap->next;

            /* Free the heap. */
            gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                           "Freeing heap 0x%x (%lu bytes)",
                           heap, heap->size + gcmSIZEOF(gcskHEAP));
            gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
        }

        /* Acquire the mutex again. */
        gcmkVERIFY_OK(
            gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
    }

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
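The compaction pass above rebuilds each heap's free list in one linear walk and merges free nodes that touch. A standalone sketch of the adjacency test it relies on (plain C types, not driver types; names are illustrative):

/* Illustrative sketch only -- not part of the original listing. */
#include <stdint.h>
#include <stddef.h>

struct example_node
{
    size_t bytes;                  /* node size, header included          */
    struct example_node * next;    /* next free node, or an in-use marker */
};

/* Two free nodes coalesce exactly when the first one ends where the second
** one begins, mirroring `(gctUINT8_PTR) lastFree + lastFree->bytes == p`
** in the loop above. */
static int
example_is_adjacent(
    const struct example_node * prev,
    const struct example_node * node
    )
{
    return (const uint8_t *) prev + prev->bytes == (const uint8_t *) node;
}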
long drv_ioctl(
    struct file* filp,
    unsigned int ioctlCode,
    unsigned long arg
    )
{
    gceSTATUS status;
    gcsHAL_INTERFACE iface;
    gctUINT32 copyLen;
    DRIVER_ARGS drvArgs;
    gckGALDEVICE device;
    gcsHAL_PRIVATE_DATA_PTR data;
    gctINT32 i, count;
    gckVIDMEM_NODE nodeObject;

    gcmkHEADER_ARG(
        "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
        filp, ioctlCode, arg
        );

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
    &&  (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): unknown command %d\n",
            __FUNCTION__, __LINE__,
            ioctlCode
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Get the drvArgs. */
    copyLen = copy_from_user(
        &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of the input arguments.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Now bring in the gcsHAL_INTERFACE structure. */
    if ((drvArgs.InputBufferSize  != sizeof(gcsHAL_INTERFACE))
    ||  (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
    )
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): input or/and output structures are invalid.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    copyLen = copy_from_user(
        &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of input HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    if (iface.command == gcvHAL_CHIP_INFO)
    {
        count = 0;
        for (i = 0; i < gcdMAX_GPU_COUNT; i++)
        {
            if (device->kernels[i] != gcvNULL)
            {
#if gcdENABLE_VG
                if (i == gcvCORE_VG)
                {
                    iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
                }
                else
#endif
                {
                    gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
                                                      &iface.u.ChipInfo.types[count]));
                }
                count++;
            }
        }

        iface.u.ChipInfo.count = count;
        iface.status = status = gcvSTATUS_OK;
    }
    else
    {
        if (iface.hardwareType > 7)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): unknown hardwareType %d\n",
                __FUNCTION__, __LINE__,
                iface.hardwareType
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

#if gcdENABLE_VG
        if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
        {
            status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
                                        (ioctlCode == IOCTL_GCHAL_INTERFACE),
                                        &iface);
        }
        else
#endif
        {
            status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
                                        (ioctlCode == IOCTL_GCHAL_INTERFACE),
                                        &iface);
        }
    }

    /* Redo system call after pending signal is handled. */
    if (status == gcvSTATUS_INTERRUPTED)
    {
        gcmkFOOTER();
        return -ERESTARTSYS;
    }

    if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
    {
        gcuVIDMEM_NODE_PTR node;
        gctUINT32 processID;

        gckOS_GetProcessID(&processID);

        gcmkONERROR(gckVIDMEM_HANDLE_Lookup(device->kernels[device->coreMapping[iface.hardwareType]],
                                processID,
                                (gctUINT32)iface.u.LockVideoMemory.node,
                                &nodeObject));
        node = nodeObject->node;

        /* Special case for mapped memory. */
        if ((data->mappedMemory != gcvNULL)
        &&  (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
        )
        {
            /* Compute offset into mapped memory. */
            gctUINT32 offset
                = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
                - (gctUINT8 *) device->contiguousBase;

            /* Compute offset into user-mapped region. */
            iface.u.LockVideoMemory.memory =
                gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
        }
    }

    /* Copy data back to the user. */
    copyLen = copy_to_user(
        gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
        );

    if (copyLen != 0)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): error copying of output HAL interface.\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    /* Success. */
    gcmkFOOTER_NO();
    return 0;

OnError:
    gcmkFOOTER();
    return -ENOTTY;
}
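drv_ioctl expects the ioctl argument to be a user-space DRIVER_ARGS whose input and output buffers are each exactly one gcsHAL_INTERFACE. A user-space sketch of that calling convention, assuming the user-mode HAL headers (gcsHAL_INTERFACE, DRIVER_ARGS, IOCTL_GCHAL_INTERFACE, gcmPTR_TO_UINT64) are available and that the device node is /dev/galcore (the conventional name, assumed here):

/* Illustrative sketch only -- not part of the original listing. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int
example_query_chip_info(void)
{
    gcsHAL_INTERFACE iface = {0};
    DRIVER_ARGS args = {0};
    int fd, rc;

    fd = open("/dev/galcore", O_RDWR);
    if (fd < 0)
    {
        return -1;
    }

    iface.command = gcvHAL_CHIP_INFO;

    /* Both sizes must equal sizeof(gcsHAL_INTERFACE); see drv_ioctl above. */
    args.InputBuffer      = gcmPTR_TO_UINT64(&iface);
    args.InputBufferSize  = sizeof(iface);
    args.OutputBuffer     = gcmPTR_TO_UINT64(&iface);
    args.OutputBufferSize = sizeof(iface);

    rc = ioctl(fd, IOCTL_GCHAL_INTERFACE, &args);

    close(fd);
    return rc;
}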
/*******************************************************************************
**
**  gckHEAP_Allocate
**
**  Allocate data from the heap.
**
**  INPUT:
**
**      gckHEAP Heap
**          Pointer to a gckHEAP object.
**
**      gctSIZE_T Bytes
**          Number of bytes to allocate.
**
**  OUTPUT:
**
**      gctPOINTER * Memory
**          Pointer to a variable that will hold the address of the allocated
**          memory.
*/
gceSTATUS
gckHEAP_Allocate(
    IN gckHEAP Heap,
    IN gctSIZE_T Bytes,
    OUT gctPOINTER * Memory
    )
{
    gctBOOL acquired = gcvFALSE;
    gcskHEAP_PTR heap;
    gceSTATUS status;
    gctSIZE_T bytes;
    gcskNODE_PTR node, used, prevFree = gcvNULL;
    gctPOINTER memory = gcvNULL;

    gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
    gcmkVERIFY_ARGUMENT(Bytes > 0);
    gcmkVERIFY_ARGUMENT(Memory != gcvNULL);

    /* Determine number of bytes required for a node. */
    bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Check if this allocation is bigger than the default allocation size. */
    if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
    {
        /* Adjust allocation size. */
        Heap->allocationSize = bytes * 2;
    }

    else if (Heap->heap != gcvNULL)
    {
        gctINT i;

        /* 2 retries, since we might need to compact. */
        for (i = 0; i < 2; ++i)
        {
            /* Walk all the heaps. */
            for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
            {
                /* Check if this heap has enough bytes to hold the request. */
                if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
                {
                    prevFree = gcvNULL;

                    /* Walk the chain of free nodes. */
                    for (node = heap->freeList;
                         node != gcvNULL;
                         node = node->next
                    )
                    {
                        gcmkASSERT(node->next != gcdIN_USE);

                        /* Check if this free node has enough bytes. */
                        if (node->bytes >= bytes)
                        {
                            /* Use the node. */
                            goto UseNode;
                        }

                        /* Save current free node for linked list management. */
                        prevFree = node;
                    }
                }
            }

            if (i == 0)
            {
                /* Compact the heap. */
                gcmkVERIFY_OK(_CompactKernelHeap(Heap));

#if gcmIS_DEBUG(gcdDEBUG_CODE)
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "===== KERNEL HEAP =====");
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of allocations           : %12u",
                               Heap->allocCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of bytes allocated       : %12llu",
                               Heap->allocBytes);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum allocation size         : %12llu",
                               Heap->allocBytesMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Total number of bytes allocated : %12llu",
                               Heap->allocBytesTotal);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Number of heaps                 : %12u",
                               Heap->heapCount);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Heap memory in bytes            : %12llu",
                               Heap->heapMemory);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum number of heaps         : %12u",
                               Heap->heapCountMax);
                gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
                               "Maximum heap memory in bytes    : %12llu",
                               Heap->heapMemoryMax);
#endif
            }
        }
    }

    /* Release the mutex. */
    gcmkONERROR(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    acquired = gcvFALSE;

    /* Allocate a new heap. */
    gcmkONERROR(
        gckOS_AllocateMemory(Heap->os,
                             Heap->allocationSize,
                             &memory));

    gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
                   "Allocated heap 0x%x (%lu bytes)",
                   memory, Heap->allocationSize);

    /* Acquire the mutex. */
    gcmkONERROR(
        gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));

    acquired = gcvTRUE;

    /* Use the allocated memory as the heap. */
    heap = (gcskHEAP_PTR) memory;

    /* Insert this heap to the head of the chain. */
    heap->next = Heap->heap;
    heap->prev = gcvNULL;
    heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);

    if (heap->next != gcvNULL)
    {
        heap->next->prev = heap;
    }
    Heap->heap = heap;

    /* Mark the end of the heap. */
    node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
                          + Heap->allocationSize
                          - gcmSIZEOF(gcskNODE)
                          );
    node->bytes = 0;
    node->next  = gcvNULL;

    /* Create a free list. */
    node           = (gcskNODE_PTR) (heap + 1);
    heap->freeList = node;

    /* Initialize the free list. */
    node->bytes = heap->size - gcmSIZEOF(gcskNODE);
    node->next  = gcvNULL;

    /* No previous free. */
    prevFree = gcvNULL;

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profiling. */
    Heap->heapCount  += 1;
    Heap->heapMemory += Heap->allocationSize;

    if (Heap->heapCount > Heap->heapCountMax)
    {
        Heap->heapCountMax = Heap->heapCount;
    }
    if (Heap->heapMemory > Heap->heapMemoryMax)
    {
        Heap->heapMemoryMax = Heap->heapMemory;
    }
#endif

UseNode:
    /* Verify some stuff. */
    gcmkASSERT(heap != gcvNULL);
    gcmkASSERT(node != gcvNULL);
    gcmkASSERT(node->bytes >= bytes);

    if (heap->prev != gcvNULL)
    {
        /* Unlink the heap from the linked list. */
        heap->prev->next = heap->next;
        if (heap->next != gcvNULL)
        {
            heap->next->prev = heap->prev;
        }

        /* Move the heap to the front of the list. */
        heap->next       = Heap->heap;
        heap->prev       = gcvNULL;
        Heap->heap       = heap;
        heap->next->prev = heap;
    }

    /* Check if there is enough free space left after usage for another free
    ** node. */
    if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
    {
        /* Allocate the used space from the back of the free node. */
        used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);

        /* Adjust the number of free bytes. */
        node->bytes -= bytes;
        gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
    }
    else
    {
        /* Remove this free list from the chain. */
        if (prevFree == gcvNULL)
        {
            heap->freeList = node->next;
        }
        else
        {
            prevFree->next = node->next;
        }

        /* Consume the entire free node. */
        used  = (gcskNODE_PTR) node;
        bytes = node->bytes;
    }

    /* Mark node as used. */
    used->bytes     = bytes;
    used->next      = gcdIN_USE;
#if gcmIS_DEBUG(gcdDEBUG_CODE)
    used->timeStamp = ++Heap->timeStamp;
#endif

#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
    /* Update profile counters. */
    Heap->allocCount      += 1;
    Heap->allocBytes      += bytes;
    Heap->allocBytesMax    = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
    Heap->allocBytesTotal += bytes;
#endif

    /* Release the mutex. */
    gcmkVERIFY_OK(
        gckOS_ReleaseMutex(Heap->os, Heap->mutex));

    /* Return pointer to memory. */
    *Memory = used + 1;

    /* Success. */
    gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
    return gcvSTATUS_OK;

OnError:
    if (acquired)
    {
        /* Release the mutex. */
        gcmkVERIFY_OK(
            gckOS_ReleaseMutex(Heap->os, Heap->mutex));
    }

    if (memory != gcvNULL)
    {
        /* Free the heap memory. */
        gckOS_FreeMemory(Heap->os, memory);
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
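A minimal allocate/free round trip on the kernel heap; gckHEAP_Free is assumed to be the matching release entry point (only the allocator appears in this listing), and the helper name is illustrative:

/* Illustrative sketch only -- not part of the original listing. */
static gceSTATUS
_ExampleHeapUse(
    IN gckHEAP Heap
    )
{
    gceSTATUS status;
    gctPOINTER buffer = gcvNULL;

    /* 256 bytes; the heap rounds the request up to an 8-byte aligned node. */
    gcmkONERROR(gckHEAP_Allocate(Heap, 256, &buffer));

    /* ... use the buffer ... */

    /* Assumed counterpart of gckHEAP_Allocate. */
    gcmkONERROR(gckHEAP_Free(Heap, buffer));

    return gcvSTATUS_OK;

OnError:
    return status;
}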
static int drv_mmap(
    struct file* filp,
    struct vm_area_struct* vma
    )
{
    gceSTATUS status = gcvSTATUS_OK;
    gcsHAL_PRIVATE_DATA_PTR data;
    gckGALDEVICE device;

    gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);

    if (filp == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): filp is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    data = filp->private_data;

    if (data == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): private_data is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

    device = data->device;

    if (device == gcvNULL)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_ERROR, gcvZONE_DRIVER,
            "%s(%d): device is NULL\n",
            __FUNCTION__, __LINE__
            );

        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

#if !gcdPAGED_MEMORY_CACHEABLE
    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    vma->vm_flags    |= gcdVM_FLAGS;
#endif
    vma->vm_pgoff     = 0;

    if (device->contiguousMapped)
    {
        unsigned long size = vma->vm_end - vma->vm_start;
        int ret = 0;

/*####modified for marvell-bg2*/
        if (size > device->contiguousSize)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Invalid mapping size. size (%d) too large >= %d\n",
                __FUNCTION__, __LINE__,
                size, device->contiguousSize
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }
/*####end for marvell-bg2*/

        ret = io_remap_pfn_range(
            vma,
            vma->vm_start,
            device->requestedContiguousBase >> PAGE_SHIFT,
            size,
            vma->vm_page_prot
            );

        if (ret != 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): io_remap_pfn_range failed %d\n",
                __FUNCTION__, __LINE__,
                ret
                );

            data->mappedMemory = gcvNULL;

            gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
        }

        data->mappedMemory = (gctPOINTER) vma->vm_start;

        /* Success. */
        gcmkFOOTER_NO();
        return 0;
    }