Example no. 1
/**
 *  @brief Closes the ODM device and destroys all allocated resources.
 *  @param hOdmVibrate  [IN] Opaque handle to the device.
 *  @return None.
 */
void NvOdmVibClose(NvOdmVibDeviceHandle hOdmVibrate)
{
#if 1	/* yuyang(20100615):Close I2C handle */
    if (hOdmVibrate != NULL)
    {
        NvOdmServicesPmuClose(hOdmVibrate->hOdmServicePmuDevice);
        hOdmVibrate->hOdmServicePmuDevice = NULL;

        hOdmVibrate->VddId  = 0;
        hOdmVibrate->DeviceAddr = 0;

        if (hOdmVibrate->hOdmI2c)
            NvOdmI2cClose(hOdmVibrate->hOdmI2c);

        NvOsMemset(&hOdmVibrate->RailCaps, 0, sizeof(NvOdmServicesPmuVddRailCapabilities));

        NvOdmOsFree(hOdmVibrate);
        hOdmVibrate = NULL;
    }
#else
    if (hOdmVibrate != NULL)
    {
        NvOdmServicesPmuClose(hOdmVibrate->hOdmServicePmuDevice);
        hOdmVibrate->hOdmServicePmuDevice = NULL;

        hOdmVibrate->VddId  = 0;

        NvOsMemset(&hOdmVibrate->RailCaps, 0, sizeof(NvOdmServicesPmuVddRailCapabilities));

        NvOdmOsFree(hOdmVibrate);
        hOdmVibrate = NULL;
    }
#endif	/* __yuyang(20100615) */
}
Example no. 2
bool tegra_udc_charger_detection(void)
{
	NvDdkUsbPhyIoctl_DedicatedChargerDetectionInputArgs Charger;
	NvDdkUsbPhyIoctl_DedicatedChargerStatusOutputArgs Status;

	/* clear the input args */
	NvOsMemset(&Charger, 0, sizeof(Charger));
	/* enable the charger detection logic */
	Charger.EnableChargerDetection = NV_TRUE;
	NV_ASSERT_SUCCESS(NvDdkUsbPhyIoctl(
		s_hUsbPhy,
		NvDdkUsbPhyIoctlType_DedicatedChargerDetection,
		&Charger,
		NULL));
	/* get the charger detection status */
	NV_ASSERT_SUCCESS(NvDdkUsbPhyIoctl(
		s_hUsbPhy,
		NvDdkUsbPhyIoctlType_DedicatedChargerStatus,
		NULL,
		&Status));
	/* disable the charger detection */
	Charger.EnableChargerDetection = NV_FALSE;
	NV_ASSERT_SUCCESS(NvDdkUsbPhyIoctl(
		s_hUsbPhy,
		NvDdkUsbPhyIoctlType_DedicatedChargerDetection,
		&Charger,
		NULL));

	return Status.ChargerDetected;
}
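A hypothetical caller sketch (not part of the original driver) showing how the detection result might be consumed; the helper name and the current-limit values are illustrative assumptions only.

/* Hypothetical helper: pick a charge-current limit based on the detection
 * result above. The milliamp values are illustrative assumptions. */
static unsigned int tegra_udc_pick_max_current_ma(void)
{
	if (tegra_udc_charger_detection())
		return 1800;	/* dedicated (wall) charger detected */

	return 500;		/* standard downstream port: USB 2.0 default */
}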
Example no. 3
void NvRmPrivDfsGetBusyHint(
    NvRmDfsClockId ClockId,
    NvRmFreqKHz* pBusyKHz,
    NvBool* pBusyPulseMode,
    NvU32* pBusyExpireMs)
{
    NvU32 msec;
    BusyHintReq* pBusyHintReq;

    NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));

    // Boolean read - no need for lock - fast path for most common case
    // when no busy hints are recorded
    if (s_BusyReqHeads[ClockId].BoostKHz == 0)
    {
        *pBusyKHz = 0;
        *pBusyPulseMode = NV_FALSE;
        *pBusyExpireMs = 0;
        return;
    }
    msec = NvOsGetTimeMS();

    NvOsMutexLock(s_hPowerClientMutex);
    /*
     * Get boost frequency from the head. Then, traverse busy hints list,
     * starting from the head looking for max non-expired frequency boost.
     * Remove expired nodes on the way. Update head boost frequency.
     */
    pBusyHintReq = &s_BusyReqHeads[ClockId];
    *pBusyKHz = pBusyHintReq->BoostKHz;
    *pBusyPulseMode = pBusyHintReq->BusyPulseMode;
    *pBusyExpireMs = 0;     // assume head hint has already expired
    if (pBusyHintReq->IntervalMs == NV_WAIT_INFINITE)
        *pBusyExpireMs = NV_WAIT_INFINITE;  // head hint until canceled
    else if (pBusyHintReq->IntervalMs >= (msec - pBusyHintReq->StartTimeMs))
        *pBusyExpireMs =                    // non-expired head hint
        pBusyHintReq->IntervalMs - (msec - pBusyHintReq->StartTimeMs);

    pBusyHintReq = pBusyHintReq->pNext;
    while (pBusyHintReq != NULL)
    {
        BusyHintReq* p;
        if (pBusyHintReq->IntervalMs >= (msec - pBusyHintReq->StartTimeMs))
        {
            break;
        }
        p = pBusyHintReq;
        pBusyHintReq = pBusyHintReq->pNext;
        BusyReqFree(p);
    }
    if (pBusyHintReq)
    {
        s_BusyReqHeads[ClockId] = *pBusyHintReq;
        s_BusyReqHeads[ClockId].pNext = pBusyHintReq;
    }
    else
        NvOsMemset(&s_BusyReqHeads[ClockId], 0, sizeof(s_BusyReqHeads[ClockId]));
    NvOsMutexUnlock(s_hPowerClientMutex);
}
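A sketch of how a DFS sampling step might consume these outputs (hypothetical caller; NvRmDfsClockId_Cpu and the NvRm types come from the RM headers, and the clamping policy shown is only an assumption).

static NvRmFreqKHz ApplyCpuBusyHint(NvRmFreqKHz RequestedKHz)
{
    NvRmFreqKHz BusyKHz = 0;
    NvBool BusyPulseMode = NV_FALSE;
    NvU32 BusyExpireMs = 0;

    NvRmPrivDfsGetBusyHint(NvRmDfsClockId_Cpu,
                           &BusyKHz, &BusyPulseMode, &BusyExpireMs);

    // A non-expired busy hint keeps the domain at or above its boost level
    if ((BusyKHz != 0) && (BusyExpireMs != 0) && (RequestedKHz < BusyKHz))
        return BusyKHz;
    return RequestedKHz;
}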
NvError NvRmPrivPmuInit(NvRmDeviceHandle hRmDevice)
{
    NvError e;
    ExecPlatform env;
    NvOdmPmuProperty PmuProperty;

    NV_ASSERT(hRmDevice);
    env = NvRmPrivGetExecPlatform(hRmDevice);

    NvOsMemset(&s_Pmu, 0, sizeof(NvRmPmu));
    s_PmuSupportedEnv = NV_FALSE;

    if (env == ExecPlatform_Soc)
    {
        // Set supported environment flag
        s_PmuSupportedEnv = NV_TRUE;

        // Create the PMU mutex, semaphore, interrupt handler thread,
        // register PMU interrupt, and get ODM PMU handle
        NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_Pmu.hMutex));
        NV_CHECK_ERROR_CLEANUP(NvOsSemaphoreCreate(&s_Pmu.hSemaphore, 0));

        if (NvOdmQueryGetPmuProperty(&PmuProperty) && PmuProperty.IrqConnected)
        {
            if (hRmDevice->ChipId.Id >= 0x20)
                NvRmPrivAp20SetPmuIrqPolarity(
                    hRmDevice, PmuProperty.IrqPolarity);
            else
                NV_ASSERT(PmuProperty.IrqPolarity ==
                          NvOdmInterruptPolarity_Low);
            {
                NvOsInterruptHandler hPmuIsr = PmuIsr;
                NvU32 PmuExtIrq = NvRmGetIrqForLogicalInterrupt(
                    hRmDevice, NVRM_MODULE_ID(NvRmPrivModuleID_PmuExt, 0), 0);
                NV_CHECK_ERROR_CLEANUP(NvRmInterruptRegister(hRmDevice, 1,
                    &PmuExtIrq, &hPmuIsr, &s_Pmu, &s_Pmu.hInterrupt, NV_FALSE));
            }
        }

        if(!NvOdmPmuDeviceOpen(&s_Pmu.hOdmPmu))
        {
            e = NvError_NotInitialized;
            goto fail;
        }
        NV_CHECK_ERROR_CLEANUP(NvOsThreadCreate(PmuThread, &s_Pmu, &s_Pmu.hThread));
        NvRmPrivIoPowerControlInit(hRmDevice);
        NvRmPrivCoreVoltageInit(hRmDevice);
    }
    return NvSuccess;

fail:
    NvRmPrivPmuDeinit(hRmDevice);
    return e;
}
Example no. 5
NvError NvRmPrivPowerInit(NvRmDeviceHandle hRmDeviceHandle)
{
    NvU32 i;
    NvError e;

    NV_ASSERT(hRmDeviceHandle);

    // Initialize registry
    s_PowerRegistry.pPowerClients = NULL;
    s_PowerRegistry.AvailableEntries = 0;
    s_PowerRegistry.UsedIndexRange = 0;

    // Clear busy head pointers as well as starvation and power plane
    // reference counts. Although power plane references are cleared
    // here, the combined power state is not updated - it will be kept as
    // set by the boot code, until the 1st client requests power.
    NvOsMemset(s_BusyReqHeads, 0, sizeof(s_BusyReqHeads));
    NvOsMemset(s_StarveOnRefCounts, 0, sizeof(s_StarveOnRefCounts));
    NvOsMemset(s_PowerOnRefCounts, 0, sizeof(s_PowerOnRefCounts));

    // Initialize busy requests pool
    NvOsMemset(s_BusyReqPool, 0, sizeof(s_BusyReqPool));
    for (i = 0; i < NVRM_BUSYREQ_POOL_SIZE; i++)
        s_pFreeBusyReqPool[i] = &s_BusyReqPool[i];
    s_FreeBusyReqPoolSize = NVRM_BUSYREQ_POOL_SIZE;

    // Create the RM registry mutex and initialize RM/OAL interface
    s_hPowerClientMutex = NULL;
    NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_hPowerClientMutex));
    NV_CHECK_ERROR_CLEANUP(NvRmPrivOalIntfInit(hRmDeviceHandle));

    // Initialize power group control, and power gate SoC partitions
    NvRmPrivPowerGroupControlInit(hRmDeviceHandle);
    return NvSuccess;

fail:
    NvRmPrivOalIntfDeinit(hRmDeviceHandle);
    NvOsMutexDestroy(s_hPowerClientMutex);
    s_hPowerClientMutex = NULL;
    return e;
}
void NvRmPrivPmuDeinit(NvRmDeviceHandle hRmDevice)
{
    if (s_PmuSupportedEnv == NV_FALSE)
        return;

    PmuThreadTerminate(&s_Pmu);
    NvOdmPmuDeviceClose(s_Pmu.hOdmPmu);
    NvRmInterruptUnregister(hRmDevice, s_Pmu.hInterrupt);
    NvOsSemaphoreDestroy(s_Pmu.hSemaphore);
    NvOsMutexDestroy(s_Pmu.hMutex);

    NvOsMemset(&s_Pmu, 0, sizeof(NvRmPmu));
    s_PmuSupportedEnv = NV_FALSE;
}
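The three routines above pair into a bring-up/tear-down sequence. Below is a hypothetical sketch of an RM init path; the wrapper name and the exact ordering are assumptions, not taken from the source.

static NvError NvRmPrivInitPowerAndPmu(NvRmDeviceHandle hRmDevice)
{
    NvError e;

    // The power registry and busy-hint pool must exist before PMU clients use them
    e = NvRmPrivPowerInit(hRmDevice);
    if (e != NvSuccess)
        return e;

    // On failure NvRmPrivPmuInit() already unwinds via NvRmPrivPmuDeinit()
    return NvRmPrivPmuInit(hRmDevice);
}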
/**
 *  @brief Closes the ODM device and destroys all allocated resources.
 *  @param hOdmVibrate  [IN] Opaque handle to the device.
 *  @return None.
 */
void NvOdmVibClose(NvOdmVibDeviceHandle hOdmVibrate)
{
    if (hOdmVibrate != NULL)
    {
        NvOdmServicesPmuClose(hOdmVibrate->hOdmServicePmuDevice);
        hOdmVibrate->hOdmServicePmuDevice = NULL;

        hOdmVibrate->VddId  = 0;

        NvOsMemset(&hOdmVibrate->RailCaps, 0, sizeof(NvOdmServicesPmuVddRailCapabilities));

        NvOdmOsFree(hOdmVibrate);
        hOdmVibrate = NULL;
    }
}
Example no. 8
static void NvRmPrivChipFlavorInit(NvRmDeviceHandle hRmDevice)
{
    NvOsMemset((void*)&s_ChipFlavor, 0, sizeof(s_ChipFlavor));

    if (NvRmPrivChipShmooDataInit(hRmDevice, &s_ChipFlavor) == NvSuccess)
    {
        NvOsDebugPrintf("NVRM Initialized shmoo database\n");
        return;
    }
    if (NvRmBootArgChipShmooGet(hRmDevice, &s_ChipFlavor) == NvSuccess)
    {
        NvOsDebugPrintf("NVRM Got shmoo boot argument (at 0x%x)\n",
                        ((NvUPtr)s_pShmooData));
        return;
    }
    NV_ASSERT(!"Failed to set clock limits");
}
NvError
NvEcRegisterForEvents(
    NvEcHandle hEc,
    NvEcEventRegistrationHandle *phEcEventRegistration,
    NvOsSemaphoreHandle hSema,
    NvU32 NumEventTypes,
    NvEcEventType *pEventTypes,
    NvU32 NumEventPackets,
    NvU32 EventPacketSize)
{
    NvEcPrivState   *ec = hEc->ec;
    NvEcEventRegistration *h = NULL;
    NvOsSemaphoreHandle hSemaClone = NULL;
    NvError e = NvSuccess;
    NvU32   val, i, tag = hEc->tag;
    NvU32   tagMask = (1UL << tag);

    if ( !hSema || !pEventTypes )
        return NvError_BadParameter;

    if ( !NumEventTypes || (NumEventTypes > NvEcEventType_Num) )
        return NvError_InvalidSize;     // FIXME: is this sufficient?

    NV_ASSERT( phEcEventRegistration );

    NvOsMutexLock( ec->mutex );
    if ( !ec->thread )
        NvEcPrivThreadCreate( ec );

    // Allocate the common pool of internal event nodes if not already allocated
    if ( !ec->eventNodes )
    {
        val = NVEC_NUM_EVENT_PACKETS_DEFAULT;
        if ( NumEventPackets > val )
            val = NumEventPackets;
        ec->eventNodes = NvOsAlloc(val * sizeof(NvEcEventNode));
        if ( NULL == ec->eventNodes )
        {
            NvOsMutexUnlock( ec->mutex );
            return NvError_InsufficientMemory;
        }
        NvOsMemset( ec->eventNodes, 0, (val * sizeof(NvEcEventNode)) );
        for( i = 0; i < val - 1; i++ )
            ec->eventNodes[i].next = &ec->eventNodes[i+1];
        ec->eventFreeBegin = ec->eventNodes;
        ec->eventFreeEnd = ec->eventNodes + val - 1;
    }
    NvOsMutexUnlock( ec->mutex );

    NV_CHECK_ERROR( NvOsSemaphoreClone( hSema, &hSemaClone ) );

    NvOsMutexLock( ec->eventMutex );
    // Quick pre-check for the AlreadyAllocated case
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        if ( val >= NvEcEventType_Num )
            e = NvError_BadParameter;
        else if ( ec->eventMap[tag][val] )
            e = NvError_AlreadyAllocated;
        if ( NvSuccess != e )
            goto fail;
    }
    h = NvOsAlloc( sizeof(NvEcEventRegistration));
    if ( NULL == h )
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }

    NvOsMemset( h, 0, sizeof(NvEcEventRegistration) );
    NVEC_ENQ( ec->eventReg[tag].reg, h );

    // Fill up new registration handle
    NV_ASSERT( NvEcEventType_Num <= 32 );   // eventBitmap only works if <= 32
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        h->eventBitmap |= (1 << val);
        ec->eventMap[tag][val] = h;
        ec->eventTagBitmap[val] |= tagMask;
    }
    h->numEventTypes = NumEventTypes;
    h->sema = hSemaClone;
    h->hEc = hEc;

    h->numEventPacketsHint = NumEventPackets;
    h->eventPacketSizeHint = EventPacketSize;       // ignored hints for now

    NvOsMutexUnlock( ec->eventMutex );
    *phEcEventRegistration = h;
    return e;

fail:
    NvOsSemaphoreDestroy( hSemaClone );
    NvOsMutexUnlock( ec->eventMutex );
    NvOsFree( h );
    return e;
}
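A hypothetical wrapper for registering a single event type, sketched from the signature above; the caller supplies the NvEcEventType, and the zero packet hints rely on the fact that the implementation currently ignores them.

static NvError
RegisterOneEcEvent(
    NvEcHandle hEc,
    NvEcEventType Type,
    NvOsSemaphoreHandle *phSema,
    NvEcEventRegistrationHandle *phReg)
{
    NvError e;

    // Semaphore signalled by the EC driver when a matching event arrives
    e = NvOsSemaphoreCreate(phSema, 0);
    if (e != NvSuccess)
        return e;

    e = NvEcRegisterForEvents(hEc, phReg, *phSema, 1, &Type, 0, 0);
    if (e != NvSuccess)
    {
        NvOsSemaphoreDestroy(*phSema);
        *phSema = NULL;
    }
    return e;
}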
NvError
NvEcSendRequest(
    NvEcHandle hEc,
    NvEcRequest *pRequest,
    NvEcResponse *pResponse,
    NvU32 RequestSize,
    NvU32 ResponseSize)
{
    NvEcPrivState       *ec;
    NvError             e = NvSuccess;
    NvEcRequestNode     *requestNode = NULL;
    NvEcResponseNode    *responseNode = NULL;
    NvOsSemaphoreHandle requestSema = NULL;
    NvOsSemaphoreHandle responseSema = NULL;
    
    NV_ASSERT( pRequest );
    NV_ASSERT( hEc );
    if ( (RequestSize > sizeof(NvEcRequest)) || 
         (ResponseSize > sizeof(NvEcResponse)) )
        return NvError_InvalidSize;
    
    ec = hEc->ec;
    requestNode = NvOsAlloc(sizeof(NvEcRequestNode));
    if ( NULL == requestNode )
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }
    NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &requestSema, 0 ) );
    
    if ( pResponse )
    {
        responseNode = NvOsAlloc(sizeof(NvEcResponseNode));
        if ( NULL == responseNode )
        {
            e = NvError_InsufficientMemory;
            goto fail;
        }
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &responseSema, 0 ) );
    }

    ec->IsEcActive = NV_TRUE;

    // request en-queue.  Timeout set to infinite until request sent.
    NvOsMemset( requestNode, 0, sizeof(NvEcRequestNode) );
    pRequest->RequestorTag = hEc->tag;      // assigned tag here
    DISP_MESSAGE(("NvEcSendRequest:pRequest->RequestorTag=0x%x\n", pRequest->RequestorTag));
    NvOsMemcpy(&requestNode->request, pRequest, RequestSize);
    requestNode->tag = hEc->tag;
    DISP_MESSAGE(("NvEcSendRequest:requestNode->tag=0x%x\n", requestNode->tag));
    requestNode->sema = requestSema;
    requestNode->timeout = NV_WAIT_INFINITE;
    requestNode->completed = NV_FALSE;
    requestNode->size = RequestSize;
    
    NvOsMutexLock( ec->requestMutex );
    NVEC_ENQ( ec->request, requestNode );
    DISP_MESSAGE(("\r\nSendReq ec->requestBegin=0x%x", ec->requestBegin));
    NvOsMutexUnlock( ec->requestMutex );
    
    // response en-queue.  Timeout set to infinite until request completes.
    if ( pResponse )
    {
        NvOsMemset( responseNode, 0, sizeof(NvEcResponseNode) );
        requestNode->responseNode = responseNode;   // association between
        responseNode->requestNode = requestNode;    //   request & response
        responseNode->sema = responseSema;
        responseNode->timeout = NV_WAIT_INFINITE;
        responseNode->tag = hEc->tag;
        DISP_MESSAGE(("NvEcSendRequest:responseNode->tag=0x%x\n", responseNode->tag));
        responseNode->size = ResponseSize;
        NvOsMutexLock( ec->responseMutex );
        NVEC_ENQ( ec->response, responseNode );
        DISP_MESSAGE(("\r\nSendReq ec->responseBegin=0x%x", ec->responseBegin));
        NvOsMutexUnlock( ec->responseMutex );
    }

    NvOsMutexLock( ec->mutex );
    if ( !ec->thread )
        NvEcPrivThreadCreate( ec );
    NvOsMutexUnlock( ec->mutex );

    // Trigger EcPrivThread
    NvOsSemaphoreSignal( ec->sema );
    DISP_MESSAGE(("\r\nSendReq requestNode=0x%x, requestNode->responseNode=0x%x",
        requestNode, requestNode->responseNode));
    // Wait on Request returns
    NvOsSemaphoreWait( requestSema );
    DISP_MESSAGE(("\r\nSendReq Out of req sema"));

    e = requestNode->status;
    if ( NvSuccess != e )
    {
        NvEcResponseNode    *t = NULL, *p = NULL;

        // de-queue responseNode too !!!!
        NvOsMutexLock( ec->responseMutex );
        NVEC_REMOVE_FROM_Q( ec->response, responseNode, t, p );
        DISP_MESSAGE(("\r\nSendReq responseBegin=0x%x", ec->responseBegin));
        NvOsMutexUnlock( ec->responseMutex );
        goto fail;
    }

    if ( pResponse )
    {
        // Wait on Response returns
        NvOsSemaphoreWait( responseSema );
        DISP_MESSAGE(("\r\nSendReq Out of resp sema"));
        NV_CHECK_ERROR_CLEANUP( responseNode->status );
        NvOsMemcpy(pResponse, &responseNode->response, ResponseSize);
    }
    // if successful, nodes should be de-queued already but not freed yet

fail:
    NvOsSemaphoreDestroy( requestSema );
    NvOsSemaphoreDestroy( responseSema );
    DISP_MESSAGE(("\r\nSendReq Freeing requestNode=0x%x, responseNode=0x%x", 
        requestNode, responseNode));
    NvOsFree( requestNode );
    NvOsFree( responseNode );
    return e;
}
/**
 *  @brief Allocates a handle to the device. Configures the PWM
 *   control to the Vibro motor with default values. To change
 *   the amplitude and frequency use NvOdmVibrateSetParameter API.
 *  @param hOdmVibrate  [IN] Opaque handle to the device.
 *  @return  NV_TRUE on success and NV_FALSE on error
 */
NvBool
NvOdmVibOpen(NvOdmVibDeviceHandle *hOdmVibrate)
{
    const NvOdmPeripheralConnectivity *pConnectivity = NULL;
    NvU32 Index = 0;

    NV_ASSERT(hOdmVibrate);

    /* Allocate the handle */
    (*hOdmVibrate) = (NvOdmVibDeviceHandle)NvOdmOsAlloc(sizeof(NvOdmVibDevice));
    if (*hOdmVibrate == NULL)
    {
        NV_ODM_TRACE(("Error Allocating NvOdmPmuDevice. \n"));
        return NV_FALSE;
    }
    NvOsMemset((*hOdmVibrate), 0, sizeof(NvOdmVibDevice));
#if (defined(CONFIG_7546Y_V10))    /* HZJ ADD FOR VIBRATE */
    (*hOdmVibrate)->vibrate_gpio = NvOdmGpioOpen();
    if (!(*hOdmVibrate)->vibrate_gpio) {
        NV_ODM_TRACE("err open gpio vibrate hzj added\r\n");
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

    (*hOdmVibrate)->vibrate_pin = NvOdmGpioAcquirePinHandle((*hOdmVibrate)->vibrate_gpio, VIBRATE_DET_ENABLE_PORT, VIBRATE_DET_ENABLE_PIN);
    if (!(*hOdmVibrate)->vibrate_pin) {
        NV_ODM_TRACE("err acquire detect pin handle vibrate\r\n");
        NvOdmGpioClose((*hOdmVibrate)->vibrate_gpio);
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

    NvOdmGpioConfig((*hOdmVibrate)->vibrate_gpio, (*hOdmVibrate)->vibrate_pin, NvOdmGpioPinMode_Output);
    /* End HZJ added */
    (*hOdmVibrate)->vibrate_segpio = NvOdmGpioOpen();
    if (!(*hOdmVibrate)->vibrate_segpio) {
        NV_ODM_TRACE("err open gpio vibrate hzj added\r\n");
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

    (*hOdmVibrate)->vibrate_sepin = NvOdmGpioAcquirePinHandle((*hOdmVibrate)->vibrate_segpio, VIBRATE_SE_PORT, VIBRATE_SE_PIN);
    if (!(*hOdmVibrate)->vibrate_sepin) {
        NV_ODM_TRACE("err acquire detect pin handle vibrate\r\n");
        NvOdmGpioClose((*hOdmVibrate)->vibrate_segpio);
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

    NvOdmGpioConfig((*hOdmVibrate)->vibrate_segpio, (*hOdmVibrate)->vibrate_sepin, NvOdmGpioPinMode_Output);

#endif
    /* Get the PMU handle */
    (*hOdmVibrate)->hOdmServicePmuDevice = NvOdmServicesPmuOpen();
    if (!(*hOdmVibrate)->hOdmServicePmuDevice)
    {
        NV_ODM_TRACE(("Error Opening Pmu device. \n"));
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

        // Get the peripheral connectivity information
    pConnectivity = NvOdmPeripheralGetGuid(VIBRATE_DEVICE_GUID);
    if (pConnectivity == NULL)
        return NV_FALSE;

        // Search for the Vdd rail and set the proper voltage to the rail.
    for (Index = 0; Index < pConnectivity->NumAddress; ++Index)
    {
        if (pConnectivity->AddressList[Index].Interface == NvOdmIoModule_Vdd)
        {
            (*hOdmVibrate)->VddId = pConnectivity->AddressList[Index].Address;
            NvOdmServicesPmuGetCapabilities((*hOdmVibrate)->hOdmServicePmuDevice, (*hOdmVibrate)->VddId, &((*hOdmVibrate)->RailCaps));
            break;
        }
    }

    return NV_TRUE;
}
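A minimal open/close lifecycle sketch for the ODM vibrator entry points shown on this page (hypothetical caller; the NvOdmVibrateSetParameter call mentioned in the doc comment is omitted because its signature is not shown here).

static NvBool VibProbe(void)
{
    NvOdmVibDeviceHandle hVib = NULL;

    if (!NvOdmVibOpen(&hVib))
        return NV_FALSE;        // PMU open or rail lookup failed

    // ... set amplitude/frequency and pulse the motor here ...

    NvOdmVibClose(hVib);        // releases the PMU service and the handle itself
    return NV_TRUE;
}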
void
NvDdkUsbPhyClose(
    NvDdkUsbPhyHandle hUsbPhy)
{
    if (!hUsbPhy)
        return;

    NvOsMutexLock(s_UsbPhyMutex);

    if (!hUsbPhy->RefCount)
    {
        NvOsMutexUnlock(s_UsbPhyMutex);
        return;
    }

    --hUsbPhy->RefCount;

    if (hUsbPhy->RefCount)
    {
        NvOsMutexUnlock(s_UsbPhyMutex);
        return;
    }

    NvRmSetModuleTristate(
        hUsbPhy->hRmDevice,
        NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, hUsbPhy->Instance),
        NV_TRUE);

    NvOsMutexLock(hUsbPhy->ThreadSafetyMutex);
    if (hUsbPhy->RmPowerClientId)
    {
        if (hUsbPhy->IsPhyPoweredUp)
        {
            NV_ASSERT_SUCCESS(
                NvRmPowerModuleClockControl(hUsbPhy->hRmDevice,
                  NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, hUsbPhy->Instance),
                  hUsbPhy->RmPowerClientId,
                  NV_FALSE));

            //NvOsDebugPrintf("NvDdkUsbPhyClose::VOLTAGE OFF\n");
            NV_ASSERT_SUCCESS(
                NvRmPowerVoltageControl(hUsbPhy->hRmDevice,
                  NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, hUsbPhy->Instance),
                  hUsbPhy->RmPowerClientId,
                  NvRmVoltsOff, NvRmVoltsOff,
                  NULL, 0, NULL));
            hUsbPhy->IsPhyPoweredUp = NV_FALSE;
        }
        // Unregister driver from Power Manager
        NvRmPowerUnRegister(hUsbPhy->hRmDevice, hUsbPhy->RmPowerClientId);
        NvOsSemaphoreDestroy(hUsbPhy->hPwrEventSem);
    }
    NvOsMutexUnlock(hUsbPhy->ThreadSafetyMutex);

    NvOsMutexDestroy(hUsbPhy->ThreadSafetyMutex);

    if (hUsbPhy->CloseHwInterface)
    {
        hUsbPhy->CloseHwInterface(hUsbPhy);
    }

    if ((hUsbPhy->pProperty->UsbMode == NvOdmUsbModeType_Host) ||
        (hUsbPhy->pProperty->UsbMode == NvOdmUsbModeType_OTG))
    {
        UsbPrivEnableVbus(hUsbPhy, NV_FALSE);
    }

    NvOdmEnableUsbPhyPowerRail(NV_FALSE);

    NvRmPhysicalMemUnmap(
        (void*)hUsbPhy->UsbVirAdr, hUsbPhy->UsbBankSize);

    NvRmPhysicalMemUnmap(
        (void*)hUsbPhy->MiscVirAdr, hUsbPhy->MiscBankSize);

    NvOsMemset(hUsbPhy, 0, sizeof(NvDdkUsbPhy));
    NvOsMutexUnlock(s_UsbPhyMutex);
}
Example no. 13
void NvOdmOsMemset(void *s, NvU8 c, size_t size)
{
    NvOsMemset(s, c, size);
}
NvError
NvDdkUsbPhyOpen(
    NvRmDeviceHandle hRm,
    NvU32 Instance,
    NvDdkUsbPhyHandle *hUsbPhy)
{
    NvError e;
    NvU32 MaxInstances = 0;
    NvDdkUsbPhy *pUsbPhy = NULL;
    NvOsMutexHandle UsbPhyMutex = NULL;
    NvRmModuleInfo info[MAX_USB_INSTANCES];
    NvU32 j;

    NV_ASSERT(hRm);
    NV_ASSERT(hUsbPhy);
    NV_ASSERT(Instance < MAX_USB_INSTANCES);

    NV_CHECK_ERROR(NvRmModuleGetModuleInfo( hRm, NvRmModuleID_Usb2Otg, &MaxInstances, NULL ));
    if (MaxInstances > MAX_USB_INSTANCES)
    {
       // Cap MaxInstances at MAX_USB_INSTANCES
       MaxInstances = MAX_USB_INSTANCES;
    }
    NV_CHECK_ERROR(NvRmModuleGetModuleInfo( hRm, NvRmModuleID_Usb2Otg, &MaxInstances, info ));
    for (j = 0; j < MaxInstances; j++)
    {
        // Check whether the requested instance is present
        if (info[j].Instance == Instance)
            break;
    }
    // No match found; return error
    if (j == MaxInstances)
    {
        return NvError_ModuleNotPresent;
    }

    if (!s_UsbPhyMutex)
    {
        e = NvOsMutexCreate(&UsbPhyMutex);
        if (e!=NvSuccess)
            return e;

        if (NvOsAtomicCompareExchange32(
                (NvS32*)&s_UsbPhyMutex, 0, (NvS32)UsbPhyMutex)!=0)
        {
            NvOsMutexDestroy(UsbPhyMutex);
        }
    }

    NvOsMutexLock(s_UsbPhyMutex);
    if (!s_pUsbPhy)
    {
        s_pUsbPhy = NvOsAlloc(MaxInstances * sizeof(NvDdkUsbPhy));
        if (s_pUsbPhy)
            NvOsMemset(s_pUsbPhy, 0, MaxInstances * sizeof(NvDdkUsbPhy));
    }
    NvOsMutexUnlock(s_UsbPhyMutex);

    if (!s_pUsbPhy)
        return NvError_InsufficientMemory;

    NvOsMutexLock(s_UsbPhyMutex);
    if (!s_pUtmiPadConfig)
    {
        s_pUtmiPadConfig = NvOsAlloc(sizeof(NvDdkUsbPhyUtmiPadConfig));
        if (s_pUtmiPadConfig)
        {
            NvRmPhysAddr PhyAddr;

            NvOsMemset(s_pUtmiPadConfig, 0, sizeof(NvDdkUsbPhyUtmiPadConfig));
            NvRmModuleGetBaseAddress(
                hRm, 
                NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, 0),
                &PhyAddr, &s_pUtmiPadConfig->BankSize);

            NV_CHECK_ERROR_CLEANUP(
                NvRmPhysicalMemMap(
                    PhyAddr, s_pUtmiPadConfig->BankSize, NVOS_MEM_READ_WRITE,
                    NvOsMemAttribute_Uncached, (void **)&s_pUtmiPadConfig->pVirAdr));
        }
    }
    NvOsMutexUnlock(s_UsbPhyMutex);

    if (!s_pUtmiPadConfig)
        return NvError_InsufficientMemory;

    pUsbPhy = &s_pUsbPhy[Instance];

    NvOsMutexLock(s_UsbPhyMutex);
    if (!pUsbPhy->RefCount)
    {
        NvRmPhysAddr PhysAddr;
        NvOsMutexHandle ThreadSafetyMutex = NULL;

        NvOsMemset(pUsbPhy, 0, sizeof(NvDdkUsbPhy));
        pUsbPhy->Instance = Instance;
        pUsbPhy->hRmDevice = hRm;
        pUsbPhy->RefCount = 1;
        pUsbPhy->IsPhyPoweredUp = NV_FALSE;
        pUsbPhy->pUtmiPadConfig = s_pUtmiPadConfig;
        pUsbPhy->pProperty = NvOdmQueryGetUsbProperty(
                                    NvOdmIoModule_Usb, pUsbPhy->Instance);
        pUsbPhy->TurnOffPowerRail = UsbPhyTurnOffPowerRail(MaxInstances);

        NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&ThreadSafetyMutex));
        if (NvOsAtomicCompareExchange32(
                (NvS32*)&pUsbPhy->ThreadSafetyMutex, 0,
                (NvS32)ThreadSafetyMutex)!=0)
        {
            NvOsMutexDestroy(ThreadSafetyMutex);
        }

        NvRmModuleGetBaseAddress(
            pUsbPhy->hRmDevice,
            NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, pUsbPhy->Instance),
            &PhysAddr, &pUsbPhy->UsbBankSize);

        NV_CHECK_ERROR_CLEANUP(
            NvRmPhysicalMemMap(
                PhysAddr, pUsbPhy->UsbBankSize, NVOS_MEM_READ_WRITE,
                NvOsMemAttribute_Uncached, (void **)&pUsbPhy->UsbVirAdr));

        NvRmModuleGetBaseAddress(
            pUsbPhy->hRmDevice,
            NVRM_MODULE_ID(NvRmModuleID_Misc, 0),
            &PhysAddr, &pUsbPhy->MiscBankSize);

        NV_CHECK_ERROR_CLEANUP(
            NvRmPhysicalMemMap(
                PhysAddr, pUsbPhy->MiscBankSize, NVOS_MEM_READ_WRITE,
                NvOsMemAttribute_Uncached, (void **)&pUsbPhy->MiscVirAdr));

        if ( ( pUsbPhy->pProperty->UsbInterfaceType ==
               NvOdmUsbInterfaceType_UlpiNullPhy) ||
             ( pUsbPhy->pProperty->UsbInterfaceType ==
               NvOdmUsbInterfaceType_UlpiExternalPhy))
        {
            if (NvRmSetModuleTristate(
                    pUsbPhy->hRmDevice,
                    NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, pUsbPhy->Instance),
                    NV_FALSE) != NvSuccess )
               return NvError_NotSupported;
        }

        // Register with Power Manager
        NV_CHECK_ERROR_CLEANUP(
            NvOsSemaphoreCreate(&pUsbPhy->hPwrEventSem, 0));

        pUsbPhy->RmPowerClientId = NVRM_POWER_CLIENT_TAG('U','S','B','p');
        NV_CHECK_ERROR_CLEANUP(
            NvRmPowerRegister(pUsbPhy->hRmDevice,
            pUsbPhy->hPwrEventSem, &pUsbPhy->RmPowerClientId));

        // Open the H/W interface
        UsbPhyOpenHwInterface(pUsbPhy);

        // Initialize the USB Phy
        NV_CHECK_ERROR_CLEANUP(UsbPhyInitialize(pUsbPhy));
    }
    else
    {
        pUsbPhy->RefCount++;
    }

    *hUsbPhy = pUsbPhy;
    NvOsMutexUnlock(s_UsbPhyMutex);

    return NvSuccess;

fail:

    NvDdkUsbPhyClose(pUsbPhy);
    NvOsMutexUnlock(s_UsbPhyMutex);
    return e;
}
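An open/close pairing sketch for the reference-counted PHY handle (hypothetical caller; hRm is assumed to be an already opened NvRmDeviceHandle).

static NvError UsbPhyProbeInstance(NvRmDeviceHandle hRm, NvU32 Instance)
{
    NvDdkUsbPhyHandle hUsbPhy = NULL;
    NvError e;

    e = NvDdkUsbPhyOpen(hRm, Instance, &hUsbPhy);
    if (e != NvSuccess)
        return e;               // e.g. NvError_ModuleNotPresent

    // ... power up and use the PHY here ...

    // Drops the reference; resources are freed when the count reaches zero
    NvDdkUsbPhyClose(hUsbPhy);
    return NvSuccess;
}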
NvError
NvEcOpen(NvEcHandle *phEc,
         NvU32 InstanceId)
{
    NvEc            *hEc = NULL;
    NvU32           i;
    NvEcPrivState   *ec = &g_ec;
    NvOsMutexHandle mutex = NULL;
    NvError         e = NvSuccess;

    NV_ASSERT( phEc );

    if ( NULL == ec->mutex )
    {
        e = NvOsMutexCreate(&mutex);
        if (NvSuccess != e)
            return e;
        if (0 != NvOsAtomicCompareExchange32((NvS32*)&ec->mutex, 0,
                                                        (NvS32)mutex) )
            NvOsMutexDestroy( mutex );
    }

    NvOsMutexLock(ec->mutex);

    if ( !s_refcount )
    {
        mutex = ec->mutex;
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = mutex;
        
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->requestMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->responseMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->eventMutex ));
        
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->sema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerEntrySema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerExitSema, 0));
        
        NV_CHECK_ERROR_CLEANUP( NvEcTransportOpen( &ec->transport, InstanceId,
            ec->sema, 0 ) );
    }

    // Set this flag as TRUE to indicate power is enabled
    ec->powerState = NV_TRUE;

    // create private handle for internal communications between NvEc driver
    // and EC
    if ( !s_refcount )
    {
        ec->hEc = NvOsAlloc( sizeof(NvEc) );
        if ( NULL == ec->hEc )
            goto clean;
        
        // reserve the zero tag for internal use by the nvec driver; this ensures
        // that the driver always has a requestor tag available and can therefore
        // always talk to the EC
        ec->tagAllocated[0] = NV_TRUE;
        ec->hEc->ec = ec;
        ec->hEc->tag = 0;

        NV_CHECK_ERROR_CLEANUP(NvOsSemaphoreCreate(&ec->hPingSema, 0));

        // perform startup operations before mutex is unlocked
        NV_CHECK_ERROR_CLEANUP( NvEcPrivInitHook(ec->hEc) );

        // start thread to send "pings" - no-op commands to keep EC "alive"
        NV_CHECK_ERROR_CLEANUP(NvOsThreadCreate(
            (NvOsThreadFunction)NvEcPrivPingThread, ec, &ec->hPingThread));
    }

    hEc = NvOsAlloc( sizeof(NvEc) );
    if ( NULL == hEc )
        goto clean;

    NvOsMemset(hEc, 0x00, sizeof(NvEc));

    hEc->ec = ec;

    hEc->tag = NVEC_REQUESTOR_TAG_INVALID;
    for ( i = 0; i < NVEC_MAX_REQUESTOR_TAG; i++ )
    {
        if ( !ec->tagAllocated[i] )
        {
            ec->tagAllocated[i] = NV_TRUE;
            hEc->tag = i;
            break;
        }
    }
    if ( NVEC_REQUESTOR_TAG_INVALID == hEc->tag )
        goto clean;      // ran out of tags; clean up

    *phEc = hEc;
    s_refcount++;

    NvOsMutexUnlock( ec->mutex );

    ec->IsEcActive = NV_FALSE;

    return NvSuccess;

clean:
    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    return NvError_InsufficientMemory;

fail:
    if (!s_refcount)
    {
        ec->exitPingThread = NV_TRUE;
        if (ec->hPingSema)
            NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
        NvOsSemaphoreDestroy(ec->hPingSema);
        ec->exitThread = NV_TRUE;
        if (ec->sema)
            NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );
        NvOsFree( ec->hEc );
        if ( ec->transport )
            NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        if ( ec->mutex )
        {
            NvOsMutexUnlock( ec->mutex );
            // Destroying of this mutex here is not safe: if another thread is
            // waiting on this mutex, it can cause issues. We should have
            // serialized Init/DeInit calls for creating and destroying this mutex.
            NvOsMutexDestroy( ec->mutex );
            NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
            ec->mutex = NULL;
        }
    }
    return NvError_NotInitialized;
}
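A lifecycle sketch pairing NvEcOpen with NvEcClose (hypothetical caller; instance 0 is just an example value).

static NvError EcSmokeTest(void)
{
    NvEcHandle hEc = NULL;
    NvError e;

    e = NvEcOpen(&hEc, 0 /* InstanceId */);
    if (e != NvSuccess)
        return e;

    // ... issue NvEcSendRequest()/NvEcRegisterForEvents() calls here ...

    NvEcClose(hEc);             // recycles the requestor tag and drops the refcount
    return NvSuccess;
}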
Example no. 16
void
NvEcClose(NvEcHandle hEc)
{
    NvEcPrivState   *ec;
    NvBool          destroy = NV_FALSE;

    if ( NULL == hEc )
        return;

    NV_ASSERT( s_refcount );

    ec = hEc->ec;
    NvOsMutexLock( ec->mutex );

    // FIXME: handle client still with outstanding event types
    if ( !--s_refcount )
    {
        NvEcPrivDeinitHook(ec->hEc);

        NV_ASSERT( NULL == ec->eventReg[hEc->tag].regBegin &&
                    NULL == ec->eventReg[hEc->tag].regEnd );
        NV_ASSERT( NULL == ec->requestBegin && NULL == ec->requestEnd );
        NV_ASSERT( NULL == ec->responseBegin && NULL == ec->responseEnd );
#ifndef CONFIG_TEGRA_ODM_BETELGEUSE
        ec->exitPingThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
#endif
        ec->exitThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );

        NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
#ifndef CONFIG_TEGRA_ODM_BETELGEUSE
        NvOsSemaphoreDestroy( ec->hPingSema );
#endif
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        destroy = NV_TRUE;

        NvOsFree( ec->eventNodes );
        NvOsFree( ec->hEc );
    }

    // Set this flag as FALSE to indicate power is disabled
    //Daniel 20100723, if we change power state to NV_FALSE, we won't be able to suspend/poweroff it.
    //Is there any side effect ????? 
    //ec->powerState = NV_FALSE;

    NV_ASSERT( hEc->tag < NVEC_MAX_REQUESTOR_TAG );
    ec->tagAllocated[hEc->tag] = NV_FALSE;      // to be recycled

    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    if ( destroy )
    {
        NvOsMutexDestroy( ec->mutex );
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = NULL;
    }
}
Example no. 17
/**
 *  @brief Allocates a handle to the device. Configures the PWM
 *   control to the Vibro motor with default values. To change
 *   the amplitude and frequency use NvOdmVibrateSetParameter API.
 *  @param hOdmVibrate  [IN] Opaque handle to the device.
 *  @return  NV_TRUE on success and NV_FALSE on error
 */
NvBool
NvOdmVibOpen(NvOdmVibDeviceHandle *hOdmVibrate)
{

#if 1	/* yuyang(20100615):Create I2C handle */
    const NvOdmPeripheralConnectivity *pConnectivity = NULL;
    NvU32 Index = 0;
    NvU32 I2cInstance = 0;

    NV_ASSERT(hOdmVibrate);

    /* Allocate the handle */
    (*hOdmVibrate) = (NvOdmVibDeviceHandle)NvOdmOsAlloc(sizeof(NvOdmVibDevice));
    if (*hOdmVibrate == NULL)
    {
        NV_ODM_TRACE(("Error Allocating NvOdmPmuDevice. \n"));
        return NV_FALSE;
    }
    NvOsMemset((*hOdmVibrate), 0, sizeof(NvOdmVibDevice));

    /* Get the PMU handle */
    (*hOdmVibrate)->hOdmServicePmuDevice = NvOdmServicesPmuOpen();

    if (!(*hOdmVibrate)->hOdmServicePmuDevice)
    {
        NV_ODM_TRACE(("Error Opening Pmu device. \n"));
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

    // Get the peripheral connectivity information
    pConnectivity = NvOdmPeripheralGetGuid(VIBRATE_DEVICE_GUID);
    if (pConnectivity == NULL)
    {
        NV_ODM_TRACE(("Error pConnectivity NULL. \n"));
        return NV_FALSE;
    }

    for (Index = 0; Index < pConnectivity->NumAddress; ++Index)
    {
        switch (pConnectivity->AddressList[Index].Interface)
        {
            case NvOdmIoModule_I2c:
                (*hOdmVibrate)->DeviceAddr = (pConnectivity->AddressList[Index].Address);
                I2cInstance = pConnectivity->AddressList[Index].Instance;
                NV_ODM_TRACE("%s: hTouch->DeviceAddr = 0x%x, I2cInstance = %x\n", __func__, (*hOdmVibrate)->DeviceAddr, I2cInstance); 
                break;
            case NvOdmIoModule_Vdd:
                (*hOdmVibrate)->VddId = pConnectivity->AddressList[Index].Address;
                NvOdmServicesPmuGetCapabilities((*hOdmVibrate)->hOdmServicePmuDevice, (*hOdmVibrate)->VddId, &((*hOdmVibrate)->RailCaps));
                break;
            default:
                break;
        }
    }

    (*hOdmVibrate)->hOdmI2c = NvOdmI2cOpen(NvOdmIoModule_I2c_Pmu, I2cInstance);

    if (!(*hOdmVibrate)->hOdmI2c)
    {
        NV_ODM_TRACE(("NvOdm Touch : NvOdmI2cOpen Error \n"));
        return NV_FALSE;
    }
#else
    const NvOdmPeripheralConnectivity *pConnectivity = NULL;
    NvU32 Index = 0;

    NV_ASSERT(hOdmVibrate);

    /* Allocate the handle */
    (*hOdmVibrate) = (NvOdmVibDeviceHandle)NvOdmOsAlloc(sizeof(NvOdmVibDevice));
    if (*hOdmVibrate == NULL)
    {
        NV_ODM_TRACE(("Error Allocating NvOdmPmuDevice. \n"));
        return NV_FALSE;
    }
    NvOsMemset((*hOdmVibrate), 0, sizeof(NvOdmVibDevice));

    /* Get the PMU handle */
    (*hOdmVibrate)->hOdmServicePmuDevice = NvOdmServicesPmuOpen();
    if (!(*hOdmVibrate)->hOdmServicePmuDevice)
    {
        NV_ODM_TRACE(("Error Opening Pmu device. \n"));
        NvOdmOsFree(*hOdmVibrate);
        *hOdmVibrate = NULL;
        return NV_FALSE;
    }

        // Get the peripheral connectivity information
    pConnectivity = NvOdmPeripheralGetGuid(VIBRATE_DEVICE_GUID);
    if (pConnectivity == NULL)
        return NV_FALSE;

        // Search for the Vdd rail and set the proper voltage to the rail.
    for (Index = 0; Index < pConnectivity->NumAddress; ++Index)
    {
        if (pConnectivity->AddressList[Index].Interface == NvOdmIoModule_Vdd)
        {
            (*hOdmVibrate)->VddId = pConnectivity->AddressList[Index].Address;
            NvOdmServicesPmuGetCapabilities((*hOdmVibrate)->hOdmServicePmuDevice, (*hOdmVibrate)->VddId, &((*hOdmVibrate)->RailCaps));
            break;
        }
    }
#endif	/* __yuyang(20100615) */

    return NV_TRUE;
}
Example no. 18
NvError 
NvRmPwmOpen(
    NvRmDeviceHandle hDevice,
    NvRmPwmHandle *phPwm)
{
    NvError status = NvSuccess;
    NvU32 PwmPhysAddr = 0, i = 0, PmcPhysAddr = 0;
    NvRmModuleCapability caps[4];  
    NvRmModuleCapability *pCap = NULL;

    NV_ASSERT(hDevice);
    NV_ASSERT(phPwm);
    
    NvOsMutexLock(s_hPwmMutex);
    
    if (s_hPwm)
    {
        s_hPwm->RefCount++;
        goto exit;
    }

    // Allocate the memory for the pwm handle
    s_hPwm = NvOsAlloc(sizeof(NvRmPwm));
    if (!s_hPwm)
    {
        status = NvError_InsufficientMemory;
        goto fail;
    }
    NvOsMemset(s_hPwm, 0, sizeof(NvRmPwm));

    // Set the pwm handle parameters
    s_hPwm->RmDeviceHandle = hDevice;
    
    // Get the pwm physical and virtual base address
    NvRmModuleGetBaseAddress(hDevice,
            NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
            &PwmPhysAddr, &(s_hPwm->PwmBankSize));
    s_hPwm->PwmBankSize = PWM_BANK_SIZE;
    for (i = 0; i < NvRmPwmOutputId_Num-2; i++)
    {
        status = NvRmPhysicalMemMap(
            PwmPhysAddr + i*s_hPwm->PwmBankSize,
            s_hPwm->PwmBankSize, 
            NVOS_MEM_READ_WRITE,
            NvOsMemAttribute_Uncached,
            (void**)&s_hPwm->VirtualAddress[i]);
        if (status != NvSuccess)
        {
            NvOsFree(s_hPwm);
            goto fail;
        }
    }

    // Get the pmc physical and virtual base address
    NvRmModuleGetBaseAddress(hDevice,
            NVRM_MODULE_ID(NvRmModuleID_Pmif, 0),
            &PmcPhysAddr, &(s_hPwm->PmcBankSize));
    s_hPwm->PmcBankSize = PMC_BANK_SIZE;

    status = NvRmPhysicalMemMap(
            PmcPhysAddr,
            s_hPwm->PmcBankSize, 
            NVOS_MEM_READ_WRITE,
            NvOsMemAttribute_Uncached,
            (void**)&s_hPwm->VirtualAddress[NvRmPwmOutputId_Num-2]);
    if (status != NvSuccess)
    {
        NvOsFree(s_hPwm);
        goto fail;
    }

    caps[0].MajorVersion = 1;
    caps[0].MinorVersion = 0;
    caps[0].EcoLevel = 0;
    caps[0].Capability = &caps[0];

    caps[1].MajorVersion = 1;
    caps[1].MinorVersion = 1;
    caps[1].EcoLevel = 0;
    caps[1].Capability = &caps[1];

    caps[2].MajorVersion = 1;
    caps[2].MinorVersion = 2;
    caps[2].EcoLevel = 0;
    caps[2].Capability = &caps[2];

    caps[3].MajorVersion = 2;
    caps[3].MinorVersion = 0;
    caps[3].EcoLevel = 0;
    caps[3].Capability = &caps[3];

    NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(
        hDevice,
        NvRmModuleID_Pwm,
        caps,
        sizeof(caps)/sizeof(caps[0]),
        (void**)&pCap));

    if ((pCap->MajorVersion > 1) ||
        ((pCap->MajorVersion == 1) && (pCap->MinorVersion > 0)))
            s_IsFreqDividerSupported = NV_TRUE;

    s_hPwm->RefCount++;
exit:
    *phPwm = s_hPwm;
    NvOsMutexUnlock(s_hPwmMutex);
    return NvSuccess;

fail:
    NvOsMutexUnlock(s_hPwmMutex);
    return status;
}
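Because the PWM handle is a ref-counted singleton, a second open only bumps RefCount and hands back the same pointer. A hypothetical sketch (each successful open would still need a matching release call, which is not shown on this page):

static NvError PwmOpenTwice(NvRmDeviceHandle hDevice)
{
    NvRmPwmHandle hPwmA = NULL;
    NvRmPwmHandle hPwmB = NULL;
    NvError e;

    e = NvRmPwmOpen(hDevice, &hPwmA);
    if (e != NvSuccess)
        return e;

    // Second open returns the same handle with RefCount incremented
    e = NvRmPwmOpen(hDevice, &hPwmB);
    if (e == NvSuccess)
        NV_ASSERT(hPwmA == hPwmB);

    return e;
}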
void
NvEcClose(NvEcHandle hEc)
{
    NvEcPrivState   *ec;
    NvBool          destroy = NV_FALSE;

    if ( NULL == hEc )
        return;

    NV_ASSERT( s_refcount );

    ec = hEc->ec;
    NvOsMutexLock( ec->mutex );

    // FIXME: handle client still with outstanding event types
    if ( !--s_refcount )
    {
        NvEcPrivDeinitHook(ec->hEc);

        NV_ASSERT( NULL == ec->eventReg[hEc->tag].regBegin &&
                    NULL == ec->eventReg[hEc->tag].regEnd );
        NV_ASSERT( NULL == ec->requestBegin && NULL == ec->requestEnd );
        NV_ASSERT( NULL == ec->responseBegin && NULL == ec->responseEnd );

        ec->exitPingThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
        ec->exitThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );

        NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->hPingSema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        destroy = NV_TRUE;

        NvOsFree( ec->eventNodes );
        NvOsFree( ec->hEc );
    }

    // Set this flag as FALSE to indicate power is disabled
    ec->powerState = NV_FALSE;

    NV_ASSERT( hEc->tag < NVEC_MAX_REQUESTOR_TAG );
    ec->tagAllocated[hEc->tag] = NV_FALSE;      // to be recycled

    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    if ( destroy )
    {
        NvOsMutexDestroy( ec->mutex );
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = NULL;
    }
}
Example no. 20
static NvError
RecordStarvationHints(
    NvRmDeviceHandle hRmDeviceHandle,
    NvRmPowerClient* pPowerClient,
    const NvRmDfsStarvationHint* pMultiHint,
    NvU32 NumHints)
{
    NvU32 i;
    NvBool HintChanged = NV_FALSE;

    for (i = 0; i < NumHints; i++)
    {
        NvRmDfsClockId ClockId = pMultiHint[i].ClockId;
        NvBool Starving = pMultiHint[i].Starving;
        NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));

        /*
         * If this is the first starvation hint, allocate hints array and fill
         * it in. Otherwise, just update starvation hint status. In both cases
         * determine if starvation hint for clock domain has changed.
         */
        if (pPowerClient->pStarvationHints == NULL)
        {
            size_t s = sizeof(NvBool) * (size_t)NvRmDfsClockId_Num;
            NvBool* p = NvOsAlloc(s);
            if (p == NULL)
            {
                return NvError_InsufficientMemory;
            }
            NvOsMemset(p, 0, s);
            pPowerClient->pStarvationHints = p;

            // Only a new Starvation On hint counts as a change
            HintChanged = Starving;
        }
        else
        {
            // Only changes from On to Off or vice versa count
            HintChanged = (pPowerClient->pStarvationHints[ClockId] != Starving);
        }
        pPowerClient->pStarvationHints[ClockId] = Starving;

        // If hint has changed, update clock domain starvation reference count
        // (hint against CPU, or AVP, or VDE is automatically applied to EMC)
        if (HintChanged)
        {
            if (Starving)
            {
                if ((ClockId == NvRmDfsClockId_Cpu) ||
                    (ClockId == NvRmDfsClockId_Avp) ||
                    (ClockId == NvRmDfsClockId_Vpipe))
                {
                    s_StarveOnRefCounts[NvRmDfsClockId_Emc]++;
                }
                s_StarveOnRefCounts[ClockId]++;
            }
            else
            {
                if ((ClockId == NvRmDfsClockId_Cpu) ||
                    (ClockId == NvRmDfsClockId_Avp) ||
                    (ClockId == NvRmDfsClockId_Vpipe))
                {
                    NV_ASSERT(s_StarveOnRefCounts[NvRmDfsClockId_Emc] != 0);
                    s_StarveOnRefCounts[NvRmDfsClockId_Emc]--;
                }
                NV_ASSERT(s_StarveOnRefCounts[ClockId] != 0);
                s_StarveOnRefCounts[ClockId]--;
            }
        }
    }
    return NvSuccess;
}
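A sketch of building the hint array this routine consumes (hypothetical same-file helper; only the ClockId and Starving fields referenced above are assumed to exist in NvRmDfsStarvationHint).

static NvError MarkCpuStarving(
    NvRmDeviceHandle hRmDeviceHandle,
    NvRmPowerClient* pPowerClient,
    NvBool Starving)
{
    NvRmDfsStarvationHint Hint;

    NvOsMemset(&Hint, 0, sizeof(Hint));
    Hint.ClockId = NvRmDfsClockId_Cpu;   // a CPU hint also adjusts the EMC count
    Hint.Starving = Starving;

    return RecordStarvationHints(hRmDeviceHandle, pPowerClient, &Hint, 1);
}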
Example no. 21
const NvRmModuleClockLimits*
NvRmPrivClockLimitsInit(NvRmDeviceHandle hRmDevice)
{
    NvU32 i;
    NvRmFreqKHz CpuMaxKHz, AvpMaxKHz, VdeMaxKHz, TDMaxKHz, DispMaxKHz;
    NvRmSKUedLimits* pSKUedLimits;
    const NvRmScaledClkLimits* pHwLimits;
    const NvRmSocShmoo* pShmoo;

    NV_ASSERT(hRmDevice);
    NvRmPrivChipFlavorInit(hRmDevice);
    pShmoo = s_ChipFlavor.pSocShmoo;
    pHwLimits = &pShmoo->ScaledLimitsList[0];
#ifndef CONFIG_FAKE_SHMOO
    pSKUedLimits = pShmoo->pSKUedLimits;
#else
/*
    NvRmFreqKHz CpuMaxKHz;
    NvRmFreqKHz AvpMaxKHz;
    NvRmFreqKHz VdeMaxKHz;
    NvRmFreqKHz McMaxKHz;
    NvRmFreqKHz Emc2xMaxKHz;
    NvRmFreqKHz TDMaxKHz;
    NvRmFreqKHz DisplayAPixelMaxKHz;
    NvRmFreqKHz DisplayBPixelMaxKHz;
    NvRmMilliVolts NominalCoreMv;   // for common core rail
    NvRmMilliVolts NominalCpuMv;    // for dedicated CPU rail
*/
    pSKUedLimits = pShmoo->pSKUedLimits;
    // override default with configuration values
    // CPU clock duh!
    pSKUedLimits->CpuMaxKHz = MAX_CPU_OC_FREQ;

#ifdef CONFIG_BOOST_PERIPHERALS
    // AVP clock
    pSKUedLimits->AvpMaxKHz = CONFIG_MAX_AVP_OC_FREQ;
    // 3D clock
    pSKUedLimits->TDMaxKHz = CONFIG_MAX_3D_OC_FREQ;
#endif // CONFIG_BOOST_PERIPHERALS

#endif // CONFIG_FAKE_SHMOO
    NvOsDebugPrintf("NVRM corner (%d, %d)\n",
        s_ChipFlavor.corner, s_ChipFlavor.CpuCorner);

    NvOsMemset((void*)s_pClockScales, 0, sizeof(s_pClockScales));
    NvOsMemset(s_ClockRangeLimits, 0, sizeof(s_ClockRangeLimits));
    NvOsMemset(s_VoltageStepRefCounts, 0, sizeof(s_VoltageStepRefCounts));
    s_VoltageStepRefCounts[0] = NvRmPrivModuleID_Num; // all at minimum step

    // Combine AVP/System clock absolute limit with scaling V/F ladder upper
    // boundary, and set default clock range for all present modules the same
    // as for AVP/System clock
#ifdef CONFIG_AVP_OVERCLOCK
    AvpMaxKHz = 266400;
#else
    AvpMaxKHz = pSKUedLimits->AvpMaxKHz;
    for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
    {
        if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
        {
            AvpMaxKHz = NV_MIN(
                AvpMaxKHz, pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex]);
            break;
        }
    }
#endif //CONFIG_AVP_OVERCLOCK

    for (i = 0; i < NvRmPrivModuleID_Num; i++)
    {
        NvRmModuleInstance *inst;
        if (NvRmPrivGetModuleInstance(hRmDevice, i, &inst) == NvSuccess)
        {
            s_ClockRangeLimits[i].MaxKHz = AvpMaxKHz;
            s_ClockRangeLimits[i].MinKHz = NVRM_BUS_MIN_KHZ;

        }
    }

    // Fill in limits for modules with selectable clock sources and/or dividers
    // as specified by the h/w table according to the h/w device ID
    // (CPU and AVP are not in relocation table - need translate id explicitly)
    // TODO: need separate subclock limits? (current implementation applies
    // main clock limits to all subclocks)
    for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
    {
        NvRmModuleID id;
        if (pHwLimits[i].HwDeviceId == NV_DEVID_CPU)
            id = NvRmModuleID_Cpu;
        else if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
            id = NvRmModuleID_Avp;
        else if (pHwLimits[i].HwDeviceId == NVRM_DEVID_CLK_SRC)
            id = NvRmClkLimitsExtID_ClkSrc;
        else
            id = NvRmPrivDevToModuleID(pHwLimits[i].HwDeviceId);
        if ((id != NVRM_DEVICE_UNKNOWN) &&
            (pHwLimits[i].SubClockId == 0))
        {
            s_ClockRangeLimits[id].MinKHz = pHwLimits[i].MinKHz;
            s_ClockRangeLimits[id].MaxKHz =
                pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex];
            s_pClockScales[id] = pHwLimits[i].MaxKHzList;
        }
    }
    // Fill in CPU scaling data if SoC has dedicated CPU rail, and CPU clock
    // characterization data is separated from other modules on common core rail
    if (s_ChipFlavor.pCpuShmoo)
    {
        const NvRmScaledClkLimits* pCpuLimits =
            s_ChipFlavor.pCpuShmoo->pScaledCpuLimits;
        NV_ASSERT(pCpuLimits && (pCpuLimits->HwDeviceId == NV_DEVID_CPU));

        s_ClockRangeLimits[NvRmModuleID_Cpu].MinKHz = pCpuLimits->MinKHz;
        s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz =
            pCpuLimits->MaxKHzList[s_ChipFlavor.pCpuShmoo->ShmooVmaxIndex];
        s_pClockScales[NvRmModuleID_Cpu] = pCpuLimits->MaxKHzList;
    }

    // Set AVP upper clock boundary with combined Absolute/Scaled limit;
    // Sync System clock with AVP (System is not in relocation table)
    s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz = AvpMaxKHz;
    s_ClockRangeLimits[NvRmPrivModuleID_System].MaxKHz =
        s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz;
    s_ClockRangeLimits[NvRmPrivModuleID_System].MinKHz =
        s_ClockRangeLimits[NvRmModuleID_Avp].MinKHz;
    s_pClockScales[NvRmPrivModuleID_System] = s_pClockScales[NvRmModuleID_Avp];

    // Set VDE upper clock boundary with combined Absolute/Scaled limit (on
    // AP15/Ap16 VDE clock derived from the system bus, and VDE maximum limit
    // must be the same as AVP/System).
    VdeMaxKHz = pSKUedLimits->VdeMaxKHz;
    VdeMaxKHz = NV_MIN(
        VdeMaxKHz, s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz);
    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
    {
        NV_ASSERT(VdeMaxKHz == AvpMaxKHz);
    }
    s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz = VdeMaxKHz;

    // Set upper clock boundaries for devices on CPU bus (CPU, Mselect,
    // CMC) with combined Absolute/Scaled limits
    CpuMaxKHz = pSKUedLimits->CpuMaxKHz;
    CpuMaxKHz = NV_MIN(
        CpuMaxKHz, s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz);
    s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz = CpuMaxKHz;
    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
    {
        s_ClockRangeLimits[NvRmModuleID_CacheMemCtrl].MaxKHz = CpuMaxKHz;
        s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz;
        NV_ASSERT(s_ClockRangeLimits[NvRmClkLimitsExtID_ClkSrc].MaxKHz >=
                  CpuMaxKHz);
    }
    else if (hRmDevice->ChipId.Id == 0x20)
    {
        // No CMC; TODO: Mselect/CPU <= 1/4?
        s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz >> 2;
    }
Example no. 22
NvError
NvRmPrivReadCfgVars( NvRmCfgMap *map, void *cfg )
{
    NvU32 tmp;
    NvU32 i;
    char val[ NVRM_CFG_MAXLEN ];
    NvError err;

    /* the last cfg var entry is all zeroes */
    for( i = 0; map[i].name != NULL; i++ )
    {
        err = NvOsGetConfigString( map[i].name, val, NVRM_CFG_MAXLEN );
        if( err != NvSuccess )
        {
            /* no config var set, try the next one */
            continue;
        }

        /* parse the config var and print it */
        switch( map[i].type ) {
        case NvRmCfgType_Hex:
        {
            char *end = val + NvOsStrlen( val );
            tmp = NvUStrtoul( val, &end, 16 );
            *(NvU32*)((NvU32)cfg + (NvU32)map[i].offset) = tmp;
            NV_DEBUG_PRINTF(("Request: %s=0x%08x\n", map[i].name, tmp));
            break;
        }
        case NvRmCfgType_Char:
            *(char*)((NvU32)cfg + (NvU32)map[i].offset) = val[0];
            NV_DEBUG_PRINTF(("Request: %s=%c\n", map[i].name, val[0]));
            break;
        case NvRmCfgType_Decimal:
        {
            char *end = val + NvOsStrlen( val );
            tmp = NvUStrtoul( val, &end, 10 );
            *(NvU32*)((NvU32)cfg + (NvU32)map[i].offset) = tmp;
            NV_DEBUG_PRINTF(("Request: %s=%d\n", map[i].name, tmp));
            break;
        }
        case NvRmCfgType_String:
        {
            NvU32 len = NvOsStrlen( val );
            if( len >= NVRM_CFG_MAXLEN )
            {
                len = NVRM_CFG_MAXLEN - 1;
            }
            NvOsMemset( (char *)(NvU32)cfg + (NvU32)map[i].offset, 0,
                NVRM_CFG_MAXLEN );
            NvOsStrncpy( (char *)(NvU32)cfg + (NvU32)map[i].offset, val, len );
            NV_DEBUG_PRINTF(("Request: %s=%s\n", map[i].name, val));
            break;
        }
        default:
            NV_ASSERT(!" Illegal RM Configuration type. ");
        }
    }

    return NvSuccess;
}
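A sketch of the map/struct pairing this parser expects (hypothetical names throughout; only the name/type/offset fields and the all-zero terminator used above are taken from the source, and integer offsets filled in via offsetof are an assumption about the NvRmCfgMap field types).

#include <stddef.h>         // offsetof; an assumption about how offsets are filled in

typedef struct MyCfgRec     // hypothetical target structure
{
    NvU32 DebugLevel;
    char  BootTag[NVRM_CFG_MAXLEN];
} MyCfg;

static NvRmCfgMap s_MyCfgMap[] =
{
    { .name = "debug",   .type = NvRmCfgType_Decimal, .offset = offsetof(MyCfg, DebugLevel) },
    { .name = "boottag", .type = NvRmCfgType_String,  .offset = offsetof(MyCfg, BootTag) },
    { 0 }                   // all-zero terminator expected by the parser
};

static NvError ReadMyCfg(MyCfg* pCfg)
{
    NvOsMemset(pCfg, 0, sizeof(*pCfg));
    return NvRmPrivReadCfgVars(s_MyCfgMap, pCfg);
}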