void NvRmPmuGetBatteryFullLifeTime(
    NvRmDeviceHandle hRmDevice,
    NvRmPmuBatteryInstance batteryInst,
    NvU32 * pLifeTime )
{
    if (!s_PmuSupportedEnv)
        return;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    NvOdmPmuGetBatteryFullLifeTime(
        s_Pmu.hOdmPmu, (NvOdmPmuBatteryInstance)batteryInst, pLifeTime);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
void NvRmPowerEventNotify(
    NvRmDeviceHandle hRmDeviceHandle,
    NvRmPowerEvent Event)
{
    NV_ASSERT(hRmDeviceHandle);

    // Just in case
    if (Event == NvRmPowerEvent_NoEvent)
        return;

    NvOsMutexLock(s_hPowerClientMutex);
    PowerEventNotify(hRmDeviceHandle, Event);
    NvOsMutexUnlock(s_hPowerClientMutex);
}
static ssize_t tegra_battery_store_property(
    struct device *dev,
    struct device_attribute *attr,
    const char *buf,
    size_t count)
{
    unsigned int value = 0;

    value = simple_strtoul(buf, NULL, 0);

    NvOsMutexLock(batt_dev->hBattEventMutex);
    batt_dev->batt_status_poll_period = value;
    NvOsMutexUnlock(batt_dev->hBattEventMutex);

    NvOsSemaphoreSignal(batt_dev->hOdmSemaphore);
    return count;
}
NvError NvRmPowerStarvationHintMulti(
    NvRmDeviceHandle hRmDeviceHandle,
    NvU32 ClientId,
    const NvRmDfsStarvationHint* pMultiHint,
    NvU32 NumHints)
{
    NvError error;
    NvRmPowerClient* pPowerClient = NULL;
    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);

    NV_ASSERT(hRmDeviceHandle);
    NV_ASSERT(pMultiHint && NumHints);

    /* Do nothing on platforms without frequency scaling, such as QT and FPGA */
    if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
    {
        return NvSuccess;
    }

    // Do nothing if DFS is disabled, and therefore all clocks are maxed anyway
    if (NvRmDfsGetState(hRmDeviceHandle) <= NvRmDfsRunState_Disabled)
    {
        return NvSuccess;
    }

    NvOsMutexLock(s_hPowerClientMutex);

    // Check if this client ID was registered; return error otherwise
    if (ClientIndex < pRegistry->UsedIndexRange)
    {
        pPowerClient = pRegistry->pPowerClients[ClientIndex];
    }
    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
    {
        NvOsMutexUnlock(s_hPowerClientMutex);
        return NvError_BadValue;
    }

    // Add new starvation hint
    error = RecordStarvationHints(
        hRmDeviceHandle, pPowerClient, pMultiHint, NumHints);
    NvOsMutexUnlock(s_hPowerClientMutex);

    if (error == NvSuccess)
        NvRmPrivStarvationHintPrintf(
            ClientIndex, pPowerClient->tag, pMultiHint, NumHints);
    return error;
}
void NvBatteryEventHandlerThread(void *args)
{
    NvU8 BatteryState = 0, BatteryEvent = 0;
    NvBool suspend_flag;

    for (;;)
    {
        NvOsSemaphoreWaitTimeout(batt_dev->hOdmSemaphore,
            batt_dev->batt_status_poll_period);

        if (batt_dev->exitThread)
            break;

        if (!batt_dev->hOdmBattDev)
            continue;

        NvOsMutexLock(batt_dev->hBattEventMutex);
        suspend_flag = batt_dev->inSuspend;
        NvOsMutexUnlock(batt_dev->hBattEventMutex);
        if (suspend_flag)
            continue;

        pr_info("\tBATTERY: polling battery information! --->>>\n");

        NvOdmBatteryGetBatteryStatus(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, &BatteryState);
        NvOdmBatteryGetEvent(batt_dev->hOdmBattDev, &BatteryEvent);

        if ((BatteryState == NVODM_BATTERY_STATUS_UNKNOWN) ||
            (BatteryEvent == NvOdmBatteryEventType_Num))
        {
            /* Do nothing */
        }
        else
        {
            if (BatteryEvent & NvOdmBatteryEventType_RemainingCapacityAlarm)
            {
                // Power off only when the reported state equals the OR of all
                // three flags: critical, very critical, and discharging.
                if (BatteryState == (NVODM_BATTERY_STATUS_CRITICAL |
                                     NVODM_BATTERY_STATUS_VERY_CRITICAL |
                                     NVODM_BATTERY_STATUS_DISCHARGING))
                {
                    pr_info("nvec_battery:calling kernel_power_off...\n");
                    kernel_power_off();
                }
            }
            else
            {
                /* Update the battery and power supply info for other events */
                power_supply_changed(
                    &tegra_power_supplies[NvCharger_Type_Battery]);
                //power_supply_changed(&tegra_power_supplies[NvCharger_Type_USB]);
                //power_supply_changed(&tegra_power_supplies[NvCharger_Type_AC]);
            }
        }
    }
}
NvBool NvRmPmuIsRtcInitialized(NvRmDeviceHandle hRmDevice)
{
    NvBool ReturnStatus = NV_FALSE;

    if (!s_PmuSupportedEnv)
        return NV_FALSE;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    ReturnStatus = NvOdmPmuIsRtcInitialized(s_Pmu.hOdmPmu);
    NvOsMutexUnlock(s_Pmu.hMutex);
    return ReturnStatus;
}
void NvRmPmuGetBatteryChemistry(
    NvRmDeviceHandle hRmDevice,
    NvRmPmuBatteryInstance batteryInst,
    NvRmPmuBatteryChemistry * pChemistry )
{
    if (!s_PmuSupportedEnv)
        return;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    NvOdmPmuGetBatteryChemistry(s_Pmu.hOdmPmu,
        (NvOdmPmuBatteryInstance)batteryInst,
        (NvOdmPmuBatteryChemistry*)pChemistry);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
NvError NvRmKernelPowerSuspend( NvRmDeviceHandle hRmDeviceHandle )
{
    NvOdmSocPowerState state = NvRmPowerLowestStateGet();

    if (state == NvOdmSocPowerState_Suspend)
        NvRmPrivPowerGroupSuspend(hRmDeviceHandle);

#if NVRM_POWER_DEBUG_SUSPEND_ENTRY
    NvOsMutexLock(s_hPowerClientMutex);
    {
        NvU32 i;
        ModuleVoltageReq* pVoltageReq = NULL;
        NvRmPowerClient* pPowerClient = NULL;
        NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
        NvRmPowerState s = NvRmPrivPowerGetState(hRmDeviceHandle);

        // Report combined RM power state and active modules
        NvOsDebugPrintf("RM power state before suspend: %s (%d)\n",
            ((s == NvRmPowerState_Active) ? "Active" :
             ((s == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle")), s);

        if (s == NvRmPowerState_Active)
        {
            for (i = 0; i < pRegistry->UsedIndexRange; i++)
            {
                pPowerClient = pRegistry->pPowerClients[i];
                if (pPowerClient)
                {
                    pVoltageReq = pPowerClient->pVoltageReqHead;
                    while (pVoltageReq != NULL)
                    {
                        if (pVoltageReq->MaxVolts != NvRmVoltsOff)
                        {
                            // could also set some bad e = NvError_Bad???
                            NvOsDebugPrintf("Active Module: 0x%x\n",
                                pVoltageReq->ModuleId);
                        }
                        pVoltageReq = pVoltageReq->pNext;
                    }
                }
            }
        }
    }
    NvOsMutexUnlock(s_hPowerClientMutex);
#endif

    return NvSuccess;
}
NvError NvRmAnalogInterfaceControl(
    NvRmDeviceHandle hDevice,
    NvRmAnalogInterface Interface,
    NvBool Enable,
    void *Config,
    NvU32 ConfigLength )
{
    NvError err = NvSuccess;
    NvU32 id;
    NvU32 inst;

    NV_ASSERT( hDevice );

    id = NVRM_ANALOG_INTERFACE_ID( Interface );
    inst = NVRM_ANALOG_INTERFACE_INSTANCE( Interface );

    NvOsMutexLock( hDevice->mutex );

    switch( id ) {
    case NvRmAnalogInterface_Dsi:
        break;
    case NvRmAnalogInterface_ExternalMemory:
        break;
    case NvRmAnalogInterface_Hdmi:
        break;
    case NvRmAnalogInterface_Lcd:
        break;
    case NvRmAnalogInterface_Uart:
        break;
    case NvRmAnalogInterface_Sdio:
        break;
    case NvRmAnalogInterface_Tv:
        err = NvRmPrivTvDcControl( hDevice, Enable, inst, Config,
            ConfigLength );
        break;
    case NvRmAnalogInterface_VideoInput:
        err = NvRmPrivVideoInputControl( hDevice, Enable, inst, Config,
            ConfigLength);
        break;
    default:
        NV_ASSERT(!"Unknown Analog interface passed. ");
    }

    NvOsMutexUnlock( hDevice->mutex );
    return err;
}
NvBool NvRmPmuWriteAlarm(
    NvRmDeviceHandle hRmDevice,
    NvU32 Count)
{
    NvBool ReturnStatus = NV_FALSE;

    if (!s_PmuSupportedEnv)
        return NV_FALSE;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    ReturnStatus = NvOdmPmuWriteAlarm(s_Pmu.hOdmPmu, Count);
    NvOsMutexUnlock(s_Pmu.hMutex);
    return ReturnStatus;
}
NvBool NvRmPmuGetAcLineStatus(
    NvRmDeviceHandle hRmDevice,
    NvRmPmuAcLineStatus * pStatus )
{
    NvBool ReturnStatus = NV_FALSE;

    if (!s_PmuSupportedEnv)
        return NV_FALSE;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    ReturnStatus = NvOdmPmuGetAcLineStatus(
        s_Pmu.hOdmPmu, (NvOdmPmuAcLineStatus*)pStatus);
    NvOsMutexUnlock(s_Pmu.hMutex);
    return ReturnStatus;
}
NvBool NvRmPmuGetBatteryStatus(
    NvRmDeviceHandle hRmDevice,
    NvRmPmuBatteryInstance batteryInst,
    NvU8 * pStatus )
{
    NvBool ReturnStatus = NV_FALSE;

    if (!s_PmuSupportedEnv)
        return NV_FALSE;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);
    ReturnStatus = NvOdmPmuGetBatteryStatus(
        s_Pmu.hOdmPmu, (NvOdmPmuBatteryInstance)batteryInst, pStatus);
    NvOsMutexUnlock(s_Pmu.hMutex);
    return ReturnStatus;
}
/* Read the global variable gUsbCurrLimitC and report it through
 * /proc/usbCurrLimitInfo for user space. */
static int tegra_usbCurrLimit_read_proc(char *page, char **start, off_t off,
    int count, int *eof, void *data)
{
    int len = 0;

    /* NV_DRIVER_TRACE (("tegra_usbCurrLimit_read_proc:start\n")); */
    NvOsMutexLock(usbCurrLimit_lock);
    len += snprintf(page+len, count-len, "%d", gUsbCurrLimitC);
    NvOsMutexUnlock(usbCurrLimit_lock);

    *eof = 1;
    /* NV_DRIVER_TRACE (("tegra_usbCurrLimit_read_proc:end\n")); */
    return len;
}
void NvRmPmuGetVoltage(
    NvRmDeviceHandle hDevice,
    NvU32 vddId,
    NvU32 * pMilliVolts)
{
    NvU32 i;

    if (!s_PmuSupportedEnv)
        return;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);

    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
    {
        if (NvOdmPmuGetVoltage(s_Pmu.hOdmPmu, vddId, pMilliVolts))
            break;
    }
    NV_ASSERT(i < VOLTAGE_CONTROL_RETRY_CNT);

    NvOsMutexUnlock(s_Pmu.hMutex);
}
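/*
 * Illustrative sketch, not part of the original source: a caller-side view of
 * NvRmPmuGetVoltage(). The vddId value is board specific and comes from the
 * ODM PMU adaptation, so it is left as a parameter here rather than invented.
 */
static NvU32 ExampleReadRailMilliVolts(NvRmDeviceHandle hRm, NvU32 vddId)
{
    NvU32 MilliVolts = 0;

    /* Retries and PMU mutex locking happen inside NvRmPmuGetVoltage(). */
    NvRmPmuGetVoltage(hRm, vddId, &MilliVolts);
    return MilliVolts;
}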
void NvRmPwmClose(NvRmPwmHandle hPwm)
{
    NvU32 i;

    if (!hPwm)
        return;

    NV_ASSERT(hPwm->RefCount);

    NvOsMutexLock(s_hPwmMutex);
    hPwm->RefCount--;
    if (hPwm->RefCount == 0)
    {
        // Unmap the pwm register virtual address space
        for (i = 0; i < NvRmPwmOutputId_Num-2; i++)
        {
            NvRmPhysicalMemUnmap((void*)s_hPwm->VirtualAddress[i],
                s_hPwm->PwmBankSize);
        }

        // Unmap the pmc register virtual address space
        NvRmPhysicalMemUnmap(
            (void*)s_hPwm->VirtualAddress[NvRmPwmOutputId_Num-2],
            s_hPwm->PmcBankSize);

        if (s_IsPwmFirstConfig)
        {
            // Disable power
            PwmPowerConfigure(hPwm, NV_FALSE);

            // Unregister with RM power
            NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);

            // Tri-state the pin-mux pins
            NV_ASSERT_SUCCESS(NvRmSetModuleTristate(hPwm->RmDeviceHandle,
                NVRM_MODULE_ID(NvRmModuleID_Pwm, 0), NV_TRUE));

            s_IsPwmFirstConfig = NV_FALSE;
        }
        NvOsFree(s_hPwm);
        s_hPwm = NULL;
    }
    NvOsMutexUnlock(s_hPwmMutex);
}
static void
NvEcPrivProcessPostSendRequest( NvEcPrivState *ec,
                                NvError transportStatus )
{
    NvEcRequestNode *requestNode = NULL;
    NvEcResponseNode *responseNode;

    NvEcPrivFindAndDequeueRequest(ec, &requestNode, transportStatus, NV_TRUE);

    // update corresponding responseNode timeout
    if ( requestNode )
    {
        requestNode->completed = NV_TRUE;

        responseNode = requestNode->responseNode;
        if ( responseNode )
        {
            NvOsMutexLock( ec->responseMutex );
            NV_ASSERT(ec->responseBegin);
            if ( NV_WAIT_INFINITE == ec->timeout[NVEC_IDX_RESPONSE] )
            {
                // no current pending response on timeout watch.
                // Update response queue timeout.
                responseNode->timeout = NVEC_RESPONSE_TIMEOUT_DEFAULT;
                ec->timeout[NVEC_IDX_RESPONSE] = responseNode->timeout;
                ec->timeoutBase[NVEC_IDX_RESPONSE] = ec->lastTime;
                DISP_MESSAGE(("\r\nec->timeout[NVEC_IDX_RESPONSE] is set to=%d",
                    ec->timeout[NVEC_IDX_RESPONSE]));
            }
            else
            {
                // Update this response timeout with current lastTime as base
                responseNode->timeout = NVEC_RESPONSE_TIMEOUT_DEFAULT +
                    NVEC_TIME_BASE(ec, NVEC_IDX_RESPONSE);
                // wraparound time difference will work too.
            }
            NvOsMutexUnlock( ec->responseMutex );
        }

        // all request stuff should be done before signal
        NvOsSemaphoreSignal( requestNode->sema );
    }
}
static int tegra_usbCurrLimit_write_proc(struct file *file,
    const char *buffer, unsigned long count, void *data)
{
    int len = 0;
    static char proc_buf[4];

    /* Clamp the copy to the 4-byte static buffer so longer user writes
     * cannot overflow proc_buf. */
    if (count > sizeof(proc_buf) - 1)
        count = sizeof(proc_buf) - 1;

    if (copy_from_user(proc_buf, buffer, count))
    {
        return -EFAULT;
    }

    if (strncmp(proc_buf, "1", 1) == 0)
    {
        gUsbCurrLimitC = 1;
        NvOsMutexLock(usbCurrLimit_lock);
        len += snprintf(proc_buf, count-len, "%d", gUsbCurrLimitC);
        NvOsMutexUnlock(usbCurrLimit_lock);
        return len;
    }
    else
    {
        NvOsDebugNprintf("tegra_usbCurrLimit_write_proc fail\n");
        return -1;
    }
}
void NvRmPmuSetChargingCurrentLimit(
    NvRmDeviceHandle hRmDevice,
    NvRmPmuChargingPath ChargingPath,
    NvU32 ChargingCurrentLimitMa,
    NvU32 ChargerType)
{
    NvU32 i;

    if (!s_PmuSupportedEnv)
        return;

    NV_ASSERT(s_Pmu.hMutex);
    NvOsMutexLock(s_Pmu.hMutex);

    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
    {
        if (NvOdmPmuSetChargingCurrent(
                s_Pmu.hOdmPmu, (NvOdmPmuChargingPath)ChargingPath,
                ChargingCurrentLimitMa, ChargerType))
            break;
    }
    NV_ASSERT(i < VOLTAGE_CONTROL_RETRY_CNT);

    NvOsMutexUnlock(s_Pmu.hMutex);
}
NvError NvEcPowerResume(void)
{
    NvError e = NvSuccess;
    NvEcPrivState *ec = &g_ec;

    NvOsMutexLock(ec->mutex);

    // Call the transport's power-on only if it is currently in the OFF state
    if (ec->powerState == NV_FALSE)
    {
        NV_CHECK_ERROR_CLEANUP( NvEcTransportPowerResume(ec->transport) );
        ec->powerState = NV_TRUE;
        ec->EnterLowPowerState = NV_FALSE;

        // Signal priv thread to get out of power suspend.
        NvOsSemaphoreSignal(ec->LowPowerExitSema);

        // Perform post-resume EC operations
        NvEcPrivPowerResumeHook(ec->hEc);
    }

fail:
    NvOsMutexUnlock(ec->mutex);
    return e;
}
NvU32 NvRmGetKeyValue(NvRmDeviceHandle hRm, NvU32 KeyID)
{
    Key *pList = s_pKeyList;
    NvU32 Value = 0;
    unsigned int i;

    NvOsMutexLock(s_Mutex);
    while (pList)
    {
        for (i = 0; i < pList->Count; i++)
        {
            if (pList->KeyID[i] == KeyID)
            {
                Value = pList->Value[i];
                goto cleanup;
            }
        }
        pList = pList->pNextKey;
    }

cleanup:
    NvOsMutexUnlock(s_Mutex);
    // Value remains 0 if the key was not found
    return Value;
}
NvError NvRmPowerRegister(
    NvRmDeviceHandle hRmDeviceHandle,
    NvOsSemaphoreHandle hEventSemaphore,
    NvU32* pClientId)
{
    NvU32 FreeIndex;
    NvError error;
    NvOsSemaphoreHandle hSema = NULL;
    NvRmPowerClient* pNewClient = NULL;
    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;

    NV_ASSERT(hRmDeviceHandle);
    NV_ASSERT(pClientId);

    // If a non-zero semaphore handle is passed, duplicate it to be available
    // after the call. Abort registration if the non-zero handle is invalid
    if (hEventSemaphore != NULL)
    {
        error = NvOsSemaphoreClone(hEventSemaphore, &hSema);
        if (error != NvSuccess)
        {
            NV_ASSERT(!" Power Register Semaphore Clone error. ");
        }
    }

    NvOsMutexLock(s_hPowerClientMutex);

    // Find a free registry entry for the new client
    for (FreeIndex = 0; FreeIndex < pRegistry->UsedIndexRange; FreeIndex++)
    {
        if (pRegistry->pPowerClients[FreeIndex] == NULL)
            break;
    }
    if (FreeIndex == pRegistry->AvailableEntries)
    {
        // If all available entries are used, re-size the registry array
        NvU32 entries = pRegistry->AvailableEntries + NVRM_POWER_REGISTRY_DELTA;
        size_t s = sizeof(*pRegistry->pPowerClients) * (size_t)entries;
        NvRmPowerClient** p = NvOsRealloc(pRegistry->pPowerClients, s);
        if (p == NULL)
        {
            NvU32 old_size;

            /* fall back to NvOsAlloc */
            p = NvOsAlloc( s );
            if( p == NULL )
            {
                goto failed;
            }

            /* copy the old data, free, etc. */
            old_size = sizeof(*pRegistry->pPowerClients) *
                pRegistry->AvailableEntries;

            NvOsMemcpy( p, pRegistry->pPowerClients, old_size );
            NvOsFree( pRegistry->pPowerClients );
        }
        pRegistry->pPowerClients = p;
        pRegistry->AvailableEntries = entries;
    }
    if (FreeIndex == pRegistry->UsedIndexRange)
    {
        // If reached used index range boundary, advance it
        pRegistry->UsedIndexRange++;
    }

    // Allocate and store new client record pointer in registry (null-pointer
    // marks registry entry as free, so it's OK to store it before error check)
    pNewClient = NvOsAlloc(sizeof(*pNewClient));
    pRegistry->pPowerClients[FreeIndex] = pNewClient;
    if (pNewClient == NULL)
    {
        goto failed;
    }

    // Fill in new client entry
    pNewClient->hEventSemaphore = hSema;
    pNewClient->Event = NvRmPowerEvent_NoEvent;
    pNewClient->pVoltageReqHead = NULL;
    pNewClient->pClockReqHead = NULL;
    pNewClient->pStarvationHints = NULL;
    pNewClient->tag = *pClientId;

    /*
     * Combine index with client pointer into the registration ID returned to
     * the client. This makes it a little more difficult for non-registered
     * clients to guess/re-use IDs
     */
    pNewClient->id = NVRM_POWER_INDEX2ID(FreeIndex, (NvU32)pClientId);
    *pClientId = pNewClient->id;

    NvOsMutexUnlock(s_hPowerClientMutex);
    return NvSuccess;

failed:
    NvOsFree(pNewClient);
    NvOsSemaphoreDestroy(hSema);
    NvOsMutexUnlock(s_hPowerClientMutex);
    return NvError_InsufficientMemory;
}
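/*
 * Illustrative sketch, not part of the original source: typical client-side
 * registration with the RM power manager. The semaphore is optional (NULL is
 * allowed); when provided it is cloned by NvRmPowerRegister() and presumably
 * signaled when power events are reported (see NvRmPowerEventNotify()).
 * NvRmPowerUnRegister() is assumed to be the matching teardown call, as used
 * in NvRmPwmClose() above.
 */
static NvError ExampleRegisterPowerClient(
    NvRmDeviceHandle hRm,
    NvOsSemaphoreHandle hEventSema,     /* may be NULL */
    NvU32* pClientId)
{
    NvError e;

    /* Callers seed *pClientId with a tag value (it is stored in
       pNewClient->tag) and the function overwrites *pClientId with the
       generated registration ID. */
    e = NvRmPowerRegister(hRm, hEventSema, pClientId);
    if (e != NvSuccess)
        return e;

    /* ... use the client ID with NvRmPowerVoltageControl(),
       NvRmPowerStarvationHintMulti(), etc. ... */
    return NvSuccess;
}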
/*
 * Process receive (response & event) and update individual timeout.
 *
 * Return NvError_InsufficientMemory due to 2 conditions:
 *   - internal event queue (ec->eventNodes) too small.
 *   - client did NvEcRegisterForEvents.
 * Skip TransportGetReceivePacket and transport will keep NACK'ing EC in this
 * error case.
 */
static NvError
NvEcPrivProcessReceiveEvent( NvEcPrivState *ec,
                             NvError transportStatus )
{
    NvError e = NvSuccess;
    NvEcEventNode *eventNode = NULL;
    NvEcEventRegistration *reg = NULL;
    NvEcEvent *packet = NULL;
    NvEcEventType eventType;
    NvU32 i, tagBitmap;

    NvOsMutexLock( ec->eventMutex );

    if ( ec->eventFreeBegin )
    {
        eventNode = ec->eventFreeBegin;
        packet = &eventNode->event;
    }
    else
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }

    // nothing we can do here if error!
    NV_CHECK_ERROR( NvEcTransportGetReceivePacket( ec->transport,
                        (NvEcResponse *)packet, sizeof(NvEcEvent) ) );

    eventType = packet->EventType;
    NV_ASSERT( eventType < NvEcEventType_Num );

    e = NvError_InvalidState;   // init to event type never registered
    i = 0;
    tagBitmap = ec->eventTagBitmap[eventType];
    while( tagBitmap )
    {
        NV_ASSERT( i < NvEcEventType_Num );
        if ( tagBitmap & 1 )
        {
            reg = ec->eventMap[i][eventType];
            NV_ASSERT( reg );

            if ( NvSuccess != e )
            {
                // dequeue from free and enqueue into ready if not done yet
                ec->eventFreeBegin = eventNode->next;
                if ( ec->eventFreeBegin == NULL )
                    ec->eventFreeEnd = NULL;

                eventNode->timeout = NVEC_EVENT_TIMEOUT_DEFAULT;    // ???
                eventNode->tagBitmap = ec->eventTagBitmap[eventType];
                eventNode->next = NULL;
                NVEC_ENQ( ec->eventReady, eventNode );
                e = NvSuccess;
            }
            NvOsSemaphoreSignal( reg->sema );
        }
        i++;
        tagBitmap = tagBitmap >> 1;
    }

fail:
    NvOsMutexUnlock( ec->eventMutex );
    return e;
}
/*
 * Traverse response nodes with these:
 *   - one response matching the tag param (returns the node). If bypassing
 *     tag checking, use INVALID tag as parameter.
 *   - all responses timeout (will signal back too)
 *   - Update individual responseNode's timeout by rebasing to
 *     EcPrivThread-global time (hEc->lastTime).
 *   - Update shortest timeout value for response queue.
 */
static void
NvEcPrivFindAndDequeueResponse( NvEcPrivState *ec,
                                NvEcResponse *response,
                                NvEcResponseNode **pResponseNode )
{
    NvEcResponseNode *t = NULL, *p = NULL, *temp;
    NvU32 timeout = NV_WAIT_INFINITE;
    NvBool remove = NV_FALSE, found = NV_FALSE;
    NvBool SignalSema;

    NvOsMutexLock( ec->responseMutex );
    NV_ASSERT(ec->responseBegin);
    DISP_MESSAGE(("\r\nFindDQRes responseBegin=0x%x", ec->responseBegin));
    if ( ec->responseBegin )
    {
        t = ec->responseBegin;
        while( t )
        {
            SignalSema = NV_FALSE;
            /* FIXME: just match tag? more to match?
             * There may be cases where a spurious response is received from
             * the EC. A response should not be removed from the queue until
             * its request is complete.
             */
            DISP_MESSAGE(("t->tag=0x%x\n", t->tag));
            if (response)
                DISP_MESSAGE(("response->RequestorTag=0x%x\n",
                    response->RequestorTag));
            if ( response && !found && (t->tag == response->RequestorTag) &&
                 t->requestNode->completed )
            {
                if ( pResponseNode )
                    *pResponseNode = t;
                found = NV_TRUE;
                remove = NV_TRUE;
            }
            else
            {
#if ENABLE_TIMEOUT
                if ( t->timeout <=
                     NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE) )
                {
                    t->status = NvError_Timeout;
                    SignalSema = NV_TRUE;
                    remove = NV_TRUE;
                    DISP_MESSAGE(("Resp Timeout Respnode=0x%x", t));
                }
                else
                {
                    // This check is needed for spurious response case handling.
                    if (t->timeout != NV_WAIT_INFINITE)
                        // update this response timeout w/ lastTime as base
                        t->timeout -=
                            NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE);
                }
#endif
            }
            if ( remove )
            {
                temp = t;
                NVEC_UNLINK( ec->response, t, p );
                DISP_MESSAGE(("\r\nFindDQRes removed=0x%x, removed->next=0x%x, "
                    "prev=0x%x ec->responseBegin=0x%x", t, t->next, p,
                    ec->responseBegin));
                remove = NV_FALSE;
                if (p)
                    t = p->next;
                else
                    t = ec->responseBegin;
                if (SignalSema == NV_TRUE)
                    NvOsSemaphoreSignal( temp->sema );
            }
            else
            {
                if ( timeout > t->timeout )
                    timeout = t->timeout;
                p = t;
                t = t->next;
            }
        }
        // update with per-queue timeout and timeoutBase
        ec->timeout[NVEC_IDX_RESPONSE] = timeout;
        ec->timeoutBase[NVEC_IDX_RESPONSE] = ec->lastTime;
        DISP_MESSAGE(("\r\nec->timeout[NVEC_IDX_RESPONSE] is set to=%d",
            ec->timeout[NVEC_IDX_RESPONSE]));
    }

    if (found == NV_FALSE)
        NvOsDebugPrintf("\r\n***NVEC:Received Spurious Response from EC.");

    NvOsMutexUnlock( ec->responseMutex );
}
void NvEcClose(NvEcHandle hEc)
{
    NvEcPrivState *ec;
    NvBool destroy = NV_FALSE;

    if ( NULL == hEc )
        return;

    NV_ASSERT( s_refcount );

    ec = hEc->ec;
    NvOsMutexLock( ec->mutex );

    // FIXME: handle client still with outstanding event types
    if ( !--s_refcount )
    {
        NvEcPrivDeinitHook(ec->hEc);
        NV_ASSERT( NULL == ec->eventReg[hEc->tag].regBegin &&
                   NULL == ec->eventReg[hEc->tag].regEnd );
        NV_ASSERT( NULL == ec->requestBegin && NULL == ec->requestEnd );
        NV_ASSERT( NULL == ec->responseBegin && NULL == ec->responseEnd );

        ec->exitPingThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );

        ec->exitThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );

        NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->hPingSema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        destroy = NV_TRUE;
        NvOsFree( ec->eventNodes );
        NvOsFree( ec->hEc );
    }

    // Set this flag as FALSE to indicate power is disabled
    ec->powerState = NV_FALSE;

    NV_ASSERT( hEc->tag < NVEC_MAX_REQUESTOR_TAG );
    ec->tagAllocated[hEc->tag] = NV_FALSE;      // to be recycled

    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    if ( destroy )
    {
        NvOsMutexDestroy( ec->mutex );
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = NULL;
    }
}
NvError NvEcOpen(NvEcHandle *phEc, NvU32 InstanceId)
{
    NvEc *hEc = NULL;
    NvU32 i;
    NvEcPrivState *ec = &g_ec;
    NvOsMutexHandle mutex = NULL;
    NvError e = NvSuccess;

    NV_ASSERT( phEc );

    if ( NULL == ec->mutex )
    {
        e = NvOsMutexCreate(&mutex);
        if (NvSuccess != e)
            return e;
        if (0 != NvOsAtomicCompareExchange32((NvS32*)&ec->mutex, 0,
                     (NvS32)mutex) )
            NvOsMutexDestroy( mutex );
    }

    NvOsMutexLock(ec->mutex);
    if ( !s_refcount )
    {
        mutex = ec->mutex;
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = mutex;
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->requestMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->responseMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->eventMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->sema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerEntrySema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerExitSema, 0));
        NV_CHECK_ERROR_CLEANUP( NvEcTransportOpen( &ec->transport, InstanceId,
            ec->sema, 0 ) );
    }

    // Set this flag as TRUE to indicate power is enabled
    ec->powerState = NV_TRUE;

    // create private handle for internal communications between NvEc driver
    // and EC
    if ( !s_refcount )
    {
        ec->hEc = NvOsAlloc( sizeof(NvEc) );
        if ( NULL == ec->hEc )
            goto clean;

        // reserve the zero tag for internal use by the nvec driver; this
        // ensures that the driver always has a requestor tag available and
        // can therefore always talk to the EC
        ec->tagAllocated[0] = NV_TRUE;
        ec->hEc->ec = ec;
        ec->hEc->tag = 0;

        NV_CHECK_ERROR_CLEANUP(NvOsSemaphoreCreate(&ec->hPingSema, 0));

        // perform startup operations before mutex is unlocked
        NV_CHECK_ERROR_CLEANUP( NvEcPrivInitHook(ec->hEc) );

        // start thread to send "pings" - no-op commands to keep EC "alive"
        NV_CHECK_ERROR_CLEANUP(NvOsThreadCreate(
            (NvOsThreadFunction)NvEcPrivPingThread, ec, &ec->hPingThread));
    }

    hEc = NvOsAlloc( sizeof(NvEc) );
    if ( NULL == hEc )
        goto clean;

    NvOsMemset(hEc, 0x00, sizeof(NvEc));

    hEc->ec = ec;
    hEc->tag = NVEC_REQUESTOR_TAG_INVALID;
    for ( i = 0; i < NVEC_MAX_REQUESTOR_TAG; i++ )
    {
        if ( !ec->tagAllocated[i] )
        {
            ec->tagAllocated[i] = NV_TRUE;
            hEc->tag = i;
            break;
        }
    }
    if ( NVEC_REQUESTOR_TAG_INVALID == hEc->tag )
        goto clean;     // ran out of tags, clean up!

    *phEc = hEc;
    s_refcount++;

    NvOsMutexUnlock( ec->mutex );

    ec->IsEcActive = NV_FALSE;

    return NvSuccess;

clean:
    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );
    return NvError_InsufficientMemory;

fail:
    if (!s_refcount)
    {
        ec->exitPingThread = NV_TRUE;
        if (ec->hPingSema)
            NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
        NvOsSemaphoreDestroy(ec->hPingSema);
        ec->exitThread = NV_TRUE;
        if (ec->sema)
            NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );
        NvOsFree( ec->hEc );
        if ( ec->transport )
            NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        if ( ec->mutex )
        {
            NvOsMutexUnlock( ec->mutex );
            // Destroying this mutex here is not safe: if another thread is
            // waiting on it, that can cause issues. Init/DeInit calls should
            // be serialized around creating and destroying this mutex.
            NvOsMutexDestroy( ec->mutex );
            NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
            ec->mutex = NULL;
        }
    }
    return NvError_NotInitialized;
}
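/*
 * Illustrative sketch, not part of the original source: the open/close
 * lifecycle of an NvEc client handle. InstanceId 0 is used purely as an
 * example value.
 */
static NvError ExampleEcSession(void)
{
    NvEcHandle hEc = NULL;
    NvError e;

    e = NvEcOpen(&hEc, 0 /* InstanceId, example value */);
    if (e != NvSuccess)
        return e;

    /* ... NvEcSendRequest() / NvEcRegisterForEvents() calls go here ... */

    NvEcClose(hEc);     /* releases the requestor tag for reuse */
    return NvSuccess;
}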
NvError
NvEcUnregisterForEvents(
    NvEcEventRegistrationHandle hEcEventRegistration)
{
    NvEcPrivState *ec;
    NvU32 tag;
    NvU32 tagMask;
    NvError e = NvSuccess;
    NvEcEventRegistration *p = NULL, *reg = NULL;
    NvEcEventNode *eventNode, *t;
    NvU32 i;

    if( NULL == hEcEventRegistration )
        return NvError_BadParameter;

    ec = hEcEventRegistration->hEc->ec;
    tag = hEcEventRegistration->hEc->tag;
    tagMask = (1UL << tag);

    NvOsMutexLock( ec->eventMutex );

    NVEC_REMOVE_FROM_Q( ec->eventReg[tag].reg, hEcEventRegistration, reg, p );
    if ( !reg )
    {
        e = NvError_BadParameter;   // can't find the handle
        goto fail;
    }

    eventNode = ec->eventReadyBegin;
    while ( eventNode )
    {
        // pre-advance eventNode since the current one could be removed
        t = eventNode;
        eventNode = eventNode->next;
        if ( (reg->eventBitmap & (1UL << t->event.EventType)) &&
             (t->tagBitmap & tagMask) )
        {
            t->tagBitmap &= ~tagMask;
            if ( !t->tagBitmap )
            {
                NvEcPrivRemoveEventFromReady( ec, t );
            }
        }
    }

    // remove global references to this registration
    i = 0;
    while( reg->eventBitmap )
    {
        NV_ASSERT(i < NvEcEventType_Num);
        if ( reg->eventBitmap & 1 )
        {
            ec->eventTagBitmap[i] &= ~tagMask;
            ec->eventMap[tag][i] = NULL;
        }
        reg->eventBitmap = reg->eventBitmap >> 1;
        i++;
    }

    NvOsSemaphoreDestroy( reg->sema );
    NvOsFree( hEcEventRegistration );

fail:
    NvOsMutexUnlock( ec->eventMutex );
    return e;
}
NvError
NvEcRegisterForEvents(
    NvEcHandle hEc,
    NvEcEventRegistrationHandle *phEcEventRegistration,
    NvOsSemaphoreHandle hSema,
    NvU32 NumEventTypes,
    NvEcEventType *pEventTypes,
    NvU32 NumEventPackets,
    NvU32 EventPacketSize)
{
    NvEcPrivState *ec = hEc->ec;
    NvEcEventRegistration *h = NULL;
    NvOsSemaphoreHandle hSemaClone = NULL;
    NvError e = NvSuccess;
    NvU32 val, i, tag = hEc->tag;
    NvU32 tagMask = (1UL << tag);

    if ( !hSema || !pEventTypes )
        return NvError_BadParameter;

    if ( !NumEventTypes || (NumEventTypes > NvEcEventType_Num) )
        return NvError_InvalidSize;     // FIXME: is this sufficient?

    NV_ASSERT( phEcEventRegistration );

    NvOsMutexLock( ec->mutex );
    if ( !ec->thread )
        NvEcPrivThreadCreate( ec );

    // Allocate the common pool of internal event-node buffering, if not
    // already allocated
    if ( !ec->eventNodes )
    {
        val = NVEC_NUM_EVENT_PACKETS_DEFAULT;
        if ( NumEventPackets > val )
            val = NumEventPackets;
        ec->eventNodes = NvOsAlloc(val * sizeof(NvEcEventNode));
        if ( NULL == ec->eventNodes )
        {
            NvOsMutexUnlock( ec->mutex );
            return NvError_InsufficientMemory;
        }
        NvOsMemset( ec->eventNodes, 0, (val * sizeof(NvEcEventNode)) );

        for( i = 0; i < val - 1; i++ )
            ec->eventNodes[i].next = &ec->eventNodes[i+1];
        ec->eventFreeBegin = ec->eventNodes;
        ec->eventFreeEnd = ec->eventNodes + val - 1;
    }
    NvOsMutexUnlock( ec->mutex );

    NV_CHECK_ERROR( NvOsSemaphoreClone( hSema, &hSemaClone ) );

    NvOsMutexLock( ec->eventMutex );

    // Quick pre-check for the AlreadyAllocated case
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        if ( val >= NvEcEventType_Num )
            e = NvError_BadParameter;
        else if ( ec->eventMap[tag][val] )
            e = NvError_AlreadyAllocated;
        if ( NvSuccess != e )
            goto fail;
    }

    h = NvOsAlloc( sizeof(NvEcEventRegistration));
    if ( NULL == h )
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }
    NvOsMemset( h, 0, sizeof(NvEcEventRegistration) );
    NVEC_ENQ( ec->eventReg[tag].reg, h );

    // Fill up new registration handle
    NV_ASSERT( NvEcEventType_Num <= 32 );   // eventBitmap only works if <= 32
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        h->eventBitmap |= (1 << val);
        ec->eventMap[tag][val] = h;
        ec->eventTagBitmap[val] |= tagMask;
    }
    h->numEventTypes = NumEventTypes;
    h->sema = hSemaClone;
    h->hEc = hEc;
    h->numEventPacketsHint = NumEventPackets;
    h->eventPacketSizeHint = EventPacketSize;   // ignored hints for now

    NvOsMutexUnlock( ec->eventMutex );
    *phEcEventRegistration = h;
    return e;

fail:
    NvOsSemaphoreDestroy( hSemaClone );
    NvOsMutexUnlock( ec->eventMutex );
    NvOsFree( h );
    return e;
}
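/*
 * Illustrative sketch, not part of the original source: registering for one
 * EC event type and waiting for it. The event type constant used below
 * (NvEcEventType_Keyboard) is assumed for illustration only; actual values
 * live in the NvEc headers. Retrieving the queued event data would use the
 * NvEc event-get API, which is not shown in this excerpt.
 */
static NvError ExampleWaitForEvent(NvEcHandle hEc)
{
    NvError e;
    NvOsSemaphoreHandle hSema = NULL;
    NvEcEventRegistrationHandle hReg = NULL;
    NvEcEventType type = NvEcEventType_Keyboard;    /* assumed example value */

    e = NvOsSemaphoreCreate(&hSema, 0);
    if (e != NvSuccess)
        return e;

    e = NvEcRegisterForEvents(hEc, &hReg, hSema,
            1, &type,                   /* NumEventTypes, pEventTypes */
            1, sizeof(NvEcEvent));      /* packet count/size hints (currently ignored) */
    if (e == NvSuccess)
    {
        NvOsSemaphoreWait(hSema);       /* signaled when a matching event arrives */
        NvEcUnregisterForEvents(hReg);
    }

    NvOsSemaphoreDestroy(hSema);
    return e;
}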
NvError
NvEcSendRequest(
    NvEcHandle hEc,
    NvEcRequest *pRequest,
    NvEcResponse *pResponse,
    NvU32 RequestSize,
    NvU32 ResponseSize)
{
    NvEcPrivState *ec;
    NvError e = NvSuccess;
    NvEcRequestNode *requestNode = NULL;
    NvEcResponseNode *responseNode = NULL;
    NvOsSemaphoreHandle requestSema = NULL;
    NvOsSemaphoreHandle responseSema = NULL;

    NV_ASSERT( pRequest );
    NV_ASSERT( hEc );
    if ( (RequestSize > sizeof(NvEcRequest)) ||
         (ResponseSize > sizeof(NvEcResponse)) )
        return NvError_InvalidSize;

    ec = hEc->ec;
    requestNode = NvOsAlloc(sizeof(NvEcRequestNode));
    if ( NULL == requestNode )
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }
    NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &requestSema, 0 ) );

    if ( pResponse )
    {
        responseNode = NvOsAlloc(sizeof(NvEcResponseNode));
        if ( NULL == responseNode )
        {
            e = NvError_InsufficientMemory;
            goto fail;
        }
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &responseSema, 0 ) );
    }

    ec->IsEcActive = NV_TRUE;

    // request en-queue. Timeout set to infinite until request is sent.
    NvOsMemset( requestNode, 0, sizeof(NvEcRequestNode) );
    pRequest->RequestorTag = hEc->tag;      // assigned tag here
    DISP_MESSAGE(("NvEcSendRequest:pRequest->RequestorTag=0x%x\n",
        pRequest->RequestorTag));
    NvOsMemcpy(&requestNode->request, pRequest, RequestSize);
    requestNode->tag = hEc->tag;
    DISP_MESSAGE(("NvEcSendRequest:requestNode->tag=0x%x\n",
        requestNode->tag));
    requestNode->sema = requestSema;
    requestNode->timeout = NV_WAIT_INFINITE;
    requestNode->completed = NV_FALSE;
    requestNode->size = RequestSize;

    NvOsMutexLock( ec->requestMutex );
    NVEC_ENQ( ec->request, requestNode );
    DISP_MESSAGE(("\r\nSendReq ec->requestBegin=0x%x", ec->requestBegin));
    NvOsMutexUnlock( ec->requestMutex );

    // response en-queue. Timeout set to infinite until request completes.
    if ( pResponse )
    {
        NvOsMemset( responseNode, 0, sizeof(NvEcResponseNode) );
        requestNode->responseNode = responseNode;   // association between
        responseNode->requestNode = requestNode;    // request & response
        responseNode->sema = responseSema;
        responseNode->timeout = NV_WAIT_INFINITE;
        responseNode->tag = hEc->tag;
        DISP_MESSAGE(("NvEcSendRequest:responseNode->tag=0x%x\n",
            responseNode->tag));
        responseNode->size = ResponseSize;

        NvOsMutexLock( ec->responseMutex );
        NVEC_ENQ( ec->response, responseNode );
        DISP_MESSAGE(("\r\nSendReq ec->responseBegin=0x%x",
            ec->responseBegin));
        NvOsMutexUnlock( ec->responseMutex );
    }

    NvOsMutexLock( ec->mutex );
    if ( !ec->thread )
        NvEcPrivThreadCreate( ec );
    NvOsMutexUnlock( ec->mutex );

    // Trigger EcPrivThread
    NvOsSemaphoreSignal( ec->sema );
    DISP_MESSAGE(("\r\nSendReq requestNode=0x%x, requestNode->responseNode=0x%x",
        requestNode, requestNode->responseNode));

    // Wait on Request returns
    NvOsSemaphoreWait( requestSema );
    DISP_MESSAGE(("\r\nSendReq Out of req sema"));

    e = requestNode->status;
    if ( NvSuccess != e )
    {
        NvEcResponseNode *t = NULL, *p = NULL;

        // de-queue responseNode too !!!!
        NvOsMutexLock( ec->responseMutex );
        NVEC_REMOVE_FROM_Q( ec->response, responseNode, t, p );
        DISP_MESSAGE(("\r\nSendReq responseBegin=0x%x", ec->responseBegin));
        NvOsMutexUnlock( ec->responseMutex );

        goto fail;
    }

    if ( pResponse )
    {
        // Wait on Response returns
        NvOsSemaphoreWait( responseSema );
        DISP_MESSAGE(("\r\nSendReq Out of resp sema"));
        NV_CHECK_ERROR_CLEANUP( responseNode->status );
        NvOsMemcpy(pResponse, &responseNode->response, ResponseSize);
    }

    // if successful, nodes should be de-queued already but not freed yet

fail:
    NvOsSemaphoreDestroy( requestSema );
    NvOsSemaphoreDestroy( responseSema );
    DISP_MESSAGE(("\r\nSendReq Freeing requestNode=0x%x, responseNode=0x%x",
        requestNode, responseNode));
    NvOsFree( requestNode );
    NvOsFree( responseNode );
    return e;
}
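/*
 * Illustrative sketch, not part of the original source: the caller-side
 * pattern for NvEcSendRequest(). The protocol-specific request fields are
 * defined by the NvEc headers and are deliberately not filled in here; only
 * the call shape is shown.
 */
static NvError ExampleSendRequest(NvEcHandle hEc)
{
    NvEcRequest req;
    NvEcResponse resp;

    NvOsMemset(&req, 0, sizeof(req));
    NvOsMemset(&resp, 0, sizeof(resp));
    /* ... fill in the protocol-specific request fields here ... */

    /* RequestorTag is overwritten by NvEcSendRequest() with hEc->tag, and
       the call blocks until the EC response (or a timeout error) comes back. */
    return NvEcSendRequest(hEc, &req, &resp, sizeof(req), sizeof(resp));
}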
void NvEcClose(NvEcHandle hEc)
{
    NvEcPrivState *ec;
    NvBool destroy = NV_FALSE;

    if ( NULL == hEc )
        return;

    NV_ASSERT( s_refcount );

    ec = hEc->ec;
    NvOsMutexLock( ec->mutex );

    // FIXME: handle client still with outstanding event types
    if ( !--s_refcount )
    {
        NvEcPrivDeinitHook(ec->hEc);
        NV_ASSERT( NULL == ec->eventReg[hEc->tag].regBegin &&
                   NULL == ec->eventReg[hEc->tag].regEnd );
        NV_ASSERT( NULL == ec->requestBegin && NULL == ec->requestEnd );
        NV_ASSERT( NULL == ec->responseBegin && NULL == ec->responseEnd );

#ifndef CONFIG_TEGRA_ODM_BETELGEUSE
        ec->exitPingThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
#endif
        ec->exitThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );

        NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
#ifndef CONFIG_TEGRA_ODM_BETELGEUSE
        NvOsSemaphoreDestroy( ec->hPingSema );
#endif
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        destroy = NV_TRUE;
        NvOsFree( ec->eventNodes );
        NvOsFree( ec->hEc );
    }

    // Set this flag as FALSE to indicate power is disabled
    //Daniel 20100723, if we change power state to NV_FALSE, we won't be able to suspend/poweroff it.
    //Is there any side effect ?????
    //ec->powerState = NV_FALSE;

    NV_ASSERT( hEc->tag < NVEC_MAX_REQUESTOR_TAG );
    ec->tagAllocated[hEc->tag] = NV_FALSE;      // to be recycled

    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    if ( destroy )
    {
        NvOsMutexDestroy( ec->mutex );
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = NULL;
    }
}
NvError
NvRmPowerVoltageControl(
    NvRmDeviceHandle hRmDeviceHandle,
    NvRmModuleID ModuleId,
    NvU32 ClientId,
    NvRmMilliVolts MinVolts,
    NvRmMilliVolts MaxVolts,
    const NvRmMilliVolts* PrefVoltageList,
    NvU32 PrefVoltageListCount,
    NvRmMilliVolts* pCurrentVolts)
{
    NvError error;
    NvU32 PowerGroup = 0;
    NvBool PowerChanged = NV_FALSE;
    NvRmModuleInstance *pInstance = NULL;
    ModuleVoltageReq* pVoltageReq = NULL;
    NvRmPowerClient* pPowerClient = NULL;
    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);

    /* validate the RM handle */
    NV_ASSERT(hRmDeviceHandle);

    // Validate module ID and get the associated Power Group
    if (ModuleId == NvRmPrivModuleID_System)
    {
        PowerGroup = NVRM_POWERGROUP_NPG_AUTO;
    }
    else
    {
        error = NvRmPrivGetModuleInstance(hRmDeviceHandle, ModuleId, &pInstance);
        if (error != NvSuccess)
        {
            NV_ASSERT(!" Voltage control: Invalid module ID. ");
            return NvError_ModuleNotPresent;
        }
        PowerGroup = pInstance->DevPowerGroup;
        NV_ASSERT(PowerGroup < NV_POWERGROUP_MAX);
    }

    NvOsMutexLock(s_hPowerClientMutex);

    // Check if this ID was registered; return error otherwise
    if (ClientIndex < pRegistry->UsedIndexRange)
    {
        pPowerClient = pRegistry->pPowerClients[ClientIndex];
    }
    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
    {
        NvOsMutexUnlock(s_hPowerClientMutex);
        return NvError_BadValue;
    }

    // Search for the previously recorded voltage request for this module
    pVoltageReq = pPowerClient->pVoltageReqHead;
    while ((pVoltageReq != NULL) && (pVoltageReq->ModuleId != ModuleId))
    {
        pVoltageReq = pVoltageReq->pNext;
    }

    // If it is a new voltage request record, allocate and fill it in,
    // otherwise just update power status. In both cases determine if
    // power requirements for the module have changed.
    if (pVoltageReq == NULL)
    {
        pVoltageReq = NvOsAlloc(sizeof(*pVoltageReq));
        if (pVoltageReq == NULL)
        {
            NvOsMutexUnlock(s_hPowerClientMutex);
            return NvError_InsufficientMemory;
        }
        // Link at head
        pVoltageReq->pNext = pPowerClient->pVoltageReqHead;
        pPowerClient->pVoltageReqHead = pVoltageReq;
        pVoltageReq->ModuleId = ModuleId;
        pVoltageReq->PowerGroup = PowerGroup;
        pVoltageReq->PowerCycled = NV_FALSE;

        // Only a new power On request counts as a change
        PowerChanged = (MaxVolts != NvRmVoltsOff);
    }
    else
    {
        // Only a change from On to Off or vice versa counts
        PowerChanged = (pVoltageReq->MaxVolts != MaxVolts) &&
            ((pVoltageReq->MaxVolts == NvRmVoltsOff) ||
             (MaxVolts == NvRmVoltsOff));
    }

    // Record new power request voltages
    pVoltageReq->MinVolts = MinVolts;
    pVoltageReq->MaxVolts = MaxVolts;

    // If module power requirements have changed, update the power group
    // reference count, and execute the respective h/w power control procedure
    if (PowerChanged)
    {
        if (MaxVolts != NvRmVoltsOff)
        {
            s_PowerOnRefCounts[PowerGroup]++;
            if (s_PowerOnRefCounts[PowerGroup] == 1)
            {
                NvRmMilliVolts v =
                    NvRmPrivPowerGroupGetVoltage(hRmDeviceHandle, PowerGroup);
                if (v == NvRmVoltsOff)
                {
                    RecordPowerCycle(hRmDeviceHandle, PowerGroup);
                    NvRmPrivPowerGroupControl(hRmDeviceHandle, PowerGroup,
                        NV_TRUE);
                }
            }
        }
        else
        {
            NV_ASSERT(s_PowerOnRefCounts[PowerGroup] != 0);
            if (s_PowerOnRefCounts[PowerGroup] == 0)
            {
                NVRM_POWER_PRINTF(("Power balance failed: module %d\n",
                    ModuleId));
            }
            s_PowerOnRefCounts[PowerGroup]--;
            if (s_PowerOnRefCounts[PowerGroup] == 0)
            {
                NvRmPrivPowerGroupControl(hRmDeviceHandle, PowerGroup,
                    NV_FALSE);
            }
        }
    }
    ReportRmPowerState(hRmDeviceHandle);

    // Return current voltage, unless this is the first request after the
    // module was power cycled by RM; in the latter case return NvRmVoltsCycled
    if (pCurrentVolts != NULL)
    {
        *pCurrentVolts =
            NvRmPrivPowerGroupGetVoltage(hRmDeviceHandle, PowerGroup);
        if (pVoltageReq->PowerCycled && (*pCurrentVolts != NvRmVoltsOff))
        {
            *pCurrentVolts = NvRmVoltsCycled;
        }
    }
    // In any case clear the power cycled indicator
    pVoltageReq->PowerCycled = NV_FALSE;

    NvOsMutexUnlock(s_hPowerClientMutex);
    return NvSuccess;
}
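/*
 * Illustrative sketch, not part of the original source: the usual on/off
 * pairing around NvRmPowerVoltageControl(). NvRmVoltsUnspecified is assumed
 * here as the conventional "any voltage" request value; the only constant
 * visible in this excerpt is NvRmVoltsOff, which drops the module's rail
 * request again.
 */
static void ExampleModulePowerOnOff(
    NvRmDeviceHandle hRm,
    NvRmModuleID ModuleId,
    NvU32 ClientId)
{
    /* Request power on; the voltage range is left to the RM (assumed constant). */
    (void)NvRmPowerVoltageControl(hRm, ModuleId, ClientId,
        NvRmVoltsUnspecified, NvRmVoltsUnspecified, NULL, 0, NULL);

    /* ... access the module ... */

    /* Drop the request; the power group reference count is decremented and
       the group is powered down when it reaches zero. */
    (void)NvRmPowerVoltageControl(hRm, ModuleId, ClientId,
        NvRmVoltsOff, NvRmVoltsOff, NULL, 0, NULL);
}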