/*
 * NvRmPowerGetState - query the combined RM power state.
 *
 * @param hRmDeviceHandle  RM device handle (must be non-NULL).
 * @param pState           Receives the current NvRmPowerState (must be non-NULL).
 * @return NvSuccess always.
 */
NvError NvRmPowerGetState( NvRmDeviceHandle hRmDeviceHandle, NvRmPowerState* pState)
{
    NV_ASSERT(hRmDeviceHandle);
    NV_ASSERT(pState);

    // The power-client mutex serializes access to the shared power state.
    NvOsMutexLock(s_hPowerClientMutex);
    *pState = NvRmPrivPowerGetState(hRmDeviceHandle);
    NvOsMutexUnlock(s_hPowerClientMutex);

    return NvSuccess;
}
/*
 * NvRmKernelPowerResume - RM side of kernel resume.
 *
 * Reports the RM power state under the power-client mutex, then, if the SoC
 * actually reached deep sleep, resumes the suspended power groups.
 *
 * @param hRmDeviceHandle  RM device handle.
 * @return NvSuccess always.
 */
NvError NvRmKernelPowerResume( NvRmDeviceHandle hRmDeviceHandle )
{
    NvOdmSocPowerState LowestState = NvRmPowerLowestStateGet();

    NvOsMutexLock(s_hPowerClientMutex);
    ReportRmPowerState(hRmDeviceHandle);
    NvOsMutexUnlock(s_hPowerClientMutex);

    /* Power groups were gated only if the SoC went all the way to suspend. */
    if (LowestState == NvOdmSocPowerState_Suspend)
        NvRmPrivPowerGroupResume(hRmDeviceHandle);

    return NvSuccess;
}
//20101121 , HW power off in thermal limit [START] NvU32 NvRmPmuSetHwPowerOffConfig( NvRmDeviceHandle hDevice, NvBool Enable) { NvU32 i, value; NV_ASSERT(s_Pmu.hMutex); NvOsMutexLock(s_Pmu.hMutex); value = NvOdmPmuSetHwPowerOffConfig(s_Pmu.hOdmPmu, Enable); NvOsMutexUnlock(s_Pmu.hMutex); return value; }
/*
 * Early-suspend hook: mark the battery driver as suspended so the polling
 * thread stops querying the battery while the system sleeps.
 */
static void nvec_battery_early_suspend(struct early_suspend *h)
{
    if (!batt_dev) {
        pr_info("\tBATTERY: battery no ready!\n");
        return;
    }

    pr_info("\tBATTERY: stop battery query during suspend --->>>\n");

    /* inSuspend is read by the polling thread; guard it with the mutex. */
    NvOsMutexLock(batt_dev->hBattEventMutex);
    batt_dev->inSuspend = NV_TRUE;
    NvOsMutexUnlock(batt_dev->hBattEventMutex);
}
/*
 * Late-resume hook: clear the suspend flag so the polling thread resumes
 * battery queries after the system wakes.
 */
static void nvec_battery_late_resume(struct early_suspend *h)
{
    if (!batt_dev) {
        pr_info("\tBATTERY: battery no ready!\n");
        return;
    }

    pr_info("\tBATTERY: start battery query after resume --->>>\n");

    /* inSuspend is read by the polling thread; guard it with the mutex. */
    NvOsMutexLock(batt_dev->hBattEventMutex);
    batt_dev->inSuspend = NV_FALSE;
    NvOsMutexUnlock(batt_dev->hBattEventMutex);
}
/*
 * NvRmPmuGetBatteryFullLifeTime - read the battery's full-charge lifetime
 * from the ODM PMU.
 *
 * @param hRmDevice    RM device handle (unused here; kept for API symmetry).
 * @param batteryInst  Which battery instance to query.
 * @param pLifeTime    Receives the lifetime value from the ODM layer.
 *
 * No-op when no PMU is supported in this environment.
 */
void NvRmPmuGetBatteryFullLifeTime( NvRmDeviceHandle hRmDevice, NvRmPmuBatteryInstance batteryInst, NvU32 * pLifeTime )
{
    if (!s_PmuSupportedEnv)
        return;
    NV_ASSERT(s_Pmu.hMutex);

    // Serialize all ODM PMU accesses.
    NvOsMutexLock(s_Pmu.hMutex);
    NvOdmPmuGetBatteryFullLifeTime( s_Pmu.hOdmPmu,(NvOdmPmuBatteryInstance)batteryInst, pLifeTime);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
/*
 * sysfs store handler: set the battery status polling period (parsed from
 * the written string) and kick the polling thread so the new period takes
 * effect immediately.
 */
static ssize_t tegra_battery_store_property( struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
    unsigned int period = simple_strtoul(buf, NULL, 0);

    NvOsMutexLock(batt_dev->hBattEventMutex);
    batt_dev->batt_status_poll_period = period;
    NvOsMutexUnlock(batt_dev->hBattEventMutex);

    /* Wake the event thread out of its timed wait right away. */
    NvOsSemaphoreSignal(batt_dev->hOdmSemaphore);

    return count;
}
/*
 * NvRmPowerEventNotify - forward a power event to registered power clients.
 *
 * @param hRmDeviceHandle  RM device handle (must be non-NULL).
 * @param Event            Event to broadcast; NoEvent is silently ignored.
 */
void NvRmPowerEventNotify( NvRmDeviceHandle hRmDeviceHandle, NvRmPowerEvent Event)
{
    NV_ASSERT(hRmDeviceHandle);

    /* Nothing to broadcast. */
    if (NvRmPowerEvent_NoEvent == Event)
        return;

    NvOsMutexLock(s_hPowerClientMutex);
    PowerEventNotify(hRmDeviceHandle, Event);
    NvOsMutexUnlock(s_hPowerClientMutex);
}
/*
 * NvBatteryEventHandlerThread - battery polling/event thread.
 *
 * Loops forever: waits up to batt_status_poll_period (or until signalled),
 * then polls battery status and events from the ODM layer. Skips polling
 * while the driver is suspended. On a remaining-capacity alarm with a
 * critical battery state it powers the machine off; other events update
 * the power-supply class.
 *
 * @param args  unused thread argument.
 */
void NvBatteryEventHandlerThread(void *args)
{
    NvU8 BatteryState = 0, BatteryEvent = 0;
    NvBool suspend_flag;

    for (;;)
    {
        // Timed wait doubles as both a poll period and a wakeup point
        // (signalled when the poll period changes or on exit).
        NvOsSemaphoreWaitTimeout(batt_dev->hOdmSemaphore,
            batt_dev->batt_status_poll_period);

        if (batt_dev->exitThread)
            break;
        if (!batt_dev->hOdmBattDev)
            continue;

        // Snapshot the suspend flag under its mutex.
        NvOsMutexLock(batt_dev->hBattEventMutex);
        suspend_flag = batt_dev->inSuspend;
        NvOsMutexUnlock(batt_dev->hBattEventMutex);
        if (suspend_flag)
            continue;

        pr_info("\tBATTERY: polling battery information! --->>>\n");

        NvOdmBatteryGetBatteryStatus(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, &BatteryState);
        NvOdmBatteryGetEvent(batt_dev->hOdmBattDev, &BatteryEvent);

        if ((BatteryState == NVODM_BATTERY_STATUS_UNKNOWN) ||
            (BatteryEvent == NvOdmBatteryEventType_Num))
        {
            /* Do nothing */
        }
        else
        {
            if (BatteryEvent & NvOdmBatteryEventType_RemainingCapacityAlarm)
            {
                // NOTE(review): this `==` against an OR'd mask fires only
                // when BatteryState is EXACTLY these three bits and no
                // others; if the intent was "all three flags set" it should
                // be `(BatteryState & mask) == mask` — confirm against the
                // NVODM_BATTERY_STATUS_* bit definitions.
                if (BatteryState == (NVODM_BATTERY_STATUS_CRITICAL |
                    NVODM_BATTERY_STATUS_VERY_CRITICAL |
                    NVODM_BATTERY_STATUS_DISCHARGING))
                {
                    pr_info("nvec_battery:calling kernel_power_off...\n");
                    kernel_power_off();
                }
            }
            else
            {
                /* Update the battery and power supply info for other events */
                power_supply_changed(&tegra_power_supplies[NvCharger_Type_Battery]);
                //power_supply_changed(&tegra_power_supplies[NvCharger_Type_USB]);
                // power_supply_changed(&tegra_power_supplies[NvCharger_Type_AC]);
            }
        }
    }
}
/*
 * NvRmPmuGetBatteryChemistry - read the battery chemistry from the ODM PMU.
 *
 * @param hRmDevice    RM device handle (unused here; kept for API symmetry).
 * @param batteryInst  Which battery instance to query.
 * @param pChemistry   Receives the chemistry value from the ODM layer.
 *
 * No-op when no PMU is supported in this environment.
 */
void NvRmPmuGetBatteryChemistry( NvRmDeviceHandle hRmDevice, NvRmPmuBatteryInstance batteryInst, NvRmPmuBatteryChemistry * pChemistry )
{
    if (!s_PmuSupportedEnv)
        return;
    NV_ASSERT(s_Pmu.hMutex);

    // Serialize all ODM PMU accesses.
    NvOsMutexLock(s_Pmu.hMutex);
    NvOdmPmuGetBatteryChemistry(s_Pmu.hOdmPmu, (NvOdmPmuBatteryInstance)batteryInst, (NvOdmPmuBatteryChemistry*)pChemistry);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
/*
 * NvRmPmuIsRtcInitialized - ask the ODM PMU whether its RTC has been
 * initialized.
 *
 * @param hRmDevice  RM device handle (unused here; kept for API symmetry).
 * @return NV_TRUE if the RTC is initialized; NV_FALSE otherwise or when no
 *         PMU is supported in this environment.
 */
NvBool NvRmPmuIsRtcInitialized( NvRmDeviceHandle hRmDevice)
{
    NvBool IsInitialized = NV_FALSE;

    if (s_PmuSupportedEnv)
    {
        NV_ASSERT(s_Pmu.hMutex);

        /* Serialize all ODM PMU accesses. */
        NvOsMutexLock(s_Pmu.hMutex);
        IsInitialized = NvOdmPmuIsRtcInitialized(s_Pmu.hOdmPmu);
        NvOsMutexUnlock(s_Pmu.hMutex);
    }
    return IsInitialized;
}
/*
 * NvRmAnalogInterfaceControl - enable/disable an analog interface.
 *
 * Only TV-DC and video-input interfaces need real analog control; the
 * remaining known interfaces are accepted and treated as no-ops.
 *
 * @param hDevice       RM device handle (must be non-NULL).
 * @param Interface     Packed interface id + instance.
 * @param Enable        NV_TRUE to enable, NV_FALSE to disable.
 * @param Config        Interface-specific configuration blob (may be NULL).
 * @param ConfigLength  Size of the configuration blob.
 * @return NvSuccess, or the error from the interface-specific control call.
 */
NvError NvRmAnalogInterfaceControl( NvRmDeviceHandle hDevice, NvRmAnalogInterface Interface, NvBool Enable, void *Config, NvU32 ConfigLength )
{
    NvError status = NvSuccess;
    NvU32 ifId = NVRM_ANALOG_INTERFACE_ID( Interface );
    NvU32 ifInst = NVRM_ANALOG_INTERFACE_INSTANCE( Interface );

    NV_ASSERT( hDevice );

    NvOsMutexLock( hDevice->mutex );
    switch( ifId )
    {
        case NvRmAnalogInterface_Tv:
            status = NvRmPrivTvDcControl( hDevice, Enable, ifInst, Config,
                ConfigLength );
            break;
        case NvRmAnalogInterface_VideoInput:
            status = NvRmPrivVideoInputControl( hDevice, Enable, ifInst,
                Config, ConfigLength);
            break;
        /* These interfaces require no analog control. */
        case NvRmAnalogInterface_Dsi:
        case NvRmAnalogInterface_ExternalMemory:
        case NvRmAnalogInterface_Hdmi:
        case NvRmAnalogInterface_Lcd:
        case NvRmAnalogInterface_Uart:
        case NvRmAnalogInterface_Sdio:
            break;
        default:
            NV_ASSERT(!"Unknown Analog interface passed. ");
    }
    NvOsMutexUnlock( hDevice->mutex );

    return status;
}
/*
 * NvRmKernelPowerSuspend - RM side of kernel suspend entry.
 *
 * If the SoC will reach deep sleep, gates the RM power groups. When the
 * debug build flag is set, also dumps the combined RM power state and any
 * modules that still hold active voltage requests (which would indicate a
 * client failed to release power before suspend).
 *
 * @param hRmDeviceHandle  RM device handle.
 * @return NvSuccess always (active modules are only reported, not failed).
 */
NvError NvRmKernelPowerSuspend( NvRmDeviceHandle hRmDeviceHandle )
{
    NvOdmSocPowerState state = NvRmPowerLowestStateGet();
    if (state == NvOdmSocPowerState_Suspend)
        NvRmPrivPowerGroupSuspend(hRmDeviceHandle);

#if NVRM_POWER_DEBUG_SUSPEND_ENTRY
    NvOsMutexLock(s_hPowerClientMutex);
    {
        NvU32 i;
        ModuleVoltageReq* pVoltageReq = NULL;
        NvRmPowerClient* pPowerClient = NULL;
        NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
        NvRmPowerState s = NvRmPrivPowerGetState(hRmDeviceHandle);

        // Report combined RM power state and active modules
        NvOsDebugPrintf("RM power state before suspend: %s (%d)\n",
            ((s == NvRmPowerState_Active) ? "Active" :
             ((s == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle")), s);

        // Walk every registered client's voltage-request list and name any
        // module that is still powered (MaxVolts != NvRmVoltsOff).
        if (s == NvRmPowerState_Active)
        {
            for (i = 0; i < pRegistry->UsedIndexRange; i++)
            {
                pPowerClient = pRegistry->pPowerClients[i];
                if (pPowerClient)
                {
                    pVoltageReq = pPowerClient->pVoltageReqHead;
                    while (pVoltageReq != NULL)
                    {
                        if (pVoltageReq->MaxVolts != NvRmVoltsOff)
                        {
                            // could also set some bad e = NvError_Bad???
                            NvOsDebugPrintf("Active Module: 0x%x\n",
                                pVoltageReq->ModuleId);
                        }
                        pVoltageReq = pVoltageReq->pNext;
                    }
                }
            }
        }
    }
    NvOsMutexUnlock(s_hPowerClientMutex);
#endif
    return NvSuccess;
}
/*
 * NvRmPmuGetAcLineStatus - read the AC line (charger) status from the ODM
 * PMU.
 *
 * @param hRmDevice  RM device handle (unused here; kept for API symmetry).
 * @param pStatus    Receives the AC line status.
 * @return NV_TRUE on success; NV_FALSE on ODM failure or when no PMU is
 *         supported in this environment.
 */
NvBool NvRmPmuGetAcLineStatus( NvRmDeviceHandle hRmDevice, NvRmPmuAcLineStatus * pStatus )
{
    NvBool ok = NV_FALSE;

    if (s_PmuSupportedEnv)
    {
        NV_ASSERT(s_Pmu.hMutex);

        /* Serialize all ODM PMU accesses. */
        NvOsMutexLock(s_Pmu.hMutex);
        ok = NvOdmPmuGetAcLineStatus(s_Pmu.hOdmPmu, (NvOdmPmuAcLineStatus*)pStatus);
        NvOsMutexUnlock(s_Pmu.hMutex);
    }
    return ok;
}
/*
 * NvRmPmuWriteAlarm - program the PMU RTC alarm counter.
 *
 * @param hRmDevice  RM device handle (unused here; kept for API symmetry).
 * @param Count      Alarm count value to write.
 * @return NV_TRUE on success; NV_FALSE on ODM failure or when no PMU is
 *         supported in this environment.
 */
NvBool NvRmPmuWriteAlarm( NvRmDeviceHandle hRmDevice, NvU32 Count)
{
    NvBool ok = NV_FALSE;

    if (s_PmuSupportedEnv)
    {
        NV_ASSERT(s_Pmu.hMutex);

        /* Serialize all ODM PMU accesses. */
        NvOsMutexLock(s_Pmu.hMutex);
        ok = NvOdmPmuWriteAlarm(s_Pmu.hOdmPmu, Count);
        NvOsMutexUnlock(s_Pmu.hMutex);
    }
    return ok;
}
/*
 * NvRmPmuGetBatteryStatus - read the battery status byte from the ODM PMU.
 *
 * @param hRmDevice    RM device handle (unused here; kept for API symmetry).
 * @param batteryInst  Which battery instance to query.
 * @param pStatus      Receives the status byte.
 * @return NV_TRUE on success; NV_FALSE on ODM failure or when no PMU is
 *         supported in this environment.
 */
NvBool NvRmPmuGetBatteryStatus( NvRmDeviceHandle hRmDevice, NvRmPmuBatteryInstance batteryInst, NvU8 * pStatus )
{
    NvBool ok = NV_FALSE;

    if (s_PmuSupportedEnv)
    {
        NV_ASSERT(s_Pmu.hMutex);

        /* Serialize all ODM PMU accesses. */
        NvOsMutexLock(s_Pmu.hMutex);
        ok = NvOdmPmuGetBatteryStatus( s_Pmu.hOdmPmu,
            (NvOdmPmuBatteryInstance)batteryInst, pStatus);
        NvOsMutexUnlock(s_Pmu.hMutex);
    }
    return ok;
}
/* get global variable gUsbCurrLimitC value and set it into
 * /proc/usbCurrLimitInfo for user space read */
static int tegra_usbCurrLimit_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int written;

    /* Snapshot the limit flag under its lock while formatting it. */
    NvOsMutexLock(usbCurrLimit_lock);
    written = snprintf(page, count, "%d", gUsbCurrLimitC);
    NvOsMutexUnlock(usbCurrLimit_lock);

    /* Single short value: the whole file fits in one read. */
    *eof = 1;
    return written;
}
/*
 * NvRmPmuGetVoltage - read a rail voltage from the ODM PMU, retrying a
 * bounded number of times on transient failure.
 *
 * @param hDevice      RM device handle (unused here; kept for API symmetry).
 * @param vddId        ODM rail identifier.
 * @param pMilliVolts  Receives the voltage in millivolts.
 *
 * No-op when no PMU is supported in this environment. Asserts (debug
 * builds) if every retry fails.
 */
void NvRmPmuGetVoltage( NvRmDeviceHandle hDevice, NvU32 vddId, NvU32 * pMilliVolts)
{
    NvU32 attempt;

    if (!s_PmuSupportedEnv)
        return;
    NV_ASSERT(s_Pmu.hMutex);

    /* Serialize all ODM PMU accesses. */
    NvOsMutexLock(s_Pmu.hMutex);
    for (attempt = 0; attempt < VOLTAGE_CONTROL_RETRY_CNT; attempt++)
    {
        if (NvOdmPmuGetVoltage(s_Pmu.hOdmPmu, vddId, pMilliVolts))
            break;
    }
    NV_ASSERT(attempt < VOLTAGE_CONTROL_RETRY_CNT);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
/*
 * NvRmPwmClose - release a PWM handle.
 *
 * Drops the handle's reference count under the PWM mutex; when the last
 * reference goes away, unmaps the PWM and PMC register apertures, powers
 * the controller down, unregisters from RM power, tri-states the pins and
 * frees the shared handle.
 *
 * @param hPwm  Handle returned by NvRmPwmOpen; NULL is a no-op.
 *
 * NOTE(review): the body mixes `hPwm` and the file-global `s_hPwm`;
 * presumably they are the same singleton handle — confirm before changing
 * either reference.
 */
void NvRmPwmClose(NvRmPwmHandle hPwm)
{
    NvU32 i;
    if (!hPwm)
        return;
    NV_ASSERT(hPwm->RefCount);

    NvOsMutexLock(s_hPwmMutex);
    hPwm->RefCount--;
    if (hPwm->RefCount == 0)
    {
        // Unmap the pwm register virtual address space
        for (i = 0; i < NvRmPwmOutputId_Num-2; i++)
        {
            NvRmPhysicalMemUnmap((void*)s_hPwm->VirtualAddress[i],
                s_hPwm->PwmBankSize);
        }

        // Unmap the pmc register virtual address space
        NvRmPhysicalMemUnmap(
            (void*)s_hPwm->VirtualAddress[NvRmPwmOutputId_Num-2],
            s_hPwm->PmcBankSize);

        // Only tear down power/pinmux state if it was ever configured.
        if (s_IsPwmFirstConfig)
        {
            // Disable power
            PwmPowerConfigure(hPwm, NV_FALSE);

            // Unregister with RM power
            NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);

            // Tri-state the pin-mux pins
            NV_ASSERT_SUCCESS(NvRmSetModuleTristate(hPwm->RmDeviceHandle,
                NVRM_MODULE_ID(NvRmModuleID_Pwm, 0), NV_TRUE));
            s_IsPwmFirstConfig = NV_FALSE;
        }
        NvOsFree(s_hPwm);
        s_hPwm = NULL;
    }
    NvOsMutexUnlock(s_hPwmMutex);
}
/*
 * NvEcPrivProcessPostSendRequest - completion path after a request packet
 * has been handed to the transport.
 *
 * Dequeues the finished request, marks it completed, puts its pending
 * response (if any) onto the timeout watch, then signals the requester.
 *
 * @param ec               EC private state.
 * @param transportStatus  Send status reported by the transport, recorded
 *                         into the dequeued request node.
 */
static void NvEcPrivProcessPostSendRequest( NvEcPrivState *ec, NvError transportStatus )
{
    NvEcRequestNode *requestNode = NULL;
    NvEcResponseNode *responseNode;

    NvEcPrivFindAndDequeueRequest(ec, &requestNode, transportStatus, NV_TRUE);

    // update corresponding responseNode timeout
    if ( requestNode )
    {
        requestNode->completed = NV_TRUE;
        responseNode = requestNode->responseNode;
        if ( responseNode )
        {
            NvOsMutexLock( ec->responseMutex );
            NV_ASSERT(ec->responseBegin);
            if ( NV_WAIT_INFINITE == ec->timeout[NVEC_IDX_RESPONSE] )
            {
                // no current pending response on timeout watch.
                // Update response queue timeout.
                responseNode->timeout = NVEC_RESPONSE_TIMEOUT_DEFAULT;
                ec->timeout[NVEC_IDX_RESPONSE] = responseNode->timeout;
                ec->timeoutBase[NVEC_IDX_RESPONSE] = ec->lastTime;
                DISP_MESSAGE(("\r\nec->timeout[NVEC_IDX_RESPONSE] is set to=%d",
                    ec->timeout[NVEC_IDX_RESPONSE]));
            }
            else
            {
                // Update this response timeout with current lastTime as base
                responseNode->timeout = NVEC_RESPONSE_TIMEOUT_DEFAULT +
                    NVEC_TIME_BASE(ec, NVEC_IDX_RESPONSE);
                // wraparound time difference will work too.
            }
            NvOsMutexUnlock( ec->responseMutex );
        }
        NvOsSemaphoreSignal( requestNode->sema );
        // all request stuff should be done before signal
    }
}
/*
 * /proc write handler: set gUsbCurrLimitC when user space writes "1".
 *
 * Fixes over the original:
 *  - copy_from_user() copied `count` bytes into a 4-byte static buffer,
 *    a kernel stack/data overflow for writes longer than 4 bytes; the copy
 *    is now clamped and the buffer always NUL-terminated.
 *  - gUsbCurrLimitC was written before taking usbCurrLimit_lock; the store
 *    is now inside the lock (matching the read-side locking).
 *  - the handler returned the length of a scratch snprintf instead of the
 *    number of bytes consumed; a proc write handler must return `count`
 *    (or an error), otherwise the VFS re-issues the remaining bytes.
 */
static int tegra_usbCurrLimit_write_proc (struct file *file, const char *buffer, unsigned long count, void *data)
{
    char proc_buf[4] = { 0 };
    unsigned long n = count;

    /* Clamp so the copy can never overflow and the buffer stays
     * NUL-terminated. */
    if ( n > sizeof(proc_buf) - 1 )
        n = sizeof(proc_buf) - 1;
    if ( copy_from_user(proc_buf, buffer, n) )
    {
        return -EFAULT;
    }
    proc_buf[n] = '\0';

    if ( strncmp( proc_buf, "1", 1) == 0 )
    {
        NvOsMutexLock(usbCurrLimit_lock);
        gUsbCurrLimitC = 1;
        NvOsMutexUnlock(usbCurrLimit_lock);
        /* Consume the whole write so the VFS does not retry. */
        return (int)count;
    }
    else
    {
        NvOsDebugNprintf("tegra_usbCurrLimit_write_proc fail\n");
        return -1;
    }
}
/*
 * NvRmPmuSetChargingCurrentLimit - program the charging current limit on
 * the ODM PMU, retrying a bounded number of times on transient failure.
 *
 * @param hRmDevice               RM device handle (unused; API symmetry).
 * @param ChargingPath            Which charging path to configure.
 * @param ChargingCurrentLimitMa  Current limit in milliamps.
 * @param ChargerType             Charger type forwarded to the ODM layer.
 *
 * No-op when no PMU is supported in this environment. Asserts (debug
 * builds) if every retry fails.
 */
void NvRmPmuSetChargingCurrentLimit( NvRmDeviceHandle hRmDevice, NvRmPmuChargingPath ChargingPath, NvU32 ChargingCurrentLimitMa, NvU32 ChargerType)
{
    NvU32 attempt;

    if (!s_PmuSupportedEnv)
        return;
    NV_ASSERT(s_Pmu.hMutex);

    /* Serialize all ODM PMU accesses. */
    NvOsMutexLock(s_Pmu.hMutex);
    for (attempt = 0; attempt < VOLTAGE_CONTROL_RETRY_CNT; attempt++)
    {
        if (NvOdmPmuSetChargingCurrent( s_Pmu.hOdmPmu,
            (NvOdmPmuChargingPath)ChargingPath, ChargingCurrentLimitMa,
            ChargerType))
            break;
    }
    NV_ASSERT(attempt < VOLTAGE_CONTROL_RETRY_CNT);
    NvOsMutexUnlock(s_Pmu.hMutex);
}
NvError NvEcPowerResume(void) { NvError e = NvSuccess; NvEcPrivState *ec = &g_ec; NvOsMutexLock(ec->mutex); // Call transport's power on if it's OFF state if (ec->powerState == NV_FALSE) { NV_CHECK_ERROR_CLEANUP( NvEcTransportPowerResume(ec->transport) ); ec->powerState = NV_TRUE; ec->EnterLowPowerState = NV_FALSE; // Signal priv thread to get out of power suspend. NvOsSemaphoreSignal(ec->LowPowerExitSema); // Perform post-resume EC operations NvEcPrivPowerResumeHook(ec->hEc); } fail: NvOsMutexUnlock(ec->mutex); return e; }
/*
 * NvRmGetKeyValue - look up a key's value in the global key list.
 *
 * @param hRm    RM device handle (unused in the lookup).
 * @param KeyID  Key identifier to search for.
 * @return the stored value, or 0 when the key is not present.
 */
NvU32 NvRmGetKeyValue(NvRmDeviceHandle hRm, NvU32 KeyID)
{
    Key *pNode;
    NvU32 Result = 0;   /* 0 doubles as the "not found" value */
    unsigned int idx;

    NvOsMutexLock(s_Mutex);
    for (pNode = s_pKeyList; pNode; pNode = pNode->pNextKey)
    {
        for (idx = 0; idx < pNode->Count; idx++)
        {
            if (pNode->KeyID[idx] == KeyID)
            {
                Result = pNode->Value[idx];
                goto done;
            }
        }
    }
done:
    NvOsMutexUnlock(s_Mutex);
    return Result;
}
/*
 * Process receive (response & event) and update individual timeout.
 *
 * Return NvError_InsufficientMemory due to 2 conditions:
 * - internal event queue (ec->eventNodes) too small.
 * - client did NvEcRegisterForEvents.
 * Skip TransportGetReceivePacket and transport will keep NACK'ing EC in this
 * error case.
 */
static NvError NvEcPrivProcessReceiveEvent( NvEcPrivState *ec, NvError transportStatus )
{
    NvError e = NvSuccess;
    NvEcEventNode *eventNode = NULL;
    NvEcEventRegistration *reg = NULL;
    NvEcEvent *packet = NULL;
    NvEcEventType eventType;
    NvU32 i, tagBitmap;

    NvOsMutexLock( ec->eventMutex );
    // Grab a free event node to receive into; without one, bail out and
    // leave the packet un-fetched (transport keeps NACK'ing, see above).
    if ( ec->eventFreeBegin )
    {
        eventNode = ec->eventFreeBegin;
        packet = &eventNode->event;
    }
    else
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }
    // NOTE(review): NV_CHECK_ERROR returns from this function on failure,
    // which would leave eventMutex locked — confirm the macro's definition
    // and whether this early-return path can actually occur here.
    NV_CHECK_ERROR( NvEcTransportGetReceivePacket( ec->transport,
        (NvEcResponse *)packet, sizeof(NvEcEvent) ) );
    // nothing we can do here if error!
    eventType = packet->EventType;
    NV_ASSERT( eventType < NvEcEventType_Num );
    e = NvError_InvalidState; // init to event type never registered

    // Walk every client tag registered for this event type. On the first
    // hit, move the node from the free list onto the ready queue; signal
    // every registered client's semaphore.
    i = 0;
    tagBitmap = ec->eventTagBitmap[eventType];
    while( tagBitmap )
    {
        NV_ASSERT( i < NvEcEventType_Num );
        if ( tagBitmap & 1 )
        {
            reg = ec->eventMap[i][eventType];
            NV_ASSERT( reg );
            if ( NvSuccess != e )
            {
                // dequeue from free and enqueue into ready if not done yet
                ec->eventFreeBegin = eventNode->next;
                if ( ec->eventFreeBegin == NULL )
                    ec->eventFreeEnd = NULL;
                eventNode->timeout = NVEC_EVENT_TIMEOUT_DEFAULT; // ???
                eventNode->tagBitmap = ec->eventTagBitmap[eventType];
                eventNode->next = NULL;
                NVEC_ENQ( ec->eventReady, eventNode );
                e = NvSuccess;
            }
            NvOsSemaphoreSignal( reg->sema );
        }
        i++;
        tagBitmap = tagBitmap >> 1;
    }
fail:
    NvOsMutexUnlock( ec->eventMutex );
    return e;
}
/*
 * Traverse response nodes with these:
 * - one response matching the tag param (returns the node). If bypassing
 *   tag checking, use INVALID tag as parameter.
 * - all responses timeout (will signal back too)
 * - Update individual responseNode's timeout by rebasing to
 *   EcPrivThread-global time (hEc->lastTime).
 * - Update shortest timeout value for response queue.
 */
static void NvEcPrivFindAndDequeueResponse( NvEcPrivState *ec,
    NvEcResponse *response, NvEcResponseNode **pResponseNode )
{
    NvEcResponseNode *t = NULL, *p = NULL, *temp;
    NvU32 timeout = NV_WAIT_INFINITE;
    NvBool remove = NV_FALSE, found = NV_FALSE;
    NvBool SignalSema;

    NvOsMutexLock( ec->responseMutex );
    NV_ASSERT(ec->responseBegin);
    DISP_MESSAGE(("\r\nFindDQRes responseBegin=0x%x", ec->responseBegin));
    if ( ec->responseBegin )
    {
        // Single pass over the queue: either match-and-dequeue the node for
        // `response`, or expire/rebase each remaining node's timeout.
        t = ec->responseBegin;
        while( t )
        {
            SignalSema = NV_FALSE;
            /* FIXME: just match tag? more to match?
             * There may be the cases where spurious response is received from EC.
             * Response should not be removed from the queue until req is complete.
             */
            DISP_MESSAGE(("t->tag=0x%x\n", t->tag));
            if (response)
                DISP_MESSAGE(("response->RequestorTag=0x%x\n",
                    response->RequestorTag));
            // Match only completed requests so a spurious response cannot
            // steal a node whose request is still in flight.
            if ( response && !found && (t->tag == response->RequestorTag) &&
                t->requestNode->completed )
            {
                if ( pResponseNode )
                    *pResponseNode = t;
                found = NV_TRUE;
                remove = NV_TRUE;   // dequeued for the caller; caller signals
            }
            else
            {
#if ENABLE_TIMEOUT
                if ( t->timeout <= NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE) )
                {
                    // Node expired: mark it and signal its waiter below.
                    t->status = NvError_Timeout;
                    SignalSema = NV_TRUE;
                    remove = NV_TRUE;
                    DISP_MESSAGE(("Resp Timeout Respnode=0x%x", t));
                }
                else
                {
                    // This check is needed for spurious response case handling.
                    if (t->timeout != NV_WAIT_INFINITE)
                        t->timeout -= NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE);
                    // update this response timeout w/ lastTime as base
                }
#endif
            }
            if ( remove )
            {
                temp = t;
                NVEC_UNLINK( ec->response, t, p );
                DISP_MESSAGE(("\r\nFindDQRes removed=0x%x, removed->next=0x%x, "
                    "prev=0x%x ec->responseBegin=0x%x", t, t->next, p,
                    ec->responseBegin));
                remove = NV_FALSE;
                // Resume traversal from the node after the removed one.
                if (p)
                    t = p->next;
                else
                    t = ec->responseBegin;
                // Only timed-out nodes are signalled here; a matched node is
                // handed to the caller instead.
                if (SignalSema == NV_TRUE)
                    NvOsSemaphoreSignal( temp->sema );
            }
            else
            {
                // Track the shortest remaining timeout for the queue.
                if ( timeout > t->timeout )
                    timeout = t->timeout;
                p = t;
                t = t->next;
            }
        }
        // update with per-queue timeout and timeoutBase
        ec->timeout[NVEC_IDX_RESPONSE] = timeout;
        ec->timeoutBase[NVEC_IDX_RESPONSE] = ec->lastTime;
        DISP_MESSAGE(("\r\nec->timeout[NVEC_IDX_RESPONSE] is set to=%d",
            ec->timeout[NVEC_IDX_RESPONSE]));
    }
    if (found == NV_FALSE)
        NvOsDebugPrintf("\r\n***NVEC:Received Spurious Response from EC.");
    NvOsMutexUnlock( ec->responseMutex );
}
/*
 * NvEcClose - release a client handle obtained from NvEcOpen.
 *
 * Drops the module reference count under ec->mutex; the last close tears
 * down the ping and priv threads, the transport, and all module sync
 * objects, then (outside the lock) destroys ec->mutex and clears the
 * private state. The handle's requestor tag is always recycled.
 *
 * @param hEc  Client handle; NULL is a no-op.
 */
void NvEcClose(NvEcHandle hEc)
{
    NvEcPrivState *ec;
    NvBool destroy = NV_FALSE;

    if ( NULL == hEc )
        return;

    NV_ASSERT( s_refcount );

    ec = hEc->ec;
    NvOsMutexLock( ec->mutex );

    // FIXME: handle client still with outstanding event types
    if ( !--s_refcount )
    {
        NvEcPrivDeinitHook(ec->hEc);
        // By last close all queues must already be drained.
        NV_ASSERT( NULL == ec->eventReg[hEc->tag].regBegin &&
            NULL == ec->eventReg[hEc->tag].regEnd );
        NV_ASSERT( NULL == ec->requestBegin && NULL == ec->requestEnd );
        NV_ASSERT( NULL == ec->responseBegin && NULL == ec->responseEnd );

        // Stop the ping thread, then the priv thread, before closing the
        // transport they both use.
        ec->exitPingThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
        ec->exitThread = NV_TRUE;
        NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );
        NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->hPingSema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        // ec->mutex itself is destroyed after it is released, below.
        destroy = NV_TRUE;
        NvOsFree( ec->eventNodes );
        NvOsFree( ec->hEc );
    }

    // Set this flag as FALSE to indicate power is disabled
    ec->powerState = NV_FALSE;

    NV_ASSERT( hEc->tag < NVEC_MAX_REQUESTOR_TAG );
    ec->tagAllocated[hEc->tag] = NV_FALSE; // to be recycled

    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );

    if ( destroy )
    {
        // NOTE(review): destroying the mutex right after unlocking is racy
        // if another thread is concurrently entering NvEcOpen/NvEcClose —
        // same caveat as documented in NvEcOpen's fail path.
        NvOsMutexDestroy( ec->mutex );
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = NULL;
    }
}
/*
 * NvEcOpen - open a client handle to the embedded controller driver.
 *
 * First open creates all module state (mutexes, semaphores, transport, the
 * internal zero-tag handle, init hook and ping thread); every open then
 * allocates a per-client handle and assigns it a free requestor tag.
 *
 * @param phEc        Receives the new client handle.
 * @param InstanceId  Transport instance to open.
 * @return NvSuccess, NvError_InsufficientMemory when allocation or tag
 *         assignment fails, or NvError_NotInitialized when first-open
 *         module setup fails.
 */
NvError NvEcOpen(NvEcHandle *phEc, NvU32 InstanceId)
{
    NvEc *hEc = NULL;
    NvU32 i;
    NvEcPrivState *ec = &g_ec;
    NvOsMutexHandle mutex = NULL;
    NvError e = NvSuccess;

    NV_ASSERT( phEc );

    // Lazily create the module mutex; the atomic exchange resolves the race
    // when two threads open simultaneously (the loser destroys its copy).
    // NOTE(review): casting a mutex handle through NvS32 assumes pointers
    // fit in 32 bits — confirm this code never builds for a 64-bit target.
    if ( NULL == ec->mutex )
    {
        e = NvOsMutexCreate(&mutex);
        if (NvSuccess != e)
            return e;
        if (0 != NvOsAtomicCompareExchange32((NvS32*)&ec->mutex, 0,
            (NvS32)mutex) )
            NvOsMutexDestroy( mutex );
    }

    NvOsMutexLock(ec->mutex);
    // First open: build all module-level sync objects and the transport.
    if ( !s_refcount )
    {
        mutex = ec->mutex;
        NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
        ec->mutex = mutex;
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->requestMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->responseMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsMutexCreate( &ec->eventMutex ));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->sema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerEntrySema, 0));
        NV_CHECK_ERROR_CLEANUP( NvOsSemaphoreCreate( &ec->LowPowerExitSema, 0));
        NV_CHECK_ERROR_CLEANUP( NvEcTransportOpen( &ec->transport, InstanceId,
            ec->sema, 0 ) );
    }

    // Set this flag as TRUE to indicate power is enabled
    ec->powerState = NV_TRUE;

    // create private handle for internal communications between NvEc driver
    // and EC
    if ( !s_refcount )
    {
        ec->hEc = NvOsAlloc( sizeof(NvEc) );
        if ( NULL == ec->hEc )
            goto clean;
        // reserve the zero tag for internal use by the nvec driver; this ensures
        // that the driver always has a requestor tag available and can therefore
        // always talk to the EC
        ec->tagAllocated[0] = NV_TRUE;
        ec->hEc->ec = ec;
        ec->hEc->tag = 0;
        NV_CHECK_ERROR_CLEANUP(NvOsSemaphoreCreate(&ec->hPingSema, 0));
        // perform startup operations before mutex is unlocked
        NV_CHECK_ERROR_CLEANUP( NvEcPrivInitHook(ec->hEc) );
        // start thread to send "pings" - no-op commands to keep EC "alive"
        NV_CHECK_ERROR_CLEANUP(NvOsThreadCreate(
            (NvOsThreadFunction)NvEcPrivPingThread, ec, &ec->hPingThread));
    }

    // Per-client handle: grab the first free requestor tag.
    hEc = NvOsAlloc( sizeof(NvEc) );
    if ( NULL == hEc )
        goto clean;
    NvOsMemset(hEc, 0x00, sizeof(NvEc));
    hEc->ec = ec;
    hEc->tag = NVEC_REQUESTOR_TAG_INVALID;
    for ( i = 0; i < NVEC_MAX_REQUESTOR_TAG; i++ )
    {
        if ( !ec->tagAllocated[i] )
        {
            ec->tagAllocated[i] = NV_TRUE;
            hEc->tag = i;
            break;
        }
    }
    if ( NVEC_REQUESTOR_TAG_INVALID == hEc->tag )
        goto clean; // run out of tag, clean it up!

    *phEc = hEc;
    s_refcount++;
    NvOsMutexUnlock( ec->mutex );
    // NOTE(review): this write happens after the mutex is released — confirm
    // IsEcActive has no reader that could race with a concurrent open.
    ec->IsEcActive = NV_FALSE;

    return NvSuccess;

clean:
    // Per-client allocation failure: module state (if any) stays intact.
    NvOsFree( hEc );
    NvOsMutexUnlock( ec->mutex );
    return NvError_InsufficientMemory;

fail:
    // First-open setup failure: tear down everything created above.
    if (!s_refcount)
    {
        ec->exitPingThread = NV_TRUE;
        if (ec->hPingSema)
            NvOsSemaphoreSignal( ec->hPingSema );
        NvOsThreadJoin( ec->hPingThread );
        NvOsSemaphoreDestroy(ec->hPingSema);
        ec->exitThread = NV_TRUE;
        if (ec->sema)
            NvOsSemaphoreSignal( ec->sema );
        NvOsThreadJoin( ec->thread );
        NvOsFree( ec->hEc );
        if ( ec->transport )
            NvEcTransportClose( ec->transport );
        NvOsMutexDestroy( ec->requestMutex );
        NvOsMutexDestroy( ec->responseMutex );
        NvOsMutexDestroy( ec->eventMutex );
        NvOsSemaphoreDestroy( ec->sema );
        NvOsSemaphoreDestroy( ec->LowPowerEntrySema );
        NvOsSemaphoreDestroy( ec->LowPowerExitSema );
        if ( ec->mutex )
        {
            NvOsMutexUnlock( ec->mutex );
            // Destroying of this mutex here is not safe, if another thread is
            // waiting on this mutex, it can cause issues. We should have
            // serialized Init/DeInit calls for creating and destroying this mutex.
            NvOsMutexDestroy( ec->mutex );
            NvOsMemset( ec, 0, sizeof(NvEcPrivState) );
            ec->mutex = NULL;
        }
    }
    return NvError_NotInitialized;
}
/*
 * NvEcUnregisterForEvents - tear down an event registration.
 *
 * Removes the registration from its tag's list, drops this tag's interest
 * from any ready events (freeing events nobody else wants), clears the
 * global eventMap/eventTagBitmap references, then destroys the cloned
 * semaphore and frees the registration.
 *
 * @param hEcEventRegistration  Handle from NvEcRegisterForEvents.
 * @return NvSuccess, or NvError_BadParameter for a NULL/unknown handle.
 */
NvError NvEcUnregisterForEvents( NvEcEventRegistrationHandle hEcEventRegistration)
{
    NvEcPrivState *ec;
    NvU32 tag;
    NvU32 tagMask;
    NvError e = NvSuccess;
    NvEcEventRegistration *p = NULL, *reg = NULL;
    NvEcEventNode *eventNode, *t;
    NvU32 i;

    if( NULL == hEcEventRegistration )
        return NvError_BadParameter;

    ec = hEcEventRegistration->hEc->ec;
    tag = hEcEventRegistration->hEc->tag;
    tagMask = (1UL << tag);

    NvOsMutexLock( ec->eventMutex );

    NVEC_REMOVE_FROM_Q( ec->eventReg[tag].reg, hEcEventRegistration, reg, p );
    if ( !reg )
    {
        e = NvError_BadParameter; // can't find the handle
        goto fail;
    }

    // Drop this tag's claim on any ready events it was registered for;
    // free any event no other tag still wants.
    eventNode = ec->eventReadyBegin;
    while ( eventNode )
    {
        // pre advance eventNode since current one could be removed
        t = eventNode;
        eventNode = eventNode->next;
        if ( (reg->eventBitmap & (1UL << t->event.EventType)) &&
            (t->tagBitmap & tagMask) )
        {
            t->tagBitmap &= ~tagMask;
            if ( !t->tagBitmap )
            {
                NvEcPrivRemoveEventFromReady( ec, t );
            }
        }
    }

    // remove global references to this registration
    i = 0;
    while( reg->eventBitmap )
    {
        NV_ASSERT(i < NvEcEventType_Num);
        if ( reg->eventBitmap & 1 )
        {
            ec->eventTagBitmap[i] &= ~tagMask;
            ec->eventMap[tag][i] = NULL;
        }
        reg->eventBitmap = reg->eventBitmap >> 1;
        i++;
    }

    NvOsSemaphoreDestroy( reg->sema );
    NvOsFree( hEcEventRegistration );
fail:
    NvOsMutexUnlock( ec->eventMutex );
    return e;
}
/*
 * NvEcRegisterForEvents - register a client for one or more EC event types.
 *
 * Lazily creates the priv thread and the shared event-node pool, clones the
 * caller's semaphore, verifies none of the requested types is already
 * registered for this client's tag, then records the registration in the
 * per-tag map and global tag bitmaps.
 *
 * @param hEc                    Client handle from NvEcOpen.
 * @param phEcEventRegistration  Receives the registration handle.
 * @param hSema                  Semaphore signalled on each matching event
 *                               (cloned; caller keeps ownership of hSema).
 * @param NumEventTypes          Number of entries in pEventTypes (1..Num).
 * @param pEventTypes            Event types to register for.
 * @param NumEventPackets        Sizing hint for the shared event pool.
 * @param EventPacketSize        Hint; currently ignored.
 * @return NvSuccess, NvError_BadParameter / NvError_InvalidSize for bad
 *         arguments, NvError_AlreadyAllocated when a type is already
 *         registered for this tag, NvError_InsufficientMemory on alloc
 *         failure.
 */
NvError NvEcRegisterForEvents( NvEcHandle hEc,
    NvEcEventRegistrationHandle *phEcEventRegistration,
    NvOsSemaphoreHandle hSema, NvU32 NumEventTypes, NvEcEventType *pEventTypes,
    NvU32 NumEventPackets, NvU32 EventPacketSize)
{
    NvEcPrivState *ec = hEc->ec;
    NvEcEventRegistration *h = NULL;
    NvOsSemaphoreHandle hSemaClone = NULL;
    NvError e = NvSuccess;
    NvU32 val, i, tag = hEc->tag;
    NvU32 tagMask = (1UL << tag);

    if ( !hSema || !pEventTypes )
        return NvError_BadParameter;
    if ( !NumEventTypes || (NumEventTypes > NvEcEventType_Num) )
        return NvError_InvalidSize; // FIXME: is this sufficient?

    NV_ASSERT( phEcEventRegistration );

    NvOsMutexLock( ec->mutex );
    if ( !ec->thread )
        NvEcPrivThreadCreate( ec );

    // Allocate common pool of internal event nodes bufferring if not already
    if ( !ec->eventNodes )
    {
        val = NVEC_NUM_EVENT_PACKETS_DEFAULT;
        if ( NumEventPackets > val )
            val = NumEventPackets;
        ec->eventNodes = NvOsAlloc(val * sizeof(NvEcEventNode));
        if ( NULL == ec->eventNodes )
        {
            NvOsMutexUnlock( ec->mutex );
            return NvError_InsufficientMemory;
        }
        NvOsMemset( ec->eventNodes, 0, (val * sizeof(NvEcEventNode)) );
        // Chain the pool into the free list.
        for( i = 0; i < val - 1; i++ )
            ec->eventNodes[i].next = &ec->eventNodes[i+1];
        ec->eventFreeBegin = ec->eventNodes;
        ec->eventFreeEnd = ec->eventNodes + val - 1;
    }
    NvOsMutexUnlock( ec->mutex );

    NV_CHECK_ERROR( NvOsSemaphoreClone( hSema, &hSemaClone ) );
    NvOsMutexLock( ec->eventMutex );
    // Quick pre-check for AlreadyAllocated case
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        if ( val >= NvEcEventType_Num )
            e = NvError_BadParameter;
        else if ( ec->eventMap[tag][val] )
            e = NvError_AlreadyAllocated;
        if ( NvSuccess != e )
            goto fail;
    }

    h = NvOsAlloc( sizeof(NvEcEventRegistration));
    if ( NULL == h )
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }
    NvOsMemset( h, 0, sizeof(NvEcEventRegistration) );
    NVEC_ENQ( ec->eventReg[tag].reg, h );

    // Fill up new registration handle
    NV_ASSERT( NvEcEventType_Num <= 32 ); // eventBitmap only works if <= 32
    for ( i = 0; i < NumEventTypes; i++ )
    {
        val = pEventTypes[i];
        h->eventBitmap |= (1 << val);
        ec->eventMap[tag][val] = h;
        ec->eventTagBitmap[val] |= tagMask;
    }
    h->numEventTypes = NumEventTypes;
    h->sema = hSemaClone;
    h->hEc = hEc;
    h->numEventPacketsHint = NumEventPackets;
    h->eventPacketSizeHint = EventPacketSize; // ignored hints for now
    NvOsMutexUnlock( ec->eventMutex );

    *phEcEventRegistration = h;
    return e;

fail:
    NvOsSemaphoreDestroy( hSemaClone );
    NvOsMutexUnlock( ec->eventMutex );
    NvOsFree( h );
    return e;
}