NvError NvEcPowerResume(void)
{
    NvError e = NvSuccess;
    NvEcPrivState *ec = &g_ec;

    NvOsMutexLock(ec->mutex);

    // Call the transport's power-on only if it is in the OFF state
    if (ec->powerState == NV_FALSE)
    {
        NvOsDebugPrintf("ec_rs NvEcPowerResume 1\n");
        NV_CHECK_ERROR_CLEANUP( NvEcTransportPowerResume(ec->transport) );
        ec->powerState = NV_TRUE;
        ec->EnterLowPowerState = NV_FALSE;

        // Signal priv thread to get out of power suspend.
        NvOsSemaphoreSignal(ec->LowPowerExitSema);

        // Perform post-resume EC operations
        NvEcPrivPowerResumeHook(ec->hEc);
        NvOsDebugPrintf("ec_rs NvEcPowerResume 2\n");
    }

fail:
    NvOsMutexUnlock(ec->mutex);
    return e;
}
static void ReportRmPowerState(NvRmDeviceHandle hRmDeviceHandle)
{
    NvU32 i;
    NvRmPowerState OldRmState = NvRmPrivPowerGetState(hRmDeviceHandle);
    NvRmPowerState NewRmState = NvRmPowerState_Idle;

    // RM clients are in h/w autonomous (bypass) state if there are Power On
    // references for the NPG_AUTO group only; RM clients are in active state
    // if there are Power On references for any other group
    if (s_PowerOnRefCounts[NVRM_POWERGROUP_NPG_AUTO] != 0)
        NewRmState = NvRmPowerState_AutoHw;
    for (i = 0; i < NV_POWERGROUP_MAX; i++)
    {
        if ((i != NVRM_POWERGROUP_NPG_AUTO) && (s_PowerOnRefCounts[i] != 0))
        {
            NewRmState = NvRmPowerState_Active;
            break;
        }
    }
    if (NewRmState == OldRmState)
        return;

#if NVRM_POWER_VERBOSE_PRINTF
    NVRM_POWER_PRINTF(("RM Clients Power State: %s\n",
        ((NewRmState == NvRmPowerState_Active) ? "Active" :
         ((NewRmState == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle"))));
#endif

    /*
     * Set new combined RM clients power state in the storage shared with the
     * OS adaptation layer. Check the previous state; if it was any of the low
     * power states (i.e., this is the 1st RM power state report after suspend)
     * notify all clients about the wake up event.
     */
    NvRmPrivPowerSetState(hRmDeviceHandle, NewRmState);
    switch (OldRmState)
    {
        case NvRmPowerState_LP0:
            NvOsDebugPrintf("*** Wakeup from LP0 *** wake-source: 0x%x\n",
                NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, 0x14));
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP0);
            break;
        case NvRmPowerState_LP1:
            NvOsDebugPrintf("*** Wakeup from LP1 ***\n");
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
            break;
        case NvRmPowerState_SkippedLP0:
            NvOsDebugPrintf("*** Wakeup after Skipped LP0 ***\n");
            // resume procedure after Skipped LP0 is the same as after LP1
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
            break;
        default:
            break;
    }
}
NvError NvRmKernelPowerSuspend(NvRmDeviceHandle hRmDeviceHandle)
{
    NvOdmSocPowerState state = NvRmPowerLowestStateGet();

    if (state == NvOdmSocPowerState_Suspend)
        NvRmPrivPowerGroupSuspend(hRmDeviceHandle);

#if NVRM_POWER_DEBUG_SUSPEND_ENTRY
    NvOsMutexLock(s_hPowerClientMutex);
    {
        NvU32 i;
        ModuleVoltageReq* pVoltageReq = NULL;
        NvRmPowerClient* pPowerClient = NULL;
        NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
        NvRmPowerState s = NvRmPrivPowerGetState(hRmDeviceHandle);

        // Report combined RM power state and active modules
        NvOsDebugPrintf("RM power state before suspend: %s (%d)\n",
            ((s == NvRmPowerState_Active) ? "Active" :
             ((s == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle")), s);
        if (s == NvRmPowerState_Active)
        {
            for (i = 0; i < pRegistry->UsedIndexRange; i++)
            {
                pPowerClient = pRegistry->pPowerClients[i];
                if (pPowerClient)
                {
                    pVoltageReq = pPowerClient->pVoltageReqHead;
                    while (pVoltageReq != NULL)
                    {
                        if (pVoltageReq->MaxVolts != NvRmVoltsOff)
                        {
                            // could also set some bad e = NvError_Bad???
                            NvOsDebugPrintf("Active Module: 0x%x\n",
                                pVoltageReq->ModuleId);
                        }
                        pVoltageReq = pVoltageReq->pNext;
                    }
                }
            }
        }
    }
    NvOsMutexUnlock(s_hPowerClientMutex);
#endif
    return NvSuccess;
}
NvError NvEcPowerSuspend(NvEcPowerState PowerState)
{
    NvError e = NvSuccess;
    NvEcPrivState *ec = &g_ec;

    NvOsMutexLock(ec->mutex);
    NvOsDebugPrintf("ec_rs NvEcPowerSuspend PowerState=0x%x, ec->powerState=0x%x\n",
        PowerState, ec->powerState);

    // Call the transport's power-off only if it is in the ON state
    if (ec->powerState == NV_TRUE)
    {
        // Perform pre-suspend EC operations
        NV_CHECK_ERROR_CLEANUP( NvEcPrivPowerSuspendHook(ec->hEc, PowerState) );

        // Enter low power state
        ec->EnterLowPowerState = NV_TRUE;

        // Signal priv thread to get ready for power suspend.
        NvOsSemaphoreSignal(ec->sema);

        // Wait till priv thread is ready for power suspend.
        NvOsSemaphoreWait(ec->LowPowerEntrySema);

        e = NvEcTransportPowerSuspend(ec->transport);
        ec->powerState = NV_FALSE;
    }

fail:
    NvOsMutexUnlock(ec->mutex);
    return e;
}
static NvBool SdioOdmWlanPower(NvOdmSdioHandle hOdmSdio, NvBool IsEnable)
{
    NvU32 RequestedPeriod, ReturnedPeriod;
    NvOdmServicesPwmHandle hOdmPwm = NULL;

    if (IsEnable)
    {
        // Wlan Power On Reset Sequence
        NvOdmGpioSetState(hOdmSdio->hGpio, hOdmSdio->hPwrPin, 0x0);
        NvOdmGpioSetState(hOdmSdio->hGpio, hOdmSdio->hResetPin, 0x0);
        NvOdmOsSleepMS(200);
        NvOdmGpioSetState(hOdmSdio->hGpio, hOdmSdio->hPwrPin, 0x1);
        NvOdmGpioSetState(hOdmSdio->hGpio, hOdmSdio->hResetPin, 0x1);
        NvOdmOsSleepMS(200);

        // Enable 32KHz clock out
        hOdmPwm = NvOdmPwmOpen();
        if (!hOdmPwm)
        {
            NvOsDebugPrintf("sdio_odm: NvOdmPwmOpen failed\n");
            return NV_FALSE;
        }
        RequestedPeriod = 0;
        NvOdmPwmConfig(hOdmPwm, NvOdmPwmOutputId_Blink,
            NvOdmPwmMode_Blink_32KHzClockOutput, 0,
            &RequestedPeriod, &ReturnedPeriod);
        NvOdmPwmClose(hOdmPwm);
    }
    else
    {
        // Power Off sequence
        NvOdmGpioSetState(hOdmSdio->hGpio, hOdmSdio->hPwrPin, 0x0);
    }
    return NV_TRUE;
}
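/*
 * Hedged usage sketch (not from the original source): a WLAN power-cycle
 * helper built on SdioOdmWlanPower() above. Only this wrapper is
 * illustrative; the function and handle types it uses are the ones already
 * defined in this file.
 */
NvBool SdioOdmWlanPowerCycle(NvOdmSdioHandle hOdmSdio)
{
    // Drive the power-off path first, then run the full power-on/reset
    // sequence (including the 32KHz blink clock setup).
    if (!SdioOdmWlanPower(hOdmSdio, NV_FALSE))
        return NV_FALSE;
    return SdioOdmWlanPower(hOdmSdio, NV_TRUE);
}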
static void NvRmPrivChipFlavorInit(NvRmDeviceHandle hRmDevice)
{
    NvOsMemset((void*)&s_ChipFlavor, 0, sizeof(s_ChipFlavor));

    if (NvRmPrivChipShmooDataInit(hRmDevice, &s_ChipFlavor) == NvSuccess)
    {
        NvOsDebugPrintf("NVRM Initialized shmoo database\n");
        return;
    }
    if (NvRmBootArgChipShmooGet(hRmDevice, &s_ChipFlavor) == NvSuccess)
    {
        NvOsDebugPrintf("NVRM Got shmoo boot argument (at 0x%x)\n",
            ((NvUPtr)s_pShmooData));
        return;
    }
    NV_ASSERT(!"Failed to set clock limits");
}
NvBool NvOdmBatteryDeviceOpen(NvOdmBatteryDeviceHandle *hDevice,
                              NvOdmOsSemaphoreHandle *hOdmSemaphore)
{
    NvOdmBatteryDevice *pBattContext = NULL;
    NvU32 i;
    NvError NvStatus = NvError_Success;
    NvU32 PinState;

    pBattContext = NvOdmOsAlloc(sizeof(NvOdmBatteryDevice));
    if (!pBattContext)
    {
        NvOsDebugPrintf("NvOdmOsAlloc failed to allocate pBattContext.\n");
        return NV_FALSE;
    }
    NvOdmOsMemset(pBattContext, 0, sizeof(NvOdmBatteryDevice));

    NvStatus = NvRmOpen(&pBattContext->hRm, 0);
    if (NvStatus != NvError_Success)
        goto Cleanup;

    NvStatus = NvRmGpioOpen(pBattContext->hRm, &pBattContext->hGpio);
    if (NvStatus != NvError_Success)
        goto Cleanup;

    pBattContext->pGpioPinInfo = NvOdmQueryGpioPinMap(
        NvOdmGpioPinGroup_Battery, 0, &pBattContext->PinCount);
    if (pBattContext->pGpioPinInfo == NULL)
        goto Cleanup;

    for (i = 0; i < pBattContext->PinCount; i++)
    {
        /* Pin 1 must be configured as an output to allow battery charging. */
        if (i == 1)
        {
            NvRmGpioAcquirePinHandle(pBattContext->hGpio,
                pBattContext->pGpioPinInfo[i].Port,
                pBattContext->pGpioPinInfo[i].Pin,
                &pBattContext->hPin);
            if (!pBattContext->hPin)
                goto Cleanup;

            NvRmGpioConfigPins(pBattContext->hGpio, &pBattContext->hPin, 1,
                NvRmGpioPinMode_Output);
            PinState = NvRmGpioPinState_Low;
            NvRmGpioWritePins(pBattContext->hGpio, &pBattContext->hPin,
                &PinState, 1);
        }
    }

    *hDevice = pBattContext;
    return NV_TRUE;

Cleanup:
    NvOdmBatteryDeviceClose(pBattContext);
    return NV_FALSE;
}
void NvBatteryEventHandlerThread(void *args)
{
    NvU8 BatteryState = 0, BatteryEvent = 0;

    for (;;) {
        NvOsSemaphoreWait(batt_dev->hOdmSemaphore);
        if (batt_dev->exitThread)
            break;

        if (!batt_dev->hOdmBattDev)
            continue;

        NvOdmBatteryGetBatteryStatus(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, &BatteryState);
        NvOdmBatteryGetEvent(batt_dev->hOdmBattDev, &BatteryEvent);
        NvOsDebugPrintf("ec_rs BatteryEvent = 0x%x\n", BatteryEvent);

        if ((BatteryState == NVODM_BATTERY_STATUS_UNKNOWN) ||
            (BatteryEvent == NvOdmBatteryEventType_Num)) {
            /* Do nothing */
        } else {
            //if (BatteryEvent & NvOdmBatteryEventType_RemainingCapacityAlarm) {
            if (BatteryEvent & NvOdmBatteryEventType_LowBatteryIntr) {
                /* Daniel 20100701: force power off only on the LOW_BAT#
                 * interrupt (not on the low-capacity alarm). */
                //if (BatteryState == (NVODM_BATTERY_STATUS_CRITICAL |
                //                     NVODM_BATTERY_STATUS_VERY_CRITICAL |
                //                     NVODM_BATTERY_STATUS_DISCHARGING)) {
                //    pr_info("nvec_battery:calling kernel_power_off...\n");
                    NvOsDebugPrintf("ec_rs batt low battery interrupt.\r\n");
                //    kernel_power_off();
                //}
            } else {
                /* Update the battery and power supply info for other events */
                power_supply_changed(
                    &tegra_power_supplies[NvPowerSupply_TypeBattery]);
                power_supply_changed(
                    &tegra_power_supplies[NvPowerSupply_TypeAC]);
            }
        }
    }
}
void McStat_Report(
    NvU32 client_id_0, NvU32 client_0_cycles,
    NvU32 client_id_1, NvU32 client_1_cycles,
    NvU32 llc_client_id, NvU32 llc_client_clocks, NvU32 llc_client_cycles,
    NvU32 mc_clocks)
{
    NvOsDebugPrintf("LLC Client %d Count: 0x%.8X, %u\n",
        llc_client_id, llc_client_cycles, llc_client_cycles);
    NvOsDebugPrintf("LLC Client %d Clocks: 0x%.8X, %u\n",
        llc_client_id, llc_client_clocks, llc_client_clocks);
    NvOsDebugPrintf("Client %.3d Count: 0x%.8X, %u\n",
        client_id_0, client_0_cycles, client_0_cycles);
    NvOsDebugPrintf("Client %.3d Count: 0x%.8X, %u\n",
        client_id_1, client_1_cycles, client_1_cycles);
    NvOsDebugPrintf("Total MC Clocks: 0x%.8X, %u\n",
        mc_clocks, mc_clocks);
}
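/*
 * Hedged usage sketch (not from the original source): every value below is
 * an illustrative placeholder, not real profiling data. It only shows the
 * argument order McStat_Report() expects: two ordinary client id/cycle
 * pairs, the last-level-cache client id with its clock and cycle counts,
 * and the total MC clocks for the sampling window.
 */
void McStat_ReportExample(void)
{
    McStat_Report(14, 0x0001F400,             /* client 0: id, cycles */
                  22, 0x00007D00,             /* client 1: id, cycles */
                  3, 0x0003E800, 0x0002EE00,  /* LLC: id, clocks, cycles */
                  0x000F4240);                /* total MC clocks */
}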
NvError ReadObsData(
    NvRmDeviceHandle rm,
    NvRmModuleID modID,
    NvU32 start_index,
    NvU32 length,
    NvU32 *value)
{
    NvU32 i = 0, offset = 0, value1, value2;
    NvU32 timeout;
    NvU32 partID = 0xffffffff;
    NvU32 index, temp;

    for (i = 0; i < ObsInfoTableSize; i++)
    {
        if (modID == ObsInfoTable[i].modSelect)
        {
            partID = ObsInfoTable[i].partSelect;
            break;
        }
    }
    if (i == ObsInfoTableSize)
    {
        return NvError_BadParameter;
    }

    for (offset = 0; offset < length; offset++)
    {
        index = start_index + offset;
        temp = NV_DRF_DEF(APB_MISC_GP, OBSCTRL, OBS_EN, ENABLE)
             | NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_MOD_SEL, modID)
             | NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_PART_SEL, partID)
             | NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_SIG_SEL, index);
        NV_REGW(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0, temp);
        value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0);

        timeout = 100;
        do
        {
            value2 = value1;
            value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSDATA_0);
            timeout--;
        } while (value1 != value2 && timeout);

        NvOsDebugPrintf("OBS bus modID 0x%x index 0x%x = value 0x%x",
            modID, index, value1);
        value[offset] = value1;
    }
    return NvSuccess;
}
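/*
 * Hedged usage sketch (not from the original source): reads four consecutive
 * observability-bus signals for the CPU module into a local buffer. The
 * signal index base (0x10) and the choice of module are illustrative
 * placeholders; ReadObsData() itself validates the module against
 * ObsInfoTable and returns NvError_BadParameter if it has no entry.
 */
NvError ReadObsDataExample(NvRmDeviceHandle rm)
{
    NvU32 obs[4];

    // Each sampled value is also printed by ReadObsData() via NvOsDebugPrintf.
    return ReadObsData(rm, NvRmModuleID_Cpu, 0x10, 4, obs);
}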
void NvRmPrivGetCpuIdInfo(NvU32 *id, NvU32 *family, NvU32 *major,
                          NvU32 *minor, NvU32 *sku)
{
#define TAG "GetCpuIdInfo: "
    NvRmDeviceHandle rm;

    rm = (NvRmDeviceHandle)NvOsAlloc(sizeof(NvRmDevice));
    if (rm == NULL)
    {
        NvOsDebugPrintf(TAG "NvOsAlloc rm fail!\n");
        return;
    }
    NvRmPrivReadChipId(rm);

#if 0
    typedef enum
    {
        NvRmChipFamily_Gpu = 0,
        NvRmChipFamily_Handheld = 1,
        NvRmChipFamily_BrChips = 2,
        NvRmChipFamily_Crush = 3,
        NvRmChipFamily_Mcp = 4,
        NvRmChipFamily_Ck = 5,
        NvRmChipFamily_Vaio = 6,
        NvRmChipFamily_HandheldSoc = 7,
        NvRmChipFamily_Force32 = 0x7FFFFFFF,
    } NvRmChipFamily;
#endif

    if (id != NULL)
        *id = rm->ChipId.Id;
    if (family != NULL)
        *family = rm->ChipId.Family;
    if (major != NULL)
        *major = rm->ChipId.Major;
    if (minor != NULL)
        *minor = rm->ChipId.Minor;
    if (sku != NULL)
        *sku = rm->ChipId.SKU;

    /*if (id != NULL && family != NULL && major != NULL && minor != NULL && sku != NULL)
        NvOsDebugPrintf(
            "second Chip Id: 0x%x Family: 0x%x Major: 0x%x Minor: 0x%x "
            "SKU: 0x%x\n", *id, *family, *major, *minor, *sku);*/

    NvOsFree(rm);
}
static void McErrorIntHandler(void* args)
{
    NvU32 RegVal;
    NvU32 IntStatus;
    NvU32 IntClear = 0;
    NvRmDeviceHandle hRm = (NvRmDeviceHandle)args;

    IntStatus = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0);
    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_AXI_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_AXI_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_AXI_ADR_0);
        NvOsDebugPrintf("AXI DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_AXI_STATUS_0);
        NvOsDebugPrintf("AXI DecErrStatus=0x%x ", RegVal);
    }
    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_EMEM_OTHERS_ADR_0);
        NvOsDebugPrintf("EMEM DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_EMEM_OTHERS_STATUS_0);
        NvOsDebugPrintf("EMEM DecErrStatus=0x%x ", RegVal);
    }
    if ( NV_DRF_VAL(MC, INTSTATUS, INVALID_GART_PAGE_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, INVALID_GART_PAGE_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_GART_ERROR_ADDR_0);
        NvOsDebugPrintf("GART DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_GART_ERROR_REQ_0);
        NvOsDebugPrintf("GART DecErrStatus=0x%x ", RegVal);
    }

    NV_ASSERT(!"MC Decode Error ");
    // Clear the interrupt.
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0, IntClear);
    NvRmInterruptDone(s_McInterruptHandle);
}
void NvRmPrivStarDttPolicyUpdate(
    NvRmDeviceHandle hRmDevice,
    NvS32 TemperatureC,
    NvRmDtt* pDtt)
{
    NvRmDttAp20PolicyRange Range;

    Range = (NvRmDttAp20PolicyRange)pDtt->TcorePolicy.PolicyRange;
    switch (Range)
    {
        case NvRmDttAp20PolicyRange_ThrottleDown:
            if (!ThermalLimitPwrOffEnalble)
            {
                NvOsDebugPrintf(">>>>>> DTT: NvRmDttAp20PolicyRange_ThrottleDown!!!!!! <<<<<< \n");
                ThermalLimitPwrOffEnalble = NV_TRUE;
                NvOsDebugPrintf(">>>>>> DTT: HWPowerOffConfig ON!!!!!! <<<<<< \n");
                NvRmPmuSetHwPowerOffConfig(s_hRmGlobal, NV_TRUE);
            }
            break;

        case NvRmDttAp20PolicyRange_FreeRunning:
        case NvRmDttAp20PolicyRange_LimitVoltage:
        default:
            if (ThermalLimitPwrOffEnalble)
            {
                if (Range == NvRmDttAp20PolicyRange_FreeRunning)
                    NvOsDebugPrintf(">>>>>> DTT: NvRmDttAp20PolicyRange_FreeRunning \n");
                else if (Range == NvRmDttAp20PolicyRange_LimitVoltage)
                    NvOsDebugPrintf(">>>>>> DTT: NvRmDttAp20PolicyRange_LimitVoltage \n");
                else
                    NvOsDebugPrintf(">>>>>> DTT: NvRmDttAp20PolicyRange_unknown \n");
                ThermalLimitPwrOffEnalble = NV_FALSE;
                NvRmPmuSetHwPowerOffConfig(s_hRmGlobal, NV_FALSE);
                NvOsDebugPrintf(">>>>>> DTT: HWPowerOffConfig OFF!!!!!! <<<<<< \n");
            }
            break;
    }
}
long nvrm_unlocked_ioctl(struct file *file, unsigned int cmd,
    unsigned long arg)
{
    NvError err;
    NvOsIoctlParams p;
    NvU32 size;
    NvU32 small_buf[8];
    void *ptr = 0;
    long e;
    NvBool bAlloc = NV_FALSE;
    struct nvrm_file_priv *priv = file->private_data;

    switch( cmd ) {
    case NvRmIoctls_Generic:
    {
        NvDispatchCtx dctx;

        dctx.Rt = s_RtHandle;
        dctx.Client = priv->rt_client;
        dctx.PackageIdx = 0;

        err = NvOsCopyIn( &p, (void *)arg, sizeof(p) );
        if( err != NvSuccess )
        {
            printk( "NvRmIoctls_Generic: copy in failed\n" );
            goto fail;
        }

        //printk( "NvRmIoctls_Generic: %d %d %d\n", p.InBufferSize,
        //    p.InOutBufferSize, p.OutBufferSize );

        size = p.InBufferSize + p.InOutBufferSize + p.OutBufferSize;
        if( size <= sizeof(small_buf) )
        {
            ptr = small_buf;
        }
        else
        {
            ptr = NvOsAlloc( size );
            if( !ptr )
            {
                printk( "NvRmIoctls_Generic: alloc failure (%d bytes)\n",
                    size );
                goto fail;
            }
            bAlloc = NV_TRUE;
        }

        err = NvOsCopyIn( ptr, p.pBuffer,
            p.InBufferSize + p.InOutBufferSize );
        if( err != NvSuccess )
        {
            printk( "NvRmIoctls_Generic: copy in failure\n" );
            goto fail;
        }

        if (priv->su)
        {
            err = NvRm_Dispatch( ptr, p.InBufferSize + p.InOutBufferSize,
                ((NvU8 *)ptr) + p.InBufferSize,
                p.InOutBufferSize + p.OutBufferSize, &dctx );
        }
        else
        {
            err = NvRm_Dispatch_Others( ptr,
                p.InBufferSize + p.InOutBufferSize,
                ((NvU8 *)ptr) + p.InBufferSize,
                p.InOutBufferSize + p.OutBufferSize, &dctx );
        }
        if( err != NvSuccess )
        {
            printk( "NvRmIoctls_Generic: dispatch failure\n" );
            goto fail;
        }

        if( p.InOutBufferSize || p.OutBufferSize )
        {
            err = NvOsCopyOut(
                ((NvU8 *)((NvOsIoctlParams *)arg)->pBuffer) + p.InBufferSize,
                ((NvU8 *)ptr) + p.InBufferSize,
                p.InOutBufferSize + p.OutBufferSize );
            if( err != NvSuccess )
            {
                printk( "NvRmIoctls_Generic: copy out failure\n" );
                goto fail;
            }
        }
        break;
    }
    case NvRmIoctls_NvRmGraphics:
        printk( "NvRmIoctls_NvRmGraphics: not supported\n" );
        goto fail;
    case NvRmIoctls_NvRmFbControl:
        printk( "NvRmIoctls_NvRmFbControl: deprecated\n" );
        break;
    case NvRmIoctls_NvRmMemRead:
    case NvRmIoctls_NvRmMemWrite:
    case NvRmIoctls_NvRmMemReadStrided:
    case NvRmIoctls_NvRmGetCarveoutInfo:
    case NvRmIoctls_NvRmMemWriteStrided:
        goto fail;
    case NvRmIoctls_NvRmMemMapIntoCallerPtr:
        // FIXME: implement?
        printk( "NvRmIoctls_NvRmMemMapIntoCallerPtr: not supported\n" );
        goto fail;
    case NvRmIoctls_NvRmBootDone:
        return tegra_start_dvfsd();
    case NvRmIoctls_NvRmGetClientId:
        err = NvOsCopyIn(&p, (void*)arg, sizeof(p));
        if (err != NvSuccess)
        {
            NvOsDebugPrintf("NvRmIoctls_NvRmGetClientId: copy in failed\n");
            goto fail;
        }

        NV_ASSERT(p.InBufferSize == 0);
        NV_ASSERT(p.OutBufferSize == sizeof(NvRtClientHandle));
        NV_ASSERT(p.InOutBufferSize == 0);

        if (NvOsCopyOut(p.pBuffer, &priv->rt_client,
                sizeof(NvRtClientHandle)) != NvSuccess)
        {
            NvOsDebugPrintf("Failed to copy client id\n");
            goto fail;
        }
        break;
    case NvRmIoctls_NvRmClientAttach:
    {
        NvRtClientHandle Client;

        err = NvOsCopyIn(&p, (void*)arg, sizeof(p));
        if (err != NvSuccess)
        {
            NvOsDebugPrintf("NvRmIoctls_NvRmClientAttach: copy in failed\n");
            goto fail;
        }

        NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
        NV_ASSERT(p.OutBufferSize == 0);
        NV_ASSERT(p.InOutBufferSize == 0);

        if (NvOsCopyIn((void*)&Client, p.pBuffer,
                sizeof(NvRtClientHandle)) != NvSuccess)
        {
            NvOsDebugPrintf("Failed to copy client id\n");
            goto fail;
        }

        NV_ASSERT(Client || !"Bad client");

        if (Client == priv->rt_client)
        {
            // The daemon is attaching to itself, no need to add refcount
            break;
        }

        if (NvRtAddClientRef(s_RtHandle, Client) != NvSuccess)
        {
            NvOsDebugPrintf("Client ref add unsuccessful\n");
            goto fail;
        }
        break;
    }
    case NvRmIoctls_NvRmClientDetach:
    {
        NvRtClientHandle Client;

        err = NvOsCopyIn(&p, (void*)arg, sizeof(p));
        if (err != NvSuccess)
        {
            NvOsDebugPrintf("NvRmIoctls_NvRmClientDetach: copy in failed\n");
            goto fail;
        }

        NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
        NV_ASSERT(p.OutBufferSize == 0);
        NV_ASSERT(p.InOutBufferSize == 0);

        if (NvOsCopyIn((void*)&Client, p.pBuffer,
                sizeof(NvRtClientHandle)) != NvSuccess)
        {
            NvOsDebugPrintf("Failed to copy client id\n");
            goto fail;
        }

        NV_ASSERT(Client || !"Bad client");

        if (Client == priv->rt_client)
        {
            // The daemon is detaching from itself, no need to dec refcount
            break;
        }

        client_detach(Client);
        break;
    }
    // FIXME: power ioctls?
    default:
        printk( "unknown ioctl code\n" );
        goto fail;
    }

    e = 0;
    goto clean;

fail:
    e = -EINVAL;

clean:
    if( bAlloc )
    {
        NvOsFree( ptr );
    }
    return e;
}
static int tegra_battery_get_property(struct power_supply *psy,
    enum power_supply_property psp, union power_supply_propval *val)
{
    NvU8 name[BATTERY_INFO_NAME_LEN] = {0};
    int technology = 0;
    NvU8 state = 0;

    switch (psp) {
    case POWER_SUPPLY_PROP_STATUS:
        val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
        if (!NvOdmBatteryGetBatteryStatus(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, &state))
            return -ENODEV;

        if (state == NVODM_BATTERY_STATUS_UNKNOWN) {
            batt_dev->present = NV_FALSE;
            val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
        } else if (state == NVODM_BATTERY_STATUS_NO_BATTERY) {
            batt_dev->present = NV_FALSE;
            val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
        } else if (state & NVODM_BATTERY_STATUS_CHARGING) {
            batt_dev->present = NV_TRUE;
            val->intval = POWER_SUPPLY_STATUS_CHARGING;
        } else if (state & NVODM_BATTERY_STATUS_DISCHARGING) {
            batt_dev->present = NV_TRUE;
            val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
        } else if (state & NVODM_BATTERY_STATUS_IDLE) {
            batt_dev->present = NV_TRUE;
            val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
        }

        if (!batt_dev->present) {
            batt_dev->voltage = 0;
            batt_dev->current_ma = 0;
            batt_dev->current_avg = 0;
            batt_dev->temp = 0;
            batt_dev->percent_remain = 0;
            batt_dev->lifetime = 0;
            batt_dev->consumed = 0;
            batt_dev->capacity = 0;
            batt_dev->capacity_crit = 0;
            batt_dev->capacity_remain = 0;
        } else {
            /*
             * Get the battery info once here so that the other property
             * requests do not each trigger an EC request.
             */
            if (tegra_battery_data(NvOdmBatteryInst_Main)) {
                if (batt_dev->percent_remain == 100)
                    val->intval = POWER_SUPPLY_STATUS_FULL;

                /* Daniel 20100903: if the battery is present, discharging
                 * and at or below 5%, hold a wake lock. */
                if ((batt_dev->percent_remain <= 5) &&
                    (val->intval == POWER_SUPPLY_STATUS_DISCHARGING)) {
                    if (mylock_flag == NV_FALSE) {
                        wake_lock(&mylock);
                        mylock_flag = NV_TRUE;
                        NvOsDebugPrintf("ec_rs batt 5%% wake_lock\n");
                    }
                    /* Daniel 20100918: at or below 4%, report 0% to trigger
                     * shutdown, or just call kernel_power_off(). */
                    if (batt_dev->percent_remain <= 4) {
                        NvOsDebugPrintf("ec_rs low_batt_cnt = %d\n",
                            low_batt_cnt);
                        /* Trigger the userspace shutdown procedure
                         * (workaround: the app's 5% shutdown isn't ready). */
                        batt_dev->percent_remain = 0;
                        low_batt_cnt++;
                        if (low_batt_cnt > 8) {
                            /* do it on the next battery polling */
                            NvOsDebugPrintf("ec_rs kernel_power_off.\r\n");
                            msleep(500);
                            kernel_power_off();
                        }
                    } else {
                        low_batt_cnt = 0;
                    }
                } else if ((batt_dev->percent_remain > 5) ||
                           (val->intval != POWER_SUPPLY_STATUS_DISCHARGING)) {
                    if (mylock_flag == NV_TRUE) {
                        wake_unlock(&mylock);
                        mylock_flag = NV_FALSE;
                        NvOsDebugPrintf("ec_rs batt 5%% wake_unlock\n");
                    }
                }
            }
        }
        break;
    case POWER_SUPPLY_PROP_HEALTH:
        if (batt_dev->present)
            val->intval = POWER_SUPPLY_HEALTH_GOOD;
        else
            val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
        break;
    case POWER_SUPPLY_PROP_PRESENT:
        if (!NvOdmBatteryGetBatteryStatus(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, &state))
            return -EINVAL;

        if (state == NVODM_BATTERY_STATUS_UNKNOWN) {
            batt_dev->present = NV_FALSE;
            val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
        } else {
            if (state == NVODM_BATTERY_STATUS_NO_BATTERY) {
                batt_dev->present = NV_FALSE;
                val->intval = NV_FALSE;
            }
            if (state & (NVODM_BATTERY_STATUS_HIGH |
                NVODM_BATTERY_STATUS_LOW |
                NVODM_BATTERY_STATUS_CRITICAL |
                NVODM_BATTERY_STATUS_CHARGING |
                NVODM_BATTERY_STATUS_DISCHARGING |
                NVODM_BATTERY_STATUS_IDLE)) {
                batt_dev->present = NV_TRUE;
                val->intval = NV_TRUE;
            }
        }
        break;
    case POWER_SUPPLY_PROP_TECHNOLOGY:
        tegra_get_battery_tech(&technology, NvOdmBatteryInst_Main);
        val->intval = technology;
        break;
    case POWER_SUPPLY_PROP_CAPACITY:
        val->intval = batt_dev->percent_remain;
        break;
    case POWER_SUPPLY_PROP_VOLTAGE_NOW:
        val->intval = batt_dev->voltage * 1000;
        break;
    case POWER_SUPPLY_PROP_CURRENT_NOW:
        val->intval = batt_dev->current_ma;
        break;
    case POWER_SUPPLY_PROP_CURRENT_AVG:
        val->intval = batt_dev->current_avg;
        break;
    case POWER_SUPPLY_PROP_CHARGE_NOW:
        val->intval = batt_dev->capacity_remain;
        break;
    case POWER_SUPPLY_PROP_CHARGE_FULL:
        val->intval = batt_dev->capacity;
        break;
    case POWER_SUPPLY_PROP_CHARGE_EMPTY:
        val->intval = batt_dev->capacity_crit;
        break;
    case POWER_SUPPLY_PROP_TEMP:
        /* The returned value must be in tenths of a degree C.
         * Daniel 20100706/20100707: the EC reports tenths of a degree K,
         * so convert from tenths of K to tenths of C. */
        /* val->intval = batt_dev->temp / 10; */
        val->intval = batt_dev->temp - 2730;
        break;
    case POWER_SUPPLY_PROP_MODEL_NAME:
        if (!NvOdmBatteryGetModel(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, name))
            return -EINVAL;
        strncpy((char *)val->strval, name, strlen(name));
        break;
    case POWER_SUPPLY_PROP_MANUFACTURER:
        if (!NvOdmBatteryGetManufacturer(batt_dev->hOdmBattDev,
            NvOdmBatteryInst_Main, name))
            return -EINVAL;
        strncpy((char *)val->strval, name, strlen(name));
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
/*
 * Always use one EcPrivThread-global clock/time for timeout calculations
 */
static void NvEcPrivThread( void * args )
{
    NvEcPrivState *ec = (NvEcPrivState *)args;
    NvU32 t, timeout = NV_WAIT_INFINITE;
    NvU32 tStatus = 0;
    NvError wait = NvSuccess;
    NvError e;

    while( !ec->exitThread )
    {
#if ENABLE_TIMEOUT
        if ( timeout )
            wait = NvOsSemaphoreWaitTimeout( ec->sema, timeout );
#else
        NvOsSemaphoreWait( ec->sema );
        wait = NvSuccess;
#endif

#if ENABLE_FAKE_TIMEOUT_TEST
        t = ec->lastTime + 0x200;
#else
        t = NvOsGetTimeMS();
#endif
        ec->timeDiff = t - ec->lastTime;
        ec->lastTime = t;   // update last timer value

        if ( !timeout || (wait == NvError_Timeout) )
        {
            // timeout case
            NvEcPrivProcessTimeout( ec );
        }

        // look for any pending packets
        tStatus = NvEcTransportQueryStatus( ec->transport );

        e = NvSuccess;
        /*
         * The SEND_COMPLETE event must be processed before the
         * RESPONSE_RECEIVE_COMPLETE event, as SEND_COMPLETE schedules the
         * timeout for the RESPONSE_RECEIVE event.
         */
        if ( tStatus & (NVEC_TRANSPORT_STATUS_SEND_COMPLETE |
                        NVEC_TRANSPORT_STATUS_SEND_ERROR) )
        {
            NvEcPrivProcessPostSendRequest( ec,
                (tStatus & NVEC_TRANSPORT_STATUS_SEND_COMPLETE) ?
                    NvSuccess : NvError_I2cWriteFailed );
        }
        if ( tStatus & (NVEC_TRANSPORT_STATUS_RESPONSE_RECEIVE_ERROR |
                        NVEC_TRANSPORT_STATUS_RESPONSE_RECEIVE_COMPLETE) )
        {
            e = (tStatus & NVEC_TRANSPORT_STATUS_RESPONSE_RECEIVE_COMPLETE) ?
                    NvSuccess : NvError_I2cReadFailed;
            e = NvEcPrivProcessReceiveResponse( ec, e );
                // return ignored. Could be spurious response.
        }
        if ( tStatus & (NVEC_TRANSPORT_STATUS_EVENT_RECEIVE_ERROR |
                        NVEC_TRANSPORT_STATUS_EVENT_RECEIVE_COMPLETE) )
        {
            e = (tStatus & NVEC_TRANSPORT_STATUS_EVENT_RECEIVE_COMPLETE) ?
                    NvSuccess : NvError_I2cReadFailed;
            e = NvEcPrivProcessReceiveEvent( ec, e );
                // return ignored. Could be spurious event.
        }
        if ( tStatus & NVEC_TRANSPORT_STATUS_EVENT_PACKET_MAX_NACK )
        {
            // signal the ping thread to send a ping command since the max
            // number of nacks has been sent to the EC
            if (ec->hPingSema)
                NvOsSemaphoreSignal(ec->hPingSema);
        }

        // send request whenever possible
        if ( (ec->timeout[NVEC_IDX_REQUEST] == NV_WAIT_INFINITE) &&
             (ec->EnterLowPowerState == NV_FALSE) )
            NvEcPrivProcessSendRequest( ec );

#if ENABLE_TIMEOUT
        timeout = NvEcPrivUpdateActualTimeout( ec );
#endif

        if (ec->EnterLowPowerState)
        {
            // This code assumes that the EC has already been put into sleep
            // mode by either the shim or the top-level code, and that there
            // will be no activity on the SM bus.
            if (ec->timeout[NVEC_IDX_REQUEST] != NV_WAIT_INFINITE)
            {
                NvOsDebugPrintf("\r\nNvEc has active requests during suspend. "
                                "It shouldn't have. check it.");
            }
            // No active request is pending. Enter into low power state.
            // Signal power suspend API to enter into suspend mode.
            NvOsSemaphoreSignal(ec->LowPowerEntrySema);
            // Wait till power resume API signals resume operation.
            NvOsSemaphoreWait(ec->LowPowerExitSema);
            // Update the timeouts for the active responses that were
            // scheduled to be received before the system entered suspend.
#if ENABLE_TIMEOUT
            NvEcPrivResetActiveRequestResponseTimeouts( ec );
            timeout = NvEcPrivUpdateActualTimeout( ec );
#endif // ENABLE_TIMEOUT
        }
    }
}
void NvRmPrivAp20DttPolicyUpdate(
    NvRmDeviceHandle hRmDevice,
    NvS32 TemperatureC,
    NvRmDtt* pDtt)
{
    NvRmDttAp20PolicyRange Range;

    // CPU throttling limits are set at 50% of CPU frequency range (no
    // throttling below this value), and at CPU frequency boundary that
    // matches specified voltage throttling limit.
    if ((!s_CpuThrottleMaxKHz) || (!s_CpuThrottleMinKHz))
    {
        NvU32 steps;
        const NvRmFreqKHz* p = NvRmPrivModuleVscaleGetMaxKHzList(
            hRmDevice, NvRmModuleID_Cpu, &steps);
        NV_ASSERT(p && steps);
        for (; steps != 0; steps--)
        {
            if (NVRM_DTT_VOLTAGE_THROTTLE_MV >= NvRmPrivModuleVscaleGetMV(
                hRmDevice, NvRmModuleID_Cpu, p[steps-1]))
                break;
        }
        NV_ASSERT(steps);
        s_CpuThrottleMaxKHz = NV_MIN(
            NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz, p[steps-1]);
        s_CpuThrottleMinKHz =
            NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz /
            NVRM_DTT_RATIO_MAX;
        NV_ASSERT(s_CpuThrottleMaxKHz > s_CpuThrottleMinKHz);
        NV_ASSERT(s_CpuThrottleMinKHz > NVRM_DTT_CPU_DELTA_KHZ);

        s_CpuThrottleKHz = s_CpuThrottleMaxKHz;

        NV_ASSERT(pDtt->TcoreCaps.Tmin <
                  (NVRM_DTT_DEGREES_LOW - NVRM_DTT_DEGREES_HYSTERESIS));
        NV_ASSERT(pDtt->TcoreCaps.Tmax > NVRM_DTT_DEGREES_HIGH);
    }

    // Advanced policy range state machine (one step at a time)
    Range = (NvRmDttAp20PolicyRange)pDtt->TcorePolicy.PolicyRange;
    switch (Range)
    {
        case NvRmDttAp20PolicyRange_Unknown:
            Range = NvRmDttAp20PolicyRange_FreeRunning;
            // fall through

        case NvRmDttAp20PolicyRange_FreeRunning:
            if (TemperatureC >= NVRM_DTT_DEGREES_LOW)
                Range = NvRmDttAp20PolicyRange_LimitVoltage;
            break;

        case NvRmDttAp20PolicyRange_LimitVoltage:
            if (TemperatureC <=
                (NVRM_DTT_DEGREES_LOW - NVRM_DTT_DEGREES_HYSTERESIS))
                Range = NvRmDttAp20PolicyRange_FreeRunning;
            else if (TemperatureC >= NVRM_DTT_DEGREES_HIGH)
                Range = NvRmDttAp20PolicyRange_ThrottleDown;
            break;

        case NvRmDttAp20PolicyRange_ThrottleDown:
            if (TemperatureC <=
                (NVRM_DTT_DEGREES_HIGH - NVRM_DTT_DEGREES_HYSTERESIS))
                Range = NvRmDttAp20PolicyRange_LimitVoltage;
            break;

        default:
            break;
    }

    /*
     * Fill in new policy. Temperature limits are set around current
     * temperature for the next out-of-limit interrupt (possible exception
     * - temperature "jump" over two ranges would result in two interrupts
     * in a row before limits cover the temperature). Polling time is set
     * always in ThrottleDown range, and only for poll mode in other ranges.
     */
    pDtt->CoreTemperatureC = TemperatureC;
    switch (Range)
    {
        case NvRmDttAp20PolicyRange_FreeRunning:
            pDtt->TcorePolicy.LowLimit = pDtt->TcoreLowLimitCaps.MinValue;
            pDtt->TcorePolicy.HighLimit = NVRM_DTT_DEGREES_LOW;
            pDtt->TcorePolicy.UpdateIntervalUs = pDtt->UseIntr ?
                NV_WAIT_INFINITE : (NVRM_DTT_POLL_MS_SLOW * 1000);
            break;

        case NvRmDttAp20PolicyRange_LimitVoltage:
            pDtt->TcorePolicy.LowLimit =
                NVRM_DTT_DEGREES_LOW - NVRM_DTT_DEGREES_HYSTERESIS;
            pDtt->TcorePolicy.HighLimit = NVRM_DTT_DEGREES_HIGH;
            pDtt->TcorePolicy.UpdateIntervalUs = pDtt->UseIntr ?
                NV_WAIT_INFINITE : (NVRM_DTT_POLL_MS_FAST * 1000);
            break;

        case NvRmDttAp20PolicyRange_ThrottleDown:
            pDtt->TcorePolicy.LowLimit =
                NVRM_DTT_DEGREES_HIGH - NVRM_DTT_DEGREES_HYSTERESIS;
            pDtt->TcorePolicy.HighLimit = pDtt->TcoreHighLimitCaps.MaxValue;
            pDtt->TcorePolicy.UpdateIntervalUs = NVRM_DTT_POLL_MS_CRITICAL * 1000;
            break;

        default:
            NV_ASSERT(!"Invalid DTT policy range");
            NvOsDebugPrintf("DTT: Invalid Range = %d\n", Range);
            pDtt->TcorePolicy.HighLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
            pDtt->TcorePolicy.LowLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
            pDtt->TcorePolicy.PolicyRange = NvRmDttAp20PolicyRange_Unknown;
            return;
    }
    pDtt->TcorePolicy.PolicyRange = (NvU32)Range;
}
/*
 * Traverse response nodes with these:
 * - one response matching the tag param (returns the node). If bypassing
 *   tag checking, use INVALID tag as parameter.
 * - all responses timeout (will signal back too)
 * - Update individual responseNode's timeout by rebasing to
 *   EcPrivThread-global time (hEc->lastTime).
 * - Update shortest timeout value for response queue.
 */
static void NvEcPrivFindAndDequeueResponse(
    NvEcPrivState *ec,
    NvEcResponse *response,
    NvEcResponseNode **pResponseNode )
{
    NvEcResponseNode *t = NULL, *p = NULL, *temp;
    NvU32 timeout = NV_WAIT_INFINITE;
    NvBool remove = NV_FALSE, found = NV_FALSE;
    NvBool SignalSema;

    NvOsMutexLock( ec->responseMutex );
    NV_ASSERT(ec->responseBegin);
    DISP_MESSAGE(("\r\nFindDQRes responseBegin=0x%x", ec->responseBegin));
    if ( ec->responseBegin )
    {
        t = ec->responseBegin;
        while( t )
        {
            SignalSema = NV_FALSE;
            /* FIXME: just match tag? more to match?
             * There may be cases where a spurious response is received from
             * the EC. A response should not be removed from the queue until
             * its request is complete.
             */
            DISP_MESSAGE(("t->tag=0x%x\n", t->tag));
            if (response)
                DISP_MESSAGE(("response->RequestorTag=0x%x\n",
                    response->RequestorTag));
            if ( response && !found && (t->tag == response->RequestorTag) &&
                 t->requestNode->completed )
            {
                if ( pResponseNode )
                    *pResponseNode = t;
                found = NV_TRUE;
                remove = NV_TRUE;
            }
            else
            {
#if ENABLE_TIMEOUT
                if ( t->timeout <= NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE) )
                {
                    t->status = NvError_Timeout;
                    SignalSema = NV_TRUE;
                    remove = NV_TRUE;
                    DISP_MESSAGE(("Resp Timeout Respnode=0x%x", t));
                }
                else
                {
                    // This check is needed for spurious response case handling.
                    if (t->timeout != NV_WAIT_INFINITE)
                        // update this response timeout w/ lastTime as base
                        t->timeout -= NVEC_TIMEDIFF_WITH_BASE(ec, NVEC_IDX_RESPONSE);
                }
#endif
            }
            if ( remove )
            {
                temp = t;
                NVEC_UNLINK( ec->response, t, p );
                DISP_MESSAGE(("\r\nFindDQRes removed=0x%x, removed->next=0x%x, "
                    "prev=0x%x ec->responseBegin=0x%x",
                    t, t->next, p, ec->responseBegin));
                remove = NV_FALSE;
                if (p)
                    t = p->next;
                else
                    t = ec->responseBegin;
                if (SignalSema == NV_TRUE)
                    NvOsSemaphoreSignal( temp->sema );
            }
            else
            {
                if ( timeout > t->timeout )
                    timeout = t->timeout;
                p = t;
                t = t->next;
            }
        }

        // update with per-queue timeout and timeoutBase
        ec->timeout[NVEC_IDX_RESPONSE] = timeout;
        ec->timeoutBase[NVEC_IDX_RESPONSE] = ec->lastTime;
        DISP_MESSAGE(("\r\nec->timeout[NVEC_IDX_RESPONSE] is set to=%d",
            ec->timeout[NVEC_IDX_RESPONSE]));
    }

    if (found == NV_FALSE)
        NvOsDebugPrintf("\r\n***NVEC:Received Spurious Response from EC.");

    NvOsMutexUnlock( ec->responseMutex );
}
void NvRmPrivReadChipId( NvRmDeviceHandle rm )
{
#if (NVCPU_IS_X86 && NVOS_IS_WINDOWS)
    NvRmChipId *id;

    NV_ASSERT( rm );
    id = &rm->ChipId;

    id->Family = NvRmChipFamily_HandheldSoc;
    id->Id = 0x15;
    id->Major = 0x0;
    id->Minor = 0x0;
    id->SKU = 0x0;
    id->Netlist = 0x0;
    id->Patch = 0x0;
#else
    NvU32 reg;
    NvRmChipId *id;
    NvU32 fam;
    char *s;
    NvU8 *VirtAddr;
    NvError e;

    NV_ASSERT( rm );
    id = &rm->ChipId;

    /* Hard coding the address of the chip ID address space, as we haven't yet
     * parsed the relocation table. */
    e = NvRmPhysicalMemMap(0x70000000, 0x1000, NVOS_MEM_READ_WRITE,
            NvOsMemAttribute_Uncached, (void **)&VirtAddr);
    if (e != NvSuccess)
    {
        NV_DEBUG_PRINTF(("APB misc aperture map failure\n"));
        return;
    }

    /* chip id is in the misc aperture */
    reg = NV_READ32( VirtAddr + APB_MISC_GP_HIDREV_0 );
    id->Id = (NvU16)NV_DRF_VAL( APB_MISC_GP, HIDREV, CHIPID, reg );
    id->Major = (NvU8)NV_DRF_VAL( APB_MISC_GP, HIDREV, MAJORREV, reg );
    id->Minor = (NvU8)NV_DRF_VAL( APB_MISC_GP, HIDREV, MINORREV, reg );

    fam = NV_DRF_VAL( APB_MISC_GP, HIDREV, HIDFAM, reg );
    switch( fam ) {
    case APB_MISC_GP_HIDREV_0_HIDFAM_GPU:
        id->Family = NvRmChipFamily_Gpu;
        s = "GPU";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_HANDHELD:
        id->Family = NvRmChipFamily_Handheld;
        s = "Handheld";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_BR_CHIPS:
        id->Family = NvRmChipFamily_BrChips;
        s = "BrChips";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_CRUSH:
        id->Family = NvRmChipFamily_Crush;
        s = "Crush";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_MCP:
        id->Family = NvRmChipFamily_Mcp;
        s = "MCP";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_CK:
        id->Family = NvRmChipFamily_Ck;
        s = "Ck";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_VAIO:
        id->Family = NvRmChipFamily_Vaio;
        s = "Vaio";
        break;
    case APB_MISC_GP_HIDREV_0_HIDFAM_HANDHELD_SOC:
        id->Family = NvRmChipFamily_HandheldSoc;
        s = "Handheld SOC";
        break;
    default:
        NV_ASSERT( !"bad chip family" );
        NvRmPhysicalMemUnmap(VirtAddr, 0x1000);
        return;
    }

    reg = NV_READ32( VirtAddr + APB_MISC_GP_EMU_REVID_0 );
    id->Netlist = (NvU16)NV_DRF_VAL( APB_MISC_GP, EMU_REVID, NETLIST, reg );
    id->Patch = (NvU16)NV_DRF_VAL( APB_MISC_GP, EMU_REVID, PATCH, reg );

    if( id->Major == 0 )
    {
        char *emu;
        if( id->Netlist == 0 )
        {
            NvOsDebugPrintf( "Simulation Chip: 0x%x\n", id->Id );
        }
        else
        {
            if( id->Minor == 0 )
            {
                emu = "QuickTurn";
            }
            else
            {
                emu = "FPGA";
            }
            NvOsDebugPrintf( "Emulation (%s) Chip: 0x%x Netlist: 0x%x "
                "Patch: 0x%x\n", emu, id->Id, id->Netlist, id->Patch );
        }
    }
    else
    {
        // on real silicon
        NvRmPrivGetSku( rm );
        NvOsDebugPrintf( "Chip Id: 0x%x (%s) Major: 0x%x Minor: 0x%x "
            "SKU: 0x%x\n", id->Id, s, id->Major, id->Minor, id->SKU );
    }

    // add a sanity check here, so that if we think we are on sim, but don't
    // detect a sim/quickturn netlist, bail out with an error
    if ( NvRmIsSimulation() && id->Major != 0 )
    {
        // this should all get optimized away in release builds because the
        // above will get evaluated to if ( 0 )
        NV_ASSERT(!"invalid major version number for simulation");
    }

    NvRmPhysicalMemUnmap(VirtAddr, 0x1000);
#endif
}
const NvRmModuleClockLimits* NvRmPrivClockLimitsInit(NvRmDeviceHandle hRmDevice)
{
    NvU32 i;
    NvRmFreqKHz CpuMaxKHz, AvpMaxKHz, VdeMaxKHz, TDMaxKHz, DispMaxKHz;
    NvRmSKUedLimits* pSKUedLimits;
    const NvRmScaledClkLimits* pHwLimits;
    const NvRmSocShmoo* pShmoo;

    NV_ASSERT(hRmDevice);
    NvRmPrivChipFlavorInit(hRmDevice);
    pShmoo = s_ChipFlavor.pSocShmoo;
    pHwLimits = &pShmoo->ScaledLimitsList[0];
#ifndef CONFIG_FAKE_SHMOO
    pSKUedLimits = pShmoo->pSKUedLimits;
#else
    /*
    NvRmFreqKHz CpuMaxKHz;
    NvRmFreqKHz AvpMaxKHz;
    NvRmFreqKHz VdeMaxKHz;
    NvRmFreqKHz McMaxKHz;
    NvRmFreqKHz Emc2xMaxKHz;
    NvRmFreqKHz TDMaxKHz;
    NvRmFreqKHz DisplayAPixelMaxKHz;
    NvRmFreqKHz DisplayBPixelMaxKHz;
    NvRmMilliVolts NominalCoreMv; // for common core rail
    NvRmMilliVolts NominalCpuMv;  // for dedicated CPU rail
    */
    pSKUedLimits = pShmoo->pSKUedLimits;
    // override defaults with configuration values
    // CPU clock
    pSKUedLimits->CpuMaxKHz = MAX_CPU_OC_FREQ;
#ifdef CONFIG_BOOST_PERIPHERALS
    // AVP clock
    pSKUedLimits->AvpMaxKHz = CONFIG_MAX_AVP_OC_FREQ;
    // 3D clock
    pSKUedLimits->TDMaxKHz = CONFIG_MAX_3D_OC_FREQ;
#endif // CONFIG_BOOST_PERIPHERALS
#endif // CONFIG_FAKE_SHMOO

    NvOsDebugPrintf("NVRM corner (%d, %d)\n",
        s_ChipFlavor.corner, s_ChipFlavor.CpuCorner);

    NvOsMemset((void*)s_pClockScales, 0, sizeof(s_pClockScales));
    NvOsMemset(s_ClockRangeLimits, 0, sizeof(s_ClockRangeLimits));
    NvOsMemset(s_VoltageStepRefCounts, 0, sizeof(s_VoltageStepRefCounts));
    s_VoltageStepRefCounts[0] = NvRmPrivModuleID_Num; // all at minimum step

    // Combine AVP/System clock absolute limit with scaling V/F ladder upper
    // boundary, and set default clock range for all present modules the same
    // as for AVP/System clock
#ifdef CONFIG_AVP_OVERCLOCK
    AvpMaxKHz = 266400;
#else
    AvpMaxKHz = pSKUedLimits->AvpMaxKHz;
    for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
    {
        if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
        {
            AvpMaxKHz = NV_MIN(
                AvpMaxKHz, pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex]);
            break;
        }
    }
#endif // CONFIG_AVP_OVERCLOCK

    for (i = 0; i < NvRmPrivModuleID_Num; i++)
    {
        NvRmModuleInstance *inst;
        if (NvRmPrivGetModuleInstance(hRmDevice, i, &inst) == NvSuccess)
        {
            s_ClockRangeLimits[i].MaxKHz = AvpMaxKHz;
            s_ClockRangeLimits[i].MinKHz = NVRM_BUS_MIN_KHZ;
        }
    }

    // Fill in limits for modules with selectable clock sources and/or dividers
    // as specified by the h/w table according to the h/w device ID
    // (CPU and AVP are not in relocation table - need translate id explicitly)
    // TODO: need separate subclock limits? (current implementation applies
    // main clock limits to all subclocks)
    for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
    {
        NvRmModuleID id;
        if (pHwLimits[i].HwDeviceId == NV_DEVID_CPU)
            id = NvRmModuleID_Cpu;
        else if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
            id = NvRmModuleID_Avp;
        else if (pHwLimits[i].HwDeviceId == NVRM_DEVID_CLK_SRC)
            id = NvRmClkLimitsExtID_ClkSrc;
        else
            id = NvRmPrivDevToModuleID(pHwLimits[i].HwDeviceId);
        if ((id != NVRM_DEVICE_UNKNOWN) && (pHwLimits[i].SubClockId == 0))
        {
            s_ClockRangeLimits[id].MinKHz = pHwLimits[i].MinKHz;
            s_ClockRangeLimits[id].MaxKHz =
                pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex];
            s_pClockScales[id] = pHwLimits[i].MaxKHzList;
        }
    }

    // Fill in CPU scaling data if SoC has dedicated CPU rail, and CPU clock
    // characterization data is separated from other modules on common core rail
    if (s_ChipFlavor.pCpuShmoo)
    {
        const NvRmScaledClkLimits* pCpuLimits =
            s_ChipFlavor.pCpuShmoo->pScaledCpuLimits;
        NV_ASSERT(pCpuLimits && (pCpuLimits->HwDeviceId == NV_DEVID_CPU));

        s_ClockRangeLimits[NvRmModuleID_Cpu].MinKHz = pCpuLimits->MinKHz;
        s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz =
            pCpuLimits->MaxKHzList[s_ChipFlavor.pCpuShmoo->ShmooVmaxIndex];
        s_pClockScales[NvRmModuleID_Cpu] = pCpuLimits->MaxKHzList;
    }

    // Set AVP upper clock boundary with combined Absolute/Scaled limit;
    // Sync System clock with AVP (System is not in relocation table)
    s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz = AvpMaxKHz;
    s_ClockRangeLimits[NvRmPrivModuleID_System].MaxKHz =
        s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz;
    s_ClockRangeLimits[NvRmPrivModuleID_System].MinKHz =
        s_ClockRangeLimits[NvRmModuleID_Avp].MinKHz;
    s_pClockScales[NvRmPrivModuleID_System] = s_pClockScales[NvRmModuleID_Avp];

    // Set VDE upper clock boundary with combined Absolute/Scaled limit (on
    // AP15/AP16 the VDE clock is derived from the system bus, and the VDE
    // maximum limit must be the same as AVP/System).
    VdeMaxKHz = pSKUedLimits->VdeMaxKHz;
    VdeMaxKHz = NV_MIN(
        VdeMaxKHz, s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz);
    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
    {
        NV_ASSERT(VdeMaxKHz == AvpMaxKHz);
    }
    s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz = VdeMaxKHz;

    // Set upper clock boundaries for devices on CPU bus (CPU, Mselect,
    // CMC) with combined Absolute/Scaled limits
    CpuMaxKHz = pSKUedLimits->CpuMaxKHz;
    CpuMaxKHz = NV_MIN(
        CpuMaxKHz, s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz);
    s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz = CpuMaxKHz;
    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
    {
        s_ClockRangeLimits[NvRmModuleID_CacheMemCtrl].MaxKHz = CpuMaxKHz;
        s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz;
        NV_ASSERT(s_ClockRangeLimits[NvRmClkLimitsExtID_ClkSrc].MaxKHz >=
                  CpuMaxKHz);
    }
    else if (hRmDevice->ChipId.Id == 0x20)
    {
        // No CMC; TODO: Mselect/CPU <= 1/4?
        s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz >> 2;
    }
NvBool NvOdmMouseReset(NvOdmMouseDeviceHandle hDevice)
{
    NvError err = NvError_Success;
    NvEcRequest Request = {0};
    NvEcResponse Response = {0};
    NvU32 count = 0, MousePort = 0, i = 0;
    NvBool ret = NV_FALSE;
    NvOdmMouseDevice *hMouseDev = (NvOdmMouseDevice *)hDevice;

    MousePort = MOUSE_PS2_PORT_ID_0;
    count = CMD_MAX_RETRIES + 1;

    while ((ret == NV_FALSE) && (count--))
    {
        // fill up request structure
        Request.PacketType = NvEcPacketType_Request;
        Request.RequestType = NvEcRequestResponseType_AuxDevice;
        Request.RequestSubtype =
            ((NvEcRequestResponseSubtype)
             (NV_DRF_NUM(NVEC, SUBTYPE, AUX_PORT_ID, MousePort))) |
            ((NvEcRequestResponseSubtype)NvEcAuxDeviceSubtype_SendCommand);
        Request.NumPayloadBytes = 2;
        Request.Payload[0] = 0xFF; // set the reset command
        Request.Payload[1] = 3;

        // Request to EC
        err = NvEcSendRequest(hMouseDev->hEc, &Request, &Response,
                              sizeof(Request), sizeof(Response));
        if (NvSuccess != err)
        {
            //NVODMMOUSE_PRINTF(("NvEcSendRequest failed !!"));
            NvOsDebugPrintf("NvEcSendRequest failed !!\n");
            NvOsWaitUS(100000);
            continue;
        }

        // mouse not found
        if (NvEcStatus_Success != Response.Status)
        {
            //NVODMMOUSE_PRINTF(("EC response failed !!"));
            NvOsDebugPrintf("EC response failed !!\n");
            //if (MousePort != MOUSE_PS2_PORT_ID_1)
            //{
            //    count = CMD_MAX_RETRIES + 1;
            //    MousePort = MOUSE_PS2_PORT_ID_1;
            //}
            NvOsWaitUS(100000);
            continue;
        }

        if (Response.NumPayloadBytes != 3)
            continue;

        // success
        if (Response.Payload[0] == 0xFA)
        {
            ret = NV_TRUE; // at least one mouse found!
            hMouseDev->ValidMousePorts[i] = MousePort;
            //if (MousePort != MOUSE_PS2_PORT_ID_1)
            //{
            //    count = CMD_MAX_RETRIES + 1;
            //    MousePort = MOUSE_PS2_PORT_ID_1;
            //    i++;
            //    continue;
            //}
        }
    }
    return ret;
}