Example #1
NvError NvRmPrivAp20ChipUniqueId(NvRmDeviceHandle hDevHandle,void* pId)
{
    NvU64*  pOut = (NvU64*)pId; // Pointer to output buffer
    NvU32   OldRegData;                // Old register contents
    NvU32   NewRegData;                // New register contents

    NV_ASSERT(hDevHandle);
    NV_ASSERT(pId);
#if NV_USE_FUSE_CLOCK_ENABLE
    // Enable fuse clock
    Ap20EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_TRUE);
#endif
    // Access to the unique id is protected, so make sure all registers are visible first.
    OldRegData = NV_REGR(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
    NewRegData = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, MISC_CLK_ENB, CFG_ALL_VISIBLE, 1, OldRegData);
    NV_REGW(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0, NewRegData);

    // Read the secure id from the fuse registers and copy to the output buffer.
    *pOut = ((NvU64)NV_REGR(hDevHandle, (NvRmPrivModuleID)NvRmModuleID_Fuse, 0, FUSE_JTAG_SECUREID_0_0)) |
            (((NvU64)NV_REGR(hDevHandle, (NvRmPrivModuleID)NvRmModuleID_Fuse, 0, FUSE_JTAG_SECUREID_1_0)) << 32);

    // Restore the protected-register visibility setting to the way we found it.
    NV_REGW(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0, OldRegData);
#if NV_USE_FUSE_CLOCK_ENABLE
    // Disable fuse clock
    Ap20EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_FALSE);
#endif
    return NvError_Success;
}
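A minimal usage sketch (assumed, not part of the original source): the caller owns an NvU64 and passes it through the void* interface above; the helper name below is hypothetical and only illustrates the calling convention.

static NvError ExamplePrintChipId(NvRmDeviceHandle hRm)   // hypothetical helper
{
    NvU64 UniqueId = 0;
    // Pass the 64-bit buffer through the void* parameter, as expected above.
    NvError err = NvRmPrivAp20ChipUniqueId(hRm, &UniqueId);
    if (err == NvError_Success)
        NvOsDebugPrintf("Chip unique id: 0x%08x%08x\n",
                        (NvU32)(UniqueId >> 32), (NvU32)UniqueId);
    return err;
}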
Example #2
NvError
ReadObsData(
        NvRmDeviceHandle rm,
        NvRmModuleID modID,
        NvU32 start_index,
        NvU32 length,
        NvU32 *value)
{
    NvU32 i = 0, offset = 0, value1, value2;
    NvU32 timeout;
    NvU32 partID = 0xffffffff;
    NvU32 index, temp;

    for (i = 0; i < ObsInfoTableSize; i++)
    {
        if (modID == ObsInfoTable[i].modSelect)
        {
            partID = ObsInfoTable[i].partSelect;
            break;
        }
    }
    if (i == ObsInfoTableSize)
    {
        return NvError_BadParameter;
    }

    for (offset = 0; offset < length; offset++)
    {
        index = start_index + offset;
        // Select the observation source: enable the OBS bus and route the
        // requested module/partition/signal to the data register.
        temp = NV_DRF_DEF(APB_MISC_GP, OBSCTRL, OBS_EN, ENABLE) |
            NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_MOD_SEL, modID) |
            NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_PART_SEL, partID) |
            NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_SIG_SEL, index);
        NV_REGW(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0, temp);
        value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0);
        // Poll the data register until two consecutive reads agree (or the
        // timeout expires), so a value captured mid-update is not returned.
        timeout = 100;
        do {
            value2 = value1;
            value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSDATA_0);
            timeout--;
        } while (value1 != value2 && timeout);
        NvOsDebugPrintf("OBS bus modID 0x%x index 0x%x = value 0x%x\n",
                modID, index, value1);
        value[offset] = value1;
    }
    }
    return NvSuccess;
}
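As a rough usage sketch (assumed helper, not in the original source), a caller passes a module ID that appears in ObsInfoTable and an output array sized to the requested length; NvRmModuleID_Misc below is only a placeholder choice.

static void ExampleDumpObsSignals(NvRmDeviceHandle rm)   // hypothetical helper
{
    NvU32 Samples[4];
    // NvRmModuleID_Misc is a placeholder; any module listed in ObsInfoTable
    // is a valid choice for the OBS bus.
    if (ReadObsData(rm, NvRmModuleID_Misc, 0, 4, Samples) != NvSuccess)
        NvOsDebugPrintf("Module is not observable on the OBS bus\n");
}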
Example #3
static void
ReportRmPowerState(NvRmDeviceHandle hRmDeviceHandle)
{
    NvU32 i;
    NvRmPowerState OldRmState = NvRmPrivPowerGetState(hRmDeviceHandle);
    NvRmPowerState NewRmState = NvRmPowerState_Idle;

    // RM clients are in h/w autonomous (bypass) state if there are Power On
    // references for NPG_AUTO group only; RM clients are in active state if
    // there are Power On references for any other group
    if (s_PowerOnRefCounts[NVRM_POWERGROUP_NPG_AUTO] != 0)
        NewRmState = NvRmPowerState_AutoHw;

    for (i = 0; i < NV_POWERGROUP_MAX; i++)
    {
        if (s_PowerOnRefCounts[i] != 0)
        {
            NewRmState = NvRmPowerState_Active;
            break;
        }
    }
    if (NewRmState == OldRmState)
        return;

#if NVRM_POWER_VERBOSE_PRINTF
    NVRM_POWER_PRINTF(("RM Clients Power State: %s\n",
        ((NewRmState == NvRmPowerState_Active) ? "Active" :
         ((NewRmState == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle"))));
#endif
    /*
     * Set the new combined RM clients power state in the storage shared with
     * the OS adaptation layer. Check the previous state; if it was any of the
     * low power states (i.e., this is the first RM power state report after
     * suspend), notify all clients about the wake-up event.
     */
    NvRmPrivPowerSetState(hRmDeviceHandle, NewRmState);
    switch (OldRmState)
    {
        case NvRmPowerState_LP0:
            NvOsDebugPrintf("*** Wakeup from LP0 *** wake-source: 0x%x\n",
                    NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, 0x14));
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP0);
            break;
        case NvRmPowerState_LP1:
            NvOsDebugPrintf("*** Wakeup from LP1 ***\n");
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
            break;
        case NvRmPowerState_SkippedLP0:
            NvOsDebugPrintf("*** Wakeup after Skipped LP0 ***\n");
            // resume procedure after Skipped LP0 is the same as after LP1
            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
            break;
        default:
            break;
    }
}
Example #4
static void
McStatAp1x_Stop(
        NvRmDeviceHandle rm,
        NvU32 *client_0_cycles,
        NvU32 *client_1_cycles,
        NvU32 *llc_client_cycles,
        NvU32 *llc_client_clocks,
        NvU32 *mc_clocks)
{
    *llc_client_cycles = NV_REGR(rm, NvRmPrivModuleID_ExternalMemoryController,
                           0, EMC_STAT_LLMC_COUNT_0_0);
    *llc_client_clocks = NV_REGR(rm, NvRmPrivModuleID_ExternalMemoryController,
                           0, EMC_STAT_LLMC_CLOCKS_0);
    *client_0_cycles = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
                         0, MC_STAT_EMC_COUNT_0_0);
    *client_1_cycles = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
                         0, MC_STAT_EMC_COUNT_1_0);
    *mc_clocks = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
                   0, MC_STAT_EMC_CLOCKS_0);
}
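A small, assumed calling sketch for the statistics snapshot above; the five out-parameters are plain NvU32 counters owned by the caller, and the helper name is hypothetical.

static void ExampleReadMcStats(NvRmDeviceHandle rm)   // hypothetical helper
{
    NvU32 Client0, Client1, LlcCycles, LlcClocks, McClocks;
    McStatAp1x_Stop(rm, &Client0, &Client1, &LlcCycles, &LlcClocks, &McClocks);
    NvOsDebugPrintf("MC stat: c0=%u c1=%u llc=%u/%u mc=%u\n",
                    Client0, Client1, LlcCycles, LlcClocks, McClocks);
}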
NvU32
NvRmPrivGetBctCustomerOption(NvRmDeviceHandle hRm)
{
    if (!NvRmIsSimulation())
    {
        return NV_REGR(hRm, NvRmModuleID_Pmif, 0, APBDEV_PMC_SCRATCH20_0);
    }
    else
    {
        return 0;
    }
}
Example #6
void NvRmPrivIoPowerControl(
    NvRmDeviceHandle hRmDeviceHandle,
    NvU32 NoIoPwrMask,
    NvBool Enable)
{
    NvU32 reg = NV_REGR(
        hRmDeviceHandle, NvRmModuleID_Pmif, 0, APBDEV_PMC_NO_IOPOWER_0);
    reg = Enable ? (reg & (~NoIoPwrMask)) : (reg | NoIoPwrMask);

#if NV_NO_IOPOWER_CONTROL
    NV_REGW(hRmDeviceHandle,
            NvRmModuleID_Pmif, 0, APBDEV_PMC_NO_IOPOWER_0, reg);
#endif
}
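For illustration only (the mask value below is hypothetical; real rail masks are SoC-specific), enabling a rail clears its bit in APBDEV_PMC_NO_IOPOWER_0 and disabling sets it:

#define EXAMPLE_IO_RAIL_MASK 0x1   // hypothetical rail bit, for illustration only

static void ExampleToggleIoRail(NvRmDeviceHandle hRm, NvBool PowerOn)
{
    // Enable == NV_TRUE clears the NO_IOPOWER bit (rail powered);
    // Enable == NV_FALSE sets it (rail unpowered).
    NvRmPrivIoPowerControl(hRm, EXAMPLE_IO_RAIL_MASK, PowerOn);
}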
Example #7
/* This function sets some performance timings for the MC & EMC. Numbers are
 * from the Arch team.
 */
void NvRmPrivAp15SetupMc(NvRmDeviceHandle hRm)
{
    NvU32   reg, mask;
    reg = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
              MC_LOWLATENCY_CONFIG_0);
    mask = NV_DRF_DEF(MC, LOWLATENCY_CONFIG, CMCR_LL_CTRL, ENABLE) |
           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, CMCR_LL_SEND_BOTH, ENABLE) |
           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_CTRL, ENABLE) |
           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_SEND_BOTH, ENABLE);
    if ( mask != (reg & mask) )
        NV_ASSERT(!"MC LL Path not enabled!");

    /* 1) TIMEOUT value for VDE is 256 cycles, 3D, 2D timeouts are disabled, all others 512 cycles. */
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_CTRL_0,    0x00000028);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_CMC_0,     0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_DC_0,      0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_DCB_0,     0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_EPP_0,     0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_G2_0,      0x0);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_HC_0,      0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_ISP_0,     0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPCORE_0,  0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEA_0,    0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEB_0,    0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEC_0,    0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_NV_0,      0x0);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_PPCS_0,    0x88888888);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_VDE_0,     0x44444444);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT1_VDE_0,    0x44444444);
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_VI_0,      0x88888888);

    /* 2) Command Queue values should be 2,2,6 for better performance. */
    NV_REGW(hRm, NvRmPrivModuleID_ExternalMemoryController, 0, EMC_CMDQ_0,   0x00002206);

    /* 3) MC_EMEM_ARB_CFG0_0 should have optimal values for 166 MHz DRAM.
     *    27:22 EMEM_BANKCNT_NSP_TH (0xC seems to be better for 166 MHz)
     *    21:16 EMEM_BANKCNT_TH     (0x8 seems to be better for 166 MHz)
     *
     *    MC_EMEM_ARB_CFG0_0 <= 0x0308_1010
     */

    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_EMEM_ARB_CFG0_0,    0x03081010);
}
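The register values can be checked against comment (1) above: each per-client timeout register packs one 4-bit field per sub-client, and the numbers are consistent with each field counting in units of 64 cycles, so 0x8 per nibble gives 512 cycles, the VDE value of 0x4 gives 256 cycles, and 0x0 for G2/NV (the 2D/3D clients) disables the timeout. The 64-cycle unit is inferred from the values written here rather than quoted from hardware documentation.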
Example #8
static void McErrorIntHandler(void* args)
{
    NvU32 RegVal;
    NvU32 IntStatus;
    NvU32 IntClear = 0;
    NvRmDeviceHandle hRm = (NvRmDeviceHandle)args;

    IntStatus = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0);
    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_AXI_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_AXI_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_AXI_ADR_0);
        NvOsDebugPrintf("AXI DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_AXI_STATUS_0);
        NvOsDebugPrintf("AXI DecErrStatus=0x%x ", RegVal);
    }
    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_EMEM_OTHERS_ADR_0);
        NvOsDebugPrintf("EMEM DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_DECERR_EMEM_OTHERS_STATUS_0);
        NvOsDebugPrintf("EMEM DecErrStatus=0x%x ", RegVal);
    }
    if ( NV_DRF_VAL(MC, INTSTATUS, INVALID_GART_PAGE_INT, IntStatus) )
    {
        IntClear |= NV_DRF_DEF(MC, INTSTATUS, INVALID_GART_PAGE_INT, SET);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_GART_ERROR_ADDR_0);
        NvOsDebugPrintf("GART DecErrAddress=0x%x ", RegVal);
        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
                     MC_GART_ERROR_REQ_0);
        NvOsDebugPrintf("GART DecErrStatus=0x%x ", RegVal);
    }

    NV_ASSERT(!"MC Decode Error ");
    // Clear the interrupt.
    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0, IntClear);
    NvRmInterruptDone(s_McInterruptHandle);
}
NvError
NvRmAp20GetStraps(
    NvRmDeviceHandle hDevice,
    NvRmStrapGroup StrapGroup,
    NvU32* pStrapValue)
{
    NvU32 reg = NV_REGR(
        hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_STRAPPING_OPT_A_0);

    switch (StrapGroup)
    {
        case NvRmStrapGroup_RamCode:
            reg = NV_DRF_VAL(APB_MISC_PP, STRAPPING_OPT_A, RAM_CODE, reg);
            break;
        default:
            return NvError_NotSupported;
    }
    *pStrapValue = reg;
    return NvSuccess;
}
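A minimal caller sketch (assumed, not part of the original source) for the only supported strap group:

static void ExamplePrintRamCode(NvRmDeviceHandle hDevice)   // hypothetical helper
{
    NvU32 RamCode;
    if (NvRmAp20GetStraps(hDevice, NvRmStrapGroup_RamCode, &RamCode) == NvSuccess)
        NvOsDebugPrintf("RAM_CODE strap: 0x%x\n", RamCode);
}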
Example #10
NvRmMilliVolts
NvRmPrivPowerGroupGetVoltage(
    NvRmDeviceHandle hRmDeviceHandle,
    NvU32 PowerGroup)
{
    NvRmMilliVolts Voltage = NvRmVoltsUnspecified;
    if (PowerGroup >= NV_POWERGROUP_MAX)
        return Voltage;     // "virtual" groups are always On

    // Do not check a non-gated power group - it is On by definition
    if (s_PowerGroupIds[PowerGroup] != NV_POWERGROUP_INVALID)
    {
        NvU32 reg = NV_REGR(
            hRmDeviceHandle, NvRmModuleID_Pmif, 0, APBDEV_PMC_PWRGATE_STATUS_0);
        if ((reg & (0x1 << s_PowerGroupIds[PowerGroup])) == 0x0)
        {
            // Specified power group is gated
            Voltage = NvRmVoltsOff;
        }
    }
    return Voltage;
}
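As a sketch of how a caller might interpret the result (assumed helper, not in the original): anything other than NvRmVoltsOff means the partition is either powered or a "virtual", always-on group.

static NvBool ExampleIsPowerGroupGated(
    NvRmDeviceHandle hRmDeviceHandle,
    NvU32 PowerGroup)   // hypothetical helper
{
    return (NvRmPrivPowerGroupGetVoltage(hRmDeviceHandle, PowerGroup) ==
            NvRmVoltsOff);
}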
void NvRmPrivAp20EmcParametersAdjust(NvRmDfs* pDfs)
{
    NvRmDfsParam EmcParamDddr2 = { NVRM_DFS_PARAM_EMC_AP20_DDR2 };
    NvRmDfsParam EmcParamLpDddr2 = { NVRM_DFS_PARAM_EMC_AP20_LPDDR2 };

    NvU32 RegValue = NV_REGR(pDfs->hRm,
        NvRmPrivModuleID_ExternalMemoryController, 0, EMC_FBIO_CFG5_0);

    // Overwrite default EMC parameters and LP2 policy with SDRAM type specific
    // settings
    switch (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_TYPE, RegValue))
    {
        case EMC_FBIO_CFG5_0_DRAM_TYPE_LPDDR2:
            pDfs->DfsParameters[NvRmDfsClockId_Emc] = EmcParamLpDddr2;
            break;

        case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR2:
            pDfs->DfsParameters[NvRmDfsClockId_Emc] = EmcParamDddr2;
            break;

        default:
            NV_ASSERT(!"Not supported DRAM type");
    }
}
NvRmPmRequest 
NvRmPrivAp20GetPmRequest(
    NvRmDeviceHandle hRmDevice,
    const NvRmDfsSampler* pCpuSampler,
    NvRmFreqKHz* pCpuKHz)
{
    // Assume initial slave CPU1 On request
    static NvRmPmRequest s_LastPmRequest = (NvRmPmRequest_CpuOnFlag | 0x1);
    static NvRmFreqKHz s_Cpu1OnMinKHz = 0, s_Cpu1OffMaxKHz = 0;
    static NvU32 s_Cpu1OnPendingCnt = 0, s_Cpu1OffPendingCnt = 0;

    NvU32 t;
    NvRmPmRequest PmRequest = NvRmPmRequest_None;
    NvBool Cpu1Off =
        (0 != NV_DRF_VAL(CLK_RST_CONTROLLER, RST_CPU_CMPLX_SET, SET_CPURESET1,
                         NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                                 CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0)));
    NvRmFreqKHz CpuLoadGaugeKHz;

    // Use clocks for load when no busy hint, otherwise use activity metrics
    if (NvRmPrivDfsGetBusyHintActive(NvRmDfsClockId_Cpu)) {
        CpuLoadGaugeKHz = pCpuSampler->AverageKHz;
    } else {
        CpuLoadGaugeKHz = *pCpuKHz;
    }

    // Slave CPU1 power management policy thresholds:
    // - use fixed values if they are defined explicitly, otherwise
    // - set the CPU1 OffMax threshold at 3/4 of the max CPU frequency,
    //   and the CPU1 OnMin threshold at 1/2 of the max CPU frequency
    if ((s_Cpu1OffMaxKHz == 0) && (s_Cpu1OnMinKHz == 0))
    {
        NvRmFreqKHz MaxKHz =
            NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;

        s_Cpu1OnMinKHz = NVRM_CPU1_ON_MIN_KHZ ?
                         NVRM_CPU1_ON_MIN_KHZ : (MaxKHz / 2);
        s_Cpu1OffMaxKHz = NVRM_CPU1_OFF_MAX_KHZ ?
                          NVRM_CPU1_OFF_MAX_KHZ : (3 * MaxKHz / 4);
        NV_ASSERT(s_Cpu1OnMinKHz < s_Cpu1OffMaxKHz);
    }

    // Timestamp
    if (s_pTimerUs == NULL)
        s_pTimerUs = NvRmPrivAp15GetTimerUsVirtAddr(hRmDevice);
    t = NV_READ32(s_pTimerUs);

    /*
     * Request OS kernel to turn CPU1 Off if all of the following are true:
     * (a) CPU frequency is below the OnMin threshold,
     * (b) CPU1 is actually On.
     *
     * Request OS kernel to turn CPU1 On if all of the following are true:
     * (a) CPU frequency is above the OffMax threshold,
     * (b) CPU1 is actually Off.
     */
    if (CpuLoadGaugeKHz < s_Cpu1OnMinKHz)
    {
        s_Cpu1OnPendingCnt = 0;
        if ((s_Cpu1OffPendingCnt & 0x1) == 0)
        {
            s_Cpu1OffPendingCnt = t | 0x1;  // Use LSb as a delay start flag
            return PmRequest;
        }
        if ((t - s_Cpu1OffPendingCnt) < (NVRM_CPU1_OFF_PENDING_MS * 1000))
            return PmRequest;

        if (!Cpu1Off)
        {
            s_LastPmRequest = PmRequest = (NvRmPmRequest_CpuOffFlag | 0x1);
            s_Cpu1OffPendingCnt = 0;   // re-start delay after request
        }
#if NVRM_TEST_PMREQUEST_UP_MODE
        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
            CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0,
            CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0_SET_CPURESET1_FIELD);
#endif
    }
    else if (CpuLoadGaugeKHz > s_Cpu1OffMaxKHz)
    {
        s_Cpu1OffPendingCnt = 0;
        if ((s_Cpu1OnPendingCnt & 0x1) == 0)
        {
            s_Cpu1OnPendingCnt = t | 0x1;  // Use LSb as a delay start flag
            return PmRequest;
        }
        if ((t - s_Cpu1OnPendingCnt) < (NVRM_CPU1_ON_PENDING_MS * 1000))
            return PmRequest;

        if (Cpu1Off)
        {
            s_LastPmRequest = PmRequest = (NvRmPmRequest_CpuOnFlag | 0x1);
            *pCpuKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
            s_Cpu1OnPendingCnt = 0;  // re-start delay after request
        }
#if NVRM_TEST_PMREQUEST_UP_MODE
        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
            CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0,
            CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0_CLR_CPURESET1_FIELD);
#endif
    }
    else
    {   // Re-start both delays inside hysteresis loop
        s_Cpu1OnPendingCnt = 0;
        s_Cpu1OffPendingCnt = 0;
    }
    return PmRequest;
}
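Worked example of the default thresholds above, assuming for illustration a 1000000 kHz maximum CPU clock and no NVRM_CPU1_ON_MIN_KHZ / NVRM_CPU1_OFF_MAX_KHZ overrides: s_Cpu1OnMinKHz = 1000000 / 2 = 500000 kHz and s_Cpu1OffMaxKHz = 3 * 1000000 / 4 = 750000 kHz. Load sustained below 500 MHz for NVRM_CPU1_OFF_PENDING_MS produces a CpuOff request (if CPU1 is currently on), load sustained above 750 MHz for NVRM_CPU1_ON_PENDING_MS produces a CpuOn request (if CPU1 is currently off), and anything in between only clears the pending-delay timestamps.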
Example #13
static void
PowerGroupPowerControl(
    NvRmDeviceHandle hRmDeviceHandle,
    NvU32 PowerGroup,
    NvBool Enable)
{
    NvU32 reg, Id, Mask, Status;

    // Do nothing if not SoC platform
    NV_ASSERT(hRmDeviceHandle);
    if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
        return;

    // Do nothing if power group is already in requested state
    NV_ASSERT(s_PowerGroupIds[PowerGroup] != NV_POWERGROUP_INVALID);
    Id = s_PowerGroupIds[PowerGroup];
    Mask = (0x1 << Id);
    Status = Mask & NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
                            APBDEV_PMC_PWRGATE_STATUS_0);
    if (Enable == (Status != 0x0))
        return;

    /*
     * Gating procedure:
     * - assert resets to all modules in power group
     * - toggle power gate
     *
     * Ungating procedure:
     * - assert resets to all modules in power group (redundant)
     * - toggle power gate
     * - enable clocks to all modules in power group
     * - reset propagation delay
     * - remove clamping
     * - disable clocks to all modules in power group
     * - de-assert reset to all modules in power group
     *
     * Special note on toggle timers (shared with the OAL, which does CPU power
     * gating): per convention with the OAL, default settings are never changed.
     */
    PowerGroupResetControl(hRmDeviceHandle, PowerGroup, NV_TRUE);

    reg = NV_DRF_DEF(APBDEV_PMC, PWRGATE_TOGGLE, START, ENABLE) | Id;
    NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
            APBDEV_PMC_PWRGATE_TOGGLE_0, reg);
    for (;;)
    {
        reg = NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
                      APBDEV_PMC_PWRGATE_STATUS_0);
        if (Status != (reg & Mask))
            break;
    }
    if (Enable)
    {
        PowerGroupClockControl(hRmDeviceHandle, PowerGroup, NV_TRUE);
        NvOsWaitUS(NVRM_RESET_DELAY);

        // PCIE and VDE clamping masks are swapped relative to the
        // partition Ids (bug 602975)
        if (PowerGroup == NV_POWERGROUP_PCIE)
            Mask = 0x1 << s_PowerGroupIds[NV_POWERGROUP_VDE];
        else if (PowerGroup == NV_POWERGROUP_VDE)
            Mask = 0x1 << s_PowerGroupIds[NV_POWERGROUP_PCIE];

        NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
                APBDEV_PMC_REMOVE_CLAMPING_CMD_0, Mask);
        for (;;)
        {
            reg = NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
                          APBDEV_PMC_REMOVE_CLAMPING_CMD_0);
            if (reg == 0)
                break;
        }
        PowerGroupClockControl(hRmDeviceHandle, PowerGroup, NV_FALSE);
        PowerGroupResetControl(hRmDeviceHandle, PowerGroup, NV_FALSE);
    }
}