//
// System-thread entry point: periodically walks every CPU, samples the
// current P-state from the PERF_STS MSR for statistics, and (while the
// user-mode app keeps the stay-alive counter fresh) writes the requested
// P-state via the PERF_CTL MSR.
//
// Context - actually a PDEVICE_EXTENSION2 for the filter device.
//
// NOTE(review): this chunk is truncated — the close of the per-CPU for loop,
// the CycleCount increment/reset the comments refer to, the close of the
// while loop, and the function epilogue are outside this view.
//
VOID PerfCtlThread(IN PVOID Context)
{
    PDEVICE_EXTENSION2 pDevExt = (PDEVICE_EXTENSION2)Context;
    ULONG Exceptions = 0;            // consecutive MSR access faults; thread gives up past 10
    ULONG CyclesPerSecond;
    ULONG CycleCount = 0;            // loop-iteration counter (reset logic is beyond this view)
    ULONG64 oldtime = 0;             // tick-derived ms timestamp of the previous stats sample

    // calculate how many times the thread goes round per second
    // NOTE(review): stray "WDF_REL_TIMEOUT_IN_MS()" text appeared fused into this
    // comment in the original; it cannot be executable code (no semicolon before
    // the assignment below would compile) — confirm against source history.
    // NOTE(review): divides by Config.ThreadDelay — presumably validated non-zero
    // by the config path; verify, a 0 here would fault at DISPATCH_LEVEL or below.
    CyclesPerSecond = 1000 / pDevExt->Config.ThreadDelay;

    KdPrint(("LPCFILTER: PerfCtlThread Enter with %d ms delay, %d stay alive limit, %d Stats per second \n", pDevExt->Config.ThreadDelay, pDevExt->Config.StayAliveFailureLimit, pDevExt->Config.StatsSampleRate));

    while(pDevExt->ThreadGo)
    {
        ULONG x = 0;
        KAFFINITY tempAffinity;
        BOOLEAN statstaken = FALSE;      // take the tick-count snapshot only once per pass, not per CPU
        ULONG64 nowtime = 0;
        ULONG64 difftime = 0;
        LARGE_INTEGER largeint1;
        NTSTATUS Status;
        LARGE_INTEGER waittimeout;
        KAFFINITY oldAffinity;           // affinity before the first switch; restore is not visible in this chunk
        ULONG ThreadDelay;               // NOTE(review): unused local in the visible code

        // Relative timeout: negative 100ns units. HighPart = -1 sign-extends the
        // negative LowPart product (valid while ThreadDelay*10000 fits in 32 bits).
        waittimeout.LowPart = (pDevExt->Config.ThreadDelay * -10000); // units of 100 nanoseconds, 100 x 10^-9
        waittimeout.HighPart = -1;

        // go to sleep for our specified time
        Status = KeDelayExecutionThread(KernelMode, FALSE, &waittimeout);

        //
        // For each CPU switch to it
        //
        for(x = 0 ; x < pDevExt->NumProcs ; x++)
        {
#if defined X64ARCH
            tempAffinity = (1i64 << x);  // If you ever wondered what 1i64 is and
                                         // why such an odd way of representing '1' is needed
                                         // take a look at http://msdn.microsoft.com/en-us/library/ke55d167.aspx
#elif defined X86ARCH
            tempAffinity = (1 << x);
#endif
            // if it is a multi proc system switch to the next CPU
            if(pDevExt->NumProcs > 1)
            {
                // first time though make a note of the original affinity
                if(x == 0)
                {
                    oldAffinity = KeSetSystemAffinityThreadEx(tempAffinity );
                }
                else
                {
                    KeSetSystemAffinityThreadEx(tempAffinity );
                }
            }

            //
            // We need to keep stats on the CPU frequencies as requested
            //
            if( pDevExt->Config.StatsSampleRate && (CycleCount == (CyclesPerSecond / pDevExt->Config.StatsSampleRate)) )
            {
                KIRQL kirql;
                ULONG64 MsrData = 0;
                ULONG y;
                PLPCCPUFrequency pFreqData;

                // if it is the first time round this pass, snapshot the tick count
                // and convert it to milliseconds via MillisPerTick
                if(statstaken == FALSE)
                {
                    KeQueryTickCount (&largeint1);
                    memcpy(&nowtime, &largeint1, sizeof(ULONG64));
                    nowtime = nowtime * pDevExt->MillisPerTick;
                    difftime = nowtime - oldtime;
                    oldtime = nowtime;
                }

                // set the flag to show we took stats already; we will reset the
                // cyclecount later, but also reuse the same tick count for all
                // the CPUs' stats data
                statstaken = TRUE;

                //
                // Read the IA32_PERF_STS register and for that value increment the hit count for the CPU
                //
                KeRaiseIrql(HIGH_LEVEL, &kirql);
                try
                {
                    ULONG MSR_RANGE_MASK = 0;

                    // Only functional-fixed-hardware (MSR) access is handled here
                    if(pDevExt->PCTAccessType == ADDRESS_SPACE_FFHW)
                    {
                        if(pDevExt->CpuManuf == INTEL)
                        {
                            MsrData = RdMsr(IA32_PERF_STS);
                            MSR_RANGE_MASK = INTEL_MSR_RANGE;
                        }
                        else if(pDevExt->CpuManuf == AMD)
                        {
                            MsrData = RdMsr(AMD_PERF_STS);
                            MSR_RANGE_MASK = AMD_MSR_RANGE;
                        }
                    }

                    pDevExt->pStats->TimeStamp = nowtime;

                    // Per-CPU frequency table lives after the LPCPstateStats header,
                    // NumFrequencies entries per CPU, indexed by CPU number x
                    pFreqData = (PLPCCPUFrequency) ( ((PUCHAR)pDevExt->pStats) + sizeof(LPCPstateStats) + (sizeof(LPCCPUFrequency) * x * pDevExt->pStats->NumFrequencies) );

                    for(y = 0 ; y < pDevExt->pStats->NumFrequencies ; y++)
                    {
                        if(pFreqData[y].Status == (MsrData & MSR_RANGE_MASK))
                        {
                            pFreqData[y].Time += difftime;
                            break;
                        }
                    }

                    // on some CPUs rdmsr(CPU_PERF_STS) returns a value not in the reported list of values
                    // so add the time to the highest speed
                    if(y == pDevExt->pStats->NumFrequencies)
                        pFreqData[0].Time += difftime;

                    KeLowerIrql(kirql);
                }
                except(EXCEPTION_EXECUTE_HANDLER)
                // NOTE(review): original comment said "if we dont mask out the top 231 bits
                // on edx for armsr we get an exception on x64 CPUs" — presumably "top 32
                // bits" / "wrmsr"; confirm.
                {
                    NTSTATUS status = STATUS_SUCCESS;
                    KeLowerIrql(kirql);
                    status = GetExceptionCode();
                    KdPrint(("LPCFILTER: Exception thrown reading MSR status is 0x%X\n", status));
                    Exceptions++;
                    if(Exceptions > 10) // if we got this many exceptions in a row then give up
                        pDevExt->ThreadGo = FALSE;
                }
            }// end if(CycleCount == (CyclesPerSecond ....

            //
            // So we have our stats data, now we need to set the CPU speed if we have been told to do so
            // (PState == 0xFFFFFFFF means "not managed" for this CPU)
            //
            if(pDevExt->pCPUDB[x].PState != 0xFFFFFFFF)
            {
                ULONG MsrDataLo = 0;
                ULONG MsrDataHi = 0;
                PACPI_METHOD_ARGUMENT pArg;
                ULONG index = 0;
                PPSSData pPSSDataLow = NULL;
                PPSSData pPSSDataHigh = NULL;
                ULONG StayAlive;
                ULONG StayAliveFailureLimit;
                KIRQL kirql;
                ULONG MSR_RANGE_MASK = 0;

                pArg = &(((PACPI_EVAL_OUTPUT_BUFFER)pDevExt->pPSSData)->Argument[0]);

                // Loop through the PPSData to the highest and lowest set value:
                // entry 0 is the highest-performance P-state, the entry at the
                // requested PState index (clamped by Count) is the low target
                for(index = 0 ; (index < ((PACPI_EVAL_OUTPUT_BUFFER)pDevExt->pPSSData)->Count) && (index <= pDevExt->pCPUDB[x].PState) ; index++)
                {
                    if(index == 0)
                        pPSSDataHigh = (PPSSData)&(pArg->Data[0]);
                    pPSSDataLow = (PPSSData)&(pArg->Data[0]);
                    pArg = ACPI_METHOD_NEXT_ARGUMENT(pArg);
                }

                if(pDevExt->CpuManuf == INTEL)
                {
                    MSR_RANGE_MASK = INTEL_MSR_RANGE;
                }
                else if(pDevExt->CpuManuf == AMD)
                {
                    MSR_RANGE_MASK = AMD_MSR_RANGE;
                }

                // _PSS field [4] is the Control value to write into PERF_CTL.
                // NOTE(review): the second line reads MsrDataLo, not MsrDataHi — a
                // copy-paste slip that is harmless only because both start at 0,
                // making (x & ~MASK) == 0 in each case; confirm intent.
                MsrDataLo = (MsrDataLo & ~MSR_RANGE_MASK) | (pPSSDataLow[4].Data & MSR_RANGE_MASK);
                MsrDataHi = (MsrDataLo & ~MSR_RANGE_MASK) | (pPSSDataHigh[4].Data & MSR_RANGE_MASK);

                // if the app didnt set StayAlive to 0 then after a period of time, StayAliveFailureLimit times the thread delay,
                // we stop setting the CPU low since the app probably got descheduled due to intensive work by another
                // process
                StayAlive = InterlockedIncrement(&pDevExt->pCPUDB[x].StayAliveCount);
                StayAliveFailureLimit = pDevExt->Config.StayAliveFailureLimit;

                KeRaiseIrql(HIGH_LEVEL, &kirql);

                //
                // Set the CPU speed low, or to high once before we stop managing that CPU
                //
                if(StayAlive < (StayAliveFailureLimit + 1))
                {
                    try
                    {
                        if(StayAlive < StayAliveFailureLimit)
                        {
                            // Normal case: pin the CPU to the requested low P-state
                            if(pDevExt->PCTAccessType == ADDRESS_SPACE_FFHW)
                            {
#if defined X86ARCH
                                if(pDevExt->CpuManuf == INTEL)
                                {
                                    WrMsrExINTEL(MsrDataLo);
                                }
                                else if(pDevExt->CpuManuf == AMD)
                                {
                                    WrMsrExAMD(MsrDataLo);
                                }
#endif
#if defined X64ARCH
                                if(pDevExt->CpuManuf == INTEL)
                                {
                                    WrMsr64ExINTEL(MsrDataLo);
                                }
                                else if(pDevExt->CpuManuf == AMD)
                                {
                                    WrMsr64ExAMD(MsrDataLo);
                                }
#endif
                            }
                        }
                        else // when stayalive is at StayAliveFailureLimit we set the CPU high, then dont touch it
                        {
                            if(pDevExt->PCTAccessType == ADDRESS_SPACE_FFHW)
                            {
#if defined X86ARCH
                                if(pDevExt->CpuManuf == INTEL)
                                {
                                    WrMsrExINTEL(MsrDataHi);
                                }
                                else if(pDevExt->CpuManuf == AMD)
                                {
                                    WrMsrExAMD(MsrDataHi);
                                }
#endif
#if defined X64ARCH
                                if(pDevExt->CpuManuf == INTEL)
                                {
                                    WrMsr64ExINTEL(MsrDataHi);
                                }
                                else if(pDevExt->CpuManuf == AMD)
                                {
                                    WrMsr64ExAMD(MsrDataHi);
                                }
#endif
                            }
                        }

                        KeLowerIrql(kirql);
                        Exceptions = 0; // if we got here we didnt get an exception so we can reset our counter
                    }
                    except(EXCEPTION_EXECUTE_HANDLER)
                    // NOTE(review): same garbled "top 231 bits ... armsr" comment as the
                    // read path in the original — presumably "top 32 bits" / "wrmsr".
                    {
                        NTSTATUS status = STATUS_SUCCESS;
                        KeLowerIrql(kirql);
                        status = GetExceptionCode();
                        KdPrint(("LPCFILTER: Exception thrown writing MSR status is 0x%X\n", status));
                        Exceptions++;
                        if(Exceptions > 10) // if we got this many exceptions in a row then give up
                            pDevExt->ThreadGo = FALSE;
                    }
                }
                else
                {
                    KeLowerIrql(kirql);
                    // the app stopped setting the CPU state so we assume it isnt interested in managing this CPU
                    InterlockedExchange(&pDevExt->pCPUDB[x].PState, 0xFFFFFFFF);
                    InterlockedIncrement(&pDevExt->pStats->StayAliveFailureHits);
                }
            }
            // NOTE(review): source chunk ends here — the enclosing for/while closes
            // and the function epilogue are outside this view.
NTSTATUS Acpi_EvaluateUcsiDsm (
    _In_ PACPI_CONTEXT AcpiCtx,
    _In_ ULONG FunctionIndex,
    _Outptr_opt_ PACPI_EVAL_OUTPUT_BUFFER* Output
    )
/*++

Routine Description:

    Evaluates the UCSI _DSM method for the given function index by building a
    four-argument complex input buffer (UUID, revision, function index, empty
    package) and sending IOCTL_ACPI_EVAL_METHOD synchronously down the ACPI
    stack.

    N.B. Caller is expected to free the Output buffer.

Return Value:

    NTSTATUS. On success, *Output (if requested) receives the evaluation
    result, allocated from NonPagedPoolNx with TAG_UCSI.

--*/
{
    NTSTATUS ntStatus;
    WDFDEVICE wdfDevice;
    WDFMEMORY argsMemory = WDF_NO_HANDLE;
    WDF_MEMORY_DESCRIPTOR argsMemDesc;
    PACPI_EVAL_INPUT_BUFFER_COMPLEX complexInput;
    size_t argsPayloadSize;
    size_t argsTotalSize;
    PACPI_METHOD_ARGUMENT methodArg;
    WDF_MEMORY_DESCRIPTOR resultMemDesc;
    PACPI_EVAL_OUTPUT_BUFFER dsmResult = nullptr;
    size_t resultPayloadSize;
    size_t resultTotalSize;
    WDF_OBJECT_ATTRIBUTES memAttributes;
    WDF_REQUEST_SEND_OPTIONS ioctlOptions;

    PAGED_CODE();

    TRACE_FUNC_ENTRY(TRACE_FLAG_ACPI);

    wdfDevice = Context_GetWdfDevice(AcpiCtx);

    //
    // _DSM takes exactly four arguments: the interface UUID, the revision,
    // the function index, and a (here empty) package of extra parameters.
    //
    argsPayloadSize =
        ACPI_METHOD_ARGUMENT_LENGTH(sizeof(GUID)) +
        ACPI_METHOD_ARGUMENT_LENGTH(sizeof(ULONG)) +
        ACPI_METHOD_ARGUMENT_LENGTH(sizeof(ULONG)) +
        ACPI_METHOD_ARGUMENT_LENGTH(0);

    argsTotalSize =
        FIELD_OFFSET(ACPI_EVAL_INPUT_BUFFER_COMPLEX, Argument) + argsPayloadSize;

    // Parent the input memory to the device so it cannot outlive it.
    WDF_OBJECT_ATTRIBUTES_INIT(&memAttributes);
    memAttributes.ParentObject = wdfDevice;

    ntStatus = WdfMemoryCreate(&memAttributes,
                               NonPagedPoolNx,
                               0,
                               argsTotalSize,
                               &argsMemory,
                               (PVOID*) &complexInput);

    if (!NT_SUCCESS(ntStatus))
    {
        TRACE_ERROR(TRACE_FLAG_ACPI,
                    "[Device: 0x%p] WdfMemoryCreate failed for %Iu bytes - %!STATUS!",
                    wdfDevice,
                    argsTotalSize,
                    ntStatus);

        goto Exit;
    }

    RtlZeroMemory(complexInput, argsTotalSize);

    // Size counts only the argument payload, per ACPI_EVAL_INPUT_BUFFER_COMPLEX.
    complexInput->Signature = ACPI_EVAL_INPUT_BUFFER_COMPLEX_SIGNATURE;
    complexInput->Size = (ULONG) argsPayloadSize;
    complexInput->ArgumentCount = 4;
    complexInput->MethodNameAsUlong = (ULONG) 'MSD_';

    // Argument 0: the UCSI _DSM UUID.
    methodArg = &(complexInput->Argument[0]);
    ACPI_METHOD_SET_ARGUMENT_BUFFER(methodArg, &GUID_UCSI_DSM, sizeof(GUID_UCSI_DSM));

    // Argument 1: revision.
    methodArg = ACPI_METHOD_NEXT_ARGUMENT(methodArg);
    ACPI_METHOD_SET_ARGUMENT_INTEGER(methodArg, UCSI_DSM_REVISION);

    // Argument 2: which _DSM function to run.
    methodArg = ACPI_METHOD_NEXT_ARGUMENT(methodArg);
    ACPI_METHOD_SET_ARGUMENT_INTEGER(methodArg, FunctionIndex);

    // Argument 3: an empty package — no function-specific parameters.
    methodArg = ACPI_METHOD_NEXT_ARGUMENT(methodArg);
    methodArg->Type = ACPI_METHOD_ARGUMENT_PACKAGE_EX;
    methodArg->DataLength = 0;

    // Output buffer sized for a single integer result.
    resultPayloadSize = ACPI_METHOD_ARGUMENT_LENGTH(sizeof(ULONG));
    resultTotalSize =
        FIELD_OFFSET(ACPI_EVAL_OUTPUT_BUFFER, Argument) + resultPayloadSize;

    dsmResult = (PACPI_EVAL_OUTPUT_BUFFER)
        ExAllocatePoolWithTag(NonPagedPoolNx, resultTotalSize, TAG_UCSI);

    if (dsmResult == nullptr)
    {
        ntStatus = STATUS_INSUFFICIENT_RESOURCES;

        TRACE_ERROR(TRACE_FLAG_ACPI,
                    "[Device: 0x%p] ExAllocatePoolWithTag failed for %Iu bytes",
                    wdfDevice,
                    resultTotalSize);

        goto Exit;
    }

    RtlZeroMemory(dsmResult, resultTotalSize);

    WDF_MEMORY_DESCRIPTOR_INIT_HANDLE(&argsMemDesc, argsMemory, NULL);
    WDF_MEMORY_DESCRIPTOR_INIT_BUFFER(&resultMemDesc, dsmResult, (ULONG) resultTotalSize);

    // Send synchronously with a bounded timeout so a wedged ACPI method
    // cannot hang us forever.
    WDF_REQUEST_SEND_OPTIONS_INIT(&ioctlOptions, WDF_REQUEST_SEND_OPTION_SYNCHRONOUS);
    WDF_REQUEST_SEND_OPTIONS_SET_TIMEOUT(&ioctlOptions,
                                         WDF_REL_TIMEOUT_IN_MS(UCSI_DSM_EXECUTION_TIMEOUT_IN_MS));

    ntStatus = WdfIoTargetSendInternalIoctlSynchronously(
                   WdfDeviceGetIoTarget(wdfDevice),
                   NULL,
                   IOCTL_ACPI_EVAL_METHOD,
                   &argsMemDesc,
                   &resultMemDesc,
                   &ioctlOptions,
                   NULL);

    if (!NT_SUCCESS(ntStatus))
    {
        TRACE_ERROR(TRACE_FLAG_ACPI,
                    "[Device: 0x%p] IOCTL_ACPI_EVAL_METHOD for _DSM failed - %!STATUS!",
                    wdfDevice,
                    ntStatus);

        goto Exit;
    }

    // Sanity-check that ACPI actually filled in an output buffer.
    if (dsmResult->Signature != ACPI_EVAL_OUTPUT_BUFFER_SIGNATURE)
    {
        TRACE_ERROR(TRACE_FLAG_ACPI,
                    "[Device: 0x%p] ACPI_EVAL_OUTPUT_BUFFER signature is incorrect",
                    wdfDevice);

        ntStatus = STATUS_ACPI_INVALID_DATA;
        goto Exit;
    }

Exit:

    if (argsMemory != WDF_NO_HANDLE)
    {
        WdfObjectDelete(argsMemory);
    }

    if (NT_SUCCESS(ntStatus) && (Output != nullptr))
    {
        // Ownership of the result buffer transfers to the caller.
        *Output = dsmResult;
    }
    else if (dsmResult != nullptr)
    {
        ExFreePoolWithTag(dsmResult, TAG_UCSI);
    }

    TRACE_FUNC_EXIT(TRACE_FLAG_ACPI);

    return ntStatus;
}