time_t filetimeToUnixTime(const FILETIME *ft)
{
    if (!is_filetime_set(ft))
        return 0;

    // FILETIME counts 100 ns ticks since 1601-01-01; subtract the
    // 1601-to-1970 offset (116444736000000000 ticks) and scale to seconds.
    ULONGLONG ll = (ULONGLONG(ft->dwHighDateTime) << 32) + ft->dwLowDateTime;
    return time_t((ll - 116444736000000000LL) / 10000000LL);
}
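A minimal usage sketch: `is_filetime_set` is not shown in the snippet, so the definition below is an assumption (treating the all-zero FILETIME as "not set"); `printCurrentUnixTime` is a name invented here.

#include <windows.h>
#include <stdio.h>
#include <time.h>

// Hypothetical helper assumed by filetimeToUnixTime above: treat the
// all-zero FILETIME as "not set".
static bool is_filetime_set(const FILETIME *ft)
{
    return ft->dwLowDateTime != 0 || ft->dwHighDateTime != 0;
}

void printCurrentUnixTime()
{
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);      // 100 ns ticks since 1601-01-01
    time_t t = filetimeToUnixTime(&ft);  // seconds since 1970-01-01
    printf("unix time: %lld\n", (long long)t);
}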
// Function protecting GetTickCount result from rolling over,
// result is in [ms]
static ULONGLONG WINAPI
MozGetTickCount64()
{
  DWORD GTC = ::GetTickCount();

  // Cheaper than CMPXCHG8B
  AutoCriticalSection lock(&sTimeStampLock);

  // Pull the rollover counter forward only if the new value of GTC goes way
  // down under the last saved result
  if ((sLastGTCResult > GTC) && ((sLastGTCResult - GTC) > (1UL << 30)))
    ++sLastGTCRollover;

  sLastGTCResult = GTC;
  return ULONGLONG(sLastGTCRollover) << 32 | sLastGTCResult;
}
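A usage sketch under the snippet's assumptions (sTimeStampLock, sLastGTCResult, and sLastGTCRollover are shared statics initialized elsewhere); `doWork` is a hypothetical workload.

#include <stdio.h>

void timeWorkload()
{
    ULONGLONG start = MozGetTickCount64();
    doWork();                                     // hypothetical workload
    ULONGLONG elapsedMs = MozGetTickCount64() - start;
    printf("workload took %llu ms\n", elapsedMs); // monotonic across GTC wraps
}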
void IMemStream::freeMem()
{
    if( m_pbMem )
    {
        // decommit the committed pages
        BOOL b;
        if( m_ullSize > ULONGLONG(0) )
        {
            b = VirtualFree( m_pbMem, (SIZE_T) m_ullSize, MEM_DECOMMIT );
            ASSERT(b);
        }

        // free all pages
        b = VirtualFree( m_pbMem, 0, MEM_RELEASE );
        ASSERT(b);
        m_pbMem = NULL;
    }
    // WARNING: does not reset the buffer vars (size, pos, end)
}
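For context, a minimal sketch (assuming nothing beyond the Win32 API) of the reserve/commit lifecycle that freeMem tears down; note that MEM_RELEASE requires a size of 0 and frees the entire reservation.

#include <windows.h>

// Reserve 1 MiB of address space, commit the first 64 KiB, then tear it
// down the same way IMemStream::freeMem does: decommit, then release.
void virtualAllocLifecycleDemo()
{
    const SIZE_T reserveSize = 1 << 20;
    const SIZE_T commitSize  = 1 << 16;

    BYTE *p = (BYTE *)VirtualAlloc(NULL, reserveSize, MEM_RESERVE, PAGE_NOACCESS);
    if (!p)
        return;
    if (!VirtualAlloc(p, commitSize, MEM_COMMIT, PAGE_READWRITE)) {
        VirtualFree(p, 0, MEM_RELEASE);
        return;
    }

    p[0] = 42;  // the committed pages are now usable

    VirtualFree(p, commitSize, MEM_DECOMMIT);  // return physical storage
    VirtualFree(p, 0, MEM_RELEASE);            // size must be 0 for MEM_RELEASE
}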
// Function protecting GetTickCount result from rolling over, result is in [ms]
// @param now
//        Result of GetTickCount().  Passing it as an arg lets us call it
//        outside the common mutex.
static inline ULONGLONG
TickCount64(DWORD now)
{
  ULONGLONG lastResultHiPart = sLastGTCResult & (~0ULL << 32);
  ULONGLONG result = lastResultHiPart | ULONGLONG(now);

  // It may happen that when accessing GTC on multiple threads the results
  // may differ (GTC value may be lower due to running before the others
  // right around the overflow moment).  That falsely shifts the high part.
  // Easiest solution is to check for a significant difference.
  if (sLastGTCResult > result) {
    if ((sLastGTCResult - result) > (1ULL << 31))
      result += 1ULL << 32;
    else
      result = sLastGTCResult;
  }

  sLastGTCResult = result;
  return result;
}
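A single-threaded worked check of the carry logic, assuming the sLastGTCResult used by TickCount64 is a writable 64-bit static visible here: a raw reading just past a 32-bit wrap must land one high-part above the previous reading.

#include <assert.h>

void tickCount64WrapCheck()
{
    sLastGTCResult = 0x00000000FFFFFFF0ULL;     // just before a 32-bit wrap
    ULONGLONG after = TickCount64(0x00000005);  // raw counter already wrapped
    assert(after == 0x0000000100000005ULL);     // high part carried forward
}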
ULONGLONG CpuUsage::getUsageEx()
{
    ULONGLONG nCpuCopy = m_nCpuUsage;

    if (::InterlockedIncrement(&m_lRunCount) == 1)
    {
        if (!enoughTimePassed())
        {
            ::InterlockedDecrement(&m_lRunCount);
            return nCpuCopy;
        }

        ULONGLONG ullSysNonIdleTime  = getSystemNonIdleTimes();
        ULONGLONG ullProcNonIdleTime = getProcessNonIdleTimes();

        if (!isFirstRun())
        {
            ULONGLONG ullTotalSys = ullSysNonIdleTime - m_ullPrevSysNonIdleTime;
            if (ullTotalSys == 0)
            {
                ::InterlockedDecrement(&m_lRunCount);
                return nCpuCopy;
            }

            m_nCpuUsage = ULONGLONG((ullProcNonIdleTime - m_ullPrevProcNonIdleTime)
                                    * 100.0 / ullTotalSys);
            m_ullPrevSysNonIdleTime  = ullSysNonIdleTime;
            m_ullPrevProcNonIdleTime = ullProcNonIdleTime;
        }

        m_dwLastRun = (ULONGLONG)GetTickCount();
        nCpuCopy = m_nCpuUsage;
    }

    ::InterlockedDecrement(&m_lRunCount);
    return nCpuCopy;
}
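The heart of the update is a delta ratio; a hypothetical standalone rendering of just that arithmetic (cpuUsagePercent is a name invented here):

// Usage is the process's share of the system's non-idle time since the
// previous sample; both inputs must be in the same units (e.g. 100 ns
// FILETIME ticks).
ULONGLONG cpuUsagePercent(ULONGLONG sysPrev, ULONGLONG sysNow,
                          ULONGLONG procPrev, ULONGLONG procNow)
{
    ULONGLONG ullTotalSys = sysNow - sysPrev;
    if (ullTotalSys == 0)
        return 0;  // nothing elapsed; getUsageEx returns the cached value
    return ULONGLONG((procNow - procPrev) * 100.0 / ullTotalSys);
}
// e.g. cpuUsagePercent(0, 4000, 0, 1000) == 25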
void
diskdump_display_regs(int cpu, FILE *ofp)
{
	Elf32_Nhdr *note32;
	Elf64_Nhdr *note64;
	char *user_regs;
	size_t len;

	if (cpu >= NR_CPUS || dd->nt_prstatus_percpu[cpu] == NULL) {
		error(INFO, "registers not collected for cpu %d\n", cpu);
		return;
	}

	if (machine_type("X86_64")) {
		note64 = dd->nt_prstatus_percpu[cpu];
		len = sizeof(Elf64_Nhdr);
		len = roundup(len + note64->n_namesz, 4);
		len = roundup(len + note64->n_descsz, 4);
		user_regs = (char *)note64 + len - SIZE(user_regs_struct) - sizeof(long);

		fprintf(ofp,
		    "    RIP: %016llx  RSP: %016llx  RFLAGS: %08llx\n"
		    "    RAX: %016llx  RBX: %016llx  RCX: %016llx\n"
		    "    RDX: %016llx  RSI: %016llx  RDI: %016llx\n"
		    "    RBP: %016llx   R8: %016llx   R9: %016llx\n"
		    "    R10: %016llx  R11: %016llx  R12: %016llx\n"
		    "    R13: %016llx  R14: %016llx  R15: %016llx\n"
		    "    CS: %04x  SS: %04x\n",
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)),
		    ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)),
		    USHORT(user_regs + OFFSET(user_regs_struct_cs)),
		    USHORT(user_regs + OFFSET(user_regs_struct_ss)));
	}

	if (machine_type("X86")) {
		note32 = dd->nt_prstatus_percpu[cpu];
		len = sizeof(Elf32_Nhdr);
		len = roundup(len + note32->n_namesz, 4);
		len = roundup(len + note32->n_descsz, 4);
		user_regs = (char *)note32 + len - SIZE(user_regs_struct) - sizeof(int);

		fprintf(ofp,
		    "    EAX: %08x  EBX: %08x  ECX: %08x  EDX: %08x\n"
		    "    ESP: %08x  EIP: %08x  ESI: %08x  EDI: %08x\n"
		    "    CS: %04x  DS: %04x  ES: %04x  FS: %04x\n"
		    "    GS: %04x  SS: %04x\n"
		    "    EBP: %08x  EFLAGS: %08x\n",
		    UINT(user_regs + OFFSET(user_regs_struct_eax)),
		    UINT(user_regs + OFFSET(user_regs_struct_ebx)),
		    UINT(user_regs + OFFSET(user_regs_struct_ecx)),
		    UINT(user_regs + OFFSET(user_regs_struct_edx)),
		    UINT(user_regs + OFFSET(user_regs_struct_esp)),
		    UINT(user_regs + OFFSET(user_regs_struct_eip)),
		    UINT(user_regs + OFFSET(user_regs_struct_esi)),
		    UINT(user_regs + OFFSET(user_regs_struct_edi)),
		    USHORT(user_regs + OFFSET(user_regs_struct_cs)),
		    USHORT(user_regs + OFFSET(user_regs_struct_ds)),
		    USHORT(user_regs + OFFSET(user_regs_struct_es)),
		    USHORT(user_regs + OFFSET(user_regs_struct_fs)),
		    USHORT(user_regs + OFFSET(user_regs_struct_gs)),
		    USHORT(user_regs + OFFSET(user_regs_struct_ss)),
		    UINT(user_regs + OFFSET(user_regs_struct_ebp)),
		    UINT(user_regs + OFFSET(user_regs_struct_eflags)));
	}
}
std::string rippleCommandSignedXRPPayment(
    const std::string& addressFrom,
    const std::string& secretFrom,
    unsigned int sequence,
    const std::string& addressTo,
    double xrp,
    double fee,
    std::string& outTransactionHash)
{
    // Amounts go on the wire in drops: 1 XRP == 1,000,000 drops.
    std::string tx_blob = sign::CreateSendXRPTransaction(
        addressFrom, secretFrom, addressTo,
        ULONGLONG(xrp * ULONGLONG(1000000)),
        ULONGLONG(fee * ULONGLONG(1000000)),
        sequence, outTransactionHash);

    std::string result = "{\"command\": \"submit\", \"tx_blob\": \"" + tx_blob + "\"}";
    return result;
}
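The ULONGLONG cast above truncates, and the double product of a fractional XRP amount can land a hair below the exact drop count; a hedged sketch of a rounding conversion (xrpToDrops is a helper name invented here, not part of the original code):

// Hypothetical helper: convert an XRP amount to integer drops, rounding to
// nearest so floating-point error in the product cannot lose a drop.
ULONGLONG xrpToDrops(double xrp)
{
    return ULONGLONG(xrp * 1000000.0 + 0.5);
}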
ULONGLONG CMSFileTime::GetTime() const throw()
{
    return (ULONGLONG(dwHighDateTime) << 32) | dwLowDateTime;
}
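The inverse operation is the obvious split; a hypothetical companion setter for symmetry (not present in the original class):

// Split a 64-bit tick count back into the two FILETIME halves.
void CMSFileTime_SetTime(FILETIME &ft, ULONGLONG t)
{
    ft.dwLowDateTime  = DWORD(t & 0xFFFFFFFF);
    ft.dwHighDateTime = DWORD(t >> 32);
}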
ULONGLONG ZipArchiveFile::Seek(LONGLONG llOffset, ESeekOrigin origin)
{
    if (AtEndOfStream())
    {
        // seek beyond end of file
        return m_uiUncompressedSize;
    }

    // calc new position
    ULONGLONG ullNewPos = m_ullCurrentPos;
    switch (origin)
    {
    case seekBegin:
        ullNewPos = llOffset;
        break;
    case seekCurrent:
        ullNewPos += llOffset;
        break;
    case seekEnd:
        ullNewPos = ULONGLONG(m_uiUncompressedSize) - llOffset;
        break;
    default:
        ATLASSERT(false);
        break;
    }

    if (ullNewPos >= m_uiUncompressedSize)
    {
        // just seeked to end of file
        Close();
        return m_uiUncompressedSize;
    }

    // could be converted into rewind + forward skip
    if (ullNewPos < m_ullCurrentPos)
        throw Exception(_T("zip file archive stream can't be seeked backwards"),
            __FILE__, __LINE__);

    ULONGLONG ullBytesToSkip = ullNewPos - m_ullCurrentPos;
    if (ullBytesToSkip == 0)
        return m_ullCurrentPos;

    // skip over bytes
    do
    {
        // consume out buffer
        if (ullBytesToSkip < m_vecOutBuffer.size())
        {
            m_vecOutBuffer.erase(m_vecOutBuffer.begin(),
                m_vecOutBuffer.begin() + static_cast<size_t>(ullBytesToSkip));
            ullBytesToSkip = 0;
        }
        else if (ullBytesToSkip == m_vecOutBuffer.size())
        {
            m_vecOutBuffer.clear();
            ullBytesToSkip = 0;
        }
        else
        {
            // buffer doesn't cover the skip; consume it all and decode more
            ullBytesToSkip -= m_vecOutBuffer.size();
            m_vecOutBuffer.clear();
        }

        // when not enough, decode more input
        if (ullBytesToSkip > 0)
        {
            FillInputBuffer();

            // read in at most 16k at a time
            DWORD dwBytesToRead = static_cast<DWORD>(
                std::min<ULONGLONG>(ullBytesToSkip, c_dwSkipBufferSize));
            FillOutBuffer(dwBytesToRead);
        }
    } while (ullBytesToSkip > 0);

    m_ullCurrentPos = ullNewPos;
    return m_ullCurrentPos;
}
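A usage sketch under the class's constraints: forward seeks decompress and discard, while a backward seek throws, so callers should record positions they may need again (readPayload and headerSize are illustrative names, not part of the original API).

// Hypothetical caller: skip a fixed-size header, then read the payload.
void readPayload(ZipArchiveFile &file)
{
    const ULONGLONG headerSize = 128;
    ULONGLONG pos = file.Seek(headerSize, seekBegin);  // decompress-and-discard
    ATLASSERT(pos == headerSize);
    // ... consume the payload from here; never Seek() back before `pos` ...
}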
BOOL HeapWalkHelper(Object * pBO, void * pvContext)
{
    OBJECTREF * arrObjRef = NULL;
    size_t cNumRefs = 0;
    bool bOnStack = false;
    //MethodTable * pMT = pBO->GetMethodTable();

    ProfilerWalkHeapContext * pProfilerWalkHeapContext = (ProfilerWalkHeapContext *) pvContext;

    //if (pMT->ContainsPointersOrCollectible())
    {
        // First round through calculates the number of object refs for this class
        GCHeap::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);

        if (cNumRefs > 0)
        {
            // Create an array to contain all of the refs for this object
            bOnStack = cNumRefs <= 32 ? true : false;
            if (bOnStack)
            {
                // It's small enough, so just allocate on the stack
                arrObjRef = (OBJECTREF *)_alloca(cNumRefs * sizeof(OBJECTREF));
            }
            else
            {
                // Otherwise, allocate from the heap
                arrObjRef = new (nothrow) OBJECTREF[cNumRefs];

                if (!arrObjRef)
                {
                    return FALSE;
                }
            }

            // Second round saves off all of the ref values
            OBJECTREF * pCurObjRef = arrObjRef;
            GCHeap::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
        }
    }

    HRESULT hr = E_FAIL;

#ifdef FEATURE_ETW
    if (ETW::GCLog::ShouldWalkHeapObjectsForEtw())
    {
        ETW::GCLog::ObjectReference(
            pProfilerWalkHeapContext,
            pBO,
            ULONGLONG(pBO->get_SafeEEType()),
            cNumRefs,
            (Object **) arrObjRef);
    }
#endif // FEATURE_ETW

    // If the data was not allocated on the stack, need to clean it up.
    if ((arrObjRef != NULL) && !bOnStack)
    {
        delete [] arrObjRef;
    }

    // Return TRUE iff we want the heap walk to continue. The only way we'd abort the
    // heap walk is if we're issuing profapi callbacks, and the profapi profiler
    // intentionally returned a failed HR (as its request that we stop the walk). There's
    // a potential conflict here. If a profapi profiler and an ETW profiler are both
    // monitoring the heap dump, and the profapi profiler requests to abort the walk (but
    // the ETW profiler may not want to abort the walk), then what do we do? The profapi
    // profiler gets precedence. We don't want to accidentally send more callbacks to a
    // profapi profiler that explicitly requested an abort. The ETW profiler will just
    // have to deal. In theory, I could make the code more complex by remembering that a
    // profapi profiler requested to abort the dump but an ETW profiler is still
    // attached, and then intentionally inhibit the remainder of the profapi callbacks
    // for this GC. But that's unnecessary complexity. In practice, it should be
    // extremely rare that a profapi profiler is monitoring heap dumps AND an ETW
    // profiler is also monitoring heap dumps.
    return TRUE;
}