bool TSerialPort::Select(const std::chrono::microseconds& us)
{
    fd_set rfds;
    struct timeval tv, *tvp = 0;

#if 0
    // too verbose
    if (Dbg)
        std::cerr << "Select on " << Settings.Device << ": " << us.count() << " us" << std::endl;
#endif

    FD_ZERO(&rfds);
    FD_SET(Fd, &rfds);
    if (us.count() > 0) {
        tv.tv_sec = us.count() / 1000000;
        tv.tv_usec = us.count() % 1000000;
        tvp = &tv;
    }

    int r = select(Fd + 1, &rfds, NULL, NULL, tvp);
    if (r < 0)
        throw TSerialDeviceException("select() failed");

    return r > 0;
}
gint64 GSourceWrap::targetTimeForDelay(std::chrono::microseconds delay)
{
    if (!delay.count())
        return 0;

    gint64 currentTime = g_get_monotonic_time();
    gint64 targetTime = currentTime + std::min<gint64>(G_MAXINT64 - currentTime, delay.count());
    ASSERT(targetTime >= currentTime);
    return targetTime;
}
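// A minimal usage sketch, not part of the original source: a monotonic target time
// like the one computed above is what GLib expects in g_source_set_ready_time(),
// where 0 means "dispatch as soon as possible" and -1 means "never ready".
// Assumptions: targetTimeForDelay() is callable as a static helper, and `source`
// is a GSource* the caller already owns.
static void armSourceAfterDelay(GSource* source, std::chrono::microseconds delay)
{
    // A zero delay yields 0 from targetTimeForDelay(), which GLib also treats as
    // "ready now", so the two conventions line up.
    g_source_set_ready_time(source, GSourceWrap::targetTimeForDelay(delay));
}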
#include <chrono>
#include <cassert>

int main()
{
    {
        std::chrono::milliseconds ms(1);
        std::chrono::microseconds us = ms;
        assert(us.count() == 1000);
    }
#if TEST_STD_VER >= 11
    {
        constexpr std::chrono::milliseconds ms(1);
        constexpr std::chrono::microseconds us = ms;
        static_assert(us.count() == 1000, "");
    }
#endif
}
bool SocketImpl::Poll(const std::chrono::microseconds& timeout, int mode)
{
    assert(INVALID_SOCKET != m_sockfd);

    fd_set fdRead;
    fd_set fdWrite;
    fd_set fdExcept;
    FD_ZERO(&fdRead);
    FD_ZERO(&fdWrite);
    FD_ZERO(&fdExcept);

    if (mode & SELECT_READ) {
        FD_SET(m_sockfd, &fdRead);
    }
    if (mode & SELECT_WRITE) {
        FD_SET(m_sockfd, &fdWrite);
    }
    if (mode & SELECT_ERROR) {
        FD_SET(m_sockfd, &fdExcept);
    }

    struct timeval tv;
    tv.tv_sec  = (long)std::chrono::duration_cast<std::chrono::seconds>(timeout).count();
    tv.tv_usec = (long)(timeout.count() % 1000000);

    return select(int(m_sockfd) + 1, &fdRead, &fdWrite, &fdExcept, &tv) > 0;
}
template<class BaseClock>
void UnitTestClock<BaseClock>::setNow(const nanoseconds& timeSinceEpoch)
{
    BOOST_ASSERT(boost::posix_time::microseconds(SLEEP_AFTER_TIME_CHANGE.count()) >
                 boost::asio::time_traits<steady_clock>::to_posix_duration(timeSinceEpoch - m_currentTime));

    m_currentTime = timeSinceEpoch;

    std::this_thread::sleep_for(SLEEP_AFTER_TIME_CHANGE);
}
void tcapplication::update(std::chrono::microseconds delta)
{
    double const     count    = static_cast<double>(delta.count());
    double constexpr timestep = 16000;

    g_xrot += 0.3 * count / timestep;
    g_yrot += 0.2 * count / timestep;
    g_zrot += 0.4 * count / timestep;
}
void cbPing(const vssp::header &header, const std::chrono::microseconds &delayRead)
{
    ros::Time now = ros::Time::now() - ros::Duration(delayRead.count() * 0.001 * 0.001);
    ros::Duration delay =
        ((now - timePing) -
         ros::Duration(header.send_time_ms * 0.001 - header.received_time_ms * 0.001)) * 0.5;
    ros::Time base = timePing + delay - ros::Duration(header.received_time_ms * 0.001);

    if (timestampBase == ros::Time(0))
        timestampBase = base;
    else
        timestampBase += (base - timestampBase) * 0.01;
}
std::chrono::microseconds SYNCHRONISED(const std::chrono::microseconds data)
{
#if defined(SUPPORT_MPI)
    unsigned int synchronisedData = static_cast<unsigned int>(data.count());
    MPI_Bcast(&synchronisedData, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
    return std::chrono::microseconds(synchronisedData);
#else
    return data;
#endif
}
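// Note: narrowing the count to unsigned int truncates any duration longer than
// roughly 71 minutes (2^32 microseconds). A hedged alternative sketch follows,
// assuming the same headers as the original (<mpi.h>, <chrono>) and an MPI
// implementation providing MPI_LONG_LONG; the name SYNCHRONISED_64 is
// illustrative only, not part of the original API.
std::chrono::microseconds SYNCHRONISED_64(const std::chrono::microseconds data)
{
#if defined(SUPPORT_MPI)
    long long count = data.count();                          // keep full 64-bit range
    MPI_Bcast(&count, 1, MPI_LONG_LONG, 0, MPI_COMM_WORLD);  // broadcast from rank 0
    return std::chrono::microseconds(count);
#else
    return data;
#endif
}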
void Matrix::update(Input *input, std::chrono::microseconds delta)
{
    if (getParent()->getComponent<Menu>()->getTime() < 0)
        return;

    m_timeToGlobalRun -= delta.count() / 1000000.0;
    if (m_timeToGlobalRun < 0) {
        if (solve())
            m_timeToGlobalRun = 0.5;
        else
            m_timeToGlobalRun = FLT_MAX;
    }
}
void Watchdog::startTimer(std::chrono::microseconds limit)
{
    JSC_GETJAVAENV_CHKRET(env);

    static jmethodID mid = env->GetMethodID(GetWatchdogTimerClass(env), "fwkStart", "(D)V");
    ASSERT(mid);

    env->CallVoidMethod(m_timer, mid, (jdouble) (limit.count() / (1000.0 * 1000.0)));
    CheckAndClearException(env);
}
void APIC_Timer::oneshot(std::chrono::microseconds micros) noexcept
{
    // prevent overflow
    uint64_t ticks = micros.count() * ticks_per_micro;
    if (ticks > 0xFFFFFFFF)
        ticks = 0xFFFFFFFF;

    // set initial counter
    auto& lapic = APIC::get();
    lapic.timer_begin(ticks);

    // re-enable interrupts if disabled
    if (GET_TIMER().intr_enabled == false) {
        GET_TIMER().intr_enabled = true;
        lapic.timer_interrupt(true);
    }
}
void GMainLoopSource::scheduleAfterDelay(const char* name, std::function<bool ()> function,
    std::chrono::microseconds delay, int priority, std::function<void ()> destroyFunction,
    GMainContext* context)
{
    cancel();

    ASSERT(!m_context.source);
    m_context = {
        adoptGRef(createMicrosecondsTimeoutSource(delay.count())),
        nullptr, // cancellable
        nullptr, // socketCancellable
        nullptr, // voidCallback
        WTF::move(function),
        nullptr, // socketCallback
        WTF::move(destroyFunction)
    };

    scheduleTimeoutSource(name, reinterpret_cast<GSourceFunc>(boolSourceCallback), priority, context);
}
Status PipeWindows::ReadWithTimeout(void *buf, size_t size,
                                    const std::chrono::microseconds &duration,
                                    size_t &bytes_read) {
  if (!CanRead())
    return Status(ERROR_INVALID_HANDLE, eErrorTypeWin32);

  bytes_read = 0;
  DWORD sys_bytes_read = size;
  BOOL result = ::ReadFile(m_read, buf, sys_bytes_read, &sys_bytes_read,
                           &m_read_overlapped);
  if (!result && GetLastError() != ERROR_IO_PENDING)
    return Status(::GetLastError(), eErrorTypeWin32);

  // WaitForSingleObject takes milliseconds, so convert down from microseconds.
  DWORD timeout = (duration == std::chrono::microseconds::zero())
                      ? INFINITE
                      : static_cast<DWORD>(
                            std::chrono::duration_cast<std::chrono::milliseconds>(duration).count());
  DWORD wait_result = ::WaitForSingleObject(m_read_overlapped.hEvent, timeout);
  if (wait_result != WAIT_OBJECT_0) {
    // The operation probably failed. However, if it timed out, we need to
    // cancel the I/O. Between the time we returned from WaitForSingleObject
    // and the time we call CancelIoEx, the operation may complete. If that
    // happens, CancelIoEx will fail and return ERROR_NOT_FOUND. If that
    // happens, the original operation should be considered to have been
    // successful.
    bool failed = true;
    DWORD failure_error = ::GetLastError();
    if (wait_result == WAIT_TIMEOUT) {
      BOOL cancel_result = CancelIoEx(m_read, &m_read_overlapped);
      if (!cancel_result && GetLastError() == ERROR_NOT_FOUND)
        failed = false;
    }
    if (failed)
      return Status(failure_error, eErrorTypeWin32);
  }

  // Now we call GetOverlappedResult setting bWait to false, since we've
  // already waited as long as we're willing to.
  if (!GetOverlappedResult(m_read, &m_read_overlapped, &sys_bytes_read, FALSE))
    return Status(::GetLastError(), eErrorTypeWin32);

  bytes_read = sys_bytes_read;
  return Status();
}
ssize_t socket_send(int sock, uint8_t *buf, size_t len)
{
    ssize_t ret = super::socket_send(sock, buf, len);

    m_bytes += ret - super::header_len();

    if (m_start == timestamp())
        m_start = timer::now();

    m_end = timer::now();

    if (!m_interval.count())
        return ret;

    wait();
    m_timestamp_last = timer::now();

    return ret;
}
template<class BaseClock>
void UnitTestClock<BaseClock>::advance(const nanoseconds& duration)
{
    m_currentTime += duration;

    // On some platforms, boost::asio::io_service for deadline_timer (e.g., the one used in
    // Scheduler) will call time_traits<>::now() and will "sleep" for
    // time_traits<>::to_posix_time(duration) period before calling time_traits<>::now()
    // again. (Note that such "sleep" will occur even if there is no actual waiting and
    // program is calling io_service.poll().)
    //
    // As a result, in order for the clock advancement to be effective, we must sleep for a
    // period greater than time_traits<>::to_posix_time().
    //
    // See also http://blog.think-async.com/2007/08/time-travel.html
    BOOST_ASSERT(boost::posix_time::microseconds(SLEEP_AFTER_TIME_CHANGE.count()) >
                 boost::asio::time_traits<steady_clock>::to_posix_duration(duration));

    std::this_thread::sleep_for(SLEEP_AFTER_TIME_CHANGE);
}
void TextOutput::finishSuite(const std::string& suiteName, const unsigned int numTests,
    const unsigned int numPositiveTests, const std::chrono::microseconds totalDuration)
{
    if (mode <= Verbose || numTests != numPositiveTests)
        stream << "Suite '" << suiteName << "' finished, " << numPositiveTests << '/' << numTests
               << " successful (" << prettifyPercentage(numPositiveTests, numTests) << "%) in "
               << totalDuration.count() << " microseconds (" << totalDuration.count() / 1000.0
               << " ms)." << std::endl;
}
stringstream &cal_get_res()
{
#ifdef DEBUG2
    for (int i = 0; i < planeVecA.size(); i++)
        cout << planeVecA[i];
    cout << endl;
    for (int i = 0; i < 16; i++)
        cout << resAssign[i] << " ";
    cout << endl;
    for (int i = 0; i < 16; i++)
        cout << resAssignGrid[i] << " ";
    cout << endl;
    cout << curBomberNum << endl;
#endif

    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 4; j++) {
            answer_c[i][j] = " - ";
            answer_n[i][j] = " - ";
        }

    int f = 0;
    for (int i = 0; resSign[i] != '\0'; i++) {
        Grid &tGrid = gridVec[i];
        if (resSign[i] == '1') {
            answer_c[tGrid.carrierPos][tGrid.gridPos] = planeVecF[f].name;
            answer_n[tGrid.carrierPos][tGrid.gridPos] = " - ";
            f++;
        }
    }

    int shipHit[6];
    float shipAtk[6];
    memset(shipAtk, 0, sizeof(shipAtk));
    memset(shipHit, 0, sizeof(shipHit));

    for (int i = 0; i < resBomberNum; i++) {
        answer_c[resAssign[i]][resAssignGrid[i]] = ResPlaneVecA[i].name;
        answer_n[resAssign[i]][resAssignGrid[i]] =
            formula_damageOP_str(theCarrier[resAssign[i]].gridSize[resAssignGrid[i]], ResPlaneVecA[i]);
        shipAtk[resAssign[i]] += formulaDamage(ResPlaneVecA[i]);
        shipHit[resAssign[i]] += ResPlaneVecA[i].accuracy;
    }

    stringstream &stmp = *(new stringstream());
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 4; j++)
            stmp << answer_c[i][j] << "|";
        stmp << endl;
    }
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 4; j++)
            stmp << answer_n[i][j] << "|";
        if (shipAtk[i] == 0) {
            stmp << " - | - |" << endl;
            continue;
        }
        int rd = formula_shipAtk_rawDamage(shipAtk[i], theCarrier[i].atk);
        stmp << formula_shipAtk_cocurHeading(rd) << '|' << formula_shipAtk_invertHeading(rd) << '|' << endl;
    }

    cout << "AS: " << resAS << " Atk: " << resAtk << " time: " << (restime.count() / 1000.0) << " ms" << endl;
    return stmp;
}
void baoliSearch(int curlv, int remainF, int remainA, int gridSz)
{
    if (curlv == gridSz || (remainF == 0 && remainA == 0)) { // recursion base case
        restime = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - logtime);
        if (restime.count() > TLE * 1000000) {
            cout << "ERROR:TLE" << endl;
            exit(0);
        }

        int tAssignN[4] = {0, 0, 0, 0};
        getAssignN(tAssignN, gridSz); // save the number of available grids per ship

        BelongStructure &curres = dpRes[tAssignN[0]][tAssignN[1]][tAssignN[2]][tAssignN[3]];
        vector<int *> &tlist = curres.belongings;
        int i = 0;
        for (vector<int *>::iterator iter = tlist.begin(), end = tlist.end(); iter != end; ++iter) {
            getCurAssign(iter);
            float newAtk = curres.as[i++] + op_coef * calOPAtk(gridSz);
            if (newAtk > resAtk) {
                int newAS = calAS(gridSz) + calASofBomber(gridSz);
                if (newAS >= tarAirSupremacy) {
                    // write current data to the result
                    resAtk = newAtk;
                    resAS = newAS;
                    resBomberNum = curBomberNum - remainA;
                    copySign2Res(resSign, gridSz);
                    copyCurAssign2Res(resAssign, resAssignGrid);
                    if (flushFlag) {
                        ResPlaneVecA = planeVecA;
                        flushFlag = 0;
                    }
                }
            }
        }
        return;
    }

    if (remainF) { // try placing a fighter; check the penalty case
        sign[curlv] = 1;
        baoliSearch(curlv + 1, remainF - 1, remainA, gridSz);
        sign[curlv] = 0;
    }
    if (remainA) { // try placing a bomber (attack aircraft)
        sign[curlv] = 2;
        if (cutF(curlv + 1, remainF, gridSz)) {
            if (gridVec[curlv].isFighterOnly) {
                sign[curlv] = 0;
                baoliSearch(curlv + 1, remainF, remainA, gridSz);
            } else {
                baoliSearch(curlv + 1, remainF, remainA - 1, gridSz);
            }
        }
        sign[curlv] = 0;
    }
}
inline void portable_sleep(std::chrono::microseconds const& time)
{
    native_sleep(time.count());
}
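// A hedged sketch, not from the original source: the call above suggests
// native_sleep() takes a raw microsecond count. A standard-library-only
// equivalent can pass the chrono duration through directly; the name
// portable_sleep_std is illustrative only.
#include <chrono>
#include <thread>

inline void portable_sleep_std(std::chrono::microseconds const& time)
{
    // sleep_for accepts any std::chrono duration, so no .count() conversion is needed
    std::this_thread::sleep_for(time);
}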
// Given the tempo, convert a time to a beat value
Beats microsToBeats(const std::chrono::microseconds micros) const
{
    return Beats{micros.count() / static_cast<double>(microsPerBeat().count())};
}
Tempo(const std::chrono::microseconds microsPerBeat)
    : std::tuple<double>(60. * 1e6 / microsPerBeat.count())
{
}
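// An illustrative usage sketch, not from the original source. Assumptions: Tempo
// is constructible and copyable as shown, and microsPerBeat() returns the beat
// length it was constructed with. At 500000 us per beat the constructor above
// yields 60e6 / 500000 = 120 BPM, and one second of time spans
// 1000000 / 500000 = 2 beats.
void tempoExample()
{
    using namespace std::chrono;
    const microseconds beatLength{500000};   // 120 BPM
    const Tempo tempo{beatLength};
    const Beats beats = tempo.microsToBeats(microseconds{1000000}); // expect 2.0 beats
    (void)beats;
}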
namespace time {

const std::chrono::microseconds SLEEP_AFTER_TIME_CHANGE(2);

template<class BaseClock>
UnitTestClock<BaseClock>::UnitTestClock(const nanoseconds& startTime) :
    m_currentTime(startTime)
{
}

template<class BaseClock>
std::string
UnitTestClock<BaseClock>::getSince() const
{
    return " since unit test clock advancements";
}

template<class BaseClock>
typename BaseClock::time_point
UnitTestClock<BaseClock>::getNow() const
{
    return typename BaseClock::time_point(duration_cast<typename BaseClock::duration>(m_currentTime));
}

template<class BaseClock>
boost::posix_time::time_duration
UnitTestClock<BaseClock>::toPosixDuration(const typename BaseClock::duration& duration) const
{
    return
#ifdef BOOST_DATE_TIME_HAS_NANOSECONDS
        boost::posix_time::nanoseconds(1)
#else
        boost::posix_time::microseconds(1)
#endif
        ;
}

template<class BaseClock>
void
UnitTestClock<BaseClock>::advance(const nanoseconds& duration)
{
    m_currentTime += duration;

    // On some platforms, boost::asio::io_service for deadline_timer (e.g., the one used in
    // Scheduler) will call time_traits<>::now() and will "sleep" for
    // time_traits<>::to_posix_time(duration) period before calling time_traits<>::now()
    // again. (Note that such "sleep" will occur even if there is no actual waiting and
    // program is calling io_service.poll().)
    //
    // As a result, in order for the clock advancement to be effective, we must sleep for a
    // period greater than time_traits<>::to_posix_time().
    //
    // See also http://blog.think-async.com/2007/08/time-travel.html
    BOOST_ASSERT(boost::posix_time::microseconds(SLEEP_AFTER_TIME_CHANGE.count()) >
                 boost::asio::time_traits<steady_clock>::to_posix_duration(duration));

    std::this_thread::sleep_for(SLEEP_AFTER_TIME_CHANGE);
}

template<class BaseClock>
void
UnitTestClock<BaseClock>::setNow(const nanoseconds& timeSinceEpoch)
{
    BOOST_ASSERT(boost::posix_time::microseconds(SLEEP_AFTER_TIME_CHANGE.count()) >
                 boost::asio::time_traits<steady_clock>::to_posix_duration(timeSinceEpoch - m_currentTime));

    m_currentTime = timeSinceEpoch;

    std::this_thread::sleep_for(SLEEP_AFTER_TIME_CHANGE);
}

template class UnitTestClock<system_clock>;
template class UnitTestClock<steady_clock>;

} // namespace time
timeval getTimevalFromDuration(const std::chrono::microseconds &t)
{
    timeval retval;
    retval.tv_sec  = std::chrono::duration_cast<std::chrono::seconds>(t).count();
    retval.tv_usec = t.count() % 1000000;
    return retval;
}
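// A minimal usage sketch, not from the original source: the helper above plugs
// directly into the timeout argument of select(). `fd` and waitReadable() are
// placeholders for illustration.
#include <sys/select.h>
#include <chrono>

bool waitReadable(int fd, std::chrono::microseconds timeout)
{
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);

    // e.g. 1500000 us -> { tv_sec = 1, tv_usec = 500000 }
    timeval tv = getTimevalFromDuration(timeout);
    return select(fd + 1, &rfds, nullptr, nullptr, &tv) > 0;
}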
int TSerialPort::ReadFrame(uint8_t* buf, int size,
                           const std::chrono::microseconds& timeout,
                           TFrameCompletePred frame_complete)
{
    CheckPortOpen();
    int nread = 0;
    while (nread < size) {
        if (frame_complete && frame_complete(buf, nread)) {
            // XXX A hack.
            // The problem is that if we don't pause here and the
            // serial client switches to another device after
            // processing this frame, that device may miss the frame
            // boundary and consider the last response (from this
            // device) and the query (from the master) to be a single
            // frame. On the other hand, we don't want to use a
            // device-specific frame timeout here as it can be quite
            // long. The proper solution would perhaps be ensuring
            // that there's a pause of at least
            // DeviceConfig->FrameTimeoutMs before polling each
            // device.
            usleep(DefaultFrameTimeout.count());
            break;
        }

        if (!Select(!nread ? Settings.ResponseTimeout :
                    timeout.count() < 0 ? DefaultFrameTimeout : timeout))
            break; // end of the frame

        // We don't want to use non-blocking IO in general
        // (e.g. we want blocking writes), but we don't want the
        // read() call below to block because the actual frame
        // size is not known at this point. So we must
        // know how many bytes are available.
        int nb;
        if (ioctl(Fd, FIONREAD, &nb) < 0)
            throw TSerialDeviceException("FIONREAD ioctl() failed");
        if (!nb)
            continue; // shouldn't happen, actually
        if (nb > size - nread)
            nb = size - nread;

        int n = read(Fd, buf + nread, nb);
        if (n < 0)
            throw TSerialDeviceException("read() failed");
        if (n < nb) // may happen only due to a kernel/driver bug
            throw TSerialDeviceException("short read()");

        nread += nb;
    }

    if (!nread)
        throw TSerialDeviceTransientErrorException("request timed out");

    if (Dbg) {
        // TBD: move this to libwbmqtt (HexDump?)
        std::ios::fmtflags f(std::cerr.flags());
        std::cerr << "ReadFrame:" << std::hex << std::uppercase << std::setfill('0');
        for (int i = 0; i < nread; ++i) {
            std::cerr << " " << std::setw(2) << int(buf[i]);
        }
        std::cerr << std::endl;
        std::cerr.flags(f);
    }
    return nread;
}
void setFilterTime(std::chrono::microseconds _time)
{
    setFilterSize((m_sampleRate * _time.count()) / 1000000LL);
}
void TSerialPort::Sleep(const std::chrono::microseconds& us)
{
    usleep(us.count());
}