void
PipelineInterests::handleData(const Interest& interest, const Data& data)
{
  BOOST_ASSERT(data.getName().equals(interest.getName()));

  uint64_t recv_segno = data.getName()[-1].toSegment();
  if (m_highData < recv_segno) {
    m_highData = recv_segno;
  }

  shared_ptr<SegmentInfo> seg_info = m_segmentInfoMap[recv_segno];
  BOOST_ASSERT(seg_info != nullptr);
  if (seg_info->state == retransmitReceived) {
    m_segmentInfoMap.erase(recv_segno);
    return; // ignore already-received segment
  }

  if (m_options.isVerbose) {
    duration_in_ms rtt = time::steady_clock::now() - seg_info->timeSent;
    std::cerr << "Received segment #" << recv_segno
              << ", rtt=" << rtt.count() << "ms"
              << ", rto=" << seg_info->rto << "ms" << std::endl;
  }

  // for segments in the retransmission queue there is no need to decrement m_inFlight,
  // since it was already decremented when the segment timed out
  if (seg_info->state != inRetxQueue && m_inFlight > 0) {
    m_inFlight--;
  }

  m_numOfSegmentReceived++;
  adjustCwnd();
  m_onData(interest, data);

  if (seg_info->state == firstTimeSent || seg_info->state == inRetxQueue) {
    // do not sample RTT for segments that were actually retransmitted
    m_rttEstimator.rttMeasurement(recv_segno, seg_info->timeSent, seg_info->rto);
    m_segmentInfoMap.erase(recv_segno); // remove the entry associated with the received segment
  }
  else { // retransmission
    seg_info->state = retransmitReceived;
  }

  if (allSegmentsReceived()) {
    printSummary();
    if (m_options.keepStats) {
      writeStats();
      m_rttEstimator.writeStats();
    }
    cancel();
  }
  else {
    schedulePackets();
  }
}
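// adjustCwnd() is referenced above but not shown in this listing. The multiplicative
// decrease in handleTimeout() suggests an AIMD scheme, so a minimal sketch of the
// increase side could look like the following; the additive-increase step
// m_options.aiStep is an assumption, not a member shown in this listing.
void
PipelineInterests::adjustCwnd()
{
  if (m_cwnd < m_ssthresh) {
    m_cwnd += m_options.aiStep; // slow start: one step per received segment
  }
  else {
    m_cwnd += m_options.aiStep / std::floor(m_cwnd); // congestion avoidance: ~one step per RTT
  }
}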
void
PipelineInterests::handleDataFirstSegment(const Interest& interest, const Data& data)
{
  BOOST_ASSERT(data.getName().equals(interest.getName()));

  uint64_t recv_segno = data.getName()[-1].toSegment();
  if (m_highData < recv_segno) {
    m_highData = recv_segno; // record the highest segment number received so far
  }
  m_segmentSize = data.getContent().value_size(); // get segment size

  shared_ptr<SegmentInfo> seg_info = m_segmentInfoMap[recv_segno];

  if (m_options.isVerbose) {
    duration_in_ms rtt = time::steady_clock::now() - seg_info->timeSent;
    std::cerr << "Received the first segment #" << recv_segno
              << ", rtt=" << rtt.count() << "ms"
              << ", rto=" << seg_info->rto << "ms" << std::endl;
  }

  // initiate RTT measurement
  m_rttEstimator.rttMeasurementFirstTime(recv_segno, seg_info->timeSent, seg_info->rto);

  m_onData(interest, data);

  if (m_inFlight > 0)
    m_inFlight--;
  m_numOfSegmentReceived++;
  m_segmentInfoMap.erase(recv_segno);
  adjustCwnd();

  if (allSegmentsReceived())
    cancel();
  else
    schedulePackets();
}
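// rttMeasurementFirstTime() seeds the RTT estimator from the very first sample.
// A minimal sketch following RFC 6298 is shown below; the estimator members
// (m_sRtt, m_rttVar, m_rto) and the exact signature are assumptions, not the
// actual RttEstimator interface used above.
void
RttEstimator::rttMeasurementFirstTime(uint64_t segno, time::steady_clock::TimePoint timeSent, double rto)
{
  duration_in_ms rtt = time::steady_clock::now() - timeSent;
  m_sRtt = rtt.count();          // SRTT <- R
  m_rttVar = m_sRtt / 2;         // RTTVAR <- R/2
  m_rto = m_sRtt + 4 * m_rttVar; // RTO <- SRTT + 4 * RTTVAR
  (void)segno;                   // segno/rto could additionally feed per-segment statistics
  (void)rto;
}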
void
PipelineInterests::handleTimeout(uint64_t timeout_count)
{
  if (m_highData > m_recPoint) {
    // react to only one timeout per RTT (TCP SACK)
    m_recPoint = m_highInterest;

    m_ssthresh = std::max(2.0, m_cwnd * m_options.mdCoef); // multiplicative decrease
    m_cwnd = m_ssthresh; // fast recovery
    m_rttEstimator.rtoBackoff();
    m_numOfLossEvent++;

    if (m_options.isVerbose) {
      std::cerr << "Packet loss event happened. cwnd = " << m_cwnd
                << ", ssthresh = " << m_ssthresh << std::endl;
    }
  }

  if (m_inFlight >= timeout_count)
    m_inFlight = m_inFlight - timeout_count;
  else
    m_inFlight = 0;

  schedulePackets();
}
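// handleTimeout() receives an aggregate count, so some periodic check presumably walks
// the outstanding segments, marks expired ones for retransmission, and reports how many
// timed out. The sketch below is a hypothetical version of such a check; the
// retransmission queue m_retxQueue is an assumption, while the segment states match
// those used in handleData().
void
PipelineInterests::checkRto()
{
  uint64_t timeout_count = 0;
  for (auto& entry : m_segmentInfoMap) {
    shared_ptr<SegmentInfo> seg_info = entry.second;
    if (seg_info->state == retransmitReceived || seg_info->state == inRetxQueue)
      continue; // already received, or already waiting for retransmission

    duration_in_ms elapsed = time::steady_clock::now() - seg_info->timeSent;
    if (elapsed.count() > seg_info->rto) {
      seg_info->state = inRetxQueue; // mark for retransmission
      m_retxQueue.push(entry.first);
      timeout_count++;
    }
  }
  if (timeout_count > 0)
    handleTimeout(timeout_count);
}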
void PxsFluidDynamics::mergeDensity(PxBaseTask* /*continuation*/)
{
    schedulePackets(PXS_SPH_FORCE, mMergeForceTask);
    mMergeForceTask.removeReference();
}
void PxsFluidDynamics::updateSph(PxBaseTask& continuation)
{
    PxsFluidParticle* particles = mParticleSystem.mParticleState->getParticleBuffer();
    PxU32 numParticles = mParticleSystem.mNumPacketParticlesIndices;
    const PxU32* particleIndices = mParticleSystem.mPacketParticlesIndices;
    const PxsParticleCell* packets = mParticleSystem.mSpatialHash->getPackets();
    const PxsFluidPacketSections* packetSections = mParticleSystem.mSpatialHash->getPacketSections();
    PX_ASSERT(packets);
    PX_ASSERT(packetSections);
    PX_ASSERT(numParticles > 0);
    PX_UNUSED(packetSections);

#ifdef PX_PS3
    const Cm::BitMap& particleMap = mParticleSystem.mParticleState->getParticleMap();
    PxF32 timeStep = mParticleSystem.mSimulationTimeStep;
    startTimerMarker(ePARTICLEUPDATESPH);

    mDynamicSPU.mSPHSPUs = mParticleSystem.getContext().getSceneParamInt(PxPS3ConfigParam::eSPU_FLUID_SPH);
    if (mDynamicSPU.mSPHSPUs > 0)
    {
        mDynamicSPU.updateSphSPU(particles, mParticleSystem.mTransientBuffer, particleMap, numParticles, particleIndices,
            packets, packetSections, mParams, timeStep, mParticleSystem.mContext.getTaskPool(), continuation);
    }
    else
#endif
    {
        // sschirm: for now we reorder particles for SPH exclusively, and scatter again after SPH.
        if (!mTempReorderedParticles)
        {
            PxU32 maxParticles = mParticleSystem.mParticleState->getMaxParticles();
            mTempReorderedParticles = (PxsFluidParticle*)mParticleSystem.mAlign16.allocate(maxParticles*sizeof(PxsFluidParticle), __FILE__, __LINE__);
        }

        if (!mTempParticleForceBuf)
        {
            PxU32 maxParticles = mParticleSystem.mParticleState->getMaxParticles();
            // sschirm: add an extra float, since this buffer is accessed later with Vec4V_From_F32Array;
            // otherwise the last 4 elements would contain unallocated memory.
            // Also zero-initialize the buffer, which may only be used partially and non-contiguously,
            // to avoid SIMD operations reading bad values.
            PxU32 byteSize = maxParticles*sizeof(PxVec3) + sizeof(PxF32);
            mTempParticleForceBuf = (PxVec3*)mParticleSystem.mAlign16.allocate(byteSize, __FILE__, __LINE__);
            memset(mTempParticleForceBuf, 0, byteSize);
        }

        for (PxU32 i = 0; i < numParticles; ++i)
        {
            PxU32 particleIndex = particleIndices[i];
            mTempReorderedParticles[i] = particles[particleIndex];
        }

        // would be nice to get the available thread count to decide on task decomposition
        //mParticleSystem.getContext().getTaskManager().getCpuDispatcher();

        // use the number of particles for task decomposition
        PxU32 targetParticleCountPerTask = PxMax(PxU32(numParticles / PXS_FLUID_MAX_PARALLEL_TASKS_SPH), PxU32(PXS_FLUID_SUBPACKET_PARTICLE_LIMIT_FORCE_DENSITY));

        PxU16 packetIndex = 0;
        PxU16 lastPacketIndex = 0;
        PxU32 numTasks = 0;
        for (PxU32 i = 0; i < PXS_FLUID_MAX_PARALLEL_TASKS_SPH; ++i)
        {
            // if this is the last iteration, we need to gather all remaining packets
            if (i == PXS_FLUID_MAX_PARALLEL_TASKS_SPH - 1)
                targetParticleCountPerTask = 0xffffffff;

            lastPacketIndex = packetIndex;
            PxU32 currentParticleCount = 0;
            while (currentParticleCount < targetParticleCountPerTask && packetIndex < PXS_PARTICLE_SYSTEM_PACKET_HASH_SIZE)
            {
                const PxsParticleCell& packet = packets[packetIndex];
                currentParticleCount += (packet.numParticles != PX_INVALID_U32) ? packet.numParticles : 0;
                packetIndex++;
            }

            if (currentParticleCount > 0)
            {
                PX_ASSERT(lastPacketIndex != packetIndex);
                mTaskData[i].beginPacketIndex = lastPacketIndex;
                mTaskData[i].endPacketIndex = packetIndex;
                numTasks++;
            }
            else
            {
                mTaskData[i].beginPacketIndex = PX_INVALID_U16;
                mTaskData[i].endPacketIndex = PX_INVALID_U16;
            }
        }
        PX_ASSERT(packetIndex == PXS_PARTICLE_SYSTEM_PACKET_HASH_SIZE);

        mNumTasks = numTasks;
        adjustTempBuffers(PxMax(numTasks, mNumTempBuffers));

        mMergeForceTask.setContinuation(&continuation);
        mMergeDensityTask.setContinuation(&mMergeForceTask);

        schedulePackets(PXS_SPH_DENSITY, mMergeDensityTask);
        mMergeDensityTask.removeReference();
    }

#ifdef PX_PS3
    stopTimerMarker(ePARTICLEUPDATESPH);
#endif
}
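// schedulePackets() is called with PXS_SPH_DENSITY here and with PXS_SPH_FORCE from
// mergeDensity(): the density pass runs first, mergeDensity() chains the force pass,
// and mMergeForceTask finally hands control back to the external continuation.
// The sketch below is a hypothetical dispatch loop over the packet ranges computed
// above; the per-slot task objects (mSphTasks) and their setters are assumptions,
// not the actual PhysX implementation.
void PxsFluidDynamics::schedulePackets(PxsSphUpdateType updateType, PxBaseTask& continuation)
{
    for (PxU32 i = 0; i < PXS_FLUID_MAX_PARALLEL_TASKS_SPH; ++i)
    {
        if (mTaskData[i].beginPacketIndex == PX_INVALID_U16)
            continue; // no packets were assigned to this slot

        // each task processes the packet range [beginPacketIndex, endPacketIndex)
        // for either the density or the force pass
        PxsFluidDynamicsSphTask& task = mSphTasks[i]; // assumed per-slot task objects
        task.setUpdateType(updateType);               // assumed setter for the pass type
        task.setContinuation(&continuation);
        task.removeReference();                       // allow the task to be scheduled
    }
}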