/*
========================
idLobby::SendCompletedSnaps
This function will send off any previously submitted pending snaps if they are ready
========================
*/
bool idLobby::SendCompletedSnaps()
{
	assert( lobbyType == GetActingGameStateLobbyType() );
	
	bool sentAllSubmitted = true;
	
	for( int p = 0; p < peers.Num(); p++ )
	{
		peer_t& peer = peers[p];
		
		if( !peer.IsConnected() )
		{
			continue;
		}
		
		if( peer.snapProc->PendingSnapReadyToSend() )
		{
			// Check to see if there are any snaps that were submitted that need to be sent out
			SendCompletedPendingSnap( p );
		}
		else if( IsHost() )
		{
			NET_VERBOSESNAPSHOT_PRINT_LEVEL( 7, va( "  ^8Peer %d pendingSnap not ready to send\n", p ) );
		}
		
		if( !peer.IsConnected() )    // peer may have been dropped in "SendCompletedPendingSnap". ugh.
		{
			continue;
		}
		
		if( peer.snapProc->PendingSnapReadyToSend() )
		{
			// If we still have a submitted snap, we know we're not done
			sentAllSubmitted = false;
			if( IsHost() )
			{
				NET_VERBOSESNAPSHOT_PRINT_LEVEL( 2, va( "  ^2Peer %d did not send all submitted snapshots.\n", p ) );
			}
		}
	}
	
	return sentAllSubmitted;
}
/*
========================
idLobby::SubmitPendingSnap
========================
*/
bool idLobby::SubmitPendingSnap( int p )
{

	assert( lobbyType == GetActingGameStateLobbyType() );
	
	peer_t& peer = peers[p];
	
	if( !peer.IsConnected() )
	{
		return false;
	}
	
	// If the peer doesn't have the latest resource list, send it to him before sending any new snapshots
	if( SendResources( p ) )
	{
		return false;
	}
	
	if( !peer.loaded )
	{
		return false;
	}
	
	if( !peer.snapProc->HasPendingSnap() )
	{
		return false;
	}
	
	int time = Sys_Milliseconds();
	
	int timeFromLastSub = time - peer.lastSnapJobTime;
	
	int forceResendTime = session->GetTitleStorageInt( "net_snap_redundant_resend_in_ms", net_snap_redundant_resend_in_ms.GetInteger() );
	
	if( timeFromLastSub < forceResendTime && peer.snapProc->IsBusyConfirmingPartialSnap() )
	{
		return false;
	}
	
	peer.lastSnapJobTime = time;
	assert( !peer.snapProc->PendingSnapReadyToSend() );
	
	// Submit snapshot delta to jobs
	peer.snapProc->SubmitPendingSnap( p + 1, objMemory, SNAP_OBJ_JOB_MEMORY, lzwData );
	
	NET_VERBOSESNAPSHOT_PRINT_LEVEL( 2, va( "  Submitted snapshot to jobList for peer %d. Since last jobsub: %d\n", p, timeFromLastSub ) );
	
	return true;
}
/*
========================
idSnapshotProcessor::ApplySnapshotDelta
Apply a snapshot delta to our current basestate, and make that the new base.
We can remove all deltas that refer to the basestate we just replaced.
========================
*/
bool idSnapshotProcessor::ApplySnapshotDelta( int visIndex, int snapshotNumber )
{
	// Returns true if the delta for snapshotNumber was applied and became the new base,
	// false if the delta was stale (already ack'd), out of order, or mismatched our base.

	NET_VERBOSESNAPSHOT_PRINT_LEVEL( 6, va( "idSnapshotProcessor::ApplySnapshotDelta snapshotNumber: %d\n", snapshotNumber ) );
	
	// Sanity check deltas
	SanityCheckDeltas();
	
	// dump any deltas older than the acknowledged snapshot, which should only happen if there is packet loss
	deltas.RemoveOlderThan( snapshotNumber );
	
	if( deltas.Num() == 0 || deltas.ItemSequence( 0 ) != snapshotNumber )
	{
		// this means the snapshot was either already acknowledged or came out of order
		// On the server, this can happen because the client is continuously/redundantly sending acks
		// Once the server has ack'd a certain base sequence, it will need to ignore all the redundant ones.
		// On the client, this will only happen due to out of order, or dropped packets.
		
		if( !common->IsServer() )
		{
			// these should be printed every time on the clients
			// printing on server is not useful / results in tons of spam
			if( deltas.Num() == 0 )
			{
				NET_VERBOSESNAPSHOT_PRINT( "NET: Got snapshot but ignored... deltas.Num(): %d snapshotNumber: %d \n", deltas.Num(), snapshotNumber );
			}
			else
			{
				NET_VERBOSESNAPSHOT_PRINT( "NET: Got snapshot but ignored... deltas.ItemSequence( 0 ): %d != snapshotNumber: %d \n   ", deltas.ItemSequence( 0 ), snapshotNumber );
				
				// Dump the sequence numbers we do have queued, for debugging
				for( int i = 0; i < deltas.Num(); i++ )
				{
					NET_VERBOSESNAPSHOT_PRINT( "%d ", deltas.ItemSequence( i ) );
				}
				NET_VERBOSESNAPSHOT_PRINT( "\n" );
				
			}
		}
		return false;
	}
	
	// Read the sequence numbers embedded in the compressed delta header
	int deltaSequence		= 0;
	int deltaBaseSequence	= 0;
	
	PeekDeltaSequence( ( const char* )deltas.ItemData( 0 ), deltas.ItemLength( 0 ), deltaSequence, deltaBaseSequence );
	
	assert( deltaSequence == snapshotNumber );		// Make sure compressed sequence number matches that in data queue
	assert( baseSequence == deltaBaseSequence );	// If this delta isn't based off of our currently ack'd basestate, something is trashed...
	assert( deltaSequence > baseSequence );
	
	if( baseSequence != deltaBaseSequence )
	{
		// NOTE - This should no longer happen with recent fixes.
		// We should probably disconnect from the server if this happens. (packets are trashed most likely)
		NET_VERBOSESNAPSHOT_PRINT( "NET: Got snapshot %d but baseSequence does not match. baseSequence: %d deltaBaseSequence: %d. \n", snapshotNumber, baseSequence, deltaBaseSequence );
		return false;
	}
	
	// Apply this delta to our base state
	// NOTE(review): presumably ApplyDeltaToSnapshot returns true when the applied
	// delta completed a full snapshot — confirm against its definition
	if( ApplyDeltaToSnapshot( baseState, ( const char* )deltas.ItemData( 0 ), deltas.ItemLength( 0 ), visIndex ) )
	{
		lastFullSnapBaseSequence = deltaSequence;
	}
	
	baseSequence = deltaSequence;		// This is now our new base sequence
	
	// Remove deltas that we no longer need
	RemoveDeltasForOldBaseSequence();
	
	// Sanity check deltas
	SanityCheckDeltas();
	
	return true;
}
/*
========================
idSnapshotProcessor::GetPendingSnapDelta
========================
*/
int idSnapshotProcessor::GetPendingSnapDelta( byte* outBuffer, int maxLength )
{
	// Copies the next job-produced compressed snapshot delta into outBuffer.
	// Returns the delta size in bytes, 0 if there is nothing to send, or a
	// negative length when the delta queue was full and the newest stored delta
	// is being resent instead (callers send -size bytes in that case).

	assert( PendingSnapReadyToSend() );
	
	// The job is expected to have produced exactly one delta
	if( !verify( jobMemory->lzwInOutData.numlzwDeltas == 1 ) )
	{
		jobMemory->lzwInOutData.numlzwDeltas = 0;
		return 0;  // No more deltas left to send
	}
	
	assert( hasPendingSnap );
	
	// Consume the delta the job produced
	jobMemory->lzwInOutData.numlzwDeltas = 0;
	
	int size = jobMemory->lzwDeltas[0].size;
	
	if( !verify( size != -1 ) )
	{
#ifdef STRESS_LZW_MEM
		// Stress testing: grow the lzw memory budget and retry instead of erroring out
		if( g_maxlwMem < MAX_LZW_MEM )
		{
			g_maxlwMem += 50;
			g_maxlwMem = Min( g_maxlwMem, MAX_LZW_MEM );
			return 0;
		}
#endif
		
		// This can happen if there wasn't enough maxlzwMem to process one full obj in a single delta
		idLib::Error( "GetPendingSnapDelta: Delta failed." );
	}
	
	// The delta payload lives in the shared job memory at the job-reported offset
	uint8* deltaData = &jobMemory->lzwMem[jobMemory->lzwDeltas[0].offset];
	
	int deltaSequence		= 0;
	int deltaBaseSequence	= 0;
	PeekDeltaSequence( ( const char* )deltaData, size, deltaSequence, deltaBaseSequence );
	// sanity check: does the compressed data we are about to send have the sequence number we expect
	assert( deltaSequence == jobMemory->lzwDeltas[0].snapSequence );
	
	if( !verify( size <= maxLength ) )
	{
		idLib::Error( "GetPendingSnapDelta: Size overflow." );
	}
	
	// Copy to out buffer
	memcpy( outBuffer, deltaData, size );
	
	// Set the sequence to what this delta actually belongs to
	assert( jobMemory->lzwDeltas[0].snapSequence == snapSequence + 1 );
	snapSequence = jobMemory->lzwDeltas[0].snapSequence;
	
	//idLib::Printf( "deltas Num: %i, Size: %i\n", deltas.Num(), deltas.GetDataLength() );
	
	// Copy to delta buffer
	// NOTE - We don't need to save this delta off if peer has already ack'd this basestate.
	// This can happen due to the fact that we defer the processing of snap deltas on jobs.
	// When we start processing a delta, we use the currently ack'd basestate.  If while we were processing
	// the delta, the client acks a new basestate, we can get into this situation.  In this case, we simply don't
	// store the delta, since it will just take up space, and just get removed anyways during ApplySnapshotDelta.
	//	 (and cause lots of spam when it sees the delta's basestate doesn't match the current ack'd one)
	if( deltaBaseSequence >= baseSequence )
	{
		if( !deltas.Append( snapSequence, deltaData, size ) )
		{
			// Append failed (queue full): fall back to resending the newest stored
			// delta, signalled to the caller via a negative size
			int resendLength = deltas.ItemLength( deltas.Num() - 1 );
			
			if( !verify( resendLength <= maxLength ) )
			{
				idLib::Error( "GetPendingSnapDelta: Size overflow for resend." );
			}
			
			memcpy( outBuffer, deltas.ItemData( deltas.Num() - 1 ), resendLength );
			size = -resendLength;
		}
	}
	
	if( jobMemory->lzwInOutData.fullSnap )
	{
		// We sent the full snap, we can stop sending this pending snap now...
		NET_VERBOSESNAPSHOT_PRINT_LEVEL( 5, va( "  wrote enough deltas to a full snapshot\n" ) ); // FIXME: peer number?
		
		hasPendingSnap = false;
		partialBaseSequence = -1;
		
	}
	else
	{
		// Still mid-snap: remember which base the partial delta was built against
		partialBaseSequence = deltaBaseSequence;
	}
	
	return size;
}
/*
========================
idLobby::SendSnapshotToPeer
Hands a new snapshot to the peer's snapshot processor (subject to throttling),
and flags the peer so the pending snap is submitted/sent later with the other peers.
========================
*/
void idLobby::SendSnapshotToPeer( idSnapShot& ss, int p )
{
	assert( lobbyType == GetActingGameStateLobbyType() );
	
	peer_t& peer = peers[p];
	
	// Debug cvar: deliberately drop exactly one snapshot, then reset the flag
	if( net_forceDropSnap.GetBool() )
	{
		net_forceDropSnap.SetBool( false );
		return;
	}
	
	if( peer.pauseSnapshots )
	{
		return;
	}
	
	int time = Sys_Milliseconds();
	
	const int throttleMode = session->GetTitleStorageInt( "net_peer_throttle_mode", net_peer_throttle_mode.GetInteger() );
	
	// Real peer throttling based on performance
	// -We throttle before sending to jobs rather than before sending
	
	if( ( throttleMode == 1 || throttleMode == 3 ) && peer.throttledSnapRate > 0 )
	{
		if( time - peer.lastSnapJobTime < peer.throttledSnapRate / 1000 )    // fixme /1000
		{
			// This peer is throttled, skip his snap shot
			NET_VERBOSESNAPSHOT_PRINT_LEVEL( 2, va( "NET: Throttling peer %d.Skipping snapshot. Time elapsed: %d peer snap rate: %d\n", p, ( time - peer.lastSnapJobTime ), peer.throttledSnapRate ) );
			return;
		}
	}
	
	if( throttleMode != 0 )
	{
		DetectSaturation( p );
	}
	
	// Skip the snap entirely if the peer's outgoing rate exceeds its cap
	if( peer.maxSnapBps >= 0.0f && ( throttleMode == 2 || throttleMode == 3 ) )
	{
		if( peer.packetProc->GetOutgoingRateBytes() > peer.maxSnapBps )
		{
			return;
		}
	}
	
	// TrySetPendingSnapshot will try to set the new pending snap.
	// TrySetPendingSnapshot won't do anything until the last snap set was fully sent out.
	
	if( peer.snapProc->TrySetPendingSnapshot( ss ) )
	{
		// BUGFIX: these prints passed a hard-coded 0 instead of the peer index,
		// so the verbose log always claimed peer 0
		NET_VERBOSESNAPSHOT_PRINT_LEVEL( 2, va( "  ^8Set next pending snapshot peer %d\n", p ) );
		
		peer.numSnapsSent++;
		
		idSnapShot* baseState = peer.snapProc->GetBaseState();
		if( verify( baseState != NULL ) )
		{
			baseState->UpdateExpectedSeq( peer.snapProc->GetSnapSequence() );
		}
		
	}
	else
	{
		NET_VERBOSESNAPSHOT_PRINT_LEVEL( 2, va( "  ^2FAILED Set next pending snapshot peer %d\n", p ) );
	}
	
	// We send out the pending snap, which could be the most recent, or an old one that hasn't fully been sent
	// We don't send immediately, since we have to coordinate sending snaps for all peers in same place considering jobs.
	peer.needToSubmitPendingSnap = true;
}
/*
========================
idLobby::SendCompletedPendingSnap
========================
*/
void idLobby::SendCompletedPendingSnap( int p )
{
	// Sends peer p's completed (job-processed) pending snapshot delta, after
	// bandwidth, fragment, resource, and throttle-recovery checks.

	assert( lobbyType == GetActingGameStateLobbyType() );
	
	int time = Sys_Milliseconds();
	
	peer_t& peer = peers[p];
	
	if( !peer.IsConnected() )
	{
		return;
	}
	
	if( peer.snapProc == NULL || !peer.snapProc->PendingSnapReadyToSend() )
	{
		return;
	}
	
	// If we have a pending snap ready to send, we better have a pending snap
	assert( peer.snapProc->HasPendingSnap() );
	
	// Get the snap data blob now, even if we don't send it.
	// This is somewhat wasteful, but we have to do this to keep the snap job pipe ready to keep doing work
	// If we don't do this, this peer will cause other peers to be starved of snapshots, when they may very well be ready to send a snap
	byte buffer[ MAX_SNAP_SIZE ];
	// Leave room for queued reliable data plus 128 bytes of headroom
	// (presumably for packet overhead — confirm against packetProc)
	int maxLength = sizeof( buffer ) - peer.packetProc->GetReliableDataSize() - 128;
	
	int size = peer.snapProc->GetPendingSnapDelta( buffer, maxLength );
	
	if( !CanSendMoreData( p ) )
	{
		return;
	}
	
	// Can't send anymore snapshots until all fragments are sent
	if( peer.packetProc->HasMoreFragments() )
	{
		return;
	}
	
	// If the peer doesn't have the latest resource list, send it to him before sending any new snapshots
	if( SendResources( p ) )
	{
		return;
	}
	
	int timeFromJobSub = time - peer.lastSnapJobTime;
	int timeFromLastSend = time - peer.lastSnapTime;
	
	// Track the effective snapshot send rate for this peer
	if( timeFromLastSend > 0 )
	{
		peer.snapHz = 1000.0f / ( float )timeFromLastSend;
	}
	else
	{
		peer.snapHz = 0.0f;
	}
	
	// Warn when the gap between snapshot sends exceeds the configured threshold
	if( net_snapshot_send_warntime.GetInteger() > 0 && peer.lastSnapTime != 0 && net_snapshot_send_warntime.GetInteger() < timeFromLastSend )
	{
		idLib::Printf( "NET: Took %d ms to send peer %d snapshot\n", timeFromLastSend, p );
	}
	
	// NOTE(review): despite the name, throttleSnapsForXSeconds is compared against
	// Sys_Milliseconds(), so it appears to hold an absolute expiry time in ms — confirm
	if( peer.throttleSnapsForXSeconds != 0 )
	{
		if( time < peer.throttleSnapsForXSeconds )
		{
			return;
		}
		
		// If we were trying to recover ping, see if we succeeded
		if( peer.recoverPing != 0 )
		{
			if( peer.lastPingRtt >= peer.recoverPing )
			{
				peer.failedPingRecoveries++;
			}
			else
			{
				const int peer_throttle_minSnapSeq = session->GetTitleStorageInt( "net_peer_throttle_minSnapSeq", net_peer_throttle_minSnapSeq.GetInteger() );
				if( peer.snapProc->GetFullSnapBaseSequence() > idSnapshotProcessor::INITIAL_SNAP_SEQUENCE + peer_throttle_minSnapSeq )
				{
					// If throttling recovered the ping
					// Step the throttled rate back up, clamped to the configured max
					int maxRate = common->GetSnapRate() * session->GetTitleStorageInt( "net_peer_throttle_maxSnapRate", net_peer_throttle_maxSnapRate.GetInteger() );
					peer.throttledSnapRate = idMath::ClampInt( common->GetSnapRate(), maxRate, peer.throttledSnapRate + common->GetSnapRate() );
				}
			}
		}
		
		peer.throttleSnapsForXSeconds = 0;
	}
	
	peer.lastSnapTime = time;
	
	if( size != 0 )
	{
		if( size > 0 )
		{
			NET_VERBOSESNAPSHOT_PRINT_LEVEL( 3, va( "NET: (peer %d) Sending snapshot %d delta'd against %d. Since JobSub: %d Since LastSend: %d. Size: %d\n", p, peer.snapProc->GetSnapSequence(), peer.snapProc->GetBaseSequence(), timeFromJobSub, timeFromLastSend, size ) );
			ProcessOutgoingMsg( p, buffer, size, false, 0 );
		}
		else if( size < 0 )  	// Size < 0 indicates the delta buffer filled up
		{
			// There used to be code here that would disconnect peers if they were in game and filled up the buffer
			// This was causing issues in the playtests we were running (Doom 4 MP) and after some conversation
			// determined that it was not needed since a timeout mechanism has been added since
			ProcessOutgoingMsg( p, buffer, -size, false, 0 );
			if( peer.snapProc != NULL )
			{
				NET_VERBOSESNAPSHOT_PRINT( "NET: (peerNum: %d - name: %s) Resending last snapshot delta %d because his delta list filled up. Since JobSub: %d Since LastSend: %d Delta Size: %d\n", p, GetPeerName( p ), peer.snapProc->GetSnapSequence(), timeFromJobSub, timeFromLastSend, size );
			}
		}
	}
	
	// We calculate what our outgoing rate was for each sequence, so we can have a relative comparison
	// for when the client reports what his downstream was in the same timeframe
	if( IsHost() && peer.snapProc != NULL && peer.snapProc->GetSnapSequence() > 0 )
	{
		//NET_VERBOSE_PRINT("^8  %i Rate: %.2f   SnapSeq: %d GetBaseSequence: %d\n", lastAppendedSequence, peer.packetProc->GetOutgoingRateBytes(), peer.snapProc->GetSnapSequence(), peer.snapProc->GetBaseSequence() );
		peer.sentBpsHistory[ peer.snapProc->GetSnapSequence() % MAX_BPS_HISTORY ] = peer.packetProc->GetOutgoingRateBytes();
	}
}