Example #1
void CWarMap::CheckWarEnd()
{
	if (m_bEnded)
		return;

	if (m_TeamData[0].iMemberCount == 0 || m_TeamData[1].iMemberCount == 0)
	{
		if (m_bTimeout)
			return;

		if (m_pkTimeoutEvent)
			return;

		Notice(LC_TEXT("길드전에 참가한 상대방 길드원이 아무도 없습니다.")); // "No members of the opposing guild are present in the guild war."
		Notice(LC_TEXT("1분 이내에 아무도 접속하지 않으면 길드전이 자동 종료됩니다.")); // "If no one connects within 1 minute, the guild war will end automatically."

		sys_log(0, "CheckWarEnd: Timeout begin %u vs %u", m_TeamData[0].dwID, m_TeamData[1].dwID);

		war_map_info* info = AllocEventInfo<war_map_info>();
		info->pWarMap = this;

		SetTimeoutEvent(event_create(war_timeout_event, info, PASSES_PER_SEC(60)));
	}
	else
		CheckScore();
}
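The snippet above only schedules the timeout; the matching handler, war_timeout_event, is defined elsewhere in the original file and is not part of this example. Purely as a sketch of the other half of the pattern, inferred from the call site event_create(war_timeout_event, info, PASSES_PER_SEC(60)), a one-shot handler along these lines would fire after the 60 seconds elapse (the member access below is an assumption made for illustration, not the original code):

void war_timeout_event_sketch(war_map_info* info)
{
	CWarMap* pkMap = info->pWarMap;

	// If one side is still empty when the 60 seconds expire, flag the timeout so
	// CheckWarEnd() stops rescheduling, and let the map run its normal end-of-war
	// routine (that routine is part of the original source and is not shown here).
	pkMap->m_bTimeout = true;
}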
Example #2
void ADomination::ScoreTeam(uint8 ControlPointIndex, float TeamScoreAmount)
{
	if ((CDomPoints[ControlPointIndex] != NULL && CDomPoints[ControlPointIndex]->ControllingTeam != NULL) && CDomPoints[ControlPointIndex]->bScoreReady)
	{
		for (uint8 i = 0; i < NumTeams; i++)
		{
			if (CDomPoints[ControlPointIndex]->ControllingTeam->GetTeamNum() == i)
			{
				if (CDomPoints[ControlPointIndex]->ControllingPawn != NULL)
				{
					// award points to player
					CDomPoints[ControlPointIndex]->ControllingPawn->Score += TeamScoreAmount;
					CDomPoints[ControlPointIndex]->UpdateHeldPointStat(CDomPoints[ControlPointIndex]->ControllingPawn, TeamScoreAmount);
				}
				// award points to players team
				CDomPoints[ControlPointIndex]->ControllingTeam->SetFloatScore(TeamScoreAmount);
				CDomPoints[ControlPointIndex]->ControllingTeam->ForceNetUpdate();
				CheckScore(CDomPoints[ControlPointIndex]->ControllingPawn);
			}
		}
	}
}
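In the example above, the inner for loop over NumTeams only ever does work for the single index equal to ControllingTeam->GetTeamNum(), so the same behaviour can be expressed as a plain range check. The body below is a sketch of that equivalent form, reusing only the calls already visible in the example (the auto-typed local avoids guessing the element type of CDomPoints); it is not a change taken from the original project:

void ADomination::ScoreTeam(uint8 ControlPointIndex, float TeamScoreAmount)
{
	auto* Point = CDomPoints[ControlPointIndex];
	if (Point == NULL || Point->ControllingTeam == NULL || !Point->bScoreReady)
		return;

	// The original loop only matches when the controlling team's number is a valid team index.
	if (Point->ControllingTeam->GetTeamNum() >= NumTeams)
		return;

	if (Point->ControllingPawn != NULL)
	{
		// award points to the controlling player
		Point->ControllingPawn->Score += TeamScoreAmount;
		Point->UpdateHeldPointStat(Point->ControllingPawn, TeamScoreAmount);
	}
	// award points to the player's team
	Point->ControllingTeam->SetFloatScore(TeamScoreAmount);
	Point->ControllingTeam->ForceNetUpdate();
	CheckScore(Point->ControllingPawn);
}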
Example #3
int main()
{
  int x,y;
  printf("Program Start\n");
  BoardIni();

  while(1){

    InputMap(&x,&y);

    if(x == 0 && y == 0){
      CheckScore();
      printf("Program End\n");
      break;
    }

    CheckPut(x,y);
    printf("z:%d %d-%d\nmove:%d\n",z,x,y,move);
  }

  return 0;
}
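As listed, the program depends on helpers and globals defined elsewhere in the project: BoardIni, InputMap, CheckPut, CheckScore, z and move. The stubs below are hypothetical, inferred from the call sites only; placed above main(), they let the loop compile and run standalone, but they carry none of the real board logic:

#include <stdio.h>

// Hypothetical stubs inferred from the call sites; the real project defines
// these with the actual board/game logic behind them.
static int z = 0;     // assumed global printed in the status line
static int move = 0;  // assumed global move counter

static void BoardIni(void) { /* initialize the board state */ }

static void InputMap(int *x, int *y)
{
  // read a move from standard input; "0 0" ends the program
  if (scanf("%d %d", x, y) != 2) {
    *x = 0;
    *y = 0;
  }
}

static void CheckPut(int x, int y) { (void)x; (void)y; move++; }

static void CheckScore(void) { printf("final score check\n"); }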
Example #4
void AUTDomGameMode::ScoreKill_Implementation(AController* Killer, AController* Other, APawn* KilledPawn, TSubclassOf<UDamageType> DamageType)
{
	AUTPlayerState* OtherPlayerState = Other ? Cast<AUTPlayerState>(Other->PlayerState) : NULL;
	if ((Killer == Other) || (Killer == NULL))
	{
		// If it's a suicide, subtract a kill from the player...
		if (OtherPlayerState)
		{
			OtherPlayerState->AdjustScore(-1);
		}
	}
	else
	{
		AUTPlayerState* KillerPlayerState = Cast<AUTPlayerState>(Killer->PlayerState);
		if (KillerPlayerState != NULL)
		{
			KillerPlayerState->AdjustScore(+1);
			KillerPlayerState->IncrementKills(DamageType, true, OtherPlayerState);
			KillerPlayerState->ModifyStatsValue(NAME_RegularKillPoints, 1);

			CheckScore(KillerPlayerState);
		}

		if (!bFirstBloodOccurred)
		{
			BroadcastLocalized(this, UUTFirstBloodMessage::StaticClass(), 0, KillerPlayerState, NULL, NULL);
			bFirstBloodOccurred = true;
		}
	}

	AddKillEventToReplay(Killer, Other, DamageType);

	if (BaseMutator != NULL)
	{
		BaseMutator->ScoreKill(Killer, Other, DamageType);
	}
	FindAndMarkHighScorer();
}
Example #5
void CWarMap::UpdateScore(DWORD g1, int score1, DWORD g2, int score2)
{
	BYTE idx;

	if (GetTeamIndex(g1, idx))
	{
		if (m_TeamData[idx].iScore != score1)
		{
			m_TeamData[idx].iScore = score1;
			SendScorePacket(idx);
		}
	}

	if (GetTeamIndex(g2, idx))
	{
		if (m_TeamData[idx].iScore != score2)
		{
			m_TeamData[idx].iScore = score2;
			SendScorePacket(idx);
		}
	}

	CheckScore();
}
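UpdateScore leans on GetTeamIndex, which resolves a guild ID to one of the two team slots through an out-parameter and reports success via its return value. Its real definition is not part of this listing; the sketch below is only a plausible shape inferred from the call sites here and in Example #1 (the struct and typedefs are stand-ins for the originals):

typedef unsigned long DWORD;   // stand-in for the project's typedef
typedef unsigned char BYTE;    // stand-in for the project's typedef

struct TeamDataSketch { DWORD dwID; int iScore; int iMemberCount; };

// Sketch: return true and fill 'idx' when the guild ID matches one of the two
// teams, false otherwise. Not the original CWarMap::GetTeamIndex.
static bool GetTeamIndexSketch(const TeamDataSketch teamData[2], DWORD dwGuildID, BYTE& idx)
{
	for (BYTE i = 0; i < 2; ++i)
	{
		if (teamData[i].dwID == dwGuildID)
		{
			idx = i;
			return true;
		}
	}
	return false;
}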
Example #6
//
// Localize
//
// This is where the bulk of evaluating and resampling the particles takes place. 
// Also applies the motion model
//
void Localize(TSense sense)
{
  double ftemp; 
  double threshold;  // threshold for discarding particles (in log prob.)
  double total, normalize; 
  double turn, distance, moveAngle; // The incremental motion reported by the odometer
  double CCenter, DCenter, TCenter, CCoeff, DCoeff, TCoeff;
  double tempC, tempD;  // Temporary variables for the motion model. 
  int i, j, k, p, best;  // Incremental counters.
  int keepers = 0; // How many particles finish all rounds
  int newchildren[SAMPLE_NUMBER]; // Used for resampling
  
  // Take the odometry readings from both this time step and the last, in order to figure out
  // the base level of incremental motion. Convert our measurements from meters and degrees 
  // into terms of map squares and radians
  distance = sqrt( ((odometry.x - lastX) * (odometry.x - lastX)) 
		 + ((odometry.y - lastY) * (odometry.y - lastY)) ) * MAP_SCALE;
  turn = (odometry.theta - lastTheta);

  // Keep the reported turn bounded by wrapping it a full revolution when it exceeds the threshold
  if (turn > M_PI/3)
    turn = turn - 2*M_PI;
  else if (turn < -M_PI/3)
    turn = turn + 2*M_PI;

  // Our motion model consists of motion along three variables; D is the major axis of motion, 
  // which is lateral motion along the robot's average facing angle for this time step, C is the
  // minor axis of lateral motion, which is perpendicular to D, and T is the amount of turn in 
  // the robot's facing angle. 
  // Since the motion model is probabilistic, the *Center terms compute the expected center of the
  // distributions of C D and T. Note that these numbers are each a function of the reported 
  // odometric distance and turn which have been observed. The constant meanC_D is the amount of 
  // effect that the distance reported from the odometry has on our motion model's expected motion
  // along the minor axis. All of these constants are defined at the top of this file.
  CCenter = distance*meanC_D + (turn*meanC_T*MAP_SCALE);
  DCenter = distance*meanD_D + (turn*meanD_T*MAP_SCALE);
  TCenter = (distance*meanT_D/MAP_SCALE) + turn*meanT_T;

  // *Coeff computes the standard deviation for each parameter when generating gaussian noise.
  // These numbers are limited to have at least some minimal level of noise, regardless of the
  // reported motion. This is especially important for dealing with a robot skidding or sliding
  // or just general unexpected motion which may not be reported at all by the odometry (it 
  // happens more often than we would like)
  CCoeff = MAX((fabs(distance*varC_D) + fabs(turn*varC_T*MAP_SCALE)), 0.8);
  DCoeff = MAX((fabs(distance*varD_D) + fabs(turn*varD_T*MAP_SCALE)), 0.8);
  TCoeff = MAX((fabs(distance*varT_D/MAP_SCALE) + fabs(turn*varT_T)), 0.10);

  // To start this function, we have already determined which particles have been resampled, and 
  // how many times. What we still need to do is move them from their parent's position, according
  // to the motion model, so that we have the appropriate scatter.
  i = 0;
  // Iterate through each of the old particles, to see how many times it got resampled.
  for (j = 0; j < PARTICLE_NUMBER; j++) {
    // Now create a new sample for each time this particle got resampled (possibly 0)
    for (k=0; k < children[j]; k++) {
      // We make a sample entry. The first, most important value is which of the old particles 
      // is this new sample's parent. This defines which map is being inherited, which will be
      // used during localization to evaluate the "fitness" of that sample.
      newSample[i].parent = j;
      
      // Randomly calculate the 'probable' trajectory, based on the movement model. The starting
      // point is of course the position of the parent.
      tempC = CCenter + GAUSSIAN(CCoeff); // The amount of motion along the minor axis of motion
      tempD = DCenter + GAUSSIAN(DCoeff); // The amount of motion along the major axis of motion
      // Record this actual motion. If we are using hierarchical SLAM, it will be used to keep track
      // of the "corrected" motion of the robot, to define this step of the path.
      newSample[i].C = tempC / MAP_SCALE;
      newSample[i].D = tempD / MAP_SCALE;
      newSample[i].T = TCenter + GAUSSIAN(TCoeff);
      newSample[i].theta = l_particle[j].theta + newSample[i].T;

      // Assuming that the robot turned continuously throughout the time step, the major direction
      // of movement (D) should be the average of the starting angle and the final angle
      moveAngle = (newSample[i].theta + l_particle[j].theta)/2.0;

      // The first term is to correct for the LRF not being mounted on the pivot point of the robot's turns
      // The second term is to allow for movement along the major axis of movement (D)
      // The last term is movement perpendicular to the major axis (C). We add pi/2 to give a consistent
      // "positive" direction for this term. MeanC significantly shifted from 0 would mean that the robot
      // has a distinct drift to one side.
      newSample[i].x = l_particle[j].x + (TURN_RADIUS * (cos(newSample[i].theta) - cos(l_particle[j].theta))) +
 	               (tempD * cos(moveAngle)) + (tempC * cos(moveAngle + M_PI/2));
      newSample[i].y = l_particle[j].y + (TURN_RADIUS * (sin(newSample[i].theta) - sin(l_particle[j].theta))) +
 	               (tempD * sin(moveAngle)) + (tempC * sin(moveAngle + M_PI/2));
      newSample[i].probability = 0.0;
      i++;
    }
  }

  // Go through these particles in a number of passes, in order to find the best particles. This is
  // where we cull out obviously bad particles, by performing evaluation in a number of distinct
  // steps. At the end of each pass, we identify the probability of the most likely sample. Any sample
  // which is not within the defined threshold of that probability can be removed, and no longer
  // evaluated, since the probability of that sample ever becoming "good" enough to be resampled is
  // negligible.
  // Note: this first evaluation is based solely on QuickScore; that is, the evaluation is only
  // performed for a short section of the laser trace, centered on the observed endpoint. This can
  // provide a good, quick heuristic for culling off bad samples, but should not be used for final
  // weights. Something which looks good in this scan can very easily turn out to be low probability
  // when the entire laser trace is considered.

  for (i = 0; i < SAMPLE_NUMBER; i++) 
    newSample[i].probability = 1.0;
  normalize = 1.0;
  threshold = PARTICLE_NUMBER;
  for (p = 0; p < PASSES; p++){
    best = 0;
    total = 0.0;
    for (i = 0; i < SAMPLE_NUMBER; i++) {
      if ((newSample[i].probability != WORST_POSSIBLE) && 
	  (1.0-pow(1.0-(newSample[i].probability/threshold), SAMPLE_NUMBER) > 1.0/(SAMPLE_NUMBER))) {
	newSample[i].probability = newSample[i].probability / normalize;
	for (k = p; k < SENSE_NUMBER; k += PASSES) 
	  newSample[i].probability = newSample[i].probability * QuickScore(sense, k, i); 
	if (newSample[i].probability > newSample[best].probability) 
	  best = i;

	total = total + newSample[i].probability;
      }
      else 
	newSample[i].probability = WORST_POSSIBLE;
    }
    normalize = newSample[best].probability;
    threshold = total;
  }


  keepers = 0;
  for (i = 0; i < SAMPLE_NUMBER; i++) 
    if (newSample[i].probability != WORST_POSSIBLE) {
      keepers++;
      // Don't let this heuristic evaluation be included in the final eval.
      newSample[i].probability = 1.0;
    }

  // Letting the user know how many samples survived this first cut.
  fprintf(stderr, "Better %d ", keepers);
  threshold = -1;

  // Now reevaluate all of the surviving samples, using the full laser scan to look for possible
  // obstructions, in order to get the most accurate weights. While doing this evaluation, we can
  // still keep our eye out for unlikely samples before we are finished.
  keepers = 0;
  normalize = 1.0;
  threshold = PARTICLE_NUMBER;
  for (p = 0; p < PASSES; p++){
    best = 0;
    total = 0.0;
    for (i = 0; i < SAMPLE_NUMBER; i++) {
      if ((newSample[i].probability != WORST_POSSIBLE) && 
	  (1.0-pow(1.0-(newSample[i].probability/threshold), SAMPLE_NUMBER) > 30.0/(SAMPLE_NUMBER))) {
	if (p == PASSES -1)
	  keepers++;
	newSample[i].probability = newSample[i].probability / normalize;
	for (k = p; k < SENSE_NUMBER; k += PASSES) 
	  newSample[i].probability = newSample[i].probability * CheckScore(sense, k, i); 
	if (newSample[i].probability > newSample[best].probability) 
	  best = i;

	total = total + newSample[i].probability;
      }
      else 
	newSample[i].probability = WORST_POSSIBLE;
    }
    normalize = newSample[best].probability;
    threshold = total;
  }

  // Report how many samples survived the second cut. These numbers help the user have confidence that
  // the threshold values used for culling are reasonable.
  fprintf(stderr, "Best of %d ", keepers);

  // All probabilities are currently in log form. Exponentiate them, but weight them by the prob of
  // the most likely sample, to ensure that we don't run into issues of machine precision at really small
  // numbers.
  total = 0.0;
  threshold = newSample[best].probability;
  for (i = 0; i < SAMPLE_NUMBER; i++) 
    // If the sample was culled, it has a weight of 0
    if (newSample[i].probability == WORST_POSSIBLE)
      newSample[i].probability = 0.0;
    else 
      total = total + newSample[i].probability;

  // Renormalize to ensure that the total probability is now equal to 1.
  for (i=0; i < SAMPLE_NUMBER; i++)
    newSample[i].probability = newSample[i].probability/total;

  total = 0.0;
  // Count how many children each particle will get in next generation
  // This is done through random resampling.
  for (i = 0; i < SAMPLE_NUMBER; i++) {
    newchildren[i] = 0;
    total = total + newSample[i].probability;
  }

  i = j = 0;  // i = no. of survivors, j = no. of new samples
  while ((j < SAMPLE_NUMBER) && (i < PARTICLE_NUMBER)) {
    k = 0;
    ftemp = MTrandDec()*total;
    while (ftemp > (newSample[k].probability)) {
      ftemp = ftemp - newSample[k].probability;
      k++;
    }    
    if (newchildren[k] == 0)
      i++;
    newchildren[k]++;
    j++;
  }

  // Report exactly how many samples are kept as particles, since they were actually
  // resampled.
  fprintf(stderr, "(%d kept) ", i);

  // Do some cleaning up
  // Is this even necessary?
  for (i = 0; i < PARTICLE_NUMBER; i++) {
    children[i] = 0;
    savedParticle[i].probability = 0.0;
  }

  // Now copy over new particles to savedParticles
  best = 0;
  k = 0; // pointer into saved particles
  for (i = 0; i < SAMPLE_NUMBER; i++)
    if (newchildren[i] > 0) {
      savedParticle[k].probability = newSample[i].probability;
      savedParticle[k].x = newSample[i].x;
      savedParticle[k].y = newSample[i].y;
      savedParticle[k].theta = newSample[i].theta;
      savedParticle[k].C = newSample[i].C;
      savedParticle[k].D = newSample[i].D;
      savedParticle[k].T = newSample[i].T;
      savedParticle[k].dist = distance / MAP_SCALE;
      savedParticle[k].turn = turn;
      // For savedParticle, the ancestryNode field actually points to the parent of this saved particle
      savedParticle[k].ancestryNode = l_particle[ newSample[i].parent ].ancestryNode;
      savedParticle[k].ancestryNode->numChildren++;
      children[k] = newchildren[i];

      if (savedParticle[k].probability > savedParticle[best].probability) 
	best = k;

      k++;
    }

  // This number records how many saved particles we are currently using, so that we can ignore anything beyond this
  // in later computations.
  cur_saved_particles_used = k;

  // We might need to continue generating children for particles, if we reach PARTICLE_NUMBER worth of distinct parents early
  // We renormalize over the chosen particles, and continue to sample from there.
  if (j < SAMPLE_NUMBER) {
    total = 0.0;
    // Normalize particle probabilities. Note that they have already been exponentiated
    for (i = 0; i < cur_saved_particles_used; i++) 
      total = total + savedParticle[i].probability;

    for (i=0; i < cur_saved_particles_used; i++)
      savedParticle[i].probability = savedParticle[i].probability/total;

    total = 0.0;
    for (i = 0; i < cur_saved_particles_used; i++) 
      total = total + savedParticle[i].probability;

    while (j < SAMPLE_NUMBER) {
      k = 0;
      ftemp = MTrandDec()*total;
      while (ftemp > (savedParticle[k].probability)) {
	ftemp = ftemp - savedParticle[k].probability;
	k++;
      }    
      children[k]++;

      j++;
    }
  }

  // Some useful information concerning the current generation of particles, and the parameters for the best one.
  fprintf(stderr, "-- %.3d (%.4f, %.4f, %.4f) : %.4f\n", curGeneration, savedParticle[best].x, savedParticle[best].y, 
	  savedParticle[best].theta, savedParticle[best].probability);
}
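A note on the two resampling loops above (the ones built around MTrandDec()*total): each child is drawn by walking the cumulative weights until a uniform draw is exhausted. Isolated as a helper, the same idea looks like the sketch below; it is for illustration only, uses rand() in place of the project's MTrandDec generator, and adds a bounds check that the original loops omit:

#include <stdlib.h>

// Draw index k with probability weight[k] / total: take a uniform sample in
// [0, total] and subtract weights until it is exhausted. rand()/RAND_MAX is a
// stand-in for MTrandDec(); the k < count - 1 guard is an added safety check.
static int DrawWeightedIndex(const double *weight, int count, double total)
{
  double ftemp = ((double)rand() / (double)RAND_MAX) * total;
  int k = 0;
  while (k < count - 1 && ftemp > weight[k]) {
    ftemp -= weight[k];
    k++;
  }
  return k;
}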