Example #1
0
void kickBall(struct RoboAI* ai) {
  all_stop();
  printf("KICKING\n");
  kick_speed(100);
  drive_speed(100);
  sleep(2);
  stop_kicker();
  all_stop();
  // go to reset state
  ai->st.state++;
}
Example #2
0
void initializeRobot()
{
    elevator_state = STOPPED;
    conveyor_state = CONVEYOR_STOPPED;
    wheel_state = WHEEL_OFF;
    hopper_state = HOPPER_DOWN;

    drive_enter_state(NORTH);

    debounce = false;

    servoChangeRate[leftHopper]  = 1;
    servoChangeRate[rightHopper] = 1;

    servo[left] = SERVO_STOPPED;
    servo[right] = SERVO_STOPPED;

    servo[leftHopper] = LEFT_HOPPER_DOWN;
    servo[rightHopper] = RIGHT_HOPPER_DOWN;

    motor[waterWheel] = 0;

    all_stop();

    return;
}
// Provided --------------------------------
int setupAI(int mode, struct RoboAI *ai)
{
	//Note: set state to 0 if it is not used!
	int i;

	switch (mode) {
	case AI_CONFUSED:
		ai->state = 0;
		ai->runAI = confused;
		break;
	case AI_PACO:
		ai->state = NULL;
		ai->runAI = AI_main;
		ai->st.side=0;
		ai->st.state=0;
		all_stop();
		ai->st.ball=NULL;
		ai->st.old_bcx=0;
		ai->st.old_bcy=0;
		ai->st.selfID=0;
		ai->st.oppID=0;
		ai->st.ballID=0;
		clear_motion_flags(ai);
		break;
	default:
		fprintf(stderr, "AI mode %d is not implemented, sorry\n", mode);
		return 0;
	}

	return 1;
}
void id_bot(struct RoboAI *ai, struct blob *blobs)
{
 // Drive a simple pattern forward/reverse and call the particle filter to
 // find the blobs that are consistent with this in order to identify the
 // bot as well as the opponent.

 double noiseV=5;			// Expected motion magnitude due to noise for a static object
					// (it's just a guess - haven't measured it)
 double oppSizeT=2500;			// Opponent's blob minimum size threshold
 static double blobData[10][4];	// Keep track of up to 10 blobs that could be
					// the bot. Unlikely there are more since
					// these have to be actively tracked blobs.
					// blobData[i][:]=[blobID mx my p]
 static int bdataidx=0;
 double psum;
 double px,siz,len,vx,vy,wx,wy;
 int ownID,i;
 struct blob *oppID;

 struct blob *p;
 static double IDstep=0;		// Step tracker
 double frameInc=1.0/10;		// 1 over the number of frames each step should get

 // Get a handle on the ball
 id_ball(ai,blobs);
 
 // Forward motion for a few frames
 if (IDstep<3*frameInc)
 {
  drive_speed(35);
  clear_motion_flags(ai);
  ai->st.mv_fwd=1;
 }
 else if (IDstep<1.0)
 {
  drive_speed(35);
  clear_motion_flags(ai);
  ai->st.mv_fwd=1;
  particle_filter(ai,blobs);
 }
 else if (IDstep<1.0+(3*frameInc))	// Reverse drive
 {
  drive_speed(-35);
  clear_motion_flags(ai);
  ai->st.mv_back=1;
 }
 else if (IDstep<2.0)
 {
  drive_speed(-35);
  clear_motion_flags(ai);
  ai->st.mv_back=1;
  particle_filter(ai,blobs);  
 }
 else if (IDstep<2.0+(5*frameInc)) {clear_motion_flags(ai); all_stop();}	// Stop and clear blob motion history
 else {IDstep=0; ai->st.state=1;}	// For debug...
 		 
 IDstep+=frameInc; 

}
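The comments above describe the identification idea but leave the consistency test implicit: the self blob is the one whose tracked motion follows the commanded forward/reverse pattern, while everything else moves by no more than the noise level noiseV. The sketch below is an illustrative, self-contained version of such a test; the helper name and its arguments are made up, and the real code instead accumulates candidates in blobData[][] and defers to particle_filter().

#include <math.h>

/* Score how "self-like" a blob's motion is, given the net displacement it
 * accumulated during the forward-drive phase and during the reverse-drive
 * phase of the ID pattern. A blob that barely moved is rejected outright;
 * otherwise the score peaks when the two displacements point in opposite
 * directions, i.e. the blob reversed exactly when the drive command did.   */
static double selfLikelihood(double fwd_dx, double fwd_dy,   /* net motion, forward phase  */
                             double rev_dx, double rev_dy,   /* net motion, reverse phase  */
                             double noiseV)                  /* expected static-blob noise */
{
    double fwd = sqrt(fwd_dx*fwd_dx + fwd_dy*fwd_dy);
    double rev = sqrt(rev_dx*rev_dx + rev_dy*rev_dy);
    if (fwd < noiseV || rev < noiseV) return 0.0;            /* indistinguishable from noise */
    double cosang = (fwd_dx*rev_dx + fwd_dy*rev_dy) / (fwd*rev);
    return 0.5*(1.0 - cosang);                               /* 1 = perfectly opposite       */
}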
Example #5
0
void id_bot(struct RoboAI *ai, struct blob *blobs)
{
///////////////////////////////////////////////////////////////////////////////
// ** DO NOT CHANGE THIS FUNCTION **
// This routine calls track_agents() to identify the blobs corresponding to the
// robots and the ball. It commands the bot to move forward slowly so heading
// can be established from blob-tracking.
//
// NOTE 1: All heading estimates, velocity vectors, position, and orientation
//         are noisy. Remember what you have learned about noise management.
//
// NOTE 2: Heading and velocity estimates are not valid while the robot is
//         rotating in place (and the final heading vector is not valid either).
//         To re-establish heading, forward/backward motion is needed.
//
// NOTE 3: However, you do have a reliable orientation vector within the blob
//         data structures derived from blob shape. It points along the long
//         side of the rectangular 'uniform' of your bot. It is valid at all
//         times (even when rotating), but may be pointing backward and the
//         pointing direction can change over time.
//
// You should *NOT* call this function during the game. This is only for the
// initialization step. Calling this function during the game will result in
// unpredictable behaviour since it will update the AI state.
///////////////////////////////////////////////////////////////////////////////

    struct blob *p;
    static double stepID=0;
    double frame_inc=1.0/5.0;

    drive_speed(30);			// Start forward motion to establish heading
    // Will move for a few frames.

    track_agents(ai,blobs);		// Call the tracking function to find each agent

    if (ai->st.selfID==1&&ai->st.self!=NULL)
        fprintf(stderr,"Successfully identified self blob at (%f,%f)\n",ai->st.self->cx,ai->st.self->cy);
    if (ai->st.oppID==1&&ai->st.opp!=NULL)
        fprintf(stderr,"Successfully identified opponent blob at (%f,%f)\n",ai->st.opp->cx,ai->st.opp->cy);
    if (ai->st.ballID==1&&ai->st.ball!=NULL)
        fprintf(stderr,"Successfully identified ball blob at (%f,%f)\n",ai->st.ball->cx,ai->st.ball->cy);

    stepID+=frame_inc;
    if (stepID>=1&&ai->st.selfID==1)	// Stop after a suitable number of frames.
    {
        ai->st.state+=1;
        stepID=0;
        all_stop();
    }
    else if (stepID>=1) stepID=0;

// At this point, each agent currently in the field should have been identified.
    return;
}
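NOTE 3 above points out that the shape-based orientation vector is available even while rotating, but is only defined up to a sign flip. A minimal sketch of one way to use it, assuming nothing beyond plain 2D vectors: keep it aligned with the last trusted heading (e.g. the motion-based heading captured while driving straight), flipping it whenever the two disagree. The helper below is illustrative and not part of the provided API.

/* Flip the shape-based direction (dx,dy) in place if it points away from the
 * reference heading (ref_x,ref_y), so the estimate stays consistent from
 * frame to frame even while the bot rotates on the spot.                    */
static void alignOrientation(double *dx, double *dy,
                             double ref_x, double ref_y)
{
    if ((*dx)*ref_x + (*dy)*ref_y < 0.0)   /* negative dot product: pointing backward */
    {
        *dx = -(*dx);
        *dy = -(*dy);
    }
}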
void AI_main(struct RoboAI *ai, struct blob *blobs, void *state)
{
 // Top-level AI

 fprintf (stderr,"AI_main() called for ai at %p and blob list at %p\n",ai,blobs);

 if (ai->st.state==0)  // Initial set up - find own, ball, and opponent blobs			
 {
  fprintf(stderr,"State is 0, self-id in progress...\n");
  id_bot(ai,blobs);
  if (ai->st.selfID==1)
  {
   ai->st.state=1;
   all_stop();
   clear_motion_flags(ai);
  }
 }
}
Example #7
0
void initializeRobot()
{
    deadman_ltu_running = false;
    deadman_ltd_running = false;
    deadman_rtu_running = false;
    deadman_rtd_running = false;

    conveyor_enter_state(CONVEYOR_OFF);
    ramp_enter_state(RAMP_STOP);

    servo[roller] = SERVO_ROLLER_DOWN;

    servo[autoElbow] = 233;

    all_stop();

    return;
}
Example #8
0
void fold_arm(bool isFold){
	// if isFold is true, the arm will return to folded position
	// otherwise it will deploy to scoring position

	all_stop();
	if (isFold){
		servo[handJoint] = packedHand;
		while (nMotorEncoder[shoulderJoint] > 500){
			motor[shoulderJoint] = 30;
		}
		motor[shoulderJoint] = 0;
		writeDebugStreamLine("arm is folded at: %d", nMotorEncoder[shoulderJoint]);
		//motor[shoulderJoint] = 40;
		//wait1Msec(1300);
		//servo[handJoint] = 80;
		//wait1Msec(350);
		return;
	}
	else{
		while (nMotorEncoder[shoulderJoint] < 2300){
			motor[shoulderJoint] = -30;
		}
		motor[shoulderJoint] = 0;
		writeDebugStreamLine("arm is halfway at: %d", nMotorEncoder[shoulderJoint]);
		servo[handJoint] = scoringHand;

		//motor[shoulderJoint] = -40;
		//wait1Msec(700);
		//writeDebugStreamLine("encoder is at 1: %d", nMotorEncoder[shoulderJoint]);
		//servo[handJoint] = 180;

		while (nMotorEncoder[shoulderJoint] < 4900){
			motor[shoulderJoint] = -30;
		}
		motor[shoulderJoint] = 0;
		writeDebugStreamLine("arm is extended at: %d", nMotorEncoder[shoulderJoint]);


		//motor[shoulderJoint] = -40;
		//wait1Msec(900);
		//writeDebugStreamLine("encoder is at 2: %d", nMotorEncoder[shoulderJoint]);
		return;
	}
}
Example #9
0
void id_bot(struct RoboAI *ai, struct blob *blobs)
{
    // ** DO NOT CHANGE THIS FUNCTION **
    // This routine calls track_agents() to identify the blobs corresponding to the
    // robots and the ball. It commands the bot to move forward slowly so heading
    // can be established from blob-tracking.
    //
    // NOTE 1: All heading estimates are noisy.
    //
    // NOTE 2: Heading estimates are only valid when the robot moves with a
    //         forward or backward direction. Turning destroys heading data
    //         (why?)
    //
    // You should *NOT* call this function during the game. This is only for the
    // initialization step. Calling this function during the game will result in
    // unpredictable behaviour since it will update the AI state.

    struct blob *p;
    static double stepID = 0;
    double frame_inc = 1.0 / 5.0;

    drive_speed(30);       // Need a few frames to establish heading

    track_agents(ai, blobs);

    if (ai->st.selfID == 1 && ai->st.self != NULL)
        fprintf(stderr, "Successfully identified self blob at (%f,%f)\n", ai->st.self->cx[0], ai->st.self->cy[0]);
    if (ai->st.oppID == 1 && ai->st.opp != NULL)
        fprintf(stderr, "Successfully identified opponent blob at (%f,%f)\n", ai->st.opp->cx[0], ai->st.opp->cy[0]);
    if (ai->st.ballID == 1 && ai->st.ball != NULL)
        fprintf(stderr, "Successfully identified ball blob at (%f,%f)\n", ai->st.ball->cx[0], ai->st.ball->cy[0]);

    stepID += frame_inc;
    if (stepID >= 1 && ai->st.selfID == 1)
    {
        ai->st.state += 1;
        stepID = 0;
        all_stop();
    }
    else if (stepID >= 1) stepID = 0;

    return;
}
Example #10
0
void initializeRobot()
{
    nMotorEncoder[shoulder] = 0;

    shoulder_enter_state(SHOULDER_STOP);
    brush_enter_state(BRUSH_OFF);
    arm_enter_state(ARM_RETRACTED);
    dock_enter_state(DOCK_FINGER_UP);
    center_dispenser_enter_state(CENTER_DISPENSER_STOWED);

    deadman_ltu_running = false;
    deadman_ltd_running = false;

    servo[dockarm] = SERVO_DOCK_ARM_STOPPED;

    all_stop();

    return;
}
Example #11
0
void fold_arm(bool isDown)
{
	all_stop();
	if (isDown)
	{
		motor[shoulderJoint] = 40;
		wait1Msec(1300);
		servo[handJoint] = 60;
		wait1Msec(350);
		return;
	}
	else
	{
		motor[shoulderJoint] = -40;
		wait1Msec(700);
		servo[handJoint] = 160;
		motor[shoulderJoint] = -40;
		wait1Msec(900);
		return;
	}
}
Example #12
0
void initializeRobot()
{
    elevator_state = STOPPED;
    conveyor_state = CONVEYOR_STOPPED;
    block_scoop_enter_state(BLOCK_SCOOP_STOPPED);
    spod_enter_state(SPOD_OFF);

    limitSwitchInit(5);
    lightStripInit(0x1F);

    drive_enter_state(NORTH);

    displayRestingPulse();

    debounce = false;

    nMotorEncoder[rightElevator] = 0;
    servo[blockDump] = BLOCK_SERVO_RETRACTED;

    all_stop();

    return;
}
Example #13
0
void AI_main(struct RoboAI *ai, struct blob *blobs, void *state)
{
  /*************************************************************************
  This is the main AI loop.

  It is called by the imageCapture code *once* per frame. And it *must not*
  enter a loop or wait for visual events, since no visual refresh will happen
  until this call returns!

  Therefore, everything you do in here must be based on the states in your
  AI and the actions the robot will perform must be started or stopped 
  depending on *state transitions*. 
  E.g. If your robot is currently standing still, with state = 3, and
   your AI determines it should start moving forward and transition to
   state 4. Then what you must do is 
   - send a command to start forward motion at the desired speed
   - update the robot's state
   - return

  I can not emphasize this enough. Unless this call returns, no image
  processing will occur, no new information will be processed, and your
  bot will be stuck on its last action/state.
  You will be working with a state-based AI. You are free to determine
  how many states there will be, what each state will represent, and
  what actions the robot will perform based on the state as well as the
  state transitions.
  You must *FULLY* document your state representation in the report
  The first two states for each mode are already defined:
  State 0,100,200 - Before robot ID has taken place (this state is the initial
                  state, or is the result of pressing 'r' to reset the AI)
  State 1,101,201 - State after robot ID has taken place. At this point the AI
                  knows where the robot is, as well as where the opponent and
                  ball are (if visible on the playfield)
  Relevant UI keyboard commands:
  'r' - reset the AI. Will set AI state to zero and re-initialize the AI
  data structure.
  't' - Toggle the AI routine (i.e. start/stop calls to AI_main() ).
  'o' - Robot immediate all-stop! - do not allow your NXT to get damaged!
  ** Do not change the behaviour of the robot ID routine **
  **************************************************************************/

  if (ai->st.state==0||ai->st.state==100||ai->st.state==200)   // Initial set up - find own, ball, and opponent blobs
  {
    // Carry out self id process.
    fprintf(stderr,"Initial state, self-id in progress...\n");
    id_bot(ai,blobs);
    if ((ai->st.state%100)!=0)  // The id_bot() routine will change the AI state to initial state + 1
    {       // if robot identification is successful.
      if (ai->st.self->cx>=512) ai->st.side=1; else ai->st.side=0;
      all_stop();
      clear_motion_flags(ai);
      fprintf(stderr,"Self-ID complete. Current position: (%f,%f), current heading: [%f, %f], AI state=%d\n",ai->st.self->cx,ai->st.self->cy,ai->st.self->mx,ai->st.self->my,ai->st.state);
    }
  } else {
    track_agents(ai,blobs);  

    // reserve first three states in each mode for forward drive direction determination
    if ((ai->st.state > 0 && ai->st.state < 3) || 
        (ai->st.state > 100 && ai->st.state < 103) ||
        (ai->st.state > 200 && ai->st.state < 203)) {
      clear_motion_flags(ai);
      drive_speed(50);
      ai->st.state++;
    } 
    // otherwise, go to respective state mode and navigate
    else {
      // save up to date blob info to avoid segfaults
      backupReadings(ai);

      // SOCCER MODE
      if (ai->st.state >= 3 && ai->st.state <= 100) {
        switch(ai->st.state) {
          case 3: 
            if (checkOpponent(ai, 1)){
              approachTargetLocation(ai, TO_KICK_POS);
            } else {
              approachTargetLocation(ai, TO_DEFENSE);
            }
            break;
          case 4: 
            // if ball has moved by opponent or earthquake, jump back a state
            if ((fabs(ballHistX[0] - ballHistX[1]) > 10.0) || (fabs(ballHistY[0] - ballHistY[1]) > 10.0))
              --ai->st.state;
            approachTargetLocation(ai, TO_BALL); 
            break;
          case 5: kickBall(ai); break;
          default: ai->st.state = 1; break;
        }
      }
      // PENALTY MODE
      else if (ai->st.state >= 103 && ai->st.state <= 200) {
        switch(ai->st.state) {
          case 103: approachTargetLocation(ai, TO_KICK_POS); break;
          case 104: approachTargetLocation(ai, TO_BALL); break;
          case 105: kickBall(ai); break;
          default: ai->st.state = 101; break;
        }
      }
      // CHASE MODE
      else if (ai->st.state >= 203 && ai->st.state <= 300) {
        switch(ai->st.state) {
          case 203: approachTargetLocation(ai, TO_BALL); break;
          case 204: kickBall(ai); break;
          default: ai->st.state = 201; break;
        }
      }
    }
  }
}
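The comment block in AI_main() above describes the per-frame, non-blocking contract: start or stop an action, record the transition, and return. The sketch below restates that pattern in isolation, using only drive_speed()/all_stop() from the examples; the state numbers, the 100-pixel threshold, and the distToBall argument are made up for illustration.

static void stepTowardBall(struct RoboAI *ai, double distToBall)
{
    if (ai->st.state == 3)                   /* standing still, decided to advance */
    {
        drive_speed(50);                     /* start the motion...                */
        ai->st.state = 4;                    /* ...record the transition...        */
        return;                              /* ...and give the frame back         */
    }
    if (ai->st.state == 4 && distToBall < 100.0)
    {
        all_stop();                          /* close enough: stop and move on     */
        ai->st.state = 5;
        return;
    }
    /* otherwise leave the current command running; progress is re-checked on
     * the next frame, when AI_main() is called again with fresh blob data.   */
}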
Example #14
0
void AI_main(struct RoboAI *ai, struct blob *blobs, void *state)
{
    /*************************************************************************
     This is the main AI loop.

     It is called by the imageCapture code *once* per frame. And it *must not*
     enter a loop or wait for visual events, since no visual refresh will happen
     until this call returns!

     Therefore, everything you do in here must be based on the states in your
     AI and the actions the robot will perform must be started or stopped
     depending on *state transitions*.

     E.g. If your robot is currently standing still, with state = 3, and
      your AI determines it should start moving forward and transition to
      state 4. Then what you must do is
      - send a command to start forward motion at the desired speed
      - update the robot's state
      - return

     I can not emphasize this enough. Unless this call returns, no image
     processing will occur, no new information will be processed, and your
     bot will be stuck on its last action/state.

     You will be working with a state-based AI. You are free to determine
     how many states there will be, what each state will represent, and
     what actions the robot will perform based on the state as well as the
     state transitions.

     You must *FULLY* document your state representation in the report

     The first two states for each mode are already defined:
     State 0,100,200 - Before robot ID has taken place (this state is the initial
               	    state, or is the result of pressing 'r' to reset the AI)
     State 1,101,201 - State after robot ID has taken place. At this point the AI
               	    knows where the robot is, as well as where the opponent and
               	    ball are (if visible on the playfield)

     Relevant UI keyboard commands:
     'r' - reset the AI. Will set AI state to zero and re-initialize the AI
    data structure.
     't' - Toggle the AI routine (i.e. start/stop calls to AI_main() ).
     'o' - Robot immediate all-stop! - do not allow your NXT to get damaged!

     ** Do not change the behaviour of the robot ID routine **
    **************************************************************************/
    double x_distance;
    double y_distance;

    if (ai->st.state==0||ai->st.state==100||ai->st.state==200)  	// Initial set up - find own, ball, and opponent blobs
    {
        // Carry out self id process.
        fprintf(stderr,"Initial state, self-id in progress...\n");
        id_bot(ai,blobs);
        if ((ai->st.state%100)!=0)	// The id_bot() routine will change the AI state to initial state + 1
        {   // if robot identification is successful.
            if (ai->st.self->cx>=512) ai->st.side=1;
            else ai->st.side=0;
            all_stop();
            clear_motion_flags(ai);
            fprintf(stderr,"Self-ID complete. Current position: (%f,%f), current heading: [%f, %f], AI state=%d\n",ai->st.self->cx,ai->st.self->cy,ai->st.self->mx,ai->st.self->my,ai->st.state);
        }
    }
    else
    {
        /****************************************************************************
         TO DO:
         You will need to replace this 'catch-all' code with actual program logic to
         implement your bot's state-based AI.

         After id_bot() has successfully completed its work, the state should be
         1 - if the bot is in SOCCER mode
         101 - if the bot is in PENALTY mode
         201 - if the bot is in CHASE mode

         Your AI code needs to handle these states and their associated state
         transitions which will determine the robot's behaviour for each mode.

         Please note that in this function you should add appropriate functions below
         to handle each state's processing, and the code here should mostly deal with
         state transitions and with calling the appropriate function based on what
         the bot is supposed to be doing.
        *****************************************************************************/
        //fprintf(stderr,"Just trackin'!\n");	// bot, opponent, and ball.
        //track_agents(ai,blobs);		// Currently, does nothing but endlessly track

        if(ai->st.state ==101) {
            // Per-frame penalty step: no loops or waits here (see the note at
            // the top of AI_main()). Each call re-checks the distance to the
            // ball and either keeps driving or stops and advances the state.
            x_distance = fabs(ai->st.ball->cx - ai->st.self->cx);
            y_distance = fabs(ai->st.ball->cy - ai->st.self->cy);
            if (x_distance > 10) {
                drive_speed(50);
                fprintf(stderr,"distance is: %f \n", x_distance);
            } else {
                all_stop();
                ai->st.state += 1;
                fprintf(stderr,"state is %i \n", ai->st.state);
            }
        }
    }

}
Example #15
0
int setupAI(int mode, int own_col, struct RoboAI *ai)
{
/////////////////////////////////////////////////////////////////////////////
// ** DO NOT CHANGE THIS FUNCTION **
// This sets up the initial AI for the robot. There are three different modes:
//
// SOCCER -> Complete AI, tries to win a soccer game against an opponent
// PENALTY -> Score a goal (no goalie!)
// CHASE -> Kick the ball and chase it around the field
//
// Each mode sets a different initial state (0, 100, 200). Hence,
// AI states for SOCCER will be 0 through 99
// AI states for PENALTY will be 100 through 199
// AI states for CHASE will be 200 through 299
//
// You will of course have to add code to the AI_main() routine to handle
// each mode's states and do the right thing.
//
// Your bot should not become confused about what mode it started in!
//////////////////////////////////////////////////////////////////////////////

    switch (mode) {
    case AI_SOCCER:
        fprintf(stderr,"Standard Robo-Soccer mode requested\n");
        ai->st.state=0;		// <-- Set AI initial state to 0
        break;
    case AI_PENALTY:
        fprintf(stderr,"Penalty mode! let's kick it!\n");
        ai->st.state=100;	// <-- Set AI initial state to 100
        break;
    case AI_CHASE:
        fprintf(stderr,"Chasing the ball...\n");
        ai->st.state=200;	// <-- Set AI initial state to 200
        break;
    default:
        fprintf(stderr, "AI mode %d is not implemented, setting mode to SOCCER\n", mode);
        ai->st.state=0;
    }

    all_stop();			// Stop bot,
    ai->runAI = AI_main;		// and initialize all remaining AI data
    ai->calibrate = AI_calibrate;
    ai->st.ball=NULL;
    ai->st.self=NULL;
    ai->st.opp=NULL;
    ai->st.side=0;
    ai->st.botCol=own_col;
    ai->st.old_bcx=0;
    ai->st.old_bcy=0;
    ai->st.old_scx=0;
    ai->st.old_scy=0;
    ai->st.old_ocx=0;
    ai->st.old_ocy=0;
    ai->st.bvx=0;
    ai->st.bvy=0;
    ai->st.svx=0;
    ai->st.svy=0;
    ai->st.ovx=0;
    ai->st.ovy=0;
    ai->st.selfID=0;
    ai->st.oppID=0;
    ai->st.ballID=0;
    clear_motion_flags(ai);
    fprintf(stderr,"Initialized!\n");

    return(1);
}
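Since setupAI() above reserves states 0-99 for SOCCER, 100-199 for PENALTY and 200-299 for CHASE, the mode never needs to be stored separately: it can be recovered from the state number itself. The helper below is a small illustration of that, not part of the handout.

/* Base state of the current mode: 0 for SOCCER, 100 for PENALTY, 200 for CHASE. */
static int modeBase(int state)
{
    return (state / 100) * 100;
}

With this, a transition back to the "identified" state can be written as ai->st.state = modeBase(ai->st.state) + 1 in any mode, which is one way to keep the bot from becoming confused about the mode it started in.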
Example #16
0
void approachTargetLocation(struct RoboAI* ai, int mode){
  int targetProximity = 200;
  double angleThreshold = 20;
  // The idea behind this version of approaching the ball
  // is simple: I turn and check the angle between our facing
  // and the ball. If it's decreasing, we are turning the right way;
  // if not, we start turning the opposite way.
  // This is to avoid the problem with acos: it's always positive.
 
  struct vector selfToTarget, selfDirection, selfToBall, goalToBall, goalToSelf, selfToKickPos;
  struct vector selfToGoal;
  calculateInitialVectors(&selfDirection, &selfToBall, &goalToBall, &goalToSelf, ai, SELF);
  switch(mode) {
    case TO_BALL: // for chase mode, or after oriented to goal - go directly to ball
      selfToTarget.x = selfToBall.x;
      selfToTarget.y = selfToBall.y;
      break;
    case TO_KICK_POS: // we are still approaching the optimal kicking position
      selfToGoal.x = -goalToSelf.x;
      selfToGoal.y = -goalToSelf.y;

      if (calculateAngle(&selfToBall, &selfToGoal) < 5) {
        selfToTarget.x = selfToBall.x;
        selfToTarget.y = selfToBall.y;
      } else {
        calculateKickPosition(&selfToBall, &goalToBall, &goalToSelf, &selfToKickPos, ai);
        selfToTarget.x = selfToKickPos.x;
        selfToTarget.y = selfToKickPos.y;
      }
      break;
    case TO_DEFENSE: // assume defensive positions
      getInterceptPos(ai, &selfToTarget);
      break;
    default: // this should never happen, but otherwise, just go for ball
      selfToTarget.x = selfToBall.x;
      selfToTarget.y = selfToBall.y;
      break;
  }

  printf("[%d|%d] CURRENT TARGET: [%f, %f], DIST: [%f]\n", ai->st.state, mode, selfToTarget.x, selfToTarget.y, magnitude(&selfToTarget));
  // Note how the quadrant problem still exists at this point.
  // I will fix this by forcing the robot to reset the motion vector
  // whenever the angle starts increasing instead of decreasing.
 
  //////////////////// ! RESET FLAGGED ANGLE ! ////////////////////
  // First, we check if the oldAngle variable is still valid.
  // If it is flagged for a renewal, we move forwards and reset it.
  if (oldAngle == -999999){
    drive_speed(35);
    oldAngle = calculateAngle(&selfToTarget, &selfDirection);
    return;
  }
  //////////////////// ! HANDLE TURNING ! ////////////////////
  // If we, instead, have a valid old angle, we must check if after
  // turning we are increasing the angle.
  double currentAngle = calculateAngle(&selfToTarget, &selfDirection);
  // printf("ANGLE: %f from %f, difference of %f\n", currentAngle, oldAngle, currentAngle - oldAngle);
  if (currentAngle > oldAngle && currentAngle > 5){
    adjustOverturn = 1;
    // There are two possible circumstances that we are concerned with.
    // a) We just jumped into a different quadrant from the motion vector.
    // b) We are really just turning away from the target.
    // Let me address the quadrant problem first.
    //////////////////// ! QUADRANT SOLUTION ! ////////////////////
    if (attemptedQuadrantSolution < 5){
      // The solution to this is rather simple:
      // I continue turning in the direction I already am, and
      // reset the oldAngle to update it in the new quadrant.
      // IF I was indeed turning in the right direction but the
      // angle was messed up because of the bajanxed quadrants,
      // then the (currentAngle > oldAngle) check above would
      // come out false and we will continue on.
      printf("ATTEMPTED QUADRANT SOLUTION\n");
      if (attemptedQuadrantSolution < 1)
        pivotRobot(25);
      else if (attemptedQuadrantSolution < 2){
        oldAngle = -999999;
      }
      else if (attemptedQuadrantSolution < 3){
        drive_speed(50);
      } else {
        // drive_speed(25);
        all_stop();
      }
      // if (attemptedQuadrantSolution == 4)
        
      attemptedQuadrantSolution++;
      return;
    } else {
      //////////////////// ! CHANGE DIRECTION ! ////////////////////
      // If I got into here, I have already attempted a quadrant
      // problem solution, and thus am most likely turning in the
      // wrong direction. I will then change the direction I have
      // been turning in, and reset the attemptedQuadrantSolution
      // flag in anticipation of further quadrant problems.
      printf("CHANGED DIRECTION\n");
      toggleTurn = !toggleTurn;
      attemptedQuadrantSolution = 0;
      pivotRobot(20);
      oldAngle = currentAngle; // Store the old angle to keep checking.
      return;
    }
  } else {
    // If we reached this part of the code, that means we are actually
    // turning towards the target location. This is great! We can
    // continue turning towards the target until the angle is within
    // an acceptable range.
    if (currentAngle > angleThreshold){
      //////////////////// ! CONTINUE TURNING ! ////////////////////
      // We need to continue turning until an optimal angle is achieved.
      attemptedQuadrantSolution = 0;
      if (mode == TO_BALL) {
        printf("SLOW YO ROLL\n");
        pivotRobot(17);
      } else {
        pivotRobot(20);
      }
      
      oldAngle = currentAngle;
      return;
    } else {
      //////////////////// ! APPROACH TARGET ! ////////////////////
      // We are facing the target location within an acceptable range.
      if (magnitude(&selfToTarget) > targetProximity){
        // Let us start moving forwards if we are not within range.
        if (mode == TO_BALL) {
          if (adjustOverturn){
            adjustOverturn = 0;
            if (toggleTurn){
              drive_custom(38, 18);
            } else {
              drive_custom(18, 38);
            }
          } else {
            drive_speed(20);
          }
        } else {
          if (adjustOverturn){
            adjustOverturn = 0;
            if (toggleTurn){
              drive_custom(18 + driveRelative(magnitude(&selfToTarget)), driveRelative(magnitude(&selfToTarget)));
            } else {
              drive_custom(driveRelative(magnitude(&selfToTarget)), 18 + driveRelative(magnitude(&selfToTarget)));
            }
          } else {
            drive_speed(driveRelative(magnitude(&selfToTarget)));
          }
        }
        attemptedQuadrantSolution = 5;

        oldAngle = currentAngle;
        return;
      } else {
        if (mode == TO_DEFENSE){
          all_stop();
          return;
        }
        // We are in an acceptable range of the target!
        // Let us then increase the state of the AI directive and
        // handle the different cases for different game modes.
        ai->st.state ++;
        return;
      }
    }
  }
}
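The comment at the top of approachTargetLocation() explains that acos() only gives the unsigned angle, which is why the code probes the turn direction by checking whether the angle shrinks. A common alternative, sketched here only as an illustration and not as this code's method, is a signed angle computed with atan2() from the cross and dot products of the two vectors; its sign distinguishes the two pivot directions (which sign means "left" depends on the image coordinate convention).

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Signed angle, in degrees, from vector a=(ax,ay) to vector b=(bx,by),
 * in the range (-180, 180]. The magnitude matches the acos() result; the
 * sign encodes which way to rotate a so that it lines up with b.           */
static double signedAngleDeg(double ax, double ay, double bx, double by)
{
    double cross = ax*by - ay*bx;
    double dot   = ax*bx + ay*by;
    return atan2(cross, dot) * (180.0 / M_PI);
}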
Example #17
0
void AI_main(struct RoboAI *ai, struct blob *blobs, void *state)
{
    /*************************************************************************
     You will be working with a state-based AI. You are free to determine
     how many states there will be, what each state will represent, and
     what actions the robot will perform based on the state as well as the
     state transitions.

     You must *FULLY* document your state representation in the report

     Here two states are defined:
     State 0,100,200 - Before robot ID has taken place (this state is the initial
                       state, or is the result of pressing 'r' to reset the AI)
     State 1,101,201 - State after robot ID has taken place. At this point the AI
                       knows where the robot is, as well as where the opponent and
                       ball are (if visible on the playfield)

     Relevant UI keyboard commands:
     'r' - reset the AI. Will set AI state to zero and re-initialize the AI
    data structure.
     't' - Toggle the AI routine (i.e. start/stop calls to AI_main() ).
     'o' - Robot immediate all-stop! - do not allow your NXT to get damaged!

     ** Do not change the behaviour of the robot ID routine **
    **************************************************************************/

    if (ai->st.state == 0 || ai->st.state == 100 || ai->st.state == 200) // Initial set up - find own, ball, and opponent blobs
    {
        // Carry out self id process.
        fprintf(stderr, "Initial state, self-id in progress...\n");
        id_bot(ai, blobs);
        if ((ai->st.state % 100) != 0) // The id_bot() routine will change the AI state to initial state + 1
        {
            // if robot identification is successful.
            if (ai->st.self->cx[0] >= 512) ai->st.side = 1; else ai->st.side = 0;
            all_stop();
            clear_motion_flags(ai);
            fprintf(stderr, "Self-ID complete. Current position: (%f,%f), current heading: [%f, %f], AI state=%d\n", ai->st.self->cx[0], ai->st.self->cy[0], ai->st.self->mx, ai->st.self->my, ai->st.state);
        }
    }
    else
    {
        /****************************************************************************
         TO DO:
         You will need to replace this 'catch-all' code with actual program logic to
         have the robot do its work depending on its current state.
         After id_bot() has successfully completed its work, the state should be
         1 - if the bot is in SOCCER mode
         101 - if the bot is in PENALTY mode
         201 - if the bot is in CHASE mode

         Your AI code needs to handle these states and their associated state
         transitions which will determine the robot's behaviour for each mode.
        *****************************************************************************/
        // track_agents(ai,blobs);        // Currently, does nothing but endlessly track
        // fprintf(stderr,"Just trackin'!\n");    // bot, opponent, and ball.

        switch(ai->st.state){
            case 101:

                kick();
                sleep(2);
                break;
            case 201:
                chase(ai);
                break;
        }
    }

}