Example No. 1
// moved this to aftermove so xinertia on treads is already applied
// when we calculate the main object position.
void XBoss::RunAftermove()
{
Object *o = mainobject;
int i;

	if (!mainobject || mainobject->state == 0 || !X.initilized)
		return;
	
	// main object pulled along as treads move
	int tread_center = (treads[UL]->x + treads[UR]->x + \
					 	treads[LL]->x + treads[LR]->x) / 4;
	o->x += (tread_center - o->x) / 16;
	
	run_internals();
	
	for(i=0;i<4;i++)
	{
		run_body(i);
		run_target(i);
	}
	
	for(i=0;i<2;i++)
	{
		run_door(i);
	}
}
Example No. 2
int main(int argc, char** argv)
{
    pid_t child_pid;

    if (argc < 2) {
        fprintf(stderr, "Expected a program name as argument\n");
        return -1;
    }

    child_pid = fork();
    if (child_pid == 0)
        run_target(argv[1]);
    else if (child_pid > 0)
        run_debugger(child_pid);
    else {
        perror("fork");
        return -1;
    }

    return 0;
}
Example No. 3
int main(int argc, char**argv)
{
    pid_t child_pid;
    if(argc < 2){
        fprintf(stderr, "Expected a program address\n");
        return -1;
    }
    child_pid = fork();
    if(child_pid == 0)
    {
        run_target(argv[1]);
    }
    else if (child_pid > 0)
    {
        run_debugger(child_pid);
    }
    else
    {
        perror("fork error\n");
        return -1;
    }
    return 0;
}
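Neither of the two preceding examples defines run_target or run_debugger. In the classic ptrace-based tracing setup they follow the pattern sketched below; the bodies are an assumption based on that pattern, not code taken from these snippets.

/* Hypothetical sketch: the child asks to be traced and execs the target;
   the parent resumes it until it exits. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void run_target(const char* programname)
{
    /* Allow the parent to trace this process, then exec the target program. */
    if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
        perror("ptrace");
        return;
    }
    execl(programname, programname, (char*)NULL);
    perror("execl"); /* only reached if exec fails */
}

static void run_debugger(pid_t child_pid)
{
    int wait_status;

    /* The child stops with SIGTRAP after execl; keep resuming it until it exits. */
    waitpid(child_pid, &wait_status, 0);
    while (WIFSTOPPED(wait_status)) {
        if (ptrace(PTRACE_CONT, child_pid, 0, 0) < 0) {
            perror("ptrace");
            return;
        }
        waitpid(child_pid, &wait_status, 0);
    }
}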
Example No. 4
int main(int argc, char** argv)
{
	/*
	Equation eq;
	eq.theta0 = -1;
	eq.theta[0] = 2;
	Equation eq1;
	eq.roundoff(eq1);
	std::cout << eq << std::endl;
	std::cout << eq1 << std::endl;
	return 0;
	*/

	if (argc < 2) {
		std::cout << "Arguments less than 2.\n";
		exit(-1);
	}
	if (argc >= 3) {
		minv = atoi(argv[1]);
		maxv = atoi(argv[2]);
	}

	Solution inputs;
	
	init_gsets();
	srand(time(NULL)); // initialize seed for rand() function


	int rnd;
	bool b_similar_last_time = false;
	bool b_converged = false;
	bool b_svm_i = false;
	Equation* p = NULL;
	int pre_positive_size = 0, pre_negative_size = 0; // , pre_question_size = 0;
	//int cur_positive_size = 0, cur_negative_size = 0; // , cur_question_size = 0;



	//Start SVM training
	SVM* svm = new SVM(print_null);
	//svm->problem.x = (svm_node**)(training_set);
	//svm->problem.y = training_label;

	for (rnd = 1; rnd <= max_iter; rnd++) {
		svm->main_equation = NULL;

	init_svm:
		std::cout << "[" << rnd << "]SVM-----------------------------------------------" << "-------------------------------------------------------------" << std::endl;
		if (rnd == 1) {
			/*
			*	The first round is special, so we handle it separately from the following rounds.
			*	1> We use random values as inputs for the program executions in the first round.
			*	2> We need to make sure at least two classes of traces are generated: "positive" and "negative".
			*/
			std::cout << "\t(1) execute programs... [" << init_exes + random_exes << "] {";
			for (int i = 0; i < init_exes + random_exes; i++) {
				Equation::linearSolver(NULL, inputs);
				std::cout << inputs;
				if (i < init_exes + random_exes - 1) std::cout << "|";
				run_target(inputs);
			}
			std::cout << "}" << std::endl;

			if (gsets[POSITIVE].traces_num() == 0 || gsets[NEGATIVE].traces_num() == 0) {
				if (gsets[POSITIVE].traces_num() == 0) std::cout << "[0] Positive trace, execute program again." << std::endl;
				if (gsets[NEGATIVE].traces_num() == 0) std::cout << "[0] Negative trace, execute program again." << std::endl;
				goto init_svm;
			}
		}
		else {
			std::cout << "\t(1) execute programs...[" << after_exes + random_exes << "] {";
			for (int i = 0; i < random_exes; i++) {
				Equation::linearSolver(NULL, inputs);
				std::cout << inputs;
				std::cout << " | ";
				run_target(inputs);
			}
			for (int i = 0; i < after_exes; i++) {
				Equation::linearSolver(p, inputs);
				std::cout << " | " << inputs;
				run_target(inputs);
			}
			std::cout << "}" << std::endl;
		}

		std::cout << "\t(2) prepare training data... ";
		svm->prepare_training_data(gsets, pre_positive_size, pre_negative_size);
		std::cout << std::endl;

		std::cout << "\t(3) start training... ";
		svm->train();
		std::cout << "|-->> ";
		set_console_color(std::cout);
		std::cout << *svm << std::endl;
		unset_console_color(std::cout);

		

		/*
		*	check on its own training data.
		*	There should be no prediction errors.
		*/
		std::cout << "\t(4) checking training traces.";
		double passRat = svm->predict_on_training_set();
		std::cout << " [" << passRat * 100 << "%]";
		if (passRat < 1) {
			std::cout << " [FAIL]\n The problem is not linearly separable. Trying to solve it with the SVM-I algorithm." << std::endl;
			if (p != NULL) {
				Equation* tmp = svm->main_equation;
				svm->main_equation = p;
				double passRat = svm->predict_on_training_set();
				std::cout << " last divide: " << *p << " accuracy[" << passRat * 100 << "%]\n";
				svm->main_equation = tmp;
			}
			std::cerr << "*******************************USING SVM_I NOW******************************" << std::endl;
			b_svm_i = true;
			break;
		}
		std::cout << " [PASS]" << std::endl;


		/*
		*	Check on Question traces.
		*	There should not exist a trace in which a negative state comes after a positive state.
		*/
		std::cout << "\t(5) checking question traces.";
		set_console_color(std::cout, RED);
		
		if (svm->check_question_set(gsets[QUESTION]) != 0) {	
			std::cout << std::endl << "check on question set returned an error." << std::endl;
			unset_console_color(std::cout);
			return -1;
		}
		unset_console_color(std::cout);
		std::cout << std::endl;


		/*
		*	b_similar_last_time stores the result of the previous round's convergence check.
		*	We only admit convergence when consecutive rounds agree.
		*	This guards against a round in which the sampled points fit the current classifier too well to adjust it.
		*/
		std::cout << "\t(6) check convergence:        ";
		if (svm->main_equation->is_similar(p) == 0) {
			if (b_similar_last_time == true) {
				std::cout << "[TT]  [SUCCESS] rounding off" << std::endl;
				b_converged = true;
				break;
			}
			std::cout << "[FT]";
			b_similar_last_time = true;
		}
		else {
			std::cout << ((b_similar_last_time == true) ? "[T" : "[F") << "F] ";
			b_similar_last_time = false;
		}
		std::cout << "  [FAIL] next round" << std::endl;
		if (p != NULL) {
			delete p;
		}
		p = svm->main_equation;
	} // end of SVM training procedure

	

	if ((b_converged) || (rnd >= max_iter)) {
		std::cout << "-------------------------------------------------------" << "-------------------------------------------------------------" << std::endl;
		std::cout << "finish running svm for " << rnd << " times." << std::endl;
		int equation_num = -1;
		Equation* equs = svm->roundoff(equation_num);
		assert(equation_num == 1);
		set_console_color(std::cout);
		if (b_converged)
			std::cout << "  Hypothesis Invariant (Converged): {\n";
		else
			std::cout << "  Hypothesis Invariant (Reached Maximum Iteration): {\n";
		std::cout << "\t\t" << equs[0] << std::endl;
		std::cout << "  }" << std::endl;
		unset_console_color(std::cout);
		delete[]equs;
		delete p;
		delete svm->main_equation;
		delete svm;
		return 0;
	}

	if (p == NULL) { // left the SVM loop in the first round, so p has not been set yet
		p = svm->main_equation;
	}
	delete svm;






	b_similar_last_time = false;
	int pre_equation_num = 1;
	//start SVM_I training
	assert(b_svm_i == true);
	SVM_I* svm_i = new SVM_I(print_null, p);

	int svm_i_start = rnd;
	for (; rnd <= max_iter; rnd++) {
//	init_svm_i:
		std::cout << "[" << rnd << "]SVM-I---------------------------------------------" << "-------------------------------------------------------------" << std::endl;
		if (rnd != svm_i_start) {
			
			int exes_each_equation = (after_exes + pre_equation_num - 1) / pre_equation_num;
			std::cout << "\t(1) execute programs...[" << exes_each_equation * pre_equation_num + random_exes << "] {";
			for (int i = 0; i < random_exes; i++) {
				Equation::linearSolver(NULL, inputs);
				std::cout << inputs << " | ";
				run_target(inputs);
			}
			p = svm_i->main_equation;
			for (int j = 0; j < exes_each_equation; j++) {
				Equation::linearSolver(p, inputs);
				std::cout << " | " << inputs;
				run_target(inputs);
			}
			for (int i = 0; i < svm_i->equ_num; i++) {
				p = &(svm_i->equations[i]);
				for (int j = 0; j < exes_each_equation; j++) {
					Equation::linearSolver(p, inputs);
					std::cout << " | " << inputs;
					run_target(inputs);
				}
			}
			std::cout << "}" << std::endl;
		}
		else {
			pre_positive_size = 0;
			pre_negative_size = 0;
		}

		std::cout << "\t(2) prepare training data... ";
		svm_i->prepare_training_data(gsets, pre_positive_size, pre_negative_size);
		std::cout << std::endl;

		std::cout << "\t(3) start training... ";
		int ret = svm_i->train();
		if (ret == -1)
			return -1;
		std::cout << svm_i->equ_num;
		std::cout << "|-->> ";
		set_console_color(std::cout);
		std::cout << *svm_i << std::endl;
		unset_console_color(std::cout);



		/*
		*	check on its own training data.
		*	There should be no prediction errors.
		*/
		std::cout << "\t(4) checking training traces.";
		double passRat = svm_i->predict_on_training_set();
		std::cout << " [" << passRat * 100 << "%]";
		if (passRat < 1) {
			set_console_color(std::cout, RED);
			std::cerr << "[FAIL] ..... Cannot divide by SVM_I." << std::endl;
			//std::cerr << "[FAIL] ..... Reaching maximium num of equation supported by SVM_I." << std::endl;
			//std::cerr << "You can increase the limit by modifying [classname::methodname]=SVM-I::SVM-I(..., int equ = **) " << std::endl;
			unset_console_color(std::cout);
			return -1;
			//			b_svm_i = true;
			//			break;
		}
		else {
			std::cout << " [PASS]" << std::endl;
		}
		
		


		/*
		*	Check on Question traces.
		*	There should not exist a trace in which a negative state comes after a positive state.
		*/
		std::cout << "\t(5) checking question traces.";
		if (svm_i->check_question_set(gsets[QUESTION]) != 0) {
			std::cout << std::endl << "check on question set returned an error." << std::endl;
			return -1;
		}
		std::cout << std::endl;
		

		/*
		*	b_similar_last_time stores the result of the previous round's convergence check.
		*	We only admit convergence when consecutive rounds agree.
		*	This guards against a round in which the sampled points fit the current classifier too well to adjust it.
		*/
		std::cout << "\t(6) check convergence:        ";
		if (pre_equation_num == svm_i->equ_num + 1) {
			if (b_similar_last_time == true) {
				std::cout << "[TT]  [SUCCESS] rounding off" << std::endl;
				b_converged = true;
				break;
			}
			std::cout << "[FT]";
			b_similar_last_time = true;
		}
		else {
			std::cout << ((b_similar_last_time == true) ? "[T" : "[F") << "F] ";
			b_similar_last_time = false;
		}
		std::cout << "  [FAIL] next round" << std::endl;
		pre_equation_num = svm_i->equ_num + 1;

		std::cout << std::endl;
	} // end of SVM-I training procedure




	
	std::cout << "-------------------------------------------------------" << "-------------------------------------------------------------" << std::endl;
	std::cout << "finish running svm-I for " << rnd - svm_i_start << " times." << std::endl;
	int equation_num = -1;
	Equation* equs = svm_i->roundoff(equation_num);
	set_console_color(std::cout);
	std::cout << "Hypothesis Invariant: {\n";
	std::cout << "\t     " << equs[0] << std::endl;
	for (int i = 1; i < equation_num; i++)
		std::cout << "\t  /\\ " << equs[i] << std::endl;
	std::cout << "}" << std::endl;
	unset_console_color(std::cout);
	
	
	delete[]equs;
	delete svm_i->main_equation;
	delete svm_i;
	return 0;
}
Example No. 5
int main(int argc, char** argv) {

  s32 opt;
  u8  mem_limit_given = 0, timeout_given = 0, qemu_mode = 0;
  u32 tcnt;
  char** use_argv;

  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;

  while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQ")) > 0)

    switch (opt) {

      case 'o':

        if (out_file) FATAL("Multiple -o options not supported");
        out_file = optarg;
        break;

      case 'm': {

          u8 suffix = 'M';

          if (mem_limit_given) FATAL("Multiple -m options not supported");
          mem_limit_given = 1;

          if (!strcmp(optarg, "none")) {

            mem_limit = 0;
            break;

          }

          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
              optarg[0] == '-') FATAL("Bad syntax used for -m");

          switch (suffix) {

            case 'T': mem_limit *= 1024 * 1024; break;
            case 'G': mem_limit *= 1024; break;
            case 'k': mem_limit /= 1024; break;
            case 'M': break;

            default:  FATAL("Unsupported suffix or bad syntax for -m");

          }

          if (mem_limit < 5) FATAL("Dangerously low value of -m");

          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
            FATAL("Value of -m out of range on 32-bit systems");

        }

        break;

      case 't':

        if (timeout_given) FATAL("Multiple -t options not supported");
        timeout_given = 1;

        if (strcmp(optarg, "none")) {
          exec_tmout = atoi(optarg);

          if (exec_tmout < 20 || optarg[0] == '-')
            FATAL("Dangerously low value of -t");

        }

        break;

      case 'e':

        if (edges_only) FATAL("Multiple -e options not supported");
        edges_only = 1;
        break;

      case 'q':

        if (quiet_mode) FATAL("Multiple -q options not supported");
        quiet_mode = 1;
        break;

      case 'Z':

        /* This is an undocumented option to write data in the syntax expected
           by afl-cmin. Nobody else should have any use for this. */

        cmin_mode  = 1;
        quiet_mode = 1;
        break;

      case 'A':

        /* Another afl-cmin specific feature. */
        at_file = optarg;
        break;

      case 'Q':

        if (qemu_mode) FATAL("Multiple -Q options not supported");
        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;

        qemu_mode = 1;
        break;

      default:

        usage(argv[0]);

    }

  if (optind == argc || !out_file) usage(argv[0]);

  setup_shm();
  setup_signal_handlers();

  set_up_environment();

  find_binary(argv[optind]);

  if (!quiet_mode) {
    show_banner();
    ACTF("Executing '%s'...\n", target_path);
  }

  detect_file_args(argv + optind);

  if (qemu_mode)
    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
  else
    use_argv = argv + optind;

  run_target(use_argv);

  tcnt = write_results();

  if (!quiet_mode) {

    if (!tcnt) FATAL("No instrumentation detected" cRST);
    OKF("Captured %u tuples in '%s'." cRST, tcnt, out_file);

  }

  exit(child_crashed * 2 + child_timed_out);

}
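This main routine appears to be AFL's afl-showmap tool. Assuming that, and based only on the options parsed above (-o is required, -m and -t accept "none", -Q enables QEMU mode), a typical invocation would look something like the line below; the paths and timeout value are placeholders.

./afl-showmap -o /tmp/trace.out -m none -t 5000 -- /path/to/target_binary [args...]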
Example No. 6
int DifferentialEvolution::recombination(RunManagerAbstract &run_manager)
{
	ostream &os = file_manager.rec_ofstream();

	int best_run_idx = 0;
	ModelRun run_target(obj_func_ptr);
	ModelRun run_canidate(obj_func_ptr);
	Parameters tmp_pars_targ;
	Observations tmp_obs_targ;
	Parameters tmp_pars_can;
	Observations tmp_obs_can;

	best_phi = std::numeric_limits<double>::max();

	int d = gen_1.get_nruns();
	int n_good_runs_targ = gen_1.get_num_good_runs();
	int n_good_runs_can = run_manager.get_num_good_runs();
	double phi_sum_targ = 0.0;
	double phi_sum_can = 0.0;
	double phi_sum_new = 0.0;
	double phi_max_targ = 0.0;
	double phi_max_can = 0.0;
	double phi_max_new = 0.0;
	double phi_min_targ = std::numeric_limits<double>::max();
	double phi_min_can = std::numeric_limits<double>::max();
	double phi_min_new = std::numeric_limits<double>::max();

	os << "  population phi values:" << endl;
	os << "              parent         candidate" << endl;
	os << "    id        phi            phi" << endl;
	os << "    ----     ---------      ---------" << endl;
	for (int i_run = 0; i_run < d; ++i_run)
	{
		double new_phi = std::numeric_limits<double>::max();
		bool run_target_ok = gen_1.get_run(i_run, tmp_pars_targ, tmp_obs_targ);
		bool  run_canidate_ok = run_manager.get_run(i_run, tmp_pars_can, tmp_obs_can);
		if (!run_canidate_ok && !run_target_ok)
		{
			//keep current target
			new_phi = std::numeric_limits<double>::max();
			++failed_runs_old;
			++failed_runs_new;
			os << "    " << left << setw(10) << i_run << setw(15) << "N/A" << setw(15) << "N/A";
			os << endl;

		}
		else if (!run_canidate_ok)
		{
			//keep current target
			++failed_runs_new;
			par_transform.model2ctl_ip(tmp_pars_targ);
			// compute phi
			run_target.update_ctl(tmp_pars_targ, tmp_obs_targ);
			double phi_target = run_target.get_phi(DynamicRegularization::get_unit_reg_instance());
			phi_sum_targ += phi_target;
			phi_sum_new += phi_target;
			phi_min_targ = min(phi_min_targ, phi_target);
			phi_max_targ = max(phi_max_targ, phi_target);
			os << "    " << left << setw(10) << i_run << setw(15) << phi_target << setw(15) << "N/A";
			os << endl;
		}
		else if (!run_target_ok)
		{
			gen_1.update_run(i_run, tmp_pars_can, tmp_obs_can);
			// compute phi
			par_transform.model2ctl_ip(tmp_pars_can);
			run_canidate.update_ctl(tmp_pars_can, tmp_obs_can);
			new_phi = run_canidate.get_phi(DynamicRegularization::get_unit_reg_instance());
			++failed_runs_old;
			phi_sum_can += new_phi;
			phi_sum_new += new_phi;
			phi_min_can = min(phi_min_can, new_phi);
			phi_max_can = max(phi_max_can, new_phi);
			os << "    " << left << setw(10) << i_run << setw(15) << "N/A" << setw(15) << new_phi;
			os << endl;
		}
		else
		{
			// process target parameters and observations
			par_transform.model2ctl_ip(tmp_pars_targ);
			run_target.update_ctl(tmp_pars_targ, tmp_obs_targ);
			double phi_target = run_target.get_phi(DynamicRegularization::get_unit_reg_instance());
			// process candidate parameters and observations
			par_transform.model2ctl_ip(tmp_pars_can);
			run_canidate.update_ctl(tmp_pars_can, tmp_obs_can);
			double phi_canidate = run_canidate.get_phi(DynamicRegularization::get_unit_reg_instance());
			new_phi = min(phi_target, phi_canidate);
			os << "    " << left << setw(10) << i_run;
			os << setw(15) << phi_target;
			os << setw(15) << phi_canidate;
			os << endl;
			if (phi_canidate < phi_target)
			{
				gen_1.update_run(i_run, tmp_pars_can, tmp_obs_can);
			}
			phi_sum_targ += phi_target;
			phi_sum_can += phi_canidate;
			phi_sum_new += new_phi;
			phi_min_can = min(phi_min_can, phi_canidate);
			phi_max_can = max(phi_max_can, phi_canidate);
			phi_min_targ = min(phi_min_targ, phi_target);
			phi_max_targ = max(phi_max_targ, phi_target);

		}
		phi_min_new = min(phi_min_new, new_phi);
		phi_max_new = max(phi_max_new, new_phi);
		if (new_phi < best_phi)
		{
			best_phi = new_phi;
			best_run_idx = i_run;
		}
	}
	double phi_avg_targ = std::numeric_limits<double>::max();
	double phi_avg_can = std::numeric_limits<double>::max();
	double phi_avg_new = std::numeric_limits<double>::max();
	int n_good_runs_new = gen_1.get_num_good_runs();
	if (n_good_runs_targ > 0)
	{
		phi_avg_targ = phi_sum_targ / n_good_runs_targ;
	}
	if (n_good_runs_can > 0)
	{
		phi_avg_can = phi_sum_can / n_good_runs_can;
	}
	if (n_good_runs_new > 0)
	{
		phi_avg_new = phi_sum_new / n_good_runs_new;
	}

	write_run_summary(cout, n_good_runs_targ, phi_avg_targ, phi_min_targ, phi_max_targ,
		n_good_runs_can, phi_avg_can, phi_min_can, phi_max_can,
		n_good_runs_new, phi_avg_new, phi_min_new, phi_max_new);
	cout << endl;
	os << endl;
	write_run_summary(os, n_good_runs_targ, phi_avg_targ, phi_min_targ, phi_max_targ,
		n_good_runs_can, phi_avg_can, phi_min_can, phi_max_can,
		n_good_runs_new, phi_avg_new, phi_min_new, phi_max_new);
	os << endl;
	
	return best_run_idx;
}