Example #1
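/* Stratified Monte Carlo estimate of the size of a state space.  Each of
 * the N passes keeps at most one representative state per stratum in Q[],
 * accumulating the weight that flows into every stratum, and the final
 * "Estimated size" is the average over the N passes.  The symbols Q,
 * sum_weights, transformations, compute_stratum, NUMBER_OF_STATES,
 * NUMBER_OF_POINTS and the gg_* random helpers are assumed to be defined
 * elsewhere in the surrounding project. */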
int main(int argc, char **argv)
{
  int visited_states[NUMBER_OF_STATES];
  int i;
  int k;
  int s;
  const int N = 100;
  long double size;

  gg_srand(6);

  for (k = 0; k < NUMBER_OF_STATES; k++) {
    visited_states[k] = 0;
    sum_weights[k] = 0.0;
  }
  
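  /* Each pass performs one stratified walk over the state graph: strata
     are processed from the highest index down to 0, and every stratum
     keeps a single randomly chosen representative state. */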
  for (i = 0; i < N; i++) {
    for (k = 0; k < NUMBER_OF_STATES; k++)
      Q[k].node = -1;

    Q[NUMBER_OF_STATES - 1].node = 0;
    Q[NUMBER_OF_STATES - 1].parent_stratum = -1;
    Q[NUMBER_OF_STATES - 1].weight = 1.0;
  
    for (k = NUMBER_OF_STATES - 1; k >= 0; k--) {
      int r;
      int state = Q[k].node;
      if (state == -1)
	continue;

      sum_weights[k] += Q[k].weight;
      
      for (r = k; r > 0; r = Q[r].parent_stratum) {
	visited_states[Q[r].node] = 1;
      }
      
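      /* Expand all transformations, skipping states already on the path. */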
      for (s = 0; s < 2 * NUMBER_OF_POINTS; s++) {
	int next_state = transformations[state][s];
	if (next_state >= 0
	    && !visited_states[next_state]) {
	  int next_stratum = compute_stratum(next_state, visited_states);
	  if (Q[next_stratum].node == -1) {
	    Q[next_stratum].node = next_state;
	    Q[next_stratum].parent_stratum = k;
	    Q[next_stratum].weight = Q[k].weight;
	  }
	  else {
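	    /* The stratum is already occupied: accumulate the incoming
	       weight and replace the stored state with probability
	       proportional to it (weighted reservoir sampling). */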
	    Q[next_stratum].weight += Q[k].weight;
	    if (gg_drand() * Q[next_stratum].weight < Q[k].weight) {
	      Q[next_stratum].node = next_state;
	      Q[next_stratum].parent_stratum = k;
	    }
	  }
	}
      }
      
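      /* Unmark the path before moving on to the next stratum. */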
      for (r = k; r > 0; r = Q[r].parent_stratum) {
	visited_states[Q[r].node] = 0;
      }
    }
  }

  size = 0.0;
  for (k = NUMBER_OF_STATES - 1; k >= 0; k--) {
    size += sum_weights[k];
    printf("%d %Lg %Lg\n", k, sum_weights[k] / N, size / N);
  }

  printf("Estimated size: %Lg\n%Lf\n", size / N, size / N);

  return 0;
}
Example #2
File: dfa.c  Project: epichub/neatzsche
/* dfa_patterns_optimize_variations() tries to reduce the size of the DFA
 * by altering pattern variations (in fact, transformations). The
 * algorithm is to change several patterns' variations and, if this
 * happens to give a size reduction, keep the change; otherwise, revert.
 *
 * This function contains many heuristically chosen values for the
 * variation change probability etc. They have been chosen by observing
 * the algorithm's effectiveness and seem to work very well.
 *
 * Note that we subtract 1 from the number of nodes to be consistent with
 * the standard builder, which doesn't count the error state.
 */
int *
dfa_patterns_optimize_variations(dfa_patterns *patterns, int iterations)
{
  int k = 0;
  int failed_iterations = 0;
  int min_nodes_so_far;
  int num_nodes_original;
  int *best_variations;
  double lower_limit = 2.0 / patterns->num_patterns;
  double upper_limit = 6.0 / patterns->num_patterns;
  double change_probability = 4.0 / patterns->num_patterns;
  dfa_pattern *pattern;

  best_variations = malloc(patterns->num_patterns * sizeof(*best_variations));
  assert(best_variations);
  for (pattern = patterns->patterns; pattern; pattern = pattern->next, k++)
    best_variations[k] = pattern->current_variation;

  dfa_patterns_build_graph(patterns);
  num_nodes_original = patterns->graph.num_nodes;
  min_nodes_so_far = num_nodes_original;

  fprintf(stderr, "Original number of DFA states: %d\n", min_nodes_so_far - 1);
  fprintf(stderr, "Trying to optimize in %d iterations\n", iterations);

  gg_srand(num_nodes_original + patterns->num_patterns);

  while (iterations--) {
    int changed_variations = 0;
    int k = 0;
    
    /* Randomly change some variations. */
    for (pattern = patterns->patterns; pattern; pattern = pattern->next, k++) {
      if (gg_drand() < change_probability && pattern->num_variations > 1) {
	int new_variation = gg_rand() % (pattern->num_variations - 1);
	if (new_variation >= pattern->current_variation)
	  new_variation++;
	pattern->current_variation = new_variation;
	changed_variations++;
      }
      else
	pattern->current_variation = best_variations[k];
    }

    if (changed_variations == 0) {
      iterations++;
      continue;
    }

    fprintf(stderr, ".");
    dfa_patterns_build_graph(patterns);

    if (patterns->graph.num_nodes < min_nodes_so_far) {
      /* If the new set of variations produces smaller dfa, save it. */
      int k = 0;
      for (pattern = patterns->patterns; pattern; pattern = pattern->next, k++)
	best_variations[k] = pattern->current_variation;

      fprintf(stderr, "\nOptimized: %d => %d states (%d iterations left)\n",
	      min_nodes_so_far - 1, patterns->graph.num_nodes - 1, iterations);
      min_nodes_so_far = patterns->graph.num_nodes;
      failed_iterations = 0;
    }
    else
      failed_iterations++;

    if (failed_iterations >= 30) {
      /* If we haven't succeeded in the last 30 iterations, try to alter
       * the variation change probability.
       */
      double delta = gg_drand() / patterns->num_patterns;
      if (change_probability > upper_limit
	  || (change_probability >= lower_limit && gg_rand() % 2 == 0))
	delta = -delta;

      change_probability += delta;
      failed_iterations = 0;
    }
  }

  fprintf(stderr, "\nTotal optimization result: %d => %d states\n",
	  num_nodes_original - 1, min_nodes_so_far - 1);

  dfa_graph_clear(&(patterns->graph));
  return best_variations;
}
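
/* Stratified sampling estimate of the probability that a random position
 * on a <height> x <width> board is legal (see the usage message below).
 * The board state type bstate, the stratum queues Q1 and Q2, and the
 * helpers setwidth(), startstate(), expandstate(), stratify() and
 * finalstate(), as well as MAX_NUMBER_OF_STRATA and the gg_* random
 * functions, are assumed to be provided elsewhere in this project. */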
int main(int argc, char **argv)
{
  int k;
  int j;
  long double size = 0.0;
  long double sum_size = 0.0;
  long double sum_squared_size = 0.0;
  int width;
  int height;
  struct queue_item *Qin = Q1;
  struct queue_item *Qout = Q2;
  struct queue_item *tmp;
  int x, y;
  int num_iterations = 10000;

  if (argc < 5) {
    fprintf(stderr, "Usage: estimate_legal_stratified <height> <width> <num_samples> <seed>\n");
    return 1;
  }

  height = atoi(argv[1]);
  width = atoi(argv[2]);
  num_iterations = atoi(argv[3]);
  gg_srand(atoi(argv[4]));
  setwidth(height);
  
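  /* One stratified sample per iteration: Qin holds the strata for the
     current board point, Qout receives the expanded states, and the two
     buffers are swapped after every point. */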
  for (j = 0; j < num_iterations; j++) {
    for (k = 0; k < MAX_NUMBER_OF_STRATA; k++)
      Qin[k].weight = -1.0;

    memcpy(Qin[0].node, *startstate(), sizeof(bstate));
    Qin[0].weight = 1.0;

    for (y = 0; y < width; y++)
      for (x = 0; x < height; x++) {

	for (k = 0; k <= height; k++)
	  Qout[k].weight = -1.0;
	
	for (k = 0; k <= height; k++) {
	  bstate expanded_states[3];
	  int num_expanded_states;
	  int m;
      
	  if (Qin[k].weight == -1.0)
	    continue;
      
	  num_expanded_states = expandstate(Qin[k].node, x, expanded_states);
	  for (m = 0; m < num_expanded_states; m++) {
	    int next_stratum = stratify(expanded_states[m]);
	    if (Qout[next_stratum].weight == -1.0) {
	      memcpy(Qout[next_stratum].node, expanded_states[m], sizeof(bstate));
	      Qout[next_stratum].weight = Qin[k].weight / 3.0;
	    }
	    else {
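	      /* Occupied stratum: accumulate the weight and keep the new
		 state with probability proportional to its contribution
		 (weighted reservoir sampling). */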
	      Qout[next_stratum].weight += Qin[k].weight / 3.0;
	      if (gg_drand() * Qout[next_stratum].weight < Qin[k].weight / 3.0)
		memcpy(Qout[next_stratum].node, expanded_states[m], sizeof(bstate));
	    }
	  }
	}

	tmp = Qin;
	Qin = Qout;
	Qout = tmp;
      }

    size = 0;
    for (k = 0; k <= height; k++)
      if (Qin[k].weight != -1.0 && finalstate(Qin[k].node))
	size += Qin[k].weight;

    sum_size += size;
    sum_squared_size += size * size;

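    /* Every 1000 samples, report the running mean, its estimated
       standard error, and the per-sample standard deviation. */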
    if ((j + 1) % 1000 == 0) {
      double std = sqrt(sum_squared_size - sum_size * sum_size / (j + 1)) / j;
      printf("%d %10.8Lg %lg %lg\n", j + 1, sum_size / (j + 1),
	     std, std * sqrt(j));
    }
  }
  
  printf("Estimated legal probability: %10.8Lg\n", sum_size / num_iterations);
  printf("Standard deviation: %lg\n",
	 sqrt(sum_squared_size - sum_size * sum_size / num_iterations) / num_iterations);
  printf("Standard deviation per sample: %lg\n",
	 sqrt((sum_squared_size - sum_size * sum_size / num_iterations) / num_iterations));
  
  return 0;
}