Example #1
void push(const std::vector<int> &cand) {
  // Pick a random column among the candidate columns.
  int pos = cand[std::uniform_int_distribution<>(0, (int)cand.size() - 1)(rnd)];
  // Shift the chosen column down one row and drop the next piece on top.
  for (int i = height - 2; i >= 0; --i)
    board[i+1][pos] = board[i][pos];
  board[0][pos] = next;
  next_gen();
}
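next_gen() is not defined in this snippet; since push() writes `next` onto the top of a column, it most likely draws the piece to be pushed next. A minimal sketch of that assumed shape in C, where NUM_PIECE_TYPES is illustrative and rand() stands in for the snippet's C++ engine `rnd`:

#include <stdlib.h>

#define NUM_PIECE_TYPES 6   /* illustrative; not from the snippet */

int next;                   /* the value push() drops onto the board */

/* Assumed shape of next_gen(): draw the next piece uniformly at random. */
void next_gen(void)
{
    next = rand() % NUM_PIECE_TYPES + 1;
}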
Example #2
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_large_noref,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == NULL) {
    // Tell the next generation we reached a limit.
    HeapWord* new_limit =
      next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
    if (new_limit != NULL) {
      eden()->set_soft_end(new_limit);
      result = eden()->par_allocate(word_size);
    } else {
      assert(eden()->soft_end() == eden()->end(),
             "invalid state after allocation_limit_reached returned null");
    }

    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
    }
  }
  return result;
}
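The comments above note that DefNewGeneration supports lock-free allocation, which is why even this slow path goes through par_allocate. A minimal C11 sketch of the lock-free bump-pointer idea behind a par_allocate-style routine; arena_t and its fields are illustrative names, not HotSpot's:

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    _Atomic(char *) top;   /* next free byte */
    char *end;             /* hard end of the space */
} arena_t;

/* Bump-pointer allocation without locks: advance `top` by `size` bytes
 * with a compare-and-swap, retrying if another thread got there first. */
static void *arena_par_allocate(arena_t *a, size_t size) {
    char *old_top = atomic_load(&a->top);
    for (;;) {
        if ((size_t)(a->end - old_top) < size)
            return NULL;   /* exhausted: the caller falls back further */
        /* On failure, old_top is reloaded with the current value. */
        if (atomic_compare_exchange_weak(&a->top, &old_top, old_top + size))
            return old_top;
    }
}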
Example #3
int main() {
	GRID_TYPE grid[COLS][ROWS];
	GRID_TYPE ghost_grid[COLS+2][ROWS+2];
	int x;
	int y;

	/* Generate a random grid */
	for (y = 0; y < ROWS; y++) {
		for (x = 0; x < COLS; x++) {
			grid[x][y] = get_random(2);
		}
	}

	pretty_print(grid);
	update_ghost(grid, ghost_grid);

	int iterations = 10000;

	while(iterations--) {
		puts("\n\n\n");
		pretty_print(grid);
		//nanosleep(&wait, NULL);

		next_gen(grid, ghost_grid);
	}

	return 0;
}
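Neither update_ghost nor next_gen is shown in this snippet. The ghost_grid dimensions [COLS+2][ROWS+2] suggest a padded-border ("ghost cell") scheme that avoids bounds checks when counting neighbors. A hedged sketch of what these two functions could look like for Conway's Game of Life, reusing the snippet's COLS, ROWS, and GRID_TYPE; the bodies below are assumptions, not the project's code:

/* Copy the live grid into the interior of the padded ghost grid and
 * zero the one-cell border, so neighbor counting needs no bounds checks. */
void update_ghost(GRID_TYPE grid[COLS][ROWS],
                  GRID_TYPE ghost[COLS+2][ROWS+2])
{
	for (int x = 0; x < COLS+2; x++)
		for (int y = 0; y < ROWS+2; y++)
			ghost[x][y] = 0;
	for (int x = 0; x < COLS; x++)
		for (int y = 0; y < ROWS; y++)
			ghost[x+1][y+1] = grid[x][y];
}

/* One Game-of-Life step: count the eight neighbors in the ghost grid,
 * apply the birth/survival rule, then refresh the ghost grid. */
void next_gen(GRID_TYPE grid[COLS][ROWS],
              GRID_TYPE ghost[COLS+2][ROWS+2])
{
	for (int x = 0; x < COLS; x++) {
		for (int y = 0; y < ROWS; y++) {
			int n = 0;
			for (int dx = -1; dx <= 1; dx++)
				for (int dy = -1; dy <= 1; dy++)
					if (dx || dy)
						n += ghost[x+1+dx][y+1+dy];
			grid[x][y] = (n == 3 || (n == 2 && ghost[x+1][y+1]));
		}
	}
	update_ghost(grid, ghost);
}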
Example #4
int main(void)
{
	int width = 64;
	int generations = 32;
	int * board1 = calloc(width, sizeof(int));
	int * board2 = calloc(width, sizeof(int));
	if (board1 == NULL || board2 == NULL) {
		free(board1);
		free(board2);
		return 1;
	}

	// Initial generation: a single live cell in the middle.
	board1[width / 2] = 1;

	for (int i = 0; i < generations; i++)
	{
		// Print the current generation.
		// printf("Generation %02d: ", i);
		print_board(board1, width);

		// Compute the next generation.
		next_gen(board1, board2, width);

		// Swap the current and previous generations.
		int * tmp = board1;
		board1 = board2;
		board2 = tmp;
	}

	free(board1);
	free(board2);
	return 0;
}
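next_gen is not part of this snippet. Given a single live cell seeded in the middle and a fresh board written each step, an elementary cellular automaton fits; the sketch below assumes Rule 90, under which each cell becomes the XOR of its two neighbors and this seed prints a Sierpinski triangle:

/* One step of elementary Rule 90: next[i] = prev[i-1] XOR prev[i+1],
 * treating cells outside the board as dead. */
void next_gen(const int *prev, int *next, int width)
{
	for (int i = 0; i < width; i++) {
		int left  = (i > 0)         ? prev[i - 1] : 0;
		int right = (i < width - 1) ? prev[i + 1] : 0;
		next[i] = left ^ right;
	}
}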
Example #5
void run_cpu(void) {
    randomize_board();
    int gen = 0;
    // Print a snapshot of the board every 100,000 generations.
    for (int i = 0; i < 10; i++) {
        printf("GENERATION %d\n", gen);
        print_board();
        for (int j = 0; j < 100000; j++) {
            next_gen();
        }
        gen += 100000;
    }
}
Example #6
void ASParNewGeneration::compute_new_size() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");


  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");

  size_t survived = from()->used();
  if (!survivor_overflow()) {
    // Keep running averages on how much survived
    size_policy->avg_survived()->sample(survived);
  } else {
    size_t promoted =
      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
    assert(promoted < gch->capacity(), "Conversion problem?");
    size_t survived_guess = survived + promoted;
    size_policy->avg_survived()->sample(survived_guess);
  }

  size_t survivor_limit = max_survivor_size();
  _tenuring_threshold =
    size_policy->compute_survivor_space_size_and_threshold(
                                                     _survivor_overflow,
                                                     _tenuring_threshold,
                                                     survivor_limit);
  size_policy->avg_young_live()->sample(used());
  size_policy->avg_eden_live()->sample(eden()->used());

  size_policy->compute_young_generation_free_space(eden()->capacity(),
                                                   max_gen_size());

  resize(size_policy->calculated_eden_size_in_bytes(),
         size_policy->calculated_survivor_size_in_bytes());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters =
      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
    assert(counters->kind() ==
           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
      "Wrong kind of counters");
    counters->update_tenuring_threshold(_tenuring_threshold);
    counters->update_survivor_overflowed(_survivor_overflow);
    counters->update_young_capacity(capacity());
  }
}
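The sample() calls above feed running averages that the adaptive size policy consults. A small C sketch of the exponentially weighted average such a sampler typically maintains; avg_t and the weight value are illustrative, not the actual AdaptiveWeightedAverage implementation:

typedef struct {
    double average;   /* current weighted average */
    double weight;    /* fraction of each new sample kept, e.g. 0.25 */
} avg_t;

/* Fold one new sample into the running average:
 * avg = (1 - w) * avg + w * sample. */
static void avg_sample(avg_t *a, double sample) {
    a->average += a->weight * (sample - a->average);
}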
Example #7
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is already equal to the
      // hard limit, so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Retry the allocation until it succeeds or the soft limit can no
    // longer be raised.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
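Note that the Atomic::cmpxchg_ptr result above is deliberately unused: if the exchange fails, another thread already moved the soft limit, and the do/while loop simply re-reads it. A tiny C11 sketch of that raise-and-ignore pattern, with illustrative names:

#include <stdatomic.h>

/* Raise a shared soft limit to `new_limit` only if it still equals
 * `old_limit`, mirroring the cmpxchg call above.  The result can be
 * ignored: on failure, another thread already adjusted the limit, and
 * the caller's retry loop re-reads it anyway. */
static void raise_soft_limit(_Atomic(char *) *soft_end,
                             char *old_limit, char *new_limit) {
    atomic_compare_exchange_strong(soft_end, &old_limit, new_limit);
}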
Example #8
int main()
{
	srand(0);

	init_GP();
	init_pop();

	oclInit();
	oclBuffer();

//	size_t ws, ls;
//	clGetKernelWorkGroupInfo(clKernel1, clDeviceId, CL_KERNEL_WORK_GROUP_SIZE, sizeof(ws), (void *) &ws, NULL);
//	printf("CL_KERNEL_WORK_GROUP_SIZE is: %i \n", ws);
//	clGetKernelWorkGroupInfo(clKernel1, clDeviceId, CL_KERNEL_LOCAL_MEM_SIZE , sizeof(ls), (void *) &ls, NULL);
//	printf("CL_KERNEL_LOCAL_MEM_SIZE is: %i \n", ls);

	fitness_func();
	ocl_fitness_func();

	// Verify the OpenCL fitness results against the CPU reference.
	for (int i=0; i<POP_SIZE; i++)
	{
		if (fitness_cpu[i] != fitness_gpu[i])
			printf("mismatch at i = %i \n", i);
		printf("fitness_gpu[%i] = %i, fitness_cpu[%i] = %i \n", i, fitness_gpu[i], i, fitness_cpu[i]);
	}
	// Evolve the remaining generations, re-evaluating fitness on both
	// CPU and GPU after each one.
	for (int i=1; i<GENERATION; i++)
	{
		next_gen();
		fitness_func();
		ocl_fitness_func();
		gen_per(i);
	}
	oclClean();
	test_gp();
	printResult();

	return 0;
}
Example #9
/**
 * Called in state CHANGE to process a ping.
 *
 * \param[in] ping  Ping to process
 */
static void
state_change_process_ping(const sup_ping_t *ping)
{
  switch (ping->view.state)
    {
    case SUP_STATE_UNKNOWN:
      /* Not possible */
      break;

    case SUP_STATE_CHANGE:
      if (self_is_coord())
	{
	  __trace("i'm coord");

	  /* Go to state ACCEPT if all the nodes in our clique see us as
	   * their coordinator. This will initiate the agreement phase.
	   */
	  if (clique_sees_self_as_coord())
	    {
	      __trace("+++ EVERYONE IN MY CLIQUE SEES ME AS COORD");

	      accept_clique(next_gen());
	      __trace("\t=> new state=%s, accepted=%u",
		     sup_state_name(self->view.state),
		     self->view.accepted);
	    }
	}
      break;

    case SUP_STATE_ACCEPT:
      if (!self_is_coord())
	{
	  /* Go to state ACCEPT if the sender is our coordinator, views the
	   * same clique as us and its accepted clique generation is higher
	   * than ours.
	   */
	  if (ping->sender == self->view.coord
	      && exa_nodeset_equals(&ping->view.clique, &self->view.clique)
	      && ping->view.accepted > self_highest_gen())
	    {
	      __trace("coord ok, view ok, accepted ok => i accept");
	      accept_clique(ping->view.accepted);
	    }
	}
      break;

    case SUP_STATE_COMMIT:
      if (!self_is_coord())
	{
	  /* Commit the membership if we accepted it */
	  if (exa_nodeset_equals(&ping->view.clique, &accepted_clique)
	      && ping->view.committed == self->view.accepted
	      && ping->view.committed > self->view.committed)
	    {
	      __trace("saw commit %u from %u and i accepted %u => i commit %u",
		      ping->view.committed, ping->sender, self->view.accepted,
		      self->view.accepted);
	      commit_clique();
	    }
	}
      break;
    }
}
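next_gen() in this snippet plausibly returns a fresh, monotonically increasing clique generation number to tag the ACCEPT round; its definition is not shown, so the sketch below is only an assumed shape:

/* Assumed shape: hand out strictly increasing generation numbers
 * for successive accept rounds. */
static unsigned generation;

static unsigned next_gen(void)
{
  return ++generation;
}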