Example #1
/* Spawns three worker threads per core and reports which CPU cores they
   were observed running on. */
static void cpu_test(void) {
  uint32_t i;
  int cores_seen = 0;
  struct cpu_test ct;
  gpr_thd_id thd;
  ct.ncores = gpr_cpu_num_cores();
  GPR_ASSERT(ct.ncores > 0);
  ct.nthreads = (int)ct.ncores * 3;
  ct.used = gpr_malloc(ct.ncores * sizeof(int));
  memset(ct.used, 0, ct.ncores * sizeof(int));
  gpr_mu_init(&ct.mu);
  gpr_cv_init(&ct.done_cv);
  ct.is_done = 0;
  /* Start three workers per core. */
  for (i = 0; i < ct.ncores * 3; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &worker_thread, &ct, NULL));
  }
  gpr_mu_lock(&ct.mu);
  while (!ct.is_done) {
    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&ct.mu);
  fprintf(stderr, "Saw cores [");
  for (i = 0; i < ct.ncores; i++) {
    if (ct.used[i]) {
      fprintf(stderr, "%d,", i);
      cores_seen++;
    }
  }
  fprintf(stderr, "] (%d/%d)\n", cores_seen, ct.ncores);
  gpr_free(ct.used);
}
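
The test depends on a struct cpu_test and a worker_thread that are not shown here. Below is a minimal sketch consistent with the usage above; the exact field layout, the busy-work loop, and the gpr_cpu_current_cpu() call are assumptions rather than the original helpers.

/* Sketch only: fields inferred from how cpu_test uses them. */
struct cpu_test {
  gpr_mu mu;
  gpr_cv done_cv;
  uint32_t ncores;  /* from gpr_cpu_num_cores() */
  int nthreads;     /* workers still running */
  int is_done;      /* set by the last worker to exit */
  int* used;        /* used[i] != 0 iff some worker ran on core i */
};

static void worker_thread(void* arg) {
  struct cpu_test* ct = (struct cpu_test*)arg;
  /* Do a little busy work so the scheduler spreads workers across cores,
     then record the core this thread is currently running on. */
  unsigned r = 12345;
  for (int j = 0; j < 1000000; j++) r = r * 1664525u + 1013904223u;
  (void)r;
  uint32_t cpu = gpr_cpu_current_cpu(); /* assumed gpr API */
  GPR_ASSERT(cpu < ct->ncores);
  gpr_mu_lock(&ct->mu);
  ct->used[cpu] = 1;
  if (--ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}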
Example #2
// Tests scenario where block being read is detached from a core and put on the
// dirty list.
void test_detached_while_reading(void) {
  printf("Starting test: detached while reading\n");
  setup_test(0);
  // Start a write.
  static const size_t DWR_RECORD_SIZE = 10;
  void* record_written = census_log_start_write(DWR_RECORD_SIZE);
  GPR_ASSERT(record_written != NULL);
  census_log_end_write(record_written, DWR_RECORD_SIZE);
  // Read this record.
  census_log_init_reader();
  size_t bytes_available;
  const void* record_read = census_log_read_next(&bytes_available);
  GPR_ASSERT(record_read != NULL);
  GPR_ASSERT(DWR_RECORD_SIZE == bytes_available);
  // Now fill the log. This will move the block being read from core-local
  // array to the dirty list.
  while ((record_written = census_log_start_write(DWR_RECORD_SIZE))) {
    census_log_end_write(record_written, DWR_RECORD_SIZE);
  }

  // In this iteration, read_next() should only traverse blocks in the
  // core-local array, so we expect at most gpr_cpu_num_cores() more
  // blocks. Since the log is full, if read_next() were traversing the
  // dirty list we would see more than gpr_cpu_num_cores() blocks.
  int blocks_read = 0;
  while ((record_read = census_log_read_next(&bytes_available))) {
    ++blocks_read;
    GPR_ASSERT(blocks_read <= (int)gpr_cpu_num_cores());
  }
  census_log_shutdown();
}
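
setup_test() is a fixture helper that is not shown. A plausible sketch, assuming a LOG_SIZE_IN_MB constant and that census_log_remaining_space() reports the free capacity of a fresh log (both are assumptions about the test harness):

/* Sketch of the assumed fixture: initialize a log; the argument selects
   circular (discard-old-records) mode, so setup_test(0) gives a log that
   fills up and starts rejecting writes. */
#define LOG_SIZE_IN_MB 1

static void setup_test(int circular_log) {
  census_log_initialize(LOG_SIZE_IN_MB, circular_log);
  /* A fresh log should expose its full capacity (assumed API). */
  GPR_ASSERT(census_log_remaining_space() == LOG_SIZE_IN_MB * 1024 * 1024);
}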
Example #3
/* Given log size and record size, computes the minimum usable space. */
static int32_t min_usable_space(size_t log_size, size_t record_size) {
  int32_t usable_space;
  int32_t num_blocks =
      GPR_MAX(log_size / CENSUS_LOG_MAX_RECORD_SIZE, gpr_cpu_num_cores());
  int32_t waste_per_block = CENSUS_LOG_MAX_RECORD_SIZE % record_size;
  /* In the worst case, every block except one core-local block is full. */
  int32_t num_full_blocks = num_blocks - 1;
  usable_space = (int32_t)log_size -
                 (num_full_blocks * CENSUS_LOG_MAX_RECORD_SIZE) -
                 ((num_blocks - num_full_blocks) * waste_per_block);
  GPR_ASSERT(usable_space > 0);
  return usable_space;
}
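
To make the formula concrete, assume CENSUS_LOG_2_MAX_RECORD_SIZE is 14 (so CENSUS_LOG_MAX_RECORD_SIZE is 16384) and a machine with at most 64 cores; both values are illustrative assumptions. For log_size = 1048576 (1 MB) and record_size = 100:

  num_blocks      = GPR_MAX(1048576 / 16384, ncores) = 64
  waste_per_block = 16384 % 100                      = 84
  num_full_blocks = 64 - 1                           = 63
  usable_space    = 1048576 - 63 * 16384 - 1 * 84    = 16300

That is, the worst case leaves exactly one block's capacity minus its waste, 16384 - 84 = 16300 bytes: room for 163 records of 100 bytes.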
Example #4
// External functions: primary stats_log interface
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  // Check cacheline alignment.
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  // Ensure that we will not get any overflow in calculating num_blocks.
  GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
  GPR_ASSERT(size_in_mb < 1000);
  // Ensure at least 2x as many blocks as there are cores.
  g_log.num_blocks =
      (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
                                                 CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block*)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
    cl_block* block = g_log.blocks + i;
    cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}
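
Putting the pieces together, a typical life cycle of the log uses only the calls that appear in these examples; the function name and the sizes below are illustrative, not part of the library:

void log_round_trip(void) {
  census_log_initialize(1 /* MB */, 0 /* keep old records */);

  /* Writer: reserve space in a block, fill it, then commit. */
  void* slot = census_log_start_write(10);
  if (slot != NULL) {
    memset(slot, 0x2a, 10);
    census_log_end_write(slot, 10);
  }

  /* Reader: snapshot the iterator state, then walk the records. */
  census_log_init_reader();
  size_t len;
  const void* rec;
  while ((rec = census_log_read_next(&len)) != NULL) {
    /* consume len bytes at rec */
  }

  census_log_shutdown();
}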
Example #5
/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  gpr_int32 ix;
  /* Check cacheline alignment. */
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  /* Ensure at least as many blocks as there are cores. */
  g_log.num_blocks = GPR_MAX(
      g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  gpr_atm_rel_store(&g_log.is_full, 0);
  g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block *)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (ix = 0; ix < g_log.num_blocks; ++ix) {
    cl_block *block = g_log.blocks + ix;
    cl_block_initialize(block,
                        g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}
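
Compared with Example #4, this earlier revision shifts size_in_mb << 20 with no overflow guard and reserves only one block per core rather than two. The newer code's GPR_ASSERT(size_in_mb < 1000) exists because the shift can wrap; a standalone illustration of the wrap, assuming 32-bit arithmetic (not the original code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* 4096 << 20 == 2^32, which wraps to 0 in 32-bit unsigned arithmetic,
     yielding a log with zero blocks if left unchecked. */
  uint32_t size_in_mb = 4096;
  uint32_t bytes = size_in_mb << 20;
  printf("%u MB -> %u bytes after the shift\n", size_in_mb, bytes);
  return 0;
}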