void* census_log_start_write(size_t size) { // Used to bound number of times block allocation is attempted. GPR_ASSERT(size > 0); GPR_ASSERT(g_log.initialized); if (size > CENSUS_LOG_MAX_RECORD_SIZE) { return NULL; } uint32_t attempts_remaining = g_log.num_blocks; uint32_t core_id = gpr_cpu_current_cpu(); do { void* record = NULL; cl_block* block = cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]); if (block && (record = cl_block_start_write(block, size))) { return record; } // Need to allocate a new block. We are here if: // - No block associated with the core OR // - Write in-progress on the block OR // - block is out of space gpr_mu_lock(&g_log.lock); bool allocated = cl_allocate_core_local_block(core_id, block); gpr_mu_unlock(&g_log.lock); if (!allocated) { gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); return NULL; } } while (attempts_remaining--); // Give up. gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); return NULL; }
/* Test worker: repeatedly burns CPU, records which core it observed itself
   running on, and exits early once every core has been seen at least once.
   The last thread to finish signals done_cv. */
static void worker_thread(void *arg) {
  struct cpu_test *ct = (struct cpu_test *)arg;
  /* Scratch accumulator so the busy loop cannot be optimized away. */
  unsigned mix = 12345678;
  /* Hoist the slowdown-factor divisions out of the loop headers. */
  int64_t outer_limit = 1000 / grpc_test_slowdown_factor();
  int64_t inner_limit = 1000000 / grpc_test_slowdown_factor();
  unsigned outer;
  for (outer = 0; outer < outer_limit; outer++) {
    /* Run for a bit - just calculate something random. */
    unsigned spin;
    for (spin = 0; spin < inner_limit; spin++) {
      mix = (mix * 17) & ((mix - outer) | (mix * outer));
    }
    uint32_t cpu = gpr_cpu_current_cpu();
    GPR_ASSERT(cpu < ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    unsigned seen;
    for (seen = 0; seen < ct->ncores; seen++) {
      if (!ct->used[seen]) {
        break;
      }
    }
    gpr_mu_unlock(&ct->mu);
    if (seen == ct->ncores) {
      /* All cpus have been used - no further use in running this test. */
      break;
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = mix; /* make it look like we care about the scratch value... */
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}
/* Test worker: repeatedly burns CPU, records which core it observed itself
   running on, and exits early once every core has been seen at least once.
   The last thread to finish signals done_cv. */
static void worker_thread(void *arg) {
  struct cpu_test *ct = (struct cpu_test *)arg;
  uint32_t cpu;
  unsigned r = 12345678;
  unsigned i, j;
  /* Hoist the loop-invariant slowdown-factor divisions out of the loop
     conditions so they are evaluated once instead of on every iteration
     (matches the sibling version of this test). */
  const unsigned max_i = 1000 / GRPC_TEST_SLOWDOWN_FACTOR;
  const unsigned max_j = 1000000 / GRPC_TEST_SLOWDOWN_FACTOR;
  for (i = 0; i < max_i; i++) {
    /* run for a bit - just calculate something random. */
    for (j = 0; j < max_j; j++) {
      r = (r * 17) & ((r - i) | (r * i));
    }
    cpu = gpr_cpu_current_cpu();
    GPR_ASSERT(cpu < ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    for (j = 0; j < ct->ncores; j++) {
      if (!ct->used[j]) break;
    }
    gpr_mu_unlock(&ct->mu);
    if (j == ct->ncores) {
      break; /* all cpus have been used - no further use in running this test */
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = r; /* make it look like we care about r's value... */
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}
/* Reserves space for a record of `size` bytes in the log.
   Returns a pointer to the reserved region that the caller may fill in, or
   NULL when the record exceeds CENSUS_LOG_MAX_RECORD_SIZE or the log is out
   of space (out_of_space_count is bumped in the latter case). */
void *census_log_start_write(size_t size) {
  /* Used to bound number of times block allocation is attempted. */
  gpr_int32 attempts_remaining = g_log.num_blocks;
  /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
  gpr_int32 core_id = gpr_cpu_current_cpu();
  GPR_ASSERT(g_log.initialized);
  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
    return NULL;
  }
  do {
    int allocated;
    void *record = NULL;
    /* Fast path: try to write into this core's currently assigned block. */
    cl_block *block =
        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
    if (block && (record = cl_block_start_write(block, size))) {
      return record;
    }
    /* Need to allocate a new block. We are here if:
       - No block associated with the core OR
       - Write in-progress on the block OR
       - block is out of space */
    /* Acquire-load pairs with the writer that sets is_full, so a full log
       is detected without taking the lock. */
    if (gpr_atm_acq_load(&g_log.is_full)) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
    gpr_mu_lock(&g_log.lock);
    allocated = cl_allocate_core_local_block(core_id, block);
    gpr_mu_unlock(&g_log.lock);
    if (!allocated) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
  } while (attempts_remaining--);
  /* Give up. */
  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
  return NULL;
}