// Gets the next block to read and tries to free 'prev' block (if not NULL).
// Returns NULL if reached the end.
static cl_block* cl_next_block_to_read(cl_block* prev) {
  cl_block* block = NULL;
  if (g_log.read_iterator_state == g_log.num_cores) {
    // We are traversing dirty list; find the next dirty block.
    if (prev != NULL) {
      // Try to free the previous block if there is no unread data. This block
      // may have unread data if a previously incomplete record completed
      // between read_next() calls.
      block = prev->link.next->block;
      if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
        cl_block_list_remove(&g_log.dirty_block_list, prev);
        cl_block_list_insert_at_head(&g_log.free_block_list, prev);
      }
    } else {
      block = cl_block_list_head(&g_log.dirty_block_list);
    }
    if (block != NULL) {
      return block;
    }
    // We are done with the dirty list; moving on to core-local blocks.
  }
  while (g_log.read_iterator_state > 0) {
    g_log.read_iterator_state--;
    block = cl_core_local_block_get_block(
        &g_log.core_local_blocks[g_log.read_iterator_state]);
    if (block != NULL) {
      return block;
    }
  }
  return NULL;
}
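// A minimal reader sketch, assuming the census_log_init_reader() and
// census_log_read_next() functions declared in the corresponding header; the
// read_next() path is what drives cl_next_block_to_read() above, and
// 'process_record' is a hypothetical consumer:
//
//   size_t bytes_available;
//   const void* record;
//   census_log_init_reader();
//   while ((record = census_log_read_next(&bytes_available)) != NULL) {
//     process_record(record, bytes_available);
//   }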
// Allocates a new free block (or recycles an available dirty block if log is
// configured to discard old records). Returns NULL if out-of-space.
static cl_block* cl_allocate_block(void) {
  cl_block* block = cl_block_list_head(&g_log.free_block_list);
  if (block != NULL) {
    cl_block_list_remove(&g_log.free_block_list, block);
    return block;
  }
  if (!g_log.discard_old_records) {
    // No free block and log is configured to keep old records.
    return NULL;
  }
  // Recycle dirty block. Start from the oldest.
  for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
       block = block->link.next->block) {
    if (cl_block_try_disable_access(block, 1 /* discard data */)) {
      cl_block_list_remove(&g_log.dirty_block_list, block);
      return block;
    }
  }
  return NULL;
}
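// A minimal writer sketch, assuming the census_log_start_write() and
// census_log_end_write() functions declared in the corresponding header;
// cl_allocate_block() above is typically reached when the calling core's
// local block cannot satisfy the reservation. 'my_record' is a hypothetical
// payload:
//
//   void* buf = census_log_start_write(sizeof(my_record));
//   if (buf != NULL) {
//     memcpy(buf, &my_record, sizeof(my_record));
//     census_log_end_write(buf, sizeof(my_record));
//   }
//   // A NULL return indicates out-of-space (see out_of_space_count).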
// External functions: primary stats_log interface
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  // Check cacheline alignment.
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  // Ensure that we will not get any overflow in calculating num_blocks.
  GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
  GPR_ASSERT(size_in_mb < 1000);
  // Ensure at least 2x as many blocks as there are cores.
  g_log.num_blocks =
      (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
                                                 CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block*)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
    cl_block* block = g_log.blocks + i;
    cl_block_initialize(block,
                        g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}
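// A minimal initialization/teardown sketch, assuming census_log_shutdown() is
// the matching cleanup entry point from the same interface; the size and flag
// values here are illustrative only:
//
//   census_log_initialize(4 /* size_in_mb */, 1 /* discard_old_records */);
//   // ... write and read records ...
//   census_log_shutdown();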