void *census_log_start_write(size_t size) {
  GPR_ASSERT(size > 0);
  GPR_ASSERT(g_log.initialized);
  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
    return NULL;
  }
  // Used to bound number of times block allocation is attempted.
  uint32_t attempts_remaining = g_log.num_blocks;
  uint32_t core_id = gpr_cpu_current_cpu();
  do {
    void *record = NULL;
    cl_block *block =
        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
    if (block && (record = cl_block_start_write(block, size))) {
      return record;
    }
    // Need to allocate a new block. We are here if:
    // - No block associated with the core OR
    // - Write in-progress on the block OR
    // - block is out of space
    gpr_mu_lock(&g_log.lock);
    bool allocated = cl_allocate_core_local_block(core_id, block);
    gpr_mu_unlock(&g_log.lock);
    if (!allocated) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
  } while (attempts_remaining--);
  // Give up.
  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
  return NULL;
}
int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
  lockfree_node head;
  lockfree_node newhead;
  do {
    head.atm = gpr_atm_acq_load(&(stack->head.atm));
    if (head.contents.index == INVALID_ENTRY_INDEX) {
      return -1;
    }
    newhead.atm =
        gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
  } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
#ifndef NDEBUG
  /* Check for valid pop */
  {
    int pushed_index = head.contents.index / (int)(8 * sizeof(gpr_atm));
    int pushed_bit = head.contents.index % (int)(8 * sizeof(gpr_atm));
    gpr_atm old_val;
    old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
                                           -((gpr_atm)1 << pushed_bit));
    GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) != 0);
  }
#endif
  return head.contents.index;
}
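A minimal caller-side sketch of the pop protocol: pop returns the popped entry's small integer index, or -1 once the head carries INVALID_ENTRY_INDEX, so a drain loop only needs to watch for the sentinel. process_entry here is a hypothetical application callback, not part of the library.

static void process_entry(int entry); /* hypothetical per-entry work */

static void drain_stack(gpr_stack_lockfree *stack) {
  int entry;
  /* -1 is the empty-stack sentinel returned by gpr_stack_lockfree_pop */
  while ((entry = gpr_stack_lockfree_pop(stack)) != -1) {
    process_entry(entry);
  }
}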
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,
                      grpc_completion_type type) {
  gpr_ref(&cc->refs);
  if (call) grpc_call_internal_ref(call);
#ifndef NDEBUG
  gpr_atm_no_barrier_fetch_add(&cc->pending_op_count[type], 1);
#endif
}
void gpr_ref_non_zero(gpr_refcount *r) {
#ifndef NDEBUG
  gpr_atm prior = gpr_atm_no_barrier_fetch_add(&r->count, 1);
  assert(prior > 0);
#else
  gpr_ref(r);
#endif
}
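For context, a minimal sketch of the gpr_refcount lifecycle these helpers serve; gpr_ref_init and gpr_unref are the matching public helpers (gpr_unref returns non-zero when the count reaches zero). gpr_ref_non_zero is meant for paths that must already hold a reference, so debug builds can assert the count was not zero. The widget type and its functions are hypothetical.

typedef struct {
  gpr_refcount refs;
  /* ... payload ... */
} widget; /* hypothetical reference-counted object */

static widget *widget_create(void) {
  widget *w = gpr_malloc(sizeof(*w));
  gpr_ref_init(&w->refs, 1); /* caller owns the initial reference */
  return w;
}

static void widget_unref(widget *w) {
  if (gpr_unref(&w->refs)) { /* non-zero when the count just hit zero */
    gpr_free(w);
  }
}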
static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
                          int barrier REF_MUTATE_EXTRA_ARGS) {
  gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
                            : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
          old_val + delta, reason);
#endif
  return old_val;
}
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
          fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
                          int barrier REF_MUTATE_EXTRA_ARGS) {
  gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
                            : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
  if (GRPC_TRACER_ON(grpc_trace_lb_policy_refcount)) {
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "LB_POLICY: 0x%p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
            purpose, old_val, old_val + delta, reason);
  }
#endif
  return old_val;
}
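The ref_pair mutated above packs two counters into a single atomic word so strong and weak references can be adjusted with one fetch-add. A sketch of the idea follows; the 16-bit split mirrors gRPC's scheme, but WEAK_BITS, STRONG_ONE, and the helper functions here are illustrative assumptions, not the library's exact API.

#define WEAK_BITS 16                         /* assumed split, for illustration */
#define STRONG_ONE ((gpr_atm)1 << WEAK_BITS) /* +1 strong ref in the high bits */

static void take_strong_ref(gpr_atm *ref_pair) {
  gpr_atm_no_barrier_fetch_add(ref_pair, STRONG_ONE);
}

static void drop_strong_ref(gpr_atm *ref_pair) {
  /* Full barrier on the decrement so whoever runs teardown observes all
     writes made while the reference was held. */
  gpr_atm old = gpr_atm_full_fetch_add(ref_pair, -STRONG_ONE);
  if ((old >> WEAK_BITS) == 1) {
    /* last strong ref just dropped; begin shutdown (sketch only) */
  }
}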
static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) {
  grpc_cq_completion *c = NULL;
  if (gpr_spinlock_trylock(&q->queue_lock)) {
    c = (grpc_cq_completion *)gpr_mpscq_pop(&q->queue);
    gpr_spinlock_unlock(&q->queue_lock);
  }
  if (c) {
    gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1);
  }
  return c;
}
void *census_log_start_write(size_t size) {
  /* Used to bound number of times block allocation is attempted. */
  gpr_int32 attempts_remaining = g_log.num_blocks;
  /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
  gpr_int32 core_id = gpr_cpu_current_cpu();
  GPR_ASSERT(g_log.initialized);
  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
    return NULL;
  }
  do {
    int allocated;
    void *record = NULL;
    cl_block *block =
        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
    if (block && (record = cl_block_start_write(block, size))) {
      return record;
    }
    /* Need to allocate a new block. We are here if:
       - No block associated with the core OR
       - Write in-progress on the block OR
       - block is out of space */
    if (gpr_atm_acq_load(&g_log.is_full)) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
    gpr_mu_lock(&g_log.lock);
    allocated = cl_allocate_core_local_block(core_id, block);
    gpr_mu_unlock(&g_log.lock);
    if (!allocated) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
  } while (attempts_remaining--);
  /* Give up. */
  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
  return NULL;
}
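A hedged sketch of the caller side of this start/commit protocol, assuming the matching census_log_end_write(record, bytes_written) API from the same module: the caller reserves space, fills it, and commits exactly what it wrote; a NULL reservation means the log is out of space and the event is dropped (the function above already bumped out_of_space_count).

#include <string.h>

static void log_event(const void *data, size_t size) {
  void *record = census_log_start_write(size);
  if (record == NULL) {
    return; /* out of space: the write is dropped and counted */
  }
  memcpy(record, data, size);
  census_log_end_write(record, size); /* commit exactly what was written */
}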
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
  unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", fd->fd,
            fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
  lockfree_node head;
  lockfree_node newhead;
  lockfree_node current;
  lockfree_node newent;

  /* First fill in the entry's index and aba ctr for new head */
  newhead.contents.index = (uint16_t)entry;
#ifdef GPR_ARCH_64
  /* Fill in the pad to avoid confusing memcheck tools */
  newhead.contents.pad = 0;
#endif
  /* Also post-increment the aba_ctr */
  current.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
  newhead.contents.aba_ctr = ++current.contents.aba_ctr;
  gpr_atm_no_barrier_store(&stack->entries[entry].atm, current.atm);

#ifndef NDEBUG
  /* Check for double push */
  {
    int pushed_index = entry / (int)(8 * sizeof(gpr_atm));
    int pushed_bit = entry % (int)(8 * sizeof(gpr_atm));
    gpr_atm old_val;
    old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
                                           ((gpr_atm)1 << pushed_bit));
    GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) == 0);
  }
#endif

  do {
    /* Atomically get the existing head value for use */
    head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
    /* Point to it */
    newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
    newent.contents.index = head.contents.index;
    gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
  } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
  /* Use rel_cas above to make sure that entry index is set properly */
  return head.contents.index == INVALID_ENTRY_INDEX;
}
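Taken together with the pop above, a short usage sketch of the stack as a free list of slot indices into caller-owned storage, which is the natural fit for entries being small integers. This assumes the gpr_stack_lockfree_create(size_t entries) and gpr_stack_lockfree_destroy helpers from the same module; SLOTS and the item type are illustrative.

#define SLOTS 64

typedef struct {
  int value; /* ... per-item payload ... */
} item;

static void freelist_demo(void) {
  item items[SLOTS]; /* caller-owned storage indexed by stack entries */
  gpr_stack_lockfree *freelist = gpr_stack_lockfree_create(SLOTS);
  for (int i = 0; i < SLOTS; i++) {
    gpr_stack_lockfree_push(freelist, i); /* seed the free list */
  }
  int idx = gpr_stack_lockfree_pop(freelist);
  if (idx != -1) {
    items[idx].value = 42;                  /* use the reserved slot */
    gpr_stack_lockfree_push(freelist, idx); /* return it when done */
  }
  gpr_stack_lockfree_destroy(freelist);
}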
void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc) {
  gpr_atm_no_barrier_fetch_add(&c->value, inc);
}
void gpr_refn(gpr_refcount *r, int n) {
  gpr_atm_no_barrier_fetch_add(&r->count, n);
}
static void interned_slice_ref(void *p) {
  interned_slice_refcount *s = (interned_slice_refcount *)p;
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
}
void gpr_ref(gpr_refcount *r) {
  gpr_atm_no_barrier_fetch_add(&r->count, 1);
}
static void ref_by(grpc_fd *fd, int n) {
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
                                       sp->fd_index};
  grpc_pollset *read_notifier_pollset = NULL;
  grpc_fd *fdobj;

  if (err != GRPC_ERROR_NONE) {
    goto error;
  }

  read_notifier_pollset =
      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
                               &sp->server->next_pollset_to_assign, 1) %
                           sp->server->pollset_count];

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);

    if (read_notifier_pollset == NULL) {
      gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
      goto error;
    }

    grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);

    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
        read_notifier_pollset, &acceptor);

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
void *tunnel_get_next_tag(grpc_tunnel *tunnel) {
  return (void *)gpr_atm_no_barrier_fetch_add(&tunnel->next_tag, 1);
}
static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) {
  gpr_mpscq_push(&q->queue, (gpr_mpscq_node *)c);
  return gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1) == 0;
}
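The return value above is true exactly when the previous item count was zero, i.e. on an empty-to-non-empty transition. A hedged sketch of how a caller might use that signal to wake a sleeping consumer; cq_kick_poller is a hypothetical hook, not the library's API.

static void cq_kick_poller(void); /* hypothetical wakeup hook */

static void cq_enqueue_and_signal(grpc_cq_event_queue *q,
                                  grpc_cq_completion *c) {
  if (cq_event_queue_push(q, c)) {
    /* queue went from empty to non-empty: wake a poller so the new
       completion is noticed promptly */
    cq_kick_poller();
  }
}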
static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) {
  GPR_ASSERT(amount > 0);
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
}
grpc_slice grpc_slice_intern(grpc_slice slice) {
  GPR_TIMER_BEGIN("grpc_slice_intern", 0);
  if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
    GPR_TIMER_END("grpc_slice_intern", 0);
    return slice;
  }

  uint32_t hash = grpc_slice_hash(slice);
  for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
    static_metadata_hash_ent ent =
        static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
    if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
        grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
      GPR_TIMER_END("grpc_slice_intern", 0);
      return grpc_static_slice_table[ent.idx];
    }
  }

  interned_slice_refcount *s;
  slice_shard *shard = &g_shards[SHARD_IDX(hash)];

  gpr_mu_lock(&shard->mu);

  /* search for an existing string */
  size_t idx = TABLE_IDX(hash, shard->capacity);
  for (s = shard->strs[idx]; s; s = s->bucket_next) {
    if (s->hash == hash && grpc_slice_eq(slice, materialize(s))) {
      if (gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) == 0) {
        /* If we get here, we've added a ref to something that was about to
         * die - drop it immediately.
         * The *only* possible path here (given the shard mutex) should be to
         * drop from one ref back to zero - assert that with a CAS */
        GPR_ASSERT(gpr_atm_rel_cas(&s->refcnt, 1, 0));
        /* and treat this as if we were never here... sshhh */
      } else {
        gpr_mu_unlock(&shard->mu);
        GPR_TIMER_END("grpc_slice_intern", 0);
        return materialize(s);
      }
    }
  }

  /* not found: create a new string */
  /* string data goes after the internal_string header */
  s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
                                            GRPC_SLICE_LENGTH(slice));
  gpr_atm_rel_store(&s->refcnt, 1);
  s->length = GRPC_SLICE_LENGTH(slice);
  s->hash = hash;
  s->base.vtable = &interned_slice_vtable;
  s->base.sub_refcount = &s->sub;
  s->sub.vtable = &interned_slice_sub_vtable;
  s->sub.sub_refcount = &s->sub;
  s->bucket_next = shard->strs[idx];
  shard->strs[idx] = s;
  memcpy(s + 1, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));

  shard->count++;
  if (shard->count > shard->capacity * 2) {
    grow_shard(shard);
  }

  gpr_mu_unlock(&shard->mu);

  GPR_TIMER_END("grpc_slice_intern", 0);
  return materialize(s);
}
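To close, a small usage sketch: interning the same bytes twice should hand back slices that share one refcounted payload, which is what makes later equality checks cheap. This assumes the public slice helpers grpc_slice_from_copied_string, grpc_slice_eq, and grpc_slice_unref; note the function above does not consume the caller's reference to the input slice, so it is unreffed separately here.

static void intern_demo(void) {
  grpc_slice raw = grpc_slice_from_copied_string("content-type");
  grpc_slice a = grpc_slice_intern(raw);
  grpc_slice b = grpc_slice_intern(raw);
  /* same bytes -> same interned payload */
  GPR_ASSERT(grpc_slice_eq(a, b));
  grpc_slice_unref(raw); /* intern does not take ownership of the input */
  grpc_slice_unref(a);
  grpc_slice_unref(b);
}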