Example #1
void grpc_fd_shutdown(grpc_fd *fd) {
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  /* shutdown must only be requested once */
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  /* collect any pending read/write closures so they can be failed */
  set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
  set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  GPR_ASSERT(ncb <= 2);
  /* run the collected closures outside the lock, with success == 0 */
  process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
                    0 /* GPR_FALSE */);
}
Example #2
struct bt_ctf_event *bt_ctf_iter_read_event_flags(struct bt_ctf_iter *iter,
		int *flags)
{
	struct ctf_file_stream *file_stream;
	struct bt_ctf_event *ret;
	struct ctf_stream_definition *stream;
	struct packet_index *packet_index;

	/*
	 * We do not want to fail for any other reason than end of
	 * trace, hence the assert.
	 */
	assert(iter);

	ret = &iter->current_ctf_event;
	file_stream = bt_heap_maximum(iter->parent.stream_heap);
	if (!file_stream) {
		/* end of file for all streams */
		goto stop;
	}
	stream = &file_stream->parent;
	ret->parent = g_ptr_array_index(stream->events_by_id,
			stream->event_id);

	if (flags)
		*flags = 0;
	if (!file_stream->pos.packet_cycles_index)
		packet_index = NULL;
	else
		packet_index = &g_array_index(file_stream->pos.packet_cycles_index,
				struct packet_index, file_stream->pos.cur_index);
	iter->events_lost = 0;
	if (packet_index && packet_index->events_discarded >
			file_stream->pos.last_events_discarded) {
		if (flags)
			*flags |= BT_ITER_FLAG_LOST_EVENTS;
		iter->events_lost += packet_index->events_discarded -
			file_stream->pos.last_events_discarded;
		file_stream->pos.last_events_discarded =
			packet_index->events_discarded;
	}

	if (ret->parent->stream->stream_id > iter->callbacks->len)
		goto end;

	process_callbacks(iter, ret->parent->stream);

end:
	return ret;
stop:
	return NULL;
}
Example #3
/*
 * Callback routine when the required locks are obtained.
 * Called from parent context
 */
static void ctdb_lock_handler(struct tevent_context *ev,
			    struct tevent_fd *tfd,
			    uint16_t flags,
			    void *private_data)
{
	struct lock_context *lock_ctx;
	char c;
	bool locked;
	double t;
	int id;

	lock_ctx = talloc_get_type_abort(private_data, struct lock_context);

	/* cancel the timeout event */
	TALLOC_FREE(lock_ctx->ttimer);

	t = timeval_elapsed(&lock_ctx->start_time);
	id = lock_bucket_id(t);

	/* Read the status from the child process */
	if (sys_read(lock_ctx->fd[0], &c, 1) != 1) {
		locked = false;
	} else {
		locked = (c == 0 ? true : false);
	}

	/* Update statistics */
	CTDB_INCREMENT_STAT(lock_ctx->ctdb, locks.num_calls);
	if (lock_ctx->ctdb_db) {
		CTDB_INCREMENT_DB_STAT(lock_ctx->ctdb_db, locks.num_calls);
	}

	if (locked) {
		if (lock_ctx->ctdb_db) {
			CTDB_INCREMENT_STAT(lock_ctx->ctdb, locks.buckets[id]);
			CTDB_UPDATE_LATENCY(lock_ctx->ctdb, lock_ctx->ctdb_db,
					    lock_type_str[lock_ctx->type], locks.latency,
					    lock_ctx->start_time);

			CTDB_UPDATE_DB_LATENCY(lock_ctx->ctdb_db, lock_type_str[lock_ctx->type], locks.latency, t);
			CTDB_INCREMENT_DB_STAT(lock_ctx->ctdb_db, locks.buckets[id]);
		}
	} else {
		CTDB_INCREMENT_STAT(lock_ctx->ctdb, locks.num_failed);
		if (lock_ctx->ctdb_db) {
			CTDB_INCREMENT_DB_STAT(lock_ctx->ctdb_db, locks.num_failed);
		}
	}

	process_callbacks(lock_ctx, locked);
}
Example #4
/*
 * Informs other threads that this thread has passed through a quiescent
 * state.
 * If all threads have passed through a quiescent state since the last time
 * this thread processed its callbacks, proceed to process pending callbacks.
 */
void quiescent_state (int blocking)
{
    uint8_t my_index = ltd.thread_index;
    int epoch;
    int orig;

 retry:    
    epoch = qg->global_epoch;
    if (shtd[my_index].epoch != epoch) { /* New epoch. */
        /* Process callbacks for old 'incarnation' of this epoch. */
        process_callbacks(ltd.limbo_list[epoch]);
        shtd[my_index].epoch = epoch;
    } else {
        orig = shtd[my_index].in_critical;
        shtd[my_index].in_critical = 0;
        int res = update_epoch();
        if (res) {
            shtd[my_index].in_critical = orig; 
            MEM_BARRIER;
            epoch = qg->global_epoch;
            if (shtd[my_index].epoch != epoch) {
                process_callbacks(ltd.limbo_list[epoch]);
                shtd[my_index].epoch = epoch;
            }
            return;
        } else if (blocking) {
            shtd[my_index].in_critical = orig; 
            MEM_BARRIER;
            sched_yield();
            goto retry;
        }
        shtd[my_index].in_critical = orig; 
        MEM_BARRIER;
    }
    
    return;
}
Example #5
static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure* closure;
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    process_callbacks(closure, ncb, success, allow_synchronous_callback);
  }
}
Example #6
void mastermind_t::data::collect_info_loop() {
	std::unique_lock<std::mutex> lock(m_mutex);

	if (m_done) {
		COCAINE_LOG_INFO(m_logger, "libmastermind: have to stop immediately");
		return;
	}

	try {
		reconnect();
	} catch (const std::exception &ex) {
		COCAINE_LOG_ERROR(m_logger, "libmastermind: reconnect: %s", ex.what());
	}

#if __GNUC_MINOR__ >= 6
	auto no_timeout = std::cv_status::no_timeout;
	auto timeout = std::cv_status::timeout;
	auto tm = timeout;
#else
	bool no_timeout = true;
	bool timeout = false;
	bool tm = timeout;
#endif
	COCAINE_LOG_INFO(m_logger, "libmastermind: collect_info_loop: update period is %d", static_cast<int>(m_group_info_update_period));
	do {
		collect_info_loop_impl();

		process_callbacks();

		tm = timeout;
		do {
			tm = m_weight_cache_condition_variable.wait_for(lock,
															std::chrono::seconds(
																m_group_info_update_period));
		} while(tm == no_timeout && m_done == false);
	} while(m_done == false);
}
Example #7
struct bt_ctf_event *bt_ctf_iter_read_event_flags(struct bt_ctf_iter *iter,
		int *flags)
{
	struct ctf_file_stream *file_stream;
	struct bt_ctf_event *ret;
	struct ctf_stream_definition *stream;
	struct packet_index *packet_index;

	/*
	 * We do not want to fail for any other reason than end of
	 * trace, hence the assert.
	 */
	assert(iter);

	if (flags)
		*flags = 0;

	ret = &iter->current_ctf_event;
	file_stream = bt_heap_maximum(iter->parent.stream_heap);
	if (!file_stream) {
		/* end of file for all streams */
		goto stop;
	}

	/*
	 * If the packet is empty (contains only headers or is of size 0), the
	 * caller has to know that we can't read the current event and we need
	 * to do a bt_iter_next.
	 */
	if (file_stream->pos.data_offset == file_stream->pos.content_size
			|| file_stream->pos.content_size == 0) {
		/* More events may come. */
		ret = NULL;
		if (flags)
			*flags |= BT_ITER_FLAG_RETRY;
		goto end;
	}

	stream = &file_stream->parent;
	if (iter->parent.end_pos &&
		iter->parent.end_pos->type == BT_SEEK_TIME &&
		stream->real_timestamp > iter->parent.end_pos->u.seek_time) {
		goto stop;
	}
	ret->parent = g_ptr_array_index(stream->events_by_id,
			stream->event_id);

	if (!file_stream->pos.packet_index)
		packet_index = NULL;
	else
		packet_index = &g_array_index(file_stream->pos.packet_index,
				struct packet_index, file_stream->pos.cur_index);
	iter->events_lost = 0;
	if (packet_index && packet_index->events_discarded >
			file_stream->pos.last_events_discarded) {
		if (flags)
			*flags |= BT_ITER_FLAG_LOST_EVENTS;
		iter->events_lost += packet_index->events_discarded -
			file_stream->pos.last_events_discarded;
		file_stream->pos.last_events_discarded =
			packet_index->events_discarded;
	}

	if (ret->parent->stream->stream_id > iter->callbacks->len)
		goto end;

	process_callbacks(iter, ret->parent->stream);

end:
	return ret;
stop:
	return NULL;
}
Example #8
void mastermind_t::data::cache_force_update() {
	std::lock_guard<std::mutex> lock(m_mutex);
	(void) lock;
	collect_info_loop_impl();
	process_callbacks();
}