/* Tear down the manos/libeio bridge: detach the three watchers that
 * pump eio through the loop, then release the per-loop state struct.
 * The loop itself is owned by the caller and is not destroyed here. */
void
manos_shutdown (manos_data_t *data)
{
	ev_async_stop (data->loop, &eio_want_poll_watcher);
	ev_async_stop (data->loop, &eio_done_poll_watcher);
	ev_idle_stop (data->loop, &eio_idle_watcher);
	free (data);
}
/*
 * One-shot async callback: parse the fully-received request and hand it
 * to the user-supplied ServerHandler.  On success the context advances
 * to SEND_RESPONSE and the watcher is re-armed pointing at SendResponse;
 * on a NULL request or handler failure the client gets bad_request.
 *
 * Fix: removed the unused local `err_t err = 0;` and flattened the
 * nesting into guard clauses; behavior is otherwise unchanged.
 */
spx_private void ParserRequest(EV_P_ ev_async *watcher, int revents){/*{{{*/
    ev_async_stop(loop, watcher);

    struct server_context *ctx = (struct server_context *) watcher->data;
    if(NULL == ctx){
        SpxLog1(g_log, SpxLogError, "ParserRequest ctx is NULL");
        return;
    }

    printf("\n----------------CLIENT:%d xxxxxxxxxxxxxxxxxx CTX:%d-----------------------\n", ctx->fd, GetCTXCount());

    if(NULL == ctx->request){
        RequestException(ctx, bad_request);
        return;
    }

    //msg_print(ctx->request, ctx->req_size);
    /* ServerHandler returns the response size, or -1 on failure. */
    long handle_size = ctx->ServerHandler(ctx->req_size, ctx->request, &ctx->response);
    if(-1 == handle_size){
        RequestException(ctx, bad_request);
        return;
    }

    ctx->resp_size = handle_size;
    ctx->life_cycle = SEND_RESPONSE;
    /* Chain to the response phase on the next loop iteration. */
    RegisterAayncWatcher(&ctx->async_watcher, SendResponse, ctx);
    ev_async_start(loop, &ctx->async_watcher);
    ev_async_send(loop, &ctx->async_watcher);
}/*}}}*/
/*
 * One-shot async callback: read the pending request bytes from the
 * client fd.  A complete read chains to ParserRequest; a would-block
 * condition re-arms an ev_once read watcher; a hard error (-1) closes
 * the context, any other error answers bad_request.
 */
spx_private void ReciveRequest(EV_P_ ev_async *watcher, int revents){/*{{{*/
    ev_async_stop(loop, watcher);

    struct server_context *ctx = (struct server_context *) watcher->data;
    if(NULL == ctx){
        SpxLog1(g_log, SpxLogError, "ReciveRequest ctx is NULL");
        return;
    }

    err_t rc = ReciveRequest_GetRequest_ReadRequest(ctx->fd, ctx->request, &ctx->req_len);
    if(0 == rc){
        /* Whole request is in: schedule the parse phase. */
        RegisterAayncWatcher(&ctx->async_watcher, ParserRequest, ctx);
        ev_async_start(loop, &ctx->async_watcher);
        ev_async_send(loop, &ctx->async_watcher);
        printf("buf:%s\n",ctx->request);
        return;
    }

    if(EAGAIN == rc || EWOULDBLOCK == rc || EINTR == rc) {
        /* Transient: wait (with timeout) for the fd to become readable. */
        ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
        return;
    }

    SpxLog2(g_log, SpxLogError, rc,"ReciveRequest_GetRequest_ReadRequest Failed");
    if( -1 == rc)
        CloseCTX(ctx);
    else
        RequestException(ctx, bad_request);
    return;
}/*}}}*/
/*
 * One-shot async callback: push the current response chunk to the
 * client.  When the whole response has been written the request is
 * finished; otherwise the next chunk (at most SPLIT_SIZE bytes) is
 * scheduled by re-arming this same callback.  A would-block condition
 * (or a partial write with data still pending) parks the fd on an
 * ev_once watcher that retries via Sender_ReWriteResponse.
 *
 * BUG FIX: the retry watcher waited for EV_READ before re-writing the
 * response; to retry a blocked write we must wait for the fd to become
 * writable, i.e. EV_WRITE.
 * NOTE(review): the retry is armed on main_socket_loop rather than the
 * callback's own `loop` (unlike ReciveRequest) -- confirm this is the
 * loop that owns ctx->fd.
 */
spx_private void Sender(EV_P_ ev_async *watcher, int revents){/*{{{*/
    ev_async_stop(loop, watcher);

    struct server_context *ctx = (struct server_context *) watcher->data;
    if(NULL == ctx){
        SpxLog1(g_log, SpxLogError, "Sender ctx is NULL");
        return;
    }

    err_t err = Sender_WriteResponse(ctx->fd, ctx->response, &ctx->resp_len, &ctx->split_size);
    if((0 == err)&&(0 == ctx->split_size)){
        if(ctx->resp_size == ctx->resp_len){
            /* Everything sent: complete the request. */
            RequestFinish(ctx);
        }else{
            /* Queue the next chunk, capped at SPLIT_SIZE. */
            int remain_size = ctx->resp_size - ctx->resp_len;
            if(remain_size >= SPLIT_SIZE){
                ctx->split_size = SPLIT_SIZE;
            }else{
                ctx->split_size = remain_size;
            }
            RegisterAayncWatcher(&ctx->async_watcher, Sender, ctx);
            ev_async_start(loop, &ctx->async_watcher);
            ev_async_send(loop, &ctx->async_watcher);
            return;
        }
    }else{
        if((EAGAIN == err || EWOULDBLOCK == err || EINTR == err)||(ctx->resp_size > 0)) {
            /* Wait for writability, then retry the pending write. */
            ev_once(main_socket_loop, ctx->fd, EV_WRITE, ctx->timeout, Sender_ReWriteResponse, ctx);
            return;
        }else{
            SpxLog2(g_log, SpxLogError, err,"Sender Failed");
            RequestException(ctx, bad_request);
        }
    }
}/*}}}*/
/**
 * Shuts down all the connections and listeners and prepares to exit.
 * @arg netconf The config for the networking stack.
 * @return 0 always.
 *
 * NOTE: the single-argument ev_* calls imply libev is built without
 * EV_MULTIPLICITY (all watchers live on the default loop).
 */
int shutdown_networking(statsite_proxy_networking *netconf) {
    // Tell the worker thread to stop, then kick the loop awake so it
    // notices and unwinds.
    netconf->should_run = 0;
    schedule_async(netconf, EXIT, NULL);

    // Wait for the thread to return
    if (netconf->thread) pthread_join(netconf->thread, NULL);

    // Stop accepting new TCP/UDP traffic and release the listen sockets.
    ev_io_stop(&netconf->tcp_client);
    close(netconf->tcp_client.fd);
    ev_io_stop(&netconf->udp_client);
    close(netconf->udp_client.fd);

    // Stop the wakeup watcher.
    ev_async_stop(&netconf->loop_async);

    // TODO: Close all the client/proxy connections
    // ??? For now, we just leak the memory
    // since we are shutdown down anyways...

    // Free the event loop, then the config itself.
    ev_loop_destroy(EV_DEFAULT);
    free(netconf);
    return 0;
}
/*
 * One-shot async callback: begin the response phase.  Computes the size
 * of the first chunk (at most SPLIT_SIZE bytes of what remains) and
 * chains to Sender; a NULL response answers bad_request.
 *
 * Fix: removed the unused local `err_t err = 0;` and used a guard
 * clause for the NULL-response path; behavior is otherwise unchanged.
 */
spx_private void SendResponse(EV_P_ ev_async *watcher, int revents){/*{{{*/
    ev_async_stop(loop, watcher);

    struct server_context *ctx = (struct server_context *) watcher->data;
    if(NULL == ctx){
        SpxLog1(g_log, SpxLogError, "SendResponse ctx is NULL");
        return;
    }

    if(NULL == ctx->response){
        RequestException(ctx, bad_request);
        return;
    }

    //headers(ctx->fd, ctx->resp_size);
    /* First chunk: everything that remains, capped at SPLIT_SIZE. */
    int remain_size = ctx->resp_size - ctx->resp_len;
    if(remain_size >= SPLIT_SIZE){
        ctx->split_size = SPLIT_SIZE;
    }else{
        ctx->split_size = remain_size;
    }

    RegisterAayncWatcher(&ctx->async_watcher, Sender, ctx);
    ev_async_start(loop, &ctx->async_watcher);
    ev_async_send(loop, &ctx->async_watcher);
}/*}}}*/
/*
 * One-shot async callback driving a two-phase read state machine:
 *   READ_HEADER  -- read the fixed-size log header, allocate the body
 *                   buffer sized from header->req_size, then fall
 *                   through to READ_REQUEST in the same invocation;
 *   READ_REQUEST -- read the body, then chain to ParserRequest.
 * Would-block conditions park the fd on an ev_once read watcher;
 * hard errors close the context.
 */
spx_private void ReciveRequest(EV_P_ ev_async *watcher, int revents){/*{{{*/
    ev_async_stop(loop, watcher);

    struct server_context *ctx = (struct server_context *) watcher->data;
    if(NULL == ctx){
        SpxLog1(g_log, SpxLogError, "ReciveRequest ctx is NULL");
        return;
    }

    /* Phase 1: pull in the fixed-size header. */
    if(READ_HEADER == ctx->life_cycle){
        char buf[LOG_HEADER_SIZE] = {0};
        size_t len = 0;
        err_t err = ReciveRequest_GetRequest_ReadRequest(ctx->fd, buf, &len, LOG_HEADER_SIZE);
        if(0 == err){
            log_header_unpack(buf, ctx->header);
            /* Body buffer sized from the just-decoded header. */
            char *request = (char *) calloc(1, sizeof(char)*ctx->header->req_size);
            if(NULL == request){
                SpxLog2(g_log, SpxLogError, err,"calloc log_header failed");
                return;
            }
            ctx->req_size = ctx->header->req_size;
            ctx->request = request;
            ctx->life_cycle = READ_REQUEST;
        }
        else if(EAGAIN == err || EWOULDBLOCK == err || EINTR == err) {
            ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
            return;
        }
        else{
            SpxLog2(g_log, SpxLogError, err,"Read header failed");
            CloseCTX(ctx);
            return;
        }
    }

    /* Phase 2: pull in the body (may run right after phase 1). */
    if(READ_REQUEST == ctx->life_cycle){
        printf("---------------ReadRequest-------------\n");
        printf("req_size:%d\n", (int)ctx->req_size);
        err_t err = ReciveRequest_GetRequest_ReadRequest(ctx->fd, ctx->request, &ctx->req_len, ctx->req_size);
        printf("read request complete\n" );
        if(0 == err){
            ctx->life_cycle = PARSE_REQUEST;
            RegisterAayncWatcher(&ctx->async_watcher, ParserRequest, ctx);
            ev_async_start(loop, &ctx->async_watcher);
            ev_async_send(loop, &ctx->async_watcher);
            return;
        }
        if(EAGAIN == err || EWOULDBLOCK == err || EINTR == err) {
            ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
            return;
        }
        SpxLog2(g_log, SpxLogError, err,"ReciveRequest_GetRequest_ReadRequest Failed");
        CloseCTX(ctx);
        return;
    }
}/*}}}*/
/*
 * Stop the watcher(s) backing `handle` and mark it closing; close_cb is
 * delivered on the next loop iteration via the handle's idle watcher.
 * Returns 0 on success, -1 for an unknown handle type.
 */
int uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_tcp_t* tcp;
  uv_async_t* async;
  uv_timer_t* timer;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_TCP:
      tcp = (uv_tcp_t*) handle;
      uv_read_stop((uv_stream_t*)tcp);
      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(EV_DEFAULT_ &async->async_watcher);
      /* async watchers hold an extra unref; restore the loop refcount. */
      ev_ref(EV_DEFAULT_UC);
      break;

    case UV_TIMER:
      timer = (uv_timer_t*)handle;
      if (ev_is_active(&timer->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
      ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
      break;

    default:
      assert(0);
      return -1;
  }

  uv_flag_set(handle, UV_CLOSING);

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));

  return 0;
}
/* the watcher callback signifying thread exit. Called in the context
 * of the event loop that created the thread; detaches the per-thread
 * async watcher and forwards exit status/signal to the user callback. */
static void uv__thread_exit(EV_P_ ev_async* watcher, int revents) {
  uv_thread_t *thread = watcher->data;

  assert(&thread->thread_watcher == watcher);
  assert(revents & EV_ASYNC);

  ev_async_stop(EV_A_ &thread->thread_watcher);

  if (thread->exit_cb) {
    thread->exit_cb((uv_handle_t *)thread,
                    thread->exit_status,
                    thread->term_signal);
  }
}
void write_cb(struct ev_loop *loop, struct ev_async *w, int revents) { AsyncConnection *conn = (AsyncConnection *) w->data; char *method = (char *) ""; switch (conn->request->method) { case EBB_GET: method = (char *)"GET"; break; case EBB_POST: method = (char *)"POST"; break; default: break; } struct timeval endtime; gettimeofday(&endtime, nullptr); float duration = endtime.tv_sec + endtime.tv_usec / 1000000.0 - conn->starttime.tv_sec - conn->starttime.tv_usec / 1000000.0; time_t rawtime; struct tm *timeinfo; char timestr[80]; time(&rawtime); timeinfo = localtime(&rawtime); strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S %z", timeinfo); if (conn->connection != nullptr) { conn->write_buffer = (char *)malloc(max_header_length + conn->response_length); conn->write_buffer_len = 0; // Copy the http status code conn->code = conn->code == 0 ? 200 : conn->code; conn->contentType = conn->contentType.size() == 0 ? "application/json" : conn->contentType; conn->write_buffer_len += snprintf((char *)conn->write_buffer, max_header_length, "HTTP/1.1 %lu OK\r\nContent-Type: %s\r\nContent-Length: %lu\r\nConnection: close\r\n\r\n", conn->code, conn->contentType.c_str(), conn->response_length); // Append the response memcpy(conn->write_buffer + conn->write_buffer_len, conn->response, conn->response_length); conn->write_buffer_len += conn->response_length; ebb_connection_write(conn->connection, conn->write_buffer, conn->write_buffer_len, continue_responding); // We need to wait for `continue_responding` to fire to be sure the client has been sent all the data printf("%s [%s] %s %s (%f s)\n", inet_ntoa(conn->addr.sin_addr), timestr, method, conn->path, duration); } else { printf("%s [%s] %s %s (%f s) not sent\n", inet_ntoa(conn->addr.sin_addr), timestr, method, conn->path, duration); } ev_async_stop(conn->ev_loop, &conn->ev_write); // When connection is nullptr, `continue_responding` won't fire since we never sent data to the client, // thus, we'll need to clean up manually here, 
while connection has already been cleaned up in on `on_response` if (conn->connection == nullptr) delete conn; }
/* Detach the wakeup watcher, break out of the loop (only when this
 * client created the loop's thread), and free the loop's queues. */
static void as_ev_close_loop(as_event_loop* event_loop) {
	ev_async_stop(event_loop->loop, &event_loop->wakeup);

	// Only stop event loop if client created event loop.
	if (as_event_threads_created) {
		ev_unloop(event_loop->loop, EVUNLOOP_ALL);
	}

	// Cleanup event loop resources.
	as_queue_destroy(&event_loop->queue);
	as_queue_destroy(&event_loop->pipe_cb_queue);
}
/* called by the client when the corresponding uv_thread_t handle
 * is closed: stops the exit watcher, detaches the handle from the
 * shared state under the mutex, and deletes the shared state if the
 * OS thread itself is already gone. */
void uv_thread_close(uv_thread_t *thread) {
  /* close the watcher */
  ev_async_stop(thread->loop->ev, (ev_async *)&thread->thread_watcher);

  /* synchronously clear the reference to this from the shared handle */
  uv_thread_shared_t *hnd = thread->thread_shared;
  pthread_mutex_lock(&hnd->mtx);
  hnd->thread_handle = 0;
  /* NOTE(review): treating a pthread_t as a boolean/zeroable value is
   * nonportable (POSIX allows it to be a struct) -- works on the
   * platforms this targets, but worth confirming. */
  pthread_t thread_still_exists = hnd->thread_id;
  pthread_mutex_unlock(&hnd->mtx);

  /* if the thread has exited already, delete shared handle */
  if (!thread_still_exists)
    uv__thread_shared_delete(hnd);
}
/* Release everything attached to the private ev loop: the breaker
 * async watcher (if any), the loop's userdata link, and the loop. */
static void
dispose_ev_loop (MilterLibevEventLoopPrivate *priv)
{
    dispose_release(priv);

    if (!priv->ev_loop)
        return;

    if (priv->breaker) {
        ev_async_stop(priv->ev_loop, priv->breaker);
        g_free(priv->breaker);
        priv->breaker = NULL;
    }
    ev_set_userdata(priv->ev_loop, NULL);
    ev_loop_destroy(priv->ev_loop);
    priv->ev_loop = NULL;
}
/*
 * Stop the watcher(s) backing `handle` and mark it closing; the
 * on_close callback fires on the next loop iteration via the handle's
 * idle watcher.  Returns 0 on success, -1 for an unknown handle type.
 */
int uv_close(uv_handle_t* handle) {
  switch (handle->type) {
    case UV_TCP:
      ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
      ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
      break;

    case UV_PREPARE:
      uv_prepare_stop(handle);
      break;

    case UV_CHECK:
      uv_check_stop(handle);
      break;

    case UV_IDLE:
      uv_idle_stop(handle);
      break;

    case UV_ASYNC:
      ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
      /* async watchers hold an extra unref; restore the loop refcount. */
      ev_ref(EV_DEFAULT_UC);
      break;

    case UV_TIMER:
      if (ev_is_active(&handle->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
      ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
      break;

    default:
      assert(0);
      return -1;
  }

  uv_flag_set(handle, UV_CLOSING);

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));

  return 0;
}
/**
 * @brief Handle client disconnection and free resources
 *
 * @param loop The event loop where the event was issued
 * @param w The async event object
 * @param revents Unused
 *
 * This event is triggered when a client disconnects or is forcefully
 * disconnected. It stops the other events from running, and frees all
 * the remaining resources for the client itself.
 *
 * NOTE(review): this variant deliberately tears down the whole process
 * (global uninit + exit(0)) after removing the client -- confirm this
 * single-client behavior is intended.
 */
static void client_ev_disconnect_handler(struct ev_loop *loop, ev_async *w,
                                         int revents)
{
    RTSP_Client *rtsp = (RTSP_Client*)w->data;
    GString *outbuf = NULL;
    feng *srv = rtsp->srv;

    /* Stop every watcher attached to this client. */
    ev_io_stop(srv->loop, &rtsp->ev_io_read);
    ev_io_stop(srv->loop, &rtsp->ev_io_write);
    ev_async_stop(srv->loop, &rtsp->ev_sig_disconnect);
    ev_timer_stop(srv->loop, &rtsp->ev_timeout);

    Sock_close(rtsp->sock);
    srv->connection_count--;

    rtsp_session_free(rtsp->session);
    r_close(rtsp->cached_resource);
    interleaved_free_list(rtsp);

    /* Remove the output queue */
    while ( (outbuf = g_queue_pop_tail(rtsp->out_queue)) )
        g_string_free(outbuf, TRUE);
    g_queue_free(rtsp->out_queue);

    g_byte_array_free(rtsp->input, true);
    g_slice_free(RTSP_Client, rtsp);

    fnc_log(FNC_LOG_INFO, "[client] Client removed");

    demuxer_stsw_global_uninit();
    sleep(1);
    exit(0);
}
/* Shut a tasklet pool down: stop the worker threads, drain and fire any
 * already-finished callbacks, detach the finished-watcher from the loop
 * (restoring the loop ref it gave up), and free the pool -- unless a
 * callback is still running it (delete_later == -1), in which case the
 * runner frees it later. */
void li_tasklet_pool_free(liTaskletPool *pool) {
	liTasklet *t;

	if (!pool) return;

	li_tasklet_pool_set_threads(pool, 0);

	/* Deliver callbacks for tasklets that completed before shutdown. */
	while (NULL != (t = g_async_queue_try_pop(pool->finished))) {
		t->finished_cb(t->data);
	}
	g_async_queue_unref(pool->finished);
	pool->finished = NULL;

	ev_ref(pool->loop);
	ev_async_stop(pool->loop, &pool->finished_watcher);

	if (-1 == pool->delete_later) {
		pool->delete_later = 1;
	} else {
		g_slice_free(liTaskletPool, pool);
	}
}
void write_cb(struct ev_loop *loop, struct ev_async *w, int revents) { AsyncConnection *conn = (AsyncConnection *) w->data; char *method = (char *) ""; switch (conn->request->method) { case EBB_GET: method = (char *)"GET"; break; case EBB_POST: method = (char *)"POST"; break; default: break; } struct timeval endtime; gettimeofday(&endtime, nullptr); float duration = endtime.tv_sec + endtime.tv_usec / 1000000.0 - conn->starttime.tv_sec - conn->starttime.tv_usec / 1000000.0; time_t rawtime; struct tm *timeinfo; char timestr[80]; time(&rawtime); timeinfo = localtime(&rawtime); strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S %z", timeinfo); // Handle the actual writing if (conn->connection != nullptr) { ebb_connection_write(conn->connection, conn->write_buffer, conn->write_buffer_len, continue_responding); printf("%s [%s] %s %s (%f s)\n", inet_ntoa(conn->addr.sin_addr), timestr, method, conn->path, duration); } else { printf("%s [%s] %s %s (%f s) not sent\n", inet_ntoa(conn->addr.sin_addr), timestr, method, conn->path, duration); } ev_async_stop(conn->ev_loop, &conn->ev_write); conn->waiting_for_response = false; // When connection is nullptr, `continue_responding` won't fire since we never sent data to the client, // thus, we'll need to clean up manually here, while connection has already been cleaned up in on `on_close` if (conn->connection == nullptr) delete conn; }
/* Close an async handle: detach its libev watcher from the owning
 * loop, restore the handle's loop reference, and mark it stopped. */
void uv__async_close(uv_async_t* handle) {
  ev_async_stop(handle->loop->ev, &handle->async_watcher);
  uv__handle_ref(handle);
  uv__handle_stop(handle);
}
/*
 * Destructor: the member file must already be NULL (asserted), then the
 * completion notifier is detached from the default loop and the zip
 * archive is closed.
 *
 * BUG FIX: "&notifier" had been mojibake-mangled to "¬ifier" (the
 * "&not;" HTML entity collapsed into U+00AC), which does not compile.
 */
ZipFile::~ZipFile() {
  assert(file == NULL);
  ev_async_stop(EV_DEFAULT_UC_ &notifier);
  zip_close(archive);
}
/* Destroy a stream loop: detach its wakeup ("bell") watcher, then
 * release the underlying libev loop.  The container itself is owned by
 * the caller and is not freed here. */
void stream_loop_destroy(struct stream_loop *loop) {
	struct ev_loop *el = loop->stream_loop;

	ev_async_stop(el, &loop->bell_watcher);
	ev_loop_destroy(el);
}
/* Destructor: detach both wakeup watchers (task activation and loop
 * break) before destroying the owned event loop. */
scheduler_impl_t::~scheduler_impl_t()
{
    ev_async_stop(ev_loop_, &activate_);
    ev_async_stop(ev_loop_, &break_loop_);
    ev_loop_destroy(ev_loop_);
}
/* ev_async specialization: detach this watcher from its owner's loop. */
template<>
void event_watcher<ev_async>::stop()
{
    /* TODO event shall be removed from async_watchers */
    ev_async_stop(this->self->loop, &this->watcher);
}
/*
 * Async DIO callback: delete a stored record from its chunkfile, write
 * the delete to the binlog, touch the mountpoint mtime, and build the
 * response header for the client.  Uses goto-based fallback:
 *   r1 -- operation failed; build an error response echoing the request
 *         protocol (if even that allocation fails, recycle all contexts
 *         and give up without notifying the client);
 *   r2 -- recycle task/dio contexts and dispatch the job back to the
 *         network module so the response is sent.
 */
void ydb_storage_dio_do_delete_form_chunkfile(
        struct ev_loop *loop,ev_async *w,int revents){/*{{{*/
    ev_async_stop(loop,w);

    err_t err = 0;
    struct ydb_storage_dio_context *dc = (struct ydb_storage_dio_context *) w->data;
    struct spx_task_context *tc = dc->tc;
    struct spx_job_context *jc = dc->jc;
    struct ydb_storage_configurtion *c = jc->config;

    /* Physically remove the record from its chunkfile. */
    if(0 != (err = ydb_storage_dio_delete_context_from_chunkfile(
                    c,dc->filename,dc->begin,dc->totalsize,
                    dc->opver,dc->ver,dc->lastmodifytime,
                    dc->realsize,spx_now()))){
        SpxLogFmt2(dc->log,SpxLogError,err,
                "delete context begin:%lld,realsize:%lld,totalsize:%lld "
                "form chunkfile:%s is fail.",
                dc->begin,dc->realsize,dc->totalsize,
                dc->filename);
        goto r1;
    }

    /* Record the delete in the binlog and bump the mountpoint mtime. */
    YdbStorageBinlogDeleteWriter(dc->rfid);
    struct ydb_storage_mountpoint *mp = spx_list_get(c->mountpoints, dc->mp_idx);
    mp->last_modify_time = spx_now();

    /* Success response header. */
    struct spx_msg_header *wh = (struct spx_msg_header *) \
                                spx_alloc_alone(sizeof(*wh),&err);
    if(NULL == wh){
        SpxLogFmt2(dc->log,SpxLogError,err,
                "delete context begin:%lld,realsize:%lld,totalsize:%lld "
                "form chunkfile:%s is success bug new response header is fail.",
                dc->begin,dc->realsize,dc->totalsize,
                dc->filename);
        goto r1;
    }
    jc->writer_header = wh;
    wh->version = YDB_VERSION;
    wh->protocol = YDB_C2S_DELETE;
    wh->offset = 0;
    wh->bodylen = 0;
    err = 0;
    goto r2;

r1:
    /* Error path: answer with a header echoing the request protocol. */
    jc->writer_header = (struct spx_msg_header *)
        spx_alloc_alone(sizeof(*(jc->writer_header)),&err);
    if(NULL == jc->writer_header){
        SpxLog2(dc->log,SpxLogError,err,\
                "new response header is fail."
                "no notify client and push jc force.");
        spx_task_pool_push(g_spx_task_pool,tc);
        ydb_storage_dio_pool_push(g_ydb_storage_dio_pool,dc);
        spx_job_pool_push(g_spx_job_pool,jc);
        return;
    }
    jc->writer_header->protocol = jc->reader_header->protocol;
    jc->writer_header->bodylen = 0;
    jc->writer_header->version = YDB_VERSION;
    jc->writer_header->err = err;

r2:
    /* Recycle the task/dio contexts and hand the job back to the
     * network module thread that owns this connection. */
    spx_task_pool_push(g_spx_task_pool,tc);
    ydb_storage_dio_pool_push(g_ydb_storage_dio_pool,dc);
    jc->err = err;
    jc->moore = SpxNioMooreResponse;
    size_t idx = spx_network_module_wakeup_idx(jc);
    struct spx_thread_context *threadcontext = spx_get_thread(g_spx_network_module,idx);
    jc->tc = threadcontext;
    SpxModuleDispatch(spx_network_module_wakeup_handler,jc);
    return;
}/*}}}*/
/*
 * Stop the watcher(s) and release the OS resources backing `handle`,
 * then mark it closing; close_cb is delivered on the next loop
 * iteration via the handle's idle watcher.
 */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_async_t* async;
  uv_stream_t* stream;
  uv_process_t* process;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_NAMED_PIPE:
      uv_pipe_cleanup((uv_pipe_t*)handle);
      /* Fall through. */

    case UV_TTY:
    case UV_TCP:
      stream = (uv_stream_t*)handle;
      uv_read_stop(stream);
      ev_io_stop(stream->loop->ev, &stream->write_watcher);
      /* Close both the stream fd and any accepted-but-unclaimed fd. */
      uv__close(stream->fd);
      stream->fd = -1;
      if (stream->accepted_fd >= 0) {
        uv__close(stream->accepted_fd);
        stream->accepted_fd = -1;
      }
      assert(!ev_is_active(&stream->read_watcher));
      assert(!ev_is_active(&stream->write_watcher));
      break;

    case UV_UDP:
      uv__udp_start_close((uv_udp_t*)handle);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(async->loop->ev, &async->async_watcher);
      /* async watchers hold an extra unref; restore the loop refcount. */
      ev_ref(async->loop->ev);
      break;

    case UV_TIMER:
      uv_timer_stop((uv_timer_t*)handle);
      break;

    case UV_PROCESS:
      process = (uv_process_t*)handle;
      ev_child_stop(process->loop->ev, &process->child_watcher);
      break;

    case UV_FS_EVENT:
      uv__fs_event_destroy((uv_fs_event_t*)handle);
      break;

    default:
      assert(0);
  }

  handle->flags |= UV_CLOSING;

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(handle->loop->ev, &handle->next_watcher);
  ev_feed_event(handle->loop->ev, &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));
}
/*
 * EV connect callback
 *
 * EV_TIMER connect_timer
 *
 * Periodic connection maintenance: during shutdown it disconnects any
 * remaining hosts and, once all are down, stops the timer/async and
 * breaks the loop.  Otherwise it (re)connects failed or disconnected
 * hosts with retry back-off, and raises the READY flag once every host
 * has at least been attempted.
 */
static void
_eredis_ev_connect_cb (struct ev_loop *loop, ev_timer *w, int revents)
{
  int i;
  eredis_t *e;

  (void) revents;
  (void) loop;

  e = (eredis_t*) w->data;

  if (IS_SHUTDOWN(e)) {
    if (e->hosts_connected) {
      /* Still-connected hosts: ask hiredis to disconnect them. */
      for (i = 0; i < e->hosts_nb; i++) {
        host_t *h = &e->hosts[i];
        if (H_IS_CONNECTED(h) && h->async_ctx)
          redisAsyncDisconnect( h->async_ctx );
      }
    }
    else {
      /* Connect timer */
      ev_timer_stop( e->loop, &e->connect_timer );
      /* Async send */
      ev_async_stop( e->loop, &e->send_async );
      /* Event break */
      ev_break( e->loop, EVBREAK_ALL );
    }
    return;
  }

  /* Normal operation: try to bring every host up. */
  for (i = 0; i < e->hosts_nb; i++) {
    host_t *h = &e->hosts[i];

    switch (H_CONN_STATE( h )) {
      case HOST_F_CONNECTED:
        break;

      case HOST_F_FAILED:
        /* Retry only every HOST_FAILED_RETRY_AFTER ticks. */
        if ((h->failures < HOST_FAILED_RETRY_AFTER) || ( ! _host_connect( h, 0 ))) {
          h->failures %= HOST_FAILED_RETRY_AFTER;
          h->failures ++;
        }
        break;

      case HOST_F_DISCONNECTED:
        /* Escalate to FAILED after too many reconnect attempts. */
        if (! _host_connect( h, 0 )) {
          if ((++ h->failures) > HOST_DISCONNECTED_RETRIES) {
            h->failures = 0;
            H_SET_FAILED( h );
          }
        }
        break;

      default:
        break;
    }
  }

  if (! IS_READY(e)) {
    /* Ready flag - need a connected host or a connection failure */
    int nb = 0;

    /* build ready flag */
    for (i = 0; i < e->hosts_nb; i++) {
      host_t *h = &e->hosts[i];
      if (H_IS_INIT( h ))
        nb ++;
    }
    if (nb == e->hosts_nb) {
      SET_READY(e);
      /* Flush anything queued while we were connecting. */
      e->send_async_pending = 1;
      ev_async_send( e->loop, &e->send_async );
    }
  }
}