void ShutDownEventQueues(void)
{
    EVQM_syslog(LOG_DEBUG, "EVENT Qs triggering exits.\n");

    pthread_mutex_lock(&DBEventQueueMutex);
    ev_async_send(event_db, &DBExitEventLoop);
    pthread_mutex_unlock(&DBEventQueueMutex);

    pthread_mutex_lock(&EventQueueMutex);
    ev_async_send(EV_DEFAULT_ &ExitEventLoop);
    pthread_mutex_unlock(&EventQueueMutex);
}
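All of the snippets in this section show the sending side of libev's async API: `ev_async_send()` is one of the few libev calls that is safe to make from another thread, and it causes the target loop to run the watcher's callback on that loop's own thread. For context, here is a minimal sketch of the receiving side; the names (`worker_loop`, `exit_async`, `on_exit_async`, `worker_thread`) are hypothetical and not taken from any of the projects quoted here.

```c
#include <ev.h>
#include <stddef.h>

static struct ev_loop *worker_loop;
static ev_async exit_async;

/* Runs on the worker loop's thread after some other thread calls
 * ev_async_send(worker_loop, &exit_async). */
static void on_exit_async(EV_P_ ev_async *w, int revents)
{
    ev_break(EV_A_ EVBREAK_ALL);   /* leave ev_run() below */
}

static void *worker_thread(void *arg)
{
    worker_loop = ev_loop_new(EVFLAG_AUTO);
    ev_async_init(&exit_async, on_exit_async);
    ev_async_start(worker_loop, &exit_async);
    ev_run(worker_loop, 0);        /* blocks until ev_break() */
    ev_loop_destroy(worker_loop);
    return NULL;
}
```

Note that multiple `ev_async_send()` calls may coalesce into a single callback invocation, which is why several snippets below pair the send with a mutex-guarded queue or flag.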
static void check_if_any_rtp_session_timedout(gpointer element, gpointer user_data)
{
    RTP_session *session = (RTP_session *)element;
    time_t *last_packet_send_time = (time_t *)user_data;
    time_t now = time(NULL);

    /* Jmkn: track the most recent packet send time across all sessions. */
    if (last_packet_send_time != NULL &&
        (session->last_packet_send_time > *last_packet_send_time)) {
        *last_packet_send_time = session->last_packet_send_time;
    }

#if 0
    /* Check if we didn't send any data for more than STREAM_BYE_TIMEOUT seconds;
     * this will happen if we are not receiving any more from the live producer
     * or if the stored stream ended. */
    if ((session->track->properties.media_source == MS_live) &&
        (now - session->last_packet_send_time) >= LIVE_STREAM_BYE_TIMEOUT) {
        fnc_log(FNC_LOG_INFO, "[client] Soft stream timeout");
        rtcp_send_sr(session, BYE);
    }

    /* If we were not able to serve any packet and the client ignored our BYE,
     * kick it by closing everything. */
    if (session->isBye != 0 &&
        (now - session->last_packet_send_time) >= STREAM_TIMEOUT) {
        fnc_log(FNC_LOG_INFO, "[client] Stream Timeout, client kicked off!");
        ev_async_send(session->srv->loop, &session->client->ev_sig_disconnect);
    } else {
#endif
    /* send an RTCP SDES report */
    rtcp_send_sr(session, SDES);

    /* If we have not read an RTCP report for 60 seconds, treat the client
     * as having lost its connection and disconnect the session. */
    if (session->srv->srvconf.rtcp_heartbeat != 0 &&
        session->last_rtcp_read_time != 0 &&
        (now - session->last_rtcp_read_time) >= 60) {
        fnc_log(FNC_LOG_INFO, "[client] Client Lost Connection\n");
        ev_async_send(session->srv->loop, &session->client->ev_sig_disconnect);
    }
#if 0
    }
#endif
}
void nitro_async_schedule(nitro_async_t *a)
{
    pthread_mutex_lock(&the_runtime->l_async);
    LL_APPEND(the_runtime->async_queue, a);
    pthread_mutex_unlock(&the_runtime->l_async);
    ev_async_send(the_runtime->the_loop, &the_runtime->thread_wake);
}
spx_private void Sender(EV_P_ ev_async *watcher, int revents)
{/*{{{*/
    ev_async_stop(loop, watcher);
    struct server_context *ctx = (struct server_context *) watcher->data;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "Sender ctx is NULL");
        return;
    }
    err_t err = Sender_WriteResponse(ctx->fd, ctx->response, &ctx->resp_len, &ctx->split_size);
    if ((0 == err) && (0 == ctx->split_size)) {
        if (ctx->resp_size == ctx->resp_len) {
            RequestFinish(ctx);
        } else {
            int remain_size = ctx->resp_size - ctx->resp_len;
            if (remain_size >= SPLIT_SIZE) {
                ctx->split_size = SPLIT_SIZE;
            } else {
                ctx->split_size = remain_size;
            }
            RegisterAayncWatcher(&ctx->async_watcher, Sender, ctx);
            ev_async_start(loop, &ctx->async_watcher);
            ev_async_send(loop, &ctx->async_watcher);
            return;
        }
    } else {
        if ((EAGAIN == err || EWOULDBLOCK == err || EINTR == err) || (ctx->resp_size > 0)) {
            ev_once(main_socket_loop, ctx->fd, EV_READ, ctx->timeout, Sender_ReWriteResponse, ctx);
            return;
        } else {
            SpxLog2(g_log, SpxLogError, err, "Sender Failed");
            RequestException(ctx, bad_request);
        }
    }
}/*}}}*/
spx_private void ParserRequest(EV_P_ ev_async *watcher, int revents)
{/*{{{*/
    ev_async_stop(loop, watcher);
    err_t err = 0;
    struct server_context *ctx = (struct server_context *) watcher->data;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "ParserRequest ctx is NULL");
        return;
    }

    printf("\n----------------CLIENT:%d xxxxxxxxxxxxxxxxxx CTX:%d-----------------------\n",
           ctx->fd, GetCTXCount());

    if (NULL != ctx->request) {
        //msg_print(ctx->request, ctx->req_size);
        long handle_size = ctx->ServerHandler(ctx->req_size, ctx->request, &ctx->response);
        if (-1 == handle_size) {
            RequestException(ctx, bad_request);
        } else {
            ctx->resp_size = handle_size;
            ctx->life_cycle = SEND_RESPONSE;
            RegisterAayncWatcher(&ctx->async_watcher, SendResponse, ctx);
            ev_async_start(loop, &ctx->async_watcher);
            ev_async_send(loop, &ctx->async_watcher);
        }
    } else {
        RequestException(ctx, bad_request);
    }
}/*}}}*/
spx_private void SendResponse(EV_P_ ev_async *watcher, int revents)
{/*{{{*/
    ev_async_stop(loop, watcher);
    err_t err = 0;
    struct server_context *ctx = (struct server_context *) watcher->data;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "SendResponse ctx is NULL");
        return;
    }

    if (NULL != ctx->response) {
        //headers(ctx->fd, ctx->resp_size);
        int remain_size = ctx->resp_size - ctx->resp_len;
        if (remain_size >= SPLIT_SIZE) {
            ctx->split_size = SPLIT_SIZE;
        } else {
            ctx->split_size = remain_size;
        }
        RegisterAayncWatcher(&ctx->async_watcher, Sender, ctx);
        ev_async_start(loop, &ctx->async_watcher);
        ev_async_send(loop, &ctx->async_watcher);
    } else {
        RequestException(ctx, bad_request);
    }
}/*}}}*/
void proc_emu_on_io_in(struct connection *con, struct processor_data *pd)
{
    g_debug("%s con %p pd %p", __PRETTY_FUNCTION__, con, pd);
    struct emu_ctx *ctx = pd->ctx;

    int offset = MAX(ctx->offset - 300, 0);
    void *streamdata = NULL;
    int32_t size = bistream_get_stream(pd->bistream, bistream_in, offset, -1, &streamdata);
    int ret = 0;
    if (size != -1) {
        struct emu *e = emu_new();
#if 0
        emu_cpu_debugflag_set(emu_cpu_get(e), instruction_string);
        emu_log_level_set(emu_logging_get(e), EMU_LOG_DEBUG);
#endif
        ret = emu_shellcode_test(e, streamdata, size);
        emu_free(e);
        ctx->offset += size;
        if (ret >= 0) {
            struct incident *ix = incident_new("dionaea.shellcode.detected");
            GAsyncQueue *aq = g_async_queue_ref(g_dionaea->threads->cmds);
            g_async_queue_push(aq, async_cmd_new(async_incident_report, ix));
            g_async_queue_unref(aq);
            ev_async_send(g_dionaea->loop, &g_dionaea->threads->trigger);
            g_debug("shellcode found offset %i", ret);
            profile(ctx->config, con, streamdata, size, ret);
            pd->state = processor_done;
        }
        g_free(streamdata);
    }
}
bool MatchServerTest::RequestPublic(int index, char* sendBuffer, unsigned int len)
{
    struct sockaddr_in mAddr;
    bzero(&mAddr, sizeof(mAddr));
    mAddr.sin_family = AF_INET;
    mAddr.sin_port = htons(mPort + 1);
    mAddr.sin_addr.s_addr = inet_addr(mIp.c_str());

    int iFlag = 1;
    int fd;
    if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
        printf("# MatchServerTest::RequestPublic( Create socket error ) \n");
        return false;
    }

    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const char *)&iFlag, sizeof(iFlag));
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &iFlag, sizeof(iFlag));

    if (connect(fd, (struct sockaddr *)&mAddr, sizeof(mAddr)) != -1) {
        // printf("# MatchServerTest::RequestPublic( Connect ok, index : %d ) \n", index);
        if (send(fd, sendBuffer, strlen(sendBuffer), 0) > 0) {
            // start listening for the reply
            mRequestList.PushBack(fd);
            ev_async_send(mLoop, &mAsync_send_watcher);
            return true;
        } else {
            // close the connection
            close(fd);
        }
    } else {
        printf("# MatchServerTest::RequestPublic( Connect fail ) \n");
    }

    return false;
}
void user_worker(gpointer psession, gpointer data)
{
    user_session_t *usersession = (user_session_t *) psession;
    struct msg_addr_set *gmsg;
    int ret;
    gboolean sess_ok = TRUE;

    if (g_mutex_trylock(usersession->rw_lock)) {
        while (sess_ok && (gmsg = g_async_queue_try_pop(usersession->workunits_queue))) {
            if (gmsg == GINT_TO_POINTER(0x1)) {
                debug_log_message(VERBOSE_DEBUG, DEBUG_AREA_USER,
                                  "reading message from \"%s\"",
                                  usersession->user_name);
                ret = tls_user_check_activity(usersession);
                switch (ret) {
                case NU_EXIT_OK:
                case NU_EXIT_CONTINUE:
                    break;
                case NU_EXIT_ERROR:
                    log_message(INFO, DEBUG_AREA_USER,
                                "Problem reading message from \"%s\"",
                                usersession->user_name);
                    usersession->pending_disconnect = TRUE;
                    sess_ok = FALSE;
                    break;
                }
            } else {
                debug_log_message(VERBOSE_DEBUG, DEBUG_AREA_USER,
                                  "writing message to \"%s\"",
                                  usersession->user_name);
                /* send message */
                ret = nussl_write(usersession->nussl, (char *)gmsg->msg,
                                  ntohs(gmsg->msg->length));
                g_free(gmsg->msg);
                g_free(gmsg);
                if (ret < 0) {
                    debug_log_message(VERBOSE_DEBUG, DEBUG_AREA_USER,
                                      "client disconnect");
                    usersession->pending_disconnect = TRUE;
                    sess_ok = FALSE;
                    break;
                }
            }
        }
        /* hand the socket back to the user select loop now that no
         * messages are waiting */
        g_async_queue_push(mx_queue, usersession);
        g_mutex_unlock(usersession->rw_lock);
        ev_async_send(usersession->srv_context->loop,
                      &usersession->srv_context->client_injector_signal);
    } else {
        debug_log_message(VERBOSE_DEBUG, DEBUG_AREA_USER,
                          "client locked at %s:%d", __FILE__, __LINE__);
    }
    return;
}
static void accept_cb(struct ev_loop *loop, struct ev_io *w, int revents)
{
    int client_fd;
    struct sockaddr_in client_addr;
    socklen_t client_len = sizeof(client_addr);

    if (EV_ERROR & revents) {
        perror(" w got invalid event");
        return;
    }

    client_fd = accept(w->fd, (struct sockaddr *)&client_addr, &client_len);
    if (client_fd == -1) {
        return;
    }
    setnonblock(client_fd);

    if (!ev_async_pending(&async_watcher)) {
        pthread_mutex_lock(&lock);
        memset(&swap, 0, sizeof(swap));
        swap.fd = client_fd;
        swap.state = NOTINIT;
        swap.start_time = 1384227300;
        swap.in_used = 1;
        pthread_mutex_unlock(&lock);
        ev_async_send(work_loop, &async_watcher);
    }
}
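The accept callback above hands each new connection to a worker loop through a single mutex-guarded `swap` slot, gated on `ev_async_pending()` so a not-yet-consumed slot is not overwritten; note that any connection accepted while the watcher is still pending is silently dropped, which is why a queue (as in several other snippets here) is the more common hand-off. A minimal sketch of the matching consumer on `work_loop` follows; the callback name, the `conn_t` type, and `handle_new_client` are hypothetical, only `swap`, `lock`, and `async_watcher` come from the snippet.

```c
/* Hypothetical consumer registered on work_loop with
 * ev_async_init(&async_watcher, async_cb). */
static void async_cb(struct ev_loop *loop, ev_async *w, int revents)
{
    conn_t local;                     /* hypothetical type of the shared `swap` slot */

    pthread_mutex_lock(&lock);
    local = swap;                     /* copy the handed-off connection out */
    swap.in_used = 0;                 /* mark the slot free for the next accept */
    pthread_mutex_unlock(&lock);

    handle_new_client(loop, &local);  /* hypothetical: register io watchers, etc. */
}
```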
void *ZipFile::Save_Thread(void *data)
{
    save_closure_t *save = (save_closure_t *)data;

    int ret = zip_close(save->zf->archive);

    pthread_mutex_lock(&save->mutex);
    if (ret < 0) {
        std::stringstream s;
        s << "Error while saving in zip archive: "
          << zip_strerror(save->zf->archive) << "\n";
        save->error = new std::string(s.str());
    } else {
        // We assume that reopening is ok here..
        int err;
        save->zf->archive = zip_open(save->zf->file_name.c_str(), ZIP_CREATE, &err);
    }
    save->done = true;
    pthread_mutex_unlock(&save->mutex);

    ev_async_send(EV_DEFAULT_UC_ &notifier);
    return NULL;
}
spx_private void ReciveRequest(EV_P_ ev_async *watcher, int revents)
{/*{{{*/
    ev_async_stop(loop, watcher);
    struct server_context *ctx = (struct server_context *) watcher->data;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "ReciveRequest ctx is NULL");
        return;
    }
    err_t err = ReciveRequest_GetRequest_ReadRequest(ctx->fd, ctx->request, &ctx->req_len);
    if (0 == err) {
        RegisterAayncWatcher(&ctx->async_watcher, ParserRequest, ctx);
        ev_async_start(loop, &ctx->async_watcher);
        ev_async_send(loop, &ctx->async_watcher);
        printf("buf:%s\n", ctx->request);
        return;
    } else {
        if (EAGAIN == err || EWOULDBLOCK == err || EINTR == err) {
            ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
            return;
        } else {
            SpxLog2(g_log, SpxLogError, err, "ReciveRequest_GetRequest_ReadRequest Failed");
            if (-1 == err)
                CloseCTX(ctx);
            else
                RequestException(ctx, bad_request);
            return;
        }
    }
}/*}}}*/
/* the actual thread entrypoint passed to pthread_create() */
static void *uv__thread_run(void *arg)
{
    /* synchronously get the entrypoint and call it */
    uv_thread_shared_t *hnd = (uv_thread_shared_t *)arg;
    pthread_mutex_lock(&hnd->mtx);
    uv_thread_run thread_run = hnd->options->thread_run;
    void *thread_arg = hnd->options->thread_arg;
    /* ... any other processing depending on thread or options here ... */
    pthread_cond_signal(&hnd->cond);
    pthread_mutex_unlock(&hnd->mtx);

    void *result = (*thread_run)(hnd, thread_arg);

    /* close any pipes created */
    if (hnd->stdin_fd != -1) uv__close(hnd->stdin_fd);
    if (hnd->stdout_fd != -1) uv__close(hnd->stdout_fd);
    if (hnd->stderr_fd != -1) uv__close(hnd->stderr_fd);

    /* synchronously notify exit to the client event loop */
    pthread_mutex_lock(&hnd->mtx);
    uv_thread_t *thread = hnd->thread_handle;
    if (thread) {
        /* copy these before they disappear ... */
        thread->exit_status = hnd->exit_status;
        thread->term_signal = hnd->term_signal;
        ev_async_send(thread->loop->ev, &thread->thread_watcher);
    }
    hnd->thread_id = 0;
    pthread_mutex_unlock(&hnd->mtx);

    /* if the handle has already gone away, delete the shared handle */
    if (!thread) uv__thread_shared_delete(hnd);

    return result;
}
void fsock_thread_schedule_task(struct fsock_thread *self, struct fsock_task *task)
{
    fsock_mutex_lock(&self->sync);
    fsock_queue_push(&self->jobs, &task->item);
    ev_async_send(self->loop, &self->job_async);
    fsock_mutex_unlock(&self->sync);
}
static void run_tasklet(gpointer data, gpointer userdata)
{
    liTaskletPool *pool = userdata;
    liTasklet *t = data;

    t->run_cb(t->data);

    g_async_queue_push(pool->finished, t);
    ev_async_send(pool->loop, &pool->finished_watcher);
}
/*
 * EV send async trigger for new commands to send
 * (external to the event loop)
 */
static inline void
_eredis_ev_send_trigger(eredis_t *e)
{
    if (IS_READY(e) && !IS_SHUTDOWN(e) && !e->send_async_pending) {
        e->send_async_pending = 1;
        ev_async_send(e->loop, &e->send_async);
    }
}
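The `send_async_pending` flag here suppresses redundant wakeups while one is already in flight. For the scheme to be correct, the watcher callback on the loop side has to clear the flag before draining the command queue, so that a command enqueued after the drain re-arms the trigger. A hypothetical sketch of that counterpart, assuming the watcher's `data` pointer was set to the `eredis_t` (only `send_async_pending` comes from the snippet above):

```c
static void _eredis_ev_send_cb(EV_P_ ev_async *w, int revents)
{
    eredis_t *e = (eredis_t *)w->data;  /* assumption: data points at the eredis_t */

    e->send_async_pending = 0;          /* clear first: a trigger arriving after
                                           this point schedules a fresh wakeup */

    /* ... drain and send the queued commands here ... */
}
```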
// When writing these settings we must lock
inline void dwt_update_settings(void *data)
{
    // tell the DUMMY_WORKER thread to copy the settings from the pointers we gave it
    pthread_mutex_lock(&(thread_control.settings_lock));
    memcpy(&dummy_settings, data, sizeof(dummy_settings));
    pthread_mutex_unlock(&(thread_control.settings_lock));
    ev_async_send(thread_control.EV_A, &(thread_control.update_settings));
}
static void quit(MilterEventLoop *loop)
{
    MilterLibevEventLoopPrivate *priv;

    priv = MILTER_LIBEV_EVENT_LOOP_GET_PRIVATE(loop);
    ev_async_send(priv->ev_loop, priv->breaker);
}
spx_private void ReciveRequest(EV_P_ ev_async *watcher, int revents)
{/*{{{*/
    ev_async_stop(loop, watcher);
    struct server_context *ctx = (struct server_context *) watcher->data;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "ReciveRequest ctx is NULL");
        return;
    }

    if (READ_HEADER == ctx->life_cycle) {
        char buf[LOG_HEADER_SIZE] = {0};
        size_t len = 0;
        err_t err = ReciveRequest_GetRequest_ReadRequest(ctx->fd, buf, &len, LOG_HEADER_SIZE);
        if (0 == err) {
            log_header_unpack(buf, ctx->header);
            char *request = (char *) calloc(1, sizeof(char) * ctx->header->req_size);
            if (NULL == request) {
                SpxLog2(g_log, SpxLogError, err, "calloc request buffer failed");
                return;
            }
            ctx->req_size = ctx->header->req_size;
            ctx->request = request;
            ctx->life_cycle = READ_REQUEST;
        } else {
            if (EAGAIN == err || EWOULDBLOCK == err || EINTR == err) {
                ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
                return;
            } else {
                SpxLog2(g_log, SpxLogError, err, "Read header failed");
                CloseCTX(ctx);
                return;
            }
        }
    }

    if (READ_REQUEST == ctx->life_cycle) {
        printf("---------------ReadRequest-------------\n");
        printf("req_size:%d\n", (int)ctx->req_size);
        err_t err = ReciveRequest_GetRequest_ReadRequest(ctx->fd, ctx->request, &ctx->req_len, ctx->req_size);
        printf("read request complete\n");
        if (0 == err) {
            ctx->life_cycle = PARSE_REQUEST;
            RegisterAayncWatcher(&ctx->async_watcher, ParserRequest, ctx);
            ev_async_start(loop, &ctx->async_watcher);
            ev_async_send(loop, &ctx->async_watcher);
            return;
        } else {
            if (EAGAIN == err || EWOULDBLOCK == err || EINTR == err) {
                ev_once(loop, ctx->fd, EV_READ, ctx->timeout, ReciveRequest_GetRequest, ctx);
                return;
            } else {
                SpxLog2(g_log, SpxLogError, err, "ReciveRequest_GetRequest_ReadRequest Failed");
                CloseCTX(ctx);
                return;
            }
        }
    }
}/*}}}*/
void EventDispatcherLibEv::wakeUp(void)
{
    Q_D(EventDispatcherLibEv);
    if (d->m_wakeups.testAndSetAcquire(0, 1)) {
        ev_async_send(d->m_base, &d->m_wakeup);
    }
}
/*==============================================================================
 * Name   : void Client_switch_to_recv(CONN_INFO* conn)
 * Abstr  : Start the io watcher to wait for data
 * Params : CONN_INFO* conn : connection with db server
 * Return :
 * Modify :
 *=============================================================================*/
static void Client_switch_to_recv(CONN_INFO* conn)
{
    /* io watcher switches to waiting for data */
    ev_io_init(&conn->io_wt, Client_recv_cb, conn->fd, EV_READ);
    ev_io_start(host_loop, &conn->io_wt);

    /* notify the host loop */
    ev_async_send(host_loop, &async_wt);
}
/*==============================================================================
 * Name   : void Client_switch_to_test(CONN_INFO* conn)
 * Abstr  : Start the socket test watcher
 * Params : CONN_INFO* conn : connection with db server
 * Return :
 * Modify :
 *=============================================================================*/
static void Client_switch_to_test(CONN_INFO* conn)
{
    /* io watcher switches to testing status */
    ev_io_init(&conn->io_wt, Client_test_cb, conn->fd, EV_READ | EV_WRITE);
    ev_io_start(host_loop, &conn->io_wt);

    /* notify the host loop */
    ev_async_send(host_loop, &async_wt);
}
/*==============================================================================
 * Name   : void Client_switch_to_start(CONN_INFO* conn)
 * Abstr  : Start the connection restart timer
 * Params : CONN_INFO* conn : connection with db server
 * Return :
 * Modify :
 *=============================================================================*/
static void Client_switch_to_start(CONN_INFO* conn)
{
    /* start the connection after 120s */
    ev_timer_init(&conn->t_wt, Client_start_cb, timer_array[TID_CONN_START].t_val, 0.);
    ev_timer_start(host_loop, &conn->t_wt);

    /* notify the host loop */
    ev_async_send(host_loop, &async_wt);
}
bool LinkageWorker::SendCommand(Runnable *command)
{
    MutexLocker locker(_mutex);
    _commands.push_back(command);
    locker.Unlock();

    ev_async_send(_loop, _async);
    return true;
}
static void sigint_callback(EV_P_ ev_signal *w, int revents)
{
    if (revents & EV_SIGNAL) {
        printf("Call signal_callback\n");
        printf("ev_async_pending before ev_async_send: %d\n", ev_async_pending(&async_watcher));
        ev_async_send(EV_A, &async_watcher);  /* this invokes async_callback */
        printf("ev_async_pending after ev_async_send: %d\n", ev_async_pending(&async_watcher));
    }
}
void scheduler_impl_t::activate(fiber_impl_t* fiber) {
    assert(!fiber->is_terminated());

    std::unique_lock<spinlock_t> guard(activated_lock_);
    if (fiber->is_linked())
        return;
    activated_fibers_.push_back(*fiber);
    guard.unlock();

    ev_async_send(ev_loop_, &activate_);
}
int jack_callback(jack_nframes_t nframes, void *arg)
{
    int jack_bsize = nframes * sizeof(jack_default_audio_sample_t);
    const char *jack_b = jack_port_get_buffer(output_port, nframes);

    /* the ringbuffer is full: the consumer is not keeping up */
    if (jack_ringbuffer_write(ringbuf, (void *) jack_b, jack_bsize) < jack_bsize) {
        fprintf(stderr, "buffer overrun!\n");
    }

    if (rtmp_i &&
        jack_ringbuffer_read_space(ringbuf) >= (buffer_samples * sizeof(jack_default_audio_sample_t))) {
        ev_async_send(loop, &async);
    }

    return 0;
}
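This snippet shows the classic pattern for realtime audio threads: the JACK process callback may not block, so it pushes samples into a lock-free ringbuffer and uses `ev_async_send()` purely as a doorbell for the event-loop thread. A hypothetical sketch of the loop-side consumer follows; `async_cb` and `encode_and_send` are made-up names, and it assumes `async` was registered with `ev_async_init(&async, async_cb)` on `loop`.

```c
static void async_cb(EV_P_ ev_async *w, int revents)
{
    jack_default_audio_sample_t buf[4096];

    /* drain whole chunks; leftovers wait for the next doorbell */
    while (jack_ringbuffer_read_space(ringbuf) >= sizeof(buf)) {
        jack_ringbuffer_read(ringbuf, (char *)buf, sizeof(buf));
        encode_and_send(buf, sizeof(buf) / sizeof(buf[0]));  /* hypothetical */
    }
}
```

Since async sends coalesce, the callback drains everything available rather than assuming one send per chunk.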
void Loop::quit() {
    _quit = true;
    if (nullptr != _loop) {
        if (std::this_thread::get_id() == _owner) {
            ev_break(_loop);
        } else {
            SLOG(INFO) << "quit async!";
            ev_async_send(_loop, &_async);
        }
    }
}
spx_private void Sender_ReWriteResponse(int revents, void *arg)
{/*{{{*/
    struct server_context *ctx = (struct server_context *) arg;
    if (NULL == ctx) {
        SpxLog1(g_log, SpxLogError, "Sender ctx is NULL");
        return;
    }
    if (EV_ERROR & revents) {
        SpxLog1(g_log, SpxLogError, "EV_ERROR");
        return;
    }
    if (EV_TIMEOUT & revents) {
        if ((ctx->resp_retry++) >= RETRY_TIMES) {
            RequestException(ctx, bad_request);
            SpxLog1(g_log, SpxLogError, "EV_TIMEOUT");
            return;
        } else {
            ev_once(main_socket_loop, ctx->fd, EV_WRITE, ctx->timeout, Sender_ReWriteResponse, ctx);
            return;
        }
    }
    if (EV_WRITE & revents) {
        err_t err = Sender_WriteResponse(ctx->fd, ctx->response, &ctx->resp_len, &ctx->split_size);
        if ((0 == err) && (0 == ctx->split_size)) {
            if (ctx->resp_size == ctx->resp_len) {
                RequestFinish(ctx);
            } else {
                int remain_size = ctx->resp_size - ctx->resp_len;
                if (remain_size >= SPLIT_SIZE) {
                    ctx->split_size = SPLIT_SIZE;
                } else {
                    ctx->split_size = remain_size;
                }
                RegisterAayncWatcher(&ctx->async_watcher, Sender, ctx);
                ev_async_start(main_socket_loop, &ctx->async_watcher);
                ev_async_send(main_socket_loop, &ctx->async_watcher);
                return;
            }
        } else {
            if ((EAGAIN == err || EWOULDBLOCK == err || EINTR == err) || (ctx->resp_size > 0)) {
                ev_once(main_socket_loop, ctx->fd, EV_READ, ctx->timeout, Sender_ReWriteResponse, ctx);
                return;
            } else {
                SpxLog2(g_log, SpxLogError, err, "Sender Failed");
                RequestException(ctx, bad_request);
            }
        }
    }
}/*}}}*/
/**
 * Sends an event to the AsyncEvent object, which will trigger it in the event
 * loop.
 *
 * @return boolean false if the object is not attached to an event loop
 */
PHP_METHOD(AsyncEvent, send)
{
    event_object *obj = (event_object *)zend_object_store_get_object(getThis() TSRMLS_CC);

    if (event_has_loop(obj)) {
        ev_async_send(obj->loop_obj->loop, (ev_async *)obj->watcher);
        RETURN_BOOL(1);
    }

    RETURN_BOOL(0);
}