// Runs the message pump: alternates the delegate's work callbacks with
// libevent I/O dispatch until Quit() clears |keep_running_|.
void MessagePumpLibevent::Run(Delegate* delegate) {
  //DCHECK(keep_running_) << "Quit must have been called outside of Run!";
  assert(keep_running_);
  // event_base_loopexit() + EVLOOP_ONCE is leaky, see http://crbug.com/25641.
  // Instead, make our own timer and reuse it on each call to event_base_loop().
  std::unique_ptr<event> timer_event(new event);
  for (;;) {
    // Immediate work first; bail out as soon as Quit() is observed.
    bool did_work = delegate->DoWork();
    if (!keep_running_)
      break;
    // Non-blocking pass through libevent so pending I/O callbacks run.
    event_base_loop(event_base_, EVLOOP_NONBLOCK);
    did_work |= processed_io_events_;
    processed_io_events_ = false;
    if (!keep_running_)
      break;
    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
    if (!keep_running_)
      break;
    // If anything ran, loop again before considering idle work or sleeping.
    if (did_work)
      continue;
    did_work = delegate->DoIdleWork();
    if (!keep_running_)
      break;
    if (did_work)
      continue;
    // Nothing to do: sleep inside libevent.  EVLOOP_ONCE tells libevent to
    // only block once, but to service all pending events when it wakes up.
    if (delayed_work_time_.is_null()) {
      event_base_loop(event_base_, EVLOOP_ONCE);
    } else {
      TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
      if (delay > TimeDelta()) {
        // Arm the reusable one-shot timer so the sleep wakes in time for the
        // next delayed task.
        struct timeval poll_tv;
        poll_tv.tv_sec = (long)delay.InSeconds();
        poll_tv.tv_usec = delay.InMicroseconds() % Time::kMicrosecondsPerSecond;
        event_set(timer_event.get(), -1, 0, timer_callback, event_base_);
        event_base_set(event_base_, timer_event.get());
        event_add(timer_event.get(), &poll_tv);
        event_base_loop(event_base_, EVLOOP_ONCE);
        event_del(timer_event.get());
      } else {
        // The delayed deadline already passed; clear it and loop again.
        delayed_work_time_ = TimeTicks();
      }
    }
  }
  // Reset for the next (possibly nested) Run() invocation.
  keep_running_ = true;
}
static void test_edgetriggered(void *et) { struct event *ev = NULL; struct event_base *base = NULL; const char *test = "test string"; evutil_socket_t pair[2] = {-1,-1}; int supports_et; if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair) == -1) { tt_abort_perror("socketpair"); } called = was_et = 0; send(pair[0], test, (int)strlen(test)+1, 0); shutdown(pair[0], SHUT_WR); /* Initalize the event library */ base = event_base_new(); if (!strcmp(event_base_get_method(base), "epoll") || !strcmp(event_base_get_method(base), "epoll (with changelist)") || !strcmp(event_base_get_method(base), "kqueue")) supports_et = 1; else supports_et = 0; TT_BLATHER(("Checking for edge-triggered events with %s, which should %s" "support edge-triggering", event_base_get_method(base), supports_et?"":"not ")); /* Initalize one event */ ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev); event_add(ev, NULL); /* We're going to call the dispatch function twice. The first invocation * will read a single byte from pair[1] in either case. If we're edge * triggered, we'll only see the event once (since we only see transitions * from no data to data), so the second invocation of event_base_loop will * do nothing. If we're level triggered, the second invocation of * event_base_loop will also activate the event (because there's still * data to read). */ event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE); event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE); if (supports_et) { tt_int_op(called, ==, 1); tt_assert(was_et); } else {
/*
 * Register p_notify's pipe read end with its event base and run the
 * dispatch loop until isLoop is cleared.
 * Returns 0 on success, -1 if the event could not be set up.
 */
int notify_event_init(notify_t *p_notify)
{
    int ret = 0;

    assert(p_notify != NULL);
    set_nonblocking(p_notify->pipe_fd[0]);

    /* NOTE: this event lives on the stack; that is safe only because the
     * function does not return while the event is registered. */
    struct event event_notify;

    do {
        if (event_set(&event_notify,
                      p_notify->pipe_fd[0],
                      EV_READ | EV_PERSIST,
                      p_notify->func,
                      &(p_notify->index)) != 0) {
            ret = -1;
            break;
        }
        /* BUG FIX: event_set() attaches the event to the *default* base,
         * but the loop below runs p_notify->ev_base.  Rebind the event to
         * the base we actually dispatch, otherwise the callback can never
         * fire. */
        if (event_base_set(p_notify->ev_base, &event_notify) != 0) {
            ret = -1;
            break;
        }
        if (event_add(&event_notify, NULL) != 0) {
            ret = -1;
            break;
        }
        while (isLoop) {
            event_base_loop(p_notify->ev_base, 0);
        }
    } while (0);

    return ret;
}
void rspamd_async_test_func () { struct aio_context *aio_ctx; gchar *tmpfile; static gchar testbuf[BUFSIZ]; gint fd, afd, ret; aio_ctx = rspamd_aio_init (base); g_assert (aio_ctx != NULL); fd = g_file_open_tmp ("raXXXXXX", &tmpfile, NULL); g_assert (fd != -1); afd = rspamd_aio_open (aio_ctx, tmpfile, O_RDWR); g_assert (fd != -1); /* Write some data */ memset (testbuf, 0xef, sizeof (testbuf)); ret = rspamd_aio_write (afd, testbuf, sizeof (testbuf), 0, aio_ctx, aio_write_cb, aio_ctx); g_assert (ret != -1); event_base_loop (base, 0); close (afd); close (fd); unlink (tmpfile); }
void *worker_libevent(void *arg) { WTQ *work_child; LIBEVENT_WORK_THREAD *me = (LIBEVENT_WORK_THREAD *)arg; /* Any per-thread setup can happen here; thread_init() will block until * all threads have finished initializing. */ pthread_mutex_lock(&init_work_lock); work_child = wtq_init(); work_child->pid = pthread_self(); me->thread_id = work_child->pid; work_child->no = me->no; if(wtq_queue_head == NULL){ wtq_queue_tail = wtq_queue_head = work_child; }else{ wtq_queue_head->next = work_child; wtq_queue_head = work_child; } init_count++; /* error_add(pthread_self());*/ pthread_cond_signal(&init_work_cond); pthread_mutex_unlock(&init_work_lock); wtq_queue_head->next = wtq_queue_tail; event_base_loop(me->base, 0); }
// Main dispatch loop of the server thread: installs a persistent read event
// on the stop pipe, pumps libevent until the server status is STOPPED, then
// flushes pending responses and optionally drains remaining events.
void LibEventServer::dispatch() {
  m_pipeStop.open();
  // The stop pipe wakes the loop so the status check below can run.
  event_set(&m_eventStop, m_pipeStop.getOut(), EV_READ|EV_PERSIST,
            on_thread_stop, m_eventBase);
  event_base_set(m_eventBase, &m_eventStop);
  event_add(&m_eventStop, nullptr);

  // One EVLOOP_ONCE pass per iteration so getStatus() is re-checked between
  // batches of events.
  while (getStatus() != RunStatus::STOPPED) {
    event_base_loop(m_eventBase, EVLOOP_ONCE);
  }

  event_del(&m_eventStop);

  // flushing all responses
  if (!m_responseQueue.empty()) {
    m_responseQueue.process();
  }
  m_responseQueue.close();

  if (m_takeover_agent) {
    m_takeover_agent->stop();
  }

  // flushing all remaining events
  if (RuntimeOption::ServerGracefulShutdownWait) {
    dispatchWithTimeout(RuntimeOption::ServerGracefulShutdownWait);
  }
}
/* Minimal libevent accept-loop bootstrap: bind TCP port 9877 on all
 * interfaces and dispatch read events on the listening socket through
 * call_accept. */
int main()
{
    struct sockaddr_in servaddr;
    int listenfd = socket(AF_INET, SOCK_STREAM, 0);

    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    servaddr.sin_port = htons(9877);

    bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr));
    listen(listenfd, 10);

    main_base = event_init();

    struct event ev;
    event_set(&ev, listenfd, EV_READ | EV_PERSIST, call_accept, &ev);
    /* event_base_set(main_base, &ev); — not needed: event_init() made
     * main_base the default base that event_set() attaches to. */
    event_add(&ev, NULL);

    /* Blocks here until the base exits. */
    event_base_loop(main_base, 0);

    printf("block before accept\n");
    printf("after event_dispatch\n");
    return 0;
}
/* Chunked-HTTP client demo: connects to 75.126.169.52:80, attaches hooks
 * that print body/chunk progress, issues GET / and runs the event loop
 * until the connection closes. */
int main(int argc, char ** argv) {
    evbase_t           * evbase;
    evhtp_connection_t * conn;
    evhtp_request_t    * request;

    evbase  = event_base_new();
    conn    = evhtp_connection_new(evbase, "75.126.169.52", 80);
    request = evhtp_request_new(request_cb, evbase);

    /* Hooks fire as response body / chunk framing arrives. */
    evhtp_set_hook(&request->hooks, evhtp_hook_on_read, print_data, evbase);
    evhtp_set_hook(&request->hooks, evhtp_hook_on_new_chunk,
                   print_new_chunk_len, NULL);
    evhtp_set_hook(&request->hooks, evhtp_hook_on_chunk_complete,
                   print_chunk_complete, NULL);
    evhtp_set_hook(&request->hooks, evhtp_hook_on_chunks_complete,
                   print_chunks_complete, NULL);

    evhtp_headers_add_header(request->headers_out,
                             evhtp_header_new("Host", "ieatfood.net", 0, 0));
    evhtp_headers_add_header(request->headers_out,
                             evhtp_header_new("User-Agent", "libevhtp", 0, 0));
    /* Connection: close so the event loop terminates after the response. */
    evhtp_headers_add_header(request->headers_out,
                             evhtp_header_new("Connection", "close", 0, 0));

    evhtp_make_request(conn, request, htp_method_GET, "/");

    event_base_loop(evbase, 0);
    event_base_free(evbase);

    return 0;
}
int main(int argc, char **argv) { SSL_CTX *ctx; struct evconnlistener *listener; struct event_base *evbase; struct sockaddr_in sin; memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_port = htons(9999); sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */ ctx = evssl_init(); if (ctx == NULL) return 1; evbase = event_base_new(); listener = evconnlistener_new_bind( evbase, ssl_acceptcb, (void *)ctx, LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, 1024, (struct sockaddr *)&sin, sizeof(sin)); event_base_loop(evbase, 0); evconnlistener_free(listener); SSL_CTX_free(ctx); return 0; }
/* Connects a bufferevent to addr:port using async DNS resolution, arms the
 * periodic send timer, and runs the event loop. */
int main(int argc, char ** argv) {
    struct evdns_base * dns_base;
    struct timeval      tv;

    if (parse_args(argc, argv) < 0) {
        exit(1);
    }

    /* Timer period comes from command-line globals set by parse_args(). */
    tv.tv_sec  = sec;
    tv.tv_usec = usec;

    evbase   = event_base_new();
    dns_base = evdns_base_new(evbase, 1);
    bev      = bufferevent_socket_new(evbase, -1, BEV_OPT_CLOSE_ON_FREE);
    sev      = evtimer_new(evbase, send_byte, bev);

    bufferevent_setcb(bev, readcb, NULL, eventcb, evbase);
    bufferevent_enable(bev, EV_READ | EV_WRITE);
    /* Non-blocking connect; eventcb fires on success/failure. */
    bufferevent_socket_connect_hostname(bev, dns_base, AF_UNSPEC, addr, port);

    event_base_loop(evbase, 0);

    /* NOTE(review): this prints "sending" for each leftover input byte but
     * never transmits it — the event loop above has already exited.
     * Presumably leftover debug/drain code; confirm intent. */
    while (fread(&fbyte, 1, 1, input_file) == 1) {
        fprintf(stdout, "sending: '%c' (%x)\n",
                isprint(fbyte) ? fbyte : ' ', fbyte);
    }

    return 0;
}
/* Waits for activity on the curl client's event base.
 * custom_timeout, when set, bounds this wait; otherwise the timeout event
 * is (re)armed from curl's own suggested timeout (capped at 1000ms).
 * Returns SUCCESS, or FAILURE if arming the timeout or dispatching fails. */
static ZEND_RESULT_CODE php_http_client_curl_event_wait(void *context, struct timeval *custom_timeout)
{
	php_http_client_curl_event_context_t *ctx = context;
	struct timeval timeout;

#if DBG_EVENTS
	fprintf(stderr, "W");
#endif

	if (!event_initialized(ctx->timeout)) {
		/* First use: bind the timeout event to the base.  NOTE(review):
		 * this branch only assigns and does not event_add() — confirm the
		 * first wait is expected to rely on a later call arming it. */
		if (0 > event_assign(ctx->timeout, ctx->evbase, CURL_SOCKET_TIMEOUT, 0, php_http_client_curl_event_timeout_callback, ctx)) {
			return FAILURE;
		}
	} else if (custom_timeout && timerisset(custom_timeout)) {
		/* A caller-supplied timeout takes precedence. */
		if (0 > event_add(ctx->timeout, custom_timeout)) {
			return FAILURE;
		}
	} else if (!event_pending(ctx->timeout, EV_TIMEOUT, NULL)) {
		/* Nothing pending: ask curl for its preferred timeout and arm it. */
		php_http_client_curl_get_timeout(ctx->client->ctx, 1000, &timeout);
		if (0 > event_add(ctx->timeout, &timeout)) {
			return FAILURE;
		}
	}

	/* Dispatch one batch of events (blocks until something fires). */
	if (0 > event_base_loop(ctx->evbase, EVLOOP_ONCE)) {
		return FAILURE;
	}
	return SUCCESS;
}
/** \brief SCLogRedisWriteAsync() writes string to redis output in async mode
 * \param file_ctx Log file context allocated by caller
 * \param string Buffer to output
 * \param string_len Length of the buffer (unused here; the command is
 *        formatted from the NUL-terminated string)
 * \retval 0 on success, -1 if the connection or async context is unavailable
 */
static int SCLogRedisWriteAsync(LogFileCtx *file_ctx, const char *string, size_t string_len)
{
    SCLogRedisContext *ctx = file_ctx->redis;

    /* Lazily (re)establish the async connection. */
    if (! ctx->connected) {
        if (SCConfLogReopenAsyncRedis(file_ctx) == -1) {
            return -1;
        }
        if (ctx->tried == 0) {
            SCLogNotice("Trying to connect to Redis");
        }
        /* Probe the connection with an ECHO before queuing real data. */
        SCLogAsyncRedisSendEcho(ctx);
    }

    if (!ctx->connected) {
        return -1;
    }

    if (ctx->async == NULL) {
        return -1;
    }

    /* Queue "<command> <key> <string>" on the async context... */
    redisAsyncCommand(ctx->async,
            SCRedisAsyncCommandCallback,
            file_ctx,
            "%s %s %s",
            file_ctx->redis_setup.command,
            file_ctx->redis_setup.key,
            string);

    /* ...and pump the event base once, non-blocking, to push it out. */
    event_base_loop(ctx->ev_base, EVLOOP_NONBLOCK);
    return 0;
}
void workerLoop(struct worker* worker) { event_base_priority_init(worker->event_base, 2); //Seed event for each fd int i; for( i = 0; i < worker->nConnections; i++) { struct event* ev = event_new(worker->event_base, worker->connections[i]->sock, EV_WRITE|EV_PERSIST, sendCallback, worker); event_priority_set(ev, 1); event_add(ev, NULL); ev = event_new(worker->event_base, worker->connections[i]->sock, EV_READ|EV_PERSIST, receiveCallback, worker); event_priority_set(ev, 2); event_add(ev, NULL); }//End for i gettimeofday(&(worker->last_write_time), NULL); printf("starting receive base loop\n"); int error = event_base_loop(worker->event_base, 0); if(error == -1) { printf("Error starting libevent\n"); } else if(error == 1) { printf("No events registered with libevent\n"); } printf("base loop done\n"); }//End workerLoop()
/* Create the listening socket, register the accept event on the main
 * libevent base, spawn worker threads, and enter the dispatch loop
 * (does not return during normal operation). */
void start_mc_server() {
    /* create the listening socket and bind it */
    if (settings.socketpath == NULL) {
        l_socket = server_socket(settings.port, 0);
        if (l_socket == -1) {
            fprintf(stderr, "failed to listen\n");
            exit(EXIT_FAILURE);
        }
    }

    /* initialize main thread libevent instance */
    main_base = event_init();

    /* create the initial listening connection */
    /*if (!(listen_conn = conn_new(l_socket, conn_listening, EV_READ | EV_PERSIST, 1, false, main_base))) { fprintf(stderr, "failed to create listening connection"); exit(EXIT_FAILURE); }*/

    /* NOTE: this event lives on the stack, which is safe only because the
     * function blocks in event_base_loop() below while it is registered. */
    struct event ev;
    /* event_set() initializes the event structure for the listen fd */
    event_set(&ev, l_socket, EV_READ | EV_PERSIST, event_handler, (void *)&l_socket);
    event_base_set(main_base, &ev);
    /* event_add() makes the event fire when it matches (or on timeout, if
     * one were set); the failure case is currently ignored */
    if (event_add(&ev, 0) == -1) {
        //close(l_socket);
    }

    thread_init(settings.num_threads, main_base);/* llthread.c */

    /* enter the event loop */
    event_base_loop(main_base, 0);
}
/* * Worker thread: main event loop */ static void *worker_libevent(void *arg) { LIBEVENT_THREAD *me = arg; struct conn *conn; CQ_ITEM *item; /* Any per-thread setup can happen here; thread_init() will block until * all threads have finished initializing. */ pthread_mutex_lock(&init_lock); init_count++; pthread_cond_signal(&init_cond); pthread_mutex_unlock(&init_lock); event_base_loop(me->base, 0); /* close all connections */ conn = me->conn_list; while (conn != NULL) { close(conn->sfd); conn = conn->conn_next; } item = cq_pop(me->new_conn_queue); while (item != NULL) { close(item->sfd); cqi_free(item); item = cq_pop(me->new_conn_queue); } return NULL; }
/* Runs one client/server test case over a socketpair sharing a single event
 * base.  Returns "OK" (or another result string) on success, "fail" if any
 * tt_* assertion jumped to `end:` early. */
static const char *run_case(struct Worker *client, struct Worker *server)
{
	struct event_base *base = NULL;
	int spair[2];
	const char *res = "huh";
	bool done = false;

	ignore_sigpipe();

	base = event_init();
	client->evbase = base;
	server->evbase = base;

	/* Connected, configured socketpair: one end per worker.
	 * tt_assert/str_check jump to the `end:` label on failure. */
	tt_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, spair) == 0);
	tt_assert(socket_setup(spair[0], true));
	tt_assert(socket_setup(spair[1], true));

	str_check(start_worker(client, spair[1]), "OK");
	str_check(start_worker(server, spair[0]), "OK");

	/* Pump events until both workers have released their contexts. */
	while (client->ctx || server->ctx)
		tt_assert(event_base_loop(base, EVLOOP_ONCE) == 0);

	done = true;
end:
	/* Runs on both success and tt_* failure: collect errors, then free. */
	res = check_errors(client, server);
	free_worker(client);
	free_worker(server);
	event_base_free(base);
	return done ? res : "fail";
}
/* Kicks off an async getaddrinfo lookup for every URL in url_array, then
 * runs the event loop until all pending DNS requests complete (dns_callback
 * is expected to decrement g_pending_requests and exit the loop). */
void Spider_Url_Rinse::dns_parse(UrlPtrVec& url_array)
{
    for (unsigned int i = 0; i < url_array.size(); ++i) {
        struct evutil_addrinfo hints;
        struct evdns_getaddrinfo_request *req;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_flags = EVUTIL_AI_CANONNAME;
        hints.ai_protocol = IPPROTO_TCP;

        ++g_pending_requests;

        /* FIX: calloc takes (count, size) — the arguments were reversed
         * (same allocation size, but misleading); also guard against
         * allocation failure before dereferencing. */
        dns_cb_arg *arg = (dns_cb_arg *) calloc(1, sizeof(dns_cb_arg));
        if (arg == NULL) {
            --g_pending_requests;
            LLOG(L_ERROR, "calloc dns_cb_arg failed.");
            continue;
        }
        arg->url_ptr = url_array[i];
        arg->pthis = this;

        req = evdns_getaddrinfo(m_evdnsbase, arg->url_ptr->domain, NULL,
                                &hints, (evdns_getaddrinfo_cb)dns_callback,
                                arg);
        if (req == NULL) {
            /* FIX: typo in the log message ("getaddrifo"). */
            LLOG(L_WARN, "evdns_getaddrinfo return null.");
        }
    }

    if (g_pending_requests) {
        int ret = event_base_loop(m_evbase, 0);
        if (ret != 0)
            LLOG(L_ERROR, "event_base_loop error code %d", ret);
    }
    return;
}
//启动工作线程 bool WorkerThread::Run() { do { //初始化管道 if (!CreateNotifyFds()) break; //初始化事件处理回调 if (!InitEventHandler()) break; try { shared_ptr_thread_.reset(new std::thread([this] { event_base_loop(pthread_event_base_, 0);})); } catch (...) { break; } return true; } while (0); return false; }
/* Top-level daemon driver: load the configuration, install signal handlers,
 * run the event loop until a quit signal, then tear everything down. */
void run(char *conf_file)
{
	struct running rr;
	struct event *sig1_ev, *sig2_ev, *sig_hup;

	evthread_use_pthreads();
	setup_running(&rr);
	register_interface_types(&rr);
	register_source_types(&rr);
	run_config(&rr, conf_file);
	start_stats_timer(&rr);
	ref_release(&(rr.ic_running));

	/* Arm the queue consumer events (0 == no timeout). */
	event_add(sq_consumer(rr.sq), 0);
	event_add(si_consumer(rr.si), 0);
	sq_release(rr.sq);

	/* SIGINT/SIGTERM request a clean quit; SIGHUP triggers hupev. */
	evsignal_add(sig1_ev = evsignal_new(rr.eb, SIGINT, user_quit, &rr), 0);
	evsignal_add(sig2_ev = evsignal_new(rr.eb, SIGTERM, user_quit, &rr), 0);
	evsignal_add(sig_hup = evsignal_new(rr.eb, SIGHUP, hupev, &rr), 0);
	/* Self-kill fallback timer; created here, armed elsewhere. */
	rr.sigkill_timer = event_new(rr.eb, -1, EV_PERSIST, sigkill_self, &rr);

	log_info(("Starting event loop"));
	event_base_loop(rr.eb, 0);
	log_info(("Event loop finished"));

	/* Unregister and free the signal events before general teardown. */
	event_del(sig1_ev);
	event_del(sig2_ev);
	event_del(sig_hup);
	event_free(sig1_ev);
	event_free(sig2_ev);
	event_free(sig_hup);
	closedown(&rr);
	log_info(("Bye!"));
	config_finished();
}
void Loop::run() { _frameEvent = event_new(_base, -1, EV_PERSIST, frameEventCallback, this); if (NULL == _frameEvent) { return; } struct timeval tv = {}; evutil_timerclear(&tv); tv.tv_sec = 0; tv.tv_usec = 5000; if (0 != event_add(_frameEvent, &tv)) { event_free(_frameEvent); _frameEvent = NULL; return; } curThreadLoop = this; doEvent(ec::Loop::kEventRun); event_base_loop(_base, 0); doEvent(ec::Loop::kEventEnd); curThreadLoop = NULL; _thread = NULL; }
// Gate-thread loop: while running, pump libevent without blocking, then
// service gateway messages and idle-time work.  Returns false when no event
// base exists.
Bool HawkGateThread::OnThreadLoop()
{
	if (!m_pBase)
		return false;

	// Trace which OS thread runs this gate loop.
	HawkFmtPrint("GateThread EventLoop, ThreadId: %u",
		HawkOSOperator::GetThreadId());

	while (m_bRunning)
	{
		// Assume idle until work shows up.
		m_bIdle = true;

		// Pump pending libevent callbacks without blocking.
		event_base_loop((event_base*)m_pBase, EVLOOP_ONCE | EVLOOP_NONBLOCK);

		// Data queued by the gateway.
		OnGatewayEvent();

		// Idle-time housekeeping.
		OnThreadIdle();
	}
	return true;
}
/* Timer callback: re-arms the global timer `e` to fire again in one second,
 * printing a marker each tick.  The first call (made directly from main,
 * before the event was ever added) skips the evtimer_del(). */
void f(const int fd, const short which, void *arg)
{
    struct timeval t = {.tv_sec = 1, .tv_usec = 0};
    static bool init = false;

    if (init) {
        /* Remove the previous registration before re-setting the event. */
        evtimer_del(&e);
    } else {
        init = true;
    }
    evtimer_set(&e, f, NULL);
    event_base_set(me, &e);
    evtimer_add(&e, &t);
    printf("xxx\n");
}

/* Bootstraps the base, primes the self-rescheduling timer with a direct
 * call to f(), then dispatches forever. */
int main()
{
    me = event_init();
    f(0, 0, NULL);
    event_base_loop(me, 0);
    return 0;
}
/*
 * Worker thread entry point: bind this thread's keys, record our tid and
 * signal thread_init() that we are ready, then dispatch events until the
 * base exits.
 */
static void *
thread_worker_main(void *arg)
{
    struct thread_worker *t = arg;

    /* Per-thread setup; exit hard if key binding fails. */
    if (thread_setkeys(t) != MC_OK) {
        exit(1);
    }

    pthread_mutex_lock(&init_lock);
    t->tid = pthread_self();
    init_count++;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);

    event_base_loop(t->base, 0);

    return NULL;
}
// Runs libevent until an explicit loopbreak/loopexit is observed, marking
// the thread as "in event loop" for the duration.
void EventLoop::run()
{
  __in_event_loop__ = true;

  // Block SIGPIPE in the event loop because we can not force
  // underlying implementations such as SSL bufferevents to use
  // MSG_NOSIGNAL.
  SUPPRESS(SIGPIPE) {
    do {
      int result = event_base_loop(base, EVLOOP_ONCE);
      if (result < 0) {
        LOG(FATAL) << "Failed to run event loop";
      } else if (result > 0) {
        // All events are handled, continue event loop.
        continue;
      } else {
        CHECK_EQ(0, result);
        // result == 0: events were dispatched; only a requested break or
        // exit terminates the loop.
        if (event_base_got_break(base)) {
          break;
        } else if (event_base_got_exit(base)) {
          break;
        }
      }
    } while (true);
  }

  __in_event_loop__ = false;
}
/* Initialize libevent and start the event loop.  Each EVLOOP_ONCE pass is
 * followed by break/exit checks and optional SNMP housekeeping; the loop
 * ends when a callback requested break/exit or dispatch fails. */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);

	/* libevent loop */
	do {
		/* Stop as soon as a callback requested break/exit. */
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
#ifdef USE_SNMP
		if (cfg->g_snmp) {
			/* We don't use delegated requests (request whose
			 * answer is delayed). However, we keep the call here
			 * in case we use it some day. We don't call
			 * run_alarms() here. We do it on timeout only. */
			netsnmp_check_outstanding_agent_requests();
			levent_snmp_update(cfg);
		}
#endif
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */
}
/* Main interactive network loop: dispatches libevent one pass at a time and
 * interleaves script hooks, state persistence, prompt updates, and shutdown
 * checks between passes. */
void net_loop (void) {
  delete_stdin_event = 0;
  if (verbosity >= E_DEBUG) {
    logprintf ("Starting netloop\n");
  }
  /* Persistent read event on fd 0 (stdin) for the interactive terminal. */
  term_ev = event_new (TLS->ev_base, 0, EV_READ | EV_PERSIST, stdin_read_callback, 0);
  event_add (term_ev, 0);

  int last_get_state = time (0);
  while (1) {
    /* One blocking pass of the event loop. */
    event_base_loop (TLS->ev_base, EVLOOP_ONCE);

    /* The stdin handler asked for its own removal. */
    if (term_ev && delete_stdin_event) {
      logprintf ("delete stdin\n");
      event_free (term_ev);
      term_ev = 0;
    }

    #ifdef USE_LUA
      lua_do_all ();
    #endif
    #ifdef USE_PYTHON
      py_do_all ();
    #endif

    /* Graceful exit once all outstanding queries have drained. */
    if (safe_quit && !TLS->active_queries) {
      printf ("All done. Exit\n");
      do_halt (0);
      safe_quit = 0;
    }
    if (sigterm_cnt > 0) {
      do_halt (0);
    }

    /* Refresh server-side state at most once an hour. */
    if (time (0) - last_get_state > 3600) {
      tgl_do_lookup_state (TLS);
      last_get_state = time (0);
    }

    write_state_file ();
    update_prompt ();
    /* if (unknown_user_list_pos) { int i; for (i = 0; i < unknown_user_list_pos; i++) { tgl_do_get_user_info (TLS, TGL_MK_USER (unknown_user_list[i]), 0, 0, 0); } unknown_user_list_pos = 0; } */
  }

  /* NOTE(review): the while(1) above contains no break — presumably
   * do_halt() terminates the process, leaving this cleanup dead code.
   * Confirm before relying on it. */
  if (term_ev) {
    event_free (term_ev);
    term_ev = 0;
  }
  if (verbosity >= E_DEBUG) {
    logprintf ("End of netloop\n");
  }
}
/*
 * Main-thread flow: stats_init / conn_init / thread_init, create the
 * listening socket (recording the port number in /tmp/portnumber.file),
 * then enter the libevent dispatch loop.
 */
int main(int argc, char** argv) {
    /* BUG FIX: retval was returned uninitialized on the success path. */
    int retval = EXIT_SUCCESS;
    FILE *portnumber_file = NULL;

    main_base = event_init();

    portnumber_file = fopen("/tmp/portnumber.file", "a");

    stats_init();
    conn_init();
    thread_init(NUM_OF_THREADS);

    server_socket("127.0.0.1", SERVER_PORT, tcp_transport, portnumber_file);

    if (event_base_loop(main_base, 0) != 0) {
        printf("event_base_loop error\n");
        retval = EXIT_FAILURE;
    }

    /* FIX: the port-number file was never closed. */
    if (portnumber_file != NULL) {
        fclose(portnumber_file);
    }

    /* The unreachable exit(0) after return was removed. */
    return retval;
}
int main(int argc, char ** argv) { evbase_t * evbase = event_base_new(); evhtp_t * htp = evhtp_new(evbase, NULL); evhtp_callback_t * cb_plurals = NULL; evhtp_callback_t * cb_score = NULL; int num_threads = 6; cb_plurals = evhtp_set_cb(htp, "/blahblah/plurals", receive_plurals, "plurals"); cb_score = evhtp_set_cb(htp, "/blahblah/score", provide_score, "score"); #ifndef EVHTP_DISABLE_EVTHR evhtp_use_threads(htp, NULL, num_threads, NULL); #else if (num_threads != 1) { printf("Error: multithreading is not supported\n"); exit(1); } #endif evhtp_bind_socket(htp, "0.0.0.0", 8081, 1024); event_base_loop(evbase, 0); evhtp_unbind_socket(htp); evhtp_callback_free(cb_score); evhtp_callback_free(cb_plurals); evhtp_free(htp); event_base_free(evbase); return 0; }
/* Wires up the catch-all request handler and post-accept callback, then
 * runs the server's event loop; returns event_base_loop()'s status. */
int wshtp_server_start(wshtp_server_t *server)
{
    evhtp_set_glob_cb(server->htp, "*", wshtp_handler_cb, server);
    evhtp_set_post_accept_cb(server->htp, wshtp_post_accept_cb, server);
    return event_base_loop(server->evbase, 0);
}
/* lua_util_process_message: run a raw message through the full rspamd
 * filter pipeline inside a private event base.
 * Lua args: (config, message string).
 * Pushes the result UCL table on success, nil on any failure; always
 * returns 1 (one Lua value). */
static gint
lua_util_process_message (lua_State *L)
{
	struct rspamd_config *cfg = lua_check_config (L, 1);
	const gchar *message;
	gsize mlen;
	struct rspamd_task *task;
	struct event_base *base;
	ucl_object_t *res = NULL;

	message = luaL_checklstring (L, 2, &mlen);

	if (cfg != NULL && message != NULL) {
		/* Private event base so processing does not touch any main loop. */
		base = event_init ();
		rspamd_init_filters (cfg, FALSE);
		task = rspamd_task_new (NULL);
		task->cfg = cfg;
		task->ev_base = base;
		/* Copy the message into task-owned, NUL-terminated memory. */
		task->msg.start = rspamd_mempool_alloc (task->task_pool, mlen + 1);
		rspamd_strlcpy ((gpointer)task->msg.start, message, mlen + 1);
		task->msg.len = mlen;
		/* The fin callback receives &res so it can publish the result. */
		task->fin_callback = lua_util_task_fin;
		task->fin_arg = &res;
		task->resolver = dns_resolver_init (NULL, base, cfg);
		task->s = rspamd_session_create (task->task_pool, rspamd_task_fin,
				rspamd_task_restore, rspamd_task_free_hard, task);

		if (!rspamd_task_load_message (task, NULL, message, mlen)) {
			lua_pushnil (L);
		}
		else {
			if (rspamd_task_process (task, RSPAMD_TASK_PROCESS_ALL)) {
				/* Run until the session completes; the fin callback may
				 * fill `res`. */
				event_base_loop (base, 0);

				if (res != NULL) {
					ucl_object_push_lua (L, res, true);
					ucl_object_unref (res);
				}
				else {
					/* No result published: serialize the protocol reply and
					 * free the task explicitly. */
					ucl_object_push_lua (L,
							rspamd_protocol_write_ucl (task, NULL), true);
					rdns_resolver_release (task->resolver->r);
					rspamd_task_free_hard (task);
				}
			}
			else {
				lua_pushnil (L);
			}
		}
		event_base_free (base);
	}
	else {
		lua_pushnil (L);
	}

	return 1;
}