/*
 * Idle-watcher callback run after a c-ares DNS lookup completes.
 * Invokes the query's PostDNS continuation and dispatches on its result;
 * on eAbort the DNS.Fail handler decides whether to shut the client down
 * or hand the IO over to the DB queue.
 *
 * FIX: the original fell through from the eAbort case into eDBQuery, so a
 * client that had just been shut down via ShutDownCLient() could still be
 * queued for a DB operation (use-after-free risk). A break now terminates
 * the eAbort case.
 */
void IO_postdns_callback(struct ev_loop *loop, ev_idle *watcher, int revents)
{
	AsyncIO *IO = watcher->data;

	SetEVState(IO, eCaresFinished);
	SET_EV_TIME(IO, event_base);
	EV_syslog(LOG_DEBUG, "event: %s\n", __FUNCTION__);
	become_session(IO->CitContext);

	assert(IO->DNS.Query->PostDNS);
	switch (IO->DNS.Query->PostDNS(IO))
	{
	case eAbort:
		assert(IO->DNS.Fail);
		switch (IO->DNS.Fail(IO)) {
		case eAbort:
////			StopClientWatchers(IO);
			ShutDownCLient(IO);
			break;
		case eDBQuery:
			StopClientWatchers(IO, 0);
			QueueAnDBOperation(IO);
			break;
		default:
			break;
		}
		break; /* was missing: fell through into eDBQuery after shutdown */
	case eDBQuery:
		/* client lives on, but the next step happens in the DB queue */
		StopClientWatchers(IO, 0);
		QueueAnDBOperation(IO);
		break;
	default:
		break;
	}
}
/*
 * Read/write-timeout handler: the peer went silent for too long.
 * Stops all socket watchers, closes the connection fd, then asks the
 * session's Timeout handler whether to abort the client entirely.
 *
 * Cleanup: the original stopped rw_timeout a second time inside the
 * fd!=0 branch (ev_timer_stop is idempotent, so this was redundant),
 * and let the eAbort case fall into default without a break.
 */
static void
IO_Timeout_callback(struct ev_loop *loop, ev_timer *watcher, int revents)
{
	AsyncIO *IO = watcher->data;

	SetEVState(IO, eIOTimeout);
	SET_EV_TIME(IO, event_base);
	ev_timer_stop (event_base, &IO->rw_timeout);
	become_session(IO->CitContext);

	if (IO->SendBuf.fd != 0)
	{
		/* tear down the socket; Send and Recv share one fd */
		ev_io_stop(event_base, &IO->send_event);
		ev_io_stop(event_base, &IO->recv_event);
		close(IO->SendBuf.fd);
		IO->SendBuf.fd = IO->RecvBuf.fd = 0;
	}

	assert(IO->Timeout);
	switch (IO->Timeout(IO))
	{
	case eAbort:
		ShutDownCLient(IO);
		break;
	default:
		break;
	}
}
/*
 * Idle-watcher callback used when connect() failed synchronously:
 * EvConnectSock() defers the failure notification into the event loop
 * via conn_fail_immediate so the caller's state machine stays uniform.
 * Closes the fd (if still open), then lets the ConnFail handler decide
 * whether the client gets shut down.
 */
static void
IO_connfailimmediate_callback(struct ev_loop *loop,
			      ev_idle *watcher,
			      int revents)
{
	AsyncIO *IO = watcher->data;

	SetEVState(IO, eIOConnfailNow);
	SET_EV_TIME(IO, event_base);
	ev_idle_stop(event_base, &IO->conn_fail_immediate);

	if (IO->SendBuf.fd != 0) {
		close(IO->SendBuf.fd);
		IO->SendBuf.fd = IO->RecvBuf.fd = 0;
	}
	become_session(IO->CitContext);

	assert(IO->ConnFail);
	/* only eAbort requires action; every other result is a no-op here */
	if (IO->ConnFail(IO) == eAbort)
		ShutDownCLient(IO);
}
/*
 * Arm the first watcher for a freshly established (or re-attached)
 * connection, based on which direction the protocol speaks first.
 * The connect-timeout is disarmed and replaced by the rw-timeout.
 */
static void
set_start_callback(struct ev_loop *loop, AsyncIO *IO, int revents)
{
	ev_timer_stop(event_base, &IO->conn_fail);
	ev_timer_start(event_base, &IO->rw_timeout);

	switch(IO->NextState) {
	case eReadMore:
	case eReadMessage:
	case eReadFile:
		/* server speaks first: wait for its greeting */
		StrBufAppendBufPlain(IO->ErrMsg,
				     HKEY("[while waiting for greeting]"),
				     0);
		ev_io_start(event_base, &IO->recv_event);
		break;
	case eSendReply:
	case eSendMore:
	case eReadPayload:
	case eSendFile:
		/* we speak first: synchronously kick the send path */
		become_session(IO->CitContext);
		IO_send_callback(loop, &IO->send_event, revents);
		break;
	case eDBQuery:
	case eSendDNSQuery:
	case eReadDNSReply:
	case eConnect:
	case eTerminateConnection:
	case eAbort:
		/* NOTE(review): these states are not expected at connection
		 * start; original marker was "TODO: WHUT?" — intentionally
		 * left as a silent no-op. */
		break;
	}
}
/*
 * ev_async callback on the DB event loop: drain the inbound DB work queue.
 * The queue is double-buffered: under the mutex we swap which HashList
 * producers append to, then process the full one without holding the lock.
 * For each queued handler we stamp timing info, attach the shutdown
 * cleanup watcher, and run EvAttch(); eAbort tears the DB client down.
 */
static void
DBQueueEventAddCallback(EV_P_ ev_async *w, int revents)
{
	CitContext *Ctx;
	long IOID = -1;		/* last IO id seen, for the log line below */
	long count = 0;;
	ev_tstamp Now;
	HashList *q;
	void *v;
	HashPos *It;
	long len;
	const char *Key;

	/* get the control command... swap the double buffers under the lock */
	pthread_mutex_lock(&DBEventQueueMutex);
	if (DBInboundEventQueues[0] == DBInboundEventQueue) {
		DBInboundEventQueue = DBInboundEventQueues[1];
		q = DBInboundEventQueues[0];
	}
	else {
		DBInboundEventQueue = DBInboundEventQueues[0];
		q = DBInboundEventQueues[1];
	}
	pthread_mutex_unlock(&DBEventQueueMutex);

	Now = ev_now (event_db);
	It = GetNewHashPos(q, 0);
	while (GetNextHashPos(q, It, &len, &Key, &v))
	{
		IOAddHandler *h = v;
		eNextState rc;
		count ++;
		if (h->IO->ID == 0)
			h->IO->ID = EvIDSource++;	/* assign a session id lazily */
		IOID = h->IO->ID;
		if (h->IO->StartDB == 0.0)
			h->IO->StartDB = Now;		/* first DB attach for this IO */
		h->IO->CitContext->lastcmd = h->IO->Now = Now;
		SetEVState(h->IO, eDBAttach);
		Ctx = h->IO->CitContext;
		become_session(Ctx);
		ev_cleanup_start(event_db, &h->IO->db_abort_by_shutdown);
		rc = h->EvAttch(h->IO);
		switch (rc)
		{
		case eAbort:
			ShutDownDBCLient(h->IO);
			/* fall through */
		default:
			break;
		}
	}
	DeleteHashPos(&It);
	DeleteHashContent(&q);	/* handlers were one-shot; free them */
	EVQ_syslog(LOG_DEBUG, "%s CC[%ld] DBEVENT Q Add %ld done.", IOSTR, IOID, count);
}
/*
 * Terminate an IO that is currently attached to the DB event queue:
 * stop its DB watchers and hand it to its DBTerminate destructor.
 */
void
ShutDownDBCLient(AsyncIO *IO)
{
	become_session(IO->CitContext);
	SetEVState(IO, eDBTerm);
	EVM_syslog(LOG_DEBUG, "DBEVENT Terminating.\n");
	StopDBWatchers(IO);

	assert(IO->DBTerminate);
	IO->DBTerminate(IO);
}
/*
 * Re-attach an IO to the main event loop with a fresh payload pointer.
 * ReadFirst selects whether the remote side speaks first (read) or we do
 * (send); set_start_callback() then arms the matching watcher.
 * Returns the state the IO was put into.
 */
eNextState
ReAttachIO(AsyncIO *IO, void *pData, int ReadFirst)
{
	SetEVState(IO, eIOAttach);
	IO->Data = pData;
	become_session(IO->CitContext);

	ev_cleanup_start(event_base, &IO->abort_by_shutdown);
	IO->NextState = ReadFirst ? eReadMessage : eSendReply;
	set_start_callback(event_base, IO, 0);

	return IO->NextState;
}
/*
 * Fully terminate a client IO on the main event loop: stop all socket
 * watchers, tear down any active c-ares DNS channel (and its libev
 * watchers), then invoke the session's Terminate destructor.
 * Returns whatever Terminate() returns.
 */
eNextState ShutDownCLient(AsyncIO *IO)
{
	CitContext *Ctx =IO->CitContext;

	SetEVState(IO, eExit);
	become_session(Ctx);

	EVM_syslog(LOG_DEBUG, "EVENT Terminating \n");
	StopClientWatchers(IO, 1);

	if (IO->DNS.Channel != NULL) {
		/* destroy the resolver channel first, then detach its
		 * (now dead) fd watchers from the loop */
		ares_destroy(IO->DNS.Channel);
		EV_DNS_LOG_STOP(DNS.recv_event);
		EV_DNS_LOG_STOP(DNS.send_event);
		ev_io_stop(event_base, &IO->DNS.recv_event);
		ev_io_stop(event_base, &IO->DNS.send_event);
		IO->DNS.Channel = NULL;
	}

	assert(IO->Terminate);
	return IO->Terminate(IO);
}
/*
 * Idle-watcher callback on the DB event loop: run the next queued DB
 * operation for this IO and route the result:
 *  - eSendReply: DB work done, hand the IO back to the network loop;
 *  - eDBQuery:  stay in the DB queue (another operation follows);
 *  - network states: leave the DB queue, caller re-arms elsewhere;
 *  - eTerminateConnection/eAbort: tear the DB client down.
 */
void
DB_PerformNext(struct ev_loop *loop, ev_idle *watcher, int revents)
{
	AsyncIO *IO = watcher->data;

	SetEVState(IO, eDBNext);
	SET_EV_TIME(IO, event_db);
	EV_syslog(LOG_DEBUG, "%s()", __FUNCTION__);
	become_session(IO->CitContext);
	ev_idle_stop(event_db, &IO->db_unwind_stack);

	assert(IO->NextDBOperation);
	switch (IO->NextDBOperation(IO))
	{
	case eSendReply:
		/* leaving the DB queue: drop its shutdown guard and
		 * re-queue on the network event context */
		ev_cleanup_stop(loop, &IO->db_abort_by_shutdown);
		QueueAnEventContext(IO);
		break;
	case eDBQuery:
		/* another DB operation is pending; keep the guard armed */
		break;
	case eSendDNSQuery:
	case eReadDNSReply:
	case eConnect:
	case eSendMore:
	case eSendFile:
	case eReadMessage:
	case eReadMore:
	case eReadPayload:
	case eReadFile:
		/* continuing on the network loop; only drop the DB guard */
		ev_cleanup_stop(loop, &IO->db_abort_by_shutdown);
		break;
	case eTerminateConnection:
	case eAbort:
		/* NOTE(review): db_unwind_stack was already stopped above;
		 * this second stop is a redundant but harmless no-op */
		ev_idle_stop(event_db, &IO->db_unwind_stack);
		ev_cleanup_stop(loop, &IO->db_abort_by_shutdown);
		ShutDownDBCLient(IO);
	}
}
void pop3client_scan(void) { static time_t last_run = 0L; time_t fastest_scan; HashPos *it; long len; const char *Key; void *vrptr; pop3aggr *cptr; become_session(&pop3_client_CC); if (config.c_pop3_fastest < config.c_pop3_fetch) fastest_scan = config.c_pop3_fastest; else fastest_scan = config.c_pop3_fetch; /* * Run POP3 aggregation no more frequently than once every n seconds */ if ( (time(NULL) - last_run) < fastest_scan ) { return; } /* * This is a simple concurrency check to make sure only one pop3client * run is done at a time. We could do this with a mutex, but since we * don't really require extremely fine granularity here, we'll do it * with a static variable instead. */ if (doing_pop3client) return; doing_pop3client = 1; EVP3CQM_syslog(LOG_DEBUG, "pop3client started"); CtdlForEachNetCfgRoom(pop3client_scan_room, NULL, pop3client); pthread_mutex_lock(&POP3QueueMutex); it = GetNewHashPos(POP3FetchUrls, 0); while (!server_shutting_down && GetNextHashPos(POP3FetchUrls, it, &len, &Key, &vrptr) && (vrptr != NULL)) { cptr = (pop3aggr *)vrptr; if (cptr->RefCount == 0) if (!pop3_do_fetching(cptr)) DeletePOP3Aggregator(cptr);////TODO /* if ((palist->interval && time(NULL) > (last_run + palist->interval)) || (time(NULL) > last_run + config.c_pop3_fetch)) pop3_do_fetching(palist->roomname, palist->pop3host, palist->pop3user, palist->pop3pass, palist->keep); pptr = palist; palist = palist->next; free(pptr); */ } DeleteHashPos(&it); pthread_mutex_unlock(&POP3QueueMutex); EVP3CQM_syslog(LOG_DEBUG, "pop3client ended"); last_run = time(NULL); doing_pop3client = 0; }
/*
 * ev_async callback on the main event loop: drain the inbound network
 * work queue. Mirrors DBQueueEventAddCallback: the queue is
 * double-buffered, swapped under EventQueueMutex, then processed without
 * the lock. Each handler's EvAttch() result is dispatched: all the
 * "keep running" states are no-ops here; eTerminateConnection/eAbort
 * shut the client down immediately.
 */
static void
QueueEventAddCallback(EV_P_ ev_async *w, int revents)
{
	CitContext *Ctx;
	long IOID = -1;		/* last IO id seen, for the log line below */
	long count = 0;
	ev_tstamp Now;
	HashList *q;
	void *v;
	HashPos *It;
	long len;
	const char *Key;

	/* get the control command... swap the double buffers under the lock */
	pthread_mutex_lock(&EventQueueMutex);
	if (InboundEventQueues[0] == InboundEventQueue) {
		InboundEventQueue = InboundEventQueues[1];
		q = InboundEventQueues[0];
	}
	else {
		InboundEventQueue = InboundEventQueues[0];
		q = InboundEventQueues[1];
	}
	pthread_mutex_unlock(&EventQueueMutex);

	Now = ev_now (event_base);
	It = GetNewHashPos(q, 0);
	while (GetNextHashPos(q, It, &len, &Key, &v))
	{
		IOAddHandler *h = v;
		count ++;
		if (h->IO->ID == 0) {
			h->IO->ID = EvIDSource++;	/* assign a session id lazily */
		}
		IOID = h->IO->ID;
		if (h->IO->StartIO == 0.0)
			h->IO->StartIO = Now;		/* first attach for this IO */
		SetEVState(h->IO, eIOAttach);
		Ctx = h->IO->CitContext;
		become_session(Ctx);
		h->IO->CitContext->lastcmd = h->IO->Now = Now;
		switch (h->EvAttch(h->IO))
		{
		case eReadMore:
		case eReadMessage:
		case eReadFile:
		case eSendReply:
		case eSendMore:
		case eReadPayload:
		case eSendFile:
		case eDBQuery:
		case eSendDNSQuery:
		case eReadDNSReply:
		case eConnect:
			/* watchers were armed inside EvAttch; nothing to do */
			break;
		case eTerminateConnection:
		case eAbort:
			ShutDownCLient(h->IO);
			break;
		}
	}
	DeleteHashPos(&It);
	DeleteHashContent(&q);	/* handlers were one-shot; free them */
	EVQ_syslog(LOG_DEBUG, "%s CC[%ld] EVENT Q Add %ld done.", IOSTR, IOID, count);
}
/*
 * Create a non-blocking TCP socket and begin connecting to
 * IO->ConnectMe. Three outcomes:
 *  - connect() succeeds immediately: arm the first IO watcher now;
 *  - EINPROGRESS: arm conn_event + conn_fail timer and wait;
 *  - hard failure: defer notification via the conn_fail_immediate
 *    idle watcher so the caller's state machine stays uniform.
 * Returns the next state (eReadMessage/eSendReply per ReadFirst),
 * or eAbort if the socket could not even be created/configured.
 *
 * FIX: the egress-bind fallback compared inet_addr()'s result against
 * `!INADDR_ANY` (i.e. the constant 1); inet_addr() signals a parse
 * failure by returning INADDR_NONE, which is what we must test for.
 * Also: connect() takes a `struct sockaddr *`, not `struct sockaddr_in *`.
 */
eNextState
EvConnectSock(AsyncIO *IO,
	      double conn_timeout,
	      double first_rw_timeout,
	      int ReadFirst)
{
	struct sockaddr_in egress_sin;
	int fdflags;
	int rc = -1;

	SetEVState(IO, eIOConnectSock);
	become_session(IO->CitContext);

	if (ReadFirst) {
		IO->NextState = eReadMessage;
	}
	else {
		IO->NextState = eSendReply;
	}

	IO->SendBuf.fd = IO->RecvBuf.fd =
		socket((IO->ConnectMe->IPv6)?PF_INET6:PF_INET,
		       SOCK_STREAM,
		       IPPROTO_TCP);

	if (IO->SendBuf.fd < 0) {
		EV_syslog(LOG_ERR,
			  "EVENT: socket() failed: %s\n",
			  strerror(errno));
		StrBufPrintf(IO->ErrMsg,
			     "Failed to create socket: %s",
			     strerror(errno));
		IO->SendBuf.fd = IO->RecvBuf.fd = 0;
		return eAbort;
	}

	fdflags = fcntl(IO->SendBuf.fd, F_GETFL);
	if (fdflags < 0) {
		EV_syslog(LOG_ERR,
			  "EVENT: unable to get socket %d flags! %s \n",
			  IO->SendBuf.fd,
			  strerror(errno));
		StrBufPrintf(IO->ErrMsg,
			     "Failed to get socket %d flags: %s",
			     IO->SendBuf.fd,
			     strerror(errno));
		close(IO->SendBuf.fd);
		IO->SendBuf.fd = IO->RecvBuf.fd = 0;
		return eAbort;
	}
	fdflags = fdflags | O_NONBLOCK;
	if (fcntl(IO->SendBuf.fd, F_SETFL, fdflags) < 0) {
		EV_syslog(
			LOG_ERR,
			"EVENT: unable to set socket %d nonblocking flags! %s \n",
			IO->SendBuf.fd,
			strerror(errno));
		StrBufPrintf(IO->ErrMsg,
			     "Failed to set socket flags: %s",
			     strerror(errno));
		close(IO->SendBuf.fd);
		IO->SendBuf.fd = IO->RecvBuf.fd = 0;
		return eAbort;
	}

	/* TODO: maybe we could use offsetof() to calc the position of data...
	 * http://doc.dvgu.ru/devel/ev.html#associating_custom_data_with_a_watcher
	 */
	ev_io_init(&IO->recv_event, IO_recv_callback, IO->RecvBuf.fd, EV_READ);
	IO->recv_event.data = IO;
	ev_io_init(&IO->send_event, IO_send_callback, IO->SendBuf.fd, EV_WRITE);
	IO->send_event.data = IO;

	ev_timer_init(&IO->conn_fail, IO_connfail_callback, conn_timeout, 0);
	IO->conn_fail.data = IO;
	ev_timer_init(&IO->rw_timeout, IO_Timeout_callback, first_rw_timeout,0);
	IO->rw_timeout.data = IO;

	/* for debugging you may bypass it like this:
	 * IO->Addr.sin_addr.s_addr = inet_addr("127.0.0.1");
	 * ((struct sockaddr_in)IO->ConnectMe->Addr).sin_addr.s_addr =
	 *   inet_addr("127.0.0.1");
	 */
	if (IO->ConnectMe->IPv6) {
		rc = connect(IO->SendBuf.fd,
			     (struct sockaddr *)&IO->ConnectMe->Addr,
			     sizeof(struct sockaddr_in6));
	}
	else {
		/* If citserver is bound to a specific IP address on the host, make
		 * sure we use that address for outbound connections.
		 */
		memset(&egress_sin, 0, sizeof(egress_sin));
		egress_sin.sin_family = AF_INET;
		if (!IsEmptyStr(CtdlGetConfigStr("c_ip_addr"))) {
			egress_sin.sin_addr.s_addr =
				inet_addr(CtdlGetConfigStr("c_ip_addr"));
			/* inet_addr() returns INADDR_NONE on a bad address
			 * string; fall back to INADDR_ANY in that case.
			 * (The original compared against !INADDR_ANY == 1.) */
			if (egress_sin.sin_addr.s_addr == INADDR_NONE) {
				egress_sin.sin_addr.s_addr = INADDR_ANY;
			}

			/* If this bind fails, no problem; we can still use INADDR_ANY */
			bind(IO->SendBuf.fd,
			     (struct sockaddr *)&egress_sin,
			     sizeof(egress_sin));
		}
		rc = connect(IO->SendBuf.fd,
			     (struct sockaddr *)&IO->ConnectMe->Addr,
			     sizeof(struct sockaddr_in));
	}

	if (rc >= 0) {
		/* unusual but possible: the connect completed synchronously */
		SetEVState(IO, eIOConnNow);
		EV_syslog(LOG_DEBUG,
			  "connect() = %d immediate success.\n",
			  IO->SendBuf.fd);
		set_start_callback(event_base, IO, 0);
		return IO->NextState;
	}
	else if (errno == EINPROGRESS) {
		/* normal case: wait for writability (or the fail timer) */
		SetEVState(IO, eIOConnWait);
		EV_syslog(LOG_DEBUG,
			  "connect() = %d have to wait now.\n",
			  IO->SendBuf.fd);

		ev_io_init(&IO->conn_event,
			   IO_connestd_callback,
			   IO->SendBuf.fd,
			   EV_READ|EV_WRITE);
		IO->conn_event.data = IO;
		ev_io_start(event_base, &IO->conn_event);
		ev_timer_start(event_base, &IO->conn_fail);
		return IO->NextState;
	}
	else {
		/* hard failure: report it from inside the event loop */
		SetEVState(IO, eIOConnfail);
		ev_idle_init(&IO->conn_fail_immediate,
			     IO_connfailimmediate_callback);
		IO->conn_fail_immediate.data = IO;
		ev_idle_start(event_base, &IO->conn_fail_immediate);

		EV_syslog(LOG_ERR,
			  "connect() = %d failed: %s\n",
			  IO->SendBuf.fd,
			  strerror(errno));
		StrBufPrintf(IO->ErrMsg,
			     "Failed to connect: %s",
			     strerror(errno));
		return IO->NextState;
	}
}
/*
 * Writability callback: push one chunk of outbound data (file chunk for
 * eSendFile, SendBuf otherwise) and advance the protocol state machine
 * once the buffer has fully drained (rc == 0). rc < 0 with errno other
 * than EAGAIN means the socket died; a short timeout is armed so the
 * timeout path performs the teardown. Any other rc means "write more
 * later" and the watcher stays armed.
 *
 * NOTE: under BIGBAD_IODBG a brace opened in the first #ifdef block is
 * closed inside the second one — the two #ifdef sections must be kept
 * paired exactly as written.
 */
static void
IO_send_callback(struct ev_loop *loop, ev_io *watcher, int revents)
{
	int rc;
	AsyncIO *IO = watcher->data;
	const char *errmsg = NULL;

	SET_EV_TIME(IO, event_base);
	become_session(IO->CitContext);
#ifdef BIGBAD_IODBG
	{
		int rv = 0;
		char fn [SIZ];
		FILE *fd;
		const char *pch = ChrPtr(IO->SendBuf.Buf);
		const char *pchh = IO->SendBuf.ReadWritePointer;
		long nbytes;

		if (pchh == NULL)
			pchh = pch;

		nbytes = StrLength(IO->SendBuf.Buf) - (pchh - pch);
		snprintf(fn, SIZ, "/tmp/foolog_ev_%s.%d",
			 ((CitContext*)(IO->CitContext))->ServiceName,
			 IO->SendBuf.fd);

		fd = fopen(fn, "a+");
		if (fd == NULL) {
			syslog(LOG_EMERG,
			       "failed to open file %s: %s",
			       fn,
			       strerror(errno));
			cit_backtrace();
			exit(1);
		}
		fprintf(fd, "Send: BufSize: %ld BufContent: [", nbytes);
		rv = fwrite(pchh, nbytes, 1, fd);
		if (!rv) printf("failed to write debug to %s!\n", fn);
		fprintf(fd, "]\n");
#endif
	switch (IO->NextState) {
	case eSendFile:
		rc = FileSendChunked(&IO->IOB, &errmsg);
		if (rc < 0)
			StrBufPlain(IO->ErrMsg, errmsg, -1);
		break;
	default:
		rc = StrBuf_write_one_chunk_callback(IO->SendBuf.fd,
						     0,
						     &IO->SendBuf);
	}

#ifdef BIGBAD_IODBG
		fprintf(fd, "Sent: BufSize: %d bytes.\n", rc);
		fclose(fd);
	}
#endif
	if (rc == 0)
	{
		/* whole buffer written; decide what happens next */
		ev_io_stop(event_base, &IO->send_event);
		switch (IO->NextState) {
		case eSendMore:
			assert(IO->SendDone);
			IO->NextState = IO->SendDone(IO);

			if ((IO->NextState == eTerminateConnection) ||
			    (IO->NextState == eAbort) )
				ShutDownCLient(IO);
			else {
				ev_io_start(event_base, &IO->send_event);
			}
			break;
		case eSendFile:
			if (IO->IOB.ChunkSendRemain > 0) {
				/* more file chunks pending; wait for peer */
				ev_io_start(event_base, &IO->recv_event);
				SetNextTimeout(IO, 100.0);
			} else {
				assert(IO->ReadDone);
				IO->NextState = IO->ReadDone(IO);
				switch(IO->NextState) {
				case eSendDNSQuery:
				case eReadDNSReply:
				case eDBQuery:
				case eConnect:
					break;
				case eSendReply:
				case eSendMore:
				case eSendFile:
					ev_io_start(event_base,
						    &IO->send_event);
					break;
				case eReadMessage:
				case eReadMore:
				case eReadPayload:
				case eReadFile:
					break;
				case eTerminateConnection:
				case eAbort:
					break;
				}
			}
			break;
		case eSendReply:
			if (StrBufCheckBuffer(&IO->SendBuf) != eReadSuccess)
				break;
			IO->NextState = eReadMore;
			/* fall through: reply sent, start reading the answer */
		case eReadMore:
		case eReadMessage:
		case eReadPayload:
		case eReadFile:
			if (StrBufCheckBuffer(&IO->RecvBuf) == eBufferNotEmpty)
			{
				/* leftover bytes already buffered; consume now */
				HandleInbound(IO);
			}
			else {
				ev_io_start(event_base, &IO->recv_event);
			}
			break;
		case eDBQuery:
			/*
			 * we now live in another queue,
			 * so we have to unregister.
			 */
			ev_cleanup_stop(loop, &IO->abort_by_shutdown);
			break;
		case eSendDNSQuery:
		case eReadDNSReply:
		case eConnect:
		case eTerminateConnection:
		case eAbort:
			break;
		}
	}
	else if (rc < 0) {
		if (errno != EAGAIN) {
			/* socket is broken; stop watchers and let the
			 * (very short) timeout path do the teardown */
			StopClientWatchers(IO, 1);
			EV_syslog(LOG_DEBUG,
				  "IO_send_callback(): Socket Invalid! [%d] [%s] [%d]\n",
				  errno, strerror(errno), IO->SendBuf.fd);
			StrBufPrintf(IO->ErrMsg,
				     "Socket Invalid! [%s]",
				     strerror(errno));
			SetNextTimeout(IO, 0.01);
		}
	}
	/* else : must write more. */
}
/*
 * Consume buffered inbound data for an IO while it is in a reading
 * state. Lines are cut either by the session's LineReader, by the
 * default SIP-style line chunker, or (for eReadFile) streamed straight
 * into the IOB. Whenever a complete unit has been read, ReadDone()
 * advances the state machine; a resulting eDBQuery hands the IO over
 * to the DB queue (returning immediately), anything else loops while
 * buffered data remains. Returns the final buffer/read state.
 */
eReadState
HandleInbound(AsyncIO *IO)
{
	const char *Err = NULL;
	eReadState Finished = eBufferNotEmpty;

	become_session(IO->CitContext);

	while ((Finished == eBufferNotEmpty) &&
	       ((IO->NextState == eReadMessage)||
		(IO->NextState == eReadMore)||
		(IO->NextState == eReadFile)||
		(IO->NextState == eReadPayload)))
	{
		/* Reading lines...
		 * lex line reply in callback,
		 * or do it ourselves.
		 * i.e. as nnn-blabla means continue reading in SMTP
		 */
		if ((IO->NextState == eReadFile) &&
		    (Finished == eBufferNotEmpty))
		{
			/* raw file payload: append buffered bytes to the IOB */
			Finished = WriteIOBAlreadyRead(&IO->IOB, &Err);
			if (Finished == eReadSuccess)
			{
				IO->NextState = eSendReply;
			}
		}
		else if (IO->LineReader)
			Finished = IO->LineReader(IO);
		else
			Finished = StrBufChunkSipLine(IO->IOBuf,
						      &IO->RecvBuf);

		switch (Finished) {
		case eMustReadMore: /// read new from socket...
			break;
		case eBufferNotEmpty: /* shouldn't happen... */
		case eReadSuccess: /// done for now...
			break;
		case eReadFail: /// WHUT?
			///todo: shut down!
			break;
		}

		if (Finished != eMustReadMore) {
			/* one unit complete; let the protocol advance */
			ev_io_stop(event_base, &IO->recv_event);
			IO->NextState = IO->ReadDone(IO);
			if (IO->NextState == eDBQuery) {
				/* ownership moves to the DB queue */
				if (QueueAnDBOperation(IO) == eAbort)
					return eReadFail;
				else
					return eReadSuccess;
			}
			else {
				Finished = StrBufCheckBuffer(&IO->RecvBuf);
			}
		}
	}

	PostInbound(IO);

	return Finished;
}