/**
 * \brief Forks a separate simple sleep() -&- sync periodic timer
 *
 * Forks a very basic periodic timer process, that just sleep()s for
 * the specified interval and then calls the timer function.
 * The new "sync timer" process execution starts immediately, the sleep()
 * is called first (so the first call to the timer function will happen
 * \<interval\> seconds after the call to fork_sync_timer)
 * @param child_id @see fork_process()
 * @param desc @see fork_process()
 * @param make_sock @see fork_process()
 * @param f timer function/callback
 * @param param parameter passed to the timer function
 * @param interval interval in seconds.
 * @return pid of the new process on success, -1 on error
 * (doesn't return anything in the child process)
 */
int fork_sync_timer(int child_id, char* desc, int make_sock,
                        timer_function* f, void* param, int interval)
{
    int pid;
    ticks_t ts1 = 0;
    ticks_t ts2 = 0;

    pid=fork_process(child_id, desc, make_sock);
    if (pid<0) return -1;
    if (pid==0){
        /* child */
        interval *= 1000;  /* milliseconds */
        ts2 = interval;
        if (cfg_child_init()) return -1;
        for(;;){
            if (ts2>interval)
                sleep_us(1000);     /* 1 millisecond sleep to catch up */
            else
                sleep_us(ts2*1000); /* microseconds sleep */
            ts1 = get_ticks_raw();
            cfg_update();
            f(TICKS_TO_S(ts1), param); /* ticks in sec for compatibility
                                        * with old timers */
            /* adjust the next sleep duration */
            ts2 = interval - TICKS_TO_MS(get_ticks_raw()) + TICKS_TO_MS(ts1);
        }
    }
    /* parent */
    return pid;
}
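/*
 * A minimal usage sketch for the function above (hypothetical module code,
 * not from the source): fork a sync timer firing every 5 seconds from
 * child_init() at rank PROC_MAIN. my_timer_cb() and its NULL parameter are
 * assumptions made for illustration.
 */
static void my_timer_cb(unsigned int ticks, void *param)
{
    /* keep the work short; the sync timer compensates for the time spent
     * here when computing the next sleep */
    LM_DBG("sync timer fired at %u s\n", ticks);
}

static int my_child_init(int rank)
{
    if (rank != PROC_MAIN)
        return 0;
    if (fork_sync_timer(PROC_TIMER, "MY MODULE TIMER", 1 /* make_sock */,
                my_timer_cb, NULL, 5) < 0) {
        LM_ERR("failed to fork the sync timer process\n");
        return -1;
    }
    return 0;
}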
static int child_init(int rank)
{
    int pid;

    if (rank!=PROC_MAIN)
        return 0;
    if(!reg_db_url.s || reg_db_url.len<=0)
        return 0;
    pid=fork_process(PROC_TIMER, "TIMER UAC REG", 1);
    if (pid<0)
    {
        LM_ERR("failed to register timer routine as process\n");
        return -1;
    }
    if (pid==0){
        /* child */
        /* initialize the config framework */
        if (cfg_child_init())
            return -1;

        uac_reg_load_db();
        uac_reg_timer(0);
        for(;;){
            /* update the local config framework structures */
            cfg_update();

            sleep(reg_timer_interval);
            uac_reg_timer(get_ticks());
        }
    }
    /* parent */
    return 0;
}
/**
 * \brief Forks a separate simple sleep() -&- sync periodic timer
 *
 * Forks a very basic periodic timer process, that just sleep()s for
 * the specified interval and then calls the timer function.
 * The new "sync timer" process execution starts immediately, the sleep()
 * is called first (so the first call to the timer function will happen
 * \<interval\> seconds after the call to fork_sync_timer)
 * @param child_id @see fork_process()
 * @param desc @see fork_process()
 * @param make_sock @see fork_process()
 * @param f timer function/callback
 * @param param parameter passed to the timer function
 * @param interval interval in seconds.
 * @return pid of the new process on success, -1 on error
 * (doesn't return anything in the child process)
 */
int fork_sync_timer(int child_id, char* desc, int make_sock,
                        timer_function* f, void* param, int interval)
{
    int pid;
    ticks_t ts1 = 0;
    ticks_t ts2 = 0;

    pid=fork_process(child_id, desc, make_sock);
    if (pid<0) return -1;
    if (pid==0){
        /* child */
        ts2 = interval;
        if (cfg_child_init()) return -1;
        for(;;){
            if(ts2>0) sleep(ts2);
            else sleep(1);
            ts1 = get_ticks();
            cfg_update();
            f(get_ticks(), param); /* ticks in s for compatibility with old
                                    * timers */
            ts2 = interval - get_ticks() + ts1;
        }
    }
    /* parent */
    return pid;
}
/* Relay a MESSAGE to a SIP client */
int purple_send_sip_msg(char *to, char *from, char *msg)
{
    LM_DBG("sending message from %s to %s\n", from, to);
    str msg_type = { "MESSAGE", 7 };
    str ruri, hdr, fromstr, tostr, msgstr;
    char hdr_buf[512], ruri_buf[512];
    uac_req_t uac_r;

    /* update the local config framework structures */
    cfg_update();

    ruri.s = ruri_buf;
    ruri.len = snprintf(ruri_buf, sizeof(ruri_buf), "%s;proto=purple", to);

    hdr.s = hdr_buf;
    hdr.len = snprintf(hdr_buf, sizeof(hdr_buf),
        "Content-type: text/plain" CRLF "Contact: %s" CRLF, from);

    fromstr.s = from;
    fromstr.len = strlen(from);
    tostr.s = to;
    tostr.len = strlen(to);
    msgstr.s = msg;
    msgstr.len = strlen(msg);

    set_uac_req(&uac_r, &msg_type, &hdr, &msgstr, 0, 0, 0, 0);
    if (tmb.t_request(&uac_r, &ruri, &tostr, &fromstr, 0) < 0) {
        LM_ERR("error sending request\n");
        return -1;
    }
    LM_DBG("message sent successfully\n");
    return 0;
}
/**
 * \brief Forks a separate simple millisecond-sleep() -&- sync periodic timer
 *
 * Forks a very basic periodic timer process, that just ms-sleep()s for
 * the specified interval and then calls the timer function.
 * The new "sync timer" process execution starts immediately, the ms-sleep()
 * is called first (so the first call to the timer function will happen
 * \<uinterval\> milliseconds after the call to fork_sync_utimer)
 * @param child_id @see fork_process()
 * @param desc @see fork_process()
 * @param make_sock @see fork_process()
 * @param f timer function/callback
 * @param param parameter passed to the timer function
 * @param uinterval interval in milliseconds.
 * @return pid of the new process on success, -1 on error
 * (doesn't return anything in the child process)
 */
int fork_sync_utimer(int child_id, char* desc, int make_sock,
                        utimer_function* f, void* param, int uinterval)
{
    int pid;
    ticks_t ts1 = 0;
    ticks_t ts2 = 0;

    pid=fork_process(child_id, desc, make_sock);
    if (pid<0) return -1;
    if (pid==0){
        /* child */
        ts2 = uinterval;
        if (cfg_child_init()) return -1;
        for(;;){
            if(ts2>0) sleep_us(uinterval);
            else sleep_us(1);
            ts1 = get_ticks_raw();
            cfg_update();
            f(TICKS_TO_MS(ts1), param); /* ticks in milliseconds */
            ts2 = uinterval - get_ticks_raw() + ts1;
        }
    }
    /* parent */
    return pid;
}
void pipe_reader(gpointer data, gint fd, PurpleInputCondition condition)
{
    struct purple_cmd *cmd;

    if (read(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
        LM_ERR("failed to read from command pipe: %s\n", strerror(errno));
        return;
    }

    /* update the local config framework structures */
    cfg_update();

    switch (cmd->type) {
    case PURPLE_MESSAGE_CMD:
        LM_DBG("received message cmd via pipe from <%s> to <%s>\n",
            cmd->message.from, cmd->message.to);
        pipe_handle_message(&cmd->message);
        break;
    case PURPLE_SUBSCRIBE_CMD:
        LM_DBG("received subscribe cmd via pipe from <%s> to <%s>\n",
            cmd->subscribe.from, cmd->subscribe.to);
        pipe_handle_subscribe(&cmd->subscribe);
        break;
    case PURPLE_PUBLISH_CMD:
        LM_DBG("received publish cmd via pipe from <%s>\n",
            cmd->publish.from);
        pipe_handle_publish(&cmd->publish);
        break;
    default:
        LM_ERR("unknown cmd type 0x%x\n", cmd->type);
    }
    purple_free_cmd(cmd);
}
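/*
 * For context: the writer side of this pipe passes the *pointer* to a
 * command, not the command body; the read(fd, &cmd, sizeof(cmd)) above
 * recovers it and pipe_reader() takes over ownership. A minimal sketch of
 * such a producer, assuming cmd was allocated by the sender and pipe_fd is
 * the write end of the pipe (names are illustrative, not from the module):
 */
static int send_purple_cmd(int pipe_fd, struct purple_cmd *cmd)
{
    /* write the pointer value itself; on success the reader frees the
     * command via purple_free_cmd() */
    if (write(pipe_fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
        LM_ERR("failed to write to command pipe: %s\n", strerror(errno));
        return -1;
    }
    return 0;
}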
static void jsonrpc_fifo_server(FILE *fifo_stream)
{
    FILE *reply_stream;
    char buf_in[JSONRPC_BUF_IN_SIZE];
    char buf_rpath[128];
    int lread;
    str scmd;
    str srpath;
    int nw;
    jsonrpc_plain_reply_t* jr = NULL;

    while(1) {
        /* update the local config framework structures */
        cfg_update();
        reply_stream = NULL;
        lread = 0;
        if(jsonrpc_read_stream(buf_in, JSONRPC_BUF_IN_SIZE,
                    fifo_stream, &lread)<0 || lread<=0) {
            LM_DBG("failed to get the json document from fifo stream\n");
            continue;
        }
        scmd.s = buf_in;
        scmd.len = lread;
        trim(&scmd);
        LM_DBG("preparing to execute fifo jsonrpc [%.*s]\n",
                scmd.len, scmd.s);
        srpath.s = buf_rpath;
        srpath.len = 128;
        if(jsonrpc_exec_ex(&scmd, &srpath)<0) {
            LM_ERR("failed to execute the json document from fifo stream\n");
            continue;
        }
        jr = jsonrpc_plain_reply_get();
        LM_DBG("command executed - result: [%.*s] [%d] [%p] [%.*s]\n",
                srpath.len, srpath.s, jr->rcode, jr->rbody.s,
                jr->rbody.len, jr->rbody.s);
        if(srpath.len>0) {
            reply_stream = jsonrpc_open_reply_fifo(&srpath);
            if (reply_stream==NULL) {
                LM_ERR("cannot open reply fifo: %.*s\n",
                        srpath.len, srpath.s);
                continue;
            }
            nw = fwrite(jr->rbody.s, 1, jr->rbody.len, reply_stream);
            if(nw < jr->rbody.len) {
                LM_ERR("failed to write the reply to fifo: %d out of %d\n",
                        nw, jr->rbody.len);
            }
            fclose(reply_stream);
        }
    }
    return;
}
/**
 * Accept loop that listens for incoming connections on all listening sockets.
 * When a connection is received, accept_connection() is called.
 * @returns only on shutdown
 */
void accept_loop()
{
    fd_set listen_set;
    struct timeval timeout;
    int i=0,max_sock=0,nready;
    int new_sock;

    while(listening_socks[i]){
        if (listening_socks[i]>max_sock)
            max_sock=listening_socks[i];
        i++;
    }
    while(1){
        if (shutdownx && *shutdownx) break;
        cfg_update();
        timeout.tv_sec=2;
        timeout.tv_usec=0;
        FD_ZERO(&listen_set);
        i=0;
        while(listening_socks[i]){
            FD_SET(listening_socks[i],&listen_set);
            i++;
        }
        nready = select(max_sock+1, &listen_set, 0, 0, &timeout);
        if (nready == 0){
            LM_DBG("accept_loop(): No connection attempts\n");
            continue;
        }
        if (nready == -1) {
            if (errno == EINTR) {
                continue;
            } else {
                LM_ERR("accept_loop(): select fails: %s\n",
                        strerror(errno));
                sleep(2);
                continue;
            }
        }
        i=0;
        while(listening_socks[i]){
            if (FD_ISSET(listening_socks[i],&listen_set)){
                accept_connection(listening_socks[i],&new_sock);
            }
            i++;
        }
    }
}
/* main timer function, never exits */
void timer_main()
{
    in_timer=1; /* mark this process as the fast timer */
    while(1){
        if (run_timer){
            /* update the local cfg if needed */
            cfg_update();
            timer_handler();
        }
        pause();
    }
}
void cpl_aux_process(int cmd_out, char *log_dir)
{
    struct cpl_cmd cmd;
    int len;

    /* this process will ignore SIGCHLD signal */
    if (signal(SIGCHLD, SIG_IGN)==SIG_ERR) {
        LOG(L_ERR,"ERROR:cpl_c:cpl_aux_process: cannot set to IGNORE "
            "SIGCHLD signal\n");
    }
    /* set the path for logging */
    if (log_dir) {
        strcpy(file, log_dir);
        file_ptr = file + strlen(log_dir);
        *(file_ptr++) = '/';
    }

    while(1) {
        /* let's read a command from pipe */
        len = read(cmd_out, &cmd, sizeof(struct cpl_cmd));
        if (len!=sizeof(struct cpl_cmd)) {
            if (len>=0) {
                LOG(L_ERR,"ERROR:cpl_aux_process: truncated message"
                    " read from pipe! -> discarded\n");
            } else if (errno!=EAGAIN) {
                LOG(L_ERR,"ERROR:cpl_aux_process: pipe reading failed:"
                    " %s\n",strerror(errno));
            }
            sleep(1);
            continue;
        }
        /* update the local config */
        cfg_update();

        /* process the command */
        switch (cmd.code) {
            case CPL_LOG_CMD:
                write_log(&cmd);
                break;
            case CPL_MAIL_CMD:
                send_mail(&cmd);
                break;
            default:
                LOG(L_ERR,"ERROR:cpl_aux_process: unknown command (%d)"
                    " received! -> ignoring\n",cmd.code);
        } /* end switch */
    }
}
/**
 * Loop that checks every #TIMER_RESOLUTION seconds whether some timer
 * expired. On expiry, the callback is called. The callback should return
 * rapidly in order to avoid blocking the timer process. If the timer is
 * "one_time", then it is removed from the timers list.
 * @returns on shutdown
 */
void timer_loop()
{
    time_t now;
    timer_cb_t *i;
    callback_f cb=0;
    void *ptr=0;
    int interval=0;

    while(1){
        if (shutdownx && *shutdownx) break;
        now = time(0);
        cfg_update();
        do {
            cb = 0;
            lock_get(timers_lock);
            i = timers->head;
            while(i && i->expires>now)
                i = i->next;
            if (i){
                cb = i->cb;
                ptr = *(i->ptr);
                if (i->one_time){
                    if (i->prev) i->prev->next = i->next;
                    else timers->head = i->next;
                    if (i->next) i->next->prev = i->prev;
                    else timers->tail = i->prev; /* i was the tail */
                    shm_free(i);
                    i=0;
                }
            }
            lock_release(timers_lock);
            if (cb) {
                interval = cb(now,ptr);
                if (i){
                    lock_get(timers_lock);
                    i->expires = now + interval;
                    lock_release(timers_lock);
                }
            }
        } while(cb);
        sleep(TIMER_RESOLUTION);
    }
}
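/*
 * A sketch of a callback matching the loop above (illustrative only, the
 * name is an assumption): the integer a callback returns becomes the new
 * interval, i.e. a non-one_time timer is re-armed to fire again at
 * now + the returned value.
 */
static int my_expire_cb(time_t now, void *ptr)
{
    /* do quick, non-blocking work here */
    return 10; /* re-arm: fire again in roughly 10 seconds */
}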
/*! This function:
 *
 * 1) Registers itself with the Master Agent
 * 2) Initializes all of the SNMPStats modules scalars and tables, while
 *    simultaneously registering their respective SNMP OID's and handlers
 *    with the master agent.
 * 3) Repeatedly checks for new SNMP messages to process
 *
 * \note This function never returns, so it should always be called from a
 *       sub-process.
 */
static int initialize_agentx(void)
{
    /* We register with a master agent */
    register_with_master_agent(AGENT_PROCESS_NAME);

    LM_DBG("Initializing Kamailio OID's for SNMPD MasterX\n");

    /* Initialize all scalars, and let the master agent know we want to
     * handle all OID's pertaining to these scalars. */
    init_kamailioSIPCommonObjects();
    init_kamailioSIPServerObjects();
    init_kamailioObjects();

    /* Initialize all the tables, and let the master agent know we want to
     * handle all the OID's pertaining to these tables */
    init_kamailioSIPPortTable();
    init_kamailioSIPMethodSupportedTable();
    init_kamailioSIPStatusCodesTable();
    init_kamailioSIPRegUserTable();
    init_kamailioSIPContactTable();
    init_kamailioSIPRegUserLookupTable();
    init_kamailioServer();
    init_kamailioNet();
    init_kamailioNetConfig();

    LM_DBG("Done initializing Kamailio OID's for SNMPD MasterX\n");

    /* In case we receive a request to stop (kill -TERM or kill -INT) */
    keep_running = 1;

    while(keep_running) {
        /* update the local config framework structures */
        cfg_update();

        agent_check_and_process(1); /* 1 == block while waiting for
                                     * requests */
    }

    LM_DBG("Shutting down Kamailio SNMPD MasterX sub agent.\n");
    snmp_shutdown(AGENT_PROCESS_NAME);
    SOCK_CLEANUP;
    exit(0);

    return 0;
}
/**
 * This is the main worker process.
 * Takes tasks from the queue in a loop and processes them by calling the
 * registered callbacks.
 * @param id - id of the worker
 * @returns never, exits on shutdown.
 */
void worker_process(int id)
{
    task_t t;
    cdp_cb_t *cb;
    int r;

    LM_INFO("[%d] Worker process started...\n", id);
    /* init the application level for this child */
    while (1) {
        if (shutdownx && (*shutdownx))
            break;
        cfg_update();
        t = take_task();
        if (!t.msg) {
            if (shutdownx && (*shutdownx))
                break;
            LM_INFO("[%d] got empty task Q(%d/%d)\n", id,
                    tasks->start, tasks->end);
            continue;
        }
        LM_DBG("worker_process(): [%d] got task Q(%d/%d)\n", id,
                tasks->start, tasks->end);
        r = is_req(t.msg);
        for (cb = callbacks->head; cb; cb = cb->next)
            (*(cb->cb))(t.p, t.msg, *(cb->ptr));

        if (r) {
            AAAFreeMessage(&(t.msg));
        } else {
            /* will be freed by the user in upper api */
            /*AAAFreeMessage(&(t.msg));*/
        }
    }
    worker_poison_queue();
    LM_INFO("[%d]... Worker process finished\n", id);
#ifdef CDP_FOR_SER
#else
#ifdef PKG_MALLOC
    LM_DBG("Worker[%d] Memory status (pkg):\n", id);
    //pkg_status();
#ifdef pkg_sums
    pkg_sums();
#endif
#endif
    dp_del_pid(getpid());
#endif
    exit(0);
}
/**
 * \brief Forks a separate simple sleep() periodic timer
 *
 * Forks a very basic periodic timer process, that just sleep()s for
 * the specified interval and then calls the timer function.
 * The new "basic timer" process execution starts immediately, the sleep()
 * is called first (so the first call to the timer function will happen
 * \<interval\> seconds after the call to fork_basic_timer)
 * @param child_id @see fork_process()
 * @param desc @see fork_process()
 * @param make_sock @see fork_process()
 * @param f timer function/callback
 * @param param parameter passed to the timer function
 * @param interval interval in seconds.
 * @return pid of the new process on success, -1 on error
 * (doesn't return anything in the child process)
 */
int fork_basic_timer(int child_id, char* desc, int make_sock,
                        timer_function* f, void* param, int interval)
{
    int pid;

    pid=fork_process(child_id, desc, make_sock);
    if (pid<0) return -1;
    if (pid==0){
        /* child */
        if (cfg_child_init()) return -1;
        for(;;){
            sleep(interval);
            cfg_update();
            f(get_ticks(), param); /* ticks in s for compatibility with old
                                    * timers */
        }
    }
    /* parent */
    return pid;
}
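/*
 * Note: unlike the fork_sync_timer() variants above, this basic timer always
 * sleeps the full interval, so each run is delayed by however long the
 * callback itself takes and the firing points drift over time. It is the
 * simpler choice when the exact period does not matter.
 */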
/**
 * \brief Forks a separate simple millisecond-sleep() periodic timer
 *
 * Forks a very basic periodic timer process, that just ms-sleep()s for
 * the specified interval and then calls the timer function.
 * The new "basic timer" process execution starts immediately, the ms-sleep()
 * is called first (so the first call to the timer function will happen
 * \<uinterval\> milliseconds after the call to fork_basic_utimer)
 * @param child_id @see fork_process()
 * @param desc @see fork_process()
 * @param make_sock @see fork_process()
 * @param f timer function/callback
 * @param param parameter passed to the timer function
 * @param uinterval interval in milliseconds.
 * @return pid of the new process on success, -1 on error
 * (doesn't return anything in the child process)
 */
int fork_basic_utimer(int child_id, char* desc, int make_sock,
                        utimer_function* f, void* param, int uinterval)
{
    int pid;
    ticks_t ts;

    pid=fork_process(child_id, desc, make_sock);
    if (pid<0) return -1;
    if (pid==0){
        /* child */
        if (cfg_child_init()) return -1;
        for(;;){
            sleep_us(uinterval);
            cfg_update();
            ts = get_ticks_raw();
            f(TICKS_TO_MS(ts), param); /* ticks in milliseconds */
        }
    }
    /* parent */
    return pid;
}
/* Relay a PUBLISH */
int purple_send_sip_publish(char *from, char *tupleid,
        enum purple_publish_basic basic,
        enum purple_publish_activity primitive, const char *note)
{
    LM_DBG("publishing presence for <%s> with tuple [%s]\n", from, tupleid);
    char pres_buff[512];
    publ_info_t publ;
    str pres_uri;

    /* update the local config framework structures */
    cfg_update();

    memset(&publ, 0, sizeof(publ_info_t));

    pres_uri.s = pres_buff;
    pres_uri.len = sprintf(pres_buff, "%s;proto=purple", from);

    publ.pres_uri = &pres_uri;
    publ.source_flag = PURPLE_PUBLISH;
    publ.event = PRESENCE_EVENT;

    str *body = NULL;
    if (basic == PURPLE_BASIC_OPEN) {
        body = build_pidf(from, tupleid, basic, primitive, note);
        publ.expires = 3600;
    } else {
        publ.body = NULL;
        publ.expires = 0;
    }
    publ.body = body;

    if(pua_send_publish(&publ) < 0) {
        LM_ERR("error while sending publish\n");
        return -1;
    }
    LM_DBG("publish sent successfully for <%s>\n", from);
    return 0;
}
/* handle io routine, based on the fd_map type
 * (it will be called from io_wait_loop* )
 * params:  fm  - pointer to a fd hash entry
 *          idx - index in the fd_array (or -1 if not known)
 * return: -1 on error, or when we are not interested any more on reads
 *            from this fd (e.g.: we are closing it )
 *          0 on EAGAIN or when by some other way it is known that no more
 *            io events are queued on the fd (the receive buffer is empty).
 *            Useful to detect when there are no more io events queued for
 *            sigio_rt, epoll_et, kqueue.
 *         >0 on successful read from the fd (when there might be more io
 *            queued -- the receive buffer might still be non-empty)
 */
inline static int handle_io(struct fd_map* fm, short events, int idx)
{
    int ret;
    int n;
    int read_flags;
    struct tcp_connection* con;
    int s;
    long resp;
    ticks_t t;

    /* update the local config */
    cfg_update();

    switch(fm->type){
        case F_TCPMAIN:
again:
            ret=n=receive_fd(fm->fd, &con, sizeof(con), &s, 0);
            LM_DBG("received n=%d con=%p, fd=%d\n", n, con, s);
            if (unlikely(n<0)){
                if (errno == EWOULDBLOCK || errno == EAGAIN){
                    ret=0;
                    break;
                }else if (errno == EINTR) goto again;
                else{
                    LM_CRIT("read_fd: %s \n", strerror(errno));
                    abort(); /* big error*/
                }
            }
            if (unlikely(n==0)){
                LM_ERR("0 bytes read\n");
                goto error;
            }
            if (unlikely(con==0)){
                LM_CRIT("null pointer\n");
                goto error;
            }
            con->fd=s;
            if (unlikely(s==-1)) {
                LM_ERR("read_fd: no fd read\n");
                goto con_error;
            }
            con->reader_pid=my_pid();
            if (unlikely(con==tcp_conn_lst)){
                LM_CRIT("duplicate connection received: %p, id %d, fd %d,"
                        " refcnt %d state %d (n=%d)\n", con, con->id,
                        con->fd, atomic_get(&con->refcnt), con->state, n);
                goto con_error;
                break; /* try to recover */
            }
            if (unlikely(con->state==S_CONN_BAD)){
                LM_WARN("received an already bad connection: %p id %d"
                        " refcnt %d\n", con, con->id,
                        atomic_get(&con->refcnt));
                goto con_error;
            }
            /* if we received the fd there is most likely data waiting to
             * be read => process it first to avoid extra sys calls */
            read_flags=((con->flags & (F_CONN_EOF_SEEN|F_CONN_FORCE_EOF)) &&
                        !(con->flags & F_CONN_OOB_DATA))? RD_CONN_FORCE_EOF
                        :0;
#ifdef USE_TLS
repeat_1st_read:
#endif /* USE_TLS */
            resp=tcp_read_req(con, &n, &read_flags);
            if (unlikely(resp<0)){
                /* some error occurred, but on the new fd, not on the tcp
                 * main fd, so keep the ret value */
                if (unlikely(resp!=CONN_EOF))
                    con->state=S_CONN_BAD;
                LM_WARN("%s:%d %s releasing\n", __FILE__, __LINE__,
                        __PRETTY_FUNCTION__);
                release_tcpconn(con, resp, tcpmain_sock);
                break;
            }
#ifdef USE_TLS
            /* repeat read if requested (for now only tls might do this) */
            if (unlikely(read_flags & RD_CONN_REPEAT_READ))
                goto repeat_1st_read;
#endif /* USE_TLS */
            /* must be before io_watch_add, io_watch_add might catch some
             * already existing events => might call handle_io and
             * handle_io might decide to del. the new connection =>
             * must be in the list */
            tcpconn_listadd(tcp_conn_lst, con, c_next, c_prev);
            t=get_ticks_raw();
            con->timeout=t+S_TO_TICKS(TCP_CHILD_TIMEOUT);
            /* re-activate the timer */
            con->timer.f=tcpconn_read_timeout;
            local_timer_reinit(&con->timer);
            local_timer_add(&tcp_reader_ltimer, &con->timer,
                                S_TO_TICKS(TCP_CHILD_TIMEOUT), t);
            if (unlikely(io_watch_add(&io_w, s, POLLIN, F_TCPCONN, con)<0)){
                LM_CRIT("io_watch_add failed for %p id %d fd %d, state %d,"
                        " flags %x, main fd %d, refcnt %d\n",
                        con, con->id, con->fd, con->state, con->flags,
                        con->s, atomic_get(&con->refcnt));
                tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
                local_timer_del(&tcp_reader_ltimer, &con->timer);
                goto con_error;
            }
            break;
        case F_TCPCONN:
            con=(struct tcp_connection*)fm->data;
            if (unlikely(con->state==S_CONN_BAD)){
                resp=CONN_ERROR;
                if (!(con->send_flags.f & SND_F_CON_CLOSE))
                    LM_WARN("F_TCPCONN connection marked as bad: %p id %d"
                            " refcnt %d\n", con, con->id,
                            atomic_get(&con->refcnt));
                goto read_error;
            }
            read_flags=((
#ifdef POLLRDHUP
                        (events & POLLRDHUP) |
#endif /* POLLRDHUP */
                        (events & (POLLHUP|POLLERR)) |
                            (con->flags & (F_CONN_EOF_SEEN|F_CONN_FORCE_EOF)))
                        && !(events & POLLPRI))? RD_CONN_FORCE_EOF: 0;
#ifdef USE_TLS
repeat_read:
#endif /* USE_TLS */
            resp=tcp_read_req(con, &ret, &read_flags);
            if (unlikely(resp<0)){
read_error:
                ret=-1; /* some error occurred */
                if (unlikely(io_watch_del(&io_w, con->fd, idx,
                                            IO_FD_CLOSING) < 0)){
                    LM_CRIT("io_watch_del failed for %p id %d fd %d,"
                            " state %d, flags %x, main fd %d, refcnt %d\n",
                            con, con->id, con->fd, con->state,
                            con->flags, con->s, atomic_get(&con->refcnt));
                }
                tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
                local_timer_del(&tcp_reader_ltimer, &con->timer);
                if (unlikely(resp!=CONN_EOF))
                    con->state=S_CONN_BAD;
                LM_WARN("%s:%d %s releasing\n", __FILE__, __LINE__,
                        __PRETTY_FUNCTION__);
                release_tcpconn(con, resp, tcpmain_sock);
            }else{
#ifdef USE_TLS
                if (unlikely(read_flags & RD_CONN_REPEAT_READ))
                    goto repeat_read;
#endif /* USE_TLS */
                /* update timeout */
                con->timeout=get_ticks_raw()+S_TO_TICKS(TCP_CHILD_TIMEOUT);
                /* ret= 0 (read the whole socket buffer) if short read &
                 * !POLLPRI, bytes read otherwise */
                ret&=(((read_flags & RD_CONN_SHORT_READ) &&
                        !(events & POLLPRI)) - 1);
            }
            break;
        case F_NONE:
            LM_CRIT("empty fd map %p (%d): {%d, %d, %p}\n",
                        fm, (int)(fm-io_w.fd_hash),
                        fm->fd, fm->type, fm->data);
            goto error;
        default:
            LM_CRIT("unknown fd type %d\n", fm->type);
            goto error;
    }
    return ret;
con_error:
    con->state=S_CONN_BAD;
    LM_WARN("%s:%d %s releasing\n", __FILE__, __LINE__, __PRETTY_FUNCTION__);
    release_tcpconn(con, CONN_ERROR, tcpmain_sock);
    return ret;
error:
    return -1;
}
/** Main loop for the Event Dispatcher process.
 */
int dispatcher_main_loop(void)
{
    struct pollfd poll_fds[3+MAX_AS_NR],*poll_tmp;
    int clean_index,i,j,k,fd,poll_events=0,socks[2],chld_status;
    int as_nr,unc_as_nr;
    pid_t chld;
    struct timeval last_ping,now;
    struct as_entry *as;

    sig_flag=0;
    is_dispatcher=1;
    as_nr=0;

    timerclear(&last_ping);
    timerclear(&now);

    signal(SIGCHLD,seas_sighandler);
    signal(SIGTERM,seas_sighandler);
    signal(SIGUSR1,seas_sighandler);
    signal(SIGINT, seas_sighandler);
    signal(SIGKILL,seas_sighandler);

    strcpy(whoami,"Seas Event Dispatcher process");
    /* set process_no to -1 because otherwise the logging process confuses
     * this process with another from SER (see LM_*() and dprint() and
     * my_pid()) */
    process_no = -1;

    if(open_server_sockets(seas_listen_ip,seas_listen_port,socks)==-1){
        LM_ERR("unable to open server sockets on dispatcher\n");
        return -1;
    }
    for(i=0;i<2;i++){
        poll_fds[i].fd=socks[i];
        poll_fds[i].revents=0;
        poll_fds[i].events=POLLIN;
    }
    poll_fds[2].fd=read_pipe;
    poll_fds[2].revents=0;
    poll_fds[2].events=POLLIN; /* pollhup ? */

    poll_events=0;
    unc_as_nr=0;

    if(use_ha)
        spawn_pinger();

    while(1){
        /* update the local config framework structures */
        cfg_update();

        if(sig_flag==SIGCHLD){
            while ((chld=waitpid(-1, &chld_status, WNOHANG))>0) {
                if (WIFEXITED(chld_status)){
                    LM_INFO("child process %d exited normally,"
                            " status=%d\n", chld,
                            WEXITSTATUS(chld_status));
                }else if (WIFSIGNALED(chld_status)) {
                    LM_INFO("child process %d exited by a signal %d\n",
                            chld,WTERMSIG(chld_status));
                }else if (WIFSTOPPED(chld_status))
                    LM_INFO("child process %d stopped by a signal %d\n",
                            chld,WSTOPSIG(chld_status));
                for (as=as_list;as;as=as->next) {
                    if(as->type!=AS_TYPE)
                        continue;
                    if(as->u.as.action_pid==chld){
                        for(i=0;i<as_nr &&
                                ((poll_fds[3+i].fd)!=(as->u.as.event_fd));
                                i++)
                            ;
                        if(i==as_nr){
                            LM_ERR("Either the pinger has died or BUG"
                                    " found..\n");
                            continue;
                        }
                        /* overwrite the obsolete 'i' position with the
                         * next position */
                        for(j=3+i;j<(as_nr+unc_as_nr+3-1);j++){
                            poll_fds[j].fd=poll_fds[j+1].fd;
                            poll_fds[j].events=poll_fds[j+1].events;
                            poll_fds[j].revents=poll_fds[j+1].revents;
                        }
                        close(as->u.as.event_fd); /* close the socket fd */
                        if (as->u.as.ev_buffer.s) {
                            pkg_free(as->u.as.ev_buffer.s);
                            as->u.as.ev_buffer.s=(char *)0;
                            as->u.as.ev_buffer.len=0;
                        }
                        as->u.as.event_fd=as->u.as.action_fd=-1;
                        as->connected=0;
                        destroy_pingtable(&as->u.as.jain_pings);
                        destroy_pingtable(&as->u.as.servlet_pings);
                        as_nr--;
                        LM_WARN("client [%.*s] leaving (Action Dispatcher"
                                " process died!)\n",
                                as->name.len,as->name.s);
                        break;
                    } /* if(action_pid==chld) */
                } /* for(as=as_list;as;as=as->next) */
            } /* while(waitpid(-1)>0) */
        }else if (sig_flag) {
            LM_WARN("received signal != sigchld(%d)\n",sig_flag);
        }
        sig_flag=0;
        clean_index=0;
        LM_INFO("polling [2 ServSock] [1 pipe] [%d App Servers]"
                " [%d Incomplete AS]\n",as_nr,unc_as_nr);
        poll_events = poll(poll_fds,3+unc_as_nr+as_nr,-1);
        if (poll_events == -1) {
            if(errno==EINTR){
                /* handle the case a child has died: it will be done in
                 * the next iteration in if(seas_sigchld_received) */
                continue;
            }
            if(errno==EBADF){
                LM_ERR("invalid file descriptor passed to poll (%s)\n",
                        strerror(errno));
                return -1; /* ?? */
            }
            /* errors */
            LM_ERR("poll'ing:%s\n",strerror(errno));
            poll_events=0;
            continue;
        } else if (poll_events == 0) { /* timeout */
            continue;
        } else { /* there are events! */
            /* handle connections from server sockets */
            for(i=0;i<2;i++){
                if(poll_fds[i].revents)
                    poll_events--;
                if(poll_fds[i].revents & POLLIN){
                    poll_fds[i].revents &= (~POLLIN);
                    if((fd=new_as_connect(socks[i],i==0?'e':'a'))>=0){
                        poll_tmp=&poll_fds[3+as_nr+unc_as_nr];
                        poll_tmp->fd=fd;
                        poll_tmp->events=POLLIN|POLLHUP;
                        unc_as_nr++;
                        LM_DBG("Have new %s client\n",
                                i==0?"event":"action");
                    }else{
                        LM_ERR("accepting connection from AS\n");
                    }
                }
            }
            /* handle data from pipe */
            if(poll_fds[2].revents & POLLIN){
                poll_fds[2].revents &= (~POLLIN);
                poll_events--;
                if(dispatch_relay()<0){
                    LM_ERR("dispatch_relay returned -1;"
                            " should clean up the table\n");
                }
            }
            /* now handle data received from completed AS */
            clean_index=0;
            LM_DBG("Scanning data from %d AS\n",as_nr);
            for(i=0;(i<as_nr) && poll_events;i++){
                clean_index=0;
                poll_tmp=&poll_fds[3+i];
                if(poll_tmp->revents)
                    poll_events--;
                if(poll_tmp->revents & POLLIN){
                    LM_DBG("POLLIN found in AS #%i\n",i);
                    poll_tmp->revents &= (~POLLIN);
                    switch(handle_as_data(poll_tmp->fd)){
                        case -2: /* read returned 0 bytes, an AS client
                                  * is leaving */
                            clean_index=1;
                            break;
                        case -1: /* shouldn't happen */
                            LM_ERR("reading from AS socket\n");
                            break;
                        case 0: /* event_response received and processed */
                            break;
                        default:
                            LM_WARN("unknown return type from"
                                    " handle_as_data\n");
                    }
                }
                if(clean_index || (poll_tmp->revents & POLLHUP)){
                    LM_DBG("POLLHUP or read==0 found in AS #%i\n",i);
                    clean_index=0;
                    poll_tmp->revents = 0;
                    for(as=as_list;as;as=as->next){
                        if(as->type==CLUSTER_TYPE)
                            continue;
                        if(as->connected &&
                                (as->u.as.event_fd == poll_tmp->fd)){
                            close(poll_tmp->fd); /* close the socket fd */
                            /* TODO we should send a signal to the Action
                             * Dispatcher !!! */
                            as->connected=0;
                            as_nr--;
                            /* overwrite the obsolete 'i' position with
                             * the next position */
                            for(k=i;k<(as_nr+unc_as_nr);k++){
                                j=3+k;
                                poll_fds[j].fd=poll_fds[j+1].fd;
                                poll_fds[j].events=poll_fds[j+1].events;
                                poll_fds[j].revents=poll_fds[j+1].revents;
                            }
                            --i;
                            LM_WARN("client %.*s leaving !!!\n",
                                    as->name.len,as->name.s);
                            break;
                        }
                    }
                    if (!as) {
                        LM_ERR("the leaving client was not found in the"
                                " as_list\n");
                    }
                }
            }
            /* now handle data sent from incomplete AS */
            LM_DBG("Scanning data from %d incomplete AS\n",unc_as_nr);
            clean_index=0;
            for(i=0;i<unc_as_nr && poll_events;i++){
                poll_tmp=&poll_fds[3+as_nr+i];
                if(poll_tmp->revents)
                    poll_events--;
                if(poll_tmp->revents & POLLIN){
                    LM_DBG("POLLIN found in incomplete AS #%d\n",i);
                    poll_tmp->revents &= (~POLLIN);
                    fd=handle_unc_as_data(poll_tmp->fd);
                    if(fd>0){
                        /* there's a new AS, push the incomplete poll_fds
                         * up and set the AS */
                        for(k=i;k>0;k--){
                            j=3+as_nr+k;
                            poll_fds[j].fd=poll_fds[j-1].fd;
                            poll_fds[j].events=poll_fds[j-1].events;
                            poll_fds[j].revents=poll_fds[j-1].revents;
                        }
                        poll_fds[3+as_nr].fd=fd;
                        poll_fds[3+as_nr].events=POLLIN|POLLHUP;
                        poll_fds[3+as_nr].revents=0;
                        as_nr++; /* not very sure if this is thread-safe */
                        unc_as_nr--;
                    }else if(fd<=0){
                        /* pull the upper set of incomplete AS down and
                         * take this one out */
                        poll_tmp->revents=0;
                        for(k=i;k<(unc_as_nr-1);k++){
                            j=3+as_nr+k;
                            poll_fds[j].fd=poll_fds[j+1].fd;
                            poll_fds[j].events=poll_fds[j+1].events;
                            poll_fds[j].revents=poll_fds[j+1].revents;
                        }
                        unc_as_nr--;
                        /* decrement i: we pulled down the upper part of
                         * the unc_as array, so the for loop must revisit
                         * this slot */
                        i--;
                    }
                }
                if(poll_tmp->revents & POLLHUP){
                    LM_DBG("POLLHUP found in incomplete AS #%d\n",i);
                    close(poll_tmp->fd);
                    for(k=i;k<(unc_as_nr-1);k++){
                        j=3+as_nr+k;
                        poll_fds[j].fd=poll_fds[j+1].fd;
                        poll_fds[j].events=poll_fds[j+1].events;
                        poll_fds[j].revents=poll_fds[j+1].revents;
                    }
                    unc_as_nr--;
                    i--;
                    poll_tmp->revents = 0;
                }
            } /* for */
        } /* else (poll_events>0) */
    } /* while(1) */
}
void jsonrpc_dgram_server(int rx_sock, int tx_sock)
{
    int ret;
    str scmd;
    jsonrpc_plain_reply_t* jr = NULL;

    ret = 0;

    while(1) { /* read the datagram */
        /* update the local config framework structures */
        cfg_update();
        memset(jsonrpc_dgram_buf, 0, JSONRPC_DGRAM_BUF_SIZE);
        jsonrpc_dgram_reply_addr_len = sizeof(jsonrpc_dgram_reply_addr);

        /* get the client's address */
        ret = recvfrom(rx_sock, jsonrpc_dgram_buf, JSONRPC_DGRAM_BUF_SIZE, 0,
                (struct sockaddr*)&jsonrpc_dgram_reply_addr,
                &jsonrpc_dgram_reply_addr_len);
        if (ret == -1) {
            LM_ERR("recvfrom: (%d) %s\n", errno, strerror(errno));
            if ((errno == EINTR) || (errno == EAGAIN)
                    || (errno == EWOULDBLOCK) || (errno == ECONNREFUSED)) {
                LM_DBG("got %d (%s), going on\n", errno, strerror(errno));
                continue;
            }
            LM_DBG("error in recvfrom\n");
            continue;
        }
        if(ret == 0)
            continue;
        LM_DBG("received %.*s\n", ret, jsonrpc_dgram_buf);
        if(ret > JSONRPC_DGRAM_BUF_SIZE) {
            LM_ERR("buffer overflow\n");
            continue;
        }
        scmd.s = jsonrpc_dgram_buf;
        scmd.len = ret;
        trim(&scmd);
        LM_DBG("buf is %s and we have received %i bytes\n",
                scmd.s, scmd.len);
        if(jsonrpc_exec_ex(&scmd, NULL)<0) {
            LM_ERR("failed to execute the json document from datagram\n");
            continue;
        }
        jr = jsonrpc_plain_reply_get();
        LM_DBG("command executed - result: [%d] [%p] [%.*s]\n",
                jr->rcode, jr->rbody.s,
                jr->rbody.len, jr->rbody.s);
        jsonrpc_dgram_send_data(tx_sock, jr->rbody.s, jr->rbody.len,
                (struct sockaddr*)&jsonrpc_dgram_reply_addr,
                jsonrpc_dgram_reply_addr_len,
                jsonrpc_dgram_timeout);
    }
}
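/*
 * A client-side sketch for the datagram server above (illustrative, not
 * part of the module): send one JSON-RPC command over a unix datagram
 * socket and print the reply. The server socket path and the use of the
 * "core.version" method are assumptions; note that a datagram client must
 * bind its own address, otherwise the server has nowhere to send the reply.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    struct sockaddr_un srv, cli;
    char buf[8192];
    ssize_t n;
    int fd;
    const char *cmd =
        "{\"jsonrpc\": \"2.0\", \"method\": \"core.version\", \"id\": 1}";

    fd = socket(AF_UNIX, SOCK_DGRAM, 0);
    if (fd < 0) return 1;

    /* bind a private reply address for this client */
    memset(&cli, 0, sizeof(cli));
    cli.sun_family = AF_UNIX;
    snprintf(cli.sun_path, sizeof(cli.sun_path),
            "/tmp/jsonrpc.client.%d", (int)getpid());
    unlink(cli.sun_path);
    if (bind(fd, (struct sockaddr*)&cli, sizeof(cli)) < 0) return 1;

    /* hypothetical path; must match the module's datagram socket */
    memset(&srv, 0, sizeof(srv));
    srv.sun_family = AF_UNIX;
    snprintf(srv.sun_path, sizeof(srv.sun_path),
            "/var/run/kamailio/jsonrpc.sock");

    if (sendto(fd, cmd, strlen(cmd), 0,
            (struct sockaddr*)&srv, sizeof(srv)) < 0) return 1;
    n = recvfrom(fd, buf, sizeof(buf)-1, 0, NULL, NULL);
    if (n > 0) {
        buf[n] = '\0';
        printf("%s\n", buf);
    }
    close(fd);
    unlink(cli.sun_path);
    return 0;
}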
void cmd_pipe_cb(int fd, short event, void *arg)
{
    struct jsonrpc_pipe_cmd *cmd;
    char *ns = 0;
    size_t bytes;
    json_object *payload = NULL;
    jsonrpc_request_t *req = NULL;
    json_object *params;
    /* struct event *ev = (struct event*)arg; */

    if (read(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
        LM_ERR("failed to read from command pipe: %s\n", strerror(errno));
        return;
    }

    cfg_update();

    params = json_tokener_parse(cmd->params);

    if (cmd->notify_only) {
        payload = build_jsonrpc_notification(cmd->method, params);
    } else {
        req = build_jsonrpc_request(cmd->method, params, (char*)cmd, res_cb);
        if (req)
            payload = req->payload;
    }

    if (!payload) {
        LM_ERR("Failed to build jsonrpc_request_t (method: %s, params: %s)\n",
                cmd->method, cmd->params);
        goto error;
    }

    char *json = (char*)json_object_get_string(payload);

    bytes = netstring_encode_new(&ns, json, (size_t)strlen(json));

    struct jsonrpc_server_group *g;
    int sent = 0;
    for (g = server_group; g != NULL; g = g->next_group) {
        struct jsonrpc_server *s, *first = NULL;
        for (s = g->next_server; s != first; s = s->next) {
            if (first == NULL)
                first = s;
            if (s->status == JSONRPC_SERVER_CONNECTED) {
                if (send(s->socket, ns, bytes, 0) == bytes) {
                    sent = 1;
                    break;
                } else {
                    handle_server_failure(s);
                }
            }
            g->next_server = s->next;
        }
        if (sent) {
            break;
        } else {
            LM_WARN("Failed to send on priority group %d..."
                    " proceeding to next priority group.\n", g->priority);
        }
    }

    if (sent && req) {
        int timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
        if (timerfd == -1) {
            LM_ERR("Could not create timerfd.");
            goto error;
        }
        req->timerfd = timerfd;
        struct itimerspec *itime = pkg_malloc(sizeof(struct itimerspec));
        CHECK_MALLOC_VOID(itime);
        itime->it_interval.tv_sec = 0;
        itime->it_interval.tv_nsec = 0;
        itime->it_value.tv_sec = JSONRPC_TIMEOUT/1000;
        itime->it_value.tv_nsec = (JSONRPC_TIMEOUT % 1000) * 1000000;
        if (timerfd_settime(timerfd, 0, itime, NULL) == -1) {
            LM_ERR("Could not set timer.");
            pkg_free(itime);
            goto error;
        }
        pkg_free(itime);
        struct event *timer_ev = pkg_malloc(sizeof(struct event));
        CHECK_MALLOC_VOID(timer_ev);
        event_set(timer_ev, timerfd, EV_READ, timeout_cb, req);
        if(event_add(timer_ev, NULL) == -1) {
            LM_ERR("event_add failed while setting request timer (%s).",
                    strerror(errno));
            goto error;
        }
        req->timer_ev = timer_ev;
    } else if (!sent) {
        LM_ERR("Request could not be sent... no more failover groups.\n");
        if (req) {
            json_object *error = json_object_new_string("failure");
            void_jsonrpc_request(req->id);
            req->cbfunc(error, req->cbdata, 1);
        }
    }

    pkg_free(ns);
    json_object_put(payload);
    if (cmd->notify_only)
        free_pipe_cmd(cmd);
    return;

error:
    if(ns)
        pkg_free(ns);
    if(payload)
        json_object_put(payload);
    if (cmd->notify_only)
        free_pipe_cmd(cmd);
    return;
}
void slow_timer_main()
{
    int n;
    ticks_t ret;
    struct timer_ln* tl;
    unsigned short i;
#ifdef USE_SIGWAIT
    int sig;
#endif

    in_slow_timer=1; /* mark this process as the slow timer */
    while(1){
#ifdef USE_SIGWAIT
        n=sigwait(&slow_timer_sset, &sig);
#else
        n=sigwaitinfo(&slow_timer_sset, 0);
#endif
        if (n==-1){
            if (errno==EINTR) continue; /* some other signal, ignore it */
            LM_ERR("sigwaitinfo failed: %s [%d]\n", strerror(errno), errno);
            sleep(1);
            /* try to continue */
        }
#ifdef USE_SIGWAIT
        if (sig!=SLOW_TIMER_SIG){
#ifdef __OS_darwin
            /* on darwin sigwait is buggy: it will cause extreme slow down
             * on signal delivery for the signals it doesn't wait on
             * (on darwin 8.8.0, g4 1.5Ghz I've measured a 36s delay!).
             * To work around this bug, we sigwait() on all the signals we
             * are interested in ser and manually call the master signal
             * handler if the signal != slow timer signal -- andrei */
            sig_usr(sig);
#endif
            continue;
        }
#endif
        /* update the local cfg if needed */
        cfg_update();

        LOCK_SLOW_TIMER_LIST();
        while(*s_idx!=*t_idx){
            i= *s_idx%SLOW_LISTS_NO;
            while(slow_timer_lists[i].next!=
                    (struct timer_ln*)&slow_timer_lists[i]){
                tl=slow_timer_lists[i].next;
                _timer_rm_list(tl);
                tl->next=tl->prev=0;
#ifdef TIMER_DEBUG
                tl->expires_no++;
#endif
                SET_RUNNING_SLOW(tl);
                UNLOCK_SLOW_TIMER_LIST();
                ret=tl->f(*ticks, tl, tl->data);
                /* reset the configuration group handles */
                cfg_reset_all();
                if (ret==0){
                    /* one shot */
                    UNSET_RUNNING_SLOW();
                    LOCK_SLOW_TIMER_LIST();
                }else{
                    /* not one shot, re-add it */
                    LOCK_TIMER_LIST(); /* add it to the "main" list */
                    RESET_SLOW_LIST(tl);
                    if (ret!=(ticks_t)-1) /* != periodic */
                        tl->initial_timeout=ret;
                    _timer_add(*ticks, tl);
                    UNLOCK_TIMER_LIST();
                    LOCK_SLOW_TIMER_LIST();
                    UNSET_RUNNING_SLOW();
                }
            }
            (*s_idx)++;
        }
        UNLOCK_SLOW_TIMER_LIST();
    }
}
int xmpp_component_child_process(int data_pipe)
{
    int fd, maxfd, rv;
    fd_set fdset;
    xode_pool pool;
    xode_stream stream;
    struct xmpp_private_data priv;
    struct xmpp_pipe_cmd *cmd;

    while (1) {
        fd = net_connect(xmpp_host, xmpp_port);
        if (fd < 0) {
            sleep(3);
            continue;
        }

        priv.fd = fd;
        priv.running = 1;

        pool = xode_pool_new();
        stream = xode_stream_new(pool, stream_node_callback, &priv);

        net_printf(fd,
            "<?xml version='1.0'?>"
            "<stream:stream xmlns='jabber:component:accept' to='%s' "
            "version='1.0' xmlns:stream='http://etherx.jabber.org/streams'>",
            xmpp_domain);

        while (priv.running) {
            FD_ZERO(&fdset);
            FD_SET(data_pipe, &fdset);
            FD_SET(fd, &fdset);
            maxfd = fd > data_pipe ? fd : data_pipe;
            rv = select(maxfd + 1, &fdset, NULL, NULL, NULL);

            /* update the local config framework structures */
            cfg_update();

            if (rv < 0) {
                LM_ERR("select() failed: %s\n", strerror(errno));
            } else if (!rv) {
                /* timeout */
            } else if (FD_ISSET(fd, &fdset)) {
                char *buf = net_read_static(fd);
                if (!buf)
                    /* connection closed */
                    break;
                LM_DBG("server read\n[%s]\n", buf);
                xode_stream_eat(stream, buf, strlen(buf));
            } else if (FD_ISSET(data_pipe, &fdset)) {
                if (read(data_pipe, &cmd, sizeof(cmd)) != sizeof(cmd)) {
                    LM_ERR("failed to read from command pipe: %s\n",
                            strerror(errno));
                } else {
                    LM_DBG("got pipe cmd %d\n", cmd->type);
                    switch (cmd->type) {
                    case XMPP_PIPE_SEND_MESSAGE:
                        do_send_message_component(&priv, cmd);
                        break;
                    case XMPP_PIPE_SEND_PACKET:
                    case XMPP_PIPE_SEND_PSUBSCRIBE:
                    case XMPP_PIPE_SEND_PNOTIFY:
                        do_send_bulk_message_component(&priv, cmd);
                        break;
                    }
                    xmpp_free_pipe_cmd(cmd);
                }
            }
        }
        xode_pool_free(pool);
        close(fd);
    }
    return 0;
}
int xmpp_server_child_process(int data_pipe)
{
    int rv;
    int listen_fd;
    fd_set fdset;
    struct xmpp_connection *conn;

    snprintf(local_secret, sizeof(local_secret), "%s", random_secret());

    while ((listen_fd = net_listen(xmpp_domain, xmpp_port)) < 0) {
        /* ugh. */
        sleep(3);
    }

    while (1) {
        FD_ZERO(&fdset);
        FD_SET(data_pipe, &fdset);
        FD_SET(listen_fd, &fdset);

        /* check for dead connections */
        for (conn = conn_list; conn; ) {
            struct xmpp_connection *next = conn->next;
            if (conn->type == CONN_DEAD)
                conn_free(conn);
            conn = next;
        }

        for (conn = conn_list; conn; conn = conn->next) {
            /* check if we need to set up a connection */
            if (conn->type == CONN_OUTBOUND && conn->fd == -1) {
                if ((conn->fd = net_connect(conn->domain, xmpp_port)) >= 0) {
                    net_printf(conn->fd,
                        "<?xml version='1.0'?>"
                        "<stream:stream"
                        " xmlns:stream='http://etherx.jabber.org/streams'"
                        " xmlns='jabber:server' version='1.0'"
                        " xmlns:db='jabber:server:dialback'"
                        " to='%s' from='%s'>",
                        conn->domain, xmpp_domain);
                    net_printf(conn->fd,
                        "<stream:features"
                        " xmlns:stream='http://etherx.jabber.org/streams'/>");
                } else {
                    conn->type = CONN_DEAD;
                }
            }
            if (conn->fd != -1)
                FD_SET(conn->fd, &fdset);
        }

        rv = select(FD_SETSIZE, &fdset, NULL, NULL, NULL);

        /* update the local config framework structures */
        cfg_update();

        if (rv < 0) {
            LM_ERR("select() failed: %s\n", strerror(errno));
        } else if (!rv) {
            /* timeout */
        } else {
            for (conn = conn_list; conn; conn = conn->next) {
                if (conn->fd != -1 && FD_ISSET(conn->fd, &fdset)) {
                    char *buf = net_read_static(conn->fd);
                    if (!buf) {
                        conn->type = CONN_DEAD;
                    } else {
                        LM_DBG("stream (fd %d, domain '%s') read\n[%s]\n",
                                conn->fd, conn->domain, buf);
                        xode_stream_eat(conn->stream, buf, strlen(buf));
                    }
                }
            }
            if (FD_ISSET(listen_fd, &fdset)) {
                struct sockaddr_in sin;
                unsigned int len = sizeof(sin);
                int fd;
                if ((fd = accept(listen_fd, (struct sockaddr*)&sin,
                                &len)) < 0) {
                    LM_ERR("accept() failed: %s\n", strerror(errno));
                } else {
                    LM_DBG("accept()ed connection from %s:%d\n",
                            inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
                    conn_new(CONN_INBOUND, fd, NULL);
                }
            }
            if (FD_ISSET(data_pipe, &fdset)) {
                struct xmpp_pipe_cmd *cmd;
                if (read(data_pipe, &cmd, sizeof(cmd)) != sizeof(cmd)) {
                    LM_ERR("failed to read from command pipe: %s\n",
                            strerror(errno));
                } else {
                    LM_DBG("got pipe cmd %d\n", cmd->type);
                    switch (cmd->type) {
                    case XMPP_PIPE_SEND_MESSAGE:
                        do_send_message_server(cmd);
                        break;
                    case XMPP_PIPE_SEND_PACKET:
                    case XMPP_PIPE_SEND_PSUBSCRIBE:
                    case XMPP_PIPE_SEND_PNOTIFY:
                        break;
                    }
                    xmpp_free_pipe_cmd(cmd);
                }
            }
        }
    }
    return 0;
}
void cmd_pipe_cb(int fd, short event, void *arg)
{
    struct jsonrpc_pipe_cmd *cmd;

    if (read(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
        ERR("FATAL ERROR: failed to read from command pipe: %s\n",
                strerror(errno));
        return;
    }

    cfg_update();

    switch(cmd->type) {
    case CMD_CLOSE:
        if(cmd->server) {
            wait_close(cmd->server);
        }
        goto end;
    case CMD_RECONNECT:
        if(cmd->server) {
            wait_reconnect(cmd->server);
        }
        goto end;
    case CMD_CONNECT:
        if(cmd->server) {
            bev_connect(cmd->server);
        }
        goto end;
    case CMD_UPDATE_SERVER_GROUP:
        if(cmd->new_grp) {
            jsonrpc_server_group_t* old_grp = *global_server_group;
            *global_server_group = cmd->new_grp;
            free_server_group(&old_grp);
        }
        lock_release(jsonrpc_server_group_lock);
        goto end;
    case CMD_SEND:
        break;
    default:
        ERR("Unrecognized pipe command: %d\n", cmd->type);
        goto end;
    }

    /* command is SEND */
    jsonrpc_req_cmd_t* req_cmd = cmd->req_cmd;
    if(req_cmd == NULL) {
        ERR("req_cmd is NULL. Invalid send command\n");
        goto end;
    }

    jsonrpc_request_t* req = NULL;
    req = create_request(req_cmd);
    if (!req || !req->payload) {
        json_t* error = internal_error(JRPC_ERR_REQ_BUILD, NULL);
        pv_value_t val;
        char* freeme = NULL;
        jsontoval(&val, &freeme, error);
        if(req_cmd->route.len <=0 && send_to_script(&val, req_cmd)<0) {
            ERR("Failed to build request (method: %.*s, params: %.*s)\n",
                    STR(req_cmd->method), STR(req_cmd->params));
        }
        if(freeme) free(freeme);
        if(error) json_decref(error);
        free_req_cmd(req_cmd);
        goto end;
    }

    int sent = jsonrpc_send(req_cmd->conn, req, req_cmd->notify_only);

    char* type;
    if (sent<0) {
        if (req_cmd->notify_only == false) {
            type = "Request";
        } else {
            type = "Notification";
        }
        WARN("%s could not be sent to connection group: %.*s\n",
                type, STR(req_cmd->conn));
        fail_request(JRPC_ERR_SEND, req, "Failed to send request");
    }

end:
    free_pipe_cmd(cmd);
}
/*
 * Initialize children
 */
static int child_init(int rank)
{
    int i, j, mpid, cpid;

    DBG("XJAB:child_init: initializing child <%d>\n", rank);
    /* Rank 0 is main process now - 1 is the first child (janakj) */
    if(rank == 1)
    {
#ifdef HAVE_IHTTP
        /** register iHTTP callbacks -- go forward in any case */
        ihb.reg_f("xjab", "XMPP Gateway", IH_MENU_YES,
                xjab_mod_info, NULL);
        ihb.reg_f("xjabc", "XMPP connections", IH_MENU_YES,
                xjab_connections, NULL);
#endif
        if((mpid=fork())<0 )
        {
            LOG(L_ERR, "XJAB:child_init:error - cannot launch worker's"
                    " manager\n");
            return -1;
        }
        if(mpid == 0)
        {
            /** launching the workers */
            for(i=0;i<nrw;i++)
            {
                if ( (cpid=fork())<0 )
                {
                    LOG(L_ERR,"XJAB:child_init:error - cannot launch"
                            " worker\n");
                    return -1;
                }
                if (cpid == 0)
                {
                    for(j=0;j<nrw;j++)
                        if(j!=i) close(pipes[j][0]);
                    close(pipes[i][1]);
                    if(xj_wlist_set_pid(jwl, getpid(), i) < 0)
                    {
                        LOG(L_ERR, "XJAB:child_init:error setting worker's"
                                " pid\n");
                        return -1;
                    }
                    /* initialize the config framework */
                    if (cfg_child_init())
                        return -1;

                    ctx = db_ctx("jabber");
                    if (ctx == NULL) goto dberror;
                    if (db_add_db(ctx, db_url) < 0) goto dberror;
                    if (db_connect(ctx) < 0) goto dberror;

                    cmd = db_cmd(DB_GET, ctx, db_table, db_cols,
                            db_params, NULL);
                    if (!cmd) goto dberror;

                    xj_worker_process(jwl,jaddress,jport,i, cmd);

                    db_cmd_free(cmd);
                    db_ctx_free(ctx);
                    ctx = NULL;

                    /* destroy the local config */
                    cfg_child_destroy();

                    exit(0);
                }
            }
            mpid = getpid();

            /* initialize the config framework */
            if (cfg_child_init())
                return -1;

            while(1)
            {
                sleep(check_time);

                /* update the local config */
                cfg_update();

                xjab_check_workers(mpid);
            }
        }
    }

    //if(pipes)
    //{
    //    for(i=0;i<nrw;i++)
    //        close(pipes[i][0]);
    //}
    return 0;

dberror:
    if (cmd) db_cmd_free(cmd);
    cmd = NULL;
    if (ctx) db_ctx_free(ctx);
    ctx = NULL;
    return -1;
}
void async_http_cb(struct http_m_reply *reply, void *param)
{
    async_query_t *aq;
    cfg_action_t *act;
    unsigned int tindex;
    unsigned int tlabel;
    struct cell *t = NULL;
    char *p;
    str newbuf = {0, 0};
    sip_msg_t *fmsg;

    if (reply->result != NULL) {
        LM_DBG("query result = %.*s [%d]\n", reply->result->len,
                reply->result->s, reply->result->len);
    }

    /* clean process-local result variables */
    ah_error.s = NULL;
    ah_error.len = 0;
    memset(ah_reply, 0, sizeof(struct sip_msg));

    /* set process-local result variables */
    if (reply->result == NULL) {
        /* error */
        ah_error.s = reply->error;
        ah_error.len = strlen(ah_error.s);
    } else {
        /* success */

        /* check for an HTTP Via header
         * - the HTTP Via format differs from the SIP Via
         * - workaround: replace it with "Hia" so that the SIP parser
         *   ignores it */
        if((p=strfindcasestrz(reply->result, "\nVia:"))!=NULL) {
            p++;
            *p = 'H';
            LM_DBG("replaced HTTP Via with Hia [[\n%.*s]]\n",
                    reply->result->len, reply->result->s);
        }

        ah_reply->buf = reply->result->s;
        ah_reply->len = reply->result->len;

        if (parse_msg(reply->result->s, reply->result->len, ah_reply) != 0) {
            LM_DBG("failed to parse the http_reply\n");
        } else {
            if (ah_reply->first_line.u.reply.statuscode == 100) {
                newbuf.s = get_body(ah_reply);
                newbuf.len = reply->result->s + reply->result->len
                                - newbuf.s;

                if (!(newbuf.len < 0)) {
                    memset(ah_reply, 0, sizeof(struct sip_msg));
                    ah_reply->buf = newbuf.s;
                    ah_reply->len = newbuf.len;

                    if (parse_msg(ah_reply->buf, ah_reply->len,
                                ah_reply) != 0) {
                        LM_DBG("failed to parse the http_reply\n");
                    } else {
                        LM_DBG("successfully parsed http reply %p\n",
                                ah_reply);
                    }
                } else {
                    /* this should not happen! */
                    LM_WARN("something went wrong parsing the 100 Continue:"
                            " got %d len\n", newbuf.len);
                }
            } else {
                LM_DBG("successfully parsed http reply %p\n", ah_reply);
            }
        }
    }

    aq = param;
    strncpy(q_id, aq->id, strlen(aq->id));
    q_id[strlen(aq->id)] = '\0';

    act = (cfg_action_t*)aq->param;

    cfg_update();

    if (aq->query_params.suspend_transaction) {
        tindex = aq->tindex;
        tlabel = aq->tlabel;

        if (tmb.t_lookup_ident(&t, tindex, tlabel) < 0) {
            LM_ERR("transaction not found %d:%d\n", tindex, tlabel);
            LM_DBG("freeing query %p\n", aq);
            free_async_query(aq);
            return;
        }
        /* bring the list of AVPs of the transaction to the
         * current context */
        set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from);
        set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to);
        set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from);
        set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to);
        set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN,
                &t->domain_avps_from);
        set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to);

        if (t)
            tmb.unref_cell(t);

        LM_DBG("resuming transaction (%d:%d)\n", tindex, tlabel);

        if(act!=NULL)
            tmb.t_continue(tindex, tlabel, act);
    } else {
        fmsg = faked_msg_next();
        if (run_top_route(act, fmsg, 0)<0)
            LM_ERR("failure inside run_top_route\n");
    }

    free_sip_msg(ah_reply);
    free_async_query(aq);

    return;
}
void modem_process(struct modem *mdm)
{
    struct sms_msg *sms_messg;
    struct incame_sms sms;
    struct network *net;
    int i,k,len;
    int counter;
    int dont_wait;
    int empty_pipe;
    int cpms_unsupported;
    int max_mem=0, used_mem=0;

    sms_messg = 0;
    cpms_unsupported = 0;

    /* let's open/init the modem */
    LM_DBG("opening modem\n");
    if (openmodem(mdm)==-1) {
        LM_ERR("failed to open modem %s! %s \n",
                mdm->name,strerror(errno));
        return;
    }

    setmodemparams(mdm);
    initmodem(mdm,check_cds_report);

    if ( (max_mem=check_memory(mdm,MAX_MEM))==-1 ) {
        LM_WARN("CPMS command unsupported! using default values (10,10)\n");
        used_mem = max_mem = 10;
        cpms_unsupported = 1;
    }
    LM_DBG("modem maximum memory is %d\n",max_mem);

    set_gettime_function();

    while(1)
    {
        /* update the local config */
        cfg_update();

        dont_wait = 0;
        for (i=0;i<nr_of_networks && mdm->net_list[i]!=-1;i++)
        {
            counter = 0;
            empty_pipe = 0;
            net = &(networks[mdm->net_list[i]]);
            /* get msgs from the pipe */
            while( counter<net->max_sms_per_call && !empty_pipe )
            {
                /* let's read an sms from the pipe */
                len = read(net->pipe_out, &sms_messg, sizeof(sms_messg));
                if (len!=sizeof(sms_messg)) {
                    if (len>=0)
                        LM_ERR("truncated message read from pipe!"
                                " -> discarded\n");
                    else if (errno==EAGAIN)
                        empty_pipe = 1;
                    else
                        LM_ERR("pipe reading failed: %s\n",
                                strerror(errno));
                    sleep(1);
                    counter++;
                    continue;
                }
                (*queued_msgs)--;

                /* compute and send the sms */
                LM_DBG("%s processing sms for net %s:"
                        " \n\tTo:[%.*s]\n\tBody=<%d>[%.*s]\n",
                        mdm->device, net->name,
                        sms_messg->to.len,sms_messg->to.s,
                        sms_messg->text.len,sms_messg->text.len,
                        sms_messg->text.s);
                send_as_sms( sms_messg , mdm);

                counter++;
                /* if I reached the limit -> set not to wait */
                if (counter==net->max_sms_per_call)
                    dont_wait = 1;
            } /* while */
        } /* for */

        /* let's see if we have incoming sms */
        if ( !cpms_unsupported )
            if ((used_mem = check_memory(mdm,USED_MEM))==-1) {
                LM_ERR("CPMS command failed! cannot get used mem"
                        " -> using 10\n");
                used_mem = 10;
            }

        /* if any, let's get them */
        if (used_mem)
            LM_DBG("%d new SMS on modem\n",used_mem);
        for(i=1,k=1;k<=used_mem && i<=max_mem;i++) {
            if (getsms(&sms,mdm,i)!=-1) {
                k++;
                LM_DBG("SMS Get from location %d\n",i);
                /* for test ;-) -> to be removed */
                LM_DBG("SMS RECEIVED:\n\rFrom: %s %s\n\r%.*s %.*s"
                        "\n\r\"%.*s\"\n\r",sms.sender,sms.name,
                        DATE_LEN,sms.date,TIME_LEN,sms.time,
                        sms.userdatalength,sms.ascii);
                if (!sms.is_statusreport)
                    _send_sms_as_sip(&sms, mdm);
                else
                    check_sms_report(&sms);
            }
        }

        /* if reports are used, check for expired records in the
         * report queue */
        if (sms_report_type!=NO_REPORT)
            check_timeout_in_report_queue();

        /* sleep -> if it's needed */
        if (!dont_wait) {
            sleep(mdm->looping_interval);
        }
    } /* while */
}
/**
 * Selects once on sockets for receiving and sending stuff.
 * Monitors:
 *  - the fd exchange pipe, for receiving descriptors to be handled here
 *  - the tcp sockets of all serviced peers, triggering the incoming
 *    messages do_receive()
 *  - the send pipes of all serviced peers, triggering the sending of
 *    outgoing messages
 * @returns 0 on normal exit or -1 on error
 */
int receive_loop(peer *original_peer)
{
    fd_set rfds,efds;
    struct timeval tv;
    int n,max=0,cnt=0;
    AAAMessage *msg=0;
    serviced_peer_t *sp,*sp2;
    peer *p;
    int fd=-1;
    int fd_exchange_pipe_local=0;

    if (original_peer)
        fd_exchange_pipe_local = original_peer->fd_exchange_pipe_local;
    else
        fd_exchange_pipe_local = fd_exchange_pipe_unknown_local;

    /* if (shutdownx) return -1; */

    while(shutdownx&&!*shutdownx){
        n = 0;

        while(!n){
            if (shutdownx&&*shutdownx) break;
            cfg_update();

            log_serviced_peers();

            max =-1;
            FD_ZERO(&rfds);
            FD_ZERO(&efds);

            FD_SET(fd_exchange_pipe_local,&rfds);
            if (fd_exchange_pipe_local>max)
                max = fd_exchange_pipe_local;

            for(sp=serviced_peers;sp;sp=sp->next){
                if (sp->tcp_socket>=0){
                    FD_SET(sp->tcp_socket,&rfds);
                    FD_SET(sp->tcp_socket,&efds);
                    if (sp->tcp_socket>max)
                        max = sp->tcp_socket;
                }
                if (sp->send_pipe_fd>=0) {
                    FD_SET(sp->send_pipe_fd,&rfds);
                    if (sp->send_pipe_fd>max)
                        max = sp->send_pipe_fd;
                }
            }

            tv.tv_sec=1;
            tv.tv_usec=0;

            n = select(max+1,&rfds,0,&efds,&tv);
            if (n==-1){
                if (shutdownx&&*shutdownx) return 0;
                LM_ERR("select_recv(): %s\n",strerror(errno));
                for(sp=serviced_peers;sp;sp=sp2){
                    sp2 = sp->next;
                    disconnect_serviced_peer(sp,0);
                    if (sp->p && sp->p->is_dynamic)
                        drop_serviced_peer(sp,0);
                }
                sleep(1);
                break;
            }else if (n){
                if (FD_ISSET(fd_exchange_pipe_local,&rfds)){
                    /* fd exchange */
                    LM_DBG("select_recv(): There is something on the fd"
                            " exchange pipe\n");
                    p = 0;
                    fd = -1;
                    if (!receive_fd(fd_exchange_pipe_local,&fd,&p)){
                        LM_ERR("select_recv(): Error reading from fd"
                                " exchange pipe\n");
                    }else{
                        LM_DBG("select_recv(): fd exchange pipe says fd"
                                " [%d] for peer %p:[%.*s]\n", fd, p,
                                p?p->fqdn.len:0,
                                p?p->fqdn.s:0);
                        if (p){
                            sp2=0;
                            for(sp=serviced_peers;sp;sp=sp->next)
                                if (sp->p==p){
                                    sp2 = sp;
                                    break;
                                }
                            if (!sp2)
                                sp2 = add_serviced_peer(p);
                            else
                                make_send_pipe(sp2);
                            if (!sp2) {
                                LM_ERR("Error on add_serviced_peer()\n");
                                continue;
                            }
                            sp2->tcp_socket = fd;
                            if (p->state == Wait_Conn_Ack){
                                p->I_sock = fd;
                                sm_process(p,I_Rcv_Conn_Ack,0,0,fd);
                            }else{
                                p->R_sock = fd;
                            }
                        }else{
                            sp2 = add_serviced_peer(NULL);
                            if (!sp2) {
                                LM_ERR("Error on add_serviced_peer()\n");
                                continue;
                            }
                            sp2->tcp_socket = fd;
                        }
                    }
                }

                for(sp=serviced_peers;sp;){
                    if (sp->tcp_socket>=0 &&
                            FD_ISSET(sp->tcp_socket,&efds)) {
                        LM_INFO("select_recv(): [%.*s] Peer socket [%d]"
                                " found on the exception list..."
                                " dropping\n",
                                sp->p?sp->p->fqdn.len:0,
                                sp->p?sp->p->fqdn.s:0,
                                sp->tcp_socket);
                        goto drop_peer;
                    }
                    if (sp->send_pipe_fd>=0 &&
                            FD_ISSET(sp->send_pipe_fd,&rfds)) {
                        /* send */
                        LM_DBG("select_recv(): There is something on the"
                                " send pipe\n");
                        cnt = read(sp->send_pipe_fd,&msg,
                                sizeof(AAAMessage *));
                        if (cnt==0){
                            /* This is very stupid and might not work well -
                             * dropped messages... to be fixed */
                            LM_INFO("select_recv(): ReOpening pipe for"
                                    " read. This should not happen...\n");
                            close(sp->send_pipe_fd);
                            sp->send_pipe_fd = open(sp->send_pipe_name.s,
                                    O_RDONLY | O_NDELAY);
                            goto receive;
                        }
                        if (cnt<sizeof(AAAMessage *)){
                            if (cnt<0)
                                LM_ERR("select_recv(): Error reading from"
                                        " send pipe\n");
                            goto receive;
                        }
                        LM_DBG("select_recv(): Send pipe says [%p] %d\n",
                                msg,cnt);
                        if (sp->tcp_socket<0){
                            LM_ERR("select_recv(): got a signal to send"
                                    " something, but the connection was"
                                    " not opened\n");
                        } else {
                            while( (cnt=write(sp->tcp_socket,msg->buf.s,
                                            msg->buf.len))==-1 ) {
                                if (errno==EINTR)
                                    continue;
                                LM_ERR("select_recv(): [%.*s] write on"
                                        " socket [%d] returned error>"
                                        " %s... dropping\n",
                                        sp->p?sp->p->fqdn.len:0,
                                        sp->p?sp->p->fqdn.s:0,
                                        sp->tcp_socket,
                                        strerror(errno));
                                AAAFreeMessage(&msg);
                                close(sp->tcp_socket);
                                goto drop_peer;
                            }
                            if (cnt!=msg->buf.len){
                                LM_ERR("select_recv(): [%.*s] write on"
                                        " socket [%d] only wrote %d/%d"
                                        " bytes... dropping\n",
                                        sp->p?sp->p->fqdn.len:0,
                                        sp->p?sp->p->fqdn.s:0,
                                        sp->tcp_socket,
                                        cnt,
                                        msg->buf.len);
                                AAAFreeMessage(&msg);
                                close(sp->tcp_socket);
                                goto drop_peer;
                            }
                        }
                        AAAFreeMessage(&msg);
                        /* don't return, maybe there is something to read */
                    }
receive:
                    /* receive */
                    if (sp->tcp_socket>=0 &&
                            FD_ISSET(sp->tcp_socket,&rfds)) {
                        errno=0;
                        cnt = do_receive(sp);
                        if (cnt<=0) {
                            LM_INFO("select_recv(): [%.*s] read on socket"
                                    " [%d] returned %d > %s... dropping\n",
                                    sp->p?sp->p->fqdn.len:0,
                                    sp->p?sp->p->fqdn.s:0,
                                    sp->tcp_socket,
                                    cnt,
                                    errno?strerror(errno):"");
                            goto drop_peer;
                        }
                    }

                    /* go to the next serviced peer */
                    sp=sp->next;
                    continue;
drop_peer:
                    /* drop this serviced peer on error */
                    sp2 = sp->next;
                    disconnect_serviced_peer(sp,0);
                    if (sp->p && sp->p->is_dynamic)
                        drop_serviced_peer(sp,0);
                    sp = sp2;
                }
            }
        }
    }
    return 0;
}
void mi_datagram_server(int rx_sock, int tx_sock)
{
    struct mi_root *mi_cmd;
    struct mi_root *mi_rpl;
    struct mi_handler *hdl;
    struct mi_cmd * f;
    datagram_stream dtgram;

    int ret, len;

    ret = 0;
    f = 0;

    while(1) { /* read the datagram */
        /* update the local config framework structures */
        cfg_update();

        memset(mi_buf, 0, DATAGRAM_SOCK_BUF_SIZE);
        reply_addr_len = sizeof(reply_addr);

        /* get the client's address */
        ret = recvfrom(rx_sock, mi_buf, DATAGRAM_SOCK_BUF_SIZE, 0,
                (struct sockaddr*)&reply_addr, &reply_addr_len);
        if (ret == -1) {
            LM_ERR("recvfrom: (%d) %s\n", errno, strerror(errno));
            if ((errno == EINTR) || (errno == EAGAIN)
                    || (errno == EWOULDBLOCK) || (errno == ECONNREFUSED)) {
                LM_DBG("got %d (%s), going on\n", errno, strerror(errno));
                continue;
            }
            LM_DBG("error in recvfrom\n");
            continue;
        }
        if(ret == 0)
            continue;
        LM_DBG("received %.*s\n", ret, mi_buf);
        if(ret > DATAGRAM_SOCK_BUF_SIZE) {
            LM_ERR("buffer overflow\n");
            continue;
        }
        LM_DBG("mi_buf is %s and we have received %i bytes\n", mi_buf, ret);
        dtgram.start = mi_buf;
        dtgram.len = ret;
        dtgram.current = dtgram.start;

        /* analyze the command -- from the first line */
        ret = identify_command(&dtgram, &f);
        if(ret != 0) {
            LM_ERR("command not available\n");
            mi_send_dgram(tx_sock, MI_COMMAND_NOT_AVAILABLE,
                    MI_COMMAND_AVAILABLE_LEN,
                    (struct sockaddr*)&reply_addr, reply_addr_len,
                    mi_socket_timeout);
            continue;
        }
        LM_DBG("we have a valid command\n");

        /* if an asynchronous cmd, build the async handler */
        if (f->flags&MI_ASYNC_RPL_FLAG) {
            hdl = build_async_handler(mi_socket_domain,
                    (struct sockaddr*)&reply_addr, reply_addr_len);
            if (hdl==0) {
                LM_ERR("failed to build async handler\n");
                mi_send_dgram(tx_sock, MI_INTERNAL_ERROR,
                        MI_INTERNAL_ERROR_LEN,
                        (struct sockaddr*)&reply_addr, reply_addr_len,
                        mi_socket_timeout);
                continue;
            }
        } else {
            hdl = 0;
        }

        LM_DBG("after identifying the command, the received datagram"
                " is %s\n", dtgram.current);

        /* if no params required */
        if (f->flags&MI_NO_INPUT_FLAG) {
            LM_DBG("the command has no params\n");
            mi_cmd = 0;
        } else {
            LM_DBG("parsing the command's params\n");
            mi_cmd = mi_datagram_parse_tree(&dtgram);
            if (mi_cmd==NULL){
                LM_ERR("failed to parse the MI tree\n");
                mi_send_dgram(tx_sock, MI_PARSE_ERROR, MI_PARSE_ERROR_LEN,
                        (struct sockaddr*)&reply_addr, reply_addr_len,
                        mi_socket_timeout);
                free_async_handler(hdl);
                continue;
            }
            mi_cmd->async_hdl = hdl;
        }

        LM_DBG("done parsing the mi tree\n");
        if ( (mi_rpl=run_mi_cmd(f, mi_cmd))==0 ) {
            /* error while running the command */
            LM_ERR("failed to process the command\n");
            mi_send_dgram(tx_sock, MI_COMMAND_FAILED, MI_COMMAND_FAILED_LEN,
                    (struct sockaddr*)&reply_addr, reply_addr_len,
                    mi_socket_timeout);
            goto failure;
        }

        /* the command exited well */
        LM_DBG("command process (%s) succeeded\n",f->name.s);

        if (mi_rpl!=MI_ROOT_ASYNC_RPL) {
            if(mi_datagram_write_tree(&dtgram, mi_rpl) != 0){
                LM_ERR("failed to build the response\n");
                goto failure;
            }

            len = dtgram.current - dtgram.start;
            ret = mi_send_dgram(tx_sock, dtgram.start, len,
                    (struct sockaddr*)&reply_addr, reply_addr_len,
                    mi_socket_timeout);
            if (ret>0){
                LM_DBG("the response: %s has been sent in %i octets\n",
                        dtgram.start, ret);
            } else {
                LM_ERR("failed to send the response\n");
            }
            free_mi_tree(mi_rpl);
            free_async_handler(hdl);
            if (mi_cmd) free_mi_tree(mi_cmd);
        } else {
            if (mi_cmd) free_mi_tree(mi_cmd);
        }
        continue;

failure:
        free_async_handler(hdl);
        /* destroy the request tree */
        if (mi_cmd) free_mi_tree(mi_cmd);
        /* destroy the reply tree */
        if (mi_rpl) free_mi_tree(mi_rpl);
        continue;
    }
}