/*! \brief releases expired connections and cleans up bad ones (state<0) */
static inline void tcp_receive_timeout(void)
{
	struct tcp_connection* con;
	struct tcp_connection* next;
	unsigned int ticks;

	ticks=get_ticks();
	for (con=tcp_conn_lst; con; con=next) {
		next=con->c_next; /* safe for removing */
		if (con->state<0){ /* kill bad connections */
			/* S_CONN_BAD or S_CONN_ERROR, remove it */
			/* fd will be closed in release_tcpconn */
			io_watch_del(&io_w, con->fd, -1, IO_FD_CLOSING);
			tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
			con->state=S_CONN_BAD;
			release_tcpconn(con, CONN_ERROR, tcpmain_sock);
			continue;
		}
		if (con->timeout<=ticks){
			LM_DBG("%p expired - (%d, %d) lt=%d\n",
					con, con->timeout, ticks, con->lifetime);
			/* fd will be closed in release_tcpconn */
			io_watch_del(&io_w, con->fd, -1, IO_FD_CLOSING);
			tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
			if (con->msg_attempts)
				release_tcpconn(con, CONN_ERROR, tcpmain_sock);
			else
				release_tcpconn(con, CONN_RELEASE, tcpmain_sock);
		}
	}
}
/*! \brief
 * handles an io event on one of the watched tcp connections
 *
 * \param tcpconn - pointer to the tcp_connection for which we have an io ev.
 * \param fd_i    - index in the fd_array table (needed for delete)
 * \return handle_* return convention, but on success it always returns 0
 *         (because it's one-shot, after a successful execution the fd is
 *          removed from tcp_main's watch fd list and passed to a child =>
 *          tcp_main is not interested in further io events that might be
 *          queued for this fd)
 */
inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, int fd_i)
{
	int fd;

	/* is refcnt!=0 really necessary?
	 * No, in fact it's a bug: I can have the following situation: a send-only
	 * tcp connection used by n processes simultaneously => refcnt = n. At
	 * the same time I can have a read event and this situation is perfectly
	 * valid. -- andrei */
#if 0
	if ((tcpconn->refcnt!=0)){
		/* FIXME: might be valid for sigio_rt iff fd flags are not cleared
		 * (there is a short window in which it could generate a sig
		 *  that would be caught by tcp_main) */
		LM_CRIT("io event on referenced tcpconn (%p), refcnt=%d, fd=%d\n",
				tcpconn, tcpconn->refcnt, tcpconn->s);
		return -1;
	}
#endif
	/* pass it to a child, so remove it from the io watch list */
	LM_DBG("data available on %p %d\n", tcpconn, tcpconn->s);
	if (io_watch_del(&io_h, tcpconn->s, fd_i, 0)==-1)
		goto error;
	tcpconn->flags|=F_CONN_REMOVED;
	tcpconn_ref(tcpconn); /* refcnt ++ */
	if (send2child(tcpconn)<0){
		LM_ERR("no children available\n");
		TCPCONN_LOCK;
		tcpconn->refcnt--;
		if (tcpconn->refcnt==0){
			fd=tcpconn->s;
			_tcpconn_rm(tcpconn);
			close(fd);
		}else
			tcpconn->timeout=0; /* force expire */
		TCPCONN_UNLOCK;
	}
	return 0; /* we are not interested in possibly queued io events,
	             the fd was either passed to a child, or closed */
error:
	return -1;
}
/*! \brief very inefficient for now - FIXME
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * \todo FIXME (very inefficient for now)
 */
static inline void tcpconn_timeout(int force)
{
	struct tcp_connection *c, *next;
	unsigned int ticks;
	unsigned h;
	int fd;

	ticks=get_ticks();
	TCPCONN_LOCK; /* fixme: we can lock only on delete IMO */
	for(h=0; h<TCP_ID_HASH_SIZE; h++){
		c=tcpconn_id_hash[h];
		while(c){
			next=c->id_next;
			if (force || ((c->refcnt==0) && (ticks>c->timeout))) {
				if (!force)
					LM_DBG("timeout for hash=%d - %p (%d > %d)\n",
							h, c, ticks, c->timeout);
				fd=c->s;
#ifdef USE_TLS
				if (c->type==PROTO_TLS)
					tls_close(c, fd);
#endif
				_tcpconn_rm(c);
				if ((!force) && (fd>0) && (c->refcnt==0)) {
					if (!(c->flags & F_CONN_REMOVED)){
						io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
						c->flags|=F_CONN_REMOVED;
					}
					close(fd);
				}
				tcp_connections_no--;
			}
			c=next;
		}
	}
	TCPCONN_UNLOCK;
}
static ticks_t tcpconn_read_timeout(ticks_t t, struct timer_ln* tl, void* data)
{
	struct tcp_connection *c;

	c=(struct tcp_connection*)data;
	/* or (struct tcp...*)(tl-offset(c->timer)) */

	if (likely(!(c->state<0) && TICKS_LT(t, c->timeout))){
		/* timeout extended, exit */
		return (ticks_t)(c->timeout - t);
	}
	/* if conn->state is ERROR or BAD => force timeout too */
	if (unlikely(io_watch_del(&io_w, c->fd, -1, IO_FD_CLOSING)<0)){
		LM_ERR("io_watch_del failed for %p"
				" id %d fd %d, state %d, flags %x, main fd %d\n",
				c, c->id, c->fd, c->state, c->flags, c->s);
	}
	tcpconn_listrm(tcp_conn_lst, c, c_next, c_prev);
	LM_WARN("%s:%d %s releasing\n", __FILE__, __LINE__, __PRETTY_FUNCTION__);
	release_tcpconn(c, (c->state<0)?CONN_ERROR:CONN_RELEASE, tcpmain_sock);
	return 0;
}
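/* Illustrative sketch (not part of the original source): the contract a
 * local timer handler such as tcpconn_read_timeout() above follows -
 * returning a non-zero tick count re-arms the timer for that many ticks,
 * returning 0 stops it. Names below are hypothetical; the block is kept in
 * #if 0 so it is never compiled. */
#if 0
static ticks_t sketch_timer_handler(ticks_t t, struct timer_ln* tl, void* data)
{
	struct tcp_connection* c=(struct tcp_connection*)data;

	if (TICKS_LT(t, c->timeout))
		return (ticks_t)(c->timeout - t); /* not expired yet, fire again later */
	/* ... perform the expiry work here ... */
	return 0; /* done, do not re-arm */
}
#endif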
/*! \brief handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
 *
 * \param p    - pointer in the ser processes array (pt[]), to the entry for
 *               which an io event was detected
 * \param fd_i - fd index in the fd_array (useful for optimizing
 *               io_watch_deletes)
 * \return handle_* return convention:
 *          - -1 on error reading from the fd,
 *          -  0 on EAGAIN or when no more io events are queued
 *             (receive buffer empty),
 *          - >0 on successful reads from the fd (the receive buffer might
 *            be non-empty).
 */
inline static int handle_ser_child(struct process_table* p, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2];
	int cmd;
	int bytes;
	int ret;
	int fd;

	ret=-1;
	if (p->unix_sock<=0){
		/* (we can't have a fd==0, 0 is never closed) */
		LM_CRIT("fd %d for %d (pid %d)\n",
				p->unix_sock, (int)(p-&pt[0]), p->pid);
		goto error;
	}
	/* get all bytes and the fd (if transmitted)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
						MSG_DONTWAIT);
	if (bytes<(int)sizeof(response)){
		/* too few bytes read */
		if (bytes==0){
			/* EOF -> bad, child has died */
			LM_DBG("dead child %d, pid %d (shutting down?)\n",
					(int)(p-&pt[0]), p->pid);
			/* don't listen on it any more */
			io_watch_del(&io_h, p->unix_sock, fd_i, 0);
			goto error; /* child dead => no further io events from it */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LM_CRIT("read from child %d (pid %d): %s [%d]\n",
						(int)(p-&pt[0]), p->pid, strerror(errno), errno);
				ret=-1;
			}else{
				ret=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LM_CRIT("too few bytes received (%d)\n", bytes);
			ret=0; /* something was read so there is no error;
			          otoh if receive_fd returned less than requested
			          => the receive buffer is empty => no more io
			          queued on this fd */
			goto end;
		}
	}
	ret=1; /* something was received, there might be more queued */
	LM_DBG("read response= %lx, %ld, fd %d from %d (%d)\n",
			response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (tcpconn==0){
		LM_CRIT("null tcpconn pointer received from child %d (pid %d): "
				"%lx, %lx\n", (int)(p-&pt[0]), p->pid,
				response[0], response[1]);
		goto end;
	}
	switch(cmd){
		case CONN_ERROR:
			if (!(tcpconn->flags & F_CONN_REMOVED) && (tcpconn->s!=-1)){
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
				tcpconn->flags|=F_CONN_REMOVED;
			}
			tcpconn_destroy(tcpconn); /* will close also the fd */
			break;
		case CONN_GET_FD:
			/* send the requested FD */
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
							tcpconn->s)<=0){
				LM_ERR("send_fd failed\n");
			}
			break;
		case CONN_NEW:
			/* update the fd in the requested tcpconn */
			/* WARNING: take care of setting refcnt properly to
			 * avoid race conditions */
			if (fd==-1){
				LM_CRIT("cmd CONN_NEW: no fd received\n");
				break;
			}
			tcpconn->s=fd;
			/* add tcpconn to the list */
			tcpconn_add(tcpconn);
			/* update the timeout */
			tcpconn->timeout=get_ticks()+tcp_con_lifetime;
			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
			tcpconn->flags&=~F_CONN_REMOVED;
			break;
		default:
			LM_CRIT("unknown cmd %d\n", cmd);
	}
end:
	return ret;
error:
	return -1;
}
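/* Illustrative sketch (not part of the original source): the worker-side
 * counterpart of the CONN_NEW branch handled above. A "generic" ser process
 * that has just connected a new outgoing TCP socket would announce it to
 * tcp_main roughly like this; the unix socket name unix_tcp_sock and the
 * function name are hypothetical (the real code lives in the tcp_send()
 * path). Kept in #if 0 so it is never compiled. */
#if 0
static int sketch_announce_new_conn(struct tcp_connection* c, int unix_tcp_sock)
{
	long request[2];

	request[0]=(long)c;  /* connection pointer, echoed back by tcp_main */
	request[1]=CONN_NEW; /* command interpreted by handle_ser_child() */
	/* send the command together with the connected fd (fd passing) */
	if (send_fd(unix_tcp_sock, request, sizeof(request), c->s)<=0){
		LM_ERR("failed to send the new connection to tcp_main\n");
		return -1;
	}
	return 0;
}
#endif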
/*! \brief handles io from a tcp child process
 * \param tcp_c - pointer in the tcp_children array, to the entry for
 *                which an io event was detected
 * \param fd_i  - fd index in the fd_array (useful for optimizing
 *                io_watch_deletes)
 * \return handle_* return convention: -1 on error, 0 on EAGAIN (no more
 *         io events queued), >0 on success. success/error refer only to
 *         the reads from the fd.
 */
inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
{
	struct tcp_connection* tcpconn;
	long response[2];
	int cmd;
	int bytes;

	if (tcp_c->unix_sock<=0){
		/* (we can't have a fd==0, 0 is never closed) */
		LM_CRIT("fd %d for %d (pid %d, ser no %d)\n",
				tcp_c->unix_sock, (int)(tcp_c-&tcp_children[0]),
				tcp_c->pid, tcp_c->proc_no);
		goto error;
	}
	/* read until sizeof(response)
	 * (this is a SOCK_STREAM so read is not atomic) */
	bytes=recv_all(tcp_c->unix_sock, response, sizeof(response),
					MSG_DONTWAIT);
	if (bytes<(int)sizeof(response)){
		if (bytes==0){
			/* EOF -> bad, child has died */
			LM_DBG("dead tcp child %d (pid %d, no %d) (shutting down?)\n",
					(int)(tcp_c-&tcp_children[0]),
					tcp_c->pid, tcp_c->proc_no);
			/* don't listen on it any more */
			io_watch_del(&io_h, tcp_c->unix_sock, fd_i, 0);
			goto error; /* eof, so no more io here; it's ok to return error */
		}else if (bytes<0){
			/* EAGAIN is ok if we try to empty the buffer
			 * e.g.: SIGIO_RT overflow mode or EPOLL ET */
			if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
				LM_CRIT("read from tcp child %ld (pid %d, no %d) %s [%d]\n",
						(long)(tcp_c-&tcp_children[0]), tcp_c->pid,
						tcp_c->proc_no, strerror(errno), errno);
			}else{
				bytes=0;
			}
			/* try to ignore ? */
			goto end;
		}else{
			/* should never happen */
			LM_CRIT("too few bytes received (%d)\n", bytes);
			bytes=0; /* something was read so there is no error;
			            otoh if receive_fd returned less than requested
			            => the receive buffer is empty => no more io
			            queued on this fd */
			goto end;
		}
	}
	LM_DBG("reader response= %lx, %ld from %d\n",
			response[0], response[1], (int)(tcp_c-&tcp_children[0]));
	cmd=response[1];
	tcpconn=(struct tcp_connection*)response[0];
	if (tcpconn==0){
		/* should never happen */
		LM_CRIT("null tcpconn pointer received from tcp child %d (pid %d): "
				"%lx, %lx\n", (int)(tcp_c-&tcp_children[0]), tcp_c->pid,
				response[0], response[1]);
		goto end;
	}
	switch(cmd){
		case CONN_RELEASE:
			tcp_c->busy--;
			if (tcpconn->state==S_CONN_BAD){
				tcpconn_destroy(tcpconn);
				break;
			}
			/* update the timeout (lifetime) */
			set_tcp_timeout(tcpconn);
			tcpconn_put(tcpconn);
			/* must be after the de-ref */
			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
			tcpconn->flags&=~F_CONN_REMOVED;
			LM_DBG("cmd CONN_RELEASE %p refcnt= %d\n",
					tcpconn, tcpconn->refcnt);
			break;
		case CONN_ERROR:
		case CONN_DESTROY:
		case CONN_EOF:
			/* WARNING: this will auto-dec. refcnt! */
			tcp_c->busy--;
			/* main doesn't listen on it => we don't have to delete it
			 if (tcpconn->s!=-1)
				io_watch_del(&io_h, tcpconn->s, -1, IO_FD_CLOSING);
			*/
			tcpconn_destroy(tcpconn); /* closes also the fd */
			break;
		default:
			LM_CRIT("unknown cmd %d from tcp reader %d\n",
					cmd, (int)(tcp_c-&tcp_children[0]));
	}
end:
	return bytes;
error:
	return -1;
}
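/* Illustrative sketch (not part of the original source): what a tcp reader
 * child conceptually sends back for the CONN_RELEASE / CONN_ERROR / CONN_EOF
 * commands consumed above. The real code is release_tcpconn() in the reader;
 * the use of a send_all() helper (the sending counterpart of the recv_all()
 * call above) is an assumption here. Kept in #if 0 so it is never compiled. */
#if 0
static void sketch_release_tcpconn(struct tcp_connection* c, long state,
									int unix_sock)
{
	long response[2];

	response[0]=(long)c;  /* connection pointer, looked up again by tcp_main */
	response[1]=state;    /* CONN_RELEASE, CONN_ERROR, CONN_EOF, ... */
	if (send_all(unix_sock, response, sizeof(response))<=0)
		LM_ERR("failed to send release command to tcp_main\n");
}
#endif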
/*! \brief
 * handle io routine, based on the fd_map type
 * (it will be called from io_wait_loop* )
 * params:  fm  - pointer to a fd hash entry
 *          idx - index in the fd_array (or -1 if not known)
 * return: -1 on error, or when we are not interested any more on reads
 *            from this fd (e.g.: we are closing it)
 *          0 on EAGAIN or when by some other way it is known that no more
 *            io events are queued on the fd (the receive buffer is empty).
 *            Useful to detect when there are no more io events queued for
 *            sigio_rt, epoll_et, kqueue.
 *         >0 on successful read from the fd (when there might be more io
 *            queued -- the receive buffer might still be non-empty)
 */
inline static int handle_io(struct fd_map* fm, int idx)
{
	int ret;
	int n;
	struct tcp_connection* con;
	int s;
	long resp;

	switch(fm->type){
		case F_TCPMAIN:
again:
			ret=n=receive_fd(fm->fd, &con, sizeof(con), &s, 0);
			LM_DBG("received n=%d con=%p, fd=%d\n", n, con, s);
			if (n<0){
				if (errno == EWOULDBLOCK || errno == EAGAIN){
					ret=0;
					break;
				}else if (errno == EINTR)
					goto again;
				else{
					LM_CRIT("read_fd: %s \n", strerror(errno));
					abort(); /* big error */
				}
			}
			if (n==0){
				LM_WARN("0 bytes read\n");
				break;
			}
			if (con==0){
				LM_CRIT("null pointer\n");
				break;
			}
			con->fd=s;
			if (s==-1) {
				LM_ERR("read_fd: no fd read\n");
				goto con_error;
			}
			if (con==tcp_conn_lst){
				LM_CRIT("duplicate connection received: %p, id %d, fd %d,"
						" refcnt %d state %d (n=%d)\n", con, con->id,
						con->fd, con->refcnt, con->state, n);
				release_tcpconn(con, CONN_ERROR, tcpmain_sock);
				break; /* try to recover */
			}
			/* reset the per process TCP req struct */
			init_tcp_req(&current_req);
			/* 0 attempts so far for this SIP MSG */
			con->msg_attempts = 0;
			/* must be before io_watch_add, io_watch_add might catch some
			 * already existing events => might call handle_io and
			 * handle_io might decide to del. the new connection =>
			 * must be in the list */
			tcpconn_listadd(tcp_conn_lst, con, c_next, c_prev);
			con->timeout=get_ticks()+TCP_CHILD_MAX_MSG_TIME;
			if (io_watch_add(&io_w, s, F_TCPCONN, con)<0){
				LM_CRIT("failed to add new socket to the fd list\n");
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
				goto con_error;
			}
			break;
		case F_TCPCONN:
			con=(struct tcp_connection*)fm->data;
			resp=tcp_read_req(con, &ret);
			if (resp<0) {
				ret=-1; /* some error occurred */
				io_watch_del(&io_w, con->fd, idx, IO_FD_CLOSING);
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
				con->state=S_CONN_BAD;
				release_tcpconn(con, resp, tcpmain_sock);
			}
			break;
		case F_NONE:
			LM_CRIT("empty fd map %p (%d): {%d, %d, %p}\n",
					fm, (int)(fm-io_w.fd_hash), fm->fd, fm->type, fm->data);
			goto error;
		default:
			LM_CRIT("unknown fd type %d\n", fm->type);
			goto error;
	}
	return ret;
con_error:
	con->state=S_CONN_BAD;
	release_tcpconn(con, CONN_ERROR, fm->fd);
	return ret;
error:
	return -1;
}
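/* Illustrative sketch (not part of the original source): how a caller in the
 * io_wait_loop_* style is expected to use the handle_io() return convention
 * documented above when running edge triggered - keep calling while >0
 * (more data may be queued), stop on 0 (buffer drained) or -1 (error / fd
 * being closed). The function name is hypothetical; kept in #if 0 so it is
 * never compiled. */
#if 0
static void sketch_drain_fd(struct fd_map* fm)
{
	int r;

	do{
		r=handle_io(fm, -1 /* idx not known */);
	}while(r>0);
}
#endif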
/* Responsible for reading the request
 *
 *	if it returns >= 0 : it keeps the connection for further usage
 *	                     or releases it manually
 *
 *	if it returns <  0 : the connection should be released by the
 *	                     upper layer
 */
int tcp_read_req(struct tcp_connection* con, int* bytes_read)
{
	int bytes;
	int total_bytes;
	int resp;
	long size;
	struct tcp_req* req;
	char c;
	struct receive_info local_rcv;
	char *msg_buf;
	int msg_len;

	bytes=-1;
	total_bytes=0;
	resp=CONN_RELEASE;

	if (con->con_req) {
		req=con->con_req;
		LM_DBG("Using the per connection buffer\n");
	} else {
		LM_DBG("Using the global (per process) buffer\n");
		req=&current_req;
	}

#ifdef USE_TLS
	if (con->type==PROTO_TLS){
		if (tls_fix_read_conn(con)!=0){
			resp=CONN_ERROR;
			goto end_req;
		}
		if (con->state!=S_CONN_OK)
			goto end_req; /* not enough data */
	}
#endif

again:
	if (req->error==TCP_REQ_OK){
		bytes=tcp_read_headers(con, req);
//#ifdef EXTRA_DEBUG
		/* if timeout state=0; goto end_req; */
		LM_DBG("read= %d bytes, parsed=%d, state=%d, error=%d\n",
				bytes, (int)(req->parsed-req->start), req->state,
				req->error);
		LM_DBG("last char=0x%02X, parsed msg=\n%.*s\n",
				*(req->parsed-1), (int)(req->parsed-req->start),
				req->start);
//#endif
		if (bytes==-1){
			LM_ERR("failed to read\n");
			resp=CONN_ERROR;
			goto end_req;
		}
		total_bytes+=bytes;
		/* eof check:
		 * is EOF if eof on fd and req. not complete yet,
		 * if req. is complete we might have a second unparsed
		 * request after it, so postpone release_with_eof */
		if ((con->state==S_CONN_EOF) && (req->complete==0)) {
			LM_DBG("EOF\n");
			resp=CONN_EOF;
			goto end_req;
		}
	}
	if (req->error!=TCP_REQ_OK){
		LM_ERR("bad request, state=%d, error=%d buf:\n%.*s\nparsed:\n%.*s\n",
				req->state, req->error,
				(int)(req->pos-req->buf), req->buf,
				(int)(req->parsed-req->start), req->start);
		LM_DBG("- received from: port %d\n", con->rcv.src_port);
		print_ip("- received from: ip ", &con->rcv.src_ip, "\n");
		resp=CONN_ERROR;
		goto end_req;
	}

	if (req->complete){
#ifdef EXTRA_DEBUG
		LM_DBG("end of header part\n");
		LM_DBG("- received from: port %d\n", con->rcv.src_port);
		print_ip("- received from: ip ", &con->rcv.src_ip, "\n");
		LM_DBG("headers:\n%.*s.\n",
				(int)(req->body-req->start), req->start);
#endif
		if (req->has_content_len){
			LM_DBG("content-length= %d\n", req->content_len);
#ifdef EXTRA_DEBUG
			LM_DBG("body:\n%.*s\n", req->content_len, req->body);
#endif
		}else{
			req->error=TCP_REQ_BAD_LEN;
			LM_ERR("content length not present or unparsable\n");
			resp=CONN_ERROR;
			goto end_req;
		}

		/* update the timeout - we successfully read the request */
		con->timeout=get_ticks()+TCP_CHILD_MAX_MSG_TIME;

		/* if we are here everything is nice and ok */
		update_stat( pt[process_no].load, +1 );
		resp=CONN_RELEASE;
#ifdef EXTRA_DEBUG
		LM_DBG("calling receive_msg(%p, %d, )\n",
				req->start, (int)(req->parsed-req->start));
#endif
		/* rcv.bind_address should always be !=0 */
		bind_address=con->rcv.bind_address;
		/* just for debugging use sendipv4 as receiving socket FIXME*/
		/*
		if (con->rcv.dst_ip.af==AF_INET6){
			bind_address=sendipv6_tcp;
		}else{
			bind_address=sendipv4_tcp;
		}
		*/
		con->rcv.proto_reserved1=con->id; /* copy the id */
		c=*req->parsed; /* ugly hack: zero term the msg & save the previous
		                   char, req->parsed should be ok because we always
		                   alloc BUF_SIZE+1 */
		*req->parsed=0;

		/* prepare for the next request */
		size=req->pos-req->parsed;

		if (req->state==H_PING_CRLFCRLF) {
			/* send the reply */
			if (tcp_send(con->rcv.bind_address, con->rcv.proto, CRLF,
					CRLF_LEN, &(con->rcv.src_su),
					con->rcv.proto_reserved1) < 0) {
				LM_ERR("CRLF pong - tcp_send() failed\n");
			}

			if (!size) {
				/* we can release the connection */
				io_watch_del(&io_w, con->fd, -1, IO_FD_CLOSING);
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
				if (con->state==S_CONN_EOF)
					release_tcpconn(con, CONN_EOF, tcpmain_sock);
				else
					release_tcpconn(con, CONN_RELEASE, tcpmain_sock);
			}
		} else {
			msg_buf = req->start;
			msg_len = req->parsed-req->start;
			local_rcv = con->rcv;

			if (!size) {
				/* nothing else was read - we can release the connection */
				LM_DBG("We're releasing the connection in state %d\n",
						con->state);
				if (req != &current_req) {
					/* we have the buffer in the connection tied buffer -
					 * detach it, release the conn and free it afterwards */
					con->con_req = NULL;
				}

				io_watch_del(&io_w, con->fd, -1, IO_FD_CLOSING);
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);

				/* if we have EOF, signal that to MAIN as well,
				 * otherwise just pass it back */
				if (con->state==S_CONN_EOF)
					release_tcpconn(con, CONN_EOF, tcpmain_sock);
				else
					release_tcpconn(con, CONN_RELEASE, tcpmain_sock);
			} else {
				LM_DBG("We still have things on the pipe - "
						"keeping the connection\n");
			}

			if (receive_msg(msg_buf, msg_len, &local_rcv) < 0)
				LM_ERR("receive_msg failed\n");

			if (req != &current_req)
				pkg_free(req);
		}

		*req->parsed=c;

		update_stat( pt[process_no].load, -1 );

		if (size)
			memmove(req->buf, req->parsed, size);
#ifdef EXTRA_DEBUG
		LM_DBG("preparing for new request, kept %ld bytes\n", size);
#endif
		req->pos=req->buf+size;
		req->parsed=req->buf;
		req->start=req->buf;
		req->body=0;
		req->error=TCP_REQ_OK;
		req->state=H_SKIP_EMPTY;
		req->complete=req->content_len=req->has_content_len=0;
		req->bytes_to_go=0;
		con->msg_attempts = 0;

		/* if we still have some unparsed bytes, try to parse them too */
		if (size)
			goto again;
	} else {
		/* request not complete - check if the thresholds are exceeded */
		con->msg_attempts++;
		if (con->msg_attempts == TCP_CHILD_MAX_MSG_CHUNK) {
			LM_ERR("Made %u read attempts but message is not complete yet - "
					"closing connection\n", con->msg_attempts);
			resp = CONN_ERROR;
			goto end_req;
		}

		if (req == &current_req) {
			/* let's duplicate this - most likely another conn will come in */
			LM_DBG("We didn't manage to read a full request."
					" Back to child poll\n");
			/* FIXME - PKG or SHM ? */
			con->con_req = pkg_malloc(sizeof(struct tcp_req));
			if (con->con_req == NULL) {
				LM_ERR("No more mem for dynamic con request buffer\n");
				resp = CONN_ERROR;
				goto end_req;
			}
			con->con_req->content_len = req->content_len;
			con->con_req->bytes_to_go = req->bytes_to_go;
			con->con_req->error = req->error;
			con->con_req->state = req->state;

			if (req->pos != req->buf) {
				/* we have read some bytes */
				memcpy(con->con_req->buf, req->buf, req->pos-req->buf);
				con->con_req->pos = con->con_req->buf + (req->pos-req->buf);
			} else {
				con->con_req->pos = con->con_req->buf;
			}

			if (req->start != req->buf)
				con->con_req->start = con->con_req->buf +
							(req->start-req->buf);
			else
				con->con_req->start = con->con_req->buf;

			if (req->parsed != req->buf)
				con->con_req->parsed = con->con_req->buf +
							(req->parsed-req->buf);
			else
				con->con_req->parsed = con->con_req->buf;

			/* zero out the per process req for the future SIP msg */
			init_tcp_req(&current_req);
		}
	}

	LM_DBG("tcp_read_req end\n");
end_req:
	if (bytes_read)
		*bytes_read=total_bytes;
	return resp;
}
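/* Illustrative sketch (not part of the original source): the pipelining
 * carry-over done near the end of tcp_read_req() above, shown in isolation.
 * After a complete message ends at 'parsed', any already-read bytes between
 * 'parsed' and 'pos' belong to the next request and are moved to the front
 * of the buffer before parsing restarts. The struct is a stripped-down
 * stand-in for struct tcp_req; kept in #if 0 so it is never compiled. */
#if 0
struct sketch_req{ char buf[BUF_SIZE+1]; char* pos; char* parsed; };

static void sketch_keep_leftover(struct sketch_req* r)
{
	long size;

	size=r->pos - r->parsed;              /* unparsed bytes after the message */
	if (size)
		memmove(r->buf, r->parsed, size); /* slide them to the buffer start */
	r->pos=r->buf+size;                   /* reading resumes after the leftover */
	r->parsed=r->buf;                     /* parsing restarts at the beginning */
}
#endif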
/* handle io routine, based on the fd_map type
 * (it will be called from io_wait_loop* )
 * params:  fm     - pointer to a fd hash entry
 *          events - poll-style events reported for the fd
 *          idx    - index in the fd_array (or -1 if not known)
 * return: -1 on error, or when we are not interested any more on reads
 *            from this fd (e.g.: we are closing it)
 *          0 on EAGAIN or when by some other way it is known that no more
 *            io events are queued on the fd (the receive buffer is empty).
 *            Useful to detect when there are no more io events queued for
 *            sigio_rt, epoll_et, kqueue.
 *         >0 on successful read from the fd (when there might be more io
 *            queued -- the receive buffer might still be non-empty)
 */
inline static int handle_io(struct fd_map* fm, short events, int idx)
{
	int ret;
	int n;
	int read_flags;
	struct tcp_connection* con;
	int s;
	long resp;
	ticks_t t;

	/* update the local config */
	cfg_update();

	switch(fm->type){
		case F_TCPMAIN:
again:
			ret=n=receive_fd(fm->fd, &con, sizeof(con), &s, 0);
			LM_DBG("received n=%d con=%p, fd=%d\n", n, con, s);
			if (unlikely(n<0)){
				if (errno == EWOULDBLOCK || errno == EAGAIN){
					ret=0;
					break;
				}else if (errno == EINTR)
					goto again;
				else{
					LM_CRIT("read_fd: %s \n", strerror(errno));
					abort(); /* big error */
				}
			}
			if (unlikely(n==0)){
				LM_ERR("0 bytes read\n");
				goto error;
			}
			if (unlikely(con==0)){
				LM_CRIT("null pointer\n");
				goto error;
			}
			con->fd=s;
			if (unlikely(s==-1)) {
				LM_ERR("read_fd: no fd read\n");
				goto con_error;
			}
			con->reader_pid=my_pid();
			if (unlikely(con==tcp_conn_lst)){
				LM_CRIT("duplicate connection received: %p, id %d, fd %d,"
						" refcnt %d state %d (n=%d)\n", con, con->id,
						con->fd, atomic_get(&con->refcnt), con->state, n);
				goto con_error;
				break; /* try to recover */
			}
			if (unlikely(con->state==S_CONN_BAD)){
				LM_WARN("received an already bad connection: %p id %d"
						" refcnt %d\n", con, con->id,
						atomic_get(&con->refcnt));
				goto con_error;
			}
			/* if we received the fd there is most likely data waiting to
			 * be read => process it first to avoid extra sys calls */
			read_flags=((con->flags & (F_CONN_EOF_SEEN|F_CONN_FORCE_EOF)) &&
						!(con->flags & F_CONN_OOB_DATA)) ?
							RD_CONN_FORCE_EOF : 0;
#ifdef USE_TLS
repeat_1st_read:
#endif /* USE_TLS */
			resp=tcp_read_req(con, &n, &read_flags);
			if (unlikely(resp<0)){
				/* some error occurred, but on the new fd, not on the tcp
				 * main fd, so keep the ret value */
				if (unlikely(resp!=CONN_EOF))
					con->state=S_CONN_BAD;
				LM_WARN("%s:%d %s releasing\n",
						__FILE__, __LINE__, __PRETTY_FUNCTION__);
				release_tcpconn(con, resp, tcpmain_sock);
				break;
			}
#ifdef USE_TLS
			/* repeat read if requested (for now only tls might do this) */
			if (unlikely(read_flags & RD_CONN_REPEAT_READ))
				goto repeat_1st_read;
#endif /* USE_TLS */
			/* must be before io_watch_add, io_watch_add might catch some
			 * already existing events => might call handle_io and
			 * handle_io might decide to del. the new connection =>
			 * must be in the list */
			tcpconn_listadd(tcp_conn_lst, con, c_next, c_prev);
			t=get_ticks_raw();
			con->timeout=t+S_TO_TICKS(TCP_CHILD_TIMEOUT);
			/* re-activate the timer */
			con->timer.f=tcpconn_read_timeout;
			local_timer_reinit(&con->timer);
			local_timer_add(&tcp_reader_ltimer, &con->timer,
								S_TO_TICKS(TCP_CHILD_TIMEOUT), t);
			if (unlikely(io_watch_add(&io_w, s, POLLIN, F_TCPCONN, con)<0)){
				LM_CRIT("io_watch_add failed for %p id %d fd %d, state %d,"
						" flags %x, main fd %d, refcnt %d\n",
						con, con->id, con->fd, con->state, con->flags,
						con->s, atomic_get(&con->refcnt));
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
				local_timer_del(&tcp_reader_ltimer, &con->timer);
				goto con_error;
			}
			break;
		case F_TCPCONN:
			con=(struct tcp_connection*)fm->data;
			if (unlikely(con->state==S_CONN_BAD)){
				resp=CONN_ERROR;
				if (!(con->send_flags.f & SND_F_CON_CLOSE))
					LM_WARN("F_TCPCONN connection marked as bad: %p id %d"
							" refcnt %d\n", con, con->id,
							atomic_get(&con->refcnt));
				goto read_error;
			}
			read_flags=((
#ifdef POLLRDHUP
						(events & POLLRDHUP) |
#endif /* POLLRDHUP */
						(events & (POLLHUP|POLLERR)) |
						(con->flags & (F_CONN_EOF_SEEN|F_CONN_FORCE_EOF))) &&
						!(events & POLLPRI)) ? RD_CONN_FORCE_EOF : 0;
#ifdef USE_TLS
repeat_read:
#endif /* USE_TLS */
			resp=tcp_read_req(con, &ret, &read_flags);
			if (unlikely(resp<0)){
read_error:
				ret=-1; /* some error occurred */
				if (unlikely(io_watch_del(&io_w, con->fd, idx,
											IO_FD_CLOSING) < 0)){
					LM_CRIT("io_watch_del failed for %p id %d fd %d,"
							" state %d, flags %x, main fd %d, refcnt %d\n",
							con, con->id, con->fd, con->state, con->flags,
							con->s, atomic_get(&con->refcnt));
				}
				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
				local_timer_del(&tcp_reader_ltimer, &con->timer);
				if (unlikely(resp!=CONN_EOF))
					con->state=S_CONN_BAD;
				LM_WARN("%s:%d %s releasing\n",
						__FILE__, __LINE__, __PRETTY_FUNCTION__);
				release_tcpconn(con, resp, tcpmain_sock);
			}else{
#ifdef USE_TLS
				if (unlikely(read_flags & RD_CONN_REPEAT_READ))
					goto repeat_read;
#endif /* USE_TLS */
				/* update timeout */
				con->timeout=get_ticks_raw()+S_TO_TICKS(TCP_CHILD_TIMEOUT);
				/* ret= 0 (read the whole socket buffer) if short read &
				 * !POLLPRI, bytes read otherwise */
				ret&=(((read_flags & RD_CONN_SHORT_READ) &&
						!(events & POLLPRI)) - 1);
			}
			break;
		case F_NONE:
			LM_CRIT("empty fd map %p (%d): {%d, %d, %p}\n",
					fm, (int)(fm-io_w.fd_hash), fm->fd, fm->type, fm->data);
			goto error;
		default:
			LM_CRIT("unknown fd type %d\n", fm->type);
			goto error;
	}
	return ret;
con_error:
	con->state=S_CONN_BAD;
	LM_WARN("%s:%d %s releasing\n", __FILE__, __LINE__, __PRETTY_FUNCTION__);
	release_tcpconn(con, CONN_ERROR, tcpmain_sock);
	return ret;
error:
	return -1;
}
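/* Illustrative note (not part of the original source): the branchless mask
 * at the end of the F_TCPCONN branch above. When the read was short (the
 * socket buffer is drained) and no POLLPRI is pending, the boolean is 1 and
 * (1 - 1) == 0 zeroes ret, signalling "nothing more queued"; otherwise the
 * boolean is 0 and (0 - 1) == ~0 leaves ret untouched. The equivalent
 * branching form, kept in #if 0: */
#if 0
			if ((read_flags & RD_CONN_SHORT_READ) && !(events & POLLPRI))
				ret=0; /* whole socket buffer consumed, nothing left queued */
			/* else: keep ret>0, more data may still be waiting on the fd */
#endif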