/* * This is a tricky allocation function using the zlib. * This is based on the allocation order in deflateInit2. */ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size) { struct comp_ctx *ctx = opaque; static char round = 0; /* order in deflateInit2 */ void *buf = NULL; if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < (long)(items * size)) goto end; switch (round) { case 0: if (zlib_pool_deflate_state == NULL) zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED); ctx->zlib_deflate_state = buf = pool_alloc2(zlib_pool_deflate_state); break; case 1: if (zlib_pool_window == NULL) zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED); ctx->zlib_window = buf = pool_alloc2(zlib_pool_window); break; case 2: if (zlib_pool_prev == NULL) zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED); ctx->zlib_prev = buf = pool_alloc2(zlib_pool_prev); break; case 3: if (zlib_pool_head == NULL) zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED); ctx->zlib_head = buf = pool_alloc2(zlib_pool_head); break; case 4: if (zlib_pool_pending_buf == NULL) zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED); ctx->zlib_pending_buf = buf = pool_alloc2(zlib_pool_pending_buf); break; } if (buf != NULL) zlib_used_memory += items * size; end: /* deflateInit2() first allocates and checks the deflate_state, then if * it succeeds, it allocates all other 4 areas at ones and checks them * at the end. So we want to correctly count the rounds depending on when * zlib is supposed to abort. */ if (buf || round) round = (round + 1) % 5; return buf; }
/* Adds the session <sess> to the pending connection list of server <sess>->srv * or to the one of <sess>->proxy if srv is NULL. All counters and back pointers * are updated accordingly. Returns NULL if no memory is available, otherwise the * pendconn itself. If the session was already marked as served, its flag is * cleared. It is illegal to call this function with a non-NULL sess->srv_conn. */ struct pendconn *pendconn_add(struct session *sess) { struct pendconn *p; p = pool_alloc2(pool2_pendconn); if (!p) return NULL; sess->pend_pos = p; p->sess = sess; p->srv = sess->srv; if (sess->flags & SN_ASSIGNED && sess->srv) { LIST_ADDQ(&sess->srv->pendconns, &p->list); sess->srv->nbpend++; sess->logs.srv_queue_size += sess->srv->nbpend; if (sess->srv->nbpend > sess->srv->nbpend_max) sess->srv->nbpend_max = sess->srv->nbpend; } else { LIST_ADDQ(&sess->be->pendconns, &p->list); sess->be->nbpend++; sess->logs.prx_queue_size += sess->be->nbpend; if (sess->be->nbpend > sess->be->nbpend_max) sess->be->nbpend_max = sess->be->nbpend; } sess->be->totpend++; return p; }
/* * Alloc the comp_ctx */ static inline int init_comp_ctx(struct comp_ctx **comp_ctx) { #ifdef USE_ZLIB z_stream *strm; if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < sizeof(struct comp_ctx)) return -1; #endif if (unlikely(pool_comp_ctx == NULL)) pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED); *comp_ctx = pool_alloc2(pool_comp_ctx); if (*comp_ctx == NULL) return -1; #ifdef USE_ZLIB zlib_used_memory += sizeof(struct comp_ctx); strm = &(*comp_ctx)->strm; strm->zalloc = alloc_zlib; strm->zfree = free_zlib; strm->opaque = *comp_ctx; #endif return 0; }
/* Create a a new session and assign it to frontend <fe>, listener <li>, * origin <origin>, set the current date and clear the stick counters pointers. * Returns the session upon success or NULL. The session may be released using * session_free(). */ struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin) { struct session *sess; sess = pool_alloc2(pool2_session); if (sess) { sess->listener = li; sess->fe = fe; sess->origin = origin; sess->accept_date = date; /* user-visible date for logging */ sess->tv_accept = now; /* corrected date for internal use */ memset(sess->stkctr, 0, sizeof(sess->stkctr)); } return sess; }
/* Set current session's backend to <be>. Nothing is done if the * session already had a backend assigned, which is indicated by * s->flags & SN_BE_ASSIGNED. * All flags, stats and counters which need be updated are updated. * Returns 1 if done, 0 in case of internal error, eg: lack of resource. */ int session_set_backend(struct session *s, struct proxy *be) { if (s->flags & SN_BE_ASSIGNED) return 1; s->be = be; be->beconn++; if (be->beconn > be->be_counters.conn_max) be->be_counters.conn_max = be->beconn; proxy_inc_be_ctr(be); /* assign new parameters to the session from the new backend */ s->si[1].flags &= ~SI_FL_INDEP_STR; if (be->options2 & PR_O2_INDEPSTR) s->si[1].flags |= SI_FL_INDEP_STR; if (be->options2 & PR_O2_RSPBUG_OK) s->txn.rsp.err_pos = -1; /* let buggy responses pass */ s->flags |= SN_BE_ASSIGNED; /* If the target backend requires HTTP processing, we have to allocate * a struct hdr_idx for it if we did not have one. */ if (unlikely(!s->txn.hdr_idx.v && (be->acl_requires & ACL_USE_L7_ANY))) { if ((s->txn.hdr_idx.v = pool_alloc2(pool2_hdr_idx)) == NULL) return 0; /* not enough memory */ /* and now initialize the HTTP transaction state */ http_init_txn(s); s->txn.hdr_idx.size = global.tune.max_http_hdr; hdr_idx_init(&s->txn.hdr_idx); } if (be->options2 & PR_O2_NODELAY) { s->req->flags |= BF_NEVER_WAIT; s->rep->flags |= BF_NEVER_WAIT; } /* We want to enable the backend-specific analysers except those which * were already run as part of the frontend/listener. Note that it would * be more reliable to store the list of analysers that have been run, * but what we do here is OK for now. */ s->req->analysers |= be->be_req_ana & ~(s->listener->analysers); return 1; }
/* * Create a new peer session in assigned state (connect will start automatically) */ static struct session *peer_session_create(struct peer *peer, struct peer_session *ps) { struct listener *l = ((struct proxy *)peer->peers->peers_fe)->listen; struct proxy *p = (struct proxy *)l->frontend; /* attached frontend */ struct session *s; struct http_txn *txn; struct task *t; if ((s = pool_alloc2(pool2_session)) == NULL) { /* disable this proxy for a while */ Alert("out of memory in event_accept().\n"); goto out_close; } LIST_ADDQ(&sessions, &s->list); LIST_INIT(&s->back_refs); s->flags = SN_ASSIGNED|SN_ADDR_SET; s->term_trace = 0; /* if this session comes from a known monitoring system, we want to ignore * it as soon as possible, which means closing it immediately for TCP. */ if ((t = task_new()) == NULL) { /* disable this proxy for a while */ Alert("out of memory in event_accept().\n"); goto out_free_session; } ps->reconnect = tick_add(now_ms, MS_TO_TICKS(5000)); ps->statuscode = PEER_SESSION_CONNECTCODE; t->process = l->handler; t->context = s; t->nice = l->nice; memcpy(&s->si[1].conn.addr.to, &peer->addr, sizeof(s->si[1].conn.addr.to)); s->task = t; s->listener = l; /* Note: initially, the session's backend points to the frontend. * This changes later when switching rules are executed or * when the default backend is assigned. 
*/ s->be = s->fe = p; s->req = s->rep = NULL; /* will be allocated later */ s->si[0].conn.t.sock.fd = -1; s->si[0].conn.flags = CO_FL_NONE; s->si[0].owner = t; s->si[0].state = s->si[0].prev_state = SI_ST_EST; s->si[0].err_type = SI_ET_NONE; s->si[0].err_loc = NULL; s->si[0].release = NULL; s->si[0].send_proxy_ofs = 0; set_target_client(&s->si[0].conn.target, l); s->si[0].exp = TICK_ETERNITY; s->si[0].flags = SI_FL_NONE; if (s->fe->options2 & PR_O2_INDEPSTR) s->si[0].flags |= SI_FL_INDEP_STR; stream_int_register_handler(&s->si[0], &peer_applet); s->si[0].applet.st0 = PEER_SESSION_CONNECT; s->si[0].conn.data_ctx = (void *)ps; s->si[1].conn.t.sock.fd = -1; /* just to help with debugging */ s->si[1].conn.flags = CO_FL_NONE; s->si[1].owner = t; s->si[1].state = s->si[1].prev_state = SI_ST_ASS; s->si[1].conn_retries = p->conn_retries; s->si[1].err_type = SI_ET_NONE; s->si[1].err_loc = NULL; s->si[1].release = NULL; s->si[1].send_proxy_ofs = 0; set_target_proxy(&s->si[1].conn.target, s->be); si_prepare_conn(&s->si[1], peer->proto, peer->data); s->si[1].exp = TICK_ETERNITY; s->si[1].flags = SI_FL_NONE; if (s->be->options2 & PR_O2_INDEPSTR) s->si[1].flags |= SI_FL_INDEP_STR; session_init_srv_conn(s); set_target_proxy(&s->target, s->be); s->pend_pos = NULL; /* init store persistence */ s->store_count = 0; s->stkctr1_entry = NULL; s->stkctr2_entry = NULL; /* FIXME: the logs are horribly complicated now, because they are * defined in <p>, <p>, and later <be> and <be>. */ s->logs.logwait = 0; s->do_log = NULL; /* default error reporting function, may be changed by analysers */ s->srv_error = default_srv_error; s->uniq_id = 0; s->unique_id = NULL; txn = &s->txn; /* Those variables will be checked and freed if non-NULL in * session.c:session_free(). It is important that they are * properly initialized. 
*/ txn->sessid = NULL; txn->srv_cookie = NULL; txn->cli_cookie = NULL; txn->uri = NULL; txn->req.cap = NULL; txn->rsp.cap = NULL; txn->hdr_idx.v = NULL; txn->hdr_idx.size = txn->hdr_idx.used = 0; if ((s->req = pool_alloc2(pool2_channel)) == NULL) goto out_fail_req; /* no memory */ s->req->buf.size = global.tune.bufsize; channel_init(s->req); s->req->prod = &s->si[0]; s->req->cons = &s->si[1]; s->si[0].ib = s->si[1].ob = s->req; s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */ /* activate default analysers enabled for this listener */ s->req->analysers = l->analysers; /* note: this should not happen anymore since there's always at least the switching rules */ if (!s->req->analysers) { channel_auto_connect(s->req);/* don't wait to establish connection */ channel_auto_close(s->req);/* let the producer forward close requests */ } s->req->rto = s->fe->timeout.client; s->req->wto = s->be->timeout.server; if ((s->rep = pool_alloc2(pool2_channel)) == NULL) goto out_fail_rep; /* no memory */ s->rep->buf.size = global.tune.bufsize; channel_init(s->rep); s->rep->prod = &s->si[1]; s->rep->cons = &s->si[0]; s->si[0].ob = s->si[1].ib = s->rep; s->rep->rto = s->be->timeout.server; s->rep->wto = s->fe->timeout.client; s->req->rex = TICK_ETERNITY; s->req->wex = TICK_ETERNITY; s->req->analyse_exp = TICK_ETERNITY; s->rep->rex = TICK_ETERNITY; s->rep->wex = TICK_ETERNITY; s->rep->analyse_exp = TICK_ETERNITY; t->expire = TICK_ETERNITY; s->rep->flags |= CF_READ_DONTWAIT; /* it is important not to call the wakeup function directly but to * pass through task_wakeup(), because this one knows how to apply * priorities to tasks. */ task_wakeup(t, TASK_WOKEN_INIT); l->nbconn++; /* warning! 
right now, it's up to the handler to decrease this */ p->feconn++;/* beconn will be increased later */ jobs++; if (!(s->listener->options & LI_O_UNLIMITED)) actconn++; totalconn++; return s; /* Error unrolling */ out_fail_rep: pool_free2(pool2_channel, s->req); out_fail_req: task_free(t); out_free_session: LIST_DEL(&s->list); pool_free2(pool2_session, s); out_close: return s; }
/* Finish a stream accept() for a proxy (TCP or HTTP). It returns a negative
 * value in case of a critical failure which must cause the listener to be
 * disabled, a positive or null value in case of success.
 *
 * The work done here: allocate the frontend's request/response capture
 * arrays when configured, allocate and initialize the HTTP transaction when
 * the frontend needs HTTP processing, emit the connect log line (or defer it
 * to the log-format engine), print a debug trace when running in debug mode,
 * and set CF_READ_DONTWAIT for HTTP frontends. On allocation failure, all
 * captures allocated so far are released before returning -1.
 */
int frontend_accept(struct stream *s)
{
	struct session *sess = s->sess;
	struct connection *conn = objt_conn(sess->origin); /* NULL for non-connection origins */
	struct listener *l = sess->listener;
	struct proxy *fe = sess->fe;

	if (unlikely(fe->nb_req_cap > 0)) {
		if ((s->req_cap = pool_alloc2(fe->req_cap_pool)) == NULL)
			goto out_return;	/* no memory */
		memset(s->req_cap, 0, fe->nb_req_cap * sizeof(void *));
	}

	if (unlikely(fe->nb_rsp_cap > 0)) {
		if ((s->res_cap = pool_alloc2(fe->rsp_cap_pool)) == NULL)
			goto out_free_reqcap;	/* no memory */
		memset(s->res_cap, 0, fe->nb_rsp_cap * sizeof(void *));
	}

	if (fe->http_needed) {
		/* we have to allocate header indexes only if we know
		 * that we may make use of them. This of course includes
		 * (mode == PR_MODE_HTTP).
		 */
		if (unlikely(!http_alloc_txn(s)))
			goto out_free_rspcap; /* no memory */

		/* and now initialize the HTTP transaction state */
		http_init_txn(s);
	}

	if ((fe->mode == PR_MODE_TCP || fe->mode == PR_MODE_HTTP)
	    && (!LIST_ISEMPTY(&fe->logsrvs))) {
		if (likely(!LIST_ISEMPTY(&fe->logformat))) {
			/* a log-format is set: the log is deferred; only emit
			 * now when the client IP was the last awaited item.
			 */
			/* we have the client ip */
			if (s->logs.logwait & LW_CLIP)
				if (!(s->logs.logwait &= ~(LW_CLIP|LW_INIT)))
					s->do_log(s);
		}
		else if (conn) {
			/* legacy log: emit the connect line immediately */
			char pn[INET6_ADDRSTRLEN], sn[INET6_ADDRSTRLEN];

			conn_get_from_addr(conn);
			conn_get_to_addr(conn);

			switch (addr_to_str(&conn->addr.from, pn, sizeof(pn))) {
			case AF_INET:
			case AF_INET6:
				addr_to_str(&conn->addr.to, sn, sizeof(sn));
				send_log(fe, LOG_INFO, "Connect from %s:%d to %s:%d (%s/%s)\n",
					 pn, get_host_port(&conn->addr.from),
					 sn, get_host_port(&conn->addr.to),
					 fe->id, (fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			case AF_UNIX:
				/* UNIX socket, only the destination is known */
				send_log(fe, LOG_INFO, "Connect to unix:%d (%s/%s)\n",
					 l->luid,
					 fe->id, (fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			}
		}
	}

	if (unlikely((global.mode & MODE_DEBUG) && conn &&
		     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
		/* debug mode: dump an accept trace on stdout */
		char pn[INET6_ADDRSTRLEN];

		conn_get_from_addr(conn);

		switch (addr_to_str(&conn->addr.from, pn, sizeof(pn))) {
		case AF_INET:
		case AF_INET6:
			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [%s:%d]\n",
				     s->uniq_id, fe->id, (unsigned short)l->fd, (unsigned short)conn->t.sock.fd,
				     pn, get_host_port(&conn->addr.from));
			break;
		case AF_UNIX:
			/* UNIX socket, only the destination is known */
			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [unix:%d]\n",
				     s->uniq_id, fe->id, (unsigned short)l->fd, (unsigned short)conn->t.sock.fd,
				     l->luid);
			break;
		}

		shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
	}

	if (fe->mode == PR_MODE_HTTP)
		s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */

	/* everything's OK, let's go on */
	return 1;

	/* Error unrolling */
 out_free_rspcap:
	pool_free2(fe->rsp_cap_pool, s->res_cap);
 out_free_reqcap:
	pool_free2(fe->req_cap_pool, s->req_cap);
 out_return:
	return -1;
}
/* Finish a session accept() for a proxy (TCP or HTTP). It returns a negative
 * value in case of a critical failure which must cause the listener to be
 * disabled, a positive value in case of success, or zero if it is a success
 * but the session must be closed ASAP (eg: monitoring).
 *
 * This resets the session's log timers and counters, tunes the client
 * socket (TCP_NODELAY, optional keep-alive / no-linger / MSS reduction,
 * send/receive buffer sizes), allocates HTTP captures and the header index
 * when needed, emits the connect log line, prints a debug trace in debug
 * mode, then handles the monitoring/health-check short-circuit by forging
 * an immediate response.
 */
int frontend_accept(struct session *s)
{
	int cfd = s->si[0].fd; /* client-side file descriptor */

	tv_zero(&s->logs.tv_request);
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_size = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_size = 0; /* we will get this number soon */

	/* FIXME: the logs are horribly complicated now, because they are
	 * defined in <p>, <p>, and later <be> and <be>.
	 */
	s->do_log = sess_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Adjust some socket options */
	if (s->listener->addr.ss_family == AF_INET || s->listener->addr.ss_family == AF_INET6) {
		/* failing to disable Nagle is considered fatal for this connection */
		if (setsockopt(cfd, IPPROTO_TCP, TCP_NODELAY,
			       (char *) &one, sizeof(one)) == -1)
			goto out_return;

		if (s->fe->options & PR_O_TCP_CLI_KA)
			setsockopt(cfd, SOL_SOCKET, SO_KEEPALIVE,
				   (char *) &one, sizeof(one));

		if (s->fe->options & PR_O_TCP_NOLING)
			setsockopt(cfd, SOL_SOCKET, SO_LINGER,
				   (struct linger *) &nolinger, sizeof(struct linger));
#if defined(TCP_MAXSEG)
		if (s->listener->maxseg < 0) {
			/* we just want to reduce the current MSS by that value */
			int mss;
			socklen_t mss_len = sizeof(mss);
			if (getsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, &mss_len) == 0) {
				mss += s->listener->maxseg; /* remember, it's < 0 */
				setsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
			}
		}
#endif
	}

	if (global.tune.client_sndbuf)
		setsockopt(cfd, SOL_SOCKET, SO_SNDBUF, &global.tune.client_sndbuf, sizeof(global.tune.client_sndbuf));

	if (global.tune.client_rcvbuf)
		setsockopt(cfd, SOL_SOCKET, SO_RCVBUF, &global.tune.client_rcvbuf, sizeof(global.tune.client_rcvbuf));

	if (s->fe->mode == PR_MODE_HTTP) {
		/* the captures are only used in HTTP frontends */
		if (unlikely(s->fe->nb_req_cap > 0 &&
			     (s->txn.req.cap = pool_alloc2(s->fe->req_cap_pool)) == NULL))
			goto out_return;	/* no memory */

		if (unlikely(s->fe->nb_rsp_cap > 0 &&
			     (s->txn.rsp.cap = pool_alloc2(s->fe->rsp_cap_pool)) == NULL))
			goto out_free_reqcap;	/* no memory */
	}

	if (s->fe->acl_requires & ACL_USE_L7_ANY) {
		/* we have to allocate header indexes only if we know
		 * that we may make use of them. This of course includes
		 * (mode == PR_MODE_HTTP).
		 */
		s->txn.hdr_idx.size = global.tune.max_http_hdr;

		if (unlikely((s->txn.hdr_idx.v = pool_alloc2(pool2_hdr_idx)) == NULL))
			goto out_free_rspcap; /* no memory */

		/* and now initialize the HTTP transaction state */
		http_init_txn(s);
	}

	if ((s->fe->mode == PR_MODE_TCP || s->fe->mode == PR_MODE_HTTP)
	    && (!LIST_ISEMPTY(&s->fe->logsrvs))) {
		if (likely(s->fe->to_log)) {
			/* we have the client ip */
			if (s->logs.logwait & LW_CLIP)
				if (!(s->logs.logwait &= ~LW_CLIP))
					s->do_log(s);
		}
		else {
			/* no standard log bits awaited: emit the connect line now */
			char pn[INET6_ADDRSTRLEN], sn[INET6_ADDRSTRLEN];

			if (!(s->flags & SN_FRT_ADDR_SET))
				get_frt_addr(s);

			switch (addr_to_str(&s->req->prod->addr.from, pn, sizeof(pn))) {
			case AF_INET:
			case AF_INET6:
				addr_to_str(&s->req->prod->addr.to, sn, sizeof(sn));
				send_log(s->fe, LOG_INFO, "Connect from %s:%d to %s:%d (%s/%s)\n",
					 pn, get_host_port(&s->req->prod->addr.from),
					 sn, get_host_port(&s->req->prod->addr.to),
					 s->fe->id, (s->fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			case AF_UNIX:
				/* UNIX socket, only the destination is known */
				send_log(s->fe, LOG_INFO, "Connect to unix:%d (%s/%s)\n",
					 s->listener->luid,
					 s->fe->id, (s->fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			}
		}
	}

	if (unlikely((global.mode & MODE_DEBUG) &&
		     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
		/* debug mode: dump an accept trace on stdout */
		char pn[INET6_ADDRSTRLEN];
		int len = 0;

		if (!(s->flags & SN_FRT_ADDR_SET))
			get_frt_addr(s);

		switch (addr_to_str(&s->req->prod->addr.from, pn, sizeof(pn))) {
		case AF_INET:
		case AF_INET6:
			len = sprintf(trash, "%08x:%s.accept(%04x)=%04x from [%s:%d]\n",
				      s->uniq_id, s->fe->id, (unsigned short)s->listener->fd, (unsigned short)cfd,
				      pn, get_host_port(&s->req->prod->addr.from));
			break;
		case AF_UNIX:
			/* UNIX socket, only the destination is known */
			len = sprintf(trash, "%08x:%s.accept(%04x)=%04x from [unix:%d]\n",
				      s->uniq_id, s->fe->id, (unsigned short)s->listener->fd, (unsigned short)cfd,
				      s->listener->luid);
			break;
		}

		/* NOTE(review): write()'s return value is ignored here (best-effort
		 * debug output); some compilers warn about this — confirm intent.
		 */
		write(1, trash, len);
	}

	if (s->fe->mode == PR_MODE_HTTP)
		s->req->flags |= BF_READ_DONTWAIT; /* one read is usually enough */

	/* note: this should not happen anymore since there's always at least the switching rules */
	if (!s->req->analysers) {
		buffer_auto_connect(s->req);  /* don't wait to establish connection */
		buffer_auto_close(s->req);    /* let the producer forward close requests */
	}

	s->req->rto = s->fe->timeout.client;
	s->rep->wto = s->fe->timeout.client;

	fdtab[cfd].flags = FD_FL_TCP | FD_FL_TCP_NODELAY;
	if (s->fe->options & PR_O_TCP_NOLING)
		fdtab[cfd].flags |= FD_FL_TCP_NOLING;

	if (unlikely((s->fe->mode == PR_MODE_HTTP && (s->flags & SN_MONITOR)) ||
		     (s->fe->mode == PR_MODE_HEALTH && ((s->fe->options2 & PR_O2_CHK_ANY) == PR_O2_HTTP_CHK)))) {
		/* Either we got a request from a monitoring system on an HTTP instance,
		 * or we're in health check mode with the 'httpchk' option enabled. In
		 * both cases, we return a fake "HTTP/1.0 200 OK" response and we exit.
		 */
		struct chunk msg;
		chunk_initstr(&msg, "HTTP/1.0 200 OK\r\n\r\n");
		stream_int_retnclose(&s->si[0], &msg); /* forge a 200 response */

		s->req->analysers = 0;
		s->task->expire = s->rep->wex;
		EV_FD_CLR(cfd, DIR_RD);
	}
	else if (unlikely(s->fe->mode == PR_MODE_HEALTH)) {  /* health check mode, no client reading */
		struct chunk msg;
		chunk_initstr(&msg, "OK\n");
		stream_int_retnclose(&s->si[0], &msg); /* forge an "OK" response */

		s->req->analysers = 0;
		s->task->expire = s->rep->wex;
		EV_FD_CLR(cfd, DIR_RD);
	}

	/* everything's OK, let's go on */
	return 1;

	/* Error unrolling */
 out_free_rspcap:
	pool_free2(s->fe->rsp_cap_pool, s->txn.rsp.cap);
 out_free_reqcap:
	pool_free2(s->fe->req_cap_pool, s->txn.req.cap);
 out_return:
	return -1;
}
/* Finish a session accept() for a proxy (TCP or HTTP). It returns a negative
 * value in case of a critical failure which must cause the listener to be
 * disabled, a positive value in case of success, or zero if it is a success
 * but the session must be closed ASAP (eg: monitoring).
 *
 * This resets the session's log timers and counters, tunes the client
 * socket (TCP_NODELAY, optional keep-alive / no-linger / MSS reduction,
 * send/receive buffer sizes), allocates HTTP captures and the header index
 * when needed, emits the connect log line (or defers it to the log-format
 * engine), and prints a debug trace in debug mode.
 */
int frontend_accept(struct session *s)
{
	int cfd = s->si[0].conn->t.sock.fd; /* client-side file descriptor */

	tv_zero(&s->logs.tv_request);
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_size = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_size = 0; /* we will get this number soon */

	/* FIXME: the logs are horribly complicated now, because they are
	 * defined in <p>, <p>, and later <be> and <be>.
	 */
	s->do_log = sess_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Adjust some socket options */
	if (s->listener->addr.ss_family == AF_INET || s->listener->addr.ss_family == AF_INET6) {
		/* failing to disable Nagle is considered fatal for this connection */
		if (setsockopt(cfd, IPPROTO_TCP, TCP_NODELAY,
			       (char *) &one, sizeof(one)) == -1)
			goto out_return;

		if (s->fe->options & PR_O_TCP_CLI_KA)
			setsockopt(cfd, SOL_SOCKET, SO_KEEPALIVE,
				   (char *) &one, sizeof(one));

		if (s->fe->options & PR_O_TCP_NOLING)
			setsockopt(cfd, SOL_SOCKET, SO_LINGER,
				   (struct linger *) &nolinger, sizeof(struct linger));
#if defined(TCP_MAXSEG)
		if (s->listener->maxseg < 0) {
			/* we just want to reduce the current MSS by that value */
			int mss;
			socklen_t mss_len = sizeof(mss);
			if (getsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, &mss_len) == 0) {
				mss += s->listener->maxseg; /* remember, it's < 0 */
				setsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
			}
		}
#endif
	}

	if (global.tune.client_sndbuf)
		setsockopt(cfd, SOL_SOCKET, SO_SNDBUF, &global.tune.client_sndbuf, sizeof(global.tune.client_sndbuf));

	if (global.tune.client_rcvbuf)
		setsockopt(cfd, SOL_SOCKET, SO_RCVBUF, &global.tune.client_rcvbuf, sizeof(global.tune.client_rcvbuf));

	if (s->fe->mode == PR_MODE_HTTP) {
		/* the captures are only used in HTTP frontends */
		if (unlikely(s->fe->nb_req_cap > 0 &&
			     (s->txn.req.cap = pool_alloc2(s->fe->req_cap_pool)) == NULL))
			goto out_return;	/* no memory */

		if (unlikely(s->fe->nb_rsp_cap > 0 &&
			     (s->txn.rsp.cap = pool_alloc2(s->fe->rsp_cap_pool)) == NULL))
			goto out_free_reqcap;	/* no memory */
	}

	if (s->fe->http_needed) {
		/* we have to allocate header indexes only if we know
		 * that we may make use of them. This of course includes
		 * (mode == PR_MODE_HTTP).
		 */
		s->txn.hdr_idx.size = global.tune.max_http_hdr;

		if (unlikely((s->txn.hdr_idx.v = pool_alloc2(pool2_hdr_idx)) == NULL))
			goto out_free_rspcap; /* no memory */

		/* and now initialize the HTTP transaction state */
		http_init_txn(s);
	}

	if ((s->fe->mode == PR_MODE_TCP || s->fe->mode == PR_MODE_HTTP)
	    && (!LIST_ISEMPTY(&s->fe->logsrvs))) {
		if (likely(!LIST_ISEMPTY(&s->fe->logformat))) {
			/* a log-format is set: the log is deferred; only emit
			 * now when the client IP was the last awaited item.
			 */
			/* we have the client ip */
			if (s->logs.logwait & LW_CLIP)
				if (!(s->logs.logwait &= ~(LW_CLIP|LW_INIT)))
					s->do_log(s);
		}
		else {
			/* legacy log: emit the connect line immediately */
			char pn[INET6_ADDRSTRLEN], sn[INET6_ADDRSTRLEN];

			conn_get_from_addr(s->req->prod->conn);
			conn_get_to_addr(s->req->prod->conn);

			switch (addr_to_str(&s->req->prod->conn->addr.from, pn, sizeof(pn))) {
			case AF_INET:
			case AF_INET6:
				addr_to_str(&s->req->prod->conn->addr.to, sn, sizeof(sn));
				send_log(s->fe, LOG_INFO, "Connect from %s:%d to %s:%d (%s/%s)\n",
					 pn, get_host_port(&s->req->prod->conn->addr.from),
					 sn, get_host_port(&s->req->prod->conn->addr.to),
					 s->fe->id, (s->fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			case AF_UNIX:
				/* UNIX socket, only the destination is known */
				send_log(s->fe, LOG_INFO, "Connect to unix:%d (%s/%s)\n",
					 s->listener->luid,
					 s->fe->id, (s->fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
				break;
			}
		}
	}

	if (unlikely((global.mode & MODE_DEBUG) &&
		     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
		/* debug mode: dump an accept trace on stdout */
		char pn[INET6_ADDRSTRLEN];

		conn_get_from_addr(s->req->prod->conn);

		switch (addr_to_str(&s->req->prod->conn->addr.from, pn, sizeof(pn))) {
		case AF_INET:
		case AF_INET6:
			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [%s:%d]\n",
				     s->uniq_id, s->fe->id, (unsigned short)s->listener->fd, (unsigned short)cfd,
				     pn, get_host_port(&s->req->prod->conn->addr.from));
			break;
		case AF_UNIX:
			/* UNIX socket, only the destination is known */
			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [unix:%d]\n",
				     s->uniq_id, s->fe->id, (unsigned short)s->listener->fd, (unsigned short)cfd,
				     s->listener->luid);
			break;
		}

		if (write(1, trash.str, trash.len) < 0) /* shut gcc warning */;
	}

	if (s->fe->mode == PR_MODE_HTTP)
		s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */

	/* note: this should not happen anymore since there's always at least the switching rules */
	if (!s->req->analysers) {
		channel_auto_connect(s->req);  /* don't wait to establish connection */
		channel_auto_close(s->req);    /* let the producer forward close requests */
	}

	s->req->rto = s->fe->timeout.client;
	s->rep->wto = s->fe->timeout.client;

	/* everything's OK, let's go on */
	return 1;

	/* Error unrolling */
 out_free_rspcap:
	pool_free2(s->fe->rsp_cap_pool, s->txn.rsp.cap);
 out_free_reqcap:
	pool_free2(s->fe->req_cap_pool, s->txn.req.cap);
 out_return:
	return -1;
}