void cancel_branch( struct cell *t, int branch ) { char *cancel; unsigned int len; struct retr_buf *crb, *irb; crb=&t->uac[branch].local_cancel; irb=&t->uac[branch].request; # ifdef EXTRA_DEBUG if (crb->buffer.s!=0 && crb->buffer.s!=BUSY_BUFFER) { LM_CRIT("attempt to rewrite cancel buffer failed\n"); abort(); } # endif cancel=build_cancel(t, branch, &len); if (!cancel) { LM_ERR("attempt to build a CANCEL failed\n"); return; } /* install cancel now */ crb->buffer.s=cancel; crb->buffer.len=len; crb->dst=irb->dst; crb->branch=branch; /* label it as cancel so that FR timer can better now how * to deal with it */ crb->activ_type=TYPE_LOCAL_CANCEL; if ( has_tran_tmcbs( t, TMCB_REQUEST_BUILT) ) { set_extra_tmcb_params( &crb->buffer, &crb->dst); run_trans_callbacks( TMCB_REQUEST_BUILT, t, t->uas.request,0, -t->uas.request->REQ_METHOD); } LM_DBG("sending cancel...\n"); SEND_BUFFER( crb ); /*sets and starts the FINAL RESPONSE timer */ start_retr( crb ); }
/* Core reply-sending routine: stores the pre-built reply "buf"/"len" with
 * status "code" into the transaction's uas.response shmem buffer, runs the
 * PRE/POST response-out callbacks, sends the reply and updates timers.
 *
 * Params:
 *   trans  - transaction to reply on
 *   buf    - pkg-allocated reply buffer (NULL means building failed;
 *            ownership is taken: freed on every path)
 *   len    - length of buf
 *   code   - SIP status code of the reply
 *   to_tag, to_tag_len - NOTE(review): not referenced anywhere in this
 *            function body; presumably consumed by the caller that built
 *            "buf" - confirm before removing
 *   lock   - if non-zero, take/release REPLY lock around shared-state access
 *   bm     - bookmark used to patch local to-tags into the shmem copy
 * Returns 1 on success, -1 on error (transaction is then put on wait). */
static int _reply_light( struct cell *trans, char* buf, unsigned int len,
	unsigned int code, char *to_tag, unsigned int to_tag_len,
	int lock, struct bookmark *bm)
{
	struct retr_buf *rb;
	unsigned int buf_len;
	branch_bm_t cancel_bitmap;
	str cb_s;

	if (!buf) {
		LM_DBG("response building failed\n");
		/* determine if there are some branches to be canceled */
		if ( is_invite(trans) ) {
			if (lock) LOCK_REPLIES( trans );
			which_cancel(trans, &cancel_bitmap );
			if (lock) UNLOCK_REPLIES( trans );
		}
		/* and clean-up, including cancellations, if needed */
		goto error;
	}

	cancel_bitmap=0;
	if (lock) LOCK_REPLIES( trans );
	if ( is_invite(trans) ) which_cancel(trans, &cancel_bitmap );
	/* refuse to send a second final reply on this transaction */
	if (trans->uas.status>=200) {
		LM_ERR("failed to generate %d reply when a final %d was sent out\n",
			code, trans->uas.status);
		goto error2;
	}

	rb = & trans->uas.response;
	rb->activ_type=code;

	trans->uas.status = code;
	/* over-allocate on first use so larger follow-up replies can
	 * reuse the same shmem fragment */
	buf_len = rb->buffer.s ? len : len + REPLY_OVERBUFFER_LEN;
	rb->buffer.s = (char*)shm_resize( rb->buffer.s, buf_len );
	/* puts the reply's buffer to uas.response */
	if (! rb->buffer.s ) {
		LM_ERR("failed to allocate shmem buffer\n");
		goto error3;
	}
	update_local_tags(trans, bm, rb->buffer.s, buf);

	rb->buffer.len = len ;
	memcpy( rb->buffer.s , buf , len );
	/* needs to be protected too because what timers are set depends
	 * on current transactions status */
	/* t_update_timers_after_sending_reply( rb ); */
	trans->relaied_reply_branch=-2;
	if (lock) UNLOCK_REPLIES( trans );

	/* do UAC cleanup procedures in case we generated a final answer
	 * whereas there are pending UACs */
	if (code>=200) {
		if ( is_local(trans) ) {
			LM_DBG("local transaction completed\n");
			if ( has_tran_tmcbs(trans, TMCB_LOCAL_COMPLETED) ) {
				run_trans_callbacks( TMCB_LOCAL_COMPLETED, trans,
					0, FAKED_REPLY, code);
			}
		} else {
			/* run the PRE send callbacks */
			if ( has_tran_tmcbs(trans, TMCB_RESPONSE_PRE_OUT) ) {
				cb_s.s = buf;
				cb_s.len = len;
				set_extra_tmcb_params( &cb_s, &rb->dst);
				if (lock)
					run_trans_callbacks_locked( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
				else
					run_trans_callbacks( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
			}
		}

		if (!is_hopbyhop_cancel(trans)) {
			cleanup_uac_timers( trans );
			if (is_invite(trans)) cancel_uacs( trans, cancel_bitmap );
			/* for auth related replies, we do not do retransmission
			   (via set_final_timer()), but only wait for a final
			   reply (put_on_wait() ) - see RFC 3261 (26.3.2.4 DoS
			   Protection) */
			if ((code != 401) && (code != 407))
				set_final_timer( trans );
			else
				put_on_wait(trans);
		}
	}

	/* send it out : response.dst.send_sock is valid all the time now,
	 * as it's taken from original request -bogdan */
	if (!trans->uas.response.dst.send_sock) {
		LM_CRIT("send_sock is NULL\n");
	}
	SEND_PR_BUFFER( rb, buf, len );
	LM_DBG("reply sent out. buf=%p: %.9s..., " "shmem=%p: %.9s\n",
		buf, buf, rb->buffer.s, rb->buffer.s );

	/* run the POST send callbacks */
	if (code>=200&&!is_local(trans)&&has_tran_tmcbs(trans,TMCB_RESPONSE_OUT)) {
		cb_s.s = buf;
		cb_s.len = len;
		set_extra_tmcb_params( &cb_s, &rb->dst);
		if (lock)
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
		else
			run_trans_callbacks( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
	}
	pkg_free( buf ) ;

	stats_trans_rpl( code, 1 /*local*/ );
	LM_DBG("finished\n");
	return 1;

error3:
error2:
	if (lock) UNLOCK_REPLIES( trans );
	pkg_free ( buf );
error:
	/* do UAC cleanup */
	cleanup_uac_timers( trans );
	if ( is_invite(trans) ) cancel_uacs( trans, cancel_bitmap );
	/* we did not succeed -- put the transaction on wait */
	put_on_wait(trans);
	return -1;
}
/* this is the code which decides what and when shall be relayed
 * upstream; note well -- it assumes it is entered locked with
 * REPLY_LOCK and it returns unlocked!
 *
 * Params:
 *   t             - transaction the reply belongs to
 *   p_msg         - received reply (or FAKED_REPLY)
 *   branch        - branch index the reply arrived on
 *   msg_status    - status code of the received reply
 *   cancel_bitmap - out: branches to be cancelled (filled by
 *                   t_should_relay_response)
 * Returns the rps decision from t_should_relay_response, or RPS_ERROR
 * on internal failure (after attempting a 500 reply downstream). */
enum rps relay_reply( struct cell *t, struct sip_msg *p_msg, int branch,
	unsigned int msg_status, branch_bm_t *cancel_bitmap )
{
	int relay;
	int save_clone;
	char *buf;
	/* length of outbound reply */
	unsigned int res_len;
	int relayed_code;
	struct sip_msg *relayed_msg;
	struct bookmark bm;
	int totag_retr;
	enum rps reply_status;
	/* retransmission structure of outbound reply and request */
	struct retr_buf *uas_rb;
	str cb_s;
	str text;

	/* keep compiler warnings about use of uninit vars silent */
	res_len=0;
	buf=0;
	relayed_msg=0;
	relayed_code=0;
	totag_retr=0;

	/* remember, what was sent upstream to know whether we are
	 * forwarding a first final reply or not */

	/* *** store and relay message as needed *** */
	reply_status = t_should_relay_response(t, msg_status, branch,
		&save_clone, &relay, cancel_bitmap, p_msg );
	LM_DBG("branch=%d, save=%d, relay=%d\n", branch, save_clone, relay );

	/* store the message if needed */
	if (save_clone) /* save for later use, typically branch picking */
	{
		if (!store_reply( t, branch, p_msg ))
			goto error01;
	}

	uas_rb = & t->uas.response;
	if (relay >= 0 ) {
		/* initialize sockets for outbound reply */
		uas_rb->activ_type=msg_status;

		t->relaied_reply_branch = relay;

		/* try building the outbound reply from either the current
		 * or a stored message */
		relayed_msg = branch==relay ? p_msg :  t->uac[relay].reply;
		if (relayed_msg==FAKED_REPLY) {
			/* locally generated reply: build it from the request */
			relayed_code = branch==relay ?
				msg_status : t->uac[relay].last_received;
			text.s = error_text(relayed_code);
			text.len = strlen(text.s); /* FIXME - bogdan*/
			/* add a local to-tag for >=180 replies if the request
			 * had no to-tag of its own */
			if (relayed_code>=180 && t->uas.request->to
					&& (get_to(t->uas.request)->tag_value.s==0
					|| get_to(t->uas.request)->tag_value.len==0)) {
				calc_crc_suffix( t->uas.request, tm_tag_suffix );
				buf = build_res_buf_from_sip_req(
						relayed_code, &text, &tm_tag, t->uas.request,
						&res_len, &bm );
			} else {
				buf = build_res_buf_from_sip_req( relayed_code, &text,
					0/* no to-tag */, t->uas.request, &res_len, &bm );
			}
		} else {
			/* run callbacks for all types of responses -
			 * even if they are shmem-ed or not */
			if (has_tran_tmcbs(t,TMCB_RESPONSE_FWDED) ) {
				run_trans_callbacks( TMCB_RESPONSE_FWDED, t,
					t->uas.request, relayed_msg, msg_status );
			}
			relayed_code=relayed_msg->REPLY_STATUS;
			buf = build_res_buf_from_sip_res( relayed_msg, &res_len,
				uas_rb->dst.send_sock);
			/* remove all lumps which are not in shm
			 * added either by build_res_buf_from_sip_res, or by
			 * the callbacks that have been called with shmem-ed
			 * messages - vlad */
			if (branch!=relay) {
				del_notflaged_lumps( &(relayed_msg->add_rm), LUMPFLAG_SHMEM);
				del_notflaged_lumps( &(relayed_msg->body_lumps),
					LUMPFLAG_SHMEM);
			}
		}
		if (!buf) {
			LM_ERR("no mem for outbound reply buffer\n");
			goto error02;
		}

		/* attempt to copy the message to UAS's shmem:
		   - copy to-tag for ACK matching as well
		   -  allocate little a bit more for provisional as
		      larger messages are likely to follow and we will be
		      able to reuse the memory frag */
		uas_rb->buffer.s = (char*)shm_resize( uas_rb->buffer.s, res_len +
			(msg_status<200 ?  REPLY_OVERBUFFER_LEN : 0));
		if (!uas_rb->buffer.s) {
			LM_ERR("no more share memory\n");
			goto error03;
		}
		uas_rb->buffer.len = res_len;
		memcpy( uas_rb->buffer.s, buf, res_len );
		if (relayed_msg==FAKED_REPLY) { /* to-tags for local replies */
			update_local_tags(t, &bm, uas_rb->buffer.s, buf);
		}
		stats_trans_rpl( relayed_code, (relayed_msg==FAKED_REPLY)?1:0 );

		/* update the status ... */
		t->uas.status = relayed_code;

		/* suppress duplicate callback runs for retransmitted 2xx with
		 * an already-seen to-tag */
		if (is_invite(t) && relayed_msg!=FAKED_REPLY
				&& relayed_code>=200 && relayed_code < 300
				&& has_tran_tmcbs( t,
				TMCB_RESPONSE_OUT|TMCB_RESPONSE_PRE_OUT)) {
			totag_retr=update_totag_set(t, relayed_msg);
		}
	}; /* if relay ... */
	UNLOCK_REPLIES( t );

	/* Setup retransmission timer _before_ the reply is sent
	 * to avoid race conditions */
	if (reply_status == RPS_COMPLETED) {
		/* for auth related replies, we do not do retransmission
		   (via set_final_timer()), but only wait for a final reply
		   (put_on_wait() ) - see RFC 3261 (26.3.2.4 DoS Protection) */
		if ((relayed_code != 401) && (relayed_code != 407))
			set_final_timer(t);
		else
			put_on_wait(t);
	}

	/* send it now (from the private buffer) */
	if (relay >= 0) {
		/* run the PRE sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_PRE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked(TMCB_RESPONSE_PRE_OUT,t,
				t->uas.request, relayed_msg, relayed_code);
		}
		SEND_PR_BUFFER( uas_rb, buf, res_len );
		LM_DBG("sent buf=%p: %.9s..., shmem=%p: %.9s\n",
			buf, buf, uas_rb->buffer.s, uas_rb->buffer.s );
		/* run the POST sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, t,
				t->uas.request, relayed_msg, relayed_code);
		}
		pkg_free( buf );
	}

	/* success */
	return reply_status;

error03:
	pkg_free( buf );
error02:
	if (save_clone) {
		if (t->uac[branch].reply!=FAKED_REPLY)
			sip_msg_free( t->uac[branch].reply );
		t->uac[branch].reply = NULL;
	}
error01:
	text.s = "Reply processing error";
	text.len = sizeof("Reply processing error")-1;
	t_reply_unsafe( t, t->uas.request, 500, &text );
	UNLOCK_REPLIES(t);
	if (is_invite(t)) cancel_uacs( t, *cancel_bitmap );
	/* a serious error occurred -- attempt to send an error reply;
	   it will take care of clean-ups */

	/* failure */
	return RPS_ERROR;
}
/* function returns: * 1 - forward successful * -1 - error during forward */ int t_forward_nonack( struct cell *t, struct sip_msg* p_msg , struct proxy_l * proxy, int reset_bcounter, int locked) { str reply_reason_487 = str_init("Request Terminated"); str backup_uri; str backup_dst; int branch_ret, lowest_ret; str current_uri; branch_bm_t added_branches; int i, q; struct cell *t_invite; int success_branch; str dst_uri; struct socket_info *bk_sock; unsigned int br_flags, bk_bflags; int idx; str path; str bk_path; /* make -Wall happy */ current_uri.s=0; /* before doing enything, update the t flags from msg */ t->uas.request->flags = p_msg->flags; if (p_msg->REQ_METHOD==METHOD_CANCEL) { t_invite=t_lookupOriginalT( p_msg ); if (t_invite!=T_NULL_CELL) { t_invite->flags |= T_WAS_CANCELLED_FLAG; cancel_invite( p_msg, t, t_invite, locked ); return 1; } } /* do not forward requests which were already cancelled*/ if (no_new_branches(t)) { LM_INFO("discarding fwd for a 6xx transaction\n"); ser_error = E_NO_DESTINATION; return -1; } if (was_cancelled(t)) { /* is this the first attempt of sending a branch out ? */ if (t->nr_of_outgoings==0) { /* if no other signalling was performed on the transaction * and the transaction was already canceled, better * internally generate the 487 reply here */ if (locked) t_reply_unsafe( t, p_msg , 487 , &reply_reason_487); else t_reply( t, p_msg , 487 , &reply_reason_487); } LM_INFO("discarding fwd for a cancelled transaction\n"); ser_error = E_NO_DESTINATION; return -1; } /* backup current uri, sock and flags... 
add_uac changes it */ backup_uri = p_msg->new_uri; backup_dst = p_msg->dst_uri; bk_sock = p_msg->force_send_socket; bk_path = p_msg->path_vec; bk_bflags = p_msg->ruri_bflags; /* advertised address/port are not changed */ /* check if the UAS retranmission port needs to be updated */ if ( (p_msg->msg_flags ^ t->uas.request->msg_flags) & FL_FORCE_RPORT ) su_setport( &t->uas.response.dst.to, p_msg->rcv.src_port ); /* if no more specific error code is known, use this */ lowest_ret=E_BUG; /* branches added */ added_branches=0; /* branch to begin with */ if (reset_bcounter) { t->first_branch=t->nr_of_outgoings; /* check if the previous branch is a PHONY one and if yes * keep it in the set of active branches; that means the * transaction had a t_wait_for_new_branches() call prior to relay() */ if ( t->first_branch>0 && (t->uac[t->first_branch-1].flags & T_UAC_IS_PHONY) ) t->first_branch--; } /* as first branch, use current uri */ current_uri = *GET_RURI(p_msg); branch_ret = add_uac( t, p_msg, ¤t_uri, &backup_dst, getb0flags(p_msg), &p_msg->path_vec, proxy); if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; /* ....and now add the remaining additional branches */ for( idx=0; (current_uri.s=get_branch( idx, ¤t_uri.len, &q, &dst_uri, &path, &br_flags, &p_msg->force_send_socket))!=0 ; idx++ ) { branch_ret = add_uac( t, p_msg, ¤t_uri, &dst_uri, br_flags, &path, proxy); /* pick some of the errors in case things go wrong; note that picking lowest error is just as good as any other algorithm which picks any other negative branch result */ if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; } /* consume processed branches */ clear_branches(); /* restore original stuff */ p_msg->new_uri=backup_uri; p_msg->parsed_uri_ok = 0;/* just to be sure; add_uac may parse other uris*/ p_msg->dst_uri = backup_dst; p_msg->force_send_socket = bk_sock; p_msg->path_vec = bk_path; p_msg->ruri_bflags = bk_bflags; /* update on_branch, _only_ if 
modified, otherwise it overwrites * whatever it is already in the transaction */ if (get_on_branch()) t->on_branch = get_on_branch(); /* update flags, if changed in branch route */ t->uas.request->flags = p_msg->flags; /* things went wrong ... no new branch has been fwd-ed at all */ if (added_branches==0) { LM_ERR("failure to add branches\n"); ser_error = lowest_ret; return lowest_ret; } /* send them out now */ success_branch=0; for (i=t->first_branch; i<t->nr_of_outgoings; i++) { if (added_branches & (1<<i)) { if (t->uac[i].br_flags & tcp_no_new_conn_bflag) tcp_no_new_conn = 1; do { if (check_blacklists( t->uac[i].request.dst.proto, &t->uac[i].request.dst.to, t->uac[i].request.buffer.s, t->uac[i].request.buffer.len)) { LM_DBG("blocked by blacklists\n"); ser_error=E_IP_BLOCKED; } else { run_trans_callbacks(TMCB_PRE_SEND_BUFFER, t, p_msg, 0, i); if (SEND_BUFFER( &t->uac[i].request)==0) { ser_error = 0; break; } LM_ERR("sending request failed\n"); ser_error=E_SEND; } /* get next dns entry */ if ( t->uac[i].proxy==0 || get_next_su( t->uac[i].proxy, &t->uac[i].request.dst.to, (ser_error==E_IP_BLOCKED)?0:1)!=0 ) break; t->uac[i].request.dst.proto = t->uac[i].proxy->proto; /* update branch */ if ( update_uac_dst( p_msg, &t->uac[i] )!=0) break; }while(1); tcp_no_new_conn = 0; if (ser_error) { shm_free(t->uac[i].request.buffer.s); t->uac[i].request.buffer.s = NULL; t->uac[i].request.buffer.len = 0; continue; } success_branch++; start_retr( &t->uac[i].request ); set_kr(REQ_FWDED); /* successfully sent out -> run callbacks */ if ( has_tran_tmcbs( t, TMCB_REQUEST_BUILT|TMCB_MSG_SENT_OUT) ) { set_extra_tmcb_params( &t->uac[i].request.buffer, &t->uac[i].request.dst); run_trans_callbacks( TMCB_REQUEST_BUILT|TMCB_MSG_SENT_OUT, t, p_msg, 0, 0); } } } return (success_branch>0)?1:-1; }
/* function returns: * 1 - forward successful * -1 - error during forward */ int t_forward_nonack( struct cell *t, struct sip_msg* p_msg , struct proxy_l * proxy) { str backup_uri; str backup_dst; int branch_ret, lowest_ret; str current_uri; branch_bm_t added_branches; int i, q; struct cell *t_invite; int success_branch; str dst_uri; struct socket_info *bk_sock; unsigned int br_flags; unsigned int bk_br_flags; int idx; str path; str bk_path; /* make -Wall happy */ current_uri.s=0; /* before doing enything, update the t falgs from msg */ t->uas.request->flags = p_msg->flags; if (p_msg->REQ_METHOD==METHOD_CANCEL) { t_invite=t_lookupOriginalT( p_msg ); if (t_invite!=T_NULL_CELL) { t_invite->flags |= T_WAS_CANCELLED_FLAG; cancel_invite( p_msg, t, t_invite ); return 1; } } /* do not forward requests which were already cancelled*/ if (was_cancelled(t) || no_new_branches(t)) { LM_ERR("discarding fwd for a cancelled/6xx transaction\n"); ser_error = E_NO_DESTINATION; return -1; } /* backup current uri, sock and flags... 
add_uac changes it */ backup_uri = p_msg->new_uri; backup_dst = p_msg->dst_uri; bk_sock = p_msg->force_send_socket; bk_br_flags = getb0flags(); bk_path = p_msg->path_vec; /* check if the UAS retranmission port needs to be updated */ if ( (p_msg->msg_flags ^ t->uas.request->msg_flags) & FL_FORCE_RPORT ) su_setport( &t->uas.response.dst.to, p_msg->rcv.src_port ); /* if no more specific error code is known, use this */ lowest_ret=E_BUG; /* branches added */ added_branches=0; /* branch to begin with */ t->first_branch=t->nr_of_outgoings; /* as first branch, use current uri */ current_uri = *GET_RURI(p_msg); branch_ret = add_uac( t, p_msg, ¤t_uri, &backup_dst, &p_msg->path_vec, proxy); if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; /* ....and now add the remaining additional branches */ for( idx=0; (current_uri.s=get_branch( idx, ¤t_uri.len, &q, &dst_uri, &path, &br_flags, &p_msg->force_send_socket))!=0 ; idx++ ) { setb0flags(br_flags); branch_ret = add_uac( t, p_msg, ¤t_uri, &dst_uri, &path, proxy); /* pick some of the errors in case things go wrong; note that picking lowest error is just as good as any other algorithm which picks any other negative branch result */ if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; } /* consume processed branches */ clear_branches(); /* restore original stuff */ p_msg->new_uri=backup_uri; p_msg->parsed_uri_ok = 0;/* just to be sure; add_uac may parse other uris*/ p_msg->dst_uri = backup_dst; p_msg->force_send_socket = bk_sock; p_msg->path_vec = bk_path; setb0flags(bk_br_flags); /* update on_branch, if modified */ t->on_branch = get_on_branch(); /* update flags, if changed in branch route */ t->uas.request->flags = p_msg->flags; /* things went wrong ... 
no new branch has been fwd-ed at all */ if (added_branches==0) { LM_ERR("failure to add branches\n"); ser_error = lowest_ret; return lowest_ret; } /* send them out now */ success_branch=0; for (i=t->first_branch; i<t->nr_of_outgoings; i++) { if (added_branches & (1<<i)) { #ifdef USE_TCP if (t->uac[i].br_flags & tcp_no_new_conn_bflag) tcp_no_new_conn = 1; #endif do { if (check_blacklists( t->uac[i].request.dst.proto, &t->uac[i].request.dst.to, t->uac[i].request.buffer.s, t->uac[i].request.buffer.len)) { LM_DBG("blocked by blacklists\n"); ser_error=E_IP_BLOCKED; } else { if (SEND_BUFFER( &t->uac[i].request)==0) { ser_error = 0; break; } LM_ERR("sending request failed\n"); ser_error=E_SEND; } /* get next dns entry */ if ( t->uac[i].proxy==0 || get_next_su( t->uac[i].proxy, &t->uac[i].request.dst.to, (ser_error==E_IP_BLOCKED)?0:1)!=0 ) break; t->uac[i].request.dst.proto = t->uac[i].proxy->proto; /* update branch */ if ( update_uac_dst( p_msg, &t->uac[i] )!=0) break; }while(1); #ifdef USE_TCP tcp_no_new_conn = 0; #endif if (ser_error) { shm_free(t->uac[i].request.buffer.s); t->uac[i].request.buffer.s = NULL; t->uac[i].request.buffer.len = 0; continue; } success_branch++; start_retr( &t->uac[i].request ); set_kr(REQ_FWDED); /* successfully sent out -> run callbacks */ if ( has_tran_tmcbs( t, TMCB_REQUEST_BUILT) ) { set_extra_tmcb_params( &t->uac[i].request.buffer, &t->uac[i].request.dst); run_trans_callbacks( TMCB_REQUEST_BUILT, t, p_msg,0, -p_msg->REQ_METHOD); } } } return (success_branch>0)?1:-1; }