inline static int w_t_reply(struct sip_msg* msg, char* str, char* str2) { struct cell *t; if (msg->REQ_METHOD==METHOD_ACK) { LOG(L_WARN, "WARNING: t_reply: ACKs are not replied\n"); return -1; } if (t_check( msg , 0 )==-1) return -1; t=get_t(); if (!t) { LOG(L_ERR, "ERROR: t_reply: cannot send a t_reply to a message " "for which no T-state has been established\n"); return -1; } /* if called from reply_route, make sure that unsafe version * is called; we are already in a mutex and another mutex in * the safe version would lead to a deadlock */ if (rmode==MODE_ONFAILURE) { DBG("DEBUG: t_reply_unsafe called from w_t_reply\n"); return t_reply_unsafe(t, msg, (unsigned int)(long) str, str2); } else if (rmode==MODE_REQUEST) { return t_reply( t, msg, (unsigned int)(long) str, str2); } else { LOG(L_CRIT, "BUG: w_t_reply entered in unsupported mode\n"); return -1; } }
inline static int w_t_reply(struct sip_msg* msg, char* str, char* str2) { struct cell *t; if (msg->REQ_METHOD==METHOD_ACK) { LOG(L_WARN, "WARNING: t_reply: ACKs are not replied\n"); return -1; } if (t_check( msg , 0 )==-1) return -1; t=get_t(); if (!t) { LOG(L_ERR, "ERROR: t_reply: cannot send a t_reply to a message " "for which no T-state has been established\n"); return -1; } /* if called from reply_route, make sure that unsafe version * is called; we are already in a mutex and another mutex in * the safe version would lead to a deadlock */ switch (route_type) { case FAILURE_ROUTE: DBG("DEBUG: t_reply_unsafe called from w_t_reply\n"); return t_reply_unsafe(t, msg, (unsigned int)(long) str, str2); case REQUEST_ROUTE: return t_reply( t, msg, (unsigned int)(long) str, str2); default: LOG(L_CRIT, "BUG:tm:w_t_reply: unsupported route_type (%d)\n", route_type); return -1; } }
inline static int w_t_reply(struct sip_msg* msg, char* p1, char* p2) { struct cell *t; int code, ret = -1; str reason; char* r; if (msg->REQ_METHOD==METHOD_ACK) { LOG(L_WARN, "WARNING: t_reply: ACKs are not replied\n"); return -1; } if (t_check( msg , 0 )==-1) return -1; t=get_t(); if (!t) { LOG(L_ERR, "ERROR: t_reply: cannot send a t_reply to a message " "for which no T-state has been established\n"); return -1; } if (get_int_fparam(&code, msg, (fparam_t*)p1) < 0) { code = default_code; } if (get_str_fparam(&reason, msg, (fparam_t*)p2) < 0) { reason = default_reason; } r = as_asciiz(&reason); if (r == NULL) r = default_reason.s; /* if called from reply_route, make sure that unsafe version * is called; we are already in a mutex and another mutex in * the safe version would lead to a deadlock */ if (rmode==MODE_ONFAILURE) { DBG("DEBUG: t_reply_unsafe called from w_t_reply\n"); ret = t_reply_unsafe(t, msg, code, r); } else if (rmode==MODE_REQUEST) { ret = t_reply( t, msg, code, r); } else { LOG(L_CRIT, "BUG: w_t_reply entered in unsupported mode\n"); ret = -1; } if (r) pkg_free(r); return ret; }
void cancel_invite(struct sip_msg *cancel_msg, struct cell *t_cancel, struct cell *t_invite, int locked) { branch_bm_t cancel_bitmap; str reason; cancel_bitmap=0; /* send back 200 OK as per RFC3261 */ reason.s = CANCELING; reason.len = sizeof(CANCELING)-1; if (locked) t_reply_unsafe( t_cancel, cancel_msg, 200, &reason ); else t_reply( t_cancel, cancel_msg, 200, &reason ); get_cancel_reason(cancel_msg, t_cancel->flags, &reason); /* generate local cancels for all branches */ which_cancel(t_invite, &cancel_bitmap ); set_cancel_extra_hdrs( reason.s, reason.len); cancel_uacs(t_invite, cancel_bitmap ); set_cancel_extra_hdrs( NULL, 0); /* Do not do anything about branches with no received reply; * continue the retransmission hoping to get something back; * if still not, we will generate the 408 Timeout based on FR * timer; this helps with better coping with missed/lated provisional * replies in the context of cancelling the transaction */ #if 0 /* internally cancel branches with no received reply */ for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) { if (t_invite->uac[i].last_received==0){ /* reset the "request" timers */ reset_timer(&t_invite->uac[i].request.retr_timer); reset_timer(&t_invite->uac[i].request.fr_timer); LOCK_REPLIES( t_invite ); relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm); } } #endif }
inline static int w_t_reply(struct sip_msg* msg, char* code, char* text) { struct cell *t; int r; if (msg->REQ_METHOD==METHOD_ACK) { LM_DBG("ACKs are not replied\n"); return 0; } switch (route_type) { case FAILURE_ROUTE: /* if called from reply_route, make sure that unsafe version * is called; we are already in a mutex and another mutex in * the safe version would lead to a deadlock */ t=get_t(); if ( t==0 || t==T_UNDEFINED ) { LM_ERR("BUG - no transaction found in Failure Route\n"); return -1; } return t_reply_unsafe(t, msg, (unsigned int)(long)code,(str*)text); case REQUEST_ROUTE: t=get_t(); if ( t==0 || t==T_UNDEFINED ) { r = t_newtran( msg , 0/*no full UAS cloning*/ ); if (r==0) { /* retransmission -> break the script */ return 0; } else if (r<0) { LM_ERR("could not create a new transaction\n"); return -1; } t=get_t(); } return t_reply( t, msg, (unsigned int)(long)code, (str*)text); default: LM_CRIT("unsupported route_type (%d)\n", route_type); return -1; } }
/* unsafe version of kill_transaction() that works * in failure route * WARNING: assumes that the reply lock is held! */ int kill_transaction_unsafe( struct cell *trans, int error ) { char err_buffer[128]; int sip_err; int reply_ret; int ret; /* we reply statefully and enter WAIT state since error might have occurred in middle of forking and we do not want to put the forking burden on upstream client; however, it may fail too due to lack of memory */ ret=err2reason_phrase(error, &sip_err, err_buffer, sizeof(err_buffer), "TM" ); if (ret>0) { reply_ret=t_reply_unsafe( trans, trans->uas.request, sip_err, err_buffer); /* t_release_transaction( T ); */ return reply_ret; } else { LOG(L_ERR, "ERROR: kill_transaction_unsafe: err2reason failed\n"); return -1; } }
/* This is the code which decides what and when shall be relayed
 * upstream; note well -- it assumes it is entered locked with
 * REPLY_LOCK and it returns unlocked!
 *
 * t          - the transaction the reply belongs to
 * p_msg      - the received reply (may be FAKED_REPLY)
 * branch     - index of the branch the reply arrived on
 * msg_status - the reply's status code
 * cancel_bitmap - out: branches to be cancelled (used on error too)
 *
 * Returns the t_should_relay_response() verdict, or RPS_ERROR after
 * sending a 500 on any internal failure. */
enum rps relay_reply( struct cell *t, struct sip_msg *p_msg, int branch,
	unsigned int msg_status, branch_bm_t *cancel_bitmap )
{
	int relay;
	int save_clone;
	char *buf;
	/* length of outbound reply */
	unsigned int res_len;
	int relayed_code;
	struct sip_msg *relayed_msg;
	struct bookmark bm;
	int totag_retr;
	enum rps reply_status;
	/* retransmission structure of outbound reply and request */
	struct retr_buf *uas_rb;
	str cb_s;
	str text;

	/* keep compiler warnings about use of uninit vars silent */
	res_len=0;
	buf=0;
	relayed_msg=0;
	relayed_code=0;
	totag_retr=0;

	/* remember, what was sent upstream to know whether we are
	 * forwarding a first final reply or not */

	/* *** store and relay message as needed *** */
	reply_status = t_should_relay_response(t, msg_status, branch,
		&save_clone, &relay, cancel_bitmap, p_msg );
	LM_DBG("branch=%d, save=%d, relay=%d\n",
		branch, save_clone, relay );

	/* store the message if needed */
	if (save_clone) /* save for later use, typically branch picking */
	{
		if (!store_reply( t, branch, p_msg ))
			goto error01;
	}

	uas_rb = & t->uas.response;
	if (relay >= 0 ) {
		/* initialize sockets for outbound reply */
		uas_rb->activ_type=msg_status;

		t->relaied_reply_branch = relay;

		/* try building the outbound reply from either the current
		 * or a stored message */
		relayed_msg = branch==relay ? p_msg :  t->uac[relay].reply;
		if (relayed_msg==FAKED_REPLY) {
			/* locally generated reply: build it from the UAS request;
			 * the code comes from this branch or the stored one */
			relayed_code = branch==relay
				? msg_status : t->uac[relay].last_received;
			text.s = error_text(relayed_code);
			text.len = strlen(text.s); /* FIXME - bogdan*/
			/* add a to-tag for >=180 replies when the request's To
			 * header carries none */
			if (relayed_code>=180 && t->uas.request->to
					&& (get_to(t->uas.request)->tag_value.s==0
					|| get_to(t->uas.request)->tag_value.len==0)) {
				calc_crc_suffix( t->uas.request, tm_tag_suffix );
				buf = build_res_buf_from_sip_req(
						relayed_code, &text, &tm_tag,
						t->uas.request, &res_len, &bm );
			} else {
				buf = build_res_buf_from_sip_req( relayed_code, &text,
					0/* no to-tag */, t->uas.request, &res_len, &bm );
			}
		} else {
			/* run callbacks for all types of responses -
			 * even if they are shmem-ed or not */
			if (has_tran_tmcbs(t,TMCB_RESPONSE_FWDED) ) {
				run_trans_callbacks( TMCB_RESPONSE_FWDED, t,
					t->uas.request, relayed_msg, msg_status );
			}
			relayed_code=relayed_msg->REPLY_STATUS;
			buf = build_res_buf_from_sip_res( relayed_msg, &res_len,
					uas_rb->dst.send_sock);
			/* remove all lumps which are not in shm
			 * added either by build_res_buf_from_sip_res, or by
			 * the callbacks that have been called with shmem-ed messages - vlad */
			if (branch!=relay) {
				del_notflaged_lumps( &(relayed_msg->add_rm), LUMPFLAG_SHMEM);
				del_notflaged_lumps( &(relayed_msg->body_lumps), LUMPFLAG_SHMEM);
			}
		}
		if (!buf) {
			LM_ERR("no mem for outbound reply buffer\n");
			goto error02;
		}

		/* attempt to copy the message to UAS's shmem:
		   - copy to-tag for ACK matching as well
		   -  allocate little a bit more for provisional as
		      larger messages are likely to follow and we will be
		      able to reuse the memory frag */
		uas_rb->buffer.s = (char*)shm_resize( uas_rb->buffer.s, res_len +
			(msg_status<200 ?  REPLY_OVERBUFFER_LEN : 0));
		if (!uas_rb->buffer.s) {
			LM_ERR("no more share memory\n");
			goto error03;
		}
		uas_rb->buffer.len = res_len;
		memcpy( uas_rb->buffer.s, buf, res_len );
		if (relayed_msg==FAKED_REPLY) { /* to-tags for local replies */
			update_local_tags(t, &bm, uas_rb->buffer.s, buf);
		}
		stats_trans_rpl( relayed_code, (relayed_msg==FAKED_REPLY)?1:0 );

		/* update the status ... */
		t->uas.status = relayed_code;

		/* 2xx to INVITE: record the to-tag set so retransmitted 2xx
		 * from other to-tags do not re-trigger the OUT callbacks */
		if (is_invite(t) && relayed_msg!=FAKED_REPLY
		&& relayed_code>=200 && relayed_code < 300
		&& has_tran_tmcbs( t,
		TMCB_RESPONSE_OUT|TMCB_RESPONSE_PRE_OUT)) {
			totag_retr=update_totag_set(t, relayed_msg);
		}
	}; /* if relay ... */
	UNLOCK_REPLIES( t );

	/* Setup retransmission timer _before_ the reply is sent
	 * to avoid race conditions */
	if (reply_status == RPS_COMPLETED) {
		/* for auth related replies, we do not do retransmission
		   (via set_final_timer()), but only wait for a final reply
		   (put_on_wait() ) - see RFC 3261 (26.3.2.4 DoS Protection) */
		if ((relayed_code != 401) && (relayed_code != 407))
			set_final_timer(t);
		else
			put_on_wait(t);
	}

	/* send it now (from the private buffer) */
	if (relay >= 0) {
		/* run the PRE sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_PRE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked(TMCB_RESPONSE_PRE_OUT,t,
				t->uas.request, relayed_msg, relayed_code);
		}
		SEND_PR_BUFFER( uas_rb, buf, res_len );
		LM_DBG("sent buf=%p: %.9s..., shmem=%p: %.9s\n",
			buf, buf, uas_rb->buffer.s, uas_rb->buffer.s );
		/* run the POST sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, t,
				t->uas.request, relayed_msg, relayed_code);
		}
		pkg_free( buf );
	}

	/* success */
	return reply_status;

error03:
	pkg_free( buf );
error02:
	if (save_clone) {
		if (t->uac[branch].reply!=FAKED_REPLY)
			sip_msg_free( t->uac[branch].reply );
		t->uac[branch].reply = NULL;
	}
error01:
	/* a serious error occurred -- attempt to send an error reply;
	   it will take care of clean-ups */
	text.s = "Reply processing error";
	text.len = sizeof("Reply processing error")-1;
	t_reply_unsafe( t, t->uas.request, 500, &text );
	UNLOCK_REPLIES(t);
	if (is_invite(t)) cancel_uacs( t, *cancel_bitmap );
	/* failure */
	return RPS_ERROR;
}
/* function returns: * 1 - forward successful * -1 - error during forward */ int t_forward_nonack( struct cell *t, struct sip_msg* p_msg , struct proxy_l * proxy, int reset_bcounter, int locked) { str reply_reason_487 = str_init("Request Terminated"); str backup_uri; str backup_dst; int branch_ret, lowest_ret; str current_uri; branch_bm_t added_branches; int i, q; struct cell *t_invite; int success_branch; str dst_uri; struct socket_info *bk_sock; unsigned int br_flags, bk_bflags; int idx; str path; str bk_path; /* make -Wall happy */ current_uri.s=0; /* before doing enything, update the t flags from msg */ t->uas.request->flags = p_msg->flags; if (p_msg->REQ_METHOD==METHOD_CANCEL) { t_invite=t_lookupOriginalT( p_msg ); if (t_invite!=T_NULL_CELL) { t_invite->flags |= T_WAS_CANCELLED_FLAG; cancel_invite( p_msg, t, t_invite, locked ); return 1; } } /* do not forward requests which were already cancelled*/ if (no_new_branches(t)) { LM_INFO("discarding fwd for a 6xx transaction\n"); ser_error = E_NO_DESTINATION; return -1; } if (was_cancelled(t)) { /* is this the first attempt of sending a branch out ? */ if (t->nr_of_outgoings==0) { /* if no other signalling was performed on the transaction * and the transaction was already canceled, better * internally generate the 487 reply here */ if (locked) t_reply_unsafe( t, p_msg , 487 , &reply_reason_487); else t_reply( t, p_msg , 487 , &reply_reason_487); } LM_INFO("discarding fwd for a cancelled transaction\n"); ser_error = E_NO_DESTINATION; return -1; } /* backup current uri, sock and flags... 
add_uac changes it */ backup_uri = p_msg->new_uri; backup_dst = p_msg->dst_uri; bk_sock = p_msg->force_send_socket; bk_path = p_msg->path_vec; bk_bflags = p_msg->ruri_bflags; /* advertised address/port are not changed */ /* check if the UAS retranmission port needs to be updated */ if ( (p_msg->msg_flags ^ t->uas.request->msg_flags) & FL_FORCE_RPORT ) su_setport( &t->uas.response.dst.to, p_msg->rcv.src_port ); /* if no more specific error code is known, use this */ lowest_ret=E_BUG; /* branches added */ added_branches=0; /* branch to begin with */ if (reset_bcounter) { t->first_branch=t->nr_of_outgoings; /* check if the previous branch is a PHONY one and if yes * keep it in the set of active branches; that means the * transaction had a t_wait_for_new_branches() call prior to relay() */ if ( t->first_branch>0 && (t->uac[t->first_branch-1].flags & T_UAC_IS_PHONY) ) t->first_branch--; } /* as first branch, use current uri */ current_uri = *GET_RURI(p_msg); branch_ret = add_uac( t, p_msg, ¤t_uri, &backup_dst, getb0flags(p_msg), &p_msg->path_vec, proxy); if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; /* ....and now add the remaining additional branches */ for( idx=0; (current_uri.s=get_branch( idx, ¤t_uri.len, &q, &dst_uri, &path, &br_flags, &p_msg->force_send_socket))!=0 ; idx++ ) { branch_ret = add_uac( t, p_msg, ¤t_uri, &dst_uri, br_flags, &path, proxy); /* pick some of the errors in case things go wrong; note that picking lowest error is just as good as any other algorithm which picks any other negative branch result */ if (branch_ret>=0) added_branches |= 1<<branch_ret; else lowest_ret=branch_ret; } /* consume processed branches */ clear_branches(); /* restore original stuff */ p_msg->new_uri=backup_uri; p_msg->parsed_uri_ok = 0;/* just to be sure; add_uac may parse other uris*/ p_msg->dst_uri = backup_dst; p_msg->force_send_socket = bk_sock; p_msg->path_vec = bk_path; p_msg->ruri_bflags = bk_bflags; /* update on_branch, _only_ if 
modified, otherwise it overwrites * whatever it is already in the transaction */ if (get_on_branch()) t->on_branch = get_on_branch(); /* update flags, if changed in branch route */ t->uas.request->flags = p_msg->flags; /* things went wrong ... no new branch has been fwd-ed at all */ if (added_branches==0) { LM_ERR("failure to add branches\n"); ser_error = lowest_ret; return lowest_ret; } /* send them out now */ success_branch=0; for (i=t->first_branch; i<t->nr_of_outgoings; i++) { if (added_branches & (1<<i)) { if (t->uac[i].br_flags & tcp_no_new_conn_bflag) tcp_no_new_conn = 1; do { if (check_blacklists( t->uac[i].request.dst.proto, &t->uac[i].request.dst.to, t->uac[i].request.buffer.s, t->uac[i].request.buffer.len)) { LM_DBG("blocked by blacklists\n"); ser_error=E_IP_BLOCKED; } else { run_trans_callbacks(TMCB_PRE_SEND_BUFFER, t, p_msg, 0, i); if (SEND_BUFFER( &t->uac[i].request)==0) { ser_error = 0; break; } LM_ERR("sending request failed\n"); ser_error=E_SEND; } /* get next dns entry */ if ( t->uac[i].proxy==0 || get_next_su( t->uac[i].proxy, &t->uac[i].request.dst.to, (ser_error==E_IP_BLOCKED)?0:1)!=0 ) break; t->uac[i].request.dst.proto = t->uac[i].proxy->proto; /* update branch */ if ( update_uac_dst( p_msg, &t->uac[i] )!=0) break; }while(1); tcp_no_new_conn = 0; if (ser_error) { shm_free(t->uac[i].request.buffer.s); t->uac[i].request.buffer.s = NULL; t->uac[i].request.buffer.len = 0; continue; } success_branch++; start_retr( &t->uac[i].request ); set_kr(REQ_FWDED); /* successfully sent out -> run callbacks */ if ( has_tran_tmcbs( t, TMCB_REQUEST_BUILT|TMCB_MSG_SENT_OUT) ) { set_extra_tmcb_params( &t->uac[i].request.buffer, &t->uac[i].request.dst); run_trans_callbacks( TMCB_REQUEST_BUILT|TMCB_MSG_SENT_OUT, t, p_msg, 0, 0); } } } return (success_branch>0)?1:-1; }