Example #1
int cancel_all_uacs(struct cell *trans, int how)
{
	struct cancel_info cancel_data;
	int i,j;

#ifdef EXTRA_DEBUG
	assert(trans);
#endif
	DBG("Canceling T@%p [%u:%u]\n", trans, trans->hash_index, trans->label);
	
	init_cancel_info(&cancel_data);
	prepare_to_cancel(trans, &cancel_data.cancel_bitmap, 0);
	/* tell tm to cancel the call */
	i=cancel_uacs(trans, &cancel_data, how);
	
	if (how & F_CANCEL_UNREF)
#ifndef TM_DEL_UNREF
	/* in case of 'too many' _buggy_ invocations, the ref count (a uint) might 
	 * actually wrap around, possibly leaving the T leaking. */
#warning "use of F_CANCEL_UNREF flag is unsafe without defining TM_DEL_UNREF"
#endif
		UNREF(trans);

	/* count the still active branches */
	if (! how) {
		j=0;
		while(i){
			j++;
			i&=i-1;
		}
		return j;
	}
	return 0;
}
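
The loop above that counts the still-active branches is Kernighan's population count: each "i &= i-1" clears the lowest set bit of the branch bitmask, so the loop body runs once per set bit. A standalone illustration (not project code):

#include <stdio.h>

/* count set bits by clearing the lowest one on each pass */
static int count_set_bits(unsigned int bm)
{
	int n = 0;
	while (bm) {
		bm &= bm - 1;
		n++;
	}
	return n;
}

int main(void)
{
	/* 0x2d = 101101b: branches 0, 2, 3 and 5 set -> prints 4 */
	printf("%d\n", count_set_bits(0x2du));
	return 0;
}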
Example #2
void cancel_invite(struct sip_msg *cancel_msg,
								struct cell *t_cancel, struct cell *t_invite )
{
#define CANCEL_REASON_SIP_487  \
	"Reason: SIP;cause=487;text=\"ORIGINATOR_CANCEL\"" CRLF

	branch_bm_t cancel_bitmap;
	branch_bm_t dummy_bm;
	str reason;
	unsigned int i;
	struct hdr_field *hdr;

	cancel_bitmap=0;

	/* send back 200 OK as per RFC3261 */
	reason.s = CANCELING;
	reason.len = sizeof(CANCELING)-1;
	t_reply( t_cancel, cancel_msg, 200, &reason );

	reason.s = NULL;
	reason.len = 0;
	/* propagate the REASON flag ? */
	if ( t_cancel->flags&T_CANCEL_REASON_FLAG ) {
		/* look for the Reason header */
		if (parse_headers(cancel_msg, HDR_EOH_F, 0)<0) {
			LM_ERR("failed to parse all hdrs - ignoring Reason hdr\n");
		} else {
			hdr = get_header_by_static_name(cancel_msg, "Reason");
			if (hdr!=NULL) {
				reason.s = hdr->name.s;
				reason.len = hdr->len;
			}
		}
	}

	/* if no Reason found, fall back to the default 487 ORIGINATOR_CANCEL */
	if (reason.s == NULL) {
		reason.s = CANCEL_REASON_SIP_487;
		reason.len = sizeof(CANCEL_REASON_SIP_487) - 1;
	}

	/* generate local cancels for all branches */
	which_cancel(t_invite, &cancel_bitmap );

	set_cancel_extra_hdrs( reason.s, reason.len);
	cancel_uacs(t_invite, cancel_bitmap );
	set_cancel_extra_hdrs( NULL, 0);

	/* internally cancel branches with no received reply */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (t_invite->uac[i].last_received==0){
			/* reset the "request" timers */
			reset_timer(&t_invite->uac[i].request.retr_timer);
			reset_timer(&t_invite->uac[i].request.fr_timer);
			LOCK_REPLIES( t_invite );
			relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm);
		}
	}
}
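
which_cancel() itself is not shown in these examples; conceptually, it marks in the bitmask every branch that is still awaiting a final reply. A simplified sketch of that marking step (an assumption about its behavior, not the project's implementation):

typedef unsigned int branch_bm_t;

/* sketch: set a bit for every branch still lacking a final (>=200) reply */
static void mark_unreplied_branches(const int *last_received,
		int first_branch, int nr_of_outgoings, branch_bm_t *bm)
{
	int i;
	for (i = first_branch; i < nr_of_outgoings; i++)
		if (last_received[i] < 200)
			*bm |= (branch_bm_t)1 << i;
}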
Example #3
File: tm.c Project: Deni90/opensips
inline static int w_t_cancel_branch(struct sip_msg *msg, char *sflags)
{
	branch_bm_t cancel_bitmap = 0;
	struct cell *t;
	unsigned int flags = (unsigned long)sflags;

	t=get_t();

	if (t==NULL || t==T_UNDEFINED) {
		/* no transaction */
		LM_ERR("cannot cancel a reply with no transaction");
		return -1;
	}
	if (!is_invite(t))
		return -1;

	if (flags&TM_CANCEL_BRANCH_ALL) {
		/* lock and get the branches to cancel */
		if (!onreply_avp_mode) {
			LOCK_REPLIES(t);
			which_cancel( t, &cancel_bitmap );
			UNLOCK_REPLIES(t);
		} else {
			which_cancel( t, &cancel_bitmap );
		}
		if (msg->first_line.u.reply.statuscode>=200)
			/* do not cancel the current branch as we got
			 * a final response here */
			cancel_bitmap &= ~(1<<_tm_branch_index);
	} else if (flags&TM_CANCEL_BRANCH_OTHERS) {
		/* lock and get the branches to cancel */
		if (!onreply_avp_mode) {
			LOCK_REPLIES(t);
			which_cancel( t, &cancel_bitmap );
			UNLOCK_REPLIES(t);
		} else {
			which_cancel( t, &cancel_bitmap );
		}
		/* ignore current branch */
		cancel_bitmap &= ~(1<<_tm_branch_index);
	} else {
		/* cancel only local branch (only if still ongoing) */
		if (msg->first_line.u.reply.statuscode<200)
			cancel_bitmap = 1<<_tm_branch_index;
	}

	/* send cancels out */
	cancel_uacs(t, cancel_bitmap);

	return 1;
}
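
The (unsigned long)sflags cast above is the usual OpenSIPS fixup pattern: the string module parameter is parsed once at startup and the resulting bitmask is stored in the char* slot. A hypothetical fixup sketch (the flag letters and the older-style fixup signature are assumptions, not taken from this project):

/* hypothetical fixup: turn a flag string into a bitmask, stored in-place */
static int fixup_cancel_branch(void **param, int param_no)
{
	unsigned long flags = 0;
	char *s = (char *)*param;

	for (; s && *s; s++) {
		switch (*s) {
			case 'a': flags |= TM_CANCEL_BRANCH_ALL; break;    /* assumed */
			case 'o': flags |= TM_CANCEL_BRANCH_OTHERS; break; /* assumed */
		}
	}
	*param = (void *)flags;
	return 0;
}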
Example #4
void cancel_invite(struct sip_msg *cancel_msg,
					struct cell *t_cancel, struct cell *t_invite, int locked)
{

	branch_bm_t cancel_bitmap;
	str reason;

	cancel_bitmap=0;

	/* send back 200 OK as per RFC3261 */
	reason.s = CANCELING;
	reason.len = sizeof(CANCELING)-1;
	if (locked)
		t_reply_unsafe( t_cancel, cancel_msg, 200, &reason );
	else
		t_reply( t_cancel, cancel_msg, 200, &reason );

	get_cancel_reason(cancel_msg, t_cancel->flags, &reason);

	/* generate local cancels for all branches */
	which_cancel(t_invite, &cancel_bitmap );

	set_cancel_extra_hdrs( reason.s, reason.len);
	cancel_uacs(t_invite, cancel_bitmap );
	set_cancel_extra_hdrs( NULL, 0);

	/* Do not do anything about branches with no received reply;
	 * continue the retransmissions, hoping to get something back;
	 * if still nothing, we will generate the 408 Timeout based on the FR
	 * timer; this helps us better cope with missed/late provisional
	 * replies while cancelling the transaction
	 */
#if 0
	/* internally cancel branches with no received reply */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (t_invite->uac[i].last_received==0){
			/* reset the "request" timers */
			reset_timer(&t_invite->uac[i].request.retr_timer);
			reset_timer(&t_invite->uac[i].request.fr_timer);
			LOCK_REPLIES( t_invite );
			relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm);
		}
	}
#endif
}
Example #5
/* fifo command to cancel a pending call (Uli)
 * Syntax:
 *
 * ":uac_cancel:[response file]\n
 *  callid\n
 *  cseq\n"
 */
void rpc_cancel(rpc_t* rpc, void* c)
{
	struct cell *trans;
	static char cseq[128], callid[128];
	struct cancel_info cancel_data;
	int i,j;

	str cseq_s;   /* cseq */
	str callid_s; /* callid */

	cseq_s.s=cseq;
	callid_s.s=callid;
	init_cancel_info(&cancel_data);

	if (rpc->scan(c, "SS", &callid_s, &cseq_s) < 2) {
		rpc->fault(c, 400, "Callid and CSeq expected as parameters");
		return;
	}

	if( t_lookup_callid(&trans, callid_s, cseq_s) < 0 ) {
		DBG("Lookup failed\n");
		rpc->fault(c, 400, "Transaction not found");
		return;
	}
	/* find the branches that need canceling */
	prepare_to_cancel(trans, &cancel_data.cancel_bitmap, 0);
	/* tell tm to cancel the call */
	DBG("Now calling cancel_uacs\n");
	/* don't fake 487s, just wait for timeout */
	i=cancel_uacs(trans, &cancel_data, 0);
	
	/* t_lookup_callid REF`d the transaction for us, we must UNREF here! */
	UNREF(trans);
	j=0;
	while(i){
		j++;
		i&=i-1;
	}
	rpc->add(c, "ds", j, "branches remaining (waiting for timeout)");
}
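
Following the syntax documented in the header comment, a FIFO invocation could look like this (the call-id and CSeq values are purely illustrative):

	:uac_cancel:reply_fifo
	abcdef123456@10.0.0.1
	314 INVITE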
Example #6
File: tm.c Project: Deni90/opensips
int t_cancel_trans(struct cell *t, str *extra_hdrs)
{
	branch_bm_t cancel_bitmap = 0;

	if (t==NULL || t==T_UNDEFINED) {
		/* no transaction */
		LM_ERR("cannot cancel with no transaction");
		return -1;
	}

	LOCK_REPLIES(t);
	which_cancel( t, &cancel_bitmap );
	UNLOCK_REPLIES(t);

	/* send cancels out */
	if (extra_hdrs)
		set_cancel_extra_hdrs( extra_hdrs->s, extra_hdrs->len);
	cancel_uacs(t, cancel_bitmap);
	set_cancel_extra_hdrs( NULL, 0);

	return 0;
}
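
A hypothetical caller (not from the project) showing how the extra_hdrs parameter attaches a custom Reason header to the outgoing CANCELs:

/* hypothetical helper: cancel all open branches with a custom Reason */
static int cancel_with_reason(struct cell *t)
{
	str reason_hdr = str_init("Reason: SIP;cause=480;text=\"Away\"\r\n");

	return t_cancel_trans(t, &reason_hdr);
}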
Example #7
/* This is the neurological point of reply processing -- called
 * from within a REPLY_LOCK, t_should_relay_response decides
 * how a reply shall be processed and how transaction state is
 * affected.
 *
 * Checks whether the new reply (with status new_code) should be sent,
 * based on the current transaction status.
 * Returns: the branch number (0,1,...) which should be relayed, or
 *          -1 if nothing is to be relayed
 */
static enum rps t_should_relay_response( struct cell *Trans , int new_code,
	int branch , int *should_store, int *should_relay,
	branch_bm_t *cancel_bitmap, struct sip_msg *reply )
{
	int branch_cnt;
	int picked_code;
	int inv_through;
	int do_cancel;

	/* note: this code never lets replies to CANCEL go through;
	   we always generate a local 200 for CANCEL; 200s are
	   not relayed because it's not an INVITE transaction;
	   >= 300 are not relayed because 200 was already sent
	   out
	*/
	LM_DBG("T_code=%d, new_code=%d\n", Trans->uas.status,new_code);
	inv_through=new_code>=200 && new_code<300 && is_invite(Trans);
	/* if final response sent out, allow only INVITE 2xx  */
	if ( Trans->uas.status >= 200 ) {
		if (inv_through) {
			LM_DBG("200 OK for INVITE after final sent\n");
			*should_store=0;
			Trans->uac[branch].last_received=new_code;
			*should_relay=branch;
			return RPS_PUSHED_AFTER_COMPLETION;
		} 
		if ( is_hopbyhop_cancel(Trans) && new_code>=200) {
			*should_store=0;
			*should_relay=-1;
			picked_branch=-1;
			return RPS_COMPLETED;
		}
		/* apart from the exception above, too-late messages will
		   be discarded */
		goto discard;
	} 

	/* if final response received at this branch, allow only INVITE 2xx */
	if (Trans->uac[branch].last_received>=200
			&& !(inv_through && Trans->uac[branch].last_received<300)) {
#ifdef EXTRA_DEBUG
		/* don't report on retransmissions */
		if (Trans->uac[branch].last_received==new_code) {
			LM_DBG("final reply retransmission\n");
		} else
		/* if you FR-timed-out, faked a local 408 and 487 came, don't
		 * report on it either */
		if (Trans->uac[branch].last_received==408 && new_code==487) {
			LM_DBG("487 reply came for a timed-out branch\n");
		} else {
		/* this however looks like a very strange status rewrite attempt;
		 * report on it */
			LM_DBG("status rewrite by UAS: stored: %d, received: %d\n",
				Trans->uac[branch].last_received, new_code );
		}
#endif
		goto discard;
	}

	/* no final response sent yet */
	/* negative replies subject to fork picking */
	if (new_code >=300 ) {

		Trans->uac[branch].last_received=new_code;
		/* also append the current reply to the transaction to 
		 * make it available in failure routes - a kind of "fake"
		 * save of the final reply per branch */
		Trans->uac[branch].reply = reply;

		if (new_code>=600 && !disable_6xx_block) {
			/* this is a winner; close all branches */
			which_cancel( Trans, cancel_bitmap );
			picked_branch=branch;
			/* no more new branches should be added to this transaction */
			Trans->flags |= T_NO_NEW_BRANCHES_FLAG;
		} else {
			/* if all_final return lowest */
			picked_branch = t_pick_branch( Trans, &picked_code, &do_cancel);
			if (picked_branch==-2) { /* branches still open */
				*should_store=1;
				*should_relay=-1;
				picked_branch=-1;
				Trans->uac[branch].reply = 0;
				return RPS_STORE;
			}
			if (picked_branch==-1) {
				LM_CRIT("pick_branch failed (lowest==-1) for code %d\n",new_code);
				Trans->uac[branch].reply = 0;
				goto discard;
			}
			if (do_cancel) {
				branch_bm_t cb = 0;
				which_cancel( Trans, &cb );
				cleanup_uac_timers(Trans);
				cancel_uacs( Trans, cb);
			}
		}

		/* no more pending branches -- see if that changes after
		 * a callback; save the branch count to be able to determine
		 * later if new branches were initiated */
		branch_cnt=Trans->nr_of_outgoings;
		reset_kr();

		if ( !(Trans->flags&T_NO_DNS_FAILOVER_FLAG) &&
		Trans->uac[picked_branch].proxy!=NULL ) {
			/* is this a DNS failover scenario, according to RFC 3263? */
			if (is_3263_failure(Trans)) {
				LM_DBG("trying DNS-based failover\n");
				/* do DNS failover -> add new branches */
				if (do_dns_failover( Trans )!=0) {
					/* skip the failed added branches */
					branch_cnt = Trans->nr_of_outgoings;
				}
			}
		}

		/* run ON_FAILURE handlers ( route and callbacks) */
		if ( branch_cnt==Trans->nr_of_outgoings &&
		(has_tran_tmcbs( Trans, TMCB_ON_FAILURE) || Trans->on_negative) ) {
			run_failure_handlers( Trans );
		}

		/* now reset it; after the failure logic, the reply may
		 * no longer be stored and we don't want to keep a broken
		 * reference in the transaction */
		Trans->uac[branch].reply = 0;

		/* check whether the callback perhaps replied the transaction; it
		   also covers the case in which a transaction is replied locally
		   on CANCEL -- then it would make no sense to proceed to the
		   new branches below
		*/
		if (Trans->uas.status >= 200) {
			*should_store=0;
			*should_relay=-1;
			/* this might deserve an improvement -- if something
			   was already replied, it was put on wait and then,
			   returning RPS_COMPLETED will make t_on_reply
			   put it on wait again; perhaps splitting put_on_wait
			   from send_reply or a new RPS_ code would be healthy
			*/
			picked_branch=-1;
			return RPS_COMPLETED;
		}
		/* check if the callback/failure_route introduced new branches ... */
		if (branch_cnt<Trans->nr_of_outgoings && get_kr()==REQ_FWDED)  {
			/* then await the result of the new branches */
			*should_store=1;
			*should_relay=-1;
			picked_branch=-1;
			return RPS_STORE;
		}

		/* really no more pending branches -- return selected code */
		*should_store=0;
		*should_relay=picked_branch;
		picked_branch=-1;
		return RPS_COMPLETED;
	} 

	/* not >=300 ... it must be 2xx or provisional 1xx */
	if (new_code>=100) {
		/* 1xx and 2xx except 100 will be relayed */
		Trans->uac[branch].last_received=new_code;
		*should_store=0;
		*should_relay= new_code==100? -1 : branch;
		if (new_code>=200 ) {
			which_cancel( Trans, cancel_bitmap );
			return RPS_COMPLETED;
		} else return RPS_PROVISIONAL;
	}

discard:
	*should_store=0;
	*should_relay=-1;
	return RPS_DISCARDED;
}
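
A distilled caller pattern for t_should_relay_response() (a sketch of how local_reply()/relay_reply() below consume the two out-parameters; not project code):

static void handle_reply_sketch(struct cell *t, struct sip_msg *msg,
		int branch, int code)
{
	int store, relay;
	branch_bm_t bm = 0;
	enum rps r;

	r = t_should_relay_response(t, code, branch, &store, &relay, &bm, msg);
	if (store)        /* keep the reply for later branch picking */
		store_reply(t, branch, msg);
	if (relay >= 0) { /* 'relay' is the branch whose reply goes upstream */
		/* build the outbound buffer and send it (see relay_reply) */
	}
	if (r == RPS_COMPLETED && is_invite(t))
		cancel_uacs(t, bm); /* kill the losing branches */
}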
Example #8
static int _reply_light( struct cell *trans, char* buf, unsigned int len,
			 unsigned int code, char *to_tag, unsigned int to_tag_len,
			 int lock, struct bookmark *bm)
{
	struct retr_buf *rb;
	unsigned int buf_len;
	branch_bm_t cancel_bitmap;
	str cb_s;

	if (!buf)
	{
		LM_DBG("response building failed\n");
		/* determine if there are some branches to be canceled */
		if ( is_invite(trans) ) {
			if (lock) LOCK_REPLIES( trans );
			which_cancel(trans, &cancel_bitmap );
			if (lock) UNLOCK_REPLIES( trans );
		}
		/* and clean-up, including cancellations, if needed */
		goto error;
	}

	cancel_bitmap=0;
	if (lock) LOCK_REPLIES( trans );
	if ( is_invite(trans) ) which_cancel(trans, &cancel_bitmap );
	if (trans->uas.status>=200) {
		LM_ERR("failed to generate %d reply when a final %d was sent out\n",
				code, trans->uas.status);
		goto error2;
	}


	rb = & trans->uas.response;
	rb->activ_type=code;

	trans->uas.status = code;
	buf_len = rb->buffer.s ? len : len + REPLY_OVERBUFFER_LEN;
	rb->buffer.s = (char*)shm_resize( rb->buffer.s, buf_len );
	/* puts the reply's buffer to uas.response */
	if (! rb->buffer.s ) {
			LM_ERR("failed to allocate shmem buffer\n");
			goto error3;
	}
	update_local_tags(trans, bm, rb->buffer.s, buf);

	rb->buffer.len = len ;
	memcpy( rb->buffer.s , buf , len );
	/* needs to be protected too because what timers are set depends
	   on the current transaction's status */
	/* t_update_timers_after_sending_reply( rb ); */
	trans->relaied_reply_branch=-2;
	if (lock) UNLOCK_REPLIES( trans );
	
	/* do UAC cleanup procedures in case we generated
	   a final answer while there are still pending UACs */
	if (code>=200) {
		if ( is_local(trans) ) {
			LM_DBG("local transaction completed\n");
			if ( has_tran_tmcbs(trans, TMCB_LOCAL_COMPLETED) ) {
				run_trans_callbacks( TMCB_LOCAL_COMPLETED, trans,
					0, FAKED_REPLY, code);
			}
		} else {
			/* run the PRE send callbacks */
			if ( has_tran_tmcbs(trans, TMCB_RESPONSE_PRE_OUT) ) {
				cb_s.s = buf;
				cb_s.len = len;
				set_extra_tmcb_params( &cb_s, &rb->dst);
				if (lock)
					run_trans_callbacks_locked( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
				else
					run_trans_callbacks( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
			}
		}

		if (!is_hopbyhop_cancel(trans)) {
			cleanup_uac_timers( trans );
			if (is_invite(trans)) cancel_uacs( trans, cancel_bitmap );
			/* for auth related replies, we do not do retransmission 
			   (via set_final_timer()), but only wait for a final 
			   reply (put_on_wait() ) - see RFC 3261 (26.3.2.4 DoS Protection) */
			if ((code != 401) && (code != 407))
				set_final_timer(  trans );
			else
				put_on_wait(trans);
		}
	}

	/* send it out : response.dst.send_sock is valid all the time now, 
	 * as it's taken from original request -bogdan */
	if (!trans->uas.response.dst.send_sock) {
		LM_CRIT("send_sock is NULL\n");
	}
	SEND_PR_BUFFER( rb, buf, len );
	LM_DBG("reply sent out. buf=%p: %.9s..., "
		"shmem=%p: %.9s\n", buf, buf, rb->buffer.s, rb->buffer.s );

	/* run the POST send callbacks */
	if (code>=200&&!is_local(trans)&&has_tran_tmcbs(trans,TMCB_RESPONSE_OUT)) {
		cb_s.s = buf;
		cb_s.len = len;
		set_extra_tmcb_params( &cb_s, &rb->dst);
		if (lock)
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
		else
			run_trans_callbacks( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
	}


	pkg_free( buf ) ;
	stats_trans_rpl( code, 1 /*local*/ );
	LM_DBG("finished\n");
	return 1;

error3:
error2:
	if (lock) UNLOCK_REPLIES( trans );
	pkg_free ( buf );
error:
	/* do UAC cleanup */
	cleanup_uac_timers( trans );
	if ( is_invite(trans) ) cancel_uacs( trans, cancel_bitmap );
	/* we did not succeed -- put the transaction on wait */
	put_on_wait(trans);
	return -1;
}
Example #9
/* This function is called whenever a reply for our module is received;
 * we need to register this function on module initialization;
 * Returns:  0 - core router stops
 *           1 - core router relays statelessly
 */
int reply_received( struct sip_msg  *p_msg )
{
	int msg_status;
	int last_uac_status;
	int branch;
	int reply_status;
	utime_t timer;
	/* has the transaction completed now and we need to clean-up? */
	branch_bm_t cancel_bitmap;
	struct ua_client *uac;
	struct cell *t;
	struct usr_avp **backup_list;
	unsigned int has_reply_route;

	set_t(T_UNDEFINED);

	/* make sure we know the associated transaction ... */
	if (t_check(p_msg, &branch ) == -1) goto not_found;

	/*... if there is none, tell the core router to fwd statelessly */
	t = get_t();
	if ((t == 0) || (t == T_UNDEFINED)) goto not_found;

	cancel_bitmap=0;
	msg_status=p_msg->REPLY_STATUS;

	uac=&t->uac[branch];
	LM_DBG("org. status uas=%d, uac[%d]=%d local=%d is_invite=%d)\n",
		t->uas.status, branch, uac->last_received, 
		is_local(t), is_invite(t));
	last_uac_status=uac->last_received;
	if_update_stat( tm_enable_stats, tm_rcv_rpls , 1);

	/* is it a CANCEL which is not end-to-end? */
	if ( get_cseq(p_msg)->method_id==METHOD_CANCEL && is_invite(t) ) {
		/* ... then just stop timers */
		reset_timer( &uac->local_cancel.retr_timer);
		if ( msg_status >= 200 ) {
				reset_timer( &uac->local_cancel.fr_timer);
		}
		LM_DBG("reply to local CANCEL processed\n");
		goto done;
	}

	/* *** stop timers *** */
	/* stop retransmission */
	reset_timer(&uac->request.retr_timer);

	/* stop final response timer only if I got a final response */
	if ( msg_status >= 200 ) {
		reset_timer( &uac->request.fr_timer);
	}

	/* acknowledge negative INVITE replies (do it before the detailed
	 * on_reply processing, which may take very long, e.g. if it
	 * attempts to establish a TCP connection to a fail-over dst) */
	if (is_invite(t) && ((msg_status >= 300) ||
	(is_local(t) && !no_autoack(t) && msg_status >= 200) )) {
		if (send_ack(p_msg, t, branch)!=0)
			LM_ERR("failed to send ACK (local=%s)\n", is_local(t)?"yes":"no");
	}

	_tm_branch_index = branch;

	/* processing of on_reply block */
	has_reply_route = (t->on_reply) || (t->uac[branch].on_reply);
	if (has_reply_route) {
		if (onreply_avp_mode) {
			/* lock the reply*/
			LOCK_REPLIES( t );
			/* set as avp_list the one from the transaction */
			backup_list = set_avp_list(&t->user_avps);
		} else {
			backup_list = 0;
		}
		/* transfer transaction flag to branch context */
		p_msg->flags = t->uas.request->flags;
		setb0flags(t->uac[branch].br_flags);
		/* run block - first per branch and then global one */
		if ( t->uac[branch].on_reply &&
		(run_top_route(onreply_rlist[t->uac[branch].on_reply].a,p_msg)
		&ACT_FL_DROP) && (msg_status<200) ) {
			if (onreply_avp_mode) {
				UNLOCK_REPLIES( t );
				set_avp_list( backup_list );
			}
			LM_DBG("dropping provisional reply %d\n", msg_status);
			goto done;
		}
		if ( t->on_reply && (run_top_route(onreply_rlist[t->on_reply].a,p_msg)
		&ACT_FL_DROP) && (msg_status<200) ) {
			if (onreply_avp_mode) {
				UNLOCK_REPLIES( t );
				set_avp_list( backup_list );
			}
			LM_DBG("dropping provisional reply %d\n", msg_status);
			goto done;
		}
		/* transfer current message context back to t */
		t->uac[branch].br_flags = getb0flags();
		t->uas.request->flags = p_msg->flags;
		if (onreply_avp_mode)
			/* restore original avp list */
			set_avp_list( backup_list );
	}

	if (!onreply_avp_mode || !has_reply_route)
		/* lock the reply*/
		LOCK_REPLIES( t );

	/* mark that the UAC received replies */
	uac->flags |= T_UAC_HAS_RECV_REPLY;

	/* we fire a cancel on the spot if (a) the branch is marked "to be
	 * canceled" or (b) the whole transaction was canceled (received CANCEL)
	 * and no cancel was sent yet on this branch; and of course, only on
	 * a provisional reply :) */
	if (t->uac[branch].flags&T_UAC_TO_CANCEL_FLAG ||
	((t->flags&T_WAS_CANCELLED_FLAG) && !t->uac[branch].local_cancel.buffer.s)) {
		if ( msg_status < 200 )
			/* reply for an UAC with a pending cancel -> do cancel now */
			cancel_branch(t, branch);
		/* reset flag */
		t->uac[branch].flags &= ~(T_UAC_TO_CANCEL_FLAG);
	}

	if (is_local(t)) {
		reply_status = local_reply(t,p_msg, branch,msg_status,&cancel_bitmap);
		if (reply_status == RPS_COMPLETED) {
			cleanup_uac_timers(t);
			if (is_invite(t)) cancel_uacs(t, cancel_bitmap);
			/* There is no need to call set_final_timer because we know
			 * that the transaction is local */
			put_on_wait(t);
		}
	} else {
		reply_status = relay_reply(t,p_msg,branch,msg_status,&cancel_bitmap);
		/* clean-up the transaction when transaction completed */
		if (reply_status == RPS_COMPLETED) {
			/* no more UAC FR/RETR (if I received a 2xx, there may
			 * still be pending branches ...)
			 */
			cleanup_uac_timers(t);
			if (is_invite(t)) cancel_uacs(t, cancel_bitmap);
			/* FR for negative INVITES, WAIT anything else */
			/* set_final_timer(t); */
		}
	}
	
	if (reply_status!=RPS_PROVISIONAL)
		goto done;
	
	/* update FR/RETR timers on provisional replies */
	if (msg_status < 200 && (restart_fr_on_each_reply ||
	((last_uac_status<msg_status) &&
	((msg_status >= 180) || (last_uac_status == 0)))
	) ) { /* provisional now */
		if (is_invite(t)) {
			/* invite: change FR to longer FR_INV, do not
			 * attempt to restart retransmission any more
			 */
			backup_list = set_avp_list(&t->user_avps);
			if (!fr_inv_avp2timer(&timer)) {
				LM_DBG("FR_INV_TIMER = %lld\n", timer);
				set_timer(&uac->request.fr_timer,
					FR_INV_TIMER_LIST, &timer);
			} else {
				set_timer(& uac->request.fr_timer, FR_INV_TIMER_LIST, 0);
			}
			set_avp_list(backup_list);
		} else {
			/* non-invite: restart retransmissions (slow now) */
			uac->request.retr_list = RT_T2;
			set_timer(&uac->request.retr_timer, RT_T2, 0);
		}
	} /* provisional replies */
	
done:
	/* we are done with the transaction, so unref it - the reference
	 * was incremented by t_check() function -bogdan*/
	t_unref(p_msg);
	/* don't try to relay statelessly, neither on success
	 * (we forwarded statefully) nor on error; on troubles,
	 * simply do nothing; that will make the other party
	 * retransmit; hopefully, we'll then be better off
	 */
	_tm_branch_index = 0;
	return 0;
not_found:
	set_t(T_UNDEFINED);
	return 1;
}
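
The header comment defines the contract with the core router: returning 0 stops processing, returning 1 asks for stateless relay. A sketch of the core-side dispatch implied by that contract (hypothetical glue; forward_reply() is the core's stateless relay routine):

/* hypothetical core-side dispatch honoring the return contract */
static void dispatch_reply_sketch(struct sip_msg *msg)
{
	if (reply_received(msg) == 1)
		forward_reply(msg); /* tm declined -> relay statelessly */
	/* on 0, tm handled the reply statefully; nothing more to do */
}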
Example #10
/* this is the "UAC" above transaction layer; if a final reply
   is received, it triggers a callback; note well -- it assumes
   it is entered locked with REPLY_LOCK and it returns unlocked!
*/
enum rps local_reply( struct cell *t, struct sip_msg *p_msg, int branch, 
	unsigned int msg_status, branch_bm_t *cancel_bitmap)
{
	/* how to deal with replies for local transaction */
	int local_store, local_winner;
	enum rps reply_status;
	struct sip_msg *winning_msg;
	int winning_code;
	int totag_retr;
	/* branch_bm_t cancel_bitmap; */

	/* keep the 'variable might be used uninitialized' warning silent */
	winning_msg=0;
	winning_code=0;
	totag_retr=0;

	*cancel_bitmap=0;

	reply_status=t_should_relay_response( t, msg_status, branch,
		&local_store, &local_winner, cancel_bitmap, p_msg );
	LM_DBG("branch=%d, save=%d, winner=%d\n",
		branch, local_store, local_winner );
	if (local_store) {
		if (!store_reply(t, branch, p_msg))
			goto error;
	}
	if (local_winner>=0) {
		winning_msg= branch==local_winner 
			? p_msg :  t->uac[local_winner].reply;
		if (winning_msg==FAKED_REPLY) {
			winning_code = branch==local_winner
				? msg_status : t->uac[local_winner].last_received;
		} else {
			winning_code=winning_msg->REPLY_STATUS;
		}
		t->uas.status = winning_code;
		stats_trans_rpl( winning_code, (winning_msg==FAKED_REPLY)?1:0 );
		if (is_invite(t) && winning_msg!=FAKED_REPLY
		&& winning_code>=200 && winning_code <300
		&& has_tran_tmcbs(t,
		TMCB_RESPONSE_OUT|TMCB_RESPONSE_PRE_OUT) )  {
			totag_retr=update_totag_set(t, winning_msg);
		}
	}
	UNLOCK_REPLIES(t);

	if ( local_winner >= 0 ) {
		if (winning_code < 200) {
			if (!totag_retr && has_tran_tmcbs(t,TMCB_LOCAL_RESPONSE_OUT)) {
				LM_DBG("Passing provisional reply %d to FIFO application\n",
						winning_code);
				run_trans_callbacks( TMCB_LOCAL_RESPONSE_OUT, t, 0,
					winning_msg, winning_code);
			}
		} else {
			LM_DBG("local transaction completed\n");
			if (!totag_retr && has_tran_tmcbs(t,TMCB_LOCAL_COMPLETED) ) {
				run_trans_callbacks( TMCB_LOCAL_COMPLETED, t, 0,
					winning_msg, winning_code );
			}
		}
	}
	return reply_status;

error:
	which_cancel(t, cancel_bitmap);
	UNLOCK_REPLIES(t);
	cleanup_uac_timers(t);
	if ( get_cseq(p_msg)->method_id==METHOD_INVITE )
		cancel_uacs( t, *cancel_bitmap );
	put_on_wait(t);
	return RPS_ERROR;
}
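
The locking contract from the header comment is easy to get wrong, so here is a caller-side sketch (mirroring what t_continue() in Example #13 does): local_reply() must be entered with REPLY_LOCK held and returns unlocked.

/* sketch: local_reply() consumes the lock taken by the caller */
static enum rps locked_local_reply(struct cell *t, struct sip_msg *msg,
		int branch, unsigned int code, branch_bm_t *bm)
{
	LOCK_REPLIES(t);
	return local_reply(t, msg, branch, code, bm);
	/* no UNLOCK_REPLIES() here: local_reply() has already unlocked */
}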
Example #11
/* this is the code which decides what and when shall be relayed
   upstream; note well -- it assumes it is entered locked with 
   REPLY_LOCK and it returns unlocked!
*/
enum rps relay_reply( struct cell *t, struct sip_msg *p_msg, int branch, 
	unsigned int msg_status, branch_bm_t *cancel_bitmap )
{
	int relay;
	int save_clone;
	char *buf;
	/* length of outbound reply */
	unsigned int res_len;
	int relayed_code;
	struct sip_msg *relayed_msg;
	struct bookmark bm;
	int totag_retr;
	enum rps reply_status;
	/* retransmission structure of outbound reply and request */
	struct retr_buf *uas_rb;
	str cb_s;
	str text;

	/* keep compiler warnings about use of uninit vars silent */
	res_len=0;
	buf=0;
	relayed_msg=0;
	relayed_code=0;
	totag_retr=0;


	/* remember what was sent upstream, to know whether we are
	 * forwarding a first final reply or not */

	/* *** store and relay message as needed *** */
	reply_status = t_should_relay_response(t, msg_status, branch, 
		&save_clone, &relay, cancel_bitmap, p_msg );
	LM_DBG("branch=%d, save=%d, relay=%d\n",
		branch, save_clone, relay );

	/* store the message if needed */
	if (save_clone) /* save for later use, typically branch picking */
	{
		if (!store_reply( t, branch, p_msg ))
			goto error01;
	}

	uas_rb = & t->uas.response;
	if (relay >= 0 ) {
		/* initialize sockets for outbound reply */
		uas_rb->activ_type=msg_status;

		t->relaied_reply_branch = relay;

		/* try building the outbound reply from either the current
		 * or a stored message */
		relayed_msg = branch==relay ? p_msg :  t->uac[relay].reply;
		if (relayed_msg==FAKED_REPLY) {
			relayed_code = branch==relay
				? msg_status : t->uac[relay].last_received;

			text.s = error_text(relayed_code);
			text.len = strlen(text.s); /* FIXME - bogdan*/

			if (relayed_code>=180 && t->uas.request->to 
					&& (get_to(t->uas.request)->tag_value.s==0 
					|| get_to(t->uas.request)->tag_value.len==0)) {
				calc_crc_suffix( t->uas.request, tm_tag_suffix );
				buf = build_res_buf_from_sip_req(
						relayed_code,
						&text,
						&tm_tag,
						t->uas.request, &res_len, &bm );
			} else {
				buf = build_res_buf_from_sip_req( relayed_code,
					&text, 0/* no to-tag */,
					t->uas.request, &res_len, &bm );
			}

		} else {
			/* run callbacks for all types of responses -
			 * whether they are shmem-ed or not */
			if (has_tran_tmcbs(t,TMCB_RESPONSE_FWDED) ) {
				run_trans_callbacks( TMCB_RESPONSE_FWDED, t, t->uas.request,
					relayed_msg, msg_status );
			}
			relayed_code=relayed_msg->REPLY_STATUS;
			buf = build_res_buf_from_sip_res( relayed_msg, &res_len,
							uas_rb->dst.send_sock);
			/* remove all lumps which are not in shm 
			 * added either by build_res_buf_from_sip_res, or by
			 * the callbacks that have been called with shmem-ed messages - vlad */
			if (branch!=relay) {
				del_notflaged_lumps( &(relayed_msg->add_rm), LUMPFLAG_SHMEM);
				del_notflaged_lumps( &(relayed_msg->body_lumps), LUMPFLAG_SHMEM);
			}
		}
		if (!buf) {
			LM_ERR("no mem for outbound reply buffer\n");
			goto error02;
		}

		/* attempt to copy the message to UAS's shmem:
		   - copy to-tag for ACK matching as well
		   - allocate a little bit more for provisional replies, as
		     larger messages are likely to follow and we will be
		     able to reuse the memory frag
		*/
		uas_rb->buffer.s = (char*)shm_resize( uas_rb->buffer.s, res_len +
			(msg_status<200 ?  REPLY_OVERBUFFER_LEN : 0));
		if (!uas_rb->buffer.s) {
			LM_ERR("no more share memory\n");
			goto error03;
		}
		uas_rb->buffer.len = res_len;
		memcpy( uas_rb->buffer.s, buf, res_len );
		if (relayed_msg==FAKED_REPLY) { /* to-tags for local replies */
			update_local_tags(t, &bm, uas_rb->buffer.s, buf);
		}
		stats_trans_rpl( relayed_code, (relayed_msg==FAKED_REPLY)?1:0 );

		/* update the status ... */
		t->uas.status = relayed_code;

		if (is_invite(t) && relayed_msg!=FAKED_REPLY
		&& relayed_code>=200 && relayed_code < 300
		&& has_tran_tmcbs( t,
		TMCB_RESPONSE_OUT|TMCB_RESPONSE_PRE_OUT)) {
			totag_retr=update_totag_set(t, relayed_msg);
		}
	} /* if relay ... */

	UNLOCK_REPLIES( t );

	/* Setup retransmission timer _before_ the reply is sent
	 * to avoid race conditions
	 */
	if (reply_status == RPS_COMPLETED) {
		/* for auth related replies, we do not do retransmission 
		   (via set_final_timer()), but only wait for a final 
		   reply (put_on_wait() ) - see RFC 3261 (26.3.2.4 DoS Protection) */
		if ((relayed_code != 401) && (relayed_code != 407))
			set_final_timer(t);
		else
			put_on_wait(t);
	}

	/* send it now (from the private buffer) */
	if (relay >= 0) {
		/* run the PRE sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_PRE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked(TMCB_RESPONSE_PRE_OUT,t,t->uas.request,
				relayed_msg, relayed_code);
		}
		SEND_PR_BUFFER( uas_rb, buf, res_len );
		LM_DBG("sent buf=%p: %.9s..., shmem=%p: %.9s\n", 
			buf, buf, uas_rb->buffer.s, uas_rb->buffer.s );
		/* run the POST sending out callback */
		if (!totag_retr && has_tran_tmcbs(t, TMCB_RESPONSE_OUT) ) {
			cb_s.s = buf;
			cb_s.len = res_len;
			set_extra_tmcb_params( &cb_s, &uas_rb->dst);
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, t, t->uas.request,
				relayed_msg, relayed_code);
		}
		pkg_free( buf );
	}

	/* success */
	return reply_status;

error03:
	pkg_free( buf );
error02:
	if (save_clone) {
		if (t->uac[branch].reply!=FAKED_REPLY)
			sip_msg_free( t->uac[branch].reply );
		t->uac[branch].reply = NULL;
	}
error01:
	/* a serious error occurred -- attempt to send an error reply;
	   it will take care of the clean-ups */
	text.s = "Reply processing error";
	text.len = sizeof("Reply processing error")-1;
	t_reply_unsafe( t, t->uas.request, 500, &text );
	UNLOCK_REPLIES(t);
	if (is_invite(t)) cancel_uacs( t, *cancel_bitmap );

	/* failure */
	return RPS_ERROR;
}
Example #12
int t_inject_branch( struct cell *t, struct sip_msg *msg, int flags)
{
	static struct sip_msg faked_req;
	branch_bm_t cancel_bm = 0; /* must start empty; which_cancel() only sets bits */
	str reason = str_init(CANCEL_REASON_200);
	int rc;

	/* does the transaction state still accept new branches ? */
	if (t->uas.status >= 200) {
		LM_DBG("cannot add branches to a transaction with %d UAS\n",
			t->uas.status);
		return -2;
	}

	if (!fake_req( &faked_req, t->uas.request, &t->uas, NULL, 0)) {
		LM_ERR("fake_req failed\n");
		return -1;
	}

	/* do we have the branches to be injected ? */
	if (flags&TM_INJECT_SRC_EVENT) {
		/* get the branch from Event AVPs, as populated by EVI/EBR */
		if (ul_contact_event_to_msg( &faked_req )<0) {
			LM_ERR("failed to grab new branch from Event\n");
			goto error;
		}
	} else {
		/* use the RURI+dset array as destinations to be injected */
		if (msg->first_line.type==SIP_REQUEST) {
			/* take the RURI branch from the script msg and move it
			 * into the faked msg (that will be used by t_fwd function) */
			if (dst_to_msg( msg, &faked_req )<0) {
				LM_ERR("failed to grab new branch from Event\n");
				goto error;
			}
		} else {
			/* current message is a reply, so take the first branch from dset
			 * and move it into the faked msg (that will be used by t_fwd 
			 * function)*/
			if (move_branch_to_ruri( 0, &faked_req)<0) {
				LM_ERR("no branch found to be moved as new destination\n");
				goto error;
			}
			/* remove it from set */
			remove_branch(0);
		}
	}

	/* do we have to cancel the existing branches before injecting new ones? */
	if (flags&TM_INJECT_FLAG_CANCEL) {
		which_cancel( t, &cancel_bm );
	}

	/* generate the new branches, without branch counter reset */
	rc = t_forward_nonack( t, &faked_req , NULL, 0, 1/*locked*/ );

	/* now cancel the existing branches, if so requested */
	if (flags&TM_INJECT_FLAG_CANCEL) {
		set_cancel_extra_hdrs( reason.s, reason.len);
		cancel_uacs( t, cancel_bm );
		set_cancel_extra_hdrs( NULL, 0);
	}

	/* cleanup the faked request */
	free_faked_req( &faked_req, t);

	return (rc==1)?1:-3;

error:
	/* cleanup the faked request */
	free_faked_req( &faked_req, t);
	return -1;
}
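
A hypothetical caller (illustration only): inject the destinations carried by the current message as new branches while canceling the already-running ones.

/* hypothetical helper: replace the running branches with the msg's dset */
static int reroute_transaction(struct cell *t, struct sip_msg *msg)
{
	return t_inject_branch(t, msg, TM_INJECT_FLAG_CANCEL);
}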
Example #13
/* Continues the SIP request processing previously saved by
 * t_suspend(). The script does not continue from the same
 * point, but a separate route block is executed instead.
 *
 * Return value:
 * 	0  - success
 * 	<0 - failure
 */
int t_continue(unsigned int hash_index, unsigned int label,
		struct action *route)
{
	struct cell	*t;
	struct sip_msg	faked_req;
	struct cancel_info cancel_data;
	int	branch;
	struct ua_client *uac =NULL;
	int	ret;
	int cb_type;
	int msg_status;
	int last_uac_status;
	int reply_status;
	int do_put_on_wait;
	struct hdr_field *hdr, *prev = 0, *tmp = 0;

	if (t_lookup_ident(&t, hash_index, label) < 0) {
		LM_ERR("transaction not found\n");
		return -1;
	}

	if (!(t->flags & T_ASYNC_SUSPENDED)) {
		LM_WARN("transaction is not suspended [%u:%u]\n", hash_index, label);
		return -2;
	}

	if (t->flags & T_CANCELED) {
		t->flags &= ~T_ASYNC_SUSPENDED;
		/* The transaction has already been canceled,
		 * needless to continue */
		UNREF(t); /* t_unref would kill the transaction */
		/* reset T as we have no working T anymore */
		set_t(T_UNDEFINED, T_BR_UNDEFINED);
		return 1;
	}

	/* The transaction has to be locked to protect it
	 * from t_continue() being called multiple times simultaneously */
	LOCK_ASYNC_CONTINUE(t);

	t->flags |= T_ASYNC_CONTINUE;   /* we can now know anywhere in kamailio
					 * that we are executing post a suspend */

	/* which route block type were we in when we were suspended */
	cb_type = FAILURE_CB_TYPE;
	switch (t->async_backup.backup_route) {
		case REQUEST_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case FAILURE_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case TM_ONREPLY_ROUTE:
			cb_type = ONREPLY_CB_TYPE;
			break;
		case BRANCH_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		branch = t->async_backup.blind_uac;	/* get the branch of the blind UAC setup 
			* during suspend */
		if (branch >= 0) {
			stop_rb_timers(&t->uac[branch].request);
 
			if (t->uac[branch].last_received != 0) {
				/* Either t_continue() has already been
				* called or the branch has already timed out.
				* Needless to continue. */
				t->flags &= ~T_ASYNC_SUSPENDED;
				UNLOCK_ASYNC_CONTINUE(t);
				UNREF(t); /* t_unref would kill the transaction */
				return 1;
			}

			/* Set last_received to something >= 200;
			 * the actual value does not matter, the branch
			 * will never be picked up for response forwarding.
			 * If last_received were lower than 200,
			 * the branch might be cancelled later,
			 * for example when t_reply() is called from
			 * a failure route => deadlock, because both
			 * of them need the reply lock to be held. */
			t->uac[branch].last_received=500;
			uac = &t->uac[branch];
		}
		/* else
			Not a huge problem, fr timer will fire, but CANCEL
			will not be sent. last_received will be set to 408. */

		/* We should not reset kr to 0 here: before continuing, the developer
		 * may have correctly set kr, for example by sending a transactional
		 * reply in code; resetting it here would cause a spurious
		 * "WARNING: script writer didn't release transaction" log message.
		 * TODO: maybe we need a special kr for async?
		 * reset_kr();
		 */

		/* fake the request and the environment, like in failure_route */
		if (!fake_req(&faked_req, t->uas.request, 0 /* extra flags */, uac)) {
			LM_ERR("building fake_req failed\n");
			ret = -1;
			goto kill_trans;
		}
		faked_env( t, &faked_req, 1);

		/* execute the pre/post -script callbacks based on original route block */
		if (exec_pre_script_cb(&faked_req, cb_type)>0) {
			if (run_top_route(route, &faked_req, 0)<0)
				LM_ERR("failure inside run_top_route\n");
			exec_post_script_cb(&faked_req, cb_type);
		}

		/* TODO: save_msg_lumps should clone the lumps to shm mem */

		/* restore original environment and free the fake msg */
		faked_env( t, 0, 1);
		free_faked_req(&faked_req, t);

		/* update the flags */
		t->uas.request->flags = faked_req.flags;

		if (t->uas.status < 200) {
			/* No final reply has been sent yet.
			* Check whether or not there is any pending branch.
			*/
			for (	branch = 0;
				branch < t->nr_of_outgoings;
				branch++
			) {
				if (t->uac[branch].last_received < 200)
					break;
			}

			if (branch == t->nr_of_outgoings) {
			/* There is not any open branch so there is
			* no chance that a final response will be received. */
				ret = 0;
				goto kill_trans;
			}
		}

	} else {
		branch = t->async_backup.backup_branch;

		init_cancel_info(&cancel_data);

		LM_DBG("continuing from a suspended reply"
				" - resetting the suspend branch flag\n");

		if (t->uac[branch].reply) {
		t->uac[branch].reply->msg_flags &= ~FL_RPL_SUSPENDED;
                } else {
			LM_WARN("no reply in t_continue for branch. not much we can do\n");
			return 0;
		}
                
		if (t->uas.request) t->uas.request->msg_flags&= ~FL_RPL_SUSPENDED;

		faked_env( t, t->uac[branch].reply, 1);

		if (exec_pre_script_cb(t->uac[branch].reply, cb_type)>0) {
			if (run_top_route(route, t->uac[branch].reply, 0)<0){
				LOG(L_ERR, "ERROR: t_continue_reply: Error in run_top_route\n");
			}
			exec_post_script_cb(t->uac[branch].reply, cb_type);
		}

		LM_DBG("restoring previous environment");
		faked_env( t, 0, 1);

		/*lock transaction replies - will be unlocked when reply is relayed*/
		LOCK_REPLIES( t );
		if ( is_local(t) ) {
			LM_DBG("t is local - sending reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			reply_status = local_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * still be pending branches ...)
				 */
				cleanup_uac_timers( t );
				if (is_invite(t)) cancel_uacs(t, &cancel_data, F_CANCEL_B_KILL);
				/* There is no need to call set_final_timer because we know
				* that the transaction is local */
				put_on_wait(t);
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
				* cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data, cfg_get(tm,tm_cfg, cancel_b_flags));
			}

		} else {
			LM_DBG("t is not local - relaying reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			do_put_on_wait = 0;
			if(t->uac[branch].reply->first_line.u.reply.statuscode>=200){
				do_put_on_wait = 1;
			}
			reply_status=relay_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data, do_put_on_wait );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * still be pending branches ...)
				 */
				cleanup_uac_timers( t );
				/* 2xx is a special case: we can have a COMPLETED request
				* with branches still open => we have to cancel them */
				if (is_invite(t) && cancel_data.cancel_bitmap) 
					cancel_uacs( t, &cancel_data,  F_CANCEL_B_KILL);
				/* FR for negative INVITES, WAIT anything else */
				/* Call to set_final_timer is embedded in relay_reply to avoid
				* race conditions when reply is sent out and an ACK to stop
				* retransmissions comes before retransmission timer is set.*/
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
				* cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data, cfg_get(tm,tm_cfg, cancel_b_flags));
			}

		}
		t->uac[branch].request.flags|=F_RB_REPLIED;

		if (reply_status==RPS_ERROR){
			goto done;
		}

		/* update FR/RETR timers on provisional replies */

		msg_status=t->uac[branch].reply->REPLY_STATUS;
		last_uac_status=t->uac[branch].last_received;

		if (is_invite(t) && msg_status<200 &&
			( cfg_get(tm, tm_cfg, restart_fr_on_each_reply) ||
			( (last_uac_status<msg_status) &&
			((msg_status>=180) || (last_uac_status==0)) )
		) ) { /* provisional now */
			restart_rb_fr(& t->uac[branch].request, t->fr_inv_timeout);
			t->uac[branch].request.flags|=F_RB_FR_INV; /* mark fr_inv */
		}
            
	}

done:
	UNLOCK_ASYNC_CONTINUE(t);

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		/* unref the transaction */
		t_unref(t->uas.request);
	} else {
		tm_ctx_set_branch_index(T_BR_UNDEFINED);        
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
		LOG(L_DBG,"DEBUG: t_continue_reply: Freeing earlier cloned reply\n");

		/* free lumps that were added during reply processing */
		del_nonshm_lump( &(t->uac[branch].reply->add_rm) );
		del_nonshm_lump( &(t->uac[branch].reply->body_lumps) );
		del_nonshm_lump_rpl( &(t->uac[branch].reply->reply_lump) );

		/* free header's parsed structures that were added */
		for( hdr=t->uac[branch].reply->headers ; hdr ; hdr=hdr->next ) {
			if ( hdr->parsed && hdr_allocs_parse(hdr) &&
				(hdr->parsed<(void*)t->uac[branch].reply ||
				hdr->parsed>=(void*)t->uac[branch].end_reply)) {
				clean_hdr_field(hdr);
				hdr->parsed = 0;
			}
		}

		/* now go through hdr_fields themselves and remove the pkg allocated space */
		hdr = t->uac[branch].reply->headers;
		while (hdr) {
			if ( hdr && ((void*)hdr<(void*)t->uac[branch].reply ||
				(void*)hdr>=(void*)t->uac[branch].end_reply)) {
				//this header needs to be freed and removed from the list.
				if (!prev) {
					t->uac[branch].reply->headers = hdr->next;
				} else {
					prev->next = hdr->next;
				}
				tmp = hdr;
				hdr = hdr->next;
				pkg_free(tmp);
			} else {
				prev = hdr;
				hdr = hdr->next;
			}
		}
		sip_msg_free(t->uac[branch].reply);
		t->uac[branch].reply = 0;
	}

	/* This transaction is no longer suspended, so unset the SUSPEND flag */
	t->flags &= ~T_ASYNC_SUSPENDED;


	return 0;

kill_trans:
	t->flags &= ~T_ASYNC_SUSPENDED;
	/* The script has hopefully set the error code. If not,
	 * let us reply with a default error. */
	if ((kill_transaction_unsafe(t,
		tm_error ? tm_error : E_UNSPEC)) <=0
	) {
		LOG(L_ERR, "ERROR: t_continue: "
			"reply generation failed\n");
		/* The transaction must be explicitly released,
		 * no more timer is running */
		UNLOCK_ASYNC_CONTINUE(t);
		t_release_transaction(t);
	} else {
		UNLOCK_ASYNC_CONTINUE(t);
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		t_unref(t->uas.request);
	} else {
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
	}
	return ret;
}
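
t_continue() is one half of the async API; a sketch of the usual pairing with t_suspend() (signature as in the kamailio tm async API; the worker hand-off is only indicated):

/* sketch: suspend in a route, resume later with a different route block */
static void async_sketch(struct sip_msg *msg, struct action *resume_route)
{
	unsigned int hash_index, label;

	if (t_suspend(msg, &hash_index, &label) < 0)
		return; /* could not suspend the transaction */
	/* ... hand (hash_index, label) over to an async worker; later: */
	t_continue(hash_index, label, resume_route);
}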