Example #1
File: tm.c Project: Deni90/opensips
inline static int w_t_cancel_branch(struct sip_msg *msg, char *sflags)
{
	branch_bm_t cancel_bitmap = 0;
	struct cell *t;
	unsigned int flags = (unsigned long)sflags;

	t=get_t();

	if (t==NULL || t==T_UNDEFINED) {
		/* no transaction */
		LM_ERR("cannot cancel a reply with no transaction");
		return -1;
	}
	if (!is_invite(t))
		return -1;

	if (flags&TM_CANCEL_BRANCH_ALL) {
		/* lock and get the branches to cancel */
		if (!onreply_avp_mode) {
			LOCK_REPLIES(t);
			which_cancel( t, &cancel_bitmap );
			UNLOCK_REPLIES(t);
		} else {
			which_cancel( t, &cancel_bitmap );
		}
		if (msg->first_line.u.reply.statuscode>=200)
			/* do not cancel the current branch as we got
			 * a final response here */
			cancel_bitmap &= ~(1<<_tm_branch_index);
	} else if (flags&TM_CANCEL_BRANCH_OTHERS) {
		/* lock and get the branches to cancel */
		if (!onreply_avp_mode) {
			LOCK_REPLIES(t);
			which_cancel( t, &cancel_bitmap );
			UNLOCK_REPLIES(t);
		} else {
			which_cancel( t, &cancel_bitmap );
		}
		/* ignore current branch */
		cancel_bitmap &= ~(1<<_tm_branch_index);
	} else {
		/* cancel only local branch (only if still ongoing) */
		if (msg->first_line.u.reply.statuscode<200)
			cancel_bitmap = 1<<_tm_branch_index;
	}

	/* send cancels out */
	cancel_uacs(t, cancel_bitmap);

	return 1;
}
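The branch bookkeeping above is a plain bitmask over branch indexes. A minimal sketch of how branches are selected and the current one is spared, reusing the names from the example (the branch numbers are hypothetical):

/* hypothetical sketch: mark branches 0 and 2 for cancellation,
 * then spare the branch the current reply arrived on */
branch_bm_t bm = 0;
bm |= (1 << 0) | (1 << 2);       /* select branches 0 and 2 */
bm &= ~(1 << _tm_branch_index);  /* exclude the current branch */
cancel_uacs(t, bm);              /* send CANCELs to the marked branches */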
Example #2
void cancel_invite(struct sip_msg *cancel_msg,
								struct cell *t_cancel, struct cell *t_invite )
{
#define CANCEL_REASON_SIP_487  \
	"Reason: SIP;cause=487;text=\"ORIGINATOR_CANCEL\"" CRLF

	branch_bm_t cancel_bitmap;
	branch_bm_t dummy_bm;
	str reason;
	unsigned int i;
	struct hdr_field *hdr;

	cancel_bitmap=0;

	/* send back 200 OK as per RFC3261 */
	reason.s = CANCELING;
	reason.len = sizeof(CANCELING)-1;
	t_reply( t_cancel, cancel_msg, 200, &reason );

	reason.s = NULL;
	reason.len = 0;
	/* propagate the REASON flag ? */
	if ( t_cancel->flags&T_CANCEL_REASON_FLAG ) {
		/* look for the Reason header */
		if (parse_headers(cancel_msg, HDR_EOH_F, 0)<0) {
			LM_ERR("failed to parse all hdrs - ignoring Reason hdr\n");
		} else {
			hdr = get_header_by_static_name(cancel_msg, "Reason");
			if (hdr!=NULL) {
				reason.s = hdr->name.s;
				reason.len = hdr->len;
			}
		}
	}

	/* if no reason, use NORMAL CLEARING */
	if (reason.s == NULL) {
		reason.s = CANCEL_REASON_SIP_487;
		reason.len = sizeof(CANCEL_REASON_SIP_487) - 1;
	}

	/* generate local cancels for all branches */
	which_cancel(t_invite, &cancel_bitmap );

	set_cancel_extra_hdrs( reason.s, reason.len);
	cancel_uacs(t_invite, cancel_bitmap );
	set_cancel_extra_hdrs( NULL, 0);

	/* internally cancel branches with no received reply */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (t_invite->uac[i].last_received==0){
			/* reset the "request" timers */
			reset_timer(&t_invite->uac[i].request.retr_timer);
			reset_timer(&t_invite->uac[i].request.fr_timer);
			LOCK_REPLIES( t_invite );
			relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm);
		}
	}
}
Example #3
void run_trans_callbacks_locked( int type , struct cell *trans,
						struct sip_msg *req, struct sip_msg *rpl, int code )
{
	if (trans->tmcb_hl.first==0 || ((trans->tmcb_hl.reg_types)&type)==0 )
		return;

	LOCK_REPLIES(trans);

	/* run callbacks */
	run_trans_callbacks( type , trans, req, rpl, code );
	/* SHM message cleanup */
	if (trans->uas.request && trans->uas.request->msg_flags&FL_SHM_CLONE)
		clean_msg_clone( trans->uas.request, trans->uas.request,
			trans->uas.end_request);

	UNLOCK_REPLIES(trans);
}
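Because this wrapper takes LOCK_REPLIES itself, calling it while the reply lock is already held would deadlock; Example #7 below therefore switches between the two variants depending on who owns the lock. A minimal sketch of that split (the helper name is hypothetical):

/* hypothetical helper: fire TMCB_RESPONSE_OUT callbacks; 'have_lock'
 * says whether the caller already holds the transaction's reply lock */
static void fire_response_out(struct cell *t, int code, int have_lock)
{
	if (have_lock)
		/* reply lock held -> the locked wrapper would deadlock */
		run_trans_callbacks(TMCB_RESPONSE_OUT, t,
			t->uas.request, FAKED_REPLY, code);
	else
		run_trans_callbacks_locked(TMCB_RESPONSE_OUT, t,
			t->uas.request, FAKED_REPLY, code);
}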
Example #4
void cancel_invite(struct sip_msg *cancel_msg,
					struct cell *t_cancel, struct cell *t_invite, int locked)
{

	branch_bm_t cancel_bitmap;
	str reason;

	cancel_bitmap=0;

	/* send back 200 OK as per RFC3261 */
	reason.s = CANCELING;
	reason.len = sizeof(CANCELING)-1;
	if (locked)
		t_reply_unsafe( t_cancel, cancel_msg, 200, &reason );
	else
		t_reply( t_cancel, cancel_msg, 200, &reason );

	get_cancel_reason(cancel_msg, t_cancel->flags, &reason);

	/* generate local cancels for all branches */
	which_cancel(t_invite, &cancel_bitmap );

	set_cancel_extra_hdrs( reason.s, reason.len);
	cancel_uacs(t_invite, cancel_bitmap );
	set_cancel_extra_hdrs( NULL, 0);

	/* Do not do anything about branches with no received reply;
	 * continue the retransmissions, hoping to get something back;
	 * if still nothing, the 408 Timeout will be generated based on the FR
	 * timer; this helps to better cope with missed/late provisional
	 * replies in the context of cancelling the transaction
	 */
#if 0
	/* internally cancel branches with no received reply */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (t_invite->uac[i].last_received==0){
			/* reset the "request" timers */
			reset_timer(&t_invite->uac[i].request.retr_timer);
			reset_timer(&t_invite->uac[i].request.fr_timer);
			LOCK_REPLIES( t_invite );
			relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm);
		}
	}
#endif
}
Example #5
File: tm.c Project: Deni90/opensips
int t_cancel_trans(struct cell *t, str *extra_hdrs)
{
	branch_bm_t cancel_bitmap = 0;

	if (t==NULL || t==T_UNDEFINED) {
		/* no transaction */
		LM_ERR("cannot cancel with no transaction");
		return -1;
	}

	LOCK_REPLIES(t);
	which_cancel( t, &cancel_bitmap );
	UNLOCK_REPLIES(t);

	/* send cancels out */
	if (extra_hdrs)
		set_cancel_extra_hdrs( extra_hdrs->s, extra_hdrs->len);
	cancel_uacs(t, cancel_bitmap);
	set_cancel_extra_hdrs( NULL, 0);

	return 0;
}
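A hypothetical caller sketch for t_cancel_trans(): cancel every pending branch of a transaction and attach an RFC 3326 Reason header to the generated CANCELs. The wrapper function and the header value are illustrative, not part of the module:

/* hypothetical caller: cancel all pending branches of t with a Reason hdr */
static void cancel_with_reason(struct cell *t)
{
	static const char hdr[] =
		"Reason: SIP;cause=480;text=\"NO_ANSWER\"\r\n";
	str reason = { (char*)hdr, sizeof(hdr) - 1 };

	if (t_cancel_trans(t, &reason) < 0)
		LM_ERR("failed to cancel the transaction\n");
}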
Example #6
/* Retransmits the last sent inbound reply.
 * input: t == transaction whose last sent reply should be retransmitted
 * Returns  -1 - error
 *           1 - OK
 */
int t_retransmit_reply( struct cell *t )
{
	static char b[BUF_SIZE];
	int len;

	/* we need to lock the transaction as messages from
	   upstream may change it continuously */
	LOCK_REPLIES( t );

	if (!t->uas.response.buffer.s) {
		LM_DBG("nothing to retransmit\n");
		goto error;
	}

	/* response.dst.send_sock should be valid all the time now, as it's taken
	   from original request -bogdan */
	if (t->uas.response.dst.send_sock==0) {
		LM_CRIT("something to retransmit, but send_sock is NULL\n");
		goto error;
	}

	len=t->uas.response.buffer.len;
	if ( len==0 || len>BUF_SIZE )  {
		LM_DBG("zero length or too big to retransmit: %d\n", len);
		goto error;
	}
	memcpy( b, t->uas.response.buffer.s, len );
	UNLOCK_REPLIES( t );
	SEND_PR_BUFFER( & t->uas.response, b, len );
	LM_DBG("buf=%p: %.9s..., shmem=%p: %.9s\n",b, b, t->uas.response.buffer.s,
			t->uas.response.buffer.s );
	return 1;

error:
	UNLOCK_REPLIES(t);
	return -1;
}
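Per the contract in the comment above (-1 on error, 1 on success), a caller might look like the following sketch; get_t() and T_UNDEFINED are assumed to behave as in Example #1, and the wrapper name is hypothetical:

/* hypothetical caller: retransmit the last reply of the current
 * transaction, if there is one */
static int retransmit_current_reply(void)
{
	struct cell *t = get_t();

	if (t == NULL || t == T_UNDEFINED)
		return -1;                 /* no transaction in context */
	return t_retransmit_reply(t);  /* 1 on success, -1 on error */
}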
Example #7
static int _reply_light( struct cell *trans, char* buf, unsigned int len,
			 unsigned int code, char *to_tag, unsigned int to_tag_len,
			 int lock, struct bookmark *bm)
{
	struct retr_buf *rb;
	unsigned int buf_len;
	branch_bm_t cancel_bitmap = 0; /* also read on the !buf error path below */
	str cb_s;

	if (!buf)
	{
		LM_DBG("response building failed\n");
		/* determine if there are some branches to be canceled */
		if ( is_invite(trans) ) {
			if (lock) LOCK_REPLIES( trans );
			which_cancel(trans, &cancel_bitmap );
			if (lock) UNLOCK_REPLIES( trans );
		}
		/* and clean-up, including cancellations, if needed */
		goto error;
	}

	if (lock) LOCK_REPLIES( trans );
	if ( is_invite(trans) ) which_cancel(trans, &cancel_bitmap );
	if (trans->uas.status>=200) {
		LM_ERR("failed to generate %d reply when a final %d was sent out\n",
				code, trans->uas.status);
		goto error2;
	}


	rb = & trans->uas.response;
	rb->activ_type=code;

	trans->uas.status = code;
	buf_len = rb->buffer.s ? len : len + REPLY_OVERBUFFER_LEN;
	rb->buffer.s = (char*)shm_resize( rb->buffer.s, buf_len );
	/* puts the reply's buffer to uas.response */
	if (! rb->buffer.s ) {
			LM_ERR("failed to allocate shmem buffer\n");
			goto error3;
	}
	update_local_tags(trans, bm, rb->buffer.s, buf);

	rb->buffer.len = len ;
	memcpy( rb->buffer.s , buf , len );
	/* needs to be protected too because which timers are set depends
	   on the current transaction status */
	/* t_update_timers_after_sending_reply( rb ); */
	trans->relaied_reply_branch=-2;
	if (lock) UNLOCK_REPLIES( trans );
	
	/* do UAC cleanup procedures in case we generated
	   a final answer whereas there are pending UACs */
	if (code>=200) {
		if ( is_local(trans) ) {
			LM_DBG("local transaction completed\n");
			if ( has_tran_tmcbs(trans, TMCB_LOCAL_COMPLETED) ) {
				run_trans_callbacks( TMCB_LOCAL_COMPLETED, trans,
					0, FAKED_REPLY, code);
			}
		} else {
			/* run the PRE send callbacks */
			if ( has_tran_tmcbs(trans, TMCB_RESPONSE_PRE_OUT) ) {
				cb_s.s = buf;
				cb_s.len = len;
				set_extra_tmcb_params( &cb_s, &rb->dst);
				if (lock)
					run_trans_callbacks_locked( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
				else
					run_trans_callbacks( TMCB_RESPONSE_PRE_OUT, trans,
						trans->uas.request, FAKED_REPLY, code);
			}
		}

		if (!is_hopbyhop_cancel(trans)) {
			cleanup_uac_timers( trans );
			if (is_invite(trans)) cancel_uacs( trans, cancel_bitmap );
			/* for auth related replies, we do not do retransmission
			   (via set_final_timer()), but only wait for a final
			   reply (put_on_wait()) - see RFC 3261 (26.3.2.4 DoS Protection) */
			if ((code != 401) && (code != 407))
				set_final_timer(  trans );
			else
				put_on_wait(trans);
		}
	}

	/* send it out : response.dst.send_sock is valid all the time now, 
	 * as it's taken from original request -bogdan */
	if (!trans->uas.response.dst.send_sock) {
		LM_CRIT("send_sock is NULL\n");
	}
	SEND_PR_BUFFER( rb, buf, len );
	LM_DBG("reply sent out. buf=%p: %.9s..., "
		"shmem=%p: %.9s\n", buf, buf, rb->buffer.s, rb->buffer.s );

	/* run the POST send callbacks */
	if (code>=200&&!is_local(trans)&&has_tran_tmcbs(trans,TMCB_RESPONSE_OUT)) {
		cb_s.s = buf;
		cb_s.len = len;
		set_extra_tmcb_params( &cb_s, &rb->dst);
		if (lock)
			run_trans_callbacks_locked( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
		else
			run_trans_callbacks( TMCB_RESPONSE_OUT, trans,
				trans->uas.request, FAKED_REPLY, code);
	}


	pkg_free( buf ) ;
	stats_trans_rpl( code, 1 /*local*/ );
	LM_DBG("finished\n");
	return 1;

error3:
error2:
	if (lock) UNLOCK_REPLIES( trans );
	pkg_free ( buf );
error:
	/* do UAC cleanup */
	cleanup_uac_timers( trans );
	if ( is_invite(trans) ) cancel_uacs( trans, cancel_bitmap );
	/* we did not succeed -- put the transaction on wait */
	put_on_wait(trans);
	return -1;
}
Example #8
/*  This function is called whenever a reply for our module is received;
  * we need to register this function on module initialization;
  *  Returns :   0 - core router stops
  *              1 - core router relays statelessly
  */
int reply_received( struct sip_msg  *p_msg )
{
	int msg_status;
	int last_uac_status;
	int branch;
	int reply_status;
	utime_t timer;
	/* has the transaction completed now and we need to clean-up? */
	branch_bm_t cancel_bitmap;
	struct ua_client *uac;
	struct cell *t;
	struct usr_avp **backup_list;
	unsigned int has_reply_route;

	set_t(T_UNDEFINED);

	/* make sure we know the associated transaction ... */
	if (t_check(p_msg, &branch ) == -1) goto not_found;

	/*... if there is none, tell the core router to fwd statelessly */
	t = get_t();
	if ((t == 0) || (t == T_UNDEFINED)) goto not_found;

	cancel_bitmap=0;
	msg_status=p_msg->REPLY_STATUS;

	uac=&t->uac[branch];
	LM_DBG("org. status uas=%d, uac[%d]=%d local=%d is_invite=%d)\n",
		t->uas.status, branch, uac->last_received, 
		is_local(t), is_invite(t));
	last_uac_status=uac->last_received;
	if_update_stat( tm_enable_stats, tm_rcv_rpls , 1);

	/* is it a reply to a local (not e2e) CANCEL? */
	if ( get_cseq(p_msg)->method_id==METHOD_CANCEL && is_invite(t) ) {
		/* ... then just stop timers */
		reset_timer( &uac->local_cancel.retr_timer);
		if ( msg_status >= 200 ) {
				reset_timer( &uac->local_cancel.fr_timer);
		}
		LM_DBG("reply to local CANCEL processed\n");
		goto done;
	}

	/* *** stop timers *** */
	/* stop retransmission */
	reset_timer(&uac->request.retr_timer);

	/* stop final response timer only if I got a final response */
	if ( msg_status >= 200 ) {
		reset_timer( &uac->request.fr_timer);
	}

	/* acknowledge negative INVITE replies (do it before the detailed
	 * on_reply processing, which may take very long, e.g. if it
	 * attempts to establish a TCP connection to a fail-over dst) */
	if (is_invite(t) && ((msg_status >= 300) ||
	(is_local(t) && !no_autoack(t) && msg_status >= 200) )) {
		if (send_ack(p_msg, t, branch)!=0)
			LM_ERR("failed to send ACK (local=%s)\n", is_local(t)?"yes":"no");
	}

	_tm_branch_index = branch;

	/* processing of on_reply block */
	has_reply_route = (t->on_reply) || (t->uac[branch].on_reply);
	if (has_reply_route) {
		if (onreply_avp_mode) {
			/* lock the reply*/
			LOCK_REPLIES( t );
			/* set the as avp_list the one from transaction */
			backup_list = set_avp_list(&t->user_avps);
		} else {
			backup_list = 0;
		}
		/* transfer transaction flag to branch context */
		p_msg->flags = t->uas.request->flags;
		setb0flags(t->uac[branch].br_flags);
		/* run block - first per branch and then global one */
		if ( t->uac[branch].on_reply &&
		(run_top_route(onreply_rlist[t->uac[branch].on_reply].a,p_msg)
		&ACT_FL_DROP) && (msg_status<200) ) {
			if (onreply_avp_mode) {
				UNLOCK_REPLIES( t );
				set_avp_list( backup_list );
			}
			LM_DBG("dropping provisional reply %d\n", msg_status);
			goto done;
		}
		if ( t->on_reply && (run_top_route(onreply_rlist[t->on_reply].a,p_msg)
		&ACT_FL_DROP) && (msg_status<200) ) {
			if (onreply_avp_mode) {
				UNLOCK_REPLIES( t );
				set_avp_list( backup_list );
			}
			LM_DBG("dropping provisional reply %d\n", msg_status);
			goto done;
		}
		/* transfer current message context back to t */
		t->uac[branch].br_flags = getb0flags();
		t->uas.request->flags = p_msg->flags;
		if (onreply_avp_mode)
			/* restore original avp list */
			set_avp_list( backup_list );
	}

	if (!onreply_avp_mode || !has_reply_route)
		/* lock the reply*/
		LOCK_REPLIES( t );

	/* mark that the UAC received replies */
	uac->flags |= T_UAC_HAS_RECV_REPLY;

	/* we fire a cancel on the spot if (a) the branch is marked "to be
	 * canceled" or (b) the whole transaction was canceled (a CANCEL was
	 * received) and no CANCEL was sent yet on this branch; and of course,
	 * only on a provisional reply :) */
	if (t->uac[branch].flags&T_UAC_TO_CANCEL_FLAG ||
	((t->flags&T_WAS_CANCELLED_FLAG) && !t->uac[branch].local_cancel.buffer.s)) {
		if ( msg_status < 200 )
			/* reply for an UAC with a pending cancel -> do cancel now */
			cancel_branch(t, branch);
		/* reset flag */
		t->uac[branch].flags &= ~(T_UAC_TO_CANCEL_FLAG);
	}

	if (is_local(t)) {
		reply_status = local_reply(t,p_msg, branch,msg_status,&cancel_bitmap);
		if (reply_status == RPS_COMPLETED) {
			cleanup_uac_timers(t);
			if (is_invite(t)) cancel_uacs(t, cancel_bitmap);
			/* There is no need to call set_final_timer because we know
			 * that the transaction is local */
			put_on_wait(t);
		}
	} else {
		reply_status = relay_reply(t,p_msg,branch,msg_status,&cancel_bitmap);
		/* clean-up the transaction when transaction completed */
		if (reply_status == RPS_COMPLETED) {
			/* no more UAC FR/RETR (if I received a 2xx, there may
			 * still be pending branches ...)
			 */
			cleanup_uac_timers(t);
			if (is_invite(t)) cancel_uacs(t, cancel_bitmap);
			/* FR for negative INVITES, WAIT anything else */
			/* set_final_timer(t); */
		}
	}
	
	if (reply_status!=RPS_PROVISIONAL)
		goto done;
	
	/* update FR/RETR timers on provisional replies */
	if (msg_status < 200 && (restart_fr_on_each_reply ||
	((last_uac_status<msg_status) &&
	((msg_status >= 180) || (last_uac_status == 0)))
	) ) { /* provisional now */
		if (is_invite(t)) {
			/* invite: change FR to longer FR_INV, do not
			 * attempt to restart retransmission any more
			 */
			backup_list = set_avp_list(&t->user_avps);
			if (!fr_inv_avp2timer(&timer)) {
				LM_DBG("FR_INV_TIMER = %lld\n", timer);
				set_timer(&uac->request.fr_timer,
					FR_INV_TIMER_LIST, &timer);
			} else {
				set_timer(& uac->request.fr_timer, FR_INV_TIMER_LIST, 0);
			}
			set_avp_list(backup_list);
		} else {
			/* non-invite: restart retransmissions (slow now) */
			uac->request.retr_list = RT_T2;
			set_timer(&uac->request.retr_timer, RT_T2, 0);
		}
	} /* provisional replies */
	
done:
	/* we are done with the transaction, so unref it - the reference
	 * was incremented by t_check() function -bogdan*/
	t_unref(p_msg);
	/* don't try to relay statelessly neither on success
	 * (we forwarded statefully) nor on error; on troubles,
	 * simply do nothing; that will make the other party
	 * retransmit; hopefully, we'll then be better off
	 */
	_tm_branch_index = 0;
	return 0;
not_found:
	set_t(T_UNDEFINED);
	return 1;
}
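The comment above says the handler must be registered at module initialization. In kamailio-style modules this is typically done through the response-function slot of module_exports; a sketch under that assumption (the member order is version-dependent, and cmds, params, mod_init, child_init and mod_destroy are placeholders):

/* hypothetical registration sketch - members are illustrative */
struct module_exports exports = {
	"tm",            /* module name */
	DEFAULT_DLFLAGS, /* dlopen flags */
	cmds,            /* exported script functions */
	params,          /* exported parameters */
	0,               /* exported RPC methods */
	0,               /* exported pseudo-variables */
	reply_received,  /* response handling function */
	mod_init,        /* module initialization function */
	child_init,      /* per-child init function */
	mod_destroy      /* destroy function */
};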
Example #9
int t_append_branches(void) {
	struct cell *t = NULL;
	struct sip_msg *orig_msg = NULL;
	short outgoings;

	int success_branch;

	str current_uri;
	str dst_uri, path, instance, ruid, location_ua;
	struct socket_info* si;
	int q, i, found;
	flag_t backup_bflags = 0;
	flag_t bflags = 0;
	int new_branch, branch_ret, lowest_ret;
	branch_bm_t	added_branches;
	int replies_locked = 0;

	t = get_t();
	if(t == NULL)
	{
		LM_ERR("cannot get transaction\n");
		return -1;
	}

	LM_DBG("transaction %u:%u in status %d\n", t->hash_index, t->label, t->uas.status);

	/* test if transaction has already been canceled */
	if (t->flags & T_CANCELED) {
		ser_error=E_CANCELED;
		return -1;
	}

	if ((t->uas.status >= 200 && t->uas.status<=399)
			|| (t->uas.status >= 600
				&& !(t->flags & (T_6xx | T_DISABLE_6xx))) ) {
		LM_DBG("transaction %u:%u in status %d: cannot append new branch\n",
				t->hash_index, t->label, t->uas.status);
		return -1;
	}

	/* set the lock on the transaction here */
	LOCK_REPLIES(t);
	replies_locked = 1;
	outgoings = t->nr_of_outgoings;
	orig_msg = t->uas.request;

	LM_DBG("Call %.*s: %d (%d) outgoing branches\n",orig_msg->callid->body.len,
			orig_msg->callid->body.s,outgoings, nr_branches);

	lowest_ret=E_UNSPEC;
	added_branches=0;

	/* it's a "late" branch so the on_branch variable has already been
	reset by previous execution of t_forward_nonack: we use the saved
	   value  */
	if (t->on_branch_delayed) {
		/* tell add_uac that it should run branch route actions */
		set_branch_route(t->on_branch_delayed);
	}


	/* not really sure that the following is needed */

	set_branch_iterator(nr_branches-1);
	found = 0;
	while((current_uri.s=next_branch( &current_uri.len, &q, &dst_uri, &path,
										&bflags, &si, &ruid, &instance, &location_ua))) {
		LM_DBG("Current uri %.*s\n",current_uri.len, current_uri.s);

		for (i=0; i<nr_branches; i++) {
			if (t->uac[i].ruid.len == ruid.len
					&& !memcmp(t->uac[i].ruid.s, ruid.s, ruid.len)) {
				LM_DBG("branch already added [%.*s]\n", ruid.len, ruid.s);
				found = 1;
				break;
			}
		}
		if (found)
			continue;

		setbflagsval(0, bflags);
		new_branch=add_uac( t, orig_msg, &current_uri,
					(dst_uri.len) ? (&dst_uri) : &current_uri,
					&path, 0, si, orig_msg->fwd_send_flags,
					orig_msg->rcv.proto, (dst_uri.len)?-1:UAC_SKIP_BR_DST_F, &instance,
					&ruid, &location_ua);

		/* test if cancel was received meanwhile */
		if (t->flags & T_CANCELED) goto canceled;

		if (new_branch>=0)
			added_branches |= 1<<new_branch;
		else
			lowest_ret=MIN_int(lowest_ret, new_branch);
	}

	clear_branches();

	LM_DBG("Call %.*s: %d (%d) outgoing branches after clear_branches()\n",
			orig_msg->callid->body.len, orig_msg->callid->body.s,outgoings, nr_branches);
	setbflagsval(0, backup_bflags);

	/* update message flags, if changed in branch route */
	t->uas.request->flags = orig_msg->flags;

	if (added_branches==0) {
		if(lowest_ret!=E_CFG)
			LOG(L_ERR, "ERROR: t_append_branch: failure to add branches\n");
		ser_error=lowest_ret;
		replies_locked = 0;
		UNLOCK_REPLIES(t);
		return lowest_ret;
	}

	ser_error=0; /* clear branch adding errors */
	/* send them out now */
	success_branch=0;
	/* since t_append_branch can only be called from REQUEST_ROUTE, always lock replies */

	for (i=outgoings; i<t->nr_of_outgoings; i++) {
		if (added_branches & (1<<i)) {
			branch_ret=t_send_branch(t, i, orig_msg , 0, 0 /* replies are already locked */ );
			if (branch_ret>=0){ /* some kind of success */
				if (branch_ret==i) { /* success */
					success_branch++;
					if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
						run_trans_callbacks_with_buf( TMCB_REQUEST_OUT,
								&t->uac[nr_branches].request,
								orig_msg, 0, -orig_msg->REQ_METHOD);
				}
				else /* new branch added */
					added_branches |= 1<<branch_ret;
			}
		}
	}
	if (success_branch<=0) {
		/* return always E_SEND for now
		 * (the real reason could be: denied by onsend routes, blacklisted,
		 *  send failed or any of the errors listed before + dns failed
		 *  when attempting dns failover) */
		ser_error=E_SEND;
		/* else return the last error (?) */
		/* the caller should take care and delete the transaction */
		replies_locked = 0;
		UNLOCK_REPLIES(t);
		return -1;
	}

	ser_error=0; /* clear branch send errors, we have overall success */
	set_kr(REQ_FWDED);
	replies_locked = 0;
	UNLOCK_REPLIES(t);
	return 1;

canceled:
	DBG("t_append_branches: cannot append branches to a canceled transaction\n");
	/* reset processed branches */
	clear_branches();
	/* restore backup flags from initial env */
	setbflagsval(0, backup_bflags);
	/* update message flags, if changed in branch route */
	t->uas.request->flags = orig_msg->flags;
	/* if needed unlock transaction's replies */
	if (likely(replies_locked)) {
		/* restore the number of outgoing branches
		 * since new branches have not been completed */
		t->nr_of_outgoings = outgoings;
		replies_locked = 0;
		UNLOCK_REPLIES(t);
	}
	ser_error=E_CANCELED;
	return -1;
}
Example #10
/* should be called directly only if one of the conditions below is true:
 *  - prepare_cancel_branch or prepare_to_cancel returned true for this branch
 *  - buffer value was 0 and then set to BUSY in an atomic op.:
 *     if (atomic_cmpxchg_long(&buffer, 0, BUSY_BUFFER)==0).
 *
 * params:  t - transaction
 *          branch - branch number to be canceled
 *          reason - cancel reason structure
 *          flags - how to cancel:
 *                   F_CANCEL_B_KILL - will completely stop the 
 *                     branch (stops the timers), use with care
 *                   F_CANCEL_B_FAKE_REPLY - will send a fake 487
 *                      to all branches that haven't received any response
 *                      (>=100). It assumes the REPLY_LOCK is not held
 *                      (if it is => deadlock)
 *                  F_CANCEL_B_FORCE_C - will send a cancel (and create the 
 *                       corresp. local cancel rb) even if no reply was 
 *                       received; F_CANCEL_B_FAKE_REPLY will be ignored.
 *                  F_CANCEL_B_FORCE_RETR - don't stop retransmission if no
 *                       reply was received on the branch; incompatible
 *                       with F_CANCEL_B_FAKE_REPLY, F_CANCEL_B_FORCE_C and
 *                       F_CANCEL_B_KILL (all of them take precedence)
 *                  default: stop only the retransmissions for the branch
 *                      and leave it to timeout if it doesn't receive any
 *                      response to the CANCEL
 * returns: 0 - branch inactive after running cancel_branch() 
 *          1 - branch still active  (fr_timer)
 *         -1 - error
 * WARNING:
 *          - F_CANCEL_B_KILL should be used only if the transaction is killed
 *            explicitly afterwards (since it might kill all the timers
 *            the transaction won't be able to "kill" itself => if not
 *            explicitly "put_on_wait" it might live forever)
 *          - F_CANCEL_B_FAKE_REPLY must be used only if the REPLY_LOCK is not
 *            held
 *          - checking for buffer==0 under REPLY_LOCK is not enough; an
 *            atomic_cmpxchg or atomic_get_and_set _must_ be used.
 */
int cancel_branch( struct cell *t, int branch,
	#ifdef CANCEL_REASON_SUPPORT
					struct cancel_reason* reason,
	#endif /* CANCEL_REASON_SUPPORT */
					int flags )
{
	char *cancel;
	unsigned int len;
	struct retr_buf *crb, *irb;
	int ret;
	struct cancel_info tmp_cd;
	void* pcbuf;

	crb=&t->uac[branch].local_cancel;
	irb=&t->uac[branch].request;
	irb->flags|=F_RB_CANCELED;
	ret=1;
	init_cancel_info(&tmp_cd);

#	ifdef EXTRA_DEBUG
	if (crb->buffer!=BUSY_BUFFER) {
		LOG(L_CRIT, "ERROR: attempt to rewrite cancel buffer: %p\n",
				crb->buffer);
		abort();
	}
#	endif

	if (flags & F_CANCEL_B_KILL){
		stop_rb_timers( irb );
		ret=0;
		if ((t->uac[branch].last_received < 100) &&
				!(flags & F_CANCEL_B_FORCE_C)) {
			DBG("DEBUG: cancel_branch: no response ever received: "
			    "giving up on cancel\n");
			/* remove BUSY_BUFFER -- mark cancel buffer as not used */
			pcbuf=&crb->buffer; /* workaround for type punning warnings */
			atomic_set_long(pcbuf, 0);
			/* try to relay auto-generated 487 canceling response only when
			 * another one is not under relaying on the branch and there is
			 * no forced response per transaction from script */
			if((flags & F_CANCEL_B_FAKE_REPLY)
					&& !(irb->flags&F_RB_RELAYREPLY)
					&& !(t->flags&T_ADMIN_REPLY)) {
				LOCK_REPLIES(t);
				if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1) == 
										RPS_ERROR){
					return -1;
				}
			}
			/* do nothing, hope that the caller will clean up */
			return ret;
		}
	}else{
		if (t->uac[branch].last_received < 100){
			if (!(flags & F_CANCEL_B_FORCE_C)) {
				/* no response received => don't send a cancel on this branch,
				 *  just drop it */
				if (!(flags & F_CANCEL_B_FORCE_RETR))
					stop_rb_retr(irb); /* stop retransmissions */
				/* remove BUSY_BUFFER -- mark cancel buffer as not used */
				pcbuf=&crb->buffer; /* workaround for type punning warnings */
				atomic_set_long(pcbuf, 0);
				if (flags & F_CANCEL_B_FAKE_REPLY){
					stop_rb_timers( irb ); /* stop even the fr timer */
					LOCK_REPLIES(t);
					if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1)== 
											RPS_ERROR){
						return -1;
					}
					return 0; /* should be inactive after the 487 */
				}
				/* do nothing, just wait for the final timeout */
				return 1;
			}
		}
		stop_rb_retr(irb); /* stop retransmissions */
	}

	if (cfg_get(tm, tm_cfg, reparse_invite) ||
			(t->uas.request && t->uas.request->msg_flags&(FL_USE_UAC_FROM|FL_USE_UAC_TO))) {
		/* build the CANCEL from the INVITE which was sent out */
		cancel = build_local_reparse(t, branch, &len, CANCEL, CANCEL_LEN,
									 (t->uas.request && t->uas.request->msg_flags&FL_USE_UAC_TO)?0:&t->to
	#ifdef CANCEL_REASON_SUPPORT
									 , reason
	#endif /* CANCEL_REASON_SUPPORT */
									 );
	} else {
		/* build the CANCEL from the received INVITE */
		cancel = build_local(t, branch, &len, CANCEL, CANCEL_LEN, &t->to
	#ifdef CANCEL_REASON_SUPPORT
								, reason
	#endif /* CANCEL_REASON_SUPPORT */
								);
	}
	if (!cancel) {
		LOG(L_ERR, "ERROR: attempt to build a CANCEL failed\n");
		/* remove BUSY_BUFFER -- mark cancel buffer as not used */
		pcbuf=&crb->buffer; /* workaround for type punning warnings */
		atomic_set_long(pcbuf, 0);
		return -1;
	}
	/* install cancel now */
	crb->dst = irb->dst;
	crb->branch = branch;
	/* label it as cancel so that FR timer can better know how to
	   deal with it */
	crb->activ_type = TYPE_LOCAL_CANCEL;
	/* be extra careful and check for bugs (the if below could be replaced
	 *  by an atomic_set((void*)&crb->buffer, cancel)) */
	if (unlikely(atomic_cmpxchg_long((void*)&crb->buffer, (long)BUSY_BUFFER,
							(long)cancel)!= (long)BUSY_BUFFER)){
		BUG("tm: cancel_branch: local_cancel buffer=%p != BUSY_BUFFER"
				" (trying to continue)\n", crb->buffer);
		shm_free(cancel);
		return -1;
	}
	membar_write_atomic_op(); /* cancel retr. can be called from 
								 reply_received w/o the reply lock held => 
								 they check for buffer_len to 
								 see if a valid reply exists */
	crb->buffer_len = len;

	DBG("DEBUG: cancel_branch: sending cancel...\n");
	if (SEND_BUFFER( crb )>=0){
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_OUT, crb, t->uas.request, 0, TMCB_LOCAL_F);
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, crb, t->uas.request, 0, TMCB_LOCAL_F);
	}
	/*sets and starts the FINAL RESPONSE timer */
	if (start_retr( crb )!=0)
		LOG(L_CRIT, "BUG: cancel_branch: failed to start retransmission"
					" for %p\n", crb);
	return ret;
}
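A hypothetical wrapper illustrating the calling contract documented above: the cancel buffer must first be claimed atomically (0 -> BUSY_BUFFER), and the F_CANCEL_B_FAKE_REPLY path assumes the REPLY_LOCK is not held. The wrapper name is illustrative, and CANCEL_REASON_SUPPORT is assumed to be compiled in:

/* hypothetical wrapper: claim the cancel buffer atomically, then cancel
 * the branch; must NOT be called with the REPLY_LOCK held (fake 487 path) */
static int cancel_branch_checked(struct cell *t, int branch,
		struct cancel_reason *reason)
{
	struct retr_buf *crb = &t->uac[branch].local_cancel;

	if (atomic_cmpxchg_long((void*)&crb->buffer, 0, (long)BUSY_BUFFER) != 0)
		return 1; /* someone else owns the cancel buffer; nothing to do */
	return cancel_branch(t, branch, reason, F_CANCEL_B_FAKE_REPLY);
}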
Example #11
File: t_fwd.c Project: OPSF/uClinux
void e2e_cancel( struct sip_msg *cancel_msg,
                 struct cell *t_cancel, struct cell *t_invite )
{
    branch_bm_t cancel_bm, tmp_bm;
    int i;
    int lowest_error;
    str backup_uri;
    int ret;

    cancel_bm=0;
    lowest_error=0;

    backup_uri=cancel_msg->new_uri;
    /* determine which branches to cancel ... */
    which_cancel( t_invite, &cancel_bm );
    t_cancel->nr_of_outgoings=t_invite->nr_of_outgoings;
    /* fix label -- it must be same for reply matching */
    t_cancel->label=t_invite->label;
    /* ... and install CANCEL UACs */
    for (i=0; i<t_invite->nr_of_outgoings; i++)
        if (cancel_bm & (1<<i)) {
            ret=e2e_cancel_branch(cancel_msg, t_cancel, t_invite, i);
            if (ret<0) cancel_bm &= ~(1<<i);
            if (ret<lowest_error) lowest_error=ret;
        }
    cancel_msg->new_uri=backup_uri;

    /* send them out */
    for (i=0; i<t_cancel->nr_of_outgoings; i++) {
        if (cancel_bm & (1<<i)) {
            /* Provisional reply received on this branch, send CANCEL */
            /* No need to stop timers as they have already been stopped by the reply */
            if (SEND_BUFFER( &t_cancel->uac[i].request)==-1) {
                LOG(L_ERR, "ERROR: e2e_cancel: send failed\n");
            }
            start_retr( &t_cancel->uac[i].request );
        } else {
            if (t_invite->uac[i].last_received < 100) {
                /* No provisional response received, stop
                 * retransmission timers
                 */
                reset_timer(&t_invite->uac[i].request.retr_timer);
                reset_timer(&t_invite->uac[i].request.fr_timer);

                /* Generate faked reply */
                LOCK_REPLIES(t_invite);
                if (relay_reply(t_invite, FAKED_REPLY, i, 487, &tmp_bm) == RPS_ERROR) {
                    lowest_error = -1;
                }
            }
        }
    }


    /* if an error occurred, let upstream know (the final reply
       will also move the transaction into wait state)
    */
    if (lowest_error<0) {
        LOG(L_ERR, "ERROR: cancel error\n");
        t_reply( t_cancel, cancel_msg, 500, "cancel error");
        /* if there are pending branches, let upstream know we
           are working on it
        */
    } else if (cancel_bm) {
        DBG("DEBUG: e2e_cancel: e2e cancel proceeding\n");
        t_reply( t_cancel, cancel_msg, 200, CANCELING );
        /* if the transaction exists, but there is no more pending
           branch, tell upstream we're done
        */
    } else {
        DBG("DEBUG: e2e_cancel: e2e cancel -- no more pending branches\n");
        t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
    }

#ifdef LOCAL_487

    /* local 487s have been deprecated -- this better handles
     * race conditions (UAS sending 200); hopefully there are
     * no longer UACs who go crazy waiting for the 487 whose
     * forwarding is being blocked by another unresponsive branch
     */

    /* we could await the downstream UAS's 487 replies; however,
       if some of the branches do not send one, we could wait a
       long time and annoy the upstream UAC, which wants to see
       a result of the CANCEL quickly
    */
    DBG("DEBUG: e2e_cancel: sending 487\n");
    /* in case that something in the meantime has been sent upstream
       (like if FR hit at the same time), don't try to send */
    if (t_invite->uas.status>=200) return;
    /* there is still a race-condition -- the FR can hit now; that's
       not too bad -- we take care in t_reply's REPLY_LOCK; in
       the worst case, both this t_reply and other replier will
       try, and the later one will result in error message
       "can't reply twice"
    */
    t_reply(t_invite, t_invite->uas.request, 487, CANCELED );
#endif
}
Example #12
/* Continues the SIP request processing previously saved by
 * t_suspend(). The script does not continue from the same
 * point, but a separate route block is executed instead.
 *
 * Return value:
 * 	0  - success
 * 	<0 - failure
 */
int t_continue(unsigned int hash_index, unsigned int label,
		struct action *route)
{
	struct cell	*t;
	struct sip_msg	faked_req;
	struct cancel_info cancel_data;
	int	branch;
	struct ua_client *uac =NULL;
	int	ret;
	int cb_type;
	int msg_status;
	int last_uac_status;
	int reply_status;
	int do_put_on_wait;
	struct hdr_field *hdr, *prev = 0, *tmp = 0;

	if (t_lookup_ident(&t, hash_index, label) < 0) {
		LM_ERR("transaction not found\n");
		return -1;
	}

	if (!(t->flags & T_ASYNC_SUSPENDED)) {
		LM_WARN("transaction is not suspended [%u:%u]\n", hash_index, label);
		return -2;
	}

	if (t->flags & T_CANCELED) {
		t->flags &= ~T_ASYNC_SUSPENDED;
		/* The transaction has already been canceled,
		 * needless to continue */
		UNREF(t); /* t_unref would kill the transaction */
		/* reset T as we have no working T anymore */
		set_t(T_UNDEFINED, T_BR_UNDEFINED);
		return 1;
	}

	/* The transaction has to be locked to protect it
	 * from t_continue() being called multiple times simultaneously */
	LOCK_ASYNC_CONTINUE(t);

	t->flags |= T_ASYNC_CONTINUE;   /* we can now know anywhere in kamailio
					 * that we are executing after a suspend */

	/* which route block type were we in when we were suspended */
	cb_type = FAILURE_CB_TYPE;
	switch (t->async_backup.backup_route) {
		case REQUEST_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case FAILURE_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case TM_ONREPLY_ROUTE:
			cb_type = ONREPLY_CB_TYPE;
			break;
		case BRANCH_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		branch = t->async_backup.blind_uac;	/* get the branch of the blind
			 * UAC set up during suspend */
		if (branch >= 0) {
			stop_rb_timers(&t->uac[branch].request);
 
			if (t->uac[branch].last_received != 0) {
				/* Either t_continue() has already been
				* called or the branch has already timed out.
				* Needless to continue. */
				t->flags &= ~T_ASYNC_SUSPENDED;
				UNLOCK_ASYNC_CONTINUE(t);
				UNREF(t); /* t_unref would kill the transaction */
				return 1;
			}

			/* Set last_received to something >= 200,
			 * the actual value does not matter, the branch
			 * will never be picked up for response forwarding.
			 * If last_received is lower than 200,
			 * then an attempt to cancel the branch may be made later,
			 * for example when t_reply() is called from
			 * a failure route => deadlock, because both
			 * of them need the reply lock to be held. */
			t->uac[branch].last_received=500;
			uac = &t->uac[branch];
		}
		/* else
			Not a huge problem, fr timer will fire, but CANCEL
			will not be sent. last_received will be set to 408. */

		/* We should not reset kr to 0 here: it is quite possible that,
		 * before continuing, the developer has correctly set kr, for
		 * example by sending a transactional reply in code - resetting it
		 * here would produce the bogus "WARNING: script writer didn't
		 * release transaction" log message. TODO: maybe we need to add
		 * a special kr for async?
		 * reset_kr();
		 */

		/* fake the request and the environment, like in failure_route */
		if (!fake_req(&faked_req, t->uas.request, 0 /* extra flags */, uac)) {
			LM_ERR("building fake_req failed\n");
			ret = -1;
			goto kill_trans;
		}
		faked_env( t, &faked_req, 1);

		/* execute the pre/post -script callbacks based on original route block */
		if (exec_pre_script_cb(&faked_req, cb_type)>0) {
			if (run_top_route(route, &faked_req, 0)<0)
				LM_ERR("failure inside run_top_route\n");
			exec_post_script_cb(&faked_req, cb_type);
		}

		/* TODO: save_msg_lumps should clone the lumps to shm mem */

		/* restore original environment and free the fake msg */
		faked_env( t, 0, 1);
		free_faked_req(&faked_req, t);

		/* update the flags */
		t->uas.request->flags = faked_req.flags;

		if (t->uas.status < 200) {
			/* No final reply has been sent yet.
			 * Check whether or not there is any pending branch. */
			for (	branch = 0;
				branch < t->nr_of_outgoings;
				branch++
			) {
				if (t->uac[branch].last_received < 200)
					break;
			}

			if (branch == t->nr_of_outgoings) {
				/* There is no open branch left, so there is no
				 * chance that a final response will be received. */
				ret = 0;
				goto kill_trans;
			}
		}

	} else {
		branch = t->async_backup.backup_branch;

		init_cancel_info(&cancel_data);

		LM_DBG("continuing from a suspended reply"
				" - resetting the suspend branch flag\n");

		if (t->uac[branch].reply) {
			t->uac[branch].reply->msg_flags &= ~FL_RPL_SUSPENDED;
		} else {
			LM_WARN("no reply in t_continue for branch, not much we can do\n");
			/* release the async lock and the t_lookup_ident() reference
			 * taken above before bailing out */
			UNLOCK_ASYNC_CONTINUE(t);
			UNREF(t);
			return 0;
		}

		if (t->uas.request) t->uas.request->msg_flags&= ~FL_RPL_SUSPENDED;

		faked_env( t, t->uac[branch].reply, 1);

		if (exec_pre_script_cb(t->uac[branch].reply, cb_type)>0) {
			if (run_top_route(route, t->uac[branch].reply, 0)<0){
				LOG(L_ERR, "ERROR: t_continue_reply: Error in run_top_route\n");
			}
			exec_post_script_cb(t->uac[branch].reply, cb_type);
		}

		LM_DBG("restoring previous environment");
		faked_env( t, 0, 1);

		/*lock transaction replies - will be unlocked when reply is relayed*/
		LOCK_REPLIES( t );
		if ( is_local(t) ) {
			LM_DBG("t is local - sending reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			reply_status = local_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * still be pending branches ...)
				 */
				cleanup_uac_timers( t );
				if (is_invite(t)) cancel_uacs(t, &cancel_data, F_CANCEL_B_KILL);
				/* There is no need to call set_final_timer because we know
				* that the transaction is local */
				put_on_wait(t);
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
				* cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data, cfg_get(tm,tm_cfg, cancel_b_flags));
			}

		} else {
			LM_DBG("t is not local - relaying reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			do_put_on_wait = 0;
			if(t->uac[branch].reply->first_line.u.reply.statuscode>=200){
				do_put_on_wait = 1;
			}
			reply_status=relay_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data, do_put_on_wait );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * still be pending branches ...)
				 */
				cleanup_uac_timers( t );
				/* 2xx is a special case: we can have a COMPLETED request
				* with branches still open => we have to cancel them */
				if (is_invite(t) && cancel_data.cancel_bitmap) 
					cancel_uacs( t, &cancel_data,  F_CANCEL_B_KILL);
				/* FR for negative INVITES, WAIT anything else */
				/* Call to set_final_timer is embedded in relay_reply to avoid
				* race conditions when reply is sent out and an ACK to stop
				* retransmissions comes before retransmission timer is set.*/
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx), use
				* cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data, cfg_get(tm,tm_cfg, cancel_b_flags));
			}

		}
		t->uac[branch].request.flags|=F_RB_REPLIED;

		if (reply_status==RPS_ERROR){
			goto done;
		}

		/* update FR/RETR timers on provisional replies */

		msg_status=t->uac[branch].reply->REPLY_STATUS;
		last_uac_status=t->uac[branch].last_received;

		if (is_invite(t) && msg_status<200 &&
			( cfg_get(tm, tm_cfg, restart_fr_on_each_reply) ||
			( (last_uac_status<msg_status) &&
			((msg_status>=180) || (last_uac_status==0)) )
		) ) { /* provisional now */
			restart_rb_fr(& t->uac[branch].request, t->fr_inv_timeout);
			t->uac[branch].request.flags|=F_RB_FR_INV; /* mark fr_inv */
		}
            
	}

done:
	UNLOCK_ASYNC_CONTINUE(t);

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		/* unref the transaction */
		t_unref(t->uas.request);
	} else {
		tm_ctx_set_branch_index(T_BR_UNDEFINED);        
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
		LOG(L_DBG,"DEBUG: t_continue_reply: Freeing earlier cloned reply\n");

		/* free lumps that were added during reply processing */
		del_nonshm_lump( &(t->uac[branch].reply->add_rm) );
		del_nonshm_lump( &(t->uac[branch].reply->body_lumps) );
		del_nonshm_lump_rpl( &(t->uac[branch].reply->reply_lump) );

		/* free header's parsed structures that were added */
		for( hdr=t->uac[branch].reply->headers ; hdr ; hdr=hdr->next ) {
			if ( hdr->parsed && hdr_allocs_parse(hdr) &&
				(hdr->parsed<(void*)t->uac[branch].reply ||
				hdr->parsed>=(void*)t->uac[branch].end_reply)) {
				clean_hdr_field(hdr);
				hdr->parsed = 0;
			}
		}

		/* now go through hdr_fields themselves and remove the pkg allocated space */
		hdr = t->uac[branch].reply->headers;
		while (hdr) {
			if ( hdr && ((void*)hdr<(void*)t->uac[branch].reply ||
				(void*)hdr>=(void*)t->uac[branch].end_reply)) {
				//this header needs to be freed and removed from the list.
				if (!prev) {
					t->uac[branch].reply->headers = hdr->next;
				} else {
					prev->next = hdr->next;
				}
				tmp = hdr;
				hdr = hdr->next;
				pkg_free(tmp);
			} else {
				prev = hdr;
				hdr = hdr->next;
			}
		}
		sip_msg_free(t->uac[branch].reply);
		t->uac[branch].reply = 0;
	}

	/* This transaction is no longer suspended, so unset the SUSPEND flag */
	t->flags &= ~T_ASYNC_SUSPENDED;


	return 0;

kill_trans:
	t->flags &= ~T_ASYNC_SUSPENDED;
	/* The script has hopefully set the error code. If not,
	 * let us reply with a default error. */
	if ((kill_transaction_unsafe(t,
		tm_error ? tm_error : E_UNSPEC)) <=0
	) {
		LOG(L_ERR, "ERROR: t_continue: "
			"reply generation failed\n");
		/* The transaction must be explicitly released,
		 * no timer is running anymore */
		UNLOCK_ASYNC_CONTINUE(t);
		t_release_transaction(t);
	} else {
		UNLOCK_ASYNC_CONTINUE(t);
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		t_unref(t->uas.request);
	} else {
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
	}
	return ret;
}
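A hypothetical end-to-end sketch of the suspend/continue pattern this function implements: the script suspends the transaction, hands its coordinates to an external worker, and the worker later resumes it in a dedicated route block. The resume_route variable is illustrative, and t_suspend() is assumed to have the usual (msg, &hash_index, &label) signature paired with this function:

/* hypothetical async flow sketch */
unsigned int hash_index, label;

/* 1. while processing the request: park the transaction */
if (t_suspend(msg, &hash_index, &label) < 0) {
	LM_ERR("failed to suspend the transaction\n");
	return -1;
}
/* ... hand (hash_index, label) to an external worker/query ... */

/* 2. later, possibly from another process: resume in a separate route */
if (t_continue(hash_index, label, resume_route) < 0)
	LM_ERR("failed to resume the transaction\n");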
Example #13
inline static void final_response_handler( struct timer_link *fr_tl )
{
#define CANCEL_REASON_SIP_480  \
	"Reason: SIP;cause=480;text=\"NO_ANSWER\"" CRLF

	static context_p my_ctx = NULL;
	context_p old_ctx;
	struct retr_buf* r_buf;
	struct cell *t;

	if (fr_tl==0){
		/* or BUG?, ignoring it for now */
		LM_CRIT("final_response_handler(0) called\n");
		return;
	}
	r_buf = get_fr_timer_payload(fr_tl);
	t=r_buf->my_T;

#	ifdef EXTRA_DEBUG
	if (t->damocles)
	{
		LM_ERR("transaction %p scheduled for deletion and"
			" called from FR timer\n",r_buf->my_T);
		abort();
	}
#	endif

	reset_timer(  &(r_buf->retr_timer) );

	/* the transaction is already removed from FR_LIST by the timer */

	/* FR for local cancels.... */
	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
	{
		LM_DBG("stop retr for Local Cancel\n");
		return;
	}

	/* FR for replies (negative INVITE replies) */
	if (r_buf->activ_type>0) {
#		ifdef EXTRA_DEBUG
		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
			|| t->uas.status < 200 ) {
			LM_ERR("unknown type reply buffer\n");
			abort();
		}
#		endif
		put_on_wait( t );
		return;
	};

	/* as this processing is outside the scope of other messages (it is
	   triggered from a timer), a processing context must be attached to it */
	old_ctx = current_processing_ctx;
	if (my_ctx==NULL) {
		my_ctx = context_alloc(CONTEXT_GLOBAL);
		if (my_ctx==NULL) {
			LM_ERR("failed to alloc new ctx in pkg\n");
			return;
		}
	}
	memset( my_ctx, 0, context_size(CONTEXT_GLOBAL) );
	current_processing_ctx = my_ctx;
	/* set the T context too */
	set_t( t );

	/* do the CANCEL I/O outside of the reply lock */
	if (is_invite(t) && should_cancel_branch(t, r_buf->branch) ) {
		set_cancel_extra_hdrs( CANCEL_REASON_SIP_480, sizeof(CANCEL_REASON_SIP_480)-1);
		cancel_branch(t, r_buf->branch );
		set_cancel_extra_hdrs( NULL, 0);
	}
	/* lock reply processing to determine how to proceed reliably */
	LOCK_REPLIES( t );
	LM_DBG("Cancel sent out, sending 408 (%p)\n", t);
	fake_reply(t, r_buf->branch, 408 );

	/* flush the context */
	if (current_processing_ctx==NULL)
		my_ctx=NULL;
	else
		context_destroy(CONTEXT_GLOBAL, my_ctx);
	/* switch back to the old context */
	current_processing_ctx = old_ctx;
	/* reset the T context */
	init_t();

	LM_DBG("done\n");
}
Example #14
inline static void final_response_handler(
		struct retr_buf *r_buf, struct cell *t)
{
	int silent;
#ifdef USE_DNS_FAILOVER
	/*int i;
	int added_branches;
	*/
	int branch_ret;
	int prev_branch;
	ticks_t now;
#endif

#ifdef EXTRA_DEBUG
	if(t->flags & T_IN_AGONY) {
		LM_ERR("transaction %p scheduled for deletion and"
			   " called from FR timer (flags %x)\n",
				t, t->flags);
		abort();
	}
#endif
	/* FR for local cancels.... */
	if(r_buf->rbtype == TYPE_LOCAL_CANCEL) {
#ifdef TIMER_DEBUG
		LM_DBG("stop retr for local cancel\n");
#endif
		return;
	}
	/* FR for replies (negative INVITE replies) */
	if(r_buf->rbtype > 0) {
#ifdef EXTRA_DEBUG
		if(t->uas.request->REQ_METHOD != METHOD_INVITE || t->uas.status < 200) {
			LM_CRIT("BUG - unknown type reply buffer\n");
			abort();
		}
#endif
		put_on_wait(t);
		return;
	};

	/* lock reply processing to determine how to proceed reliably */
	LOCK_REPLIES(t);
	/* now it can only be a request retransmission buffer;
	   check whether the local transaction state can simply be
	   discarded, without forcibly removing it from the
	   world */
	silent =
			/* don't go silent if disallowed globally ... */
			cfg_get(tm, tm_cfg, noisy_ctimer) == 0
			/* ... or for this particular transaction */
			&& has_noisy_ctimer(t) == 0
			/* not for UACs */
			&& !is_local(t)
			/* invites only */
			&& is_invite(t)
			/* parallel forking does not allow silent state discarding */
			&& t->nr_of_outgoings == 1
			/* on_negative reply handler not installed -- serial forking
			 * could occur otherwise */
			&& t->on_failure == 0
			/* the same for FAILURE callbacks */
			&& !has_tran_tmcbs(t, TMCB_ON_FAILURE_RO | TMCB_ON_FAILURE)
			/* something received -- we will not be silent on error */
			&& t->uac[r_buf->branch].last_received == 0;

	if(silent) {
		UNLOCK_REPLIES(t);
#ifdef EXTRA_DEBUG
		LM_DBG("transaction silently dropped (%p), branch %d, last_received %d\n",
				t, r_buf->branch, t->uac[r_buf->branch].last_received);
#endif
		put_on_wait(t);
		return;
	}
#ifdef EXTRA_DEBUG
	LM_DBG("stop retr. and send CANCEL (%p)\n", t);
#endif
	if((r_buf->branch < sr_dst_max_branches)
			&& /* r_buf->branch is always >=0 */
			(t->uac[r_buf->branch].last_received == 0)
			&& (t->uac[r_buf->branch].request.buffer
					   != NULL) /* not a blind UAC */
			) {
		/* no reply received */
#ifdef USE_DST_BLACKLIST
		if(r_buf->my_T && r_buf->my_T->uas.request
				&& (r_buf->my_T->uas.request->REQ_METHOD
						   & cfg_get(tm, tm_cfg, tm_blst_methods_add)))
			dst_blacklist_add(
					BLST_ERR_TIMEOUT, &r_buf->dst, r_buf->my_T->uas.request);
#endif
#ifdef USE_DNS_FAILOVER
		/* if this is an invite, the destination resolves to more ips, and
		 *  it still hasn't passed more than fr_inv_timeout since we
		 *  started, add another branch/uac */
		if(cfg_get(core, core_cfg, use_dns_failover)) {
			now = get_ticks_raw();
			if((s_ticks_t)(t->end_of_life - now) > 0) {
				branch_ret = add_uac_dns_fallback(
						t, t->uas.request, &t->uac[r_buf->branch], 0);
				prev_branch = -1;
				while((branch_ret >= 0) && (branch_ret != prev_branch)) {
					prev_branch = branch_ret;
					branch_ret =
							t_send_branch(t, branch_ret, t->uas.request, 0, 0);
				}
			}
		}
#endif
	}
	fake_reply(t, r_buf->branch, 408);
}
Example #15
void e2e_cancel( struct sip_msg *cancel_msg, 
	struct cell *t_cancel, struct cell *t_invite )
{
	branch_bm_t cancel_bm;
	branch_bm_t dummy_bm;
	int i;
	int lowest_error;
	str backup_uri;
	int rurib_flags;
	int ret;

	cancel_bm=0;
	lowest_error=0;

	/* e2e_cancel_branch() makes no RURI parsing, so no need to 
	 * save the ->parse_uri_ok */
	backup_uri = cancel_msg->new_uri;
	/* branch flags specific to RURI */
	rurib_flags = cancel_msg->flags&(~gflags_mask);

	/* determine which branches to cancel ... */
	which_cancel( t_invite, &cancel_bm );
	t_cancel->nr_of_outgoings=t_invite->nr_of_outgoings;
	t_cancel->first_branch=t_invite->first_branch;
	/* fix label -- it must be same for reply matching */
	t_cancel->label=t_invite->label;
	/* ... and install CANCEL UACs */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (cancel_bm & (1<<i)) {
			ret=e2e_cancel_branch(cancel_msg, t_cancel, t_invite, i);
			if (ret<0) cancel_bm &= ~(1<<i);
			if (ret<lowest_error) lowest_error=ret;
		}
	}
	/* restore new_uri */
	cancel_msg->new_uri = backup_uri;
	cancel_msg->parsed_uri_ok = 0;

	/* set flags */
	cancel_msg->flags = (cancel_msg->flags&gflags_mask)|rurib_flags;
	t_cancel->uas.request->flags = cancel_msg->flags&gflags_mask;

	/* send them out */
	for (i=t_cancel->first_branch; i<t_cancel->nr_of_outgoings; i++) {
		if (cancel_bm & (1<<i)) {
			if (SEND_BUFFER( &t_cancel->uac[i].request)==-1) {
				LOG(L_ERR, "ERROR: e2e_cancel: send failed\n");
			}
			start_retr( &t_cancel->uac[i].request );
		}
	}

	/* internally cancel branches with no received reply */
	for (i=t_invite->first_branch; i<t_invite->nr_of_outgoings; i++) {
		if (t_invite->uac[i].last_received==0){
			/* mark as cancelled */
			t_invite->uac[i].flags |= T_UAC_TO_CANCEL_FLAG;
			/* reset the "request" timers */
			reset_timer(&t_invite->uac[i].request.retr_timer);
			reset_timer(&t_invite->uac[i].request.fr_timer);
			LOCK_REPLIES( t_invite );
			if (RPS_ERROR==relay_reply(t_invite,FAKED_REPLY,i,487,&dummy_bm))
				lowest_error = -1; /* force sending 500 error */
		}
	}

	/* do not attempt to send a reply for the CANCEL if we already did it
	 * once; to work around the race between receiving a reply and generating
	 * a local reply, we better check if we are in failure route (which means
	 * that the reply to the UAC is /to be/ sent) or if it was actually sent */
	/* calling t_relay here from within failure route would lead to a dead
	 * lock on the transaction's reply lock -bogdan */
	if (route_type==FAILURE_ROUTE || t_cancel->uas.status>=200)
		return;

	/* if an error occurred, let upstream know (the final reply
	   will also move the transaction into wait state)
	*/
	if (lowest_error<0) {
		LOG(L_ERR, "ERROR: cancel error\n");
		t_reply( t_cancel, cancel_msg, 500, "cancel error");
	/* if there are pending branches, let upstream know we
	   are working on it
	*/
	} else if (cancel_bm) {
		DBG("DEBUG: e2e_cancel: e2e cancel proceeding\n");
		t_reply( t_cancel, cancel_msg, 200, CANCELING );
	/* if the transaction exists, but there is no more pending
	   branch, tell upstream we're done
	*/
	} else {
		DBG("DEBUG: e2e_cancel: e2e cancel -- no more pending branches\n");
		t_reply( t_cancel, cancel_msg, 200, CANCEL_DONE );
	}

#ifdef LOCAL_487

	/* local 487s have been deprecated -- this better handles
	 * race conditions (UAS sending 200); hopefully there are
	 * no longer UACs who go crazy waiting for the 487 whose
	 * forwarding is being blocked by another unresponsive branch
	 */

	/* we could await the downstream UAS's 487 replies; however,
	   if some of the branches do not send one, we could wait a
	   long time and annoy the upstream UAC, which wants to see
	   a result of the CANCEL quickly
	*/
	DBG("DEBUG: e2e_cancel: sending 487\n");
	/* in case that something in the meantime has been sent upstream
	   (like if FR hit at the same time), don't try to send */
	if (t_invite->uas.status>=200) return;
	/* there is still a race-condition -- the FR can hit now; that's
	   not too bad -- we take care in t_reply's REPLY_LOCK; in
	   the worst case, both this t_reply and other replier will
	   try, and the later one will result in error message 
	   "can't reply twice"
	*/
	t_reply(t_invite, t_invite->uas.request, 487, CANCELED );
#endif
}