Example No. 1
/**
 * @brief Wrapper function for msg_lump_cloner() with some additional sanity checks
 * @param shm_msg SIP message in shared memory
 * @param pkg_msg SIP message in private memory
 * @return 0 on success, -1 on error
 */
int save_msg_lumps( struct sip_msg *shm_msg, struct sip_msg *pkg_msg)
{
	int ret;
	struct lump* add_rm;
	struct lump* body_lumps;
	struct lump_rpl* reply_lump;
	
	/* make sure that we do not clone the lumps twice */
	if (lumps_are_cloned) {
		LOG(L_DBG, "DEBUG: save_msg_lumps: lumps have already been cloned\n" );
		return 0;
	}
	/* sanity checks */
	if (unlikely(!shm_msg || ((shm_msg->msg_flags & FL_SHM_CLONE)==0))) {
		LOG(L_ERR, "ERROR: save_msg_lumps: BUG, there is no shmem-ized message"
			" (shm_msg=%p)\n", shm_msg);
		return -1;
	}
	if (unlikely(shm_msg->first_line.type!=SIP_REQUEST)) {
		LOG(L_ERR, "ERROR: save_msg_lumps: BUG, the function should be called only for requests\n" );
		return -1;
	}

#ifdef EXTRA_DEBUG
	membar_depends();
	if (shm_msg->add_rm || shm_msg->body_lumps || shm_msg->reply_lump) {
		LOG(L_ERR, "ERROR: save_msg_lumps: BUG, trying to overwrite the already cloned lumps\n");
		return -1;
	}
#endif

	/* no need to clone the lumps for an ACK, they will not be used again */
	if (shm_msg->REQ_METHOD == METHOD_ACK)
		return 0;

	/* clean up any previously added Via/Content-Length lumps, otherwise they
	 * would get propagated into the failure routes */
	free_via_clen_lump(&pkg_msg->add_rm);

	lumps_are_cloned = 1;
	ret=msg_lump_cloner(pkg_msg, &add_rm, &body_lumps, &reply_lump);
	if (likely(ret==0)){
		/* make sure the lumps are fully written before adding them to
		   shm_msg (in case someone accesses it at the same time) */
		membar_write();
		shm_msg->add_rm = add_rm;
		shm_msg->body_lumps = body_lumps;
		shm_msg->reply_lump = reply_lump;
	}
	return ret<0?-1:0;
}
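
The membar_write() above pairs with a data-dependency barrier on the reader
side: whoever picks up shm_msg->add_rm and friends must issue membar_depends()
before dereferencing them, so it never sees a published pointer to a lump
whose fields are not yet visible. A minimal reader-side sketch, assuming the
same struct lump layout and barrier macros used by the code above (the helper
name is illustrative):

/* hypothetical reader sketch, not part of the original module */
static int count_cloned_add_rm_lumps(struct sip_msg *shm_msg)
{
	struct lump *l;
	int n = 0;

	/* pairs with the membar_write() in save_msg_lumps(): read the lump
	 * contents only after the published pointer */
	membar_depends();
	for (l = shm_msg->add_rm; l; l = l->next)
		n++;
	return n;
}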
Example No. 2
/** Prepare to cancel a transaction.
 * Determine which branches should be canceled and prepare them (internally
 * mark them as "cancel in progress", see prepare_cancel_branch()).
 * Can be called without REPLY_LOCK, since prepare_cancel_branch() is now
 * atomic -- andrei
 * WARNING: - has side effects, see prepare_cancel_branch()
 *          - one _must_ call cancel_uacs(cancel_bm) if *cancel_bm!=0, or
 *            some branches will become un-cancelable (because they remain
 *            "marked" internally)
 * @param t - transaction whose branches will be canceled
 * @param cancel_bm - pointer to a branch bitmap that will be filled with
 *    the branches that must be canceled (must be passed to cancel_uacs()
 *    if !=0)
 * @param skip_branches - branch bitmap of branches that should not be
 *    canceled
 */
void prepare_to_cancel(struct cell *t, branch_bm_t *cancel_bm,
						branch_bm_t skip_branches)
{
	int i;
	int branches_no;
	branch_bm_t mask;
	
	*cancel_bm=0;
	branches_no=t->nr_of_outgoings;
	mask=~skip_branches;
	membar_depends(); 
	for( i=0 ; i<branches_no ; i++ ) {
		*cancel_bm |= ((mask & (1<<i)) &&  prepare_cancel_branch(t, i, 1))<<i;
	}
}
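
As the header warns, prepare_to_cancel() only marks the branches; the caller
must then actually cancel them, or they stay "marked" and become
un-cancelable. A minimal caller sketch, assuming a cancel_uacs() variant that
takes the transaction and the branch bitmap (the exact signature differs
between tm versions, so treat it as an assumption):

/* hypothetical caller sketch -- cancel_uacs() signature assumed */
void cancel_all_pending_branches(struct cell *t)
{
	branch_bm_t cancel_bm;

	prepare_to_cancel(t, &cancel_bm, 0); /* skip no branches */
	if (cancel_bm != 0)
		cancel_uacs(t, cancel_bm, 0); /* mandatory whenever cancel_bm != 0 */
}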
Example No. 3
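/** Run all transaction callbacks of the given type from cb_lst.
 * The transaction's AVP lists (and xavp list, when compiled in) are
 * temporarily installed as the active lists while the callbacks run and are
 * restored afterwards. */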
void run_trans_callbacks_internal(struct tmcb_head_list* cb_lst, int type,
									struct cell *trans, 
									struct tmcb_params *params)
{
	struct tm_callback    *cbp;
	avp_list_t *backup_from, *backup_to;
	avp_list_t *backup_dom_from, *backup_dom_to;
	avp_list_t *backup_uri_from, *backup_uri_to;
#ifdef WITH_XAVP
	sr_xavp_t **backup_xavps;
#endif

	backup_uri_from = set_avp_list(AVP_CLASS_URI | AVP_TRACK_FROM,
			&trans->uri_avps_from );
	backup_uri_to = set_avp_list(AVP_CLASS_URI | AVP_TRACK_TO, 
			&trans->uri_avps_to );
	backup_from = set_avp_list(AVP_CLASS_USER | AVP_TRACK_FROM, 
			&trans->user_avps_from );
	backup_to = set_avp_list(AVP_CLASS_USER | AVP_TRACK_TO, 
			&trans->user_avps_to );
	backup_dom_from = set_avp_list(AVP_CLASS_DOMAIN | AVP_TRACK_FROM, 
			&trans->domain_avps_from);
	backup_dom_to = set_avp_list(AVP_CLASS_DOMAIN | AVP_TRACK_TO, 
			&trans->domain_avps_to);
#ifdef WITH_XAVP
	backup_xavps = xavp_set_list(&trans->xavps_list);
#endif

	cbp=(struct tm_callback*)cb_lst->first;
	while(cbp){
		membar_depends(); /* make sure the cache has the correct cbp 
							 contents */
		if ( (cbp->types)&type ) {
			DBG("DBG: trans=%p, callback type %d, id %d entered\n",
				trans, type, cbp->id );
			params->param = &(cbp->param);
			cbp->callback( trans, type, params );
		}
		cbp=cbp->next;
	}
	set_avp_list(AVP_CLASS_DOMAIN | AVP_TRACK_TO, backup_dom_to );
	set_avp_list(AVP_CLASS_DOMAIN | AVP_TRACK_FROM, backup_dom_from );
	set_avp_list(AVP_CLASS_USER | AVP_TRACK_TO, backup_to );
	set_avp_list(AVP_CLASS_USER | AVP_TRACK_FROM, backup_from );
	set_avp_list(AVP_CLASS_URI | AVP_TRACK_TO, backup_uri_to );
	set_avp_list(AVP_CLASS_URI | AVP_TRACK_FROM, backup_uri_from );
#ifdef WITH_XAVP
	xavp_set_list(backup_xavps);
#endif
}
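
The membar_depends() in the loop above only works if the writer that appends
to cb_lst publishes each node with a matching write barrier: fill in the
struct tm_callback fields first, then issue the barrier, then link the node,
so a concurrent reader walking first/next never sees a half-initialized
callback. A minimal writer-side sketch of that pattern (the real registration
code in the tm module does more bookkeeping; the helper name and the exact
type of cb_lst->first are assumptions here):

/* hypothetical writer-side sketch, illustrating only the barrier pairing */
void append_tm_callback(struct tmcb_head_list *cb_lst, struct tm_callback *cbp)
{
	/* cbp->types, cbp->id, cbp->callback and cbp->param must already be set */
	cbp->next = cb_lst->first;
	membar_write(); /* publish the node contents before linking it */
	cb_lst->first = cbp;
}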
Example No. 4
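/* Return the dialog attached to transaction t: scan the transaction's TM
 * callbacks for the dummy dialog callback (dlg_tmcb_dummy) and return its
 * parameter, or NULL if t is unknown/undefined or carries no dialog. */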
struct dlg_cell* get_dialog_from_tm(struct cell *t)
{
    if (t==NULL || t==T_UNDEFINED)
        return NULL;

    struct tm_callback* x = (struct tm_callback*)(t->tmcb_hl.first);

    while(x){
        membar_depends();
        if (x->types==TMCB_MAX && x->callback==dlg_tmcb_dummy){
            return (struct dlg_cell*)(x->param);
        }
        x=x->next;
    }

    return NULL;
}
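
A minimal usage sketch (the caller and what it does with the dialog are
illustrative; only get_dialog_from_tm() comes from the code above):

/* hypothetical caller sketch */
static void handle_dialog_of_transaction(struct cell *t)
{
    struct dlg_cell *dlg = get_dialog_from_tm(t);

    if (dlg == NULL)
        return; /* transaction unknown or no dialog attached */
    /* ... work with dlg here ... */
}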
Example No. 5
/* Retransmission/final response timer handler.
 * The following assumptions are made (to avoid deleting/re-adding the timer):
 *  retr_buf->retr_interval < (1 << (sizeof(ticks_t)*8-1))
 *  retr_buf->retr_interval == 0           => timer disabled
 *                          == (ticks_t)-1 => retr. disabled (fr working)
 *  retr_buf->retr_interval & (1 << (sizeof(ticks_t)*8-1)) => retr. & fr reset
 *     (we never reset only retr.; we either reset both of them, or disable
 *      retr. & reset fr). In this case fr_origin will contain the "time" of
 *      the reset and the next retr. should occur at fr_origin+retr_interval
 *      (we also assume that we'll never reset retr. to a lower value than
 *      the current one)
 */
ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln *tl, void *p)
{
	struct retr_buf *rbuf;
	ticks_t fr_remainder;
	ticks_t retr_remainder;
	ticks_t retr_interval;
	unsigned long new_retr_interval_ms;
	unsigned long crt_retr_interval_ms;
	struct cell *t;

	rbuf = (struct retr_buf *)((void *)tl
							   - (void *)(&((struct retr_buf *)0)->timer));
	membar_depends(); /* to be on the safe side */
	t = rbuf->my_T;

#ifdef TIMER_DEBUG
	LM_DBG("timer retr_buf_handler @%d (%p -> %p -> %p)\n", ticks, tl, rbuf, t);
#endif
	if(unlikely(rbuf->flags & F_RB_DEL_TIMER)) {
		/* timer marked for deletion */
		rbuf->t_active = 0; /* mark it as removed */
		/* a membar is not really needed: in the very unlikely case that
		 * another process still sees the old t_active value and tries to
		 * delete the timer again, timer_del() is safe in this case and
		 * will be a no-op */
		return 0;
	}
	/* overflow safe check (should work ok for fr_intervals < max ticks_t/2) */
	if((s_ticks_t)(rbuf->fr_expire - ticks) <= 0) {
		/* final response */
		rbuf->t_active = 0; /* mark the timer as removed
							 (both timers disabled);
							  there is a small race risk, but
							  nothing bad would happen */
		rbuf->flags |= F_RB_TIMEOUT;
		/* WARNING:  the next line depends on taking care not to start the
		 *           wait timer before finishing with t (if this is not
		 *           guaranteed then comment the timer_allow_del() line) */
		timer_allow_del(); /* [optional] allow timer_dels, since we're done
							  and there is no race risk */
		final_response_handler(rbuf, t);
		return 0;
	} else {
		/*  4 possible states running (t1), t2, paused, disabled */
		if((s_ticks_t)(rbuf->retr_expire - ticks) <= 0) {
			if(rbuf->flags & F_RB_RETR_DISABLED)
				goto disabled;
			/* get the current interval from the timer param. */
			crt_retr_interval_ms = (unsigned long)p;
			if(unlikely((rbuf->flags & F_RB_T2)
						|| (crt_retr_interval_ms > RT_T2_TIMEOUT_MS(rbuf)))) {
				retr_interval = MS_TO_TICKS(RT_T2_TIMEOUT_MS(rbuf));
				new_retr_interval_ms = RT_T2_TIMEOUT_MS(rbuf);
			} else {
				retr_interval = MS_TO_TICKS(crt_retr_interval_ms);
				new_retr_interval_ms = crt_retr_interval_ms << 1;
			}
#ifdef TIMER_DEBUG
			LM_DBG("new interval %ld ms / %d ticks"
				   " (max %d ms)\n",
					new_retr_interval_ms, retr_interval,
					RT_T2_TIMEOUT_MS(rbuf));
#endif
			/* we could race with the reply_received code, but the
			 * worst thing that can happen is to delay a reset_to_t2
			 * for crt_interval and send an extra retr. */
			rbuf->retr_expire = ticks + retr_interval;
			/* set new interval to -1 on error, or retr_int. on success */
			retr_remainder = retransmission_handler(rbuf) | retr_interval;
			/* store the next retr. interval in ms inside the timer struct,
			 * in the data member */
			tl->data = (void *)(new_retr_interval_ms);
		} else {
			retr_remainder = rbuf->retr_expire - ticks;
			LM_DBG("retr - nothing to do, expire in %d\n", retr_remainder);
		}
	}
	/* skip: */
	/* return the minimum of the next retransmission and final response
	 * timeouts (side benefit: it properly cancels the timer if ret==0 and
	 * sleeps for fr_remainder if retr. is canceled [==(ticks_t)-1]) */
	fr_remainder = rbuf->fr_expire - ticks; /* to be more precise use
											get_ticks_raw() instead of ticks
											(but make sure that
											crt. ticks < fr_expire) */
#ifdef TIMER_DEBUG
	LM_DBG("timer retr_buf_handler @%d (%p ->%p->%p) exiting min (%d, %d)\n",
			ticks, tl, rbuf, t, retr_remainder, fr_remainder);
#endif
#ifdef EXTRA_DEBUG
	if(retr_remainder == 0 || fr_remainder == 0) {
		LM_BUG("0 remainder => disabling timer!: "
			   "retr_remainder=%d, fr_remainder=%d\n",
				retr_remainder, fr_remainder);
	}
#endif
	if(retr_remainder < fr_remainder)
		return retr_remainder;
	else {
/* hack to switch to the slow timer */
#ifdef TM_FAST_RETR_TIMER
		tl->flags &= ~F_TIMER_FAST;
#endif
		return fr_remainder;
	}
disabled:
	return rbuf->fr_expire - ticks;
}
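
The retransmission branch above implements exponential backoff capped at T2:
each firing doubles the interval stored in tl->data until it would exceed
RT_T2_TIMEOUT_MS(rbuf), after which it stays at T2. A standalone sketch of the
resulting schedule, with T1=500 ms and T2=4000 ms picked only for illustration:

#include <stdio.h>

/* standalone sketch of the backoff schedule (T1/T2 values are assumptions) */
int main(void)
{
	unsigned long t2_ms = 4000;      /* cap, as RT_T2_TIMEOUT_MS() would give */
	unsigned long interval_ms = 500; /* initial retransmission interval (T1) */
	unsigned long elapsed_ms = 0;
	int i;

	for (i = 1; i <= 7; i++) {
		unsigned long crt = interval_ms > t2_ms ? t2_ms : interval_ms;
		elapsed_ms += crt;
		printf("retr %d after %lu ms (at %lu ms)\n", i, crt, elapsed_ms);
		if (crt < t2_ms)
			interval_ms = crt << 1; /* double until the T2 cap, like tl->data */
	}
	return 0;
}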