/* return (ticks_t)-1 on error/disable and 0 on success */ inline static ticks_t retransmission_handler(struct retr_buf *r_buf) { #ifdef EXTRA_DEBUG if(r_buf->my_T->flags & T_IN_AGONY) { LM_ERR("transaction %p scheduled for deletion and" " called from RETR timer (flags %x)\n", r_buf->my_T, r_buf->my_T->flags); abort(); } #endif if(r_buf->rbtype == TYPE_LOCAL_CANCEL || r_buf->rbtype == TYPE_REQUEST) { #ifdef EXTRA_DEBUG LM_DBG("request resending (t=%p, %.9s ... )\n", r_buf->my_T, r_buf->buffer); #endif if(SEND_BUFFER(r_buf) == -1) { /* disable retr. timers => return -1 */ fake_reply(r_buf->my_T, r_buf->branch, 503); return (ticks_t)-1; } if(unlikely(has_tran_tmcbs(r_buf->my_T, TMCB_REQUEST_SENT))) run_trans_callbacks_with_buf( TMCB_REQUEST_SENT, r_buf, 0, 0, TMCB_RETR_F); } else { #ifdef EXTRA_DEBUG LM_DBG("reply resending (t=%p, %.9s ... )\n", r_buf->my_T, r_buf->buffer); #endif t_retransmit_reply(r_buf->my_T); } return 0; }
static inline void send_prepared_request_impl(struct retr_buf *request, int retransmit) { if (SEND_BUFFER(request) == -1) { LOG(L_ERR, "t_uac: Attempt to send to precreated request failed\n"); } else if (unlikely(has_tran_tmcbs(request->my_T, TMCB_REQUEST_SENT))) /* we don't know the method here */ run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, request, 0, 0, TMCB_LOCAL_F); if (retransmit && (start_retr(request)!=0)) LOG(L_CRIT, "BUG: t_uac: failed to start retr. for %p\n", request); }
int t_append_branches(void) { struct cell *t = NULL; struct sip_msg *orig_msg = NULL; short outgoings; int success_branch; str current_uri; str dst_uri, path, instance, ruid, location_ua; struct socket_info* si; int q, i, found; flag_t backup_bflags = 0; flag_t bflags = 0; int new_branch, branch_ret, lowest_ret; branch_bm_t added_branches; int replies_locked = 0; t = get_t(); if(t == NULL) { LM_ERR("cannot get transaction\n"); return -1; } LM_DBG("transaction %u:%u in status %d\n", t->hash_index, t->label, t->uas.status); /* test if transaction has already been canceled */ if (t->flags & T_CANCELED) { ser_error=E_CANCELED; return -1; } if ((t->uas.status >= 200 && t->uas.status<=399) || ((t->uas.status >= 600 && t->uas.status) && !(t->flags & (T_6xx | T_DISABLE_6xx))) ) { LM_DBG("transaction %u:%u in status %d: cannot append new branch\n", t->hash_index, t->label, t->uas.status); return -1; } /* set the lock on the transaction here */ LOCK_REPLIES(t); replies_locked = 1; outgoings = t->nr_of_outgoings; orig_msg = t->uas.request; LM_DBG("Call %.*s: %d (%d) outgoing branches\n",orig_msg->callid->body.len, orig_msg->callid->body.s,outgoings, nr_branches); lowest_ret=E_UNSPEC; added_branches=0; /* it's a "late" branch so the on_branch variable has already been reset by previous execution of t_forward_nonack: we use the saved value */ if (t->on_branch_delayed) { /* tell add_uac that it should run branch route actions */ set_branch_route(t->on_branch_delayed); } outgoings = t->nr_of_outgoings; /* not really sure that the following is needed */ set_branch_iterator(nr_branches-1); found = 0; while((current_uri.s=next_branch( ¤t_uri.len, &q, &dst_uri, &path, &bflags, &si, &ruid, &instance, &location_ua))) { LM_DBG("Current uri %.*s\n",current_uri.len, current_uri.s); for (i=0; i<nr_branches; i++) { if (t->uac[i].ruid.len == ruid.len && !memcmp(t->uac[i].ruid.s, ruid.s, ruid.len)) { LM_DBG("branch already added [%.*s]\n", ruid.len, ruid.s); found = 1; break; } } if (found) 
continue; setbflagsval(0, bflags); new_branch=add_uac( t, orig_msg, ¤t_uri, (dst_uri.len) ? (&dst_uri) : ¤t_uri, &path, 0, si, orig_msg->fwd_send_flags, orig_msg->rcv.proto, (dst_uri.len)?-1:UAC_SKIP_BR_DST_F, &instance, &ruid, &location_ua); /* test if cancel was received meanwhile */ if (t->flags & T_CANCELED) goto canceled; if (new_branch>=0) added_branches |= 1<<new_branch; else lowest_ret=MIN_int(lowest_ret, new_branch); } clear_branches(); LM_DBG("Call %.*s: %d (%d) outgoing branches after clear_branches()\n", orig_msg->callid->body.len, orig_msg->callid->body.s,outgoings, nr_branches); setbflagsval(0, backup_bflags); /* update message flags, if changed in branch route */ t->uas.request->flags = orig_msg->flags; if (added_branches==0) { if(lowest_ret!=E_CFG) LOG(L_ERR, "ERROR: t_append_branch: failure to add branches\n"); ser_error=lowest_ret; replies_locked = 0; UNLOCK_REPLIES(t); return lowest_ret; } ser_error=0; /* clear branch adding errors */ /* send them out now */ success_branch=0; /* since t_append_branch can only be called from REQUEST_ROUTE, always lock replies */ for (i=outgoings; i<t->nr_of_outgoings; i++) { if (added_branches & (1<<i)) { branch_ret=t_send_branch(t, i, orig_msg , 0, 0 /* replies are already locked */ ); if (branch_ret>=0){ /* some kind of success */ if (branch_ret==i) { /* success */ success_branch++; if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT))) run_trans_callbacks_with_buf( TMCB_REQUEST_OUT, &t->uac[nr_branches].request, orig_msg, 0, -orig_msg->REQ_METHOD); } else /* new branch added */ added_branches |= 1<<branch_ret; } } } if (success_branch<=0) { /* return always E_SEND for now * (the real reason could be: denied by onsend routes, blacklisted, * send failed or any of the errors listed before + dns failed * when attempting dns failover) */ ser_error=E_SEND; /* else return the last error (?) 
*/ /* the caller should take care and delete the transaction */ replies_locked = 0; UNLOCK_REPLIES(t); return -1; } ser_error=0; /* clear branch send errors, we have overall success */ set_kr(REQ_FWDED); replies_locked = 0; UNLOCK_REPLIES(t); return 1; canceled: DBG("t_append_branches: cannot append branches to a canceled transaction\n"); /* reset processed branches */ clear_branches(); /* restore backup flags from initial env */ setbflagsval(0, backup_bflags); /* update message flags, if changed in branch route */ t->uas.request->flags = orig_msg->flags; /* if needed unlock transaction's replies */ if (likely(replies_locked)) { /* restore the number of outgoing branches * since new branches have not been completed */ t->nr_of_outgoings = outgoings; replies_locked = 0; UNLOCK_REPLIES(t); } ser_error=E_CANCELED; return -1; }
/* should be called directly only if one of the conditions below is true:
 *  - prepare_cancel_branch or prepare_to_cancel returned true for this branch
 *  - buffer value was 0 and then set to BUSY in an atomic op.:
 *    if (atomic_cmpxchg_long(&buffer, 0, BUSY_BUFFER)==0).
 *
 * params:  t - transaction
 *          branch - branch number to be canceled
 *          reason - cancel reason structure
 *          flags - how to cancel:
 *                   F_CANCEL_B_KILL - will completely stop the
 *                     branch (stops the timers), use with care
 *                   F_CANCEL_B_FAKE_REPLY - will send a fake 487
 *                      to all branches that haven't received any response
 *                      (>=100). It assumes the REPLY_LOCK is not held
 *                      (if it is => deadlock)
 *                  F_CANCEL_B_FORCE_C - will send a cancel (and create the
 *                       corresp. local cancel rb) even if no reply was
 *                       received; F_CANCEL_B_FAKE_REPLY will be ignored.
 *                  F_CANCEL_B_FORCE_RETR - don't stop retransmission if no
 *                       reply was received on the branch; incompatible
 *                       with F_CANCEL_B_FAKE_REPLY, F_CANCEL_B_FORCE_C and
 *                       F_CANCEL_B_KILL (all of them take precedence)
 *                  default: stop only the retransmissions for the branch
 *                       and leave it to timeout if it doesn't receive any
 *                       response to the CANCEL
 * returns: 0 - branch inactive after running cancel_branch()
 *          1 - branch still active  (fr_timer)
 *         -1 - error
 * WARNING:
 *          - F_CANCEL_B_KILL should be used only if the transaction is killed
 *            explicitly afterwards (since it might kill all the timers
 *            the transaction won't be able to "kill" itself => if not
 *            explicitly "put_on_wait" it might live forever)
 *          - F_CANCEL_B_FAKE_REPLY must be used only if the REPLY_LOCK is not
 *            held
 *          - checking for buffer==0 under REPLY_LOCK is not enough, an
 *            atomic_cmpxchg or atomic_get_and_set _must_ be used.
 */
int cancel_branch( struct cell *t, int branch,
#ifdef CANCEL_REASON_SUPPORT
					struct cancel_reason* reason,
#endif /* CANCEL_REASON_SUPPORT */
					int flags )
{
	char *cancel;
	unsigned int len;
	struct retr_buf *crb, *irb;
	int ret;
	struct cancel_info tmp_cd;
	void* pcbuf;

	crb=&t->uac[branch].local_cancel;
	irb=&t->uac[branch].request;
	irb->flags|=F_RB_CANCELED;
	ret=1;
	init_cancel_info(&tmp_cd);

	/* caller must have claimed the cancel buffer (set it to BUSY_BUFFER)
	 * before calling us -- see the header comment above */
# ifdef EXTRA_DEBUG
	if (crb->buffer!=BUSY_BUFFER) {
		LOG(L_CRIT, "ERROR: attempt to rewrite cancel buffer: %p\n",
				crb->buffer);
		abort();
	}
# endif

	if (flags & F_CANCEL_B_KILL){
		/* completely stop the branch: all its timers are stopped */
		stop_rb_timers( irb );
		ret=0;
		if ((t->uac[branch].last_received < 100)
				&& !(flags & F_CANCEL_B_FORCE_C)) {
			DBG("DEBUG: cancel_branch: no response ever received: "
					"giving up on cancel\n");
			/* remove BUSY_BUFFER -- mark cancel buffer as not used */
			pcbuf=&crb->buffer; /* workaround for type punning warnings */
			atomic_set_long(pcbuf, 0);
			/* try to relay auto-generated 487 canceling response only when
			 * another one is not under relaying on the branch and there is
			 * no forced response per transaction from script */
			if((flags & F_CANCEL_B_FAKE_REPLY) && !(irb->flags&F_RB_RELAYREPLY)
					&& !(t->flags&T_ADMIN_REPLY)) {
				LOCK_REPLIES(t);
				if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1) ==
										RPS_ERROR){
					return -1;
				}
			}
			/* do nothing, hope that the caller will clean up */
			return ret;
		}
	}else{
		if (t->uac[branch].last_received < 100){
			if (!(flags & F_CANCEL_B_FORCE_C)) {
				/* no response received => don't send a cancel on this branch,
				 * just drop it */
				if (!(flags & F_CANCEL_B_FORCE_RETR))
					stop_rb_retr(irb); /* stop retransmissions */
				/* remove BUSY_BUFFER -- mark cancel buffer as not used */
				pcbuf=&crb->buffer; /* workaround for type punning warnings */
				atomic_set_long(pcbuf, 0);
				if (flags & F_CANCEL_B_FAKE_REPLY){
					stop_rb_timers( irb ); /* stop even the fr timer */
					LOCK_REPLIES(t);
					if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1)==
											RPS_ERROR){
						return -1;
					}
					return 0; /* should be inactive after the 487 */
				}
				/* do nothing, just wait for the final timeout */
				return 1;
			}
		}
		stop_rb_retr(irb); /* stop retransmissions */
	}

	if (cfg_get(tm, tm_cfg, reparse_invite) ||
			(t->uas.request
				&& t->uas.request->msg_flags&(FL_USE_UAC_FROM|FL_USE_UAC_TO))) {
		/* build the CANCEL from the INVITE which was sent out */
		cancel = build_local_reparse(t, branch, &len, CANCEL, CANCEL_LEN,
				(t->uas.request
					&& t->uas.request->msg_flags&FL_USE_UAC_TO)?0:&t->to
#ifdef CANCEL_REASON_SUPPORT
				, reason
#endif /* CANCEL_REASON_SUPPORT */
				);
	} else {
		/* build the CANCEL from the received INVITE */
		cancel = build_local(t, branch, &len, CANCEL, CANCEL_LEN, &t->to
#ifdef CANCEL_REASON_SUPPORT
				, reason
#endif /* CANCEL_REASON_SUPPORT */
				);
	}
	if (!cancel) {
		LOG(L_ERR, "ERROR: attempt to build a CANCEL failed\n");
		/* remove BUSY_BUFFER -- mark cancel buffer as not used */
		pcbuf=&crb->buffer; /* workaround for type punning warnings */
		atomic_set_long(pcbuf, 0);
		return -1;
	}
	/* install cancel now */
	crb->dst = irb->dst;
	crb->branch = branch;
	/* label it as cancel so that FR timer can better know how to
	 * deal with it */
	crb->activ_type = TYPE_LOCAL_CANCEL;
	/* be extra careful and check for bugs (the below if could be replaced
	 * by an atomic_set((void*)&crb->buffer, cancel) */
	if (unlikely(atomic_cmpxchg_long((void*)&crb->buffer, (long)BUSY_BUFFER,
					(long)cancel)!= (long)BUSY_BUFFER)){
		BUG("tm: cancel_branch: local_cancel buffer=%p != BUSY_BUFFER"
				" (trying to continue)\n", crb->buffer);
		shm_free(cancel);
		return -1;
	}
	/* publish the buffer before its length: cancel retr. can be called from
	 * reply_received w/o the reply lock held => they check for buffer_len to
	 * see if a valid reply exists */
	membar_write_atomic_op();
	crb->buffer_len = len;

	DBG("DEBUG: cancel_branch: sending cancel...\n");
	if (SEND_BUFFER( crb )>=0){
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_OUT, crb,
					t->uas.request, 0, TMCB_LOCAL_F);
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, crb,
					t->uas.request, 0, TMCB_LOCAL_F);
	}
	/*sets and starts the FINAL RESPONSE timer */
	if (start_retr( crb )!=0)
		LOG(L_CRIT, "BUG: cancel_branch: failed to start retransmission"
				" for %p\n", crb);
	return ret;
}