int cancel_all_uacs(struct cell *trans, int how)
{
	struct cancel_info cancel_data;
	int i, j;

#ifdef EXTRA_DEBUG
	assert(trans);
#endif
	DBG("Canceling T@%p [%u:%u]\n", trans, trans->hash_index, trans->label);

	init_cancel_info(&cancel_data);
	prepare_to_cancel(trans, &cancel_data.cancel_bitmap, 0);
	/* tell tm to cancel the call */
	i = cancel_uacs(trans, &cancel_data, how);

	if (how & F_CANCEL_UNREF)
#ifndef TM_DEL_UNREF
		/* in case of 'too many' _buggy_ invocations, the ref count (a uint)
		 * might actually wrap around, possibly leaving the T leaking. */
#warning "use of F_CANCEL_UNREF flag is unsafe without defining TM_DEL_UNREF"
#endif
		UNREF(trans);

	/* count the still active branches */
	if (!how) {
		j = 0;
		while (i) {
			j++;
			i &= i - 1;
		}
		return j;
	}
	return 0;
}
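/* Illustrative sketch (not part of the original source): how a caller that
 * already holds a reference to a transaction might use cancel_all_uacs().
 * With how==0 the return value is the number of still-active branches,
 * computed from the branch bitmap by the i&=i-1 loop above (each iteration
 * clears the lowest set bit). `my_trans` is hypothetical; kept in #if 0 so
 * it is not built. */
#if 0
static void example_cancel_and_count(struct cell *my_trans)
{
	int remaining;

	remaining = cancel_all_uacs(my_trans, 0);
	DBG("%d branch(es) still waiting to time out\n", remaining);
	/* passing F_CANCEL_UNREF instead would also drop the reference --
	 * per the #warning above, only safe with TM_DEL_UNREF defined */
}
#endif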
/* generate a fake reply
 * it assumes the REPLY_LOCK is already held and returns unlocked */
static void fake_reply(struct cell *t, int branch, int code)
{
	struct cancel_info cancel_data;
	short do_cancel_branch;
	enum rps reply_status;

	init_cancel_info(&cancel_data);
	do_cancel_branch = is_invite(t) && prepare_cancel_branch(t, branch, 0);
	/* mark branch as canceled */
	t->uac[branch].request.flags |= F_RB_CANCELED;
	t->uac[branch].request.flags |= F_RB_RELAYREPLY;
	if (is_local(t)) {
		reply_status = local_reply(t, FAKED_REPLY, branch, code,
				&cancel_data);
	} else {
		/* relay the reply, but don't put the transaction on wait --
		 * we still need t to send the CANCELs */
		reply_status = relay_reply(t, FAKED_REPLY, branch, code,
				&cancel_data, 0);
	}
	/* now that we are out of the lock, do the cancel I/O */
#ifdef CANCEL_REASON_SUPPORT
	if (do_cancel_branch)
		cancel_branch(t, branch, &cancel_data.reason, 0);
#else /* CANCEL_REASON_SUPPORT */
	if (do_cancel_branch)
		cancel_branch(t, branch, 0);
#endif /* CANCEL_REASON_SUPPORT */
	/* the transaction is cleaned up on error; if no error occurred and
	 * it completed regularly, we have to clean up ourselves */
	if (reply_status == RPS_COMPLETED)
		put_on_wait(t);
}
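/* Illustrative sketch (derived from the comment above, not original code):
 * the locking contract of fake_reply() -- the caller takes REPLY_LOCK and
 * fake_reply() returns with it released (via local_reply()/relay_reply()).
 * Kept in #if 0. */
#if 0
static void example_fake_487(struct cell *t, int branch)
{
	LOCK_REPLIES(t);            /* fake_reply() expects the lock held... */
	fake_reply(t, branch, 487); /* ...and returns with it released */
}
#endif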
static int t_cancel_branches(sip_msg_t *msg, char *k, char *s2)
{
	struct cancel_info cancel_data;
	tm_cell_t *t = 0;
	tm_ctx_t *tcx = 0;
	int n = 0;
	int idx = 0;

	t = _tmx_tmb.t_gett();
	if (t == NULL || t == T_UNDEFINED || !is_invite(t))
		return -1;
	tcx = _tmx_tmb.tm_ctx_get();
	if (tcx != NULL)
		idx = tcx->branch_index;
	n = (int)(long)k;
	init_cancel_info(&cancel_data);
	switch (n) {
		case 1:
			/* prepare cancel for every branch except idx (others) */
			_tmx_tmb.prepare_to_cancel(t, &cancel_data.cancel_bitmap,
					1 << idx);
			break;
		case 2:
			/* prepare cancel for current branch (idx) */
			if (msg->first_line.u.reply.statuscode >= 200)
				break;
			cancel_data.cancel_bitmap = 1 << idx;
			break;
		default:
			/* prepare cancel for all branches */
			if (msg->first_line.u.reply.statuscode >= 200)
				/* prepare cancel for every branch except idx */
				_tmx_tmb.prepare_to_cancel(t, &cancel_data.cancel_bitmap,
						1 << idx);
			else
				_tmx_tmb.prepare_to_cancel(t, &cancel_data.cancel_bitmap, 0);
	}
	LM_DBG("canceling %d/%d\n", n, (int)cancel_data.cancel_bitmap);
	if (cancel_data.cancel_bitmap == 0)
		return -1;
	_tmx_tmb.cancel_uacs(t, &cancel_data, 0);
	return 1;
}
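/* Illustrative sketch (assumption, not original code): the semantics of the
 * third prepare_to_cancel() argument as used in the switch above -- it is a
 * skip mask, i.e. bits set in it exclude those branches from the computed
 * cancel bitmap. Kept in #if 0. */
#if 0
static void example_skip_mask(tm_cell_t *t, int idx)
{
	branch_bm_t bitmap;

	/* n==1 ("others"): every cancelable branch except the current one */
	bitmap = 0;
	_tmx_tmb.prepare_to_cancel(t, &bitmap, 1 << idx);

	/* default ("all", no final reply yet): exclude nothing */
	bitmap = 0;
	_tmx_tmb.prepare_to_cancel(t, &bitmap, 0);
}
#endif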
/* fifo command to cancel a pending call (Uli)
 * Syntax:
 *
 * ":uac_cancel:[response file]\n
 * callid\n
 * cseq\n
 */
void rpc_cancel(rpc_t *rpc, void *c)
{
	struct cell *trans;
	static char cseq[128], callid[128];
	struct cancel_info cancel_data;
	int i, j;
	str cseq_s;   /* cseq */
	str callid_s; /* callid */

	cseq_s.s = cseq;
	callid_s.s = callid;
	init_cancel_info(&cancel_data);

	if (rpc->scan(c, "SS", &callid_s, &cseq_s) < 2) {
		rpc->fault(c, 400, "Callid and CSeq expected as parameters");
		return;
	}

	if (t_lookup_callid(&trans, callid_s, cseq_s) < 0) {
		DBG("Lookup failed\n");
		rpc->fault(c, 400, "Transaction not found");
		return;
	}
	/* find the branches that need cancel-ing */
	prepare_to_cancel(trans, &cancel_data.cancel_bitmap, 0);
	/* tell tm to cancel the call */
	DBG("Now calling cancel_uacs\n");
	i = cancel_uacs(trans, &cancel_data, 0); /* don't fake 487s,
											just wait for timeout */
	/* t_lookup_callid REF'd the transaction for us, we must UNREF here! */
	UNREF(trans);
	j = 0;
	while (i) {
		j++;
		i &= i - 1;
	}
	rpc->add(c, "ds", j, "branches remaining (waiting for timeout)");
}
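/* Usage note (hedged): in Kamailio this handler is typically exported over
 * RPC as "tm.cancel", taking the Call-ID and CSeq as string parameters,
 * e.g. from the command line something like:
 *
 *   kamcmd tm.cancel <call-id> <cseq>
 *
 * The exact export name depends on the rpc_export_t table of this module,
 * so treat the command above as illustrative. */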
/* should be called directly only if one of the conditions below is true:
 *  - prepare_cancel_branch or prepare_to_cancel returned true for this
 *    branch
 *  - buffer value was 0 and then set to BUSY in an atomic op.:
 *    if (atomic_cmpxchg_long(&buffer, 0, BUSY_BUFFER)==0).
 *
 * params:  t - transaction
 *          branch - branch number to be canceled
 *          reason - cancel reason structure
 *          flags - how to cancel:
 *             F_CANCEL_B_KILL - will completely stop the branch (stops
 *                  the timers), use with care
 *             F_CANCEL_B_FAKE_REPLY - will send a fake 487 to all branches
 *                  that haven't received any response (>=100). It assumes
 *                  the REPLY_LOCK is not held (if it is => deadlock)
 *             F_CANCEL_B_FORCE_C - will send a cancel (and create the
 *                  corresp. local cancel rb) even if no reply was
 *                  received; F_CANCEL_B_FAKE_REPLY will be ignored.
 *             F_CANCEL_B_FORCE_RETR - don't stop retransmission if no
 *                  reply was received on the branch; incompatible with
 *                  F_CANCEL_B_FAKE_REPLY, F_CANCEL_B_FORCE_C and
 *                  F_CANCEL_B_KILL (all of them take precedence)
 *             default: stop only the retransmissions for the branch and
 *                  leave it to timeout if it doesn't receive any response
 *                  to the CANCEL
 * returns: 0 - branch inactive after running cancel_branch()
 *          1 - branch still active (fr_timer)
 *         -1 - error
 * WARNING:
 *          - F_CANCEL_B_KILL should be used only if the transaction is
 *            killed explicitly afterwards (since it might kill all the
 *            timers, the transaction won't be able to "kill" itself =>
 *            if not explicitly "put_on_wait" it might live forever)
 *          - F_CANCEL_B_FAKE_REPLY must be used only if the REPLY_LOCK
 *            is not held
 *          - checking for buffer==0 under REPLY_LOCK is not enough, an
 *            atomic_cmpxchg or atomic_get_and_set _must_ be used.
 */
int cancel_branch( struct cell *t, int branch,
#ifdef CANCEL_REASON_SUPPORT
					struct cancel_reason* reason,
#endif /* CANCEL_REASON_SUPPORT */
					int flags )
{
	char *cancel;
	unsigned int len;
	struct retr_buf *crb, *irb;
	int ret;
	struct cancel_info tmp_cd;
	void* pcbuf;

	crb=&t->uac[branch].local_cancel;
	irb=&t->uac[branch].request;
	irb->flags|=F_RB_CANCELED;
	ret=1;
	init_cancel_info(&tmp_cd);

#	ifdef EXTRA_DEBUG
	if (crb->buffer!=BUSY_BUFFER) {
		LOG(L_CRIT, "ERROR: attempt to rewrite cancel buffer: %p\n",
				crb->buffer);
		abort();
	}
#	endif

	if (flags & F_CANCEL_B_KILL){
		stop_rb_timers( irb );
		ret=0;
		if ((t->uac[branch].last_received < 100) &&
				!(flags & F_CANCEL_B_FORCE_C)) {
			DBG("DEBUG: cancel_branch: no response ever received: "
					"giving up on cancel\n");
			/* remove BUSY_BUFFER -- mark cancel buffer as not used */
			pcbuf=&crb->buffer; /* workaround for type punning warnings */
			atomic_set_long(pcbuf, 0);
			/* try to relay the auto-generated 487 canceling response only
			 * when no other one is being relayed on the branch and there
			 * is no forced per-transaction response from the script */
			if((flags & F_CANCEL_B_FAKE_REPLY) &&
					!(irb->flags&F_RB_RELAYREPLY) &&
					!(t->flags&T_ADMIN_REPLY)) {
				LOCK_REPLIES(t);
				if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1) ==
						RPS_ERROR){
					return -1;
				}
			}
			/* do nothing, hope that the caller will clean up */
			return ret;
		}
	}else{
		if (t->uac[branch].last_received < 100){
			if (!(flags & F_CANCEL_B_FORCE_C)) {
				/* no response received => don't send a cancel on this
				 * branch, just drop it */
				if (!(flags & F_CANCEL_B_FORCE_RETR))
					stop_rb_retr(irb); /* stop retransmissions */
				/* remove BUSY_BUFFER -- mark cancel buffer as not used */
				pcbuf=&crb->buffer; /* workaround for type punning warnings */
				atomic_set_long(pcbuf, 0);
				if (flags & F_CANCEL_B_FAKE_REPLY){
					stop_rb_timers( irb ); /* stop even the fr timer */
					LOCK_REPLIES(t);
					if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1)==
							RPS_ERROR){
						return -1;
					}
					return 0; /* should be inactive after the 487 */
				}
				/* do nothing, just wait for the final timeout */
				return 1;
			}
		}
		stop_rb_retr(irb); /* stop retransmissions */
	}

	if (cfg_get(tm, tm_cfg, reparse_invite) ||
			(t->uas.request &&
			 t->uas.request->msg_flags&(FL_USE_UAC_FROM|FL_USE_UAC_TO))) {
		/* build the CANCEL from the INVITE which was sent out */
		cancel = build_local_reparse(t, branch, &len, CANCEL, CANCEL_LEN,
				(t->uas.request &&
				 t->uas.request->msg_flags&FL_USE_UAC_TO)?0:&t->to
#ifdef CANCEL_REASON_SUPPORT
				, reason
#endif /* CANCEL_REASON_SUPPORT */
				);
	} else {
		/* build the CANCEL from the received INVITE */
		cancel = build_local(t, branch, &len, CANCEL, CANCEL_LEN, &t->to
#ifdef CANCEL_REASON_SUPPORT
				, reason
#endif /* CANCEL_REASON_SUPPORT */
				);
	}
	if (!cancel) {
		LOG(L_ERR, "ERROR: attempt to build a CANCEL failed\n");
		/* remove BUSY_BUFFER -- mark cancel buffer as not used */
		pcbuf=&crb->buffer; /* workaround for type punning warnings */
		atomic_set_long(pcbuf, 0);
		return -1;
	}
	/* install cancel now */
	crb->dst = irb->dst;
	crb->branch = branch;
	/* label it as cancel so that the FR timer can better know how to
	 * deal with it */
	crb->activ_type = TYPE_LOCAL_CANCEL;
	/* be extra careful and check for bugs (the if below could be replaced
	 * by an atomic_set((void*)&crb->buffer, cancel)) */
	if (unlikely(atomic_cmpxchg_long((void*)&crb->buffer, (long)BUSY_BUFFER,
					(long)cancel)!= (long)BUSY_BUFFER)){
		BUG("tm: cancel_branch: local_cancel buffer=%p != BUSY_BUFFER"
				" (trying to continue)\n", crb->buffer);
		shm_free(cancel);
		return -1;
	}
	membar_write_atomic_op();
	/* cancel retransmission can be triggered from reply_received w/o the
	 * reply lock held => it checks buffer_len to see if a valid cancel
	 * buffer exists */
	crb->buffer_len = len;

	DBG("DEBUG: cancel_branch: sending cancel...\n");
	if (SEND_BUFFER( crb )>=0){
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_OUT, crb,
					t->uas.request, 0, TMCB_LOCAL_F);
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, crb,
					t->uas.request, 0, TMCB_LOCAL_F);
	}

	/* sets and starts the FINAL RESPONSE timer */
	if (start_retr( crb )!=0)
		LOG(L_CRIT, "BUG: cancel_branch: failed to start retransmission"
				" for %p\n", crb);
	return ret;
}
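/* Illustrative sketch (derived from the header comment of cancel_branch()):
 * how a caller that did not go through prepare_cancel_branch() would have
 * to claim the cancel buffer before calling cancel_branch() directly.
 * `t` and `branch` are hypothetical; kept in #if 0. */
#if 0
static void example_claim_and_cancel(struct cell *t, int branch)
{
	struct retr_buf *crb = &t->uac[branch].local_cancel;

	/* atomically move the buffer from "unused" (0) to BUSY_BUFFER; only
	 * the winner of this race is allowed to call cancel_branch() */
	if (atomic_cmpxchg_long((void*)&crb->buffer, 0, (long)BUSY_BUFFER)==0) {
#ifdef CANCEL_REASON_SUPPORT
		cancel_branch(t, branch, 0 /* no reason */, 0 /* default flags */);
#else
		cancel_branch(t, branch, 0 /* default flags */);
#endif
	}
}
#endif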
static int t_cancel_callid(struct sip_msg* msg, char *cid, char *cseq,
		char *flag, char *creason)
{
	struct cell *trans;
	struct cell *bkt;
	int bkb;
	struct cancel_info cancel_data;
	str cseq_s;
	str callid_s;
	int fl;
	int rcode;

	rcode = 0;
	fl = -1;

	if(fixup_get_svalue(msg, (gparam_p)cid, &callid_s)<0) {
		LM_ERR("cannot get callid\n");
		return -1;
	}
	if(fixup_get_svalue(msg, (gparam_p)cseq, &cseq_s)<0) {
		LM_ERR("cannot get cseq\n");
		return -1;
	}
	if(fixup_get_ivalue(msg, (gparam_p)flag, &fl)<0) {
		LM_ERR("cannot get flag\n");
		return -1;
	}
	if(creason!=NULL && fixup_get_ivalue(msg, (gparam_p)creason, &rcode)<0) {
		LM_ERR("cannot get cancel reason code\n");
		return -1;
	}
	if(rcode<100 || rcode>699)
		rcode = 0;

	bkt = _tmx_tmb.t_gett();
	bkb = _tmx_tmb.t_gett_branch();
	if( _tmx_tmb.t_lookup_callid(&trans, callid_s, cseq_s) < 0 ) {
		DBG("Lookup failed - no transaction\n");
		return -1;
	}

	DBG("Now calling cancel_uacs\n");
	if(trans->uas.request && fl>0 && fl<32)
		setflag(trans->uas.request, fl);
	init_cancel_info(&cancel_data);
	cancel_data.reason.cause = rcode;
	cancel_data.cancel_bitmap = 0;
	_tmx_tmb.prepare_to_cancel(trans, &cancel_data.cancel_bitmap, 0);
	_tmx_tmb.cancel_uacs(trans, &cancel_data, 0);

	//_tmx_tmb.unref_cell(trans);
	_tmx_tmb.t_sett(bkt, bkb);

	return 1;
}
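/* Usage note (hedged): t_cancel_callid() is exported to the config script
 * by the tmx module; an illustrative kamailio.cfg call could be:
 *
 *   t_cancel_callid("$var(callid)", "$var(cseq)", "1", "487");
 *
 * i.e. look up the INVITE transaction by Call-ID/CSeq, set message flag 1
 * on its request and cancel all branches with Reason cause 487. Codes
 * outside 100..699 are reset to 0 (no Reason) by the guard above. */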
/* Continues the SIP request processing previously saved by
 * t_suspend(). The script does not continue from the same
 * point, but a separate route block is executed instead.
 *
 * Return value:
 * 	0  - success
 * 	<0 - failure
 */
int t_continue(unsigned int hash_index, unsigned int label,
		struct action *route)
{
	struct cell *t;
	struct sip_msg faked_req;
	struct cancel_info cancel_data;
	int branch;
	struct ua_client *uac = NULL;
	int ret;
	int cb_type;
	int msg_status;
	int last_uac_status;
	int reply_status;
	int do_put_on_wait;
	struct hdr_field *hdr, *prev = 0, *tmp = 0;

	if (t_lookup_ident(&t, hash_index, label) < 0) {
		LM_ERR("transaction not found\n");
		return -1;
	}

	if (!(t->flags & T_ASYNC_SUSPENDED)) {
		LM_WARN("transaction is not suspended [%u:%u]\n", hash_index, label);
		return -2;
	}

	if (t->flags & T_CANCELED) {
		t->flags &= ~T_ASYNC_SUSPENDED;
		/* The transaction has already been canceled,
		 * needless to continue */
		UNREF(t); /* t_unref would kill the transaction */
		/* reset T as we have no working T anymore */
		set_t(T_UNDEFINED, T_BR_UNDEFINED);
		return 1;
	}

	/* The transaction has to be locked to protect it
	 * from calling t_continue() multiple times simultaneously */
	LOCK_ASYNC_CONTINUE(t);

	t->flags |= T_ASYNC_CONTINUE;	/* we can now know anywhere in kamailio
					 * that we are executing post a suspend */

	/* which route block type were we in when we were suspended */
	cb_type = FAILURE_CB_TYPE;
	switch (t->async_backup.backup_route) {
		case REQUEST_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case FAILURE_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case TM_ONREPLY_ROUTE:
			cb_type = ONREPLY_CB_TYPE;
			break;
		case BRANCH_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		branch = t->async_backup.blind_uac;	/* get the branch of the
						 * blind UAC setup during suspend */
		if (branch >= 0) {
			stop_rb_timers(&t->uac[branch].request);

			if (t->uac[branch].last_received != 0) {
				/* Either t_continue() has already been
				 * called or the branch has already timed out.
				 * Needless to continue. */
				t->flags &= ~T_ASYNC_SUSPENDED;
				UNLOCK_ASYNC_CONTINUE(t);
				UNREF(t); /* t_unref would kill the transaction */
				return 1;
			}

			/* Set last_received to something >= 200,
			 * the actual value does not matter, the branch
			 * will never be picked up for response forwarding.
			 * If last_received is lower than 200, the branch
			 * might be picked for cancellation later, for
			 * example when t_reply() is called from a failure
			 * route => deadlock, because both of them need
			 * the reply lock to be held. */
			t->uac[branch].last_received=500;
			uac = &t->uac[branch];
		}
		/* else: not a huge problem, the fr timer will fire, but the
		 * CANCEL will not be sent. last_received will be set to 408. */

		/* We should not reset kr here to 0 as it's quite possible that
		 * before continuing the dev. has correctly set the kr by, for
		 * example, sending a transactional reply in code - resetting here
		 * will cause a dirty log message "WARNING: script writer didn't
		 * release transaction" to appear in log files.
		 * TODO: maybe we need to add a special kr for async?
		 * reset_kr();
		 */

		/* fake the request and the environment, like in failure_route */
		if (!fake_req(&faked_req, t->uas.request, 0 /* extra flags */, uac)) {
			LM_ERR("building fake_req failed\n");
			ret = -1;
			goto kill_trans;
		}
		faked_env( t, &faked_req, 1);

		/* execute the pre/post -script callbacks based on the
		 * original route block */
		if (exec_pre_script_cb(&faked_req, cb_type)>0) {
			if (run_top_route(route, &faked_req, 0)<0)
				LM_ERR("failure inside run_top_route\n");
			exec_post_script_cb(&faked_req, cb_type);
		}

		/* TODO: save_msg_lumps should clone the lumps to shm mem */

		/* restore original environment and free the fake msg */
		faked_env( t, 0, 1);
		free_faked_req(&faked_req, t);

		/* update the flags */
		t->uas.request->flags = faked_req.flags;

		if (t->uas.status < 200) {
			/* No final reply has been sent yet.
			 * Check whether or not there is any pending branch. */
			for (branch = 0; branch < t->nr_of_outgoings; branch++) {
				if (t->uac[branch].last_received < 200)
					break;
			}

			if (branch == t->nr_of_outgoings) {
				/* There is no open branch, so there is no chance
				 * that a final response will be received. */
				ret = 0;
				goto kill_trans;
			}
		}

	} else {
		branch = t->async_backup.backup_branch;

		init_cancel_info(&cancel_data);

		LM_DBG("continuing from a suspended reply"
				" - resetting the suspend branch flag\n");

		if (t->uac[branch].reply) {
			t->uac[branch].reply->msg_flags &= ~FL_RPL_SUSPENDED;
		} else {
			LM_WARN("no reply in t_continue for branch. not much we can do\n");
			/* don't leave the async lock held on this early exit */
			UNLOCK_ASYNC_CONTINUE(t);
			return 0;
		}

		if (t->uas.request)
			t->uas.request->msg_flags &= ~FL_RPL_SUSPENDED;

		faked_env( t, t->uac[branch].reply, 1);

		if (exec_pre_script_cb(t->uac[branch].reply, cb_type)>0) {
			if (run_top_route(route, t->uac[branch].reply, 0)<0){
				LOG(L_ERR, "ERROR: t_continue_reply: Error in"
						" run_top_route\n");
			}
			exec_post_script_cb(t->uac[branch].reply, cb_type);
		}

		LM_DBG("restoring previous environment\n");
		faked_env( t, 0, 1);

		/* lock transaction replies - will be unlocked when the reply
		 * is relayed */
		LOCK_REPLIES( t );
		if ( is_local(t) ) {
			LM_DBG("t is local - sending reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			reply_status = local_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * be still pending branches ...) */
				cleanup_uac_timers( t );
				if (is_invite(t))
					cancel_uacs(t, &cancel_data, F_CANCEL_B_KILL);
				/* There is no need to call set_final_timer because we
				 * know that the transaction is local */
				put_on_wait(t);
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of
				 * 6xx), use cancel_b_method for canceling unreplied
				 * branches */
				cancel_uacs(t, &cancel_data,
						cfg_get(tm,tm_cfg, cancel_b_flags));
			}
		} else {
			LM_DBG("t is not local - relaying reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			do_put_on_wait = 0;
			if(t->uac[branch].reply->first_line.u.reply.statuscode>=200){
				do_put_on_wait = 1;
			}
			reply_status=relay_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data, do_put_on_wait );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * be still pending branches ...) */
				cleanup_uac_timers( t );
				/* 2xx is a special case: we can have a COMPLETED request
				 * with branches still open => we have to cancel them */
				if (is_invite(t) && cancel_data.cancel_bitmap)
					cancel_uacs( t, &cancel_data, F_CANCEL_B_KILL);
				/* FR for negative INVITES, WAIT anything else */
				/* The call to set_final_timer is embedded in relay_reply
				 * to avoid race conditions when the reply is sent out and
				 * an ACK to stop retransmissions comes before the
				 * retransmission timer is set. */
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of
				 * 6xx), use cancel_b_method for canceling unreplied
				 * branches */
				cancel_uacs(t, &cancel_data,
						cfg_get(tm,tm_cfg, cancel_b_flags));
			}
		}
		t->uac[branch].request.flags|=F_RB_REPLIED;

		if (reply_status==RPS_ERROR){
			goto done;
		}

		/* update FR/RETR timers on provisional replies */
		msg_status=t->uac[branch].reply->REPLY_STATUS;
		last_uac_status=t->uac[branch].last_received;

		if (is_invite(t) && msg_status<200 &&
				( cfg_get(tm, tm_cfg, restart_fr_on_each_reply) ||
				( (last_uac_status<msg_status) &&
				((msg_status>=180) || (last_uac_status==0)) )
				) ) { /* provisional now */
			restart_rb_fr(& t->uac[branch].request, t->fr_inv_timeout);
			t->uac[branch].request.flags|=F_RB_FR_INV; /* mark fr_inv */
		}
	}

done:
	UNLOCK_ASYNC_CONTINUE(t);

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		/* unref the transaction */
		t_unref(t->uas.request);
	} else {
		tm_ctx_set_branch_index(T_BR_UNDEFINED);
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
		LOG(L_DBG,"DEBUG: t_continue_reply: Freeing earlier cloned reply\n");

		/* free lumps that were added during reply processing */
		del_nonshm_lump( &(t->uac[branch].reply->add_rm) );
		del_nonshm_lump( &(t->uac[branch].reply->body_lumps) );
		del_nonshm_lump_rpl( &(t->uac[branch].reply->reply_lump) );

		/* free header's parsed structures that were added */
		for( hdr=t->uac[branch].reply->headers ; hdr ; hdr=hdr->next ) {
			if ( hdr->parsed && hdr_allocs_parse(hdr) &&
					(hdr->parsed<(void*)t->uac[branch].reply ||
					hdr->parsed>=(void*)t->uac[branch].end_reply)) {
				clean_hdr_field(hdr);
				hdr->parsed = 0;
			}
		}

		/* now go through hdr_fields themselves and remove the pkg
		 * allocated space */
		hdr = t->uac[branch].reply->headers;
		while (hdr) {
			if ( hdr && ((void*)hdr<(void*)t->uac[branch].reply ||
					(void*)hdr>=(void*)t->uac[branch].end_reply)) {
				/* this header needs to be freed and removed
				 * from the list */
				if (!prev) {
					t->uac[branch].reply->headers = hdr->next;
				} else {
					prev->next = hdr->next;
				}
				tmp = hdr;
				hdr = hdr->next;
				pkg_free(tmp);
			} else {
				prev = hdr;
				hdr = hdr->next;
			}
		}
		sip_msg_free(t->uac[branch].reply);
		t->uac[branch].reply = 0;
	}

	/* this transaction is no longer suspended, unset the SUSPEND flag */
	t->flags &= ~T_ASYNC_SUSPENDED;
	return 0;

kill_trans:
	t->flags &= ~T_ASYNC_SUSPENDED;
	/* The script has hopefully set the error code. If not,
	 * let us reply with a default error. */
	if ((kill_transaction_unsafe(t,
			tm_error ? tm_error : E_UNSPEC)) <=0) {
		LOG(L_ERR, "ERROR: t_continue: reply generation failed\n");
		/* The transaction must be explicitly released,
		 * no more timer is running */
		UNLOCK_ASYNC_CONTINUE(t);
		t_release_transaction(t);
	} else {
		UNLOCK_ASYNC_CONTINUE(t);
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		t_unref(t->uas.request);
	} else {
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
	}
	return ret;
}
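/* Illustrative sketch (not original code): the async pattern t_continue()
 * is the second half of. A module would first suspend the transaction,
 * hand the (hash_index, label) coordinates to some async job, and later
 * resume it by running a route block. The flow below is compressed into
 * one function for illustration only; in practice the two halves run in
 * different contexts. Kept in #if 0. */
#if 0
static int example_async_flow(struct sip_msg *msg,
		struct action *resume_route)
{
	unsigned int hash_index, label;

	/* first half: park the transaction; this creates the blind UAC whose
	 * branch t_continue() later picks up via t->async_backup.blind_uac */
	if (t_suspend(msg, &hash_index, &label) < 0)
		return -1;

	/* ... hand (hash_index, label) to an external worker ... */

	/* second half, typically from another process once the async work is
	 * done: execute resume_route in the transaction's context */
	if (t_continue(hash_index, label, resume_route) < 0)
		return -1;
	return 0;
}
#endif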