/* Exercises Dir_serve_file with a Connection whose fd is not a real
 * socket (fd 1 / my_send stub): every method is expected to fail the
 * write with -1. Returns NULL on success, per the mu_assert convention. */
char *test_Dir_serve_file()
{
    Request *request = NULL;
    int status = 0;
    Dir *dir = Dir_create("tests/", "sample.html", "test/plain");
    Connection conn = {0};

    conn.fd = 1;
    conn.send = my_send;

    request = fake_req("GET", "/sample.json");
    status = Dir_serve_file(dir, request, &conn);
    mu_assert(status == -1, "Should fail to write since it's not a socket.");

    request = fake_req("HEAD", "/sample.json");
    status = Dir_serve_file(dir, request, &conn);
    mu_assert(status == -1, "Should fail to write since it's not a socket.");

    request = fake_req("POST", "/sample.json");
    status = Dir_serve_file(dir, request, &conn);
    mu_assert(status == -1, "Should fail to write since it's not a socket.");

    return NULL;
}
/* Run the failure handlers for the picked branch of transaction 't':
 * the TMCB_ON_FAILURE callbacks and/or the failure_route armed via
 * t->on_negative, executed on a faked (pkg-local) copy of the UAS request.
 * Returns 1 if a failure_route processes (or nothing was registered),
 * 0 on error or when failure handling is not applicable (no UAS request,
 * or the request is a CANCEL). Reads the global 'picked_branch'. */
static inline int run_failure_handlers(struct cell *t)
{
	static struct sip_msg faked_req;
	struct sip_msg *shmem_msg;
	struct ua_client *uac;
	int on_failure;

	shmem_msg = t->uas.request;
	uac = &t->uac[picked_branch];

	/* failure_route for a local UAC? */
	if (!shmem_msg || REQ_LINE(shmem_msg).method_value==METHOD_CANCEL ) {
		LM_WARN("no UAC or CANCEL support (%d, %d) \n",
			t->on_negative, t->tmcb_hl.reg_types);
		return 0;
	}

	/* don't start faking anything if we don't have to
	 * (no callbacks and no failure route registered) */
	if ( !has_tran_tmcbs( t, TMCB_ON_FAILURE) && !t->on_negative ) {
		LM_WARN("no negative handler (%d, %d)\n",
			t->on_negative, t->tmcb_hl.reg_types);
		return 1;
	}

	/* build a pkg-memory copy of the shmem request to run routes on */
	if (!fake_req(&faked_req, shmem_msg, &t->uas, uac)) {
		LM_ERR("fake_req failed\n");
		return 0;
	}
	/* fake also the env. conforming to the fake msg */
	faked_env( t, &faked_req);
	/* DONE with faking ;-) -> run the failure handlers */

	if ( has_tran_tmcbs( t, TMCB_ON_FAILURE) ) {
		run_trans_callbacks( TMCB_ON_FAILURE, t, &faked_req,
			uac->reply, uac->last_received);
	}

	if (t->on_negative) {
		/* update flags in transaction if changed by callbacks */
		shmem_msg->flags = faked_req.flags;
		/* avoid recursion -- if failure_route forwards, and does not
		 * set next failure route, failure_route will not be reentered
		 * on failure */
		on_failure = t->on_negative;
		t->on_negative=0;
		/* run a reply_route action if some was marked */
		run_top_route(failure_rlist[on_failure].a, &faked_req);
	}

	/* restore original environment and free the fake msg
	 * (faked_req is static, so its plain fields such as .flags
	 * stay readable after free_faked_req releases its lumps) */
	faked_env( t, 0);
	free_faked_req(&faked_req,t);

	/* if failure handler changed flag, update transaction context */
	shmem_msg->flags = faked_req.flags;

	/* branch flags do not need to be updated as this branch will be never
	 * used again */
	return 1;
}
char *test_Dir_serve_file() { int rc = 0; Request *req = NULL; Dir *test = Dir_create( bfromcstr("tests/"), bfromcstr("sample.html"), bfromcstr("test/plain"), 0); Connection conn = {.iob = NULL}; int zero_fd = open("/dev/null", O_WRONLY); conn.iob = IOBuf_create(1024, zero_fd, IOBUF_NULL); req = fake_req("GET", "/", "/sample.json"); rc = Dir_serve_file(test, req, &conn); // TODO: different platforms barf on sendfile for different reasons // mu_assert(req->response_size > -1, "Should serve the /sample.json"); req = fake_req("HEAD", "/", "/sample.json"); rc = Dir_serve_file(test, req, &conn); mu_assert(rc == 0, "Should serve the HEAD of /sample.json"); req = fake_req("POST", "/", "/sample.json"); rc = Dir_serve_file(test, req, &conn); mu_assert(rc == -1, "POST should pass through but send an error."); mu_assert(req->status_code == 405, "POST to file should 405."); req = fake_req("GET", "/tests/", "/test"); rc = Dir_serve_file(test, req, &conn); mu_assert(rc == -1, "GET of path shorter than prefix should 404"); mu_assert(req->status_code == 404, "GET shortpath should 404."); return NULL; }
/* DNS-based failover for the picked branch: ask the resolver for the
 * next IP of the branch's proxy and, if one exists, re-send the request
 * there using a faked copy of the UAS request.
 * Returns 0 if the request was forwarded to a new destination,
 * -1 if no further destination exists or forwarding failed.
 * Reads the global 'picked_branch'. */
static inline int do_dns_failover(struct cell *t)
{
	static struct sip_msg faked_req;
	struct sip_msg *shmem_msg;
	struct ua_client *uac;
	int ret;

	shmem_msg = t->uas.request;
	uac = &t->uac[picked_branch];

	/* check if the DNS resolver can get at least one new IP */
	if ( get_next_su( uac->proxy, &uac->request.dst.to, 1)!=0 )
		return -1;

	LM_DBG("new destination available\n");

	/* build a pkg-memory copy of the shmem request to forward from */
	if (!fake_req(&faked_req, shmem_msg, &t->uas, uac)) {
		LM_ERR("fake_req failed\n");
		return -1;
	}
	/* fake also the env. conforming to the fake msg */
	faked_env( t, &faked_req);

	ret = -1;

	/* set info as current destination */
	if ( set_ruri( &faked_req, &uac->uri)!= 0)
		goto done;

	setb0flags( uac->br_flags );
	faked_req.force_send_socket = shmem_msg->force_send_socket;

	/* send it out */
	if (t_forward_nonack( t, &faked_req, uac->proxy)==1)
		ret = 0;

done:
	/* restore original environment and free the fake msg */
	faked_env( t, 0);
	free_faked_req(&faked_req,t);
	return ret;
}
/* Inject new branches into the ongoing transaction 't'.
 * The destination(s) come either from Event AVPs (TM_INJECT_SRC_EVENT)
 * or from 'msg' (RURI + dset for a request, first dset branch for a
 * reply). With TM_INJECT_FLAG_CANCEL the pre-existing branches are
 * cancelled after the new ones are sent out.
 * Returns 1 on success, -1 on internal error, -2 if the transaction
 * already has a final UAS status, -3 if forwarding failed.
 * NOTE(review): t_forward_nonack is called with the 'locked' flag set,
 * so the caller presumably holds the transaction's reply lock — confirm. */
int t_inject_branch( struct cell *t, struct sip_msg *msg, int flags)
{
	static struct sip_msg faked_req;
	branch_bm_t cancel_bm;
	str reason = str_init(CANCEL_REASON_200);
	int rc;

	/* does the transaction state still accept new branches ? */
	if (t->uas.status >= 200) {
		LM_DBG("cannot add branches to a transaction with %d UAS\n",
			t->uas.status);
		return -2;
	}

	/* build a pkg-memory copy of the UAS request to forward from */
	if (!fake_req( &faked_req, t->uas.request, &t->uas, NULL, 0)) {
		LM_ERR("fake_req failed\n");
		return -1;
	}

	/* do we have the branches to be injected ? */
	if (flags&TM_INJECT_SRC_EVENT) {
		/* get the branch from Event AVPs, as populated by EVI/EBR */
		if (ul_contact_event_to_msg( &faked_req )<0) {
			LM_ERR("failed to grab new branch from Event\n");
			goto error;
		}
	} else {
		/* use the RURI+dset array as destinations to be injected */
		if (msg->first_line.type==SIP_REQUEST) {
			/* take the RURI branch from the script msg and move it
			 * into the faked msg (that will be used by t_fwd function) */
			if (dst_to_msg( msg, &faked_req )<0) {
				LM_ERR("failed to grab new branch from Event\n");
				goto error;
			}
		} else {
			/* current message is a reply, so take the first branch from dset
			 * and move it into the faked msg (that will be used by t_fwd
			 * function)*/
			if (move_branch_to_ruri( 0, &faked_req)<0) {
				LM_ERR("no branch found to be moved as new destination\n");
				goto error;
			}
			/* remove it from set */
			remove_branch(0);
		}
	}

	/* snapshot which existing branches would need cancelling, BEFORE
	 * the new ones are added */
	if (flags&TM_INJECT_FLAG_CANCEL) {
		which_cancel( t, &cancel_bm );
	}

	/* generated the new branches, without branch counter reset */
	rc = t_forward_nonack( t, &faked_req , NULL, 0, 1/*locked*/ );

	/* now cancel the pre-existing branches snapshotted above */
	if (flags&TM_INJECT_FLAG_CANCEL) {
		set_cancel_extra_hdrs( reason.s, reason.len);
		cancel_uacs( t, cancel_bm );
		set_cancel_extra_hdrs( NULL, 0);
	}

	/* cleanup the faked request */
	free_faked_req( &faked_req, t);

	return (rc==1)?1:-3;

error:
	/* cleanup the faked request */
	free_faked_req( &faked_req, t);
	return -1;
}
/* Function triggered from the reactor in order to continue the async
 * processing of transaction ctx->t once 'fd' became readable.
 * It fakes the request + environment, invokes the module's resume
 * function and, depending on the resulting async_status, either keeps
 * waiting (ASYNC_CONTINUE), swaps the watched fd (ASYNC_CHANGE_FD) or
 * runs the resume route and disposes of the async context.
 * Always returns 0 (reactor callback convention). */
int t_resume_async(int *fd, void *param)
{
	static struct sip_msg faked_req;
	static struct ua_client uac;
	async_ctx *ctx = (async_ctx *)param;
	struct cell *backup_t;
	struct cell *backup_cancelled_t;
	struct cell *backup_e2eack_t;
	struct usr_avp **backup_list;
	struct socket_info* backup_si;
	struct cell *t= ctx->t;
	int route;

	LM_DBG("resuming on fd %d, transaction %p \n",*fd, t);

	if (current_processing_ctx) {
		LM_CRIT("BUG - a context already set!\n");
		abort();
	}

	/* prepare for resume route */
	uac.br_flags = getb0flags( t->uas.request ) ;
	uac.uri = *GET_RURI( t->uas.request );
	if (!fake_req( &faked_req /* the fake msg to be built*/,
		t->uas.request, /* the template msg saved in transaction */
		&t->uas, /*the UAS side of the transaction*/
		&uac, /* the fake UAC */
		1 /* copy dst_uri too */)
	) {
		LM_ERR("fake_req failed\n");
		return 0;
	}

	/* enviroment setting: save current globals, then impersonate
	 * the suspended transaction */
	current_processing_ctx = ctx->msg_ctx;
	backup_t = get_t();
	backup_e2eack_t = get_e2eack_t();
	backup_cancelled_t = get_cancelled_t();
	/* fake transaction */
	set_t( t );
	set_cancelled_t(ctx->cancelled_t);
	set_e2eack_t(ctx->e2eack_t);
	reset_kr();
	set_kr(ctx->kr);
	/* make available the avp list from transaction */
	backup_list = set_avp_list( &t->user_avps );
	/* set default send address to the saved value */
	backup_si = bind_address;
	bind_address = t->uac[0].request.dst.send_sock;

	async_status = ASYNC_DONE; /* assume default status as done */

	/* call the resume function in order to read and handle data */
	return_code = ctx->resume_f( *fd, &faked_req, ctx->resume_param );

	if (async_status==ASYNC_CONTINUE) {
		/* do not run the resume route; fd stays in the reactor */
		goto restore;
	} else if (async_status==ASYNC_CHANGE_FD) {
		if (return_code<0) {
			LM_ERR("ASYNC_CHANGE_FD: given file descriptor shall be positive!\n");
			goto restore;
		} else if (return_code > 0 && return_code == *fd) {
			/*trying to add the same fd; shall continue*/
			LM_CRIT("You are trying to replace the old fd with the same fd!"
				"Will act as in ASYNC_CONTINUE!\n");
			goto restore;
		}

		/* remove the old fd from the reactor */
		reactor_del_reader( *fd, -1, IO_FD_CLOSING);
		*fd=return_code;

		/* insert the new fd inside the reactor; on failure fall back
		 * to looping synchronously on the resume function */
		if (reactor_add_reader( *fd, F_SCRIPT_ASYNC, RCT_PRIO_ASYNC,
		(void*)ctx)<0 ) {
			LM_ERR("failed to add async FD to reactor -> act in sync mode\n");
			do {
				return_code = ctx->resume_f( *fd, &faked_req, ctx->resume_param );
				if (async_status == ASYNC_CHANGE_FD)
					*fd=return_code;
			} while(async_status==ASYNC_CONTINUE||async_status==ASYNC_CHANGE_FD);
			goto route;
		}

		/* changed fd; now restore old state */
		goto restore;
	}

	/* remove from reactor, we are done */
	reactor_del_reader( *fd, -1, IO_FD_CLOSING);

route:
	if (async_status == ASYNC_DONE_CLOSE_FD)
		close(*fd);

	/* run the resume_route (some type as the original one) */
	swap_route_type(route, ctx->route_type);
	run_resume_route( ctx->resume_route, &faked_req);
	set_route_type(route);

	/* no need for the context anymore */
	shm_free(ctx);

	/* free also the processing ctx if still set
	 * NOTE: it may become null if inside the run_resume_route
	 * another async jump was made (and context attached again
	 * to transaction) */
	if (current_processing_ctx) {
		context_destroy(CONTEXT_GLOBAL, current_processing_ctx);
		pkg_free(current_processing_ctx);
	}

restore:
	/* restore original environment */
	set_t(backup_t);
	set_cancelled_t(backup_cancelled_t);
	set_e2eack_t(backup_e2eack_t);
	/* restore original avp list */
	set_avp_list( backup_list );
	bind_address = backup_si;

	free_faked_req( &faked_req, t);
	current_processing_ctx = NULL;

	return 0;
}
/* Continues the SIP request processing previously saved by
 * t_suspend(). The script does not continue from the same
 * point, but a separate route block ('route') is executed instead.
 *
 * For a request suspended outside TM_ONREPLY_ROUTE, a faked request is
 * built and the route is run on it; for a suspended reply, the route is
 * run on the stored reply, which is then relayed (or locally replied)
 * and its pkg-allocated additions cleaned up.
 *
 * Return value:
 * 	0  - success
 * 	1  - transaction already cancelled / branch already answered
 * 	<0 - failure
 */
int t_continue(unsigned int hash_index, unsigned int label,
		struct action *route)
{
	struct cell *t;
	struct sip_msg faked_req;
	struct cancel_info cancel_data;
	int branch;
	struct ua_client *uac =NULL;
	int ret;
	int cb_type;
	int msg_status;
	int last_uac_status;
	int reply_status;
	int do_put_on_wait;
	struct hdr_field *hdr, *prev = 0, *tmp = 0;

	if (t_lookup_ident(&t, hash_index, label) < 0) {
		LM_ERR("transaction not found\n");
		return -1;
	}

	if (!(t->flags & T_ASYNC_SUSPENDED)) {
		LM_WARN("transaction is not suspended [%u:%u]\n",
			hash_index, label);
		return -2;
	}

	if (t->flags & T_CANCELED) {
		t->flags &= ~T_ASYNC_SUSPENDED;
		/* The transaction has already been canceled,
		 * needless to continue */
		UNREF(t); /* t_unref would kill the transaction */
		/* reset T as we have no working T anymore */
		set_t(T_UNDEFINED, T_BR_UNDEFINED);
		return 1;
	}

	/* The transaction has to be locked to protect it
	 * form calling t_continue() multiple times simultaneously */
	LOCK_ASYNC_CONTINUE(t);

	t->flags |= T_ASYNC_CONTINUE; /* we can now know anywhere in kamailio
	                               * that we are executing post a suspend */

	/* which route block type were we in when we were suspended */
	cb_type = FAILURE_CB_TYPE;;
	switch (t->async_backup.backup_route) {
		case REQUEST_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case FAILURE_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
		case TM_ONREPLY_ROUTE:
			cb_type = ONREPLY_CB_TYPE;
			break;
		case BRANCH_ROUTE:
			cb_type = FAILURE_CB_TYPE;
			break;
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		/* request path: run the route on a faked request */
		branch = t->async_backup.blind_uac; /* get the branch of the
		                                     * blind UAC setup during
		                                     * suspend */
		if (branch >= 0) {
			stop_rb_timers(&t->uac[branch].request);

			if (t->uac[branch].last_received != 0) {
				/* Either t_continue() has already been
				 * called or the branch has already timed out.
				 * Needless to continue. */
				t->flags &= ~T_ASYNC_SUSPENDED;
				UNLOCK_ASYNC_CONTINUE(t);
				UNREF(t); /* t_unref would kill the transaction */
				return 1;
			}

			/* Set last_received to something >= 200,
			 * the actual value does not matter, the branch
			 * will never be picked up for response forwarding.
			 * If last_received is lower than 200,
			 * then the branch may tried to be cancelled later,
			 * for example when t_reply() is called from
			 * a failure route => deadlock, because both
			 * of them need the reply lock to be held. */
			t->uac[branch].last_received=500;
			uac = &t->uac[branch];
		}
		/* else
			Not a huge problem, fr timer will fire, but CANCEL
			will not be sent. last_received will be set to 408. */

		/* We should not reset kr here to 0 as it's quite possible
		 * before continuing the dev. has correctly set the kr by,
		 * for example, sending a transactional reply in code -
		 * resetting here will cause a dirty log message
		 * "WARNING: script writer didn't release transaction"
		 * to appear in log files. TODO: maybe we need to add
		 * a special kr for async?
		 * reset_kr();
		 */

		/* fake the request and the environment, like in failure_route */
		if (!fake_req(&faked_req, t->uas.request, 0 /* extra flags */,
			uac))
		{
			LM_ERR("building fake_req failed\n");
			ret = -1;
			goto kill_trans;
		}
		faked_env( t, &faked_req, 1);

		/* execute the pre/post -script callbacks based on original
		 * route block */
		if (exec_pre_script_cb(&faked_req, cb_type)>0) {
			if (run_top_route(route, &faked_req, 0)<0)
				LM_ERR("failure inside run_top_route\n");
			exec_post_script_cb(&faked_req, cb_type);
		}

		/* TODO: save_msg_lumps should clone the lumps to shm mem */

		/* restore original environment and free the fake msg */
		faked_env( t, 0, 1);
		free_faked_req(&faked_req, t);

		/* update the flags (faked_req is a local struct, so .flags
		 * remains readable after free_faked_req) */
		t->uas.request->flags = faked_req.flags;

		if (t->uas.status < 200) {
			/* No final reply has been sent yet.
			 * Check whether or not there is any pending branch. */
			for ( branch = 0; branch < t->nr_of_outgoings; branch++ ) {
				if (t->uac[branch].last_received < 200)
					break;
			}

			if (branch == t->nr_of_outgoings) {
				/* There is not any open branch so there is
				 * no chance that a final response will be received. */
				ret = 0;
				goto kill_trans;
			}
		}

	} else {
		/* reply path: run the route on the stored reply, then relay it */
		branch = t->async_backup.backup_branch;

		init_cancel_info(&cancel_data);

		LM_DBG("continuing from a suspended reply"
				" - resetting the suspend branch flag\n");

		if (t->uac[branch].reply) {
			t->uac[branch].reply->msg_flags &= ~FL_RPL_SUSPENDED;
		} else {
			LM_WARN("no reply in t_continue for branch. not much we can do\n");
			return 0;
		}

		if (t->uas.request) t->uas.request->msg_flags&= ~FL_RPL_SUSPENDED;

		faked_env( t, t->uac[branch].reply, 1);

		if (exec_pre_script_cb(t->uac[branch].reply, cb_type)>0) {
			if (run_top_route(route, t->uac[branch].reply, 0)<0){
				LOG(L_ERR, "ERROR: t_continue_reply: Error in run_top_route\n");
			}
			exec_post_script_cb(t->uac[branch].reply, cb_type);
		}

		LM_DBG("restoring previous environment");
		faked_env( t, 0, 1);

		/*lock transaction replies - will be unlocked when reply is relayed*/
		LOCK_REPLIES( t );

		if ( is_local(t) ) {
			LM_DBG("t is local - sending reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			reply_status = local_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * be still pending branches ... */
				cleanup_uac_timers( t );
				if (is_invite(t)) cancel_uacs(t, &cancel_data, F_CANCEL_B_KILL);
				/* There is no need to call set_final_timer because we know
				 * that the transaction is local */
				put_on_wait(t);
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx),
				 * use cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data,
					cfg_get(tm,tm_cfg, cancel_b_flags));
			}

		} else {
			LM_DBG("t is not local - relaying reply with status code: [%d]\n",
					t->uac[branch].reply->first_line.u.reply.statuscode);
			do_put_on_wait = 0;
			if(t->uac[branch].reply->first_line.u.reply.statuscode>=200){
				do_put_on_wait = 1;
			}
			reply_status=relay_reply( t, t->uac[branch].reply, branch,
					t->uac[branch].reply->first_line.u.reply.statuscode,
					&cancel_data, do_put_on_wait );
			if (reply_status == RPS_COMPLETED) {
				/* no more UAC FR/RETR (if I received a 2xx, there may
				 * be still pending branches ... */
				cleanup_uac_timers( t );
				/* 2xx is a special case: we can have a COMPLETED request
				 * with branches still open => we have to cancel them */
				if (is_invite(t) && cancel_data.cancel_bitmap)
					cancel_uacs( t, &cancel_data, F_CANCEL_B_KILL);
				/* FR for negative INVITES, WAIT anything else */
				/* Call to set_final_timer is embedded in relay_reply to
				 * avoid race conditions when reply is sent out and an ACK
				 * to stop retransmissions comes before retransmission
				 * timer is set.*/
			}else if (unlikely(cancel_data.cancel_bitmap)){
				/* cancel everything, even non-INVITEs (e.g in case of 6xx),
				 * use cancel_b_method for canceling unreplied branches */
				cancel_uacs(t, &cancel_data,
					cfg_get(tm,tm_cfg, cancel_b_flags));
			}
		}
		t->uac[branch].request.flags|=F_RB_REPLIED;

		if (reply_status==RPS_ERROR){
			goto done;
		}

		/* update FR/RETR timers on provisional replies */
		msg_status=t->uac[branch].reply->REPLY_STATUS;
		last_uac_status=t->uac[branch].last_received;

		if (is_invite(t) && msg_status<200
				&& ( cfg_get(tm, tm_cfg, restart_fr_on_each_reply)
					|| ( (last_uac_status<msg_status)
						&& ((msg_status>=180) || (last_uac_status==0)) )
			) ) { /* provisional now */
			restart_rb_fr(& t->uac[branch].request, t->fr_inv_timeout);
			t->uac[branch].request.flags|=F_RB_FR_INV; /* mark fr_inv */
		}

	}

done:
	UNLOCK_ASYNC_CONTINUE(t);

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		/* unref the transaction */
		t_unref(t->uas.request);
	} else {
		tm_ctx_set_branch_index(T_BR_UNDEFINED);
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
		LOG(L_DBG,"DEBUG: t_continue_reply: Freeing earlier cloned reply\n");

		/* free lumps that were added during reply processing */
		del_nonshm_lump( &(t->uac[branch].reply->add_rm) );
		del_nonshm_lump( &(t->uac[branch].reply->body_lumps) );
		del_nonshm_lump_rpl( &(t->uac[branch].reply->reply_lump) );

		/* free header's parsed structures that were added
		 * (only those living outside the shmem cloned reply) */
		for( hdr=t->uac[branch].reply->headers ; hdr ; hdr=hdr->next ) {
			if ( hdr->parsed && hdr_allocs_parse(hdr) &&
					(hdr->parsed<(void*)t->uac[branch].reply ||
					hdr->parsed>=(void*)t->uac[branch].end_reply)) {
				clean_hdr_field(hdr);
				hdr->parsed = 0;
			}
		}

		/* now go through hdr_fields themselves and remove the pkg
		 * allocated space (headers outside the shmem clone) */
		hdr = t->uac[branch].reply->headers;
		while (hdr) {
			if ( hdr && ((void*)hdr<(void*)t->uac[branch].reply ||
					(void*)hdr>=(void*)t->uac[branch].end_reply)) {
				//this header needs to be freed and removed form the list.
				if (!prev) {
					t->uac[branch].reply->headers = hdr->next;
				} else {
					prev->next = hdr->next;
				}
				tmp = hdr;
				hdr = hdr->next;
				pkg_free(tmp);
			} else {
				prev = hdr;
				hdr = hdr->next;
			}
		}
		sip_msg_free(t->uac[branch].reply);
		t->uac[branch].reply = 0;
	}

	/*This transaction is no longer suspended so unsetting the SUSPEND flag*/
	t->flags &= ~T_ASYNC_SUSPENDED;

	return 0;

kill_trans:
	t->flags &= ~T_ASYNC_SUSPENDED;
	/* The script has hopefully set the error code. If not,
	 * let us reply with a default error. */
	if ((kill_transaction_unsafe(t, tm_error ?
			tm_error : E_UNSPEC)) <=0 ) {
		LOG(L_ERR, "ERROR: t_continue: "
			"reply generation failed\n");
		/* The transaction must be explicitely released,
		 * no more timer is running */
		UNLOCK_ASYNC_CONTINUE(t);
		t_release_transaction(t);
	} else {
		UNLOCK_ASYNC_CONTINUE(t);
	}

	if(t->async_backup.backup_route != TM_ONREPLY_ROUTE){
		t_unref(t->uas.request);
	} else {
		/* unref the transaction */
		t_unref(t->uac[branch].reply);
	}

	return ret;
}
/* DNS-based failover for the picked branch: ask the resolver for the
 * next IP of the branch's proxy and, if one exists, re-send the request
 * there. For local transactions without a stored UAS request, the
 * request is first re-parsed from the branch's send buffer and cloned
 * into shmem so a fake request can be built from it.
 * Returns 0 if the request was forwarded to a new destination,
 * -1 otherwise. Reads the global 'picked_branch'. */
static inline int do_dns_failover(struct cell *t)
{
	static struct sip_msg faked_req;
	struct sip_msg *shmem_msg;
	struct sip_msg *req;
	struct ua_client *uac;
	dlg_t dialog;
	int ret, sip_msg_len;

	uac = &t->uac[picked_branch];

	/* check if the DNS resolver can get at least one new IP */
	if ( get_next_su( uac->proxy, &uac->request.dst.to, 1)!=0 )
		return -1;

	LM_DBG("new destination available\n");

	if (t->uas.request==NULL) {
		if (!is_local(t)) {
			LM_CRIT("BUG: proxy transaction without UAS request :-/\n");
			return -1;
		}
		/* create the cloned SIP msg -> first create a new SIP msg
		 * by re-parsing the previously sent-out buffer */
		memset( &dialog, 0, sizeof(dialog));
		dialog.send_sock = uac->request.dst.send_sock;
		dialog.hooks.next_hop = &uac->uri;
		req = buf_to_sip_msg(uac->request.buffer.s, uac->request.buffer.len,
			&dialog);
		if (req==NULL) {
			LM_ERR("failed to generate SIP msg from previous buffer\n");
			return -1;
		}
		/* now do the actual cloning of the SIP message */
		t->uas.request = sip_msg_cloner( req, &sip_msg_len, 1);
		if (t->uas.request==NULL) {
			LM_ERR("cloning failed\n");
			free_sip_msg(req);
			return -1;
		}
		t->uas.end_request = ((char*)t->uas.request) + sip_msg_len;
		/* free the actual SIP message, keep the clone only */
		free_sip_msg(req);
		/* the sip_msg structure is static in buf_to_sip_msg,
		   so no need to free it */
	}
	shmem_msg = t->uas.request;

	/* build a pkg-memory copy of the shmem request to forward from */
	if (!fake_req(&faked_req, shmem_msg, &t->uas, uac, 1/*with dst_uri*/)) {
		LM_ERR("fake_req failed\n");
		return -1;
	}
	/* fake also the env. conforming to the fake msg */
	faked_env( t, &faked_req);

	ret = -1;

	/* set info as current destination */
	if ( set_ruri( &faked_req, &uac->uri)!= 0)
		goto done;

	setb0flags( &faked_req, uac->br_flags );
	faked_req.force_send_socket = shmem_msg->force_send_socket;

	/* send it out */
	if (t_forward_nonack( t, &faked_req, uac->proxy)==1)
		ret = 0;

done:
	/* restore original environment and free the fake msg */
	faked_env( t, 0);
	free_faked_req(&faked_req,t);
	return ret;
}
int t_append_branches(void) { struct cell *t = NULL; struct sip_msg *orig_msg = NULL; static struct sip_msg faked_req; short outgoings; int success_branch; str current_uri; str dst_uri, path, instance, ruid, location_ua; struct socket_info* si; int q, i, found; flag_t backup_bflags = 0; flag_t bflags = 0; int new_branch, branch_ret, lowest_ret; branch_bm_t added_branches; int replies_locked = 0; int ret = 0; t = get_t(); if(t == NULL) { LM_ERR("cannot get transaction\n"); return -1; } LM_DBG("transaction %u:%u in status %d\n", t->hash_index, t->label, t->uas.status); /* test if transaction has already been canceled */ if (t->flags & T_CANCELED) { ser_error=E_CANCELED; return -1; } if ((t->uas.status >= 200 && t->uas.status<=399) || ((t->uas.status >= 600 && t->uas.status) && !(t->flags & (T_6xx | T_DISABLE_6xx))) ) { LM_DBG("transaction %u:%u in status %d: cannot append new branch\n", t->hash_index, t->label, t->uas.status); return -1; } /* set the lock on the transaction here */ LOCK_REPLIES(t); replies_locked = 1; outgoings = t->nr_of_outgoings; orig_msg = t->uas.request; LM_DBG("Call %.*s: %d (%d) outgoing branches\n",orig_msg->callid->body.len, orig_msg->callid->body.s,outgoings, nr_branches); lowest_ret=E_UNSPEC; added_branches=0; /* it's a "late" branch so the on_branch variable has already been reset by previous execution of t_forward_nonack: we use the saved value */ if (t->on_branch_delayed) { /* tell add_uac that it should run branch route actions */ set_branch_route(t->on_branch_delayed); } if (!fake_req(&faked_req, orig_msg, 0, NULL)) { LOG(L_ERR, "ERROR: t_append_branches: fake_req failed\n"); return -1; } /* fake also the env. 
conforming to the fake msg */ faked_env( t, &faked_req, 0); /* DONE with faking ;-) -> run the failure handlers */ init_branch_iterator(); while((current_uri.s=next_branch( ¤t_uri.len, &q, &dst_uri, &path, &bflags, &si, &ruid, &instance, &location_ua))) { LM_DBG("Current uri %.*s\n",current_uri.len, current_uri.s); found = 0; for (i=0; i<outgoings; i++) { if (t->uac[i].ruid.len == ruid.len && !memcmp(t->uac[i].ruid.s, ruid.s, ruid.len) && t->uac[i].uri.len == current_uri.len && !memcmp(t->uac[i].uri.s, current_uri.s, current_uri.len)) { LM_DBG("branch already added [%.*s]\n", ruid.len, ruid.s); found = 1; break; } } if (found) continue; setbflagsval(0, bflags); new_branch=add_uac( t, &faked_req, ¤t_uri, (dst_uri.len) ? (&dst_uri) : ¤t_uri, &path, 0, si, faked_req.fwd_send_flags, PROTO_NONE, (dst_uri.len)?0:UAC_SKIP_BR_DST_F, &instance, &ruid, &location_ua); LM_DBG("added branch [%.*s] with ruid [%.*s]\n", current_uri.len, current_uri.s, ruid.len, ruid.s); /* test if cancel was received meanwhile */ if (t->flags & T_CANCELED) goto canceled; if (new_branch>=0) added_branches |= 1<<new_branch; else lowest_ret=MIN_int(lowest_ret, new_branch); } clear_branches(); LM_DBG("Call %.*s: %d (%d) outgoing branches after clear_branches()\n", orig_msg->callid->body.len, orig_msg->callid->body.s,outgoings, nr_branches); setbflagsval(0, backup_bflags); /* update message flags, if changed in branch route */ t->uas.request->flags = faked_req.flags; if (added_branches==0) { if(lowest_ret!=E_CFG) LOG(L_ERR, "ERROR: t_append_branch: failure to add branches (%d)\n", lowest_ret); ser_error=lowest_ret; ret = lowest_ret; goto done; } ser_error=0; /* clear branch adding errors */ /* send them out now */ success_branch=0; /* since t_append_branch can only be called from REQUEST_ROUTE, always lock replies */ for (i=outgoings; i<t->nr_of_outgoings; i++) { if (added_branches & (1<<i)) { branch_ret=t_send_branch(t, i, &faked_req , 0, 0 /* replies are already locked */ ); if (branch_ret>=0){ /* 
some kind of success */ if (branch_ret==i) { /* success */ success_branch++; if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_OUT))) run_trans_callbacks_with_buf( TMCB_REQUEST_OUT, &t->uac[nr_branches].request, &faked_req, 0, -orig_msg->REQ_METHOD); } else /* new branch added */ added_branches |= 1<<branch_ret; } } } if (success_branch<=0) { /* return always E_SEND for now * (the real reason could be: denied by onsend routes, blacklisted, * send failed or any of the errors listed before + dns failed * when attempting dns failover) */ ser_error=E_SEND; /* else return the last error (?) */ ret = -1; goto done; } ser_error=0; /* clear branch send errors, we have overall success */ set_kr(REQ_FWDED); ret = success_branch; goto done; canceled: DBG("t_append_branches: cannot append branches to a canceled transaction\n"); /* reset processed branches */ clear_branches(); /* restore backup flags from initial env */ setbflagsval(0, backup_bflags); /* update message flags, if changed in branch route */ t->uas.request->flags = faked_req.flags; /* if needed unlock transaction's replies */ /* restore the number of outgoing branches * since new branches have not been completed */ t->nr_of_outgoings = outgoings; ser_error=E_CANCELED; ret = -1; done: /* restore original environment and free the fake msg */ faked_env( t, 0, 0); free_faked_req(&faked_req,t); if (likely(replies_locked)) { replies_locked = 0; UNLOCK_REPLIES(t); } return ret; }
/* Function triggered from the reactor in order to continue the async
 * processing of transaction ctx->t once 'fd' became readable.
 * It fakes the request + environment, invokes the module's resume
 * function and, unless the latter asks to keep waiting
 * (ASYNC_CONTINUE), removes the fd from the reactor, runs the resume
 * route and disposes of the async context.
 * Always returns 0 (reactor callback convention). */
int t_resume_async(int fd, void *param)
{
	static struct sip_msg faked_req;
	static struct ua_client uac;
	async_ctx *ctx = (async_ctx *)param;
	struct cell *backup_t;
	struct usr_avp **backup_list;
	struct socket_info* backup_si;
	struct cell *t= ctx->t;
	int route;

	LM_DBG("resuming on fd %d, transaction %p \n",fd, t);

	if (current_processing_ctx) {
		LM_CRIT("BUG - a context already set!\n");
		abort();
	}

	/* prepare for resume route */
	uac.br_flags = getb0flags( t->uas.request ) ;
	uac.uri = *GET_RURI( t->uas.request );
	if (!fake_req( &faked_req /* the fake msg to be built*/,
		t->uas.request, /* the template msg saved in transaction */
		&t->uas, /*the UAS side of the transaction*/
		&uac, /* the fake UAC */
		1 /* copy dst_uri too */)
	) {
		LM_ERR("fake_req failed\n");
		return 0;
	}

	/* enviroment setting: save current globals, then impersonate
	 * the suspended transaction */
	current_processing_ctx = ctx->msg_ctx;
	backup_t = get_t();
	/* fake transaction */
	set_t( t );
	reset_kr();
	set_kr(ctx->kr);
	/* make available the avp list from transaction */
	backup_list = set_avp_list( &t->user_avps );
	/* set default send address to the saved value */
	backup_si = bind_address;
	bind_address = t->uac[0].request.dst.send_sock;

	async_status = ASYNC_DONE; /* assume default status as done */
	/* call the resume function in order to read and handle data */
	return_code = ctx->resume_f( fd, &faked_req, ctx->resume_param );
	if (async_status==ASYNC_CONTINUE) {
		/* do not run the resume route; fd stays in the reactor */
		goto restore;
	}

	/* remove from reactor, we are done */
	reactor_del_reader( fd, -1, IO_FD_CLOSING);

	if (async_status == ASYNC_DONE_CLOSE_FD)
		close(fd);

	/* run the resume_route (some type as the original one) */
	swap_route_type(route, ctx->route_type);
	run_resume_route( ctx->resume_route, &faked_req);
	set_route_type(route);

	/* no need for the context anymore */
	shm_free(ctx);

restore:
	/* restore original environment */
	set_t(backup_t);
	/* restore original avp list */
	set_avp_list( backup_list );
	bind_address = backup_si;

	free_faked_req( &faked_req, t);
	current_processing_ctx = NULL;

	return 0;
}