/* Release every pkg_mem resource held inside an (old) domain structure
 * and NULL-out the released pointers so the struct can be safely reused
 * or freed later.  The domain_t itself is NOT freed here. */
static void free_old_domain(domain_t* d)
{
	int idx;

	if (!d)
		return;

	if (d->did.s) {
		pkg_free(d->did.s);
		d->did.s = NULL;
	}

	if (d->domain) {
		for (idx = 0; idx < d->n; idx++)
			if (d->domain[idx].s)
				pkg_free(d->domain[idx].s);
		pkg_free(d->domain);
		d->domain = NULL;
	}

	if (d->flags) {
		pkg_free(d->flags);
		d->flags = NULL;
	}

	if (d->attrs)
		destroy_avp_list(&d->attrs);
}
/* Point the current AVP list back at the global list (in case it was
 * switched away, e.g. to a transaction-local list) and wipe its content. */
void reset_avps(void)
{
	assert( crt_avps!=0 );

	/* make sure we are operating on the global list */
	if (crt_avps != &global_avps)
		crt_avps = &global_avps;

	destroy_avp_list(crt_avps);
}
/*
 * Release all shm memory allocated for the given domain structure,
 * including the structure itself.
 *
 * NOTE(review): NULL guards added for d->domain and d->flags so that a
 * partially-built structure can be freed safely -- this makes the function
 * consistent with free_old_domain(), which already guards both fields
 * (and avoids handing NULL to shm_free(), whose tolerance is
 * allocator-dependent).
 */
static void free_domain(domain_t* d)
{
	int i;

	if (!d)
		return;

	if (d->did.s)
		shm_free(d->did.s);

	if (d->domain) {
		for (i = 0; i < d->n; i++) {
			if (d->domain[i].s)
				shm_free(d->domain[i].s);
		}
		shm_free(d->domain);
	}

	if (d->flags)
		shm_free(d->flags);

	if (d->attrs)
		destroy_avp_list(&d->attrs);

	shm_free(d);
}
/*
 * Raise an event on a freshly allocated dummy SIP request.
 * A brand new message is built on every call because event raising may
 * be nested.  Returns the status of evi_raise_event_msg(), or -1 on
 * out-of-memory.
 */
int evi_raise_event(event_id_t id, evi_params_t* params)
{
	struct sip_msg* dummy_req;
	struct usr_avp *event_avps = 0;
	struct usr_avp **prev_avps;
	int rc;

	/* because these might be nested, a different message has
	 * to be generated each time */
	dummy_req = (struct sip_msg*)pkg_malloc(sizeof(struct sip_msg));
	if (dummy_req == NULL) {
		LM_ERR("No more memory\n");
		return -1;
	}
	memset(dummy_req, 0, sizeof(struct sip_msg));

	/* fill in a minimal fake request line */
	dummy_req->first_line.type = SIP_REQUEST;
	dummy_req->first_line.u.request.method.s= "DUMMY";
	dummy_req->first_line.u.request.method.len= 5;
	dummy_req->first_line.u.request.uri.s= "sip:[email protected]";
	dummy_req->first_line.u.request.uri.len= 19;

	/* switch to a private AVP list for the duration of the event */
	prev_avps = set_avp_list(&event_avps);

	rc = evi_raise_event_msg(dummy_req, id, params);

	/* clean whatever extra structures were added by script functions */
	free_sip_msg(dummy_req);
	pkg_free(dummy_req);

	/* remove all avps added */
	destroy_avp_list(&event_avps);
	set_avp_list(prev_avps);

	return rc;
}
inline static int w_t_new_request(struct sip_msg* msg, char *p_method, char *p_ruri, char *p_from, char *p_to, char *p_body, char *p_ctx) { #define CONTENT_TYPE_HDR "Content-Type: " #define CONTENT_TYPE_HDR_LEN (sizeof(CONTENT_TYPE_HDR)-1) static dlg_t dlg; struct usr_avp **avp_list; str ruri; str method; str body; str headers; str s; int_str ctx; char *p; memset( &dlg, 0, sizeof(dlg_t)); /* evaluate the parameters */ /* method */ if ( fixup_get_svalue(msg, (gparam_p)p_method, &method)<0 ) { LM_ERR("failed to extract METHOD param\n"); return -1; } LM_DBG("setting METHOD to <%.*s>\n", method.len, method.s); /* ruri - next hop is the same as RURI */ dlg.hooks.next_hop = dlg.hooks.request_uri = &ruri; if ( fixup_get_svalue(msg, (gparam_p)p_ruri, &ruri)<0 ) { LM_ERR("failed to extract RURI param\n"); return -1; } LM_DBG("setting RURI to <%.*s>\n", dlg.hooks.next_hop->len, dlg.hooks.next_hop->s); /* FROM URI + display */ if ( fixup_get_svalue(msg, (gparam_p)p_from, &s)<0 ) { LM_ERR("failed to extract FROM param\n"); return -1; } if ( (p=q_memrchr(s.s, ' ', s.len))==NULL ) { /* no display, only FROM URI */ dlg.loc_uri = s; dlg.loc_dname.s = NULL; dlg.loc_dname.len = 0; } else { /* display + URI */ dlg.loc_uri.s = p+1; dlg.loc_uri.len = s.s+s.len - dlg.loc_uri.s; dlg.loc_dname.s = s.s; dlg.loc_dname.len = p - s.s; } LM_DBG("setting FROM to <%.*s> + <%.*s>\n", dlg.loc_dname.len, dlg.loc_dname.s, dlg.loc_uri.len, dlg.loc_uri.s); /* TO URI + display */ if ( fixup_get_svalue(msg, (gparam_p)p_to, &s)<0 ) { LM_ERR("failed to extract TO param\n"); return -1; } if ( (p=q_memrchr(s.s, ' ', s.len))==NULL ) { /* no display, only TO URI */ dlg.rem_uri = s; dlg.rem_dname.s = NULL; dlg.rem_dname.len = 0; } else { /* display + URI */ dlg.rem_uri.s = p+1; dlg.rem_uri.len = s.s+s.len - dlg.rem_uri.s; dlg.rem_dname.s = s.s; dlg.rem_dname.len = p - s.s; } LM_DBG("setting TO to <%.*s> + <%.*s>\n", dlg.rem_dname.len, dlg.rem_dname.s, dlg.rem_uri.len, dlg.rem_uri.s); /* BODY and Content-Type 
*/ if (p_body!=NULL) { if ( fixup_get_svalue(msg, (gparam_p)p_body, &body)<0 ) { LM_ERR("failed to extract BODY param\n"); return -1; } if ( (p=q_memchr(body.s, ' ', body.len))==NULL ) { LM_ERR("Content Type not found in the beginning of body <%.*s>\n", body.len, body.s); return -1; } /* build the Content-type header */ headers.len = CONTENT_TYPE_HDR_LEN + (p-body.s) + CRLF_LEN; if ( (headers.s=(char*)pkg_malloc(headers.len))==NULL ) { LM_ERR("failed to get pkg mem (needed %d)\n",headers.len); return -1; } memcpy( headers.s, CONTENT_TYPE_HDR, CONTENT_TYPE_HDR_LEN); memcpy( headers.s+CONTENT_TYPE_HDR_LEN, body.s, p-body.s); memcpy( headers.s+CONTENT_TYPE_HDR_LEN+(p-body.s), CRLF, CRLF_LEN); /* set the body */ body.len = body.s + body.len - (p+1); body.s = p + 1; LM_DBG("setting BODY to <%.*s> <%.*s>\n", headers.len, headers.s, body.len, body.s ); } else { body.s = NULL; body.len = 0; headers.s = NULL; headers.len = 0; } /* context value */ if (p_ctx!=NULL) { if ( fixup_get_svalue(msg, (gparam_p)p_ctx, &ctx.s)<0 ) { LM_ERR("failed to extract BODY param\n"); if (p_body) pkg_free(headers.s); return -1; } LM_DBG("setting CTX AVP to <%.*s>\n", ctx.s.len, ctx.s.s); avp_list = set_avp_list( &dlg.avps ); if (!add_avp( AVP_VAL_STR, uac_ctx_avp_id, ctx)) LM_ERR("failed to add ctx AVP, ignorring...\n"); set_avp_list( avp_list ); } /* add cseq */ dlg.loc_seq.value = DEFAULT_CSEQ; dlg.loc_seq.is_set = 1; /* add callid */ generate_callid(&dlg.id.call_id); /* add FROM tag */ generate_fromtag(&dlg.id.loc_tag, &dlg.id.call_id); /* TO tag is empty as this is a initial request */ dlg.id.rem_tag.s = NULL; dlg.id.rem_tag.len = 0; /* do the actual sending now */ if ( t_uac( &method, headers.s?&headers:NULL, body.s?&body:NULL, &dlg, 0, 0, 0) <= 0 ) { LM_ERR("failed to send the request out\n"); if (headers.s) pkg_free(headers.s); if (dlg.avps) destroy_avp_list(&dlg.avps); return -1; } /* success -> do cleanup */ if (headers.s) pkg_free(headers.s); return 1; }
/* introduce a new uac to transaction; returns its branch id (>=0)
   or error (<0); it doesn't send a message yet -- a reply to it
   might interfere with the processes of adding multiple branches */
static int add_uac( struct cell *t, struct sip_msg *request, str *uri,
		str* next_hop, unsigned int bflags, str* path, struct proxy_l *proxy)
{
	unsigned short branch;
	int do_free_proxy;  /* 1 only when the proxy was resolved locally */
	int ret;

	branch=t->nr_of_outgoings;
	if (branch==MAX_BRANCHES) {
		LM_ERR("maximum number of branches exceeded\n");
		ret=E_CFG;
		goto error;
	}

	/* check existing buffer -- rewriting should never occur */
	if (t->uac[branch].request.buffer.s) {
		LM_CRIT("buffer rewrite attempt\n");
		ret=ser_error=E_BUG;
		goto error;
	}

	/* set proper RURI to request to reflect the branch */
	request->new_uri=*uri;
	request->parsed_uri_ok=0;
	request->dst_uri=*next_hop;
	request->path_vec=*path;
	request->ruri_bflags=bflags;

	if ( pre_print_uac_request( t, branch, request)!= 0 ) {
		ret = -1;
		goto error01;
	}

	/* check DNS resolution */
	if (proxy){
		/* proxy supplied by the caller -- caller keeps ownership */
		do_free_proxy = 0;
	}else {
		/* resolve the next hop: the dst URI if set, otherwise the new
		 * RURI; restrict to the forced-socket protocol if one is set */
		proxy=uri2proxy( request->dst_uri.len ?
			&request->dst_uri:&request->new_uri,
			request->force_send_socket ?
				request->force_send_socket->proto : PROTO_NONE );
		if (proxy==0)  {
			ret=E_BAD_ADDRESS;
			goto error01;
		}
		do_free_proxy = 1;
	}

	msg_callback_process(request, REQ_PRE_FORWARD, (void *)proxy);

	if ( !(t->flags&T_NO_DNS_FAILOVER_FLAG) ) {
		/* keep a shm clone of the proxy on the branch for DNS failover */
		t->uac[branch].proxy = shm_clone_proxy( proxy , do_free_proxy );
		if (t->uac[branch].proxy==NULL) {
			ret = E_OUT_OF_MEM;
			goto error02;
		}
	}

	/* use the first address */
	hostent2su( &t->uac[branch].request.dst.to,
		&proxy->host, proxy->addr_idx, proxy->port ? proxy->port:SIP_PORT);
	t->uac[branch].request.dst.proto = proxy->proto;

	if ( update_uac_dst( request, &t->uac[branch] )!=0) {
		ret = ser_error;
		goto error02;
	}

	/* things went well, move ahead */
	t->uac[branch].uri.s=t->uac[branch].request.buffer.s+
		request->first_line.u.request.method.len+1;
	t->uac[branch].uri.len=request->new_uri.len;
	t->uac[branch].br_flags = request->ruri_bflags;
	t->uac[branch].added_rr = count_local_rr( request );
	t->nr_of_outgoings++;

	/* done! */
	ret=branch;
	/* NOTE: intentional fall-through -- on success the locally resolved
	 * proxy (if any) is released and the request is restored below; the
	 * (ret < 0) cleanup is skipped since ret==branch>=0 here */

error02:
	if(do_free_proxy) {
		free_proxy( proxy );
		pkg_free( proxy );
	}
error01:
	post_print_uac_request( request, uri, next_hop);
	if (ret < 0) {
		/* destroy all the bavps added, the path vector and the destination,
		 * since this branch will never be properly added to
		 * the UAC list, otherwise we'll have memory leaks - razvanc */
		if (t->uac[branch].user_avps)
			destroy_avp_list(&t->uac[branch].user_avps);
		if (t->uac[branch].path_vec.s)
			shm_free(t->uac[branch].path_vec.s);
		if (t->uac[branch].duri.s)
			shm_free(t->uac[branch].duri.s);
	}
error:
	return ret;
}
/* Allocate and initialize a new transaction cell in shm memory for the
 * given request (p_msg may be NULL).  Takes over the current URI/USER
 * AVP lists (redirecting the global heads into the cell) and, when a
 * message is given, stores a shm clone of it as the UAS request.
 * Returns the new cell, or NULL on error. */
struct cell* build_cell( struct sip_msg* p_msg )
{
	struct cell* new_cell;
	int sip_msg_len;
	avp_list_t* old;

	/* allocs a new cell */
	new_cell = (struct cell*)shm_malloc( sizeof( struct cell ) );
	if  ( !new_cell ) {
		ser_error=E_OUT_OF_MEM;
		return NULL;
	}

	/* filling with 0 */
	memset( new_cell, 0, sizeof( struct cell ) );

	/* UAS */
	new_cell->uas.response.my_T=new_cell;
	init_rb_timers(&new_cell->uas.response);
	/* timers */
	init_cell_timers(new_cell);

	/* move the currently active AVP lists into the transaction: redirect
	 * each global head to the cell's field, take over the previous list
	 * content, then empty the old head so the AVPs are owned by the cell */
	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI,
		&new_cell->uri_avps_from );
	new_cell->uri_avps_from = *old;
	*old = 0;
	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI,
		&new_cell->uri_avps_to );
	new_cell->uri_avps_to = *old;
	*old = 0;
	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER,
		&new_cell->user_avps_from );
	new_cell->user_avps_from = *old;
	*old = 0;
	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER,
		&new_cell->user_avps_to );
	new_cell->user_avps_to = *old;
	*old = 0;

	/* We can just store pointer to domain avps in the transaction context,
	 * because they are read-only */
	new_cell->domain_avps_from =
		get_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN);
	new_cell->domain_avps_to =
		get_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN);

	/* enter callback, which may potentially want to parse some stuff,
	 * before the request is shmem-ized */
	if ( p_msg && has_reqin_tmcbs() )
		run_reqin_callbacks( new_cell, p_msg, p_msg->REQ_METHOD);

	if (p_msg) {
		/* clean possible previous added vias/clen header or else they would
		 * get propagated in the failure routes */
		free_via_clen_lump(&p_msg->add_rm);
		new_cell->uas.request = sip_msg_cloner(p_msg,&sip_msg_len);
		if (!new_cell->uas.request)
			goto error;
		new_cell->uas.end_request=((char*)new_cell->uas.request)+sip_msg_len;
	}

	/* UAC */
	init_branches(new_cell);

	new_cell->relayed_reply_branch = -1;
	/* new_cell->T_canceled = T_UNDEFINED; */

	init_synonym_id(new_cell);
	init_cell_lock(  new_cell );
	return new_cell;

error:
	/* the cell now owns the AVP lists taken over above -- destroy them
	 * before releasing the cell */
	destroy_avp_list(&new_cell->user_avps_from);
	destroy_avp_list(&new_cell->user_avps_to);
	destroy_avp_list(&new_cell->uri_avps_from);
	destroy_avp_list(&new_cell->uri_avps_to);
	shm_free(new_cell);
	/* unlink transaction AVP list and link back the global AVP list (bogdan)*/
	reset_avps();
	return NULL;
}
void handle_ebr_ipc(int sender, void *payload) { ebr_ipc_job *job = (ebr_ipc_job*)payload; struct usr_avp **old_avps; struct sip_msg req; LM_DBG("EBR notification received via IPC for event %.*s\n", job->ev->event_name.len, job->ev->event_name.s); if (job->flags&EBR_SUBS_TYPE_NOTY) { /* this is a job for notifiying on an event */ /* push our list of AVPs */ old_avps = set_avp_list( &job->avps ); /* prepare a fake/dummy request */ memset( &req, 0, sizeof(struct sip_msg)); req.first_line.type = SIP_REQUEST; req.first_line.u.request.method.s= "DUMMY"; req.first_line.u.request.method.len= 5; req.first_line.u.request.uri.s= "sip:[email protected]"; req.first_line.u.request.uri.len= 19; req.rcv.src_ip.af = AF_INET; req.rcv.dst_ip.af = AF_INET; LM_DBG("using transaction reference %X:%X\n", job->tm.hash, job->tm.label); if (ebr_tmb.t_set_remote_t && job->tm.hash!=0 && job->tm.label!=0 ) ebr_tmb.t_set_remote_t( &job->tm ); /* route the notification route */ set_route_type( REQUEST_ROUTE ); run_top_route( rlist[(int)(long)job->data].a, &req); if (ebr_tmb.t_set_remote_t) ebr_tmb.t_set_remote_t( NULL ); /* cleanup over route execution */ set_avp_list( old_avps ); free_sip_msg( &req ); /* destroy everything */ destroy_avp_list( &job->avps ); shm_free(job); } else { /* this is a job for resuming on WAIT */ /* pass the list of AVPs to be pushed into the msg */ ((async_ctx*)job->data)->resume_param = job->avps; /* invoke the global resume ASYNC function */ async_script_resume_f(ASYNC_FD_NONE, job->data /*the async ctx*/ ); shm_free(job); } return; }
/* Walk all EBR subscriptions on the given event, drop expired NOTIFY
 * subscriptions, match the event params against each subscription's
 * filters (fnmatch patterns) and, for every match, dispatch an IPC job
 * carrying a clone of the params (packed as AVPs).  One-shot WAIT
 * subscriptions are removed after being triggered.  Returns 0. */
int notify_ebr_subscriptions( ebr_event *ev, evi_params_t *params)
{
	ebr_subscription *sub, *sub_next, *sub_prev;
	ebr_filter *filter;
	ebr_ipc_job *job;
	evi_param_t *e_param;
	int matches;
	char *s;
	/* (void*)-1 is a sentinel: "params not yet converted to AVPs" */
	struct usr_avp *avps=(void*)-1;
	unsigned int my_time;

	LM_DBG("notification received for event %.*s, checking subscriptions\n",
		ev->event_name.len, ev->event_name.s);

	my_time = get_ticks();

	lock_get( &(ev->lock) );

	/* check the EBR subscription on this event and apply the filters.
	 * The increment expression handles the in-loop removal cases:
	 * sub_next is set when the current node was unlinked (advance to the
	 * saved successor); otherwise follow sub->next (sub may be NULL here
	 * when the removed node was the list head, hence the inner test) */
	sub_prev = NULL;
	for ( sub=ev->subs ; sub ;
			sub_prev=sub, sub=sub_next?sub_next:(sub?sub->next:NULL) ) {

		/* discard expired NOTIFY subscriptions */
		if (sub->flags&EBR_SUBS_TYPE_NOTY && sub->expire<my_time) {
			LM_DBG("subscription type [NOTIFY]from process %d(pid %d) on "
				"event <%.*s> expired at %d\n",
				sub->proc_no, pt[sub->proc_no].pid,
				sub->event->event_name.len, sub->event->event_name.s,
				sub->expire );
			/* remove the subscription */
			sub_next = sub->next;
			/* unlink it */
			if (sub_prev) sub_prev->next = sub_next;
			else ev->subs = sub_next;
			/* free it */
			free_ebr_subscription(sub);
			/* do not count us as prev, as we are removed */
			sub = sub_prev;
			continue;
		}

		/* run the filters */
		matches = 1;
		sub_next = NULL;
		for ( filter=sub->filters ; matches && filter ; filter=filter->next ) {
			/* look for the evi param with the same name */
			for ( e_param=params->first ; e_param ; e_param=e_param->next ) {
				if (e_param->name.len==filter->key.len &&
				strncasecmp(e_param->name.s,filter->key.s,filter->key.len)==0){
					/* name matches, let's see the value */
					LM_DBG("key <%.*s> found, checking value \n",
						filter->key.len, filter->key.s);
					if (filter->val.len==0) {
						/* a "no value" matches anything */
					} else {
						if (e_param->flags&EVI_INT_VAL) {
							/* int param: render to string for fnmatch */
							s=int2str((unsigned long)e_param->val.n, NULL);
							if (s==NULL) {
								LM_ERR("failed to covert int EVI param to "
									"string, EBR filter failed\n");
								matches = 0;
							} else {
								/* the output of int2str is NULL terminated */
								if (fnmatch( filter->val.s, s, 0)!=0)
									matches = 0;
							}
						} else if (e_param->flags&EVI_STR_VAL) {
							/* str param: NUL-terminate a copy for fnmatch */
							s=(char*)pkg_malloc(e_param->val.s.len+1);
							if (s==NULL) {
								LM_ERR("failed to allocate PKG fnmatch "
									"buffer, EBR filter failed\n");
								matches = 0;
							} else {
								memcpy(s,e_param->val.s.s,e_param->val.s.len);
								s[e_param->val.s.len] = 0;
								if (fnmatch( filter->val.s, s, 0)!=0)
									matches = 0;
								pkg_free(s);
							}
						} else {
							LM_ERR("non-string EVI params are not supported "
								"yet\n");
							matches = 0;
						}
					}
					break;
				}
				/* a filter not matching any EVI params is simply ignored */
			} /* end EVI param iterator */
		} /* end EBR filter iterator */

		/* did the EVI event match the EBR filters for this subscription ? */
		if (matches) {
			LM_DBG("subscription type [%s]from process %d(pid %d) matched "
				"event, generating notification via IPC\n",
				(sub->flags&EBR_SUBS_TYPE_WAIT)?"WAIT":"NOTIFY",
				sub->proc_no, pt[sub->proc_no].pid);

			/* convert the EVI params into AVP (only once) */
			if (avps==(void*)-1) {
				avps = pack_evi_params_as_avp_list(params);
			}

			/* pack the EVI params to be attached to the IPC job */
			job =(ebr_ipc_job*)shm_malloc( sizeof(ebr_ipc_job) );
			if (job==NULL) {
				LM_ERR("failed to allocated new IPC job, skipping..\n");
				continue; /* with the next subscription */
			}

			/* each job gets its own clone of the AVP list */
			job->ev = ev;
			job->avps = clone_avp_list( avps );
			job->data = sub->data;
			job->flags = sub->flags;
			job->tm = sub->tm;

			if (sub->flags&EBR_SUBS_TYPE_NOTY) {
				/* dispatch the event notification via IPC to the right
				 * process. Key question - which one is the "right" process ?
				 *  - the current processs
				 *  - the process which performed the subscription
				 * Let's give it to ourselves for the moment */
				if (ipc_send_job( process_no, ebr_ipc_type , (void*)job)<0) {
					LM_ERR("failed to send job via IPC, skipping...\n");
					shm_free(job);
				}
			} else {
				/* sent the event notification via IPC to resume on the
				 * subscribing process */
				if (ipc_send_job( sub->proc_no, ebr_ipc_type , (void*)job)<0) {
					LM_ERR("failed to send job via IPC, skipping...\n");
					shm_free(job);
				}
				/* remove the subscription, as it can be triggered only
				 * one time */
				sub_next = sub->next;
				/* unlink it */
				if (sub_prev) sub_prev->next = sub_next;
				else ev->subs = sub_next;
				/* free it */
				free_ebr_subscription(sub);
				/* do not count us as prev, as we are removed */
				sub = sub_prev;
			}
		}

	} /* end EBR subscription iterator */

	lock_release( &(ev->lock) );

	/* destroy the AVP template list, if it was ever built */
	if (avps!=(void*)-1)
		destroy_avp_list( &avps );

	return 0;
}