/* Stop Session Timers */
static void stop_timer(pjsip_inv_session *inv)
{
    if (inv->timer->timer.id != 0) {
        pjsip_endpt_cancel_timer(inv->dlg->endpt, &inv->timer->timer);
        inv->timer->timer.id = 0;
    }

    if (inv->timer->expire_timer.id != 0) {
        pjsip_endpt_cancel_timer(inv->dlg->endpt, &inv->timer->expire_timer);
        inv->timer->expire_timer.id = 0;
    }
}
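/*
 * All of the snippets collected here follow the same idiom: a pj_timer_entry
 * counts as "armed" only while its id field is non-zero, so the owner cancels
 * it via the endpoint before resetting the id or scheduling it again.
 * The sketch below is a hypothetical, minimal illustration of that idiom
 * (assuming <pjsip.h>); my_timer_cb, MY_TIMER_ID and my_restart_timer are
 * illustrative names, not library APIs.
 */
enum { MY_TIMER_ID = 1 };

static void my_timer_cb(pj_timer_heap_t *timer_heap, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(timer_heap);

    /* Mark the entry as no longer scheduled before doing any work. */
    entry->id = 0;
}

static void my_restart_timer(pjsip_endpoint *endpt, pj_timer_entry *entry,
                             long sec_delay)
{
    pj_time_val delay;

    /* Cancel the previous schedule, if the entry is still armed. */
    if (entry->id != 0) {
        pjsip_endpt_cancel_timer(endpt, entry);
        entry->id = 0;
    }

    /* Re-arm the entry and schedule it again. */
    pj_timer_entry_init(entry, MY_TIMER_ID, NULL, &my_timer_cb);
    delay.sec = sec_delay;
    delay.msec = 0;
    pjsip_endpt_schedule_timer(endpt, entry, &delay);
}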
PJ_DEF(pj_status_t) pjsip_publishc_unpublish(pjsip_publishc *pubc,
                                             pjsip_tx_data **p_tdata)
{
    pjsip_tx_data *tdata;
    pjsip_msg *msg;
    pjsip_expires_hdr *expires;
    pj_status_t status;

    PJ_ASSERT_RETURN(pubc && p_tdata, PJ_EINVAL);

    if (pubc->timer.id != 0) {
        pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
        pubc->timer.id = 0;
    }

    status = create_request(pubc, &tdata);
    if (status != PJ_SUCCESS)
        return status;

    msg = tdata->msg;

    /* Add Expires:0 header */
    expires = pjsip_expires_hdr_create(tdata->pool, 0);
    pjsip_msg_add_hdr(msg, (pjsip_hdr*)expires);

    *p_tdata = tdata;
    return PJ_SUCCESS;
}
PJ_DEF(pj_status_t) pjsip_publishc_publish(pjsip_publishc *pubc,
                                           pj_bool_t auto_refresh,
                                           pjsip_tx_data **p_tdata)
{
    pj_status_t status;
    pjsip_tx_data *tdata;

    PJ_ASSERT_RETURN(pubc && p_tdata, PJ_EINVAL);

    status = create_request(pubc, &tdata);
    if (status != PJ_SUCCESS)
        return status;

    /* Add Expires header */
    if (pubc->expires_hdr) {
        pjsip_hdr *dup;

        dup = (pjsip_hdr*)
              pjsip_hdr_shallow_clone(tdata->pool, pubc->expires_hdr);
        if (dup)
            pjsip_msg_add_hdr(tdata->msg, dup);
    }

    /* Cancel existing timer */
    if (pubc->timer.id != 0) {
        pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
        pubc->timer.id = 0;
    }

    pubc->auto_refresh = auto_refresh;

    /* Done */
    *p_tdata = tdata;
    return PJ_SUCCESS;
}
static pj_bool_t rt_on_rx_response(pjsip_rx_data *rdata)
{
    if (!pj_strncmp(&rdata->msg_info.cid->id, &rt_call_id, rt_call_id.slen)) {
        char *pos = pj_strchr(&rdata->msg_info.cid->id, '/') + 1;
        int thread_id = (*pos - '0');
        pj_timestamp recv_time;

        pj_mutex_lock(rt_test_data[thread_id].mutex);

        /* Stop timer. */
        pjsip_endpt_cancel_timer(endpt, &rt_test_data[thread_id].timeout_timer);

        /* Update counter and end-time. */
        rt_test_data[thread_id].recv_response_count++;
        pj_get_timestamp(&recv_time);
        pj_sub_timestamp(&recv_time, &rt_test_data[thread_id].send_time);
        pj_add_timestamp(&rt_test_data[thread_id].total_rt_time, &recv_time);

        if (!rt_stop) {
            pj_time_val tx_delay = { 0, 0 };
            pj_assert(rt_test_data[thread_id].tx_timer.user_data == NULL);
            rt_test_data[thread_id].tx_timer.user_data = (void*)1;
            pjsip_endpt_schedule_timer(endpt, &rt_test_data[thread_id].tx_timer,
                                       &tx_delay);
        }

        pj_mutex_unlock(rt_test_data[thread_id].mutex);
        return PJ_TRUE;
    }

    return PJ_FALSE;
}
PJ_DEF(pj_status_t) pjsip_regc_destroy(pjsip_regc *regc)
{
    PJ_ASSERT_RETURN(regc, PJ_EINVAL);

    pj_lock_acquire(regc->lock);
    if (regc->has_tsx || pj_atomic_get(regc->busy_ctr) != 0) {
        regc->_delete_flag = 1;
        regc->cb = NULL;
        pj_lock_release(regc->lock);
    } else {
        pjsip_tpselector_dec_ref(&regc->tp_sel);
        if (regc->last_transport) {
            pjsip_transport_dec_ref(regc->last_transport);
            regc->last_transport = NULL;
        }
        if (regc->timer.id != 0) {
            pjsip_endpt_cancel_timer(regc->endpt, &regc->timer);
            regc->timer.id = 0;
        }
        pj_atomic_destroy(regc->busy_ctr);
        pj_lock_release(regc->lock);
        pj_lock_destroy(regc->lock);
        regc->lock = NULL;
        pjsip_endpt_release_pool(regc->endpt, regc->pool);
    }

    return PJ_SUCCESS;
}
PJ_DEF(pj_status_t) pjsip_regc_set_delay_before_refresh(pjsip_regc *regc,
                                                        pj_uint32_t delay)
{
    PJ_ASSERT_RETURN(regc, PJ_EINVAL);

    if (delay > regc->expires)
        return PJ_ETOOBIG;

    pj_lock_acquire(regc->lock);

    if (regc->delay_before_refresh != delay) {
        regc->delay_before_refresh = delay;

        if (regc->timer.id != 0) {
            /* Cancel registration timer */
            pjsip_endpt_cancel_timer(regc->endpt, &regc->timer);
            regc->timer.id = 0;

            /* Schedule next registration */
            schedule_registration(regc, regc->expires);
        }
    }

    pj_lock_release(regc->lock);

    return PJ_SUCCESS;
}
/* This will update the UAC's refresh schedule. */
static void update_next_refresh(pjsip_event_sub *sub, int interval)
{
    pj_time_val delay = { 0, 0 };
    pj_parsed_time pt;

    if (interval < SECONDS_BEFORE_EXPIRY) {
        PJ_LOG(4,(THIS_FILE,
                  "event_sub%p (%s): expiration delay too short (%d sec)! updated.",
                  sub, state[sub->state].ptr, interval));
        interval = SECONDS_BEFORE_EXPIRY;
    }

    if (sub->timer.id != 0)
        pjsip_endpt_cancel_timer(sub->endpt, &sub->timer);

    sub->timer.id = TIMER_ID_REFRESH;
    sub->timer.user_data = sub;
    sub->timer.cb = &refresh_timer_cb;

    pj_gettimeofday(&sub->expiry_time);
    delay.sec = interval - SECONDS_BEFORE_EXPIRY;
    sub->expiry_time.sec += delay.sec;

    pj_time_decode(&sub->expiry_time, &pt);
    PJ_LOG(4,(THIS_FILE,
              "event_sub%p (%s): will send SUBSCRIBE at %02d:%02d:%02d (in %d secs)",
              sub, state[sub->state].ptr,
              pt.hour, pt.min, pt.sec, delay.sec));

    pjsip_endpt_schedule_timer( sub->endpt, &sub->timer, &delay );
}
/* Destroy TCP transport */
static pj_status_t tcp_destroy(pjsip_transport *transport, pj_status_t reason)
{
    struct tcp_transport *tcp = (struct tcp_transport*)transport;

    if (tcp->close_reason == 0)
        tcp->close_reason = reason;

    if (tcp->is_registered) {
        tcp->is_registered = PJ_FALSE;
        pjsip_transport_destroy(transport);

        /* pjsip_transport_destroy will recursively call this function
         * again.
         */
        return PJ_SUCCESS;
    }

    /* Mark transport as closing */
    tcp->is_closing = PJ_TRUE;

    /* Stop keep-alive timer. */
    if (tcp->ka_timer.id) {
        pjsip_endpt_cancel_timer(tcp->base.endpt, &tcp->ka_timer);
        tcp->ka_timer.id = PJ_FALSE;
    }

    /* Cancel all delayed transmits */
    while (!pj_list_empty(&tcp->delayed_list)) {
        struct delayed_tdata *pending_tx;
        pj_ioqueue_op_key_t *op_key;

        pending_tx = tcp->delayed_list.next;
        pj_list_erase(pending_tx);

        op_key = (pj_ioqueue_op_key_t*)pending_tx->tdata_op_key;

        on_data_sent(tcp->asock, op_key, -reason);
    }

    if (tcp->asock) {
        pj_activesock_close(tcp->asock);
        tcp->asock = NULL;
        tcp->sock = PJ_INVALID_SOCKET;
    } else if (tcp->sock != PJ_INVALID_SOCKET) {
        pj_sock_close(tcp->sock);
        tcp->sock = PJ_INVALID_SOCKET;
    }

    if (tcp->grp_lock) {
        pj_grp_lock_t *grp_lock = tcp->grp_lock;
        tcp->grp_lock = NULL;
        pj_grp_lock_dec_ref(grp_lock);
        /* Transport may have been deleted at this point */
    } else {
        tcp_on_destroy(tcp);
    }

    return PJ_SUCCESS;
}
PJ_DEF(pj_status_t) pjsip_regc_register(pjsip_regc *regc, pj_bool_t autoreg,
                                        pjsip_tx_data **p_tdata)
{
    pjsip_msg *msg;
    pjsip_contact_hdr *hdr;
    pj_status_t status;
    pjsip_tx_data *tdata;

    PJ_ASSERT_RETURN(regc && p_tdata, PJ_EINVAL);

    pj_lock_acquire(regc->lock);

    status = create_request(regc, &tdata);
    if (status != PJ_SUCCESS) {
        pj_lock_release(regc->lock);
        return status;
    }
    msg = tdata->msg;

    /* Add Contact headers. */
    hdr = regc->contact_hdr_list.next;
    while (hdr != &regc->contact_hdr_list) {
        pjsip_msg_add_hdr(msg, (pjsip_hdr*)
                               pjsip_hdr_shallow_clone(tdata->pool, hdr));
        hdr = hdr->next;
    }

    /* Also add bindings which are to be removed */
    while (!pj_list_empty(&regc->removed_contact_hdr_list)) {
        hdr = regc->removed_contact_hdr_list.next;
        pjsip_msg_add_hdr(msg, (pjsip_hdr*)
                               pjsip_hdr_clone(tdata->pool, hdr));
        pj_list_erase(hdr);
    }

    if (regc->expires_hdr)
        pjsip_msg_add_hdr(msg, (pjsip_hdr*)
                               pjsip_hdr_shallow_clone(tdata->pool,
                                                       regc->expires_hdr));

    if (regc->timer.id != 0) {
        pjsip_endpt_cancel_timer(regc->endpt, &regc->timer);
        regc->timer.id = 0;
    }

    regc->auto_reg = autoreg;

    pj_lock_release(regc->lock);

    /* Done */
    *p_tdata = tdata;
    return PJ_SUCCESS;
}
static pj_status_t rt_send_request(int thread_id)
{
    pj_status_t status;
    pj_str_t target, from, to, contact, call_id;
    pjsip_tx_data *tdata;
    pj_time_val timeout_delay;

    pj_mutex_lock(rt_test_data[thread_id].mutex);

    /* Create a request message. */
    target = pj_str(rt_target_uri);
    from = pj_str(FROM_HDR);
    to = pj_str(rt_target_uri);
    contact = pj_str(CONTACT_HDR);
    call_id = rt_test_data[thread_id].call_id;

    status = pjsip_endpt_create_request( endpt, &pjsip_options_method,
                                         &target, &from, &to,
                                         &contact, &call_id, -1,
                                         NULL, &tdata );
    if (status != PJ_SUCCESS) {
        app_perror(" error: unable to create request", status);
        pj_mutex_unlock(rt_test_data[thread_id].mutex);
        return -610;
    }

    /* Start time. */
    pj_get_timestamp(&rt_test_data[thread_id].send_time);

    /* Send the message (statelessly). */
    status = pjsip_endpt_send_request_stateless( endpt, tdata, NULL, NULL);
    if (status != PJ_SUCCESS) {
        /* Immediate error! */
        app_perror(" error: send request", status);
        pjsip_tx_data_dec_ref(tdata);
        pj_mutex_unlock(rt_test_data[thread_id].mutex);
        return -620;
    }

    /* Update counter. */
    rt_test_data[thread_id].sent_request_count++;

    /* Set timeout timer. */
    if (rt_test_data[thread_id].timeout_timer.user_data != NULL) {
        pjsip_endpt_cancel_timer(endpt, &rt_test_data[thread_id].timeout_timer);
    }
    timeout_delay.sec = 100;
    timeout_delay.msec = 0;
    rt_test_data[thread_id].timeout_timer.user_data = (void*)1;
    pjsip_endpt_schedule_timer(endpt, &rt_test_data[thread_id].timeout_timer,
                               &timeout_delay);

    pj_mutex_unlock(rt_test_data[thread_id].mutex);
    return PJ_SUCCESS;
}
/// Restart the timer using the specified id and timeout.
void Flow::restart_timer(int id, int timeout)
{
  if (_timer.id)
  {
    // Stop the existing timer.
    pjsip_endpt_cancel_timer(stack_data.endpt, &_timer);
    _timer.id = 0;
  }

  pj_time_val delay = {timeout, 0};
  pjsip_endpt_schedule_timer(stack_data.endpt, &_timer, &delay);
  _timer.id = id;
}
/*
 * Destroy subscription.
 * If there are pending transactions, then this will just set the flag.
 */
PJ_DEF(pj_status_t) pjsip_event_sub_destroy(pjsip_event_sub *sub)
{
    pj_assert(sub != NULL);
    if (sub == NULL)
        return -1;

    /* Application must terminate the subscription first. */
    pj_assert(sub->state == PJSIP_EVENT_SUB_STATE_NULL ||
              sub->state == PJSIP_EVENT_SUB_STATE_TERMINATED);

    PJ_LOG(4,(THIS_FILE, "event_sub%p (%s): about to be destroyed",
              sub, state[sub->state].ptr));

    pj_mutex_lock(mgr.mutex);
    pj_mutex_lock(sub->mutex);

    /* Set delete flag. */
    sub->delete_flag = 1;

    /* Unregister timer, if any. */
    if (sub->timer.id != 0) {
        pjsip_endpt_cancel_timer(sub->endpt, &sub->timer);
        sub->timer.id = 0;
    }

    if (sub->pending_tsx > 0) {
        pj_mutex_unlock(sub->mutex);
        pj_mutex_unlock(mgr.mutex);
        PJ_LOG(4,(THIS_FILE, "event_sub%p (%s): has %d pending, will destroy later",
                  sub, state[sub->state].ptr, sub->pending_tsx));
        return 1;
    }

    /* Unregister from hash table. */
    pj_hash_set(sub->pool, mgr.ht, sub->key.ptr, sub->key.slen, NULL);

    /* Destroy. */
    pj_mutex_destroy(sub->mutex);
    pjsip_endpt_destroy_pool(sub->endpt, sub->pool);

    pj_mutex_unlock(mgr.mutex);

    PJ_LOG(4,(THIS_FILE, "event_sub%p: destroyed", sub));
    return 0;
}
PJ_DEF(pj_status_t) pjsip_regc_unregister_all(pjsip_regc *regc,
                                              pjsip_tx_data **p_tdata)
{
    pjsip_tx_data *tdata;
    pjsip_contact_hdr *hcontact;
    pjsip_hdr *hdr;
    pjsip_msg *msg;
    pj_status_t status;

    PJ_ASSERT_RETURN(regc && p_tdata, PJ_EINVAL);

    pj_lock_acquire(regc->lock);

    if (regc->timer.id != 0) {
        pjsip_endpt_cancel_timer(regc->endpt, &regc->timer);
        regc->timer.id = 0;
    }

    status = create_request(regc, &tdata);
    if (status != PJ_SUCCESS) {
        pj_lock_release(regc->lock);
        return status;
    }
    msg = tdata->msg;

    /* Clear removed_contact_hdr_list */
    pj_list_init(&regc->removed_contact_hdr_list);

    /* Add Contact:* header */
    hcontact = pjsip_contact_hdr_create(tdata->pool);
    hcontact->star = 1;
    pjsip_msg_add_hdr(msg, (pjsip_hdr*)hcontact);

    /* Add Expires:0 header */
    hdr = (pjsip_hdr*) pjsip_expires_hdr_create(tdata->pool, 0);
    pjsip_msg_add_hdr(msg, hdr);

    pj_lock_release(regc->lock);

    *p_tdata = tdata;
    return PJ_SUCCESS;
}
PJ_DEF(pj_status_t) pjsip_publishc_destroy(pjsip_publishc *pubc)
{
    PJ_ASSERT_RETURN(pubc, PJ_EINVAL);

    if (pubc->pending_tsx || pubc->in_callback) {
        pubc->_delete_flag = 1;
        pubc->cb = NULL;
    } else {
        /* Cancel existing timer, if any */
        if (pubc->timer.id != 0) {
            pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
            pubc->timer.id = 0;
        }

        if (pubc->mutex)
            pj_mutex_destroy(pubc->mutex);

        pjsip_endpt_release_pool(pubc->endpt, pubc->pool);
    }

    return PJ_SUCCESS;
}
/*
 * Shutdown presence.
 */
void pjsua_pres_shutdown(void)
{
    unsigned i;

    if (pjsua_var.pres_timer.id != 0) {
        pjsip_endpt_cancel_timer(pjsua_var.endpt, &pjsua_var.pres_timer);
        pjsua_var.pres_timer.id = PJ_FALSE;
    }

    for (i=0; i<PJ_ARRAY_SIZE(pjsua_var.acc); ++i) {
        if (!pjsua_var.acc[i].valid)
            continue;
        pjsua_pres_delete_acc(i);
    }

    for (i=0; i<PJ_ARRAY_SIZE(pjsua_var.buddy); ++i) {
        pjsua_var.buddy[i].monitor = 0;
    }

    pjsua_pres_refresh();
}
/* Schedule notifier expiration. */
static void sub_schedule_uas_expire( pjsip_event_sub *sub, int sec_delay)
{
    pj_time_val delay = { 0, 0 };
    pj_parsed_time pt;

    if (sub->timer.id != 0)
        pjsip_endpt_cancel_timer(sub->endpt, &sub->timer);

    pj_gettimeofday(&sub->expiry_time);
    sub->expiry_time.sec += sec_delay;

    sub->timer.id = TIMER_ID_UAS_EXPIRY;
    sub->timer.user_data = sub;
    sub->timer.cb = &uas_expire_timer_cb;
    delay.sec = sec_delay;
    pjsip_endpt_schedule_timer( sub->endpt, &sub->timer, &delay);

    pj_time_decode(&sub->expiry_time, &pt);
    PJ_LOG(4,(THIS_FILE,
              "event_sub%p (%s)(UAS): will expire at %02d:%02d:%02d (in %d secs)",
              sub, state[sub->state].ptr,
              pt.hour, pt.min, pt.sec, sec_delay));
}
Flow::~Flow()
{
  if (PJSIP_TRANSPORT_IS_RELIABLE(_transport))
  {
    // Remove the state listener to ensure it doesn't get called after the
    // flow is destroyed.
    pjsip_transport_remove_state_listener(_transport,
                                          _tp_state_listener_key,
                                          this);

    // We incremented the ref count when we put it in the map.
    pjsip_transport_dec_ref(_transport);
  }

  if (_timer.id)
  {
    // Stop the keepalive timer.
    pjsip_endpt_cancel_timer(stack_data.endpt, &_timer);
    _timer.id = 0;
  }

  pthread_mutex_destroy(&_flow_lock);
}
/*
 * Stop subscription.
 */
PJ_DEF(pj_status_t) pjsip_event_sub_unsubscribe( pjsip_event_sub *sub )
{
    pjsip_tx_data *tdata;
    const pjsip_route_hdr *route;
    pj_status_t status;

    PJ_LOG(4,(THIS_FILE, "event_sub%p (%s): unsubscribing...",
              sub, state[sub->state].ptr));

    /* Lock subscription. */
    pj_mutex_lock(sub->mutex);

    pj_assert(sub->role == PJSIP_ROLE_UAC);

    /* Kill refresh timer, if any. */
    if (sub->timer.id != 0) {
        sub->timer.id = 0;
        pjsip_endpt_cancel_timer(sub->endpt, &sub->timer);
    }

    /* Create request. */
    tdata = pjsip_endpt_create_request_from_hdr( sub->endpt,
                                                 &SUBSCRIBE,
                                                 sub->to->uri,
                                                 sub->from, sub->to,
                                                 sub->contact, sub->call_id,
                                                 sub->cseq++,
                                                 NULL);
    if (!tdata) {
        pj_mutex_unlock(sub->mutex);
        return -1;
    }

    /* Add headers to request. */
    pjsip_msg_add_hdr( tdata->msg,
                       pjsip_hdr_shallow_clone(tdata->pool, sub->event));
    sub->uac_expires->ivalue = 0;
    pjsip_msg_add_hdr( tdata->msg,
                       pjsip_hdr_shallow_clone(tdata->pool, sub->uac_expires));

    /* Add authentication. */
    pjsip_auth_init_req( sub->pool, tdata, &sub->auth_sess,
                         sub->cred_cnt, sub->cred_info);

    /* Route set. */
    route = sub->route_set.next;
    while (route != &sub->route_set) {
        pj_list_insert_before( &tdata->msg->hdr,
                               pjsip_hdr_shallow_clone(tdata->pool, route));
        route = route->next;
    }

    /* Prevent timer from refreshing itself. */
    sub->default_interval = 0;

    /* Set state. */
    sub_set_state( sub, PJSIP_EVENT_SUB_STATE_TERMINATED );

    /* Send the request. */
    status = pjsip_endpt_send_request( sub->endpt, tdata, -1, sub,
                                       &on_subscribe_response);
    if (status == 0) {
        sub->pending_tsx++;
    }

    pj_mutex_unlock(sub->mutex);

    if (status != 0) {
        PJ_LOG(4,(THIS_FILE, "event_sub%p (%s): FAILED to unsubscribe!",
                  sub, state[sub->state].ptr));
    }

    return status;
}
static void tsx_callback(void *token, pjsip_event *event)
{
    pj_status_t status;
    pjsip_publishc *pubc = (pjsip_publishc*) token;
    pjsip_transaction *tsx = event->body.tsx_state.tsx;

    /* Decrement pending transaction counter. */
    pj_assert(pubc->pending_tsx > 0);
    --pubc->pending_tsx;

    /* Mark that we're in callback to prevent deletion (#1164) */
    ++pubc->in_callback;

    /* If publication data has been deleted by user then remove publication
     * data from transaction's callback, and don't call callback.
     */
    if (pubc->_delete_flag) {

        /* Nothing to do */
        ;

    } else if (tsx->status_code == PJSIP_SC_PROXY_AUTHENTICATION_REQUIRED ||
               tsx->status_code == PJSIP_SC_UNAUTHORIZED)
    {
        pjsip_rx_data *rdata = event->body.tsx_state.src.rdata;
        pjsip_tx_data *tdata;

        status = pjsip_auth_clt_reinit_req( &pubc->auth_sess,
                                            rdata,
                                            tsx->last_tx,
                                            &tdata);
        if (status != PJ_SUCCESS) {
            call_callback(pubc, status, tsx->status_code,
                          &rdata->msg_info.msg->line.status.reason,
                          rdata, -1);
        } else {
            status = pjsip_publishc_send(pubc, tdata);
        }

    } else {
        pjsip_rx_data *rdata;
        pj_int32_t expiration = 0xFFFF;

        if (tsx->status_code/100 == 2) {
            pjsip_msg *msg;
            pjsip_expires_hdr *expires;
            pjsip_generic_string_hdr *etag_hdr;
            const pj_str_t STR_ETAG = { "SIP-ETag", 8 };

            rdata = event->body.tsx_state.src.rdata;
            msg = rdata->msg_info.msg;

            /* Save ETag value */
            etag_hdr = (pjsip_generic_string_hdr*)
                       pjsip_msg_find_hdr_by_name(msg, &STR_ETAG, NULL);
            if (etag_hdr) {
                pj_strdup(pubc->pool, &pubc->etag, &etag_hdr->hvalue);
            } else {
                pubc->etag.slen = 0;
            }

            /* Update expires value */
            expires = (pjsip_expires_hdr*)
                      pjsip_msg_find_hdr(msg, PJSIP_H_EXPIRES, NULL);

            if (pubc->auto_refresh && expires)
                expiration = expires->ivalue;

            if (pubc->auto_refresh && expiration!=0 && expiration!=0xFFFF) {
                pj_time_val delay = { 0, 0};

                /* Cancel existing timer, if any */
                if (pubc->timer.id != 0) {
                    pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
                    pubc->timer.id = 0;
                }

                delay.sec = expiration - DELAY_BEFORE_REFRESH;
                if (pubc->expires != PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED &&
                    delay.sec > (pj_int32_t)pubc->expires)
                {
                    delay.sec = pubc->expires;
                }
                if (delay.sec < DELAY_BEFORE_REFRESH)
                    delay.sec = DELAY_BEFORE_REFRESH;
                pubc->timer.cb = &pubc_refresh_timer_cb;
                pubc->timer.id = REFRESH_TIMER;
                pubc->timer.user_data = pubc;
                pjsip_endpt_schedule_timer( pubc->endpt, &pubc->timer, &delay);
                pj_gettimeofday(&pubc->last_refresh);
                pubc->next_refresh = pubc->last_refresh;
                pubc->next_refresh.sec += delay.sec;
            }

        } else {
            rdata = (event->body.tsx_state.type==PJSIP_EVENT_RX_MSG) ?
                     event->body.tsx_state.src.rdata : NULL;
        }

        /* Call callback. */
        if (expiration == 0xFFFF) expiration = -1;

        /* Temporarily increment pending_tsx to prevent callback from
         * destroying pubc.
         */
        ++pubc->pending_tsx;

        call_callback(pubc, PJ_SUCCESS, tsx->status_code,
                      (rdata ? &rdata->msg_info.msg->line.status.reason
                             : pjsip_get_status_text(tsx->status_code)),
                      rdata, expiration);

        --pubc->pending_tsx;

        /* If we have pending request(s), send them now */
        pj_mutex_lock(pubc->mutex);
        while (!pj_list_empty(&pubc->pending_reqs)) {
            pjsip_tx_data *tdata = pubc->pending_reqs.next;
            pj_list_erase(tdata);

            /* Add SIP-If-Match if we have etag and the request doesn't have
             * one (http://trac.pjsip.org/repos/ticket/996)
             */
            if (pubc->etag.slen) {
                const pj_str_t STR_HNAME = { "SIP-If-Match", 12 };
                pjsip_generic_string_hdr *sim_hdr;

                sim_hdr = (pjsip_generic_string_hdr*)
                          pjsip_msg_find_hdr_by_name(tdata->msg, &STR_HNAME, NULL);
                if (!sim_hdr) {
                    /* Create the header */
                    sim_hdr = pjsip_generic_string_hdr_create(tdata->pool,
                                                              &STR_HNAME,
                                                              &pubc->etag);
                    pjsip_msg_add_hdr(tdata->msg, (pjsip_hdr*)sim_hdr);

                } else {
                    /* Update */
                    if (pj_strcmp(&pubc->etag, &sim_hdr->hvalue))
                        pj_strdup(tdata->pool, &sim_hdr->hvalue, &pubc->etag);
                }
            }

            status = pjsip_publishc_send(pubc, tdata);
            if (status == PJ_EPENDING) {
                pj_assert(!"Not expected");
                pj_list_erase(tdata);
                pjsip_tx_data_dec_ref(tdata);
            } else if (status == PJ_SUCCESS) {
                break;
            }
        }
        pj_mutex_unlock(pubc->mutex);
    }

    /* No longer in callback. */
    --pubc->in_callback;

    /* Delete the record if user destroy pubc during the callback. */
    if (pubc->_delete_flag && pubc->pending_tsx==0) {
        pjsip_publishc_destroy(pubc);
    }
}
/* Destroy TLS transport */
static pj_status_t tls_destroy(pjsip_transport *transport, pj_status_t reason)
{
    struct tls_transport *tls = (struct tls_transport*)transport;

    if (tls->close_reason == 0)
        tls->close_reason = reason;

    if (tls->is_registered) {
        tls->is_registered = PJ_FALSE;
        pjsip_transport_destroy(transport);

        /* pjsip_transport_destroy will recursively call this function
         * again.
         */
        return PJ_SUCCESS;
    }

    /* Mark transport as closing */
    tls->is_closing = PJ_TRUE;

    /* Stop keep-alive timer. */
    if (tls->ka_timer.id) {
        pjsip_endpt_cancel_timer(tls->base.endpt, &tls->ka_timer);
        tls->ka_timer.id = PJ_FALSE;
    }

    /* Cancel all delayed transmits */
    while (!pj_list_empty(&tls->delayed_list)) {
        struct delayed_tdata *pending_tx;
        pj_ioqueue_op_key_t *op_key;

        pending_tx = tls->delayed_list.next;
        pj_list_erase(pending_tx);

        op_key = (pj_ioqueue_op_key_t*)pending_tx->tdata_op_key;

        on_data_sent(tls->ssock, op_key, -reason);
    }

    if (tls->rdata.tp_info.pool) {
        pj_pool_release(tls->rdata.tp_info.pool);
        tls->rdata.tp_info.pool = NULL;
    }

    if (tls->ssock) {
        pj_ssl_sock_close(tls->ssock);
        tls->ssock = NULL;
    }

    if (tls->base.lock) {
        pj_lock_destroy(tls->base.lock);
        tls->base.lock = NULL;
    }

    if (tls->base.ref_cnt) {
        pj_atomic_destroy(tls->base.ref_cnt);
        tls->base.ref_cnt = NULL;
    }

    if (tls->base.pool) {
        pj_pool_t *pool;

        if (reason != PJ_SUCCESS) {
            char errmsg[PJ_ERR_MSG_SIZE];

            pj_strerror(reason, errmsg, sizeof(errmsg));
            PJ_LOG(4,(tls->base.obj_name,
                      "TLS transport destroyed with reason %d: %s",
                      reason, errmsg));
        } else {
            PJ_LOG(4,(tls->base.obj_name,
                      "TLS transport destroyed normally"));
        }

        pool = tls->base.pool;
        tls->base.pool = NULL;
        pj_pool_release(pool);
    }

    return PJ_SUCCESS;
}
/*
 * Handler when invite state has changed.
 */
static void on_call_state(pjsua_call_id call_id, pjsip_event *e)
{
    pjsua_call_info call_info;

    PJ_UNUSED_ARG(e);

    pjsua_call_get_info(call_id, &call_info);

    if (call_info.state == PJSIP_INV_STATE_DISCONNECTED) {

        /* Cancel duration timer, if any */
        if (app_config.call_data[call_id].timer.id != PJSUA_INVALID_ID) {
            struct call_data *cd = &app_config.call_data[call_id];
            pjsip_endpoint *endpt = pjsua_get_pjsip_endpt();

            cd->timer.id = PJSUA_INVALID_ID;
            pjsip_endpt_cancel_timer(endpt, &cd->timer);
        }

        PJ_LOG(3,(THIS_FILE, "Call %d is DISCONNECTED [reason=%d (%s)]",
                  call_id,
                  call_info.last_status,
                  call_info.last_status_text.ptr));

        if (call_id == current_call) {
            find_next_call();
        }

        /* Dump media state upon disconnected */
        if (1) {
            PJ_LOG(5,(THIS_FILE,
                      "Call %d disconnected, dumping media stats..",
                      call_id));
            log_call_dump(call_id);
        }

    } else {

        if (app_config.duration!=NO_LIMIT &&
            call_info.state == PJSIP_INV_STATE_CONFIRMED)
        {
            /* Schedule timer to hangup call after the specified duration */
            struct call_data *cd = &app_config.call_data[call_id];
            pjsip_endpoint *endpt = pjsua_get_pjsip_endpt();
            pj_time_val delay;

            cd->timer.id = call_id;
            delay.sec = app_config.duration;
            delay.msec = 0;
            pjsip_endpt_schedule_timer(endpt, &cd->timer, &delay);
        }

        if (call_info.state == PJSIP_INV_STATE_EARLY) {
            int code;
            pj_str_t reason;
            pjsip_msg *msg;

            /* This can only occur because of TX or RX message */
            pj_assert(e->type == PJSIP_EVENT_TSX_STATE);

            if (e->body.tsx_state.type == PJSIP_EVENT_RX_MSG) {
                msg = e->body.tsx_state.src.rdata->msg_info.msg;
            } else {
                msg = e->body.tsx_state.src.tdata->msg;
            }

            code = msg->line.status.code;
            reason = msg->line.status.reason;

            PJ_LOG(3,(THIS_FILE, "Call %d state changed to %s (%d %.*s)",
                      call_id, call_info.state_text.ptr,
                      code, (int)reason.slen, reason.ptr));
        } else {
            PJ_LOG(3,(THIS_FILE, "Call %d state changed to %s",
                      call_id,
                      call_info.state_text.ptr));
        }

        if (current_call==PJSUA_INVALID_ID)
            current_call = call_id;
    }

    // callback
    if (cb_callstate != 0)
        cb_callstate(call_id, call_info.state);
}
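/*
 * Hypothetical sketch of the duration-timer callback that would pair with the
 * scheduling above: on_call_state() stores the call id in cd->timer.id, so the
 * callback can recover it from the entry, disarm the timer, and hang up. The
 * name call_timeout_callback and the 200 status code are assumptions, not
 * taken from the snippet above.
 */
static void call_timeout_callback(pj_timer_heap_t *timer_heap,
                                  pj_timer_entry *entry)
{
    pjsua_call_id call_id = entry->id;

    PJ_UNUSED_ARG(timer_heap);

    if (call_id == PJSUA_INVALID_ID)
        return;

    /* Disarm the entry so on_call_state() won't try to cancel it again. */
    entry->id = PJSUA_INVALID_ID;

    PJ_LOG(3,(THIS_FILE, "Duration (%d seconds) exceeded for call %d, "
              "disconnecting the call", app_config.duration, call_id));
    pjsua_call_hangup(call_id, 200, NULL, NULL);
}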
int transport_rt_test( pjsip_transport_type_e tp_type,
                       pjsip_transport *ref_tp,
                       char *target_url,
                       int *lost)
{
    enum { THREADS = 4, INTERVAL = 10 };
    int i;
    pj_status_t status;
    pj_pool_t *pool;
    pj_bool_t logger_enabled;

    pj_timestamp zero_time, total_time;
    unsigned usec_rt;
    unsigned total_sent;
    unsigned total_recv;

    PJ_UNUSED_ARG(tp_type);
    PJ_UNUSED_ARG(ref_tp);

    PJ_LOG(3,(THIS_FILE, " multithreaded round-trip test (%d threads)...",
              THREADS));
    PJ_LOG(3,(THIS_FILE, " this will take approx %d seconds, please wait..",
              INTERVAL));

    /* Make sure msg logger is disabled. */
    logger_enabled = msg_logger_set_enabled(0);

    /* Register module (if not yet registered) */
    if (rt_module.id == -1) {
        status = pjsip_endpt_register_module( endpt, &rt_module );
        if (status != PJ_SUCCESS) {
            app_perror(" error: unable to register module", status);
            return -600;
        }
    }

    /* Create pool for this test. */
    pool = pjsip_endpt_create_pool(endpt, NULL, 4000, 4000);
    if (!pool)
        return -610;

    /* Initialize static test data. */
    pj_ansi_strcpy(rt_target_uri, target_url);
    rt_call_id = pj_str("RT-Call-Id/");
    rt_stop = PJ_FALSE;

    /* Initialize thread data. */
    for (i=0; i<THREADS; ++i) {
        char buf[1];
        pj_str_t str_id;

        pj_strset(&str_id, buf, 1);
        pj_bzero(&rt_test_data[i], sizeof(rt_test_data[i]));

        /* Init timer entry */
        rt_test_data[i].tx_timer.id = i;
        rt_test_data[i].tx_timer.cb = &rt_tx_timer;
        rt_test_data[i].timeout_timer.id = i;
        rt_test_data[i].timeout_timer.cb = &rt_timeout_timer;

        /* Generate Call-ID for each thread. */
        rt_test_data[i].call_id.ptr = (char*) pj_pool_alloc(pool, rt_call_id.slen+1);
        pj_strcpy(&rt_test_data[i].call_id, &rt_call_id);
        buf[0] = '0' + (char)i;
        pj_strcat(&rt_test_data[i].call_id, &str_id);

        /* Init mutex. */
        status = pj_mutex_create_recursive(pool, "rt", &rt_test_data[i].mutex);
        if (status != PJ_SUCCESS) {
            app_perror(" error: unable to create mutex", status);
            return -615;
        }

        /* Create thread, suspended. */
        status = pj_thread_create(pool, "rttest%p", &rt_worker_thread,
                                  (void*)(long)i, 0, PJ_THREAD_SUSPENDED,
                                  &rt_test_data[i].thread);
        if (status != PJ_SUCCESS) {
            app_perror(" error: unable to create thread", status);
            return -620;
        }
    }

    /* Start threads! */
    for (i=0; i<THREADS; ++i) {
        pj_time_val delay = {0,0};
        pj_thread_resume(rt_test_data[i].thread);

        /* Schedule first message transmissions. */
        rt_test_data[i].tx_timer.user_data = (void*)1;
        pjsip_endpt_schedule_timer(endpt, &rt_test_data[i].tx_timer, &delay);
    }

    /* Sleep for some time. */
    pj_thread_sleep(INTERVAL * 1000);

    /* Signal thread to stop. */
    rt_stop = PJ_TRUE;

    /* Wait threads to complete. */
    for (i=0; i<THREADS; ++i) {
        pj_thread_join(rt_test_data[i].thread);
        pj_thread_destroy(rt_test_data[i].thread);
    }

    /* Destroy rt_test_data */
    for (i=0; i<THREADS; ++i) {
        pj_mutex_destroy(rt_test_data[i].mutex);
        pjsip_endpt_cancel_timer(endpt, &rt_test_data[i].timeout_timer);
    }

    /* Gather statistics. */
    pj_bzero(&total_time, sizeof(total_time));
    pj_bzero(&zero_time, sizeof(zero_time));
    usec_rt = total_sent = total_recv = 0;
    for (i=0; i<THREADS; ++i) {
        total_sent += rt_test_data[i].sent_request_count;
        total_recv += rt_test_data[i].recv_response_count;
        pj_add_timestamp(&total_time, &rt_test_data[i].total_rt_time);
    }

    /* Display statistics. */
    if (total_recv)
        total_time.u64 = total_time.u64/total_recv;
    else
        total_time.u64 = 0;
    usec_rt = pj_elapsed_usec(&zero_time, &total_time);
    PJ_LOG(3,(THIS_FILE, " done."));
    PJ_LOG(3,(THIS_FILE, " total %d messages sent", total_sent));
    PJ_LOG(3,(THIS_FILE, " average round-trip=%d usec", usec_rt));

    pjsip_endpt_release_pool(endpt, pool);

    *lost = total_sent-total_recv;

    /* Flush events. */
    flush_events(500);

    /* Restore msg logger. */
    msg_logger_set_enabled(logger_enabled);

    return 0;
}
/* This callback is called when we received NOTIFY response. */
static void on_notify_response(void *token, pjsip_event *event)
{
    pjsip_event_sub *sub = token;
    pjsip_event_sub_state old_state = sub->state;
    pjsip_transaction *tsx = event->obj.tsx;

    /* Lock the subscription. */
    pj_mutex_lock(sub->mutex);

    pj_assert(sub->role == PJSIP_ROLE_UAS);

    /* If request failed with authorization failure, silently retry. */
    if (tsx->status_code==401 || tsx->status_code==407) {
        pjsip_tx_data *tdata;

        tdata = pjsip_auth_reinit_req( sub->endpt,
                                       sub->pool, &sub->auth_sess,
                                       sub->cred_cnt, sub->cred_info,
                                       tsx->last_tx, event->src.rdata );
        if (tdata) {
            int status;
            pjsip_cseq_hdr *cseq;

            cseq = pjsip_msg_find_hdr(tdata->msg, PJSIP_H_CSEQ, NULL);
            cseq->cseq = sub->cseq++;
            status = pjsip_endpt_send_request( sub->endpt, tdata, -1,
                                               sub, &on_notify_response);
            if (status == 0) {
                pj_mutex_unlock(sub->mutex);
                return;
            }
        }
    }

    /* Notify application. */
    if (sub->cb.on_received_notify_response)
        (*sub->cb.on_received_notify_response)(sub, event);

    /* Check for response 481. */
    if (event->obj.tsx->status_code == 481) {
        /* Remote says that the subscription does not exist!
         * Terminate subscription!
         */
        sub_set_state(sub, PJSIP_EVENT_SUB_STATE_TERMINATED);
        if (sub->timer.id) {
            pjsip_endpt_cancel_timer(sub->endpt, &sub->timer);
            sub->timer.id = 0;
        }

        PJ_LOG(4, (THIS_FILE,
                   "event_sub%p (%s): got 481 response to NOTIFY. Terminating...",
                   sub, state[sub->state].ptr));

        /* Notify app. */
        if (sub->state!=old_state && sub->cb.on_sub_terminated)
            (*sub->cb.on_sub_terminated)(sub,
                                         &event->src.rdata->msg->line.status.reason);
    }

    /* Decrement pending transaction count. */
    --sub->pending_tsx;
    pj_assert(sub->pending_tsx >= 0);

    /* Check that the subscription is marked for deletion. */
    if (sub->delete_flag && sub->pending_tsx <= 0) {
        pjsip_event_sub_destroy(sub);
    } else {
        pj_mutex_unlock(sub->mutex);
    }

    /* DO NOT ACCESS sub, IT MIGHT HAVE BEEN DESTROYED! */
}