/*
 * Destroy the STUN socket.
 *
 * Marks the object as being destroyed under the group lock (so a second,
 * concurrent destroy call becomes a no-op returning PJ_EINVALIDOP), stops
 * the keep-alive timer, closes the socket, destroys the STUN session, and
 * drops this object's reference on the group lock.  The memory itself is
 * released by the group lock's destroy handler once the reference count
 * reaches zero, which may be after this function returns.
 */
PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
{
    TRACE_((stun_sock->obj_name, "STUN sock %p request, ref_cnt=%d",
            stun_sock, pj_grp_lock_get_ref(stun_sock->grp_lock)));

    pj_grp_lock_acquire(stun_sock->grp_lock);
    if (stun_sock->is_destroying) {
        /* Destroy already called */
        pj_grp_lock_release(stun_sock->grp_lock);
        return PJ_EINVALIDOP;
    }
    stun_sock->is_destroying = PJ_TRUE;

    /* Stop the periodic keep-alive timer, if running. */
    pj_timer_heap_cancel_if_active(stun_sock->stun_cfg.timer_heap,
                                   &stun_sock->ka_timer, 0);

    if (stun_sock->active_sock != NULL) {
        /* The active socket owns the fd and will close it; just
         * invalidate our cached handle first. */
        stun_sock->sock_fd = PJ_INVALID_SOCKET;
        pj_activesock_close(stun_sock->active_sock);
    } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) {
        /* No active socket was created; close the raw fd ourselves. */
        pj_sock_close(stun_sock->sock_fd);
        stun_sock->sock_fd = PJ_INVALID_SOCKET;
    }

    if (stun_sock->stun_sess) {
        pj_stun_session_destroy(stun_sock->stun_sess);
    }

    /* Drop this object's own reference; the actual deletion happens in
     * the group lock handler when the ref count hits zero. */
    pj_grp_lock_dec_ref(stun_sock->grp_lock);
    pj_grp_lock_release(stun_sock->grp_lock);
    return PJ_SUCCESS;
}
/* This callback is called by transport manager to destroy listener */
static pj_status_t lis_destroy(pjsip_tpfactory *factory)
{
    struct tcp_listener *tcp_lis = (struct tcp_listener *)factory;
    pj_grp_lock_t *glock;

    /* Deregister the factory from the transport manager first so no new
     * incoming connections are routed to us. */
    if (tcp_lis->is_registered) {
        pjsip_tpmgr_unregister_tpfactory(tcp_lis->tpmgr, &tcp_lis->factory);
        tcp_lis->is_registered = PJ_FALSE;
    }

    /* Close the accepting socket. */
    if (tcp_lis->asock) {
        pj_activesock_close(tcp_lis->asock);
        tcp_lis->asock = NULL;
    }

    glock = tcp_lis->grp_lock;
    if (glock != NULL) {
        /* Group-locked: clear the field before dropping the reference,
         * because the listener may be freed by the dec_ref call. */
        tcp_lis->grp_lock = NULL;
        pj_grp_lock_dec_ref(glock);
        /* Listener may have been deleted at this point */
    } else {
        /* No group lock in use; destroy synchronously. */
        lis_on_destroy(tcp_lis);
    }

    return PJ_SUCCESS;
}
/* Destroy TCP transport.
 *
 * If the transport is still registered with the transport manager, this
 * first deregisters it via pjsip_transport_destroy(), which calls back
 * into this function recursively; the second pass performs the actual
 * teardown: stop the keep-alive timer, fail all queued transmits with
 * the close reason, close the socket, and drop the group-lock reference
 * (or destroy synchronously if no group lock is used).
 */
static pj_status_t tcp_destroy(pjsip_transport *transport,
                               pj_status_t reason)
{
    struct tcp_transport *tcp = (struct tcp_transport*)transport;

    /* Keep only the first close reason reported. */
    if (tcp->close_reason == 0)
        tcp->close_reason = reason;

    if (tcp->is_registered) {
        tcp->is_registered = PJ_FALSE;
        pjsip_transport_destroy(transport);

        /* pjsip_transport_destroy will recursively call this function
         * again.
         */
        return PJ_SUCCESS;
    }

    /* Mark transport as closing */
    tcp->is_closing = PJ_TRUE;

    /* Stop keep-alive timer. */
    if (tcp->ka_timer.id) {
        pjsip_endpt_cancel_timer(tcp->base.endpt, &tcp->ka_timer);
        tcp->ka_timer.id = PJ_FALSE;
    }

    /* Cancel all delayed transmits: report each pending send as failed
     * by passing the negated close reason as the "bytes sent" value. */
    while (!pj_list_empty(&tcp->delayed_list)) {
        struct delayed_tdata *pending_tx;
        pj_ioqueue_op_key_t *op_key;

        pending_tx = tcp->delayed_list.next;
        pj_list_erase(pending_tx);

        op_key = (pj_ioqueue_op_key_t*)pending_tx->tdata_op_key;

        on_data_sent(tcp->asock, op_key, -reason);
    }

    /* The active socket owns the fd; closing it closes the socket too.
     * Otherwise close the raw socket directly. */
    if (tcp->asock) {
        pj_activesock_close(tcp->asock);
        tcp->asock = NULL;
        tcp->sock = PJ_INVALID_SOCKET;
    } else if (tcp->sock != PJ_INVALID_SOCKET) {
        pj_sock_close(tcp->sock);
        tcp->sock = PJ_INVALID_SOCKET;
    }

    if (tcp->grp_lock) {
        /* Clear the field before dec_ref: dropping the last reference
         * frees the transport. */
        pj_grp_lock_t *grp_lock = tcp->grp_lock;
        tcp->grp_lock = NULL;
        pj_grp_lock_dec_ref(grp_lock);
        /* Transport may have been deleted at this point */
    } else {
        tcp_on_destroy(tcp);
    }

    return PJ_SUCCESS;
}
/*
 * udp_destroy()
 *
 * This function is called by transport manager (by transport->destroy()).
 */
static pj_status_t udp_destroy( pjsip_transport *transport )
{
    struct udp_transport *tp = (struct udp_transport*)transport;
    int i;

    /* Mark this transport as closing. */
    tp->is_closing = 1;

    /* Cancel all pending operations. */
    /* blp: NO NO NO...
     * No need to post queued completion as we poll the ioqueue until
     * we've got events anyway. Posting completion will only cause
     * callback to be called twice with IOCP: one for the post completion
     * and another one for closing the socket.
     *
    for (i=0; i<tp->rdata_cnt; ++i) {
        pj_ioqueue_post_completion(tp->key,
                                   &tp->rdata[i]->tp_info.op_key.op_key, -1);
    }
    */

    /* Unregister from ioqueue. */
    if (tp->key) {
        /* Unregistering the key also closes the underlying socket. */
        pj_ioqueue_unregister(tp->key);
        tp->key = NULL;
    } else {
        /* Close socket. */
        if (tp->sock && tp->sock != PJ_INVALID_SOCKET) {
            pj_sock_close(tp->sock);
            tp->sock = PJ_INVALID_SOCKET;
        }
    }

    /* Must poll ioqueue because IOCP calls the callback when socket
     * is closed. We poll the ioqueue until all pending callbacks
     * have been called.  The loop is bounded (50 iterations) as a
     * safety net; each callback bumps is_closing, so we stop once
     * is_closing reaches 1 + rdata_cnt or no event is delivered.
     */
    for (i=0; i<50 && tp->is_closing < 1+tp->rdata_cnt; ++i) {
        int cnt;
        pj_time_val timeout = {0, 1};

        cnt = pj_ioqueue_poll(pjsip_endpt_get_ioqueue(transport->endpt),
                              &timeout);
        if (cnt == 0)
            break;
    }

    if (tp->grp_lock) {
        /* Clear the field before dec_ref: dropping the last reference
         * frees the transport. */
        pj_grp_lock_t *grp_lock = tp->grp_lock;
        tp->grp_lock = NULL;
        pj_grp_lock_dec_ref(grp_lock);
        /* Transport may have been deleted at this point */
    } else {
        udp_on_destroy(tp);
    }

    return PJ_SUCCESS;
}
/*
 * Release the group lock: clear the owner thread, release every member
 * lock in reverse order of registration, then drop one reference.
 *
 * Returns the status of pj_grp_lock_dec_ref() (the object may have been
 * destroyed when the last reference was dropped).
 *
 * Fix: the original loop advanced with it.peekPrevious(), which does NOT
 * move the iterator, so the loop never terminated; it also read
 * it.value() before calling previous(), which is undefined for
 * QMapIterator (value() is only valid after next()/previous()).
 */
pj_status_t pj_grp_lock_t::grp_lock_release()
{
    grp_lock_unset_owner_thread();

    /* Walk the member locks back-to-front so they are released in the
     * reverse of acquisition order. */
    QMapIterator<int, pj_lock_t*> it(lock_list);
    it.toBack();
    while (it.hasPrevious()) {
        it.previous();                  /* advance AND position value() */
        it.value()->pj_lock_release();
    }

    return pj_grp_lock_dec_ref();
}
/*
 * Poll the timer heap and invoke the callback of every entry that has
 * expired, up to ht->max_entries_per_poll entries per call.
 *
 * On return, *next_delay (if non-NULL) holds the interval until the
 * earliest remaining timer, clamped to zero if already due, or
 * PJ_MAXINT32 sec/msec when the heap is empty.
 *
 * Returns the number of callbacks fired.
 */
PJ_DEF(unsigned) pj_timer_heap_poll( pj_timer_heap_t *ht,
                                     pj_time_val *next_delay )
{
    pj_time_val now;
    unsigned count;

    PJ_ASSERT_RETURN(ht, 0);

    lock_timer_heap(ht);
    if (!ht->cur_size && next_delay) {
        /* Nothing scheduled: report "effectively infinite" delay. */
        next_delay->sec = next_delay->msec = PJ_MAXINT32;
        unlock_timer_heap(ht);
        return 0;
    }

    count = 0;
    pj_gettickcount(&now);

    while ( ht->cur_size &&
            PJ_TIME_VAL_LTE(ht->heap[0]->_timer_value, now) &&
            count < ht->max_entries_per_poll )
    {
        pj_timer_entry *node = remove_node(ht, 0);
        pj_grp_lock_t *grp_lock;

        ++count;

        /* Take over the group-lock reference held by the scheduled
         * entry; it is released after the callback below. */
        grp_lock = node->_grp_lock;
        node->_grp_lock = NULL;

        /* Drop the heap lock while running the callback so the callback
         * may freely (re)schedule or cancel timers without deadlocking. */
        unlock_timer_heap(ht);

        PJ_RACE_ME(5);

        if (node->cb)
            (*node->cb)(ht, node);

        /* Release the reference that kept the entry's owner alive while
         * the timer was pending (may destroy the owner). */
        if (grp_lock)
            pj_grp_lock_dec_ref(grp_lock);

        lock_timer_heap(ht);
    }
    if (ht->cur_size && next_delay) {
        *next_delay = ht->heap[0]->_timer_value;
        PJ_TIME_VAL_SUB(*next_delay, now);
        /* Clamp to zero if the next timer is already overdue. */
        if (next_delay->sec < 0 || next_delay->msec < 0)
            next_delay->sec = next_delay->msec = 0;
    } else if (next_delay) {
        next_delay->sec = next_delay->msec = PJ_MAXINT32;
    }
    unlock_timer_heap(ht);

    return count;
}
/*
 * Destroy the TURN socket.
 *
 * Idempotent under the group lock: the is_destroying flag makes a second
 * call a no-op.  Shuts down the TURN session and closes the active
 * socket, then drops this object's group-lock reference; the memory is
 * freed by the group lock handler when the ref count reaches zero.
 */
static void destroy(pj_turn_sock *turn_sock)
{
    PJ_LOG(4,(turn_sock->obj_name, "TURN socket destroy request, ref_cnt=%d",
              pj_grp_lock_get_ref(turn_sock->grp_lock)));

    pj_grp_lock_acquire(turn_sock->grp_lock);
    if (turn_sock->is_destroying) {
        /* Destroy already in progress; nothing more to do. */
        pj_grp_lock_release(turn_sock->grp_lock);
        return;
    }

    turn_sock->is_destroying = PJ_TRUE;
    /* Shut down the TURN session, if any. */
    if (turn_sock->sess)
        pj_turn_session_shutdown(turn_sock->sess);
    if (turn_sock->active_sock)
        pj_activesock_close(turn_sock->active_sock);
    /* Drop our own reference; deletion happens when ref count hits 0. */
    pj_grp_lock_dec_ref(turn_sock->grp_lock);
    pj_grp_lock_release(turn_sock->grp_lock);
}
/* Tear down a NAT detection session: destroy the STUN session, close the
 * socket (via ioqueue unregistration when registered), and drop the
 * group-lock reference. */
static void sess_destroy(nat_detect_session *sess)
{
    if (sess->stun_sess != NULL) {
        pj_stun_session_destroy(sess->stun_sess);
        sess->stun_sess = NULL;
    }

    if (sess->key != NULL) {
        /* Unregistering the ioqueue key also closes the underlying
         * socket, so only invalidate our handle. */
        pj_ioqueue_unregister(sess->key);
        sess->key = NULL;
        sess->sock = PJ_INVALID_SOCKET;
    } else if (sess->sock && sess->sock != PJ_INVALID_SOCKET) {
        /* Never registered; close the raw socket ourselves. */
        pj_sock_close(sess->sock);
        sess->sock = PJ_INVALID_SOCKET;
    }

    /* Session memory may be freed by this dec_ref. */
    if (sess->grp_lock != NULL)
        pj_grp_lock_dec_ref(sess->grp_lock);
}
/*
 * Destroy the STUN session.
 *
 * Marks the session as being destroyed under the group lock (a second
 * call returns PJ_EINVALIDOP), stops all pending client transactions and
 * cached-response timers — each of which holds a group-lock reference
 * while retransmitting — and finally drops this object's reference.
 * The session memory is freed by the group lock handler once the ref
 * count reaches zero.
 */
PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
{
    pj_stun_tx_data *tdata;

    PJ_ASSERT_RETURN(sess, PJ_EINVAL);

    TRACE_((SNAME(sess), "STUN session %p destroy request, ref_cnt=%d",
            sess, pj_grp_lock_get_ref(sess->grp_lock)));

    pj_grp_lock_acquire(sess->grp_lock);

    if (sess->is_destroying) {
        /* Prevent from decrementing the ref counter more than once */
        pj_grp_lock_release(sess->grp_lock);
        return PJ_EINVALIDOP;
    }

    sess->is_destroying = PJ_TRUE;

    /* We need to stop transactions and cached response because they are
     * holding the group lock's reference counter while retransmitting.
     */
    tdata = sess->pending_request_list.next;
    while (tdata != &sess->pending_request_list) {
        if (tdata->client_tsx)
            pj_stun_client_tsx_stop(tdata->client_tsx);
        tdata = tdata->next;
    }

    /* Cancel the response-cache timer of each cached response. */
    tdata = sess->cached_response_list.next;
    while (tdata != &sess->cached_response_list) {
        pj_timer_heap_cancel_if_active(tdata->sess->cfg->timer_heap,
                                       &tdata->res_timer, PJ_FALSE);
        tdata = tdata->next;
    }

    /* Drop this object's own reference; actual deletion happens when
     * the group lock's ref count drops to zero. */
    pj_grp_lock_dec_ref(sess->grp_lock);
    pj_grp_lock_release(sess->grp_lock);

    return PJ_SUCCESS;
}
/* Cancel a timer entry under the heap lock without invoking its callback
 * (F_DONT_CALL is always added to the flags).  When F_SET_ID is set the
 * entry's id is overwritten with id_val.  Any group-lock reference held
 * by the entry is released.  Returns the number of entries cancelled. */
static int cancel_timer(pj_timer_heap_t *ht,
                        pj_timer_entry *entry,
                        unsigned flags,
                        int id_val)
{
    pj_grp_lock_t *glock;
    int n_cancelled;

    PJ_ASSERT_RETURN(ht && entry, PJ_EINVAL);

    lock_timer_heap(ht);

    n_cancelled = cancel(ht, entry, flags | F_DONT_CALL);
    if (flags & F_SET_ID)
        entry->id = id_val;

    glock = entry->_grp_lock;
    if (glock != NULL) {
        /* Drop the reference that kept the entry's owner alive while the
         * timer was scheduled. */
        entry->_grp_lock = NULL;
        pj_grp_lock_dec_ref(glock);
    }

    unlock_timer_heap(ht);

    return n_cancelled;
}
/* Cancel a timer entry under the heap lock.  When set_id is true the
 * entry's id is replaced with id_val.  Any group-lock reference held by
 * the entry is released.  Returns the number of entries cancelled. */
static int cancel_timer(pj_timer_heap_t *ht,
                        pj_timer_entry *entry,
                        pj_bool_t set_id,
                        int id_val)
{
    int num;

    PJ_ASSERT_RETURN(ht && entry, PJ_EINVAL);

    lock_timer_heap(ht);

    num = cancel(ht, entry, 1);
    if (set_id)
        entry->id = id_val;

    if (entry->_grp_lock != NULL) {
        /* Drop the reference that kept the entry's owner alive while the
         * timer was scheduled. */
        pj_grp_lock_t *tmp_lock = entry->_grp_lock;
        entry->_grp_lock = NULL;
        pj_grp_lock_dec_ref(tmp_lock);
    }

    unlock_timer_heap(ht);

    return num;
}
int UE::send_message(std::string dest, std::string contents) { timeval before; timeval after; pthread_mutex_lock(&_msg_mutex); pj_str_t to; stra(&to, dest.c_str()); pj_str_t data; stra(&data, contents.c_str()); pj_str_t message; stra(&message, "MESSAGE"); pjsip_tx_data* tdata; pjsip_method msg_method; pjsip_method_init(&msg_method, _pool, &message); pjsip_endpt_create_request(get_global_endpoint(), &msg_method, &to, &_my_uri, &to, &_contact, NULL, -1, &data, &tdata); pjsip_tsx_create_uac(ua_module(), tdata, &_msg_tsx); pjsip_tpselector sel; sel.type = PJSIP_TPSELECTOR_TRANSPORT; sel.u.transport = _transport; pjsip_tsx_set_transport(_msg_tsx, &sel); pjsip_route_hdr* rt_hdr = pjsip_route_hdr_create(tdata->pool); rt_hdr->name_addr.uri = _server_uri; pjsip_msg_insert_first_hdr(tdata->msg, (pjsip_hdr*)rt_hdr); _msg_tsx->mod_data[ua_module()->id] = this; pj_grp_lock_add_ref(_msg_tsx->grp_lock); gettimeofday(&before, NULL); pj_status_t status = pjsip_tsx_send_msg(_msg_tsx, NULL); if (status != PJ_SUCCESS) { pthread_mutex_unlock(&_msg_mutex); return -1; } while (_msg_tsx->state < PJSIP_TSX_STATE_COMPLETED) { pthread_cond_wait(&_msg_cond, &_msg_mutex); } gettimeofday(&after, NULL); //unsigned long latency = ((after.tv_sec - before.tv_sec) * 1000000) + (after.tv_usec - before.tv_usec); //printf("Message latency is %lu\n", latency); int ret = _msg_tsx->status_code; pj_grp_lock_dec_ref(_msg_tsx->grp_lock); _msg_tsx = NULL; pthread_mutex_unlock(&_msg_mutex); return ret; }