/* * pj_ioqueue_destroy() */ PJ_DEF(pj_status_t) pj_ioqueue_destroy( pj_ioqueue_t *ioqueue ) { #if PJ_HAS_TCP unsigned i; #endif pj_ioqueue_key_t *key; PJ_CHECK_STACK(); PJ_ASSERT_RETURN(ioqueue, PJ_EINVAL); pj_lock_acquire(ioqueue->lock); #if PJ_HAS_TCP /* Destroy events in the pool */ for (i=0; i<ioqueue->event_count; ++i) { CloseHandle(ioqueue->event_pool[i]); } ioqueue->event_count = 0; #endif if (CloseHandle(ioqueue->iocp) != TRUE) return PJ_RETURN_OS_ERROR(GetLastError()); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Destroy reference counters */ key = ioqueue->active_list.next; while (key != &ioqueue->active_list) { pj_atomic_destroy(key->ref_count); pj_mutex_destroy(key->mutex); key = key->next; } key = ioqueue->closing_list.next; while (key != &ioqueue->closing_list) { pj_atomic_destroy(key->ref_count); pj_mutex_destroy(key->mutex); key = key->next; } key = ioqueue->free_list.next; while (key != &ioqueue->free_list) { pj_atomic_destroy(key->ref_count); pj_mutex_destroy(key->mutex); key = key->next; } #endif if (ioqueue->auto_delete_lock) pj_lock_destroy(ioqueue->lock); return PJ_SUCCESS; }
/*
 * Destroy the client registration session. If a transaction is still in
 * progress (or the session is busy), destruction is deferred by setting
 * _delete_flag; otherwise all resources are released immediately.
 */
PJ_DEF(pj_status_t) pjsip_regc_destroy(pjsip_regc *regc)
{
    PJ_ASSERT_RETURN(regc, PJ_EINVAL);

    pj_lock_acquire(regc->lock);
    if (regc->has_tsx || pj_atomic_get(regc->busy_ctr) != 0) {
        /* Busy: mark for deferred deletion and disable the callback so
         * the application is not notified after requesting destroy.
         */
        regc->_delete_flag = 1;
        regc->cb = NULL;
        pj_lock_release(regc->lock);
    } else {
        /* Fix: "&regc->..." had been mangled into the registered-sign
         * character ("(R)c->...") by an encoding round-trip; the
         * address-of expressions are restored below.
         */
        pjsip_tpselector_dec_ref(&regc->tp_sel);
        if (regc->last_transport) {
            pjsip_transport_dec_ref(regc->last_transport);
            regc->last_transport = NULL;
        }
        if (regc->timer.id != 0) {
            pjsip_endpt_cancel_timer(regc->endpt, &regc->timer);
            regc->timer.id = 0;
        }
        pj_atomic_destroy(regc->busy_ctr);
        /* Release before destroying the lock, then free the pool that
         * holds the regc itself.
         */
        pj_lock_release(regc->lock);
        pj_lock_destroy(regc->lock);
        regc->lock = NULL;
        pjsip_endpt_release_pool(regc->endpt, regc->pool);
    }

    return PJ_SUCCESS;
}
// // Destroy. // void destroy() { if (var_) { pj_atomic_destroy(var_); var_ = NULL; } }
/* Destructor: detach this transport from the manager's event handling,
 * then free the lock and reference counter owned by the embedded pjsip
 * transport base.
 */
SipIceTransport::~SipIceTransport()
{
    RING_DBG("~SipIceTransport@%p", this);

    /* Stop event callbacks for this object before tearing anything down. */
    Manager::instance().unregisterEventHandler((uintptr_t)this);

    /* NOTE(review): assumes no other thread still holds trData_.base.lock
     * or the ref counter at this point -- confirm with callers.
     */
    pj_lock_destroy(trData_.base.lock);
    pj_atomic_destroy(trData_.base.ref_cnt);

    RING_DBG("destroying SipIceTransport@%p", this);
}
/*!
 * \brief Destroy the pjsip transport.
 *
 * Called by pjsip transport manager.
 */
static pj_status_t ws_destroy(pjsip_transport *transport)
{
    struct ws_transport *wstransport = (struct ws_transport *)transport;
    pj_atomic_t *refs = wstransport->transport.ref_cnt;
    pj_lock_t *lock = wstransport->transport.lock;

    /* Release the reference counter and lock owned by the base
     * transport, when they exist.
     */
    if (refs != NULL) {
        pj_atomic_destroy(refs);
    }
    if (lock != NULL) {
        pj_lock_destroy(lock);
    }

    /* Finally return the transport's pool to the endpoint. */
    pjsip_endpt_release_pool(wstransport->transport.endpt,
                             wstransport->transport.pool);

    return PJ_SUCCESS;
}
/*
 * Decrease transport data reference, destroy it when the reference count
 * reaches zero.
 */
PJ_DEF(pj_status_t) pjsip_tx_data_dec_ref( pjsip_tx_data *tdata )
{
    pj_assert( pj_atomic_get(tdata->ref_cnt) > 0);

    /* Still referenced elsewhere: nothing more to do. */
    if (pj_atomic_dec_and_get(tdata->ref_cnt) > 0)
        return PJ_SUCCESS;

    /* Last reference dropped: tear the buffer down and release its pool. */
    PJ_LOG(5,(tdata->obj_name, "Destroying txdata %s",
              pjsip_tx_data_get_info(tdata)));
    pjsip_tpselector_dec_ref(&tdata->tp_sel);
#if defined(PJ_DEBUG) && PJ_DEBUG!=0
    pj_atomic_dec( tdata->mgr->tdata_counter );
#endif
    pj_atomic_destroy( tdata->ref_cnt );
    pj_lock_destroy( tdata->lock );
    pjsip_endpt_release_pool( tdata->mgr->endpt, tdata->pool );

    return PJSIP_EBUFDESTROYED;
}
/* Clean up TCP resources */ static void tcp_on_destroy(void *arg) { struct tcp_transport *tcp = (struct tcp_transport*)arg; if (tcp->base.lock) { pj_lock_destroy(tcp->base.lock); tcp->base.lock = NULL; } if (tcp->base.ref_cnt) { pj_atomic_destroy(tcp->base.ref_cnt); tcp->base.ref_cnt = NULL; } if (tcp->rdata.tp_info.pool) { pj_pool_release(tcp->rdata.tp_info.pool); tcp->rdata.tp_info.pool = NULL; } if (tcp->base.pool) { pj_pool_t *pool; if (tcp->close_reason != PJ_SUCCESS) { char errmsg[PJ_ERR_MSG_SIZE]; pj_strerror(tcp->close_reason, errmsg, sizeof(errmsg)); PJ_LOG(4,(tcp->base.obj_name, "TCP transport destroyed with reason %d: %s", tcp->close_reason, errmsg)); } else { PJ_LOG(4,(tcp->base.obj_name, "TCP transport destroyed normally")); } pool = tcp->base.pool; tcp->base.pool = NULL; pj_pool_release(pool); } }
/* Clean up UDP resources */ static void udp_on_destroy(void *arg) { struct udp_transport *tp = (struct udp_transport*)arg; int i; /* Destroy rdata */ for (i=0; i<tp->rdata_cnt; ++i) { pj_pool_release(tp->rdata[i]->tp_info.pool); } /* Destroy reference counter. */ if (tp->base.ref_cnt) pj_atomic_destroy(tp->base.ref_cnt); /* Destroy lock */ if (tp->base.lock) pj_lock_destroy(tp->base.lock); /* Destroy pool. */ pjsip_endpt_release_pool(tp->base.endpt, tp->base.pool); }
/* Handler to destroy the transport; called by transport manager.
 * Stops the worker thread, drains both the outgoing and "incoming"
 * queues (notifying senders with -PJSIP_ESHUTDOWN), then frees the
 * transport's own resources.
 */
static pj_status_t loop_destroy(pjsip_transport *tp)
{
    struct loop_transport *loop = (struct loop_transport*)tp;

    PJ_ASSERT_RETURN(tp && (tp->key.type == PJSIP_TRANSPORT_LOOP ||
                     tp->key.type == PJSIP_TRANSPORT_LOOP_DGRAM), PJ_EINVAL);

    loop->thread_quit_flag = 1;
    /* Unlock transport mutex before joining thread.
     * NOTE(review): this release implies the caller invokes us with
     * tp->lock held -- confirm against the transport manager's destroy
     * contract.
     */
    pj_lock_release(tp->lock);
    pj_thread_join(loop->thread);
    pj_thread_destroy(loop->thread);

    /* Clear pending send notifications. */
    while (!pj_list_empty(&loop->send_list)) {
        struct send_list *node = loop->send_list.next;
        /* Notify callback that the send failed because of shutdown. */
        if (node->callback) {
            (*node->callback)(&loop->base, node->token, -PJSIP_ESHUTDOWN);
        }
        pj_list_erase(node);
        pjsip_tx_data_dec_ref(node->tdata);
    }

    /* Clear "incoming" packets in the queue: each node lives in its
     * rdata's pool, so releasing the pool frees the node too.
     */
    while (!pj_list_empty(&loop->recv_list)) {
        struct recv_list *node = loop->recv_list.next;
        pj_list_erase(node);
        pjsip_endpt_release_pool(loop->base.endpt,
                                 node->rdata.tp_info.pool);
    }

    /* Self destruct.. heheh.. (lock, ref counter, then the pool that
     * holds this transport itself)
     */
    pj_lock_destroy(loop->base.lock);
    pj_atomic_destroy(loop->base.ref_cnt);
    pjsip_endpt_release_pool(loop->base.endpt, loop->base.pool);

    return PJ_SUCCESS;
}
static void transport_dtor(void *arg) { struct ws_transport *wstransport = arg; if (wstransport->ws_session) { ast_websocket_unref(wstransport->ws_session); } if (wstransport->transport.ref_cnt) { pj_atomic_destroy(wstransport->transport.ref_cnt); } if (wstransport->transport.lock) { pj_lock_destroy(wstransport->transport.lock); } if (wstransport->transport.endpt && wstransport->transport.pool) { pjsip_endpt_release_pool(wstransport->transport.endpt, wstransport->transport.pool); } if (wstransport->rdata.tp_info.pool) { pjsip_endpt_release_pool(wstransport->transport.endpt, wstransport->rdata.tp_info.pool); } }
/* Destroy TLS transport.
 * First invocation (while still registered) routes through
 * pjsip_transport_destroy(), which calls back into this function; the
 * second pass performs the actual teardown: cancel the keep-alive timer,
 * fail all delayed transmits, close the SSL socket, then free lock,
 * ref counter and pool.
 */
static pj_status_t tls_destroy(pjsip_transport *transport,
                               pj_status_t reason)
{
    struct tls_transport *tls = (struct tls_transport*)transport;

    /* Remember the first (most meaningful) close reason. */
    if (tls->close_reason == 0)
        tls->close_reason = reason;

    if (tls->is_registered) {
        tls->is_registered = PJ_FALSE;
        pjsip_transport_destroy(transport);

        /* pjsip_transport_destroy will recursively call this function
         * again.
         */
        return PJ_SUCCESS;
    }

    /* Mark transport as closing */
    tls->is_closing = PJ_TRUE;

    /* Stop keep-alive timer. */
    if (tls->ka_timer.id) {
        pjsip_endpt_cancel_timer(tls->base.endpt, &tls->ka_timer);
        tls->ka_timer.id = PJ_FALSE;
    }

    /* Cancel all delayed transmits: notify each pending sender with a
     * negative status derived from the close reason.
     */
    while (!pj_list_empty(&tls->delayed_list)) {
        struct delayed_tdata *pending_tx;
        pj_ioqueue_op_key_t *op_key;

        pending_tx = tls->delayed_list.next;
        pj_list_erase(pending_tx);

        op_key = (pj_ioqueue_op_key_t*)pending_tx->tdata_op_key;

        on_data_sent(tls->ssock, op_key, -reason);
    }

    if (tls->rdata.tp_info.pool) {
        pj_pool_release(tls->rdata.tp_info.pool);
        tls->rdata.tp_info.pool = NULL;
    }

    /* Close the SSL socket before destroying the lock/ref counter it
     * may still report through.
     */
    if (tls->ssock) {
        pj_ssl_sock_close(tls->ssock);
        tls->ssock = NULL;
    }

    if (tls->base.lock) {
        pj_lock_destroy(tls->base.lock);
        tls->base.lock = NULL;
    }

    if (tls->base.ref_cnt) {
        pj_atomic_destroy(tls->base.ref_cnt);
        tls->base.ref_cnt = NULL;
    }

    if (tls->base.pool) {
        pj_pool_t *pool;

        /* Log the outcome before the pool holding this object is freed. */
        if (reason != PJ_SUCCESS) {
            char errmsg[PJ_ERR_MSG_SIZE];

            pj_strerror(reason, errmsg, sizeof(errmsg));
            PJ_LOG(4,(tls->base.obj_name,
                      "TLS transport destroyed with reason %d: %s",
                      reason, errmsg));

        } else {

            PJ_LOG(4,(tls->base.obj_name,
                      "TLS transport destroyed normally"));

        }

        pool = tls->base.pool;
        tls->base.pool = NULL;
        pj_pool_release(pool);
    }

    return PJ_SUCCESS;
}
/*
 * pj_ioqueue_create()
 *
 * Create a WinNT IOCP-based ioqueue: the completion port, its lock, and
 * (when safe unregistration is enabled) a preallocated pool of keys.
 */
PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
                                       pj_size_t max_fd,
                                       pj_ioqueue_t **p_ioqueue)
{
    pj_ioqueue_t *ioqueue;
    unsigned i;
    pj_status_t rc;

    PJ_UNUSED_ARG(max_fd);
    PJ_ASSERT_RETURN(pool && p_ioqueue, PJ_EINVAL);

    /* Dummy use of rc; presumably to silence "set but unused" warnings
     * when the assertion below compiles out -- TODO confirm.
     */
    rc = sizeof(union operation_key);

    /* Check that sizeof(pj_ioqueue_op_key_t) makes sense. */
    PJ_ASSERT_RETURN(sizeof(pj_ioqueue_op_key_t)-sizeof(void*) >=
                     sizeof(union operation_key), PJ_EBUG);

    /* Create IOCP */
    ioqueue = pj_pool_zalloc(pool, sizeof(*ioqueue));
    ioqueue->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
    if (ioqueue->iocp == NULL)
        return PJ_RETURN_OS_ERROR(GetLastError());

    /* Create IOCP mutex */
    rc = pj_lock_create_recursive_mutex(pool, NULL, &ioqueue->lock);
    if (rc != PJ_SUCCESS) {
        CloseHandle(ioqueue->iocp);
        return rc;
    }

    ioqueue->auto_delete_lock = PJ_TRUE;
    ioqueue->default_concurrency = PJ_IOQUEUE_DEFAULT_ALLOW_CONCURRENCY;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /*
     * Create and initialize key pools.
     */
    pj_list_init(&ioqueue->active_list);
    pj_list_init(&ioqueue->free_list);
    pj_list_init(&ioqueue->closing_list);

    /* Preallocate keys according to max_fd setting, and put them
     * in free_list.
     */
    for (i=0; i<max_fd; ++i) {
        pj_ioqueue_key_t *key;

        key = pj_pool_alloc(pool, sizeof(pj_ioqueue_key_t));

        rc = pj_atomic_create(pool, 0, &key->ref_count);
        if (rc != PJ_SUCCESS)
            goto on_error;

        rc = pj_mutex_create_recursive(pool, "ioqkey", &key->mutex);
        if (rc != PJ_SUCCESS) {
            pj_atomic_destroy(key->ref_count);
            goto on_error;
        }

        pj_list_push_back(&ioqueue->free_list, key);
    }
#endif

    *p_ioqueue = ioqueue;

    PJ_LOG(4, ("pjlib", "WinNT IOCP I/O Queue created (%p)", ioqueue));
    return PJ_SUCCESS;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
on_error:
    /* Roll back everything created so far. Fixes two problems in the
     * original error handling: the two duplicated key-cleanup loops are
     * merged here, and ioqueue->lock -- previously leaked on these
     * paths -- is now destroyed as well.
     */
    {
        pj_ioqueue_key_t *key = ioqueue->free_list.next;
        while (key != &ioqueue->free_list) {
            pj_atomic_destroy(key->ref_count);
            pj_mutex_destroy(key->mutex);
            key = key->next;
        }
    }
    pj_lock_destroy(ioqueue->lock);
    CloseHandle(ioqueue->iocp);
    return rc;
#endif
}
/*
 * udp_destroy()
 *
 * This function is called by transport manager (by transport->destroy()).
 * Unregisters the socket from the ioqueue (or closes it directly), polls
 * the ioqueue until IOCP has delivered the close callbacks for all
 * pending reads, then frees rdata pools, the ref counter, the lock, and
 * finally the transport pool.
 */
static pj_status_t udp_destroy( pjsip_transport *transport )
{
    struct udp_transport *tp = (struct udp_transport*)transport;
    int i;

    /* Mark this transport as closing. */
    tp->is_closing = 1;

    /* Cancel all pending operations. */
    /* blp: NO NO NO...
     * No need to post queued completion as we poll the ioqueue until
     * we've got events anyway. Posting completion will only cause
     * callback to be called twice with IOCP: one for the post completion
     * and another one for closing the socket.
     *
    for (i=0; i<tp->rdata_cnt; ++i) {
        pj_ioqueue_post_completion(tp->key,
                                   &tp->rdata[i]->tp_info.op_key.op_key, -1);
    }
    */

    /* Unregister from ioqueue. */
    if (tp->key) {
        pj_ioqueue_unregister(tp->key);
        tp->key = NULL;
    } else {
        /* Close socket. */
        if (tp->sock && tp->sock != PJ_INVALID_SOCKET) {
            pj_sock_close(tp->sock);
            tp->sock = PJ_INVALID_SOCKET;
        }
    }

    /* Must poll ioqueue because IOCP calls the callback when socket
     * is closed. We poll the ioqueue until all pending callbacks
     * have been called.
     * NOTE(review): the loop presumably relies on the close callbacks
     * incrementing is_closing once per rdata, and gives up after 50
     * iterations -- confirm against the read-complete callback.
     */
    for (i=0; i<50 && tp->is_closing < 1+tp->rdata_cnt; ++i) {
        int cnt;
        pj_time_val timeout = {0, 1};

        cnt = pj_ioqueue_poll(pjsip_endpt_get_ioqueue(transport->endpt),
                              &timeout);
        if (cnt == 0)
            break;
    }

    /* Destroy rdata */
    for (i=0; i<tp->rdata_cnt; ++i) {
        pj_pool_release(tp->rdata[i]->tp_info.pool);
    }

    /* Destroy reference counter. */
    if (tp->base.ref_cnt)
        pj_atomic_destroy(tp->base.ref_cnt);

    /* Destroy lock */
    if (tp->base.lock)
        pj_lock_destroy(tp->base.lock);

    /* Destroy pool. */
    pjsip_endpt_release_pool(tp->base.endpt, tp->base.pool);

    return PJ_SUCCESS;
}
/* Start loop transport.
 * Creates the loopback transport object in its own pool, spawns its
 * worker thread (suspended), registers it with the transport manager,
 * then resumes the thread. On any failure, all partially-created
 * resources are rolled back and the pool released.
 */
PJ_DEF(pj_status_t) pjsip_loop_start( pjsip_endpoint *endpt,
                                      pjsip_transport **transport)
{
    pj_pool_t *pool;
    struct loop_transport *loop;
    pj_status_t status;

    /* Create pool. */
    pool = pjsip_endpt_create_pool(endpt, "loop", 4000, 4000);
    if (!pool)
        return PJ_ENOMEM;

    /* Create the loop structure. */
    loop = pj_pool_zalloc(pool, sizeof(struct loop_transport));

    /* Initialize transport properties. */
    pj_ansi_snprintf(loop->base.obj_name, sizeof(loop->base.obj_name),
                     "loop%p", loop);
    loop->base.pool = pool;
    status = pj_atomic_create(pool, 0, &loop->base.ref_cnt);
    if (status != PJ_SUCCESS)
        goto on_error;
    status = pj_lock_create_recursive_mutex(pool, "loop", &loop->base.lock);
    if (status != PJ_SUCCESS)
        goto on_error;
    loop->base.key.type = PJSIP_TRANSPORT_LOOP_DGRAM;
    //loop->base.key.rem_addr.sa_family = PJ_AF_INET;
    loop->base.type_name = "LOOP-DGRAM";
    loop->base.info = "LOOP-DGRAM";
    loop->base.flag = PJSIP_TRANSPORT_DATAGRAM;
    loop->base.local_name.host = pj_str(ADDR_LOOP_DGRAM);
    loop->base.local_name.port =
        pjsip_transport_get_default_port_for_type(loop->base.key.type);
    loop->base.addr_len = sizeof(pj_sockaddr_in);
    loop->base.endpt = endpt;
    loop->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    loop->base.send_msg = &loop_send_msg;
    loop->base.destroy = &loop_destroy;

    pj_list_init(&loop->recv_list);
    pj_list_init(&loop->send_list);

    /* Create worker thread (suspended until registration succeeds). */
    status = pj_thread_create(pool, "loop",
                              &loop_transport_worker_thread, loop, 0,
                              PJ_THREAD_SUSPENDED, &loop->thread);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Register to transport manager. */
    status = pjsip_transport_register( loop->base.tpmgr, &loop->base);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Start the thread. */
    status = pj_thread_resume(loop->thread);
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Done.
     */
    if (transport)
        *transport = &loop->base;

    return PJ_SUCCESS;

on_error:
    if (loop->base.lock)
        pj_lock_destroy(loop->base.lock);
    if (loop->thread)
        pj_thread_destroy(loop->thread);
    if (loop->base.ref_cnt)
        pj_atomic_destroy(loop->base.ref_cnt);
    /* Fix: release via the local 'pool'. The code previously used
     * 'loop->pool', which is never assigned in this function (only
     * loop->base.pool is set), so the cleanup path passed an unset
     * pointer from the zeroed struct.
     */
    pjsip_endpt_release_pool(endpt, pool);
    return status;
}