/*
 * Common function to create TLS transport, called when pending accept() and
 * pending connect() complete.
 */
static pj_status_t tls_create( struct tls_listener *listener,
                               pj_pool_t *pool,
                               pj_ssl_sock_t *ssock,
                               pj_bool_t is_server,
                               const pj_sockaddr_in *local,
                               const pj_sockaddr_in *remote,
                               const pj_str_t *remote_name,
                               struct tls_transport **p_tls)
{
    struct tls_transport *tls;
    const pj_str_t ka_pkt = PJSIP_TLS_KEEP_ALIVE_DATA;
    pj_status_t status;

    PJ_ASSERT_RETURN(listener && ssock && local && remote && p_tls, PJ_EINVAL);

    if (pool == NULL) {
        pool = pjsip_endpt_create_pool(listener->endpt, "tls",
                                       POOL_TP_INIT, POOL_TP_INC);
        PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
    }

    /*
     * Create and initialize basic transport structure.
     */
    tls = PJ_POOL_ZALLOC_T(pool, struct tls_transport);
    tls->is_server = is_server;
    tls->verify_server = listener->tls_setting.verify_server;
    pj_list_init(&tls->delayed_list);
    tls->base.pool = pool;

    pj_ansi_snprintf(tls->base.obj_name, PJ_MAX_OBJ_NAME,
                     (is_server ? "tlss%p" : "tlsc%p"), tls);

    status = pj_atomic_create(pool, 0, &tls->base.ref_cnt);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, "tls", &tls->base.lock);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    if (remote_name)
        pj_strdup(pool, &tls->remote_name, remote_name);

    tls->base.key.type = PJSIP_TRANSPORT_TLS;
    pj_memcpy(&tls->base.key.rem_addr, remote, sizeof(pj_sockaddr_in));
    tls->base.type_name = "tls";
    tls->base.flag = pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TLS);

    tls->base.info = (char*) pj_pool_alloc(pool, 64);
    pj_ansi_snprintf(tls->base.info, 64, "TLS to %s:%d",
                     pj_inet_ntoa(remote->sin_addr),
                     (int)pj_ntohs(remote->sin_port));

    tls->base.addr_len = sizeof(pj_sockaddr_in);
    tls->base.dir = is_server ? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;

    /* Set initial local address */
    if (!pj_sockaddr_has_addr(local)) {
        pj_sockaddr_cp(&tls->base.local_addr, &listener->factory.local_addr);
    } else {
        pj_sockaddr_cp(&tls->base.local_addr, local);
    }

    sockaddr_to_host_port(pool, &tls->base.local_name,
                          (pj_sockaddr_in*)&tls->base.local_addr);
    if (tls->remote_name.slen) {
        tls->base.remote_name.host = tls->remote_name;
        tls->base.remote_name.port = pj_sockaddr_in_get_port(remote);
    } else {
        sockaddr_to_host_port(pool, &tls->base.remote_name, remote);
    }

    tls->base.endpt = listener->endpt;
    tls->base.tpmgr = listener->tpmgr;
    tls->base.send_msg = &tls_send_msg;
    tls->base.do_shutdown = &tls_shutdown;
    tls->base.destroy = &tls_destroy_transport;

    tls->ssock = ssock;

    /* Register transport to transport manager */
    status = pjsip_transport_register(listener->tpmgr, &tls->base);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    tls->is_registered = PJ_TRUE;

    /* Initialize keep-alive timer */
    tls->ka_timer.user_data = (void*)tls;
    tls->ka_timer.cb = &tls_keep_alive_timer;
    pj_ioqueue_op_key_init(&tls->ka_op_key.key, sizeof(pj_ioqueue_op_key_t));
    pj_strdup(tls->base.pool, &tls->ka_pkt, &ka_pkt);

    /* Done setting up basic transport. */
    *p_tls = tls;

    PJ_LOG(4, (tls->base.obj_name, "TLS %s transport created",
               (tls->is_server ? "server" : "client")));

    return PJ_SUCCESS;

on_error:
    tls_destroy(&tls->base, status);
    return status;
}
/*
 * pjsip_udp_transport_attach()
 *
 * Attach UDP socket and start transport.
 */
static pj_status_t transport_attach( pjsip_endpoint *endpt,
                                     pjsip_transport_type_e type,
                                     pj_sock_t sock,
                                     const pjsip_host_port *a_name,
                                     unsigned async_cnt,
                                     pjsip_transport **p_transport)
{
    pj_pool_t *pool;
    struct udp_transport *tp;
    const char *format, *ipv6_quoteb, *ipv6_quotee;
    unsigned i;
    pj_status_t status;

    PJ_ASSERT_RETURN(endpt && sock!=PJ_INVALID_SOCKET && a_name && async_cnt>0,
                     PJ_EINVAL);

    /* Object name. */
    if (type & PJSIP_TRANSPORT_IPV6) {
        format = "udpv6%p";
        ipv6_quoteb = "[";
        ipv6_quotee = "]";
    } else {
        format = "udp%p";
        ipv6_quoteb = ipv6_quotee = "";
    }

    /* Create pool. */
    pool = pjsip_endpt_create_pool(endpt, format, PJSIP_POOL_LEN_TRANSPORT,
                                   PJSIP_POOL_INC_TRANSPORT);
    if (!pool)
        return PJ_ENOMEM;

    /* Create the UDP transport object. */
    tp = PJ_POOL_ZALLOC_T(pool, struct udp_transport);

    /* Save pool. */
    tp->base.pool = pool;

    pj_memcpy(tp->base.obj_name, pool->obj_name, PJ_MAX_OBJ_NAME);

    /* Init reference counter. */
    status = pj_atomic_create(pool, 0, &tp->base.ref_cnt);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Init lock. */
    status = pj_lock_create_recursive_mutex(pool, pool->obj_name,
                                            &tp->base.lock);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Set type. */
    tp->base.key.type = type;

    /* Remote address is left zero (except the family) */
    tp->base.key.rem_addr.addr.sa_family = (pj_uint16_t)
        ((type & PJSIP_TRANSPORT_IPV6) ? pj_AF_INET6() : pj_AF_INET());

    /* Type name. */
    tp->base.type_name = "UDP";

    /* Transport flag */
    tp->base.flag = pjsip_transport_get_flag_from_type(type);

    /* Length of addresses. */
    tp->base.addr_len = sizeof(tp->base.local_addr);

    /* Init local address. */
    status = pj_sock_getsockname(sock, &tp->base.local_addr,
                                 &tp->base.addr_len);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Init remote name. */
    if (type == PJSIP_TRANSPORT_UDP)
        tp->base.remote_name.host = pj_str("0.0.0.0");
    else
        tp->base.remote_name.host = pj_str("::0");
    tp->base.remote_name.port = 0;

    /* Init direction */
    tp->base.dir = PJSIP_TP_DIR_NONE;

    /* Set endpoint. */
    tp->base.endpt = endpt;

    /* Transport manager and timer will be initialized by tpmgr */

    /* Attach socket and assign name. */
    udp_set_socket(tp, sock, a_name);

    /* Register to ioqueue */
    status = register_to_ioqueue(tp);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Set functions. */
    tp->base.send_msg = &udp_send_msg;
    tp->base.do_shutdown = &udp_shutdown;
    tp->base.destroy = &udp_destroy;

    /* This is a permanent transport, so we initialize the ref count
     * to one so that the transport manager doesn't destroy this transport
     * when there's no user!
     */
    pj_atomic_inc(tp->base.ref_cnt);

    /* Register to transport manager. */
    tp->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    status = pjsip_transport_register(tp->base.tpmgr, (pjsip_transport*)tp);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Create rdata and put it in the array. */
    tp->rdata_cnt = 0;
    tp->rdata = (pjsip_rx_data**)
        pj_pool_calloc(tp->base.pool, async_cnt, sizeof(pjsip_rx_data*));
    for (i=0; i<async_cnt; ++i) {
        pj_pool_t *rdata_pool = pjsip_endpt_create_pool(endpt, "rtd%p",
                                                        PJSIP_POOL_RDATA_LEN,
                                                        PJSIP_POOL_RDATA_INC);
        if (!rdata_pool) {
            pj_atomic_set(tp->base.ref_cnt, 0);
            pjsip_transport_destroy(&tp->base);
            return PJ_ENOMEM;
        }

        init_rdata(tp, i, rdata_pool, NULL);
        tp->rdata_cnt++;
    }

    /* Start reading the ioqueue. */
    status = start_async_read(tp);
    if (status != PJ_SUCCESS) {
        pjsip_transport_destroy(&tp->base);
        return status;
    }

    /* Done. */
    if (p_transport)
        *p_transport = &tp->base;

    PJ_LOG(4, (tp->base.obj_name,
               "SIP %s started, published address is %s%.*s%s:%d",
               pjsip_transport_get_type_desc((pjsip_transport_type_e)tp->base.key.type),
               ipv6_quoteb,
               (int)tp->base.local_name.host.slen,
               tp->base.local_name.host.ptr,
               ipv6_quotee,
               tp->base.local_name.port));

    return PJ_SUCCESS;

on_error:
    udp_destroy((pjsip_transport*)tp);
    return status;
}
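/*
 * Usage sketch (not part of the file above): transport_attach() is an
 * internal helper; applications normally reach it through the public
 * pjsip_udp_transport_start() wrapper, which creates and binds the UDP
 * socket itself before attaching it. `endpt` is assumed to be an
 * already-created pjsip_endpoint.
 */
static pj_status_t start_udp_example(pjsip_endpoint *endpt)
{
    pj_sockaddr_in addr;
    pjsip_transport *udp_tp;
    pj_status_t status;

    /* Bind to the wildcard address on port 5060; the published name is
     * derived from the bound socket when no a_name is given. */
    pj_sockaddr_in_init(&addr, NULL, 5060);
    status = pjsip_udp_transport_start(endpt, &addr, NULL, 1, &udp_tp);
    if (status != PJ_SUCCESS)
        return status;

    return PJ_SUCCESS;
}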
/*
 * Common function to create TCP transport, called when pending accept() and
 * pending connect() complete.
 */
static pj_status_t tcp_create( struct tcp_listener *listener,
                               pj_pool_t *pool,
                               pj_sock_t sock, pj_bool_t is_server,
                               const pj_sockaddr *local,
                               const pj_sockaddr *remote,
                               struct tcp_transport **p_tcp)
{
    struct tcp_transport *tcp;
    pj_ioqueue_t *ioqueue;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb tcp_callback;
    const pj_str_t ka_pkt = PJSIP_TCP_KEEP_ALIVE_DATA;
    char print_addr[PJ_INET6_ADDRSTRLEN+10];
    pj_status_t status;

    PJ_ASSERT_RETURN(sock != PJ_INVALID_SOCKET, PJ_EINVAL);

    if (pool == NULL) {
        pool = pjsip_endpt_create_pool(listener->endpt, "tcp",
                                       POOL_TP_INIT, POOL_TP_INC);
        PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
    }

    /*
     * Create and initialize basic transport structure.
     */
    tcp = PJ_POOL_ZALLOC_T(pool, struct tcp_transport);
    tcp->is_server = is_server;
    tcp->sock = sock;
    /*tcp->listener = listener;*/
    pj_list_init(&tcp->delayed_list);
    tcp->base.pool = pool;

    pj_ansi_snprintf(tcp->base.obj_name, PJ_MAX_OBJ_NAME,
                     (is_server ? "tcps%p" : "tcpc%p"), tcp);

    status = pj_atomic_create(pool, 0, &tcp->base.ref_cnt);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, "tcp", &tcp->base.lock);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    tcp->base.key.type = listener->factory.type;
    pj_sockaddr_cp(&tcp->base.key.rem_addr, remote);
    tcp->base.type_name = (char*)pjsip_transport_get_type_name(
                                (pjsip_transport_type_e)tcp->base.key.type);
    tcp->base.flag = pjsip_transport_get_flag_from_type(
                                (pjsip_transport_type_e)tcp->base.key.type);

    tcp->base.info = (char*) pj_pool_alloc(pool, 64);
    pj_ansi_snprintf(tcp->base.info, 64, "%s to %s",
                     tcp->base.type_name,
                     pj_sockaddr_print(remote, print_addr,
                                       sizeof(print_addr), 3));

    tcp->base.addr_len = pj_sockaddr_get_len(remote);
    pj_sockaddr_cp(&tcp->base.local_addr, local);
    sockaddr_to_host_port(pool, &tcp->base.local_name, local);
    sockaddr_to_host_port(pool, &tcp->base.remote_name, remote);
    tcp->base.dir = is_server ? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;

    tcp->base.endpt = listener->endpt;
    tcp->base.tpmgr = listener->tpmgr;
    tcp->base.send_msg = &tcp_send_msg;
    tcp->base.do_shutdown = &tcp_shutdown;
    tcp->base.destroy = &tcp_destroy_transport;

    /* Create active socket */
    pj_activesock_cfg_default(&asock_cfg);
    asock_cfg.async_cnt = 1;

    pj_bzero(&tcp_callback, sizeof(tcp_callback));
    tcp_callback.on_data_read = &on_data_read;
    tcp_callback.on_data_sent = &on_data_sent;
    tcp_callback.on_connect_complete = &on_connect_complete;

    ioqueue = pjsip_endpt_get_ioqueue(listener->endpt);
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
                                  ioqueue, &tcp_callback, tcp, &tcp->asock);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    /* Register transport to transport manager */
    status = pjsip_transport_register(listener->tpmgr, &tcp->base);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    tcp->is_registered = PJ_TRUE;

    /* Initialize keep-alive timer */
    tcp->ka_timer.user_data = (void*)tcp;
    tcp->ka_timer.cb = &tcp_keep_alive_timer;
    pj_ioqueue_op_key_init(&tcp->ka_op_key.key, sizeof(pj_ioqueue_op_key_t));
    pj_strdup(tcp->base.pool, &tcp->ka_pkt, &ka_pkt);

    /* Done setting up basic transport. */
    *p_tcp = tcp;

    PJ_LOG(4, (tcp->base.obj_name, "TCP %s transport created",
               (tcp->is_server ? "server" : "client")));

    return PJ_SUCCESS;

on_error:
    tcp_destroy(&tcp->base, status);
    return status;
}
/*!
 * \brief Create a pjsip transport.
 */
static int transport_create(void *data)
{
    struct transport_create_data *create_data = data;
    struct ws_transport *newtransport = NULL;
    pjsip_endpoint *endpt = ast_sip_get_pjsip_endpoint();
    struct pjsip_tpmgr *tpmgr = pjsip_endpt_get_tpmgr(endpt);

    pj_pool_t *pool;
    pj_str_t buf;
    pj_status_t status;

    newtransport = ao2_t_alloc_options(sizeof(*newtransport), transport_dtor,
                                       AO2_ALLOC_OPT_LOCK_NOLOCK,
                                       "pjsip websocket transport");
    if (!newtransport) {
        ast_log(LOG_ERROR, "Failed to allocate WebSocket transport.\n");
        goto on_error;
    }

    newtransport->transport.endpt = endpt;

    if (!(pool = pjsip_endpt_create_pool(endpt, "ws", 512, 512))) {
        ast_log(LOG_ERROR, "Failed to allocate WebSocket endpoint pool.\n");
        goto on_error;
    }

    newtransport->transport.pool = pool;
    newtransport->ws_session = create_data->ws_session;

    /* Keep the session until transport dies */
    ast_websocket_ref(newtransport->ws_session);

    status = pj_atomic_create(pool, 0, &newtransport->transport.ref_cnt);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, pool->obj_name,
                                            &newtransport->transport.lock);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    pj_sockaddr_parse(pj_AF_UNSPEC(), 0,
                      pj_cstr(&buf, ast_sockaddr_stringify(
                          ast_websocket_remote_address(newtransport->ws_session))),
                      &newtransport->transport.key.rem_addr);
    newtransport->transport.key.rem_addr.addr.sa_family = pj_AF_INET();
    newtransport->transport.key.type =
        ast_websocket_is_secure(newtransport->ws_session)
            ? transport_type_wss : transport_type_ws;

    newtransport->transport.addr_len =
        pj_sockaddr_get_len(&newtransport->transport.key.rem_addr);

    pj_sockaddr_cp(&newtransport->transport.local_addr,
                   &newtransport->transport.key.rem_addr);

    newtransport->transport.local_name.host.ptr =
        (char *)pj_pool_alloc(pool, newtransport->transport.addr_len + 4);
    pj_sockaddr_print(&newtransport->transport.key.rem_addr,
                      newtransport->transport.local_name.host.ptr,
                      newtransport->transport.addr_len + 4, 0);
    newtransport->transport.local_name.host.slen =
        pj_ansi_strlen(newtransport->transport.local_name.host.ptr);
    newtransport->transport.local_name.port =
        pj_sockaddr_get_port(&newtransport->transport.key.rem_addr);

    newtransport->transport.type_name =
        (char *)pjsip_transport_get_type_name(newtransport->transport.key.type);
    newtransport->transport.flag =
        pjsip_transport_get_flag_from_type(
            (pjsip_transport_type_e)newtransport->transport.key.type);
    newtransport->transport.info =
        (char *)pj_pool_alloc(newtransport->transport.pool, 64);

    newtransport->transport.tpmgr = tpmgr;
    newtransport->transport.send_msg = &ws_send_msg;
    newtransport->transport.destroy = &ws_destroy;

    status = pjsip_transport_register(newtransport->transport.tpmgr,
                                      (pjsip_transport *)newtransport);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    /* Add a reference for pjsip transport manager */
    ao2_ref(newtransport, +1);

    newtransport->rdata.tp_info.transport = &newtransport->transport;
    newtransport->rdata.tp_info.pool = pjsip_endpt_create_pool(endpt, "rtd%p",
                                                               PJSIP_POOL_RDATA_LEN,
                                                               PJSIP_POOL_RDATA_INC);
    if (!newtransport->rdata.tp_info.pool) {
        ast_log(LOG_ERROR, "Failed to allocate WebSocket rdata.\n");
        pjsip_transport_destroy((pjsip_transport *)newtransport);
        goto on_error;
    }

    create_data->transport = newtransport;
    return 0;

on_error:
    ao2_cleanup(newtransport);
    return -1;
}
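/*
 * Hypothetical dispatch sketch (assumption, not taken from the file above):
 * transport_create() is written as a task callback, so it would typically be
 * pushed onto a serializer and run synchronously from a registered PJLIB
 * thread. The `serializer` and `session` parameters stand in for whatever the
 * WebSocket callback provides in the real module.
 */
static void on_ws_session_established(struct ast_taskprocessor *serializer,
                                      struct ast_websocket *session)
{
    struct transport_create_data create_data = { .ws_session = session };

    if (ast_sip_push_task_synchronous(serializer, transport_create, &create_data)) {
        ast_log(LOG_ERROR, "Could not create WebSocket transport.\n");
        return;
    }
    /* create_data.transport now points to the registered ws_transport. */
}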
/* Start loop transport. */
PJ_DEF(pj_status_t) pjsip_loop_start( pjsip_endpoint *endpt,
                                      pjsip_transport **transport)
{
    pj_pool_t *pool;
    struct loop_transport *loop;
    pj_status_t status;

    /* Create pool. */
    pool = pjsip_endpt_create_pool(endpt, "loop", 4000, 4000);
    if (!pool)
        return PJ_ENOMEM;

    /* Create the loop structure. */
    loop = pj_pool_zalloc(pool, sizeof(struct loop_transport));

    /* Initialize transport properties. */
    pj_ansi_snprintf(loop->base.obj_name, sizeof(loop->base.obj_name),
                     "loop%p", loop);
    loop->base.pool = pool;
    status = pj_atomic_create(pool, 0, &loop->base.ref_cnt);
    if (status != PJ_SUCCESS)
        goto on_error;
    status = pj_lock_create_recursive_mutex(pool, "loop", &loop->base.lock);
    if (status != PJ_SUCCESS)
        goto on_error;
    loop->base.key.type = PJSIP_TRANSPORT_LOOP_DGRAM;
    //loop->base.key.rem_addr.sa_family = PJ_AF_INET;
    loop->base.type_name = "LOOP-DGRAM";
    loop->base.info = "LOOP-DGRAM";
    loop->base.flag = PJSIP_TRANSPORT_DATAGRAM;
    loop->base.local_name.host = pj_str(ADDR_LOOP_DGRAM);
    loop->base.local_name.port =
        pjsip_transport_get_default_port_for_type(loop->base.key.type);
    loop->base.addr_len = sizeof(pj_sockaddr_in);
    loop->base.endpt = endpt;
    loop->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    loop->base.send_msg = &loop_send_msg;
    loop->base.destroy = &loop_destroy;

    pj_list_init(&loop->recv_list);
    pj_list_init(&loop->send_list);

    /* Create worker thread. */
    status = pj_thread_create(pool, "loop", &loop_transport_worker_thread,
                              loop, 0, PJ_THREAD_SUSPENDED, &loop->thread);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Register to transport manager. */
    status = pjsip_transport_register(loop->base.tpmgr, &loop->base);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Start the thread. */
    status = pj_thread_resume(loop->thread);
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Done.
     */
    if (transport)
        *transport = &loop->base;

    return PJ_SUCCESS;

on_error:
    if (loop->base.lock)
        pj_lock_destroy(loop->base.lock);
    if (loop->thread)
        pj_thread_destroy(loop->thread);
    if (loop->base.ref_cnt)
        pj_atomic_destroy(loop->base.ref_cnt);
    pjsip_endpt_release_pool(endpt, loop->base.pool);
    return status;
}
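/*
 * Usage sketch: the loop transport is normally started once per endpoint,
 * mostly for testing, and is then selectable through its LOOP-DGRAM type.
 * `endpt` is assumed to be an existing pjsip_endpoint.
 */
static pj_status_t start_loop_example(pjsip_endpoint *endpt)
{
    pjsip_transport *loop_tp;
    pj_status_t status;

    status = pjsip_loop_start(endpt, &loop_tp);
    if (status != PJ_SUCCESS)
        return status;

    /* loop_tp is now registered with the transport manager and ready to use. */
    return PJ_SUCCESS;
}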
SipIceTransport::SipIceTransport(pjsip_endpoint* endpt, pj_pool_t& /* pool */,
                                 long /* t_type */,
                                 const std::shared_ptr<IceTransport>& ice,
                                 int comp_id)
    : pool_(nullptr, pj_pool_release)
    , rxPool_(nullptr, pj_pool_release)
    , trData_()
    , rdata_()
    , ice_(ice)
    , comp_id_(comp_id)
{
    trData_.self = this;

    if (not ice or not ice->isRunning())
        throw std::logic_error("ice transport must exist and negotiation completed");

    RING_DBG("SipIceTransport@%p {tr=%p}", this, &trData_.base);

    auto& base = trData_.base;

    pool_.reset(pjsip_endpt_create_pool(endpt, "SipIceTransport.pool",
                                        POOL_TP_INIT, POOL_TP_INC));
    if (not pool_)
        throw std::bad_alloc();
    auto pool = pool_.get();

    pj_ansi_snprintf(base.obj_name, PJ_MAX_OBJ_NAME, "SipIceTransport");

    base.endpt = endpt;
    base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    base.pool = pool;

    rdata_.tp_info.pool = pool;

    // FIXME: not destroyed in case of exception
    if (pj_atomic_create(pool, 0, &base.ref_cnt) != PJ_SUCCESS)
        throw std::runtime_error("Can't create PJSIP atomic.");

    // FIXME: not destroyed in case of exception
    if (pj_lock_create_recursive_mutex(pool, "SipIceTransport.mutex",
                                       &base.lock) != PJ_SUCCESS)
        throw std::runtime_error("Can't create PJSIP mutex.");

    auto remote = ice->getRemoteAddress(comp_id);
    RING_DBG("SipIceTransport: remote is %s", remote.toString(true).c_str());
    pj_sockaddr_cp(&base.key.rem_addr, remote.pjPtr());
    base.key.type = PJSIP_TRANSPORT_UDP; //t_type;
    base.type_name =
        (char*)pjsip_transport_get_type_name((pjsip_transport_type_e)base.key.type);
    base.flag =
        pjsip_transport_get_flag_from_type((pjsip_transport_type_e)base.key.type);
    base.info = (char*) pj_pool_alloc(pool, TRANSPORT_INFO_LENGTH);

    char print_addr[PJ_INET6_ADDRSTRLEN+10];
    pj_ansi_snprintf(base.info, TRANSPORT_INFO_LENGTH, "%s to %s",
                     base.type_name,
                     pj_sockaddr_print(remote.pjPtr(), print_addr,
                                       sizeof(print_addr), 3));
    base.addr_len = remote.getLength();
    base.dir = PJSIP_TP_DIR_NONE; //is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;
    base.data = nullptr;

    /* Set initial local address */
    auto local = ice->getDefaultLocalAddress();
    pj_sockaddr_cp(&base.local_addr, local.pjPtr());

    sockaddr_to_host_port(pool, &base.local_name, &base.local_addr);
    sockaddr_to_host_port(pool, &base.remote_name, remote.pjPtr());

    base.send_msg = [](pjsip_transport *transport,
                       pjsip_tx_data *tdata,
                       const pj_sockaddr_t *rem_addr, int addr_len,
                       void *token, pjsip_transport_callback callback) {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        return this_->send(tdata, rem_addr, addr_len, token, callback);
    };
    base.do_shutdown = [](pjsip_transport *transport) -> pj_status_t {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        RING_WARN("SipIceTransport@%p: shutdown", this_);
        return PJ_SUCCESS;
    };
    base.destroy = [](pjsip_transport *transport) -> pj_status_t {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        RING_WARN("SipIceTransport@%p: destroy", this_);
        delete this_;
        return PJ_SUCCESS;
    };

    /* Init rdata */
    rxPool_.reset(pjsip_endpt_create_pool(base.endpt, "SipIceTransport.rtd%p",
                                          PJSIP_POOL_RDATA_LEN,
                                          PJSIP_POOL_RDATA_INC));
    if (not rxPool_)
        throw std::bad_alloc();
    auto rx_pool = rxPool_.get();

    rdata_.tp_info.pool = rx_pool;
    rdata_.tp_info.transport = &base;
    rdata_.tp_info.tp_data = this;
    rdata_.tp_info.op_key.rdata = &rdata_;
    pj_ioqueue_op_key_init(&rdata_.tp_info.op_key.op_key,
                           sizeof(pj_ioqueue_op_key_t));

    rdata_.pkt_info.src_addr = base.key.rem_addr;
    rdata_.pkt_info.src_addr_len = sizeof(rdata_.pkt_info.src_addr);
    auto rem_addr = &base.key.rem_addr;
    pj_sockaddr_print(rem_addr, rdata_.pkt_info.src_name,
                      sizeof(rdata_.pkt_info.src_name), 0);
    rdata_.pkt_info.src_port = pj_sockaddr_get_port(rem_addr);
    rdata_.pkt_info.len = 0;
    rdata_.pkt_info.zero = 0;

    if (pjsip_transport_register(base.tpmgr, &base) != PJ_SUCCESS)
        throw std::runtime_error("Can't register PJSIP transport.");
    is_registered_ = true;

    Manager::instance().registerEventHandler((uintptr_t)this, [this]{ loop(); });
}