Example #1
/* Register socket to ioqueue */
static pj_status_t register_to_ioqueue(struct udp_transport *tp)
{
    pj_ioqueue_t *ioqueue;
    pj_ioqueue_callback ioqueue_cb;
    pj_status_t status;

    /* Ignore if already registered */
    if (tp->key != NULL)
    	return PJ_SUCCESS;

    /* Create group lock */
    status = pj_grp_lock_create(tp->base.pool, NULL, &tp->grp_lock);
    if (status != PJ_SUCCESS)
	return status;

    pj_grp_lock_add_ref(tp->grp_lock);
    pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp, &udp_on_destroy);
    
    /* Register to ioqueue. */
    ioqueue = pjsip_endpt_get_ioqueue(tp->base.endpt);
    pj_memset(&ioqueue_cb, 0, sizeof(ioqueue_cb));
    ioqueue_cb.on_read_complete = &udp_on_read_complete;
    ioqueue_cb.on_write_complete = &udp_on_write_complete;

    return pj_ioqueue_register_sock2(tp->base.pool, ioqueue, tp->sock,
				     tp->grp_lock, tp, &ioqueue_cb, &tp->key);
}
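
Every pj_grp_lock_add_ref() in these examples is balanced by a pj_grp_lock_dec_ref() somewhere in the matching destroy path, at which point the handler registered with pj_grp_lock_add_handler() runs. A minimal standalone sketch of that lifetime pattern (the handler name on_member_destroy and the demo function are illustrative, not from the sources above):

#include <pjlib.h>

static void on_member_destroy(void *member)
{
    /* Runs exactly once, when the reference count reaches zero */
    PJ_LOG(4, ("grpdemo", "member %p can now be destroyed", member));
}

static pj_status_t grp_lock_lifetime_demo(pj_pool_t *pool, void *member)
{
    pj_grp_lock_t *glock;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &glock);
    if (status != PJ_SUCCESS)
	return status;

    pj_grp_lock_add_ref(glock);			/* count: 0 -> 1 */
    pj_grp_lock_add_handler(glock, pool, member, &on_member_destroy);

    /* ... member is in use ... */

    return pj_grp_lock_dec_ref(glock);		/* 1 -> 0: handler fires */
}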
Example #2
pj_status_t pj_grp_lock_t::grp_lock_acquire()
{
	Q_ASSERT(ref_cnt > 0);

	for (QMap<int, pj_lock_t*>::ConstIterator it=lock_list.constBegin(); it!=lock_list.constEnd(); ++it)
		(*it)->pj_lock_acquire();
	grp_lock_set_owner_thread();
	pj_grp_lock_add_ref();
	return PJ_SUCCESS;
}
Example #3
PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
					    const char *name,
					    const pj_stun_session_cb *cb,
					    pj_bool_t fingerprint,
					    pj_grp_lock_t *grp_lock,
					    pj_stun_session **p_sess)
{
    pj_pool_t	*pool;
    pj_stun_session *sess;
    pj_status_t status;

    PJ_ASSERT_RETURN(cfg && cb && p_sess, PJ_EINVAL);

    if (name==NULL)
	name = "stuse%p";

    pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS, 
			  PJNATH_POOL_INC_STUN_SESS, NULL);
    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);

    sess = PJ_POOL_ZALLOC_T(pool, pj_stun_session);
    sess->cfg = cfg;
    sess->pool = pool;
    pj_memcpy(&sess->cb, cb, sizeof(*cb));
    sess->use_fingerprint = fingerprint;
    sess->log_flag = 0xFFFF;

    if (grp_lock) {
	sess->grp_lock = grp_lock;
    } else {
	status = pj_grp_lock_create(pool, NULL, &sess->grp_lock);
	if (status != PJ_SUCCESS) {
	    pj_pool_release(pool);
	    return status;
	}
    }

    pj_grp_lock_add_ref(sess->grp_lock);
    pj_grp_lock_add_handler(sess->grp_lock, pool, sess,
                            &stun_sess_on_destroy);

    pj_stun_session_set_software_name(sess, &cfg->software_name);

    sess->rx_pool = pj_pool_create(sess->cfg->pf, name,
				   PJNATH_POOL_LEN_STUN_TDATA,
				   PJNATH_POOL_INC_STUN_TDATA, NULL);

    pj_list_init(&sess->pending_request_list);
    pj_list_init(&sess->cached_response_list);

    *p_sess = sess;

    return PJ_SUCCESS;
}
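
The grp_lock parameter above lets a caller share one group lock across several objects, so their callbacks serialize on a single lock. A sketch of passing a caller-owned lock to two sessions (the helper function and session names are illustrative; error cleanup elided):

#include <pjnath.h>

static pj_status_t create_two_sessions_one_lock(pj_stun_config *stun_cfg,
						const pj_stun_session_cb *sess_cb,
						pj_pool_t *pool,
						pj_stun_session **p_a,
						pj_stun_session **p_b)
{
    pj_grp_lock_t *glock;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &glock);
    if (status != PJ_SUCCESS)
	return status;

    /* Each session takes its own reference on the shared lock */
    status = pj_stun_session_create(stun_cfg, "sess-a", sess_cb,
				    PJ_FALSE, glock, p_a);
    if (status != PJ_SUCCESS)
	return status;

    return pj_stun_session_create(stun_cfg, "sess-b", sess_cb,
				  PJ_FALSE, glock, p_b);
}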
Example #4
pj_status_t pj_grp_lock_t::grp_lock_tryacquire()
{
	Q_ASSERT(ref_cnt > 0);

	QMapIterator<int, pj_lock_t*> it(lock_list);
	while (it.hasNext())
	{
		pj_status_t status = it.next().value()->pj_lock_tryacquire();
		if (status != PJ_SUCCESS)
		{	// Roll back: release the locks acquired so far, in reverse order
			it.previous();	// step back over the lock that failed
			while (it.hasPrevious())
				it.previous().value()->pj_lock_release();
			return status;
		}
	}

	grp_lock_set_owner_thread();
	pj_grp_lock_add_ref();
	return PJ_SUCCESS;
}
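
Examples 2 and 4 reimplement, in Qt terms, what pjlib exposes publicly as pj_grp_lock_acquire(), pj_grp_lock_tryacquire() and pj_grp_lock_release(). A minimal sketch against the public API (the try-then-block policy is illustrative):

#include <pjlib.h>

static pj_status_t try_then_block(pj_grp_lock_t *glock)
{
    /* Non-blocking attempt first, as in grp_lock_tryacquire() above */
    if (pj_grp_lock_tryacquire(glock) != PJ_SUCCESS) {
	/* Somebody else holds it; fall back to a blocking acquire */
	pj_grp_lock_acquire(glock);
    }

    /* ... critical section ... */

    return pj_grp_lock_release(glock);
}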
Example #5
#if PJ_TIMER_DEBUG
static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
                                       pj_timer_entry *entry,
                                       const pj_time_val *delay,
                                       pj_bool_t set_id,
                                       int id_val,
                                       pj_grp_lock_t *grp_lock,
                                       const char *src_file,
                                       int src_line)
#else
static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
                                       pj_timer_entry *entry,
                                       const pj_time_val *delay,
                                       pj_bool_t set_id,
                                       int id_val,
                                       pj_grp_lock_t *grp_lock)
#endif
{
    pj_status_t status;
    pj_time_val expires;

    PJ_ASSERT_RETURN(ht && entry && delay, PJ_EINVAL);
    PJ_ASSERT_RETURN(entry->cb != NULL, PJ_EINVAL);

    /* Prevent same entry from being scheduled more than once */
    PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);

#if PJ_TIMER_DEBUG
    entry->src_file = src_file;
    entry->src_line = src_line;
#endif
    pj_gettickcount(&expires);
    PJ_TIME_VAL_ADD(expires, *delay);
    
    lock_timer_heap(ht);
    status = schedule_entry(ht, entry, &expires);
    if (status == PJ_SUCCESS) {
	if (set_id)
	    entry->id = id_val;
	entry->_grp_lock = grp_lock;
	if (entry->_grp_lock) {
	    pj_grp_lock_add_ref(entry->_grp_lock);
	}
    }
    unlock_timer_heap(ht);

    return status;
}
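
schedule_w_grp_lock() is the internal worker behind the public pj_timer_heap_schedule_w_grp_lock() API. A sketch of arming a timer that keeps a group-lock reference while pending (the callback, timer id and delay are illustrative):

#include <pjlib.h>

static void demo_timer_cb(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(4, ("timerdemo", "timer %d fired", e->id));
}

static pj_status_t arm_timer(pj_timer_heap_t *ht, pj_timer_entry *entry,
			     void *obj, pj_grp_lock_t *glock)
{
    pj_time_val delay = { 5, 0 };	/* five seconds */

    pj_timer_entry_init(entry, 0, obj, &demo_timer_cb);

    /* While the timer is pending, the heap holds a reference on glock
     * (the pj_grp_lock_add_ref() seen above); the reference is dropped
     * when the timer fires or is cancelled.
     */
    return pj_timer_heap_schedule_w_grp_lock(ht, entry, &delay, 1, glock);
}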
Example #6
/*
 * Create.
 */
PJ_DEF(pj_status_t) pj_turn_sock_create(pj_stun_config *cfg,
					int af,
					pj_turn_tp_type conn_type,
					const pj_turn_sock_cb *cb,
					const pj_turn_sock_cfg *setting,
					void *user_data,
					pj_turn_sock **p_turn_sock)
{
    pj_turn_sock *turn_sock;
    pj_turn_session_cb sess_cb;
    pj_turn_sock_cfg default_setting;
    pj_pool_t *pool;
    const char *name_tmpl;
    pj_status_t status;

    PJ_ASSERT_RETURN(cfg && p_turn_sock, PJ_EINVAL);
    PJ_ASSERT_RETURN(af==pj_AF_INET() || af==pj_AF_INET6(), PJ_EINVAL);
    PJ_ASSERT_RETURN(conn_type!=PJ_TURN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);

    if (!setting) {
	pj_turn_sock_cfg_default(&default_setting);
	setting = &default_setting;
    }

    switch (conn_type) {
    case PJ_TURN_TP_UDP:
	name_tmpl = "udprel%p";
	break;
    case PJ_TURN_TP_TCP:
	name_tmpl = "tcprel%p";
	break;
    default:
	PJ_ASSERT_RETURN(!"Invalid TURN conn_type", PJ_EINVAL);
	name_tmpl = "tcprel%p";
	break;
    }

    /* Create and init basic data structure */
    pool = pj_pool_create(cfg->pf, name_tmpl, PJNATH_POOL_LEN_TURN_SOCK,
			  PJNATH_POOL_INC_TURN_SOCK, NULL);
    turn_sock = PJ_POOL_ZALLOC_T(pool, pj_turn_sock);
    turn_sock->pool = pool;
    turn_sock->obj_name = pool->obj_name;
    turn_sock->user_data = user_data;
    turn_sock->af = af;
    turn_sock->conn_type = conn_type;

    /* Copy STUN config (this contains ioqueue, timer heap, etc.) */
    pj_memcpy(&turn_sock->cfg, cfg, sizeof(*cfg));

    /* Copy setting (QoS parameters etc */
    pj_memcpy(&turn_sock->setting, setting, sizeof(*setting));

    /* Set callback */
    if (cb) {
	pj_memcpy(&turn_sock->cb, cb, sizeof(*cb));
    }

    /* Session lock */
    if (setting && setting->grp_lock) {
	turn_sock->grp_lock = setting->grp_lock;
    } else {
	status = pj_grp_lock_create(pool, NULL, &turn_sock->grp_lock);
	if (status != PJ_SUCCESS) {
	    pj_pool_release(pool);
	    return status;
	}
    }

    pj_grp_lock_add_ref(turn_sock->grp_lock);
    pj_grp_lock_add_handler(turn_sock->grp_lock, pool, turn_sock,
                            &turn_sock_on_destroy);

    /* Init timer */
    pj_timer_entry_init(&turn_sock->timer, TIMER_NONE, turn_sock, &timer_cb);

    /* Init TURN session */
    pj_bzero(&sess_cb, sizeof(sess_cb));
    sess_cb.on_send_pkt = &turn_on_send_pkt;
    sess_cb.on_channel_bound = &turn_on_channel_bound;
    sess_cb.on_rx_data = &turn_on_rx_data;
    sess_cb.on_state = &turn_on_state;
    status = pj_turn_session_create(cfg, pool->obj_name, af, conn_type,
                                    turn_sock->grp_lock, &sess_cb, 0,
                                    turn_sock, &turn_sock->sess);
    if (status != PJ_SUCCESS) {
	destroy(turn_sock);
	return status;
    }

    /* Note: socket and ioqueue will be created later once the TURN server
     * has been resolved.
     */

    *p_turn_sock = turn_sock;
    return PJ_SUCCESS;
}
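
Note the setting->grp_lock branch above: a caller can hand the TURN socket an existing group lock through pj_turn_sock_cfg instead of letting it create its own. A sketch (stun_cfg, turn_cb and glock are assumed to exist already; UDP over IPv4 chosen arbitrarily):

#include <pjnath.h>

static pj_status_t create_turn_with_shared_lock(pj_stun_config *stun_cfg,
						const pj_turn_sock_cb *turn_cb,
						pj_grp_lock_t *glock,
						pj_turn_sock **p_turn)
{
    pj_turn_sock_cfg setting;

    pj_turn_sock_cfg_default(&setting);
    setting.grp_lock = glock;	/* reuse the caller's lock instead of
				 * having pj_turn_sock_create() make one */

    return pj_turn_sock_create(stun_cfg, pj_AF_INET(), PJ_TURN_TP_UDP,
			       turn_cb, &setting, NULL, p_turn);
}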
Example #7
pj_status_t pj_stun_detect_nat_type(const pj_sockaddr_in *server,
				    pj_stun_config *stun_cfg,
				    void *user_data,
				    pj_stun_nat_detect_cb *cb)
{
    pj_pool_t *pool;
    nat_detect_session *sess;
    pj_stun_session_cb sess_cb;
    pj_ioqueue_callback ioqueue_cb;
    int addr_len;
    pj_status_t status;

    PJ_ASSERT_RETURN(server && stun_cfg, PJ_EINVAL);
    PJ_ASSERT_RETURN(stun_cfg->pf && stun_cfg->ioqueue && stun_cfg->timer_heap,
		     PJ_EINVAL);

    /*
     * Init NAT detection session.
     */
    pool = pj_pool_create(stun_cfg->pf, "natck%p", PJNATH_POOL_LEN_NATCK, 
			  PJNATH_POOL_INC_NATCK, NULL);
    if (!pool)
	return PJ_ENOMEM;

    sess = PJ_POOL_ZALLOC_T(pool, nat_detect_session);
    sess->pool = pool;
    sess->user_data = user_data;
    sess->cb = cb;

    status = pj_grp_lock_create(pool, NULL, &sess->grp_lock);
    if (status != PJ_SUCCESS) {
	/* Group lock not created yet, just destroy pool and return */
	pj_pool_release(pool);
	return status;
    }

    pj_grp_lock_add_ref(sess->grp_lock);
    pj_grp_lock_add_handler(sess->grp_lock, pool, sess, &sess_on_destroy);

    pj_memcpy(&sess->server, server, sizeof(pj_sockaddr_in));

    /*
     * Init timer to self-destroy.
     */
    sess->timer_heap = stun_cfg->timer_heap;
    sess->timer.cb = &on_sess_timer;
    sess->timer.user_data = sess;


    /*
     * Initialize socket.
     */
    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sess->sock);
    if (status != PJ_SUCCESS)
	goto on_error;

    /*
     * Bind to any.
     */
    pj_bzero(&sess->local_addr, sizeof(pj_sockaddr_in));
    sess->local_addr.sin_family = pj_AF_INET();
    status = pj_sock_bind(sess->sock, &sess->local_addr, 
			  sizeof(pj_sockaddr_in));
    if (status != PJ_SUCCESS)
	goto on_error;

    /*
     * Get local/bound address.
     */
    addr_len = sizeof(sess->local_addr);
    status = pj_sock_getsockname(sess->sock, &sess->local_addr, &addr_len);
    if (status != PJ_SUCCESS)
	goto on_error;

    /*
     * Find out which interface is used to send to the server.
     */
    status = get_local_interface(server, &sess->local_addr.sin_addr);
    if (status != PJ_SUCCESS)
	goto on_error;

    PJ_LOG(5,(sess->pool->obj_name, "Local address is %s:%d",
	      pj_inet_ntoa(sess->local_addr.sin_addr), 
	      pj_ntohs(sess->local_addr.sin_port)));

    PJ_LOG(5,(sess->pool->obj_name, "Server set to %s:%d",
	      pj_inet_ntoa(server->sin_addr), 
	      pj_ntohs(server->sin_port)));

    /*
     * Register socket to ioqueue to receive asynchronous input
     * notification.
     */
    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
    ioqueue_cb.on_read_complete = &on_read_complete;

    status = pj_ioqueue_register_sock2(sess->pool, stun_cfg->ioqueue, 
				       sess->sock, sess->grp_lock, sess,
				       &ioqueue_cb, &sess->key);
    if (status != PJ_SUCCESS)
	goto on_error;

    /*
     * Create STUN session.
     */
    pj_bzero(&sess_cb, sizeof(sess_cb));
    sess_cb.on_request_complete = &on_request_complete;
    sess_cb.on_send_msg = &on_send_msg;
    status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
				    PJ_FALSE, sess->grp_lock, &sess->stun_sess);
    if (status != PJ_SUCCESS)
	goto on_error;

    pj_stun_session_set_user_data(sess->stun_sess, sess);

    /*
     * Kick-off ioqueue reading.
     */
    pj_ioqueue_op_key_init(&sess->read_op, sizeof(sess->read_op));
    pj_ioqueue_op_key_init(&sess->write_op, sizeof(sess->write_op));
    on_read_complete(sess->key, &sess->read_op, 0);

    /*
     * Start TEST_1
     */
    sess->timer.id = TIMER_TEST;
    on_sess_timer(stun_cfg->timer_heap, &sess->timer);

    return PJ_SUCCESS;

on_error:
    sess_destroy(sess);
    return status;
}
Example #8
/*
 * Create the STUN transport using the specified configuration.
 */
PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
					 const char *name,
					 int af,
					 const pj_stun_sock_cb *cb,
					 const pj_stun_sock_cfg *cfg,
					 void *user_data,
					 pj_stun_sock **p_stun_sock)
{
    pj_pool_t *pool;
    pj_stun_sock *stun_sock;
    pj_stun_sock_cfg default_cfg;
    pj_sockaddr bound_addr;
    unsigned i;
    pj_uint16_t max_bind_retry;
    pj_status_t status;

    PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
    PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
    PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
    PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);

    status = pj_stun_config_check_valid(stun_cfg);
    if (status != PJ_SUCCESS)
	return status;

    if (name == NULL)
	name = "stuntp%p";

    if (cfg == NULL) {
	pj_stun_sock_cfg_default(&default_cfg);
	cfg = &default_cfg;
    }


    /* Create structure */
    pool = pj_pool_create(stun_cfg->pf, name, 256, 512, NULL);
    stun_sock = PJ_POOL_ZALLOC_T(pool, pj_stun_sock);
    stun_sock->pool = pool;
    stun_sock->obj_name = pool->obj_name;
    stun_sock->user_data = user_data;
    stun_sock->af = af;
    stun_sock->sock_fd = PJ_INVALID_SOCKET;
    pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
    pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));

    stun_sock->ka_interval = cfg->ka_interval;
    if (stun_sock->ka_interval == 0)
	stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;

    if (cfg && cfg->grp_lock) {
	stun_sock->grp_lock = cfg->grp_lock;
    } else {
	status = pj_grp_lock_create(pool, NULL, &stun_sock->grp_lock);
	if (status != PJ_SUCCESS) {
	    pj_pool_release(pool);
	    return status;
	}
    }

    pj_grp_lock_add_ref(stun_sock->grp_lock);
    pj_grp_lock_add_handler(stun_sock->grp_lock, pool, stun_sock,
			    &stun_sock_destructor);

    /* Create socket and bind socket */
    status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &stun_sock->sock_fd);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Apply QoS, if specified */
    status = pj_sock_apply_qos2(stun_sock->sock_fd, cfg->qos_type,
				&cfg->qos_params, 2, stun_sock->obj_name,
				NULL);
    if (status != PJ_SUCCESS && !cfg->qos_ignore_error)
	goto on_error;

    /* Bind socket */
    max_bind_retry = MAX_BIND_RETRY;
    if (cfg->port_range && cfg->port_range < max_bind_retry)
	max_bind_retry = cfg->port_range;
    pj_sockaddr_init(af, &bound_addr, NULL, 0);
    if (cfg->bound_addr.addr.sa_family == pj_AF_INET() || 
	cfg->bound_addr.addr.sa_family == pj_AF_INET6())
    {
	pj_sockaddr_cp(&bound_addr, &cfg->bound_addr);
    }
    status = pj_sock_bind_random(stun_sock->sock_fd, &bound_addr,
				 cfg->port_range, max_bind_retry);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Create more useful information string about this transport */
#if 0
    {
	pj_sockaddr bound_addr;
	int addr_len = sizeof(bound_addr);

	status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr, 
				     &addr_len);
	if (status != PJ_SUCCESS)
	    goto on_error;

	stun_sock->info = pj_pool_alloc(pool, PJ_INET6_ADDRSTRLEN+10);
	pj_sockaddr_print(&bound_addr, stun_sock->info, 
			  PJ_INET6_ADDRSTRLEN, 3);
    }
#endif

    /* Init active socket configuration */
    {
	pj_activesock_cfg activesock_cfg;
	pj_activesock_cb activesock_cb;

	pj_activesock_cfg_default(&activesock_cfg);
	activesock_cfg.grp_lock = stun_sock->grp_lock;
	activesock_cfg.async_cnt = cfg->async_cnt;
	activesock_cfg.concurrency = 0;

	/* Create the active socket */
	pj_bzero(&activesock_cb, sizeof(activesock_cb));
	activesock_cb.on_data_recvfrom = &on_data_recvfrom;
	activesock_cb.on_data_sent = &on_data_sent;
	status = pj_activesock_create(pool, stun_sock->sock_fd, 
				      pj_SOCK_DGRAM(), 
				      &activesock_cfg, stun_cfg->ioqueue,
				      &activesock_cb, stun_sock,
				      &stun_sock->active_sock);
	if (status != PJ_SUCCESS)
	    goto on_error;

	/* Start asynchronous read operations */
	status = pj_activesock_start_recvfrom(stun_sock->active_sock, pool,
					      cfg->max_pkt_size, 0);
	if (status != PJ_SUCCESS)
	    goto on_error;

	/* Init send keys */
	pj_ioqueue_op_key_init(&stun_sock->send_key, 
			       sizeof(stun_sock->send_key));
	pj_ioqueue_op_key_init(&stun_sock->int_send_key,
			       sizeof(stun_sock->int_send_key));
    }

    /* Create STUN session */
    {
	pj_stun_session_cb sess_cb;

	pj_bzero(&sess_cb, sizeof(sess_cb));
	sess_cb.on_request_complete = &sess_on_request_complete;
	sess_cb.on_send_msg = &sess_on_send_msg;
	status = pj_stun_session_create(&stun_sock->stun_cfg, 
					stun_sock->obj_name,
					&sess_cb, PJ_FALSE, 
					stun_sock->grp_lock,
					&stun_sock->stun_sess);
	if (status != PJ_SUCCESS)
	    goto on_error;
    }

    /* Associate us with the STUN session */
    pj_stun_session_set_user_data(stun_sock->stun_sess, stun_sock);

    /* Initialize random numbers to be used as STUN transaction ID for
     * outgoing Binding request. We use the 80bit number to distinguish
     * STUN messages we sent with STUN messages that the application sends.
     * The last 16bit value in the array is a counter.
     */
    for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
	stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
    }
    stun_sock->tsx_id[5] = 0;


    /* Init timer entry */
    stun_sock->ka_timer.cb = &ka_timer_cb;
    stun_sock->ka_timer.user_data = stun_sock;

    /* Done */
    *p_stun_sock = stun_sock;
    return PJ_SUCCESS;

on_error:
    pj_stun_sock_destroy(stun_sock);
    return status;
}
Example #9
/*
 * Common function to create TCP transport, called when pending accept() and
 * pending connect() complete.
 */
static pj_status_t tcp_create( struct tcp_listener *listener,
			       pj_pool_t *pool,
			       pj_sock_t sock, pj_bool_t is_server,
			       const pj_sockaddr *local,
			       const pj_sockaddr *remote,
			       struct tcp_transport **p_tcp)
{
    struct tcp_transport *tcp;
    pj_ioqueue_t *ioqueue;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb tcp_callback;
    const pj_str_t ka_pkt = PJSIP_TCP_KEEP_ALIVE_DATA;
    char print_addr[PJ_INET6_ADDRSTRLEN+10];
    pj_status_t status;
    

    PJ_ASSERT_RETURN(sock != PJ_INVALID_SOCKET, PJ_EINVAL);


    if (pool == NULL) {
	pool = pjsip_endpt_create_pool(listener->endpt, "tcp",
				       POOL_TP_INIT, POOL_TP_INC);
	PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
    }    

    /*
     * Create and initialize basic transport structure.
     */
    tcp = PJ_POOL_ZALLOC_T(pool, struct tcp_transport);
    tcp->is_server = is_server;
    tcp->sock = sock;
    /*tcp->listener = listener;*/
    pj_list_init(&tcp->delayed_list);
    tcp->base.pool = pool;

    pj_ansi_snprintf(tcp->base.obj_name, PJ_MAX_OBJ_NAME, 
		     (is_server ? "tcps%p" :"tcpc%p"), tcp);

    status = pj_atomic_create(pool, 0, &tcp->base.ref_cnt);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, "tcp", &tcp->base.lock);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    tcp->base.key.type = listener->factory.type;
    pj_sockaddr_cp(&tcp->base.key.rem_addr, remote);
    tcp->base.type_name = (char*)pjsip_transport_get_type_name(
				(pjsip_transport_type_e)tcp->base.key.type);
    tcp->base.flag = pjsip_transport_get_flag_from_type(
				(pjsip_transport_type_e)tcp->base.key.type);

    tcp->base.info = (char*) pj_pool_alloc(pool, 64);
    pj_ansi_snprintf(tcp->base.info, 64, "%s to %s",
                     tcp->base.type_name,
                     pj_sockaddr_print(remote, print_addr,
                                       sizeof(print_addr), 3));

    tcp->base.addr_len = pj_sockaddr_get_len(remote);
    pj_sockaddr_cp(&tcp->base.local_addr, local);
    sockaddr_to_host_port(pool, &tcp->base.local_name, local);
    sockaddr_to_host_port(pool, &tcp->base.remote_name, remote);
    tcp->base.dir = is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;

    tcp->base.endpt = listener->endpt;
    tcp->base.tpmgr = listener->tpmgr;
    tcp->base.send_msg = &tcp_send_msg;
    tcp->base.do_shutdown = &tcp_shutdown;
    tcp->base.destroy = &tcp_destroy_transport;

    /* Create group lock */
    status = pj_grp_lock_create(pool, NULL, &tcp->grp_lock);
    if (status != PJ_SUCCESS)
	goto on_error;

    pj_grp_lock_add_ref(tcp->grp_lock);
    pj_grp_lock_add_handler(tcp->grp_lock, pool, tcp, &tcp_on_destroy);

    /* Create active socket */
    pj_activesock_cfg_default(&asock_cfg);
    asock_cfg.async_cnt = 1;
    asock_cfg.grp_lock = tcp->grp_lock;

    pj_bzero(&tcp_callback, sizeof(tcp_callback));
    tcp_callback.on_data_read = &on_data_read;
    tcp_callback.on_data_sent = &on_data_sent;
    tcp_callback.on_connect_complete = &on_connect_complete;

    ioqueue = pjsip_endpt_get_ioqueue(listener->endpt);
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
				  ioqueue, &tcp_callback, tcp, &tcp->asock);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    /* Register transport to transport manager */
    status = pjsip_transport_register(listener->tpmgr, &tcp->base);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    tcp->is_registered = PJ_TRUE;

    /* Initialize keep-alive timer */
    tcp->ka_timer.user_data = (void*)tcp;
    tcp->ka_timer.cb = &tcp_keep_alive_timer;
    pj_ioqueue_op_key_init(&tcp->ka_op_key.key, sizeof(pj_ioqueue_op_key_t));
    pj_strdup(tcp->base.pool, &tcp->ka_pkt, &ka_pkt);

    /* Done setting up basic transport. */
    *p_tcp = tcp;

    PJ_LOG(4,(tcp->base.obj_name, "TCP %s transport created",
	      (tcp->is_server ? "server" : "client")));

    return PJ_SUCCESS;

on_error:
    tcp_destroy(&tcp->base, status);
    return status;
}
Example #10
/*
 * This is the public API to create, initialize, register, and start the
 * TCP listener.
 */
PJ_DEF(pj_status_t) pjsip_tcp_transport_start3(
					pjsip_endpoint *endpt,
					const pjsip_tcp_transport_cfg *cfg,
					pjsip_tpfactory **p_factory
					)
{
    pj_pool_t *pool;
    pj_sock_t sock = PJ_INVALID_SOCKET;
    struct tcp_listener *listener;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb listener_cb;
    pj_sockaddr *listener_addr;
    int addr_len;
    pj_status_t status;

    /* Sanity check */
    PJ_ASSERT_RETURN(endpt && cfg->async_cnt, PJ_EINVAL);

    /* Verify that address given in a_name (if any) is valid */
    if (cfg->addr_name.host.slen) {
	pj_sockaddr tmp;

	status = pj_sockaddr_init(cfg->af, &tmp, &cfg->addr_name.host, 
				  (pj_uint16_t)cfg->addr_name.port);
	if (status != PJ_SUCCESS || !pj_sockaddr_has_addr(&tmp) ||
	    (cfg->af==pj_AF_INET() && 
	     tmp.ipv4.sin_addr.s_addr==PJ_INADDR_NONE)) 
	{
	    /* Invalid address */
	    return PJ_EINVAL;
	}
    }

    pool = pjsip_endpt_create_pool(endpt, "tcplis", POOL_LIS_INIT, 
				   POOL_LIS_INC);
    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);


    listener = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
    listener->factory.pool = pool;
    listener->factory.type = cfg->af==pj_AF_INET() ? PJSIP_TRANSPORT_TCP :
						     PJSIP_TRANSPORT_TCP6;
    listener->factory.type_name = (char*)
		pjsip_transport_get_type_name(listener->factory.type);
    listener->factory.flag = 
	pjsip_transport_get_flag_from_type(listener->factory.type);
    listener->qos_type = cfg->qos_type;
    pj_memcpy(&listener->qos_params, &cfg->qos_params,
	      sizeof(cfg->qos_params));
    pj_memcpy(&listener->sockopt_params, &cfg->sockopt_params,
	      sizeof(cfg->sockopt_params));

    pj_ansi_strcpy(listener->factory.obj_name, "tcplis");
    if (listener->factory.type==PJSIP_TRANSPORT_TCP6)
	pj_ansi_strcat(listener->factory.obj_name, "6");

    status = pj_lock_create_recursive_mutex(pool, listener->factory.obj_name,
					    &listener->factory.lock);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create socket */
    status = pj_sock_socket(cfg->af, pj_SOCK_STREAM(), 0, &sock);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Apply QoS, if specified */
    status = pj_sock_apply_qos2(sock, cfg->qos_type, &cfg->qos_params, 
				2, listener->factory.obj_name, 
				"SIP TCP listener socket");

    /* Apply SO_REUSEADDR */
    if (cfg->reuse_addr) {
	int enabled = 1;
	status = pj_sock_setsockopt(sock, pj_SOL_SOCKET(), pj_SO_REUSEADDR(),
				    &enabled, sizeof(enabled));
	if (status != PJ_SUCCESS) {
	    PJ_PERROR(4,(listener->factory.obj_name, status,
		         "Warning: error applying SO_REUSEADDR"));
	}
    }

    /* Apply socket options, if specified */
    if (cfg->sockopt_params.cnt)
	status = pj_sock_setsockopt_params(sock, &cfg->sockopt_params);

    /* Bind address may be different than factory.local_addr because
     * factory.local_addr will be resolved below.
     */
    pj_sockaddr_cp(&listener->bound_addr, &cfg->bind_addr);

    /* Bind socket */
    listener_addr = &listener->factory.local_addr;
    pj_sockaddr_cp(listener_addr, &cfg->bind_addr);

    status = pj_sock_bind(sock, listener_addr, 
			  pj_sockaddr_get_len(listener_addr));
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Retrieve the bound address */
    addr_len = pj_sockaddr_get_len(listener_addr);
    status = pj_sock_getsockname(sock, listener_addr, &addr_len);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* If published host/IP is specified, then use that address as the
     * listener advertised address.
     */
    if (cfg->addr_name.host.slen) {
	/* Copy the address */
	listener->factory.addr_name = cfg->addr_name;
	pj_strdup(listener->factory.pool, &listener->factory.addr_name.host, 
		  &cfg->addr_name.host);
	listener->factory.addr_name.port = cfg->addr_name.port;

    } else {
	/* No published address is given, use the bound address */

	/* If the address returns 0.0.0.0, use the default
	 * interface address as the transport's address.
	 */
	if (!pj_sockaddr_has_addr(listener_addr)) {
	    pj_sockaddr hostip;

	    status = pj_gethostip(listener->bound_addr.addr.sa_family,
	                          &hostip);
	    if (status != PJ_SUCCESS)
		goto on_error;

	    pj_sockaddr_copy_addr(listener_addr, &hostip);
	}

	/* Save the address name */
	sockaddr_to_host_port(listener->factory.pool, 
			      &listener->factory.addr_name, 
			      listener_addr);
    }

    /* If port is zero, get the bound port */
    if (listener->factory.addr_name.port == 0) {
	listener->factory.addr_name.port = pj_sockaddr_get_port(listener_addr);
    }

    pj_ansi_snprintf(listener->factory.obj_name, 
		     sizeof(listener->factory.obj_name),
		     "tcplis:%d",  listener->factory.addr_name.port);


    /* Start listening to the address */
    status = pj_sock_listen(sock, PJSIP_TCP_TRANSPORT_BACKLOG);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create active socket */
    pj_activesock_cfg_default(&asock_cfg);
    if (cfg->async_cnt > MAX_ASYNC_CNT) 
	asock_cfg.async_cnt = MAX_ASYNC_CNT;
    else
	asock_cfg.async_cnt = cfg->async_cnt;

    /* Create group lock */
    status = pj_grp_lock_create(pool, NULL, &listener->grp_lock);
    if (status != PJ_SUCCESS)
	goto on_error;

    pj_grp_lock_add_ref(listener->grp_lock);
    pj_grp_lock_add_handler(listener->grp_lock, pool, listener,
			    &lis_on_destroy);

    asock_cfg.grp_lock = listener->grp_lock;

    pj_bzero(&listener_cb, sizeof(listener_cb));
    listener_cb.on_accept_complete = &on_accept_complete;
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
				  pjsip_endpt_get_ioqueue(endpt), 
				  &listener_cb, listener,
				  &listener->asock);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Register to transport manager */
    listener->endpt = endpt;
    listener->tpmgr = pjsip_endpt_get_tpmgr(endpt);
    listener->factory.create_transport = lis_create_transport;
    listener->factory.destroy = lis_destroy;
    listener->is_registered = PJ_TRUE;
    status = pjsip_tpmgr_register_tpfactory(listener->tpmgr,
					    &listener->factory);
    if (status != PJ_SUCCESS) {
	listener->is_registered = PJ_FALSE;
	goto on_error;
    }

    /* Start pending accept() operations */
    status = pj_activesock_start_accept(listener->asock, pool);
    if (status != PJ_SUCCESS)
	goto on_error;

    PJ_LOG(4,(listener->factory.obj_name, 
	     "SIP TCP listener ready for incoming connections at %.*s:%d",
	     (int)listener->factory.addr_name.host.slen,
	     listener->factory.addr_name.host.ptr,
	     listener->factory.addr_name.port));

    /* Return the pointer to user */
    if (p_factory) *p_factory = &listener->factory;

    return PJ_SUCCESS;

on_error:
    if (listener->asock==NULL && sock!=PJ_INVALID_SOCKET)
	pj_sock_close(sock);
    lis_destroy(&listener->factory);
    return status;
}
Example #11
int UE::send_message(std::string dest, std::string contents)
{
  timeval before;
  timeval after;
  pthread_mutex_lock(&_msg_mutex);
  pj_str_t to;
  stra(&to, dest.c_str());
  pj_str_t data;
  stra(&data, contents.c_str());
  pj_str_t message;
  stra(&message, "MESSAGE");
  pjsip_tx_data* tdata;
  pjsip_method msg_method;
  pjsip_method_init(&msg_method, _pool, &message);

  pjsip_endpt_create_request(get_global_endpoint(),
      &msg_method,
      &to,
      &_my_uri,
      &to,
      &_contact,
      NULL,
      -1,
      &data,
      &tdata);


  pjsip_tsx_create_uac(ua_module(), tdata, &_msg_tsx);

  pjsip_tpselector sel;
  sel.type = PJSIP_TPSELECTOR_TRANSPORT;
  sel.u.transport = _transport;
  pjsip_tsx_set_transport(_msg_tsx, &sel);
  
  pjsip_route_hdr* rt_hdr = pjsip_route_hdr_create(tdata->pool);
  rt_hdr->name_addr.uri = _server_uri;
  pjsip_msg_insert_first_hdr(tdata->msg, (pjsip_hdr*)rt_hdr);

  _msg_tsx->mod_data[ua_module()->id] = this;

  /* Hold a reference on the transaction's group lock so the transaction
   * cannot be destroyed while this thread blocks waiting for its final
   * state below; the matching pj_grp_lock_dec_ref() follows the wait. */
  pj_grp_lock_add_ref(_msg_tsx->grp_lock);
  gettimeofday(&before, NULL);
  pj_status_t status = pjsip_tsx_send_msg(_msg_tsx, NULL);
  if (status != PJ_SUCCESS)
  {
    /* Drop the reference taken above before bailing out */
    pj_grp_lock_dec_ref(_msg_tsx->grp_lock);
    _msg_tsx = NULL;
    pthread_mutex_unlock(&_msg_mutex);
    return -1;
  }
  while (_msg_tsx->state < PJSIP_TSX_STATE_COMPLETED)
  {
    pthread_cond_wait(&_msg_cond, &_msg_mutex);
  }
  gettimeofday(&after, NULL);
  //unsigned long latency = ((after.tv_sec - before.tv_sec) * 1000000) + (after.tv_usec - before.tv_usec);
  //printf("Message latency is %lu\n", latency);
  int ret = _msg_tsx->status_code;
  pj_grp_lock_dec_ref(_msg_tsx->grp_lock);
  _msg_tsx = NULL;
  pthread_mutex_unlock(&_msg_mutex);
  return ret;
}