static int parse_test(void)
{
    unsigned i;

    for (i=0; i<PJ_ARRAY_SIZE(p_tests); ++i) {
	pj_pool_t *pool;
	pjsip_media_type ctype;
	pjsip_msg_body *body;
	pj_str_t str;
	int rc;

	pool = pjsip_endpt_create_pool(endpt, NULL, 512, 512);

	init_media_type(&ctype, p_tests[i].ctype, p_tests[i].csubtype,
			p_tests[i].boundary);

	pj_strdup2_with_null(pool, &str, p_tests[i].msg);
	body = pjsip_multipart_parse(pool, str.ptr, str.slen, &ctype, 0);
	if (!body) {
	    pj_pool_release(pool);
	    return -100;
	}

	if (p_tests[i].verify) {
	    rc = p_tests[i].verify(pool, body);
	} else {
	    rc = 0;
	}

	pj_pool_release(pool);
	if (rc)
	    return rc;
    }

    return 0;
}
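All of the examples collected here follow the same basic pattern: ask the endpoint's pool factory for a small pool, do a bounded piece of work, and give the pool back. A minimal sketch of that pattern, assuming only an already-initialized pjsip_endpoint named endpt (the pool name and sizes are arbitrary):

static pj_status_t with_scratch_pool(pjsip_endpoint *endpt)
{
    pj_pool_t *pool;
    pj_str_t dup;

    /* Ask the endpoint's pool factory for a small scratch pool. */
    pool = pjsip_endpt_create_pool(endpt, "scratch", 512, 512);
    if (!pool)
        return PJ_ENOMEM;

    /* Use the pool for short-lived allocations, e.g. duplicating a string. */
    pj_strdup2_with_null(pool, &dup, "sip:alice@example.com");

    /* Hand the memory back to the factory; pj_pool_release(pool) works too. */
    pjsip_endpt_release_pool(endpt, pool);
    return PJ_SUCCESS;
}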
Example 2
/* 
 * mod_ua_load()
 *
 * Called when module is being loaded by endpoint.
 */
static pj_status_t mod_ua_load(pjsip_endpoint *endpt)
{
    pj_status_t status;

    /* Initialize the user agent. */
    mod_ua.endpt = endpt;
    mod_ua.pool = pjsip_endpt_create_pool( endpt, "ua%p", PJSIP_POOL_LEN_UA,
					   PJSIP_POOL_INC_UA);
    if (mod_ua.pool == NULL)
	return PJ_ENOMEM;

    status = pj_mutex_create_recursive(mod_ua.pool, " ua%p", &mod_ua.mutex);
    if (status != PJ_SUCCESS)
	return status;

    mod_ua.dlg_table = pj_hash_create(mod_ua.pool, PJSIP_MAX_DIALOG_COUNT);
    if (mod_ua.dlg_table == NULL)
	return PJ_ENOMEM;

    pj_list_init(&mod_ua.free_dlgset_nodes);

    /* Initialize dialog lock. */
    status = pj_thread_local_alloc(&pjsip_dlg_lock_tls_id);
    if (status != PJ_SUCCESS)
	return status;

    pj_thread_local_set(pjsip_dlg_lock_tls_id, NULL);

    return PJ_SUCCESS;

}
Example 3
int msg_err_test(void)
{
    pj_pool_t *pool;
    unsigned i;

    PJ_LOG(3,(THIS_FILE, "Testing parsing error"));

    pool = pjsip_endpt_create_pool(endpt, "msgerrtest", 4000, 4000);

    for (i=0; i<PJ_ARRAY_SIZE(test_entries); ++i) {
	pjsip_parser_err_report err_list, *e;

	PJ_LOG(3,(THIS_FILE, "  Parsing msg %d", i));
	pj_list_init(&err_list);
	pjsip_parse_msg(pool, test_entries[i].msg,
			strlen(test_entries[i].msg), &err_list);

	e = err_list.next;
	while (e != &err_list) {
	    PJ_LOG(3,(THIS_FILE, 
		      "   reported syntax error at line %d col %d for %.*s",
		      e->line, e->col,
		      (int)e->hname.slen,
		      e->hname.ptr));
	    e = e->next;
	}
    }

    pj_pool_release(pool);
    return 0;
}
Example 4
/*
 * Module initialization.
 * This will be called by endpoint when it initializes all modules.
 */
static pj_status_t mod_init( pjsip_endpoint *endpt,
			     struct pjsip_module *mod, pj_uint32_t id )
{
    pj_pool_t *pool;

    pool = pjsip_endpt_create_pool(endpt, "esubmgr", MGR_POOL_SIZE, MGR_POOL_INC);
    if (!pool)
	return -1;

    /* Manager initialization: create hash table and mutex. */
    mgr.pool = pool;
    mgr.endpt = endpt;
    mgr.ht = pj_hash_create(pool, HASH_TABLE_SIZE);
    if (!mgr.ht)
	return -1;

    mgr.mutex = pj_mutex_create(pool, "esubmgr", PJ_MUTEX_SIMPLE);
    if (!mgr.mutex)
	return -1;

    /* Attach manager to module. */
    mod->mod_data = &mgr;

    /* Init package list. */
    pj_list_init(&mgr.pkg_list);

    /* Init Allow-Events header. */
    mgr.allow_events = pjsip_allow_events_hdr_create(mgr.pool);

    /* Save the module ID. */
    mod_id = id;

    pjsip_event_notify_init_parser();
    return 0;
}
Example 5
PJ_DEF(pj_status_t) pjsip_publishc_create( pjsip_endpoint *endpt, 
					   const pjsip_publishc_opt *opt,
					   void *token,
					   pjsip_publishc_cb *cb,	
					   pjsip_publishc **p_pubc)
{
    pj_pool_t *pool;
    pjsip_publishc *pubc;
    pjsip_publishc_opt default_opt;
    pj_status_t status;

    /* Verify arguments. */
    PJ_ASSERT_RETURN(endpt && cb && p_pubc, PJ_EINVAL);

    pool = pjsip_endpt_create_pool(endpt, "pubc%p", 1024, 1024);
    PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);

    pubc = PJ_POOL_ZALLOC_T(pool, pjsip_publishc);

    pubc->pool = pool;
    pubc->endpt = endpt;
    pubc->token = token;
    pubc->cb = cb;
    pubc->expires = PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED;

    if (!opt) {
	pjsip_publishc_opt_default(&default_opt);
	opt = &default_opt;
    }
    pj_memcpy(&pubc->opt, opt, sizeof(*opt));
    pj_list_init(&pubc->pending_reqs);
    pj_list_init(&pubc->pending_reqs_empty);

    status = pj_mutex_create_recursive(pubc->pool, "pubc%p", &pubc->mutex);
    if (status != PJ_SUCCESS) {
	pj_pool_release(pool);
	return status;
    }

    status = pjsip_auth_clt_init(&pubc->auth_sess, endpt, pubc->pool, 0);
    if (status != PJ_SUCCESS) {
	pj_mutex_destroy(pubc->mutex);
	pj_pool_release(pool);
	return status;
    }

    pj_list_init(&pubc->route_set);
    pj_list_init(&pubc->usr_hdr);

    /* Done */
    *p_pubc = pubc;
    return PJ_SUCCESS;
}
Example 6
PJ_DEF(pj_status_t) pjsip_regc_create( pjsip_endpoint *endpt, void *token,
				       pjsip_regc_cb *cb,
				       pjsip_regc **p_regc)
{
    pj_pool_t *pool;
    pjsip_regc *regc;
    pj_status_t status;

    /* Verify arguments. */
    PJ_ASSERT_RETURN(endpt && cb && p_regc, PJ_EINVAL);

    pool = pjsip_endpt_create_pool(endpt, "regc%p", 1024, 1024);
    PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);

    regc = PJ_POOL_ZALLOC_T(pool, pjsip_regc);

    regc->pool = pool;
    regc->endpt = endpt;
    regc->token = token;
    regc->cb = cb;
    regc->expires = PJSIP_REGC_EXPIRATION_NOT_SPECIFIED;
    regc->add_xuid_param = pjsip_cfg()->regc.add_xuid_param;

    status = pj_lock_create_recursive_mutex(pool, pool->obj_name, 
					    &regc->lock);
    if (status != PJ_SUCCESS) {
	pj_pool_release(pool);
	return status;
    }

    status = pj_atomic_create(pool, 0, &regc->busy_ctr);
    if (status != PJ_SUCCESS) {
	pj_lock_destroy(regc->lock);
	pj_pool_release(pool);
	return status;
    }

    status = pjsip_auth_clt_init(&regc->auth_sess, endpt, regc->pool, 0);
    if (status != PJ_SUCCESS) {
	pj_atomic_destroy(regc->busy_ctr);
	pj_lock_destroy(regc->lock);
	pj_pool_release(pool);
	return status;
    }

    pj_list_init(&regc->route_set);
    pj_list_init(&regc->hdr_list);
    pj_list_init(&regc->contact_hdr_list);
    pj_list_init(&regc->removed_contact_hdr_list);

    /* Done */
    *p_regc = regc;
    return PJ_SUCCESS;
}
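A rough sketch of how a caller typically drives the registration client created above; the URIs and expiration below are placeholders, and the pjsip_regc_init()/pjsip_regc_register()/pjsip_regc_send() sequence is the usual regc flow rather than code from this source:

static pj_status_t regc_usage_sketch(pjsip_endpoint *endpt, void *token,
                                     pjsip_regc_cb *cb)
{
    pjsip_regc *regc;
    pjsip_tx_data *tdata;
    pj_str_t srv = pj_str("sip:registrar.example.com");
    pj_str_t id = pj_str("<sip:alice@example.com>");
    pj_str_t contact = pj_str("<sip:alice@192.0.2.10:5060>");
    pj_status_t status;

    /* Create the client registration session. */
    status = pjsip_regc_create(endpt, token, cb, &regc);
    if (status != PJ_SUCCESS)
        return status;

    /* Registrar URI, From/To URI, one Contact, 3600s expiration. */
    status = pjsip_regc_init(regc, &srv, &id, &id, 1, &contact, 3600);
    if (status != PJ_SUCCESS)
        return status;

    /* Build and send the initial REGISTER request. */
    status = pjsip_regc_register(regc, PJ_TRUE, &tdata);
    if (status != PJ_SUCCESS)
        return status;

    return pjsip_regc_send(regc, tdata);
}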
Example 7
/*
 * This utility function creates receive data buffers and starts
 * asynchronous recv() operations from the socket. It is called after an
 * accept() or connect() operation completes.
 */
static pj_status_t tcp_start_read(struct tcp_transport *tcp)
{
    pj_pool_t *pool;
    pj_ssize_t size;
    pj_sockaddr *rem_addr;
    void *readbuf[1];
    pj_status_t status;

    /* Init rdata */
    pool = pjsip_endpt_create_pool(tcp->base.endpt,
				   "rtd%p",
				   PJSIP_POOL_RDATA_LEN,
				   PJSIP_POOL_RDATA_INC);
    if (!pool) {
	tcp_perror(tcp->base.obj_name, "Unable to create pool", PJ_ENOMEM);
	return PJ_ENOMEM;
    }

    tcp->rdata.tp_info.pool = pool;

    tcp->rdata.tp_info.transport = &tcp->base;
    tcp->rdata.tp_info.tp_data = tcp;
    tcp->rdata.tp_info.op_key.rdata = &tcp->rdata;
    pj_ioqueue_op_key_init(&tcp->rdata.tp_info.op_key.op_key, 
			   sizeof(pj_ioqueue_op_key_t));

    tcp->rdata.pkt_info.src_addr = tcp->base.key.rem_addr;
    tcp->rdata.pkt_info.src_addr_len = sizeof(tcp->rdata.pkt_info.src_addr);
    rem_addr = &tcp->base.key.rem_addr;
    pj_sockaddr_print(rem_addr, tcp->rdata.pkt_info.src_name,
                      sizeof(tcp->rdata.pkt_info.src_name), 0);
    tcp->rdata.pkt_info.src_port = pj_sockaddr_get_port(rem_addr);

    size = sizeof(tcp->rdata.pkt_info.packet);
    readbuf[0] = tcp->rdata.pkt_info.packet;
    status = pj_activesock_start_read2(tcp->asock, tcp->base.pool, size,
				       readbuf, 0);
    if (status != PJ_SUCCESS && status != PJ_EPENDING) {
	PJ_LOG(4, (tcp->base.obj_name, 
		   "pj_activesock_start_read() error, status=%d", 
		   status));
	return status;
    }

    return PJ_SUCCESS;
}
Example 8
/*
 * This utility function creates receive data buffers and starts
 * asynchronous recv() operations from the socket. It is called after an
 * accept() or connect() operation completes.
 */
static pj_status_t tls_start_read(struct tls_transport *tls)
{
    pj_pool_t *pool;
    pj_ssize_t size;
    pj_sockaddr_in *rem_addr;
    void *readbuf[1];
    pj_status_t status;

    /* Init rdata */
    pool = pjsip_endpt_create_pool(tls->base.endpt,
				   "rtd%p",
				   PJSIP_POOL_RDATA_LEN,
				   PJSIP_POOL_RDATA_INC);
    if (!pool) {
	tls_perror(tls->base.obj_name, "Unable to create pool", PJ_ENOMEM);
	return PJ_ENOMEM;
    }

    tls->rdata.tp_info.pool = pool;

    tls->rdata.tp_info.transport = &tls->base;
    tls->rdata.tp_info.tp_data = tls;
    tls->rdata.tp_info.op_key.rdata = &tls->rdata;
    pj_ioqueue_op_key_init(&tls->rdata.tp_info.op_key.op_key, 
			   sizeof(pj_ioqueue_op_key_t));

    tls->rdata.pkt_info.src_addr = tls->base.key.rem_addr;
    tls->rdata.pkt_info.src_addr_len = sizeof(pj_sockaddr_in);
    rem_addr = (pj_sockaddr_in*) &tls->base.key.rem_addr;
    pj_ansi_strcpy(tls->rdata.pkt_info.src_name,
		   pj_inet_ntoa(rem_addr->sin_addr));
    tls->rdata.pkt_info.src_port = pj_ntohs(rem_addr->sin_port);

    size = sizeof(tls->rdata.pkt_info.packet);
    readbuf[0] = tls->rdata.pkt_info.packet;
    status = pj_ssl_sock_start_read2(tls->ssock, tls->base.pool, size,
				     readbuf, 0);
    if (status != PJ_SUCCESS && status != PJ_EPENDING) {
	PJ_LOG(4, (tls->base.obj_name, 
		   "pj_ssl_sock_start_read() error, status=%d", 
		   status));
	return status;
    }

    return PJ_SUCCESS;
}
Example 9
pjsip_messaging_session*
pjsip_messaging_create_session( pjsip_endpoint *endpt, const pj_str_t *param_from,
			        const pj_str_t *param_to )
{
    pj_pool_t *pool;
    pjsip_messaging_session *ses;
    pj_str_t tmp, to;

    pool = pjsip_endpt_create_pool(endpt, "imsess", 1024, 1024);
    if (!pool)
	return NULL;

    ses = pj_pool_calloc(pool, 1, sizeof(pjsip_messaging_session));
    ses->pool = pool;
    ses->endpt = endpt;

    ses->call_id = pjsip_cid_hdr_create(pool);
    pj_create_unique_string(pool, &ses->call_id->id);

    ses->cseq = pjsip_cseq_hdr_create(pool);
    ses->cseq->cseq = pj_rand();
    ses->cseq->method = message_method;

    ses->from = pjsip_from_hdr_create(pool);
    pj_strdup_with_null(pool, &tmp, param_from);
    ses->from->uri = pjsip_parse_uri(pool, tmp.ptr, tmp.slen, PJSIP_PARSE_URI_AS_NAMEADDR);
    if (ses->from->uri == NULL) {
	pjsip_endpt_destroy_pool(endpt, pool);
	return NULL;
    }
    pj_create_unique_string(pool, &ses->from->tag);

    ses->to = pjsip_to_hdr_create(pool);
    pj_strdup_with_null(pool, &to, param_to);
    ses->to->uri = pjsip_parse_uri(pool, to.ptr, to.slen, PJSIP_PARSE_URI_AS_NAMEADDR);
    if (ses->to->uri == NULL) {
	pjsip_endpt_destroy_pool(endpt, pool);
	return NULL;
    }

    PJ_LOG(4,(THIS_FILE, "IM session created: recipient=%s", to.ptr));
    return ses;
}
Example 10
/*
 * Create new transmit buffer.
 */
PJ_DEF(pj_status_t) pjsip_tx_data_create( pjsip_tpmgr *mgr,
					  pjsip_tx_data **p_tdata )
{
    pj_pool_t *pool;
    pjsip_tx_data *tdata;
    pj_status_t status;

    PJ_ASSERT_RETURN(mgr && p_tdata, PJ_EINVAL);

    pool = pjsip_endpt_create_pool( mgr->endpt, "tdta%p",
				    PJSIP_POOL_LEN_TDATA,
				    PJSIP_POOL_INC_TDATA );
    if (!pool)
	return PJ_ENOMEM;

    tdata = PJ_POOL_ZALLOC_T(pool, pjsip_tx_data);
    tdata->pool = pool;
    tdata->mgr = mgr;
    pj_memcpy(tdata->obj_name, pool->obj_name, PJ_MAX_OBJ_NAME);

    status = pj_atomic_create(tdata->pool, 0, &tdata->ref_cnt);
    if (status != PJ_SUCCESS) {
	pjsip_endpt_release_pool( mgr->endpt, tdata->pool );
	return status;
    }
    
    //status = pj_lock_create_simple_mutex(pool, "tdta%p", &tdata->lock);
    status = pj_lock_create_null_mutex(pool, "tdta%p", &tdata->lock);
    if (status != PJ_SUCCESS) {
	pjsip_endpt_release_pool( mgr->endpt, tdata->pool );
	return status;
    }

    pj_ioqueue_op_key_init(&tdata->op_key.key, sizeof(tdata->op_key.key));

#if defined(PJ_DEBUG) && PJ_DEBUG!=0
    pj_atomic_inc( tdata->mgr->tdata_counter );
#endif

    *p_tdata = tdata;
    return PJ_SUCCESS;
}
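From the caller's perspective, this buffer is normally obtained via pjsip_endpt_create_tdata() and its pool is reclaimed when the reference count drops to zero. A minimal sketch of that lifecycle, assuming an initialized endpt and omitting the message-building details:

static void tdata_lifetime_sketch(pjsip_endpoint *endpt)
{
    pjsip_tx_data *tdata;

    if (pjsip_endpt_create_tdata(endpt, &tdata) != PJ_SUCCESS)
        return;

    /* Hold a reference while the message is being built. */
    pjsip_tx_data_add_ref(tdata);

    /* ... fill in tdata->msg, allocating from tdata->pool ... */

    /* Drop the reference; the tdata and its pool are destroyed when the
     * reference counter reaches zero.
     */
    pjsip_tx_data_dec_ref(tdata);
}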
Example 11
static int simple_uri_test(void)
{
    unsigned i;
    pj_pool_t *pool;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, "  simple test"));
    for (i=0; i<PJ_ARRAY_SIZE(uri_test_array); ++i) {
	pool = pjsip_endpt_create_pool(endpt, "", POOL_SIZE, POOL_SIZE);
	status = do_uri_test(pool, &uri_test_array[i]);
	pjsip_endpt_release_pool(endpt, pool);
	if (status != PJ_SUCCESS) {
	    PJ_LOG(3,(THIS_FILE, "  error %d when testing entry %d",
		      status, i));
	    return status;
	}
    }

    return 0;
}
Example 12
/*! \brief Helper function which validates a permanent contact */
static int permanent_contact_validate(void *data)
{
	const char *value = data;
	pj_pool_t *pool;
	pj_str_t contact_uri;
	static const pj_str_t HCONTACT = { "Contact", 7 };

	pool = pjsip_endpt_create_pool(ast_sip_get_pjsip_endpoint(), "Permanent Contact Validation", 256, 256);
	if (!pool) {
		return -1;
	}

	pj_strdup2_with_null(pool, &contact_uri, value);
	if (!pjsip_parse_hdr(pool, &HCONTACT, contact_uri.ptr, contact_uri.slen, NULL)) {
		pjsip_endpt_release_pool(ast_sip_get_pjsip_endpoint(), pool);
		return -1;
	}

	pjsip_endpt_release_pool(ast_sip_get_pjsip_endpoint(), pool);
	return 0;
}
Example 13
/* Helper function to create "incoming" packet */
struct recv_list *create_incoming_packet( struct loop_transport *loop,
					  pjsip_tx_data *tdata )
{
    pj_pool_t *pool;
    struct recv_list *pkt;

    pool = pjsip_endpt_create_pool(loop->base.endpt, "rdata", 
				   PJSIP_POOL_RDATA_LEN, 
				   PJSIP_POOL_RDATA_INC+5);
    if (!pool)
	return NULL;

    pkt = pj_pool_zalloc(pool, sizeof(struct recv_list));

    /* Initialize rdata. */
    pkt->rdata.tp_info.pool = pool;
    pkt->rdata.tp_info.transport = &loop->base;
    
    /* Copy the packet. */
    pj_memcpy(pkt->rdata.pkt_info.packet, tdata->buf.start,
	      tdata->buf.cur - tdata->buf.start);
    pkt->rdata.pkt_info.len = tdata->buf.cur - tdata->buf.start;

    /* "Source address" info. */
    pkt->rdata.pkt_info.src_addr_len = sizeof(pj_sockaddr_in);
    if (loop->base.key.type == PJSIP_TRANSPORT_LOOP)
	pj_ansi_strcpy(pkt->rdata.pkt_info.src_name, ADDR_LOOP);
    else
	pj_ansi_strcpy(pkt->rdata.pkt_info.src_name, ADDR_LOOP_DGRAM);
    pkt->rdata.pkt_info.src_port = loop->base.local_name.port;

    /* When do we need to "deliver" this packet. */
    pj_gettimeofday(&pkt->rdata.pkt_info.timestamp);
    pkt->rdata.pkt_info.timestamp.msec += loop->recv_delay;
    pj_time_val_normalize(&pkt->rdata.pkt_info.timestamp);

    /* Done. */

    return pkt;
}
Example 14
PJ_DEF(pj_status_t) pjsip_publishc_create( pjsip_endpoint *endpt, 
					   unsigned options,
					   void *token,
					   pjsip_publishc_cb *cb,	
					   pjsip_publishc **p_pubc)
{
    pj_pool_t *pool;
    pjsip_publishc *pubc;
    pj_status_t status;

    /* Verify arguments. */
    PJ_ASSERT_RETURN(endpt && cb && p_pubc, PJ_EINVAL);
    PJ_ASSERT_RETURN(options == 0, PJ_EINVAL);

    PJ_UNUSED_ARG(options);

    pool = pjsip_endpt_create_pool(endpt, "pubc%p", 1024, 1024);
    PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);

    pubc = PJ_POOL_ZALLOC_T(pool, pjsip_publishc);

    pubc->pool = pool;
    pubc->endpt = endpt;
    pubc->token = token;
    pubc->cb = cb;
    pubc->expires = PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED;

    status = pjsip_auth_clt_init(&pubc->auth_sess, endpt, pubc->pool, 0);
    if (status != PJ_SUCCESS) {
	pj_pool_release(pool);
	return status;
    }

    pj_list_init(&pubc->route_set);

    /* Done */
    *p_pubc = pubc;
    return PJ_SUCCESS;
}
Example 15
/*
 * Common function to create TCP transport, called when pending accept() and
 * pending connect() complete.
 */
static pj_status_t tcp_create( struct tcp_listener *listener,
			       pj_pool_t *pool,
			       pj_sock_t sock, pj_bool_t is_server,
			       const pj_sockaddr *local,
			       const pj_sockaddr *remote,
			       struct tcp_transport **p_tcp)
{
    struct tcp_transport *tcp;
    pj_ioqueue_t *ioqueue;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb tcp_callback;
    const pj_str_t ka_pkt = PJSIP_TCP_KEEP_ALIVE_DATA;
    char print_addr[PJ_INET6_ADDRSTRLEN+10];
    pj_status_t status;
    

    PJ_ASSERT_RETURN(sock != PJ_INVALID_SOCKET, PJ_EINVAL);


    if (pool == NULL) {
	pool = pjsip_endpt_create_pool(listener->endpt, "tcp",
				       POOL_TP_INIT, POOL_TP_INC);
	PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
    }    

    /*
     * Create and initialize basic transport structure.
     */
    tcp = PJ_POOL_ZALLOC_T(pool, struct tcp_transport);
    tcp->is_server = is_server;
    tcp->sock = sock;
    /*tcp->listener = listener;*/
    pj_list_init(&tcp->delayed_list);
    tcp->base.pool = pool;

    pj_ansi_snprintf(tcp->base.obj_name, PJ_MAX_OBJ_NAME, 
		     (is_server ? "tcps%p" :"tcpc%p"), tcp);

    status = pj_atomic_create(pool, 0, &tcp->base.ref_cnt);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, "tcp", &tcp->base.lock);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    tcp->base.key.type = listener->factory.type;
    pj_sockaddr_cp(&tcp->base.key.rem_addr, remote);
    tcp->base.type_name = (char*)pjsip_transport_get_type_name(
				(pjsip_transport_type_e)tcp->base.key.type);
    tcp->base.flag = pjsip_transport_get_flag_from_type(
				(pjsip_transport_type_e)tcp->base.key.type);

    tcp->base.info = (char*) pj_pool_alloc(pool, 64);
    pj_ansi_snprintf(tcp->base.info, 64, "%s to %s",
                     tcp->base.type_name,
                     pj_sockaddr_print(remote, print_addr,
                                       sizeof(print_addr), 3));

    tcp->base.addr_len = pj_sockaddr_get_len(remote);
    pj_sockaddr_cp(&tcp->base.local_addr, local);
    sockaddr_to_host_port(pool, &tcp->base.local_name, local);
    sockaddr_to_host_port(pool, &tcp->base.remote_name, remote);
    tcp->base.dir = is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;

    tcp->base.endpt = listener->endpt;
    tcp->base.tpmgr = listener->tpmgr;
    tcp->base.send_msg = &tcp_send_msg;
    tcp->base.do_shutdown = &tcp_shutdown;
    tcp->base.destroy = &tcp_destroy_transport;

    /* Create active socket */
    pj_activesock_cfg_default(&asock_cfg);
    asock_cfg.async_cnt = 1;

    pj_bzero(&tcp_callback, sizeof(tcp_callback));
    tcp_callback.on_data_read = &on_data_read;
    tcp_callback.on_data_sent = &on_data_sent;
    tcp_callback.on_connect_complete = &on_connect_complete;

    ioqueue = pjsip_endpt_get_ioqueue(listener->endpt);
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
				  ioqueue, &tcp_callback, tcp, &tcp->asock);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    /* Register transport to transport manager */
    status = pjsip_transport_register(listener->tpmgr, &tcp->base);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    tcp->is_registered = PJ_TRUE;

    /* Initialize keep-alive timer */
    tcp->ka_timer.user_data = (void*)tcp;
    tcp->ka_timer.cb = &tcp_keep_alive_timer;
    pj_ioqueue_op_key_init(&tcp->ka_op_key.key, sizeof(pj_ioqueue_op_key_t));
    pj_strdup(tcp->base.pool, &tcp->ka_pkt, &ka_pkt);

    /* Done setting up basic transport. */
    *p_tcp = tcp;

    PJ_LOG(4,(tcp->base.obj_name, "TCP %s transport created",
	      (tcp->is_server ? "server" : "client")));

    return PJ_SUCCESS;

on_error:
    tcp_destroy(&tcp->base, status);
    return status;
}
Example 16
/*
 * This is the public API to create, initialize, register, and start the
 * TCP listener.
 */
PJ_DEF(pj_status_t) pjsip_tcp_transport_start3(
					pjsip_endpoint *endpt,
					const pjsip_tcp_transport_cfg *cfg,
					pjsip_tpfactory **p_factory
					)
{
    pj_pool_t *pool;
    pj_sock_t sock = PJ_INVALID_SOCKET;
    struct tcp_listener *listener;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb listener_cb;
    pj_sockaddr *listener_addr;
    int addr_len;
    pj_status_t status;

    /* Sanity check */
    PJ_ASSERT_RETURN(endpt && cfg->async_cnt, PJ_EINVAL);

    /* Verify that address given in a_name (if any) is valid */
    if (cfg->addr_name.host.slen) {
	pj_sockaddr tmp;

	status = pj_sockaddr_init(cfg->af, &tmp, &cfg->addr_name.host, 
				  (pj_uint16_t)cfg->addr_name.port);
	if (status != PJ_SUCCESS || !pj_sockaddr_has_addr(&tmp) ||
	    (cfg->af==pj_AF_INET() && 
	     tmp.ipv4.sin_addr.s_addr==PJ_INADDR_NONE)) 
	{
	    /* Invalid address */
	    return PJ_EINVAL;
	}
    }

    pool = pjsip_endpt_create_pool(endpt, "tcplis", POOL_LIS_INIT, 
				   POOL_LIS_INC);
    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);


    listener = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
    listener->factory.pool = pool;
    listener->factory.type = cfg->af==pj_AF_INET() ? PJSIP_TRANSPORT_TCP :
						     PJSIP_TRANSPORT_TCP6;
    listener->factory.type_name = (char*)
		pjsip_transport_get_type_name(listener->factory.type);
    listener->factory.flag = 
	pjsip_transport_get_flag_from_type(listener->factory.type);
    listener->qos_type = cfg->qos_type;
    pj_memcpy(&listener->qos_params, &cfg->qos_params,
	      sizeof(cfg->qos_params));

    pj_ansi_strcpy(listener->factory.obj_name, "tcplis");
    if (listener->factory.type==PJSIP_TRANSPORT_TCP6)
	pj_ansi_strcat(listener->factory.obj_name, "6");

    status = pj_lock_create_recursive_mutex(pool, listener->factory.obj_name,
					    &listener->factory.lock);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create socket */
    status = pj_sock_socket(cfg->af, pj_SOCK_STREAM(), 0, &sock);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Apply QoS, if specified */
    status = pj_sock_apply_qos2(sock, cfg->qos_type, &cfg->qos_params, 
				2, listener->factory.obj_name, 
				"SIP TCP listener socket");

    /* Bind address may be different than factory.local_addr because
     * factory.local_addr will be resolved below.
     */
    pj_sockaddr_cp(&listener->bound_addr, &cfg->bind_addr);

    /* Bind socket */
    listener_addr = &listener->factory.local_addr;
    pj_sockaddr_cp(listener_addr, &cfg->bind_addr);

    status = pj_sock_bind(sock, listener_addr, 
			  pj_sockaddr_get_len(listener_addr));
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Retrieve the bound address */
    addr_len = pj_sockaddr_get_len(listener_addr);
    status = pj_sock_getsockname(sock, listener_addr, &addr_len);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* If published host/IP is specified, then use that address as the
     * listener advertised address.
     */
    if (cfg->addr_name.host.slen) {
	/* Copy the address */
	listener->factory.addr_name = cfg->addr_name;
	pj_strdup(listener->factory.pool, &listener->factory.addr_name.host, 
		  &cfg->addr_name.host);
	listener->factory.addr_name.port = cfg->addr_name.port;

    } else {
	/* No published address is given, use the bound address */

	/* If the address returns 0.0.0.0, use the default
	 * interface address as the transport's address.
	 */
	if (!pj_sockaddr_has_addr(listener_addr)) {
	    pj_sockaddr hostip;

	    status = pj_gethostip(listener->bound_addr.addr.sa_family,
	                          &hostip);
	    if (status != PJ_SUCCESS)
		goto on_error;

	    pj_sockaddr_copy_addr(listener_addr, &hostip);
	}

	/* Save the address name */
	sockaddr_to_host_port(listener->factory.pool, 
			      &listener->factory.addr_name, 
			      listener_addr);
    }

    /* If port is zero, get the bound port */
    if (listener->factory.addr_name.port == 0) {
	listener->factory.addr_name.port = pj_sockaddr_get_port(listener_addr);
    }

    pj_ansi_snprintf(listener->factory.obj_name, 
		     sizeof(listener->factory.obj_name),
		     "tcplis:%d",  listener->factory.addr_name.port);


    /* Start listening to the address */
    status = pj_sock_listen(sock, PJSIP_TCP_TRANSPORT_BACKLOG);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create active socket */
    pj_activesock_cfg_default(&asock_cfg);
    if (cfg->async_cnt > MAX_ASYNC_CNT) 
	asock_cfg.async_cnt = MAX_ASYNC_CNT;
    else
	asock_cfg.async_cnt = cfg->async_cnt;

    pj_bzero(&listener_cb, sizeof(listener_cb));
    listener_cb.on_accept_complete = &on_accept_complete;
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
				  pjsip_endpt_get_ioqueue(endpt), 
				  &listener_cb, listener,
				  &listener->asock);

    /* Register to transport manager */
    listener->endpt = endpt;
    listener->tpmgr = pjsip_endpt_get_tpmgr(endpt);
    listener->factory.create_transport = lis_create_transport;
    listener->factory.destroy = lis_destroy;
    listener->is_registered = PJ_TRUE;
    status = pjsip_tpmgr_register_tpfactory(listener->tpmgr,
					    &listener->factory);
    if (status != PJ_SUCCESS) {
	listener->is_registered = PJ_FALSE;
	goto on_error;
    }

    /* Start pending accept() operations */
    status = pj_activesock_start_accept(listener->asock, pool);
    if (status != PJ_SUCCESS)
	goto on_error;

    PJ_LOG(4,(listener->factory.obj_name, 
	     "SIP TCP listener ready for incoming connections at %.*s:%d",
	     (int)listener->factory.addr_name.host.slen,
	     listener->factory.addr_name.host.ptr,
	     listener->factory.addr_name.port));

    /* Return the pointer to user */
    if (p_factory) *p_factory = &listener->factory;

    return PJ_SUCCESS;

on_error:
    if (listener->asock==NULL && sock!=PJ_INVALID_SOCKET)
	pj_sock_close(sock);
    lis_destroy(&listener->factory);
    return status;
}
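For reference, a typical caller of this listener factory looks roughly like the sketch below, assuming an initialized endpt and using pjsip_tcp_transport_cfg_default(); the port is a placeholder:

static pj_status_t start_tcp_listener_sketch(pjsip_endpoint *endpt)
{
    pjsip_tcp_transport_cfg cfg;
    pjsip_tpfactory *factory;

    /* Fill the config with IPv4 defaults, then bind to any address
     * on port 5060 (placeholder).
     */
    pjsip_tcp_transport_cfg_default(&cfg, pj_AF_INET());
    pj_sockaddr_init(pj_AF_INET(), &cfg.bind_addr, NULL, 5060);

    return pjsip_tcp_transport_start3(endpt, &cfg, &factory);
}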
Example 17
SipIceTransport::SipIceTransport(pjsip_endpoint* endpt, pj_pool_t& /* pool */,
                                 long /* t_type */,
                                 const std::shared_ptr<IceTransport>& ice,
                                 int comp_id)
    : pool_(nullptr, pj_pool_release)
    , rxPool_(nullptr, pj_pool_release)
    , trData_()
    , rdata_()
    , ice_(ice)
    , comp_id_(comp_id)
{
    trData_.self = this;

    if (not ice or not ice->isRunning())
        throw std::logic_error("ice transport must exist and negotiation completed");

    RING_DBG("SipIceTransport@%p {tr=%p}", this, &trData_.base);
    auto& base = trData_.base;

    pool_.reset(pjsip_endpt_create_pool(endpt, "SipIceTransport.pool", POOL_TP_INIT, POOL_TP_INC));
    if (not pool_)
        throw std::bad_alloc();
    auto pool = pool_.get();

    pj_ansi_snprintf(base.obj_name, PJ_MAX_OBJ_NAME, "SipIceTransport");
    base.endpt = endpt;
    base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    base.pool = pool;

    rdata_.tp_info.pool = pool;

    // FIXME: not destroyed in case of exception
    if (pj_atomic_create(pool, 0, &base.ref_cnt) != PJ_SUCCESS)
        throw std::runtime_error("Can't create PJSIP atomic.");

    // FIXME: not destroyed in case of exception
    if (pj_lock_create_recursive_mutex(pool, "SipIceTransport.mutex", &base.lock) != PJ_SUCCESS)
        throw std::runtime_error("Can't create PJSIP mutex.");

    auto remote = ice->getRemoteAddress(comp_id);
    RING_DBG("SipIceTransport: remote is %s", remote.toString(true).c_str());
    pj_sockaddr_cp(&base.key.rem_addr, remote.pjPtr());
    base.key.type = PJSIP_TRANSPORT_UDP;//t_type;
    base.type_name = (char*)pjsip_transport_get_type_name((pjsip_transport_type_e)base.key.type);
    base.flag = pjsip_transport_get_flag_from_type((pjsip_transport_type_e)base.key.type);
    base.info = (char*) pj_pool_alloc(pool, TRANSPORT_INFO_LENGTH);

    char print_addr[PJ_INET6_ADDRSTRLEN+10];
    pj_ansi_snprintf(base.info, TRANSPORT_INFO_LENGTH, "%s to %s",
                     base.type_name,
                     pj_sockaddr_print(remote.pjPtr(), print_addr,
                                       sizeof(print_addr), 3));
    base.addr_len = remote.getLength();
    base.dir = PJSIP_TP_DIR_NONE;//is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;
    base.data = nullptr;

    /* Set initial local address */
    auto local = ice->getDefaultLocalAddress();
    pj_sockaddr_cp(&base.local_addr, local.pjPtr());

    sockaddr_to_host_port(pool, &base.local_name, &base.local_addr);
    sockaddr_to_host_port(pool, &base.remote_name, remote.pjPtr());

    base.send_msg = [](pjsip_transport *transport,
                       pjsip_tx_data *tdata,
                       const pj_sockaddr_t *rem_addr, int addr_len,
                       void *token, pjsip_transport_callback callback) {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        return this_->send(tdata, rem_addr, addr_len, token, callback);
    };
    base.do_shutdown = [](pjsip_transport *transport) -> pj_status_t {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        RING_WARN("SipIceTransport@%p: shutdown", this_);
        return PJ_SUCCESS;
    };
    base.destroy = [](pjsip_transport *transport) -> pj_status_t {
        auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
        RING_WARN("SipIceTransport@%p: destroy", this_);
        delete this_;
        return PJ_SUCCESS;
    };

    /* Init rdata */
    rxPool_.reset(pjsip_endpt_create_pool(base.endpt,
                                          "SipIceTransport.rtd%p",
                                          PJSIP_POOL_RDATA_LEN,
                                          PJSIP_POOL_RDATA_INC));
    if (not rxPool_)
        throw std::bad_alloc();
    auto rx_pool = rxPool_.get();

    rdata_.tp_info.pool = rx_pool;
    rdata_.tp_info.transport = &base;
    rdata_.tp_info.tp_data = this;
    rdata_.tp_info.op_key.rdata = &rdata_;
    pj_ioqueue_op_key_init(&rdata_.tp_info.op_key.op_key, sizeof(pj_ioqueue_op_key_t));
    rdata_.pkt_info.src_addr = base.key.rem_addr;
    rdata_.pkt_info.src_addr_len = sizeof(rdata_.pkt_info.src_addr);
    auto rem_addr = &base.key.rem_addr;
    pj_sockaddr_print(rem_addr, rdata_.pkt_info.src_name, sizeof(rdata_.pkt_info.src_name), 0);
    rdata_.pkt_info.src_port = pj_sockaddr_get_port(rem_addr);
    rdata_.pkt_info.len  = 0;
    rdata_.pkt_info.zero = 0;

    if (pjsip_transport_register(base.tpmgr, &base) != PJ_SUCCESS)
        throw std::runtime_error("Can't register PJSIP transport.");
    is_registered_ = true;

    Manager::instance().registerEventHandler((uintptr_t)this,
                                             [this]{ loop(); });
}
Example 18
/*
 * This is the public API to create, initialize, register, and start the
 * TCP listener.
 */
PJ_DEF(pj_status_t) pjsip_tcp_transport_start2(pjsip_endpoint *endpt,
					       const pj_sockaddr_in *local,
					       const pjsip_host_port *a_name,
					       unsigned async_cnt,
					       pjsip_tpfactory **p_factory)
{
    pj_pool_t *pool;
    pj_sock_t sock = PJ_INVALID_SOCKET;
    struct tcp_listener *listener;
    pj_activesock_cfg asock_cfg;
    pj_activesock_cb listener_cb;
    pj_sockaddr_in *listener_addr;
    int addr_len;
    pj_status_t status;

    /* Sanity check */
    PJ_ASSERT_RETURN(endpt && async_cnt, PJ_EINVAL);

    /* Verify that address given in a_name (if any) is valid */
    if (a_name && a_name->host.slen) {
	pj_sockaddr_in tmp;

	status = pj_sockaddr_in_init(&tmp, &a_name->host, 
				     (pj_uint16_t)a_name->port);
	if (status != PJ_SUCCESS || tmp.sin_addr.s_addr == PJ_INADDR_ANY ||
	    tmp.sin_addr.s_addr == PJ_INADDR_NONE)
	{
	    /* Invalid address */
	    return PJ_EINVAL;
	}
    }

    pool = pjsip_endpt_create_pool(endpt, "tcplis", POOL_LIS_INIT, 
				   POOL_LIS_INC);
    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);


    listener = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
    listener->factory.pool = pool;
    listener->factory.type = PJSIP_TRANSPORT_TCP;
    listener->factory.type_name = "tcp";
    listener->factory.flag = 
	pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TCP);

    pj_ansi_strcpy(listener->factory.obj_name, "tcplis");

    status = pj_lock_create_recursive_mutex(pool, "tcplis", 
					    &listener->factory.lock);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create and bind socket */
    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &sock);
    if (status != PJ_SUCCESS)
	goto on_error;

    listener_addr = (pj_sockaddr_in*)&listener->factory.local_addr;
    if (local) {
	pj_memcpy(listener_addr, local, sizeof(pj_sockaddr_in));
    } else {
	pj_sockaddr_in_init(listener_addr, NULL, 0);
    }

    status = pj_sock_bind(sock, listener_addr, sizeof(pj_sockaddr_in));
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Retrieve the bound address */
    addr_len = sizeof(pj_sockaddr_in);
    status = pj_sock_getsockname(sock, listener_addr, &addr_len);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* If published host/IP is specified, then use that address as the
     * listener advertised address.
     */
    if (a_name && a_name->host.slen) {
	/* Copy the address */
	listener->factory.addr_name = *a_name;
	pj_strdup(listener->factory.pool, &listener->factory.addr_name.host, 
		  &a_name->host);
	listener->factory.addr_name.port = a_name->port;

    } else {
	/* No published address is given, use the bound address */

	/* If the address returns 0.0.0.0, use the default
	 * interface address as the transport's address.
	 */
	if (listener_addr->sin_addr.s_addr == 0) {
	    pj_sockaddr hostip;

	    status = pj_gethostip(pj_AF_INET(), &hostip);
	    if (status != PJ_SUCCESS)
		goto on_error;

	    listener_addr->sin_addr.s_addr = hostip.ipv4.sin_addr.s_addr;
	}

	/* Save the address name */
	sockaddr_to_host_port(listener->factory.pool, 
			      &listener->factory.addr_name, listener_addr);
    }

    /* If port is zero, get the bound port */
    if (listener->factory.addr_name.port == 0) {
	listener->factory.addr_name.port = pj_ntohs(listener_addr->sin_port);
    }

    pj_ansi_snprintf(listener->factory.obj_name, 
		     sizeof(listener->factory.obj_name),
		     "tcplis:%d",  listener->factory.addr_name.port);


    /* Start listening to the address */
    status = pj_sock_listen(sock, PJSIP_TCP_TRANSPORT_BACKLOG);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create active socket */
    if (async_cnt > MAX_ASYNC_CNT) async_cnt = MAX_ASYNC_CNT;
    pj_activesock_cfg_default(&asock_cfg);
    asock_cfg.async_cnt = async_cnt;

    pj_bzero(&listener_cb, sizeof(listener_cb));
    listener_cb.on_accept_complete = &on_accept_complete;
    status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
				  pjsip_endpt_get_ioqueue(endpt), 
				  &listener_cb, listener,
				  &listener->asock);

    /* Register to transport manager */
    listener->endpt = endpt;
    listener->tpmgr = pjsip_endpt_get_tpmgr(endpt);
    listener->factory.create_transport = lis_create_transport;
    listener->factory.destroy = lis_destroy;
    listener->is_registered = PJ_TRUE;
    status = pjsip_tpmgr_register_tpfactory(listener->tpmgr,
					    &listener->factory);
    if (status != PJ_SUCCESS) {
	listener->is_registered = PJ_FALSE;
	goto on_error;
    }

    /* Start pending accept() operations */
    status = pj_activesock_start_accept(listener->asock, pool);
    if (status != PJ_SUCCESS)
	goto on_error;

    PJ_LOG(4,(listener->factory.obj_name, 
	     "SIP TCP listener ready for incoming connections at %.*s:%d",
	     (int)listener->factory.addr_name.host.slen,
	     listener->factory.addr_name.host.ptr,
	     listener->factory.addr_name.port));

    /* Return the pointer to user */
    if (p_factory) *p_factory = &listener->factory;

    return PJ_SUCCESS;

on_error:
    if (listener->asock==NULL && sock!=PJ_INVALID_SOCKET)
	pj_sock_close(sock);
    lis_destroy(&listener->factory);
    return status;
}
Example 19
static int rx_task(void *data)
{
	RAII_VAR(struct rx_task_data *, task_data, data, ao2_cleanup);
	RAII_VAR(struct ao2_container *, contacts, NULL, ao2_cleanup);

	int added = 0, updated = 0, deleted = 0;
	pjsip_contact_hdr *contact_hdr = NULL;
	struct registrar_contact_details details = { 0, };
	pjsip_tx_data *tdata;
	pjsip_response_addr addr;
	const char *aor_name = ast_sorcery_object_get_id(task_data->aor);

	/* Retrieve the current contacts, we'll need to know whether to update or not */
	contacts = ast_sip_location_retrieve_aor_contacts(task_data->aor);

	/* So we don't count static contacts against max_contacts we prune them out from the container */
	ao2_callback(contacts, OBJ_NODATA | OBJ_UNLINK | OBJ_MULTIPLE, registrar_prune_static, NULL);

	if (registrar_validate_contacts(task_data->rdata, contacts, task_data->aor, &added, &updated, &deleted)) {
		/* The provided Contact headers do not conform to the specification */
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), task_data->rdata, 400, NULL, NULL, NULL);
		ast_sip_report_failed_acl(task_data->endpoint, task_data->rdata, "registrar_invalid_contacts_provided");
		ast_log(LOG_WARNING, "Failed to validate contacts in REGISTER request from '%s'\n",
				ast_sorcery_object_get_id(task_data->endpoint));
		return PJ_TRUE;
	}

	if ((MAX(added - deleted, 0) + (!task_data->aor->remove_existing ? ao2_container_count(contacts) : 0)) > task_data->aor->max_contacts) {
		/* Enforce the maximum number of contacts */
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), task_data->rdata, 403, NULL, NULL, NULL);
		ast_sip_report_failed_acl(task_data->endpoint, task_data->rdata, "registrar_attempt_exceeds_maximum_configured_contacts");
		ast_log(LOG_WARNING, "Registration attempt from endpoint '%s' to AOR '%s' will exceed max contacts of %d\n",
				ast_sorcery_object_get_id(task_data->endpoint), ast_sorcery_object_get_id(task_data->aor), task_data->aor->max_contacts);
		return PJ_TRUE;
	}

	if (!(details.pool = pjsip_endpt_create_pool(ast_sip_get_pjsip_endpoint(), "Contact Comparison", 256, 256))) {
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), task_data->rdata, 500, NULL, NULL, NULL);
		return PJ_TRUE;
	}

	/* Iterate each provided Contact header and add, update, or delete */
	while ((contact_hdr = pjsip_msg_find_hdr(task_data->rdata->msg_info.msg, PJSIP_H_CONTACT, contact_hdr ? contact_hdr->next : NULL))) {
		int expiration;
		char contact_uri[PJSIP_MAX_URL_SIZE];
		RAII_VAR(struct ast_sip_contact *, contact, NULL, ao2_cleanup);

		if (contact_hdr->star) {
			/* A star means to unregister everything, so do so for the possible contacts */
			ao2_callback(contacts, OBJ_NODATA | OBJ_MULTIPLE, registrar_delete_contact, (void *)aor_name);
			break;
		}

		if (!PJSIP_URI_SCHEME_IS_SIP(contact_hdr->uri) && !PJSIP_URI_SCHEME_IS_SIPS(contact_hdr->uri)) {
			/* This registrar only currently supports sip: and sips: URI schemes */
			continue;
		}

		expiration = registrar_get_expiration(task_data->aor, contact_hdr, task_data->rdata);
		details.uri = pjsip_uri_get_uri(contact_hdr->uri);
		pjsip_uri_print(PJSIP_URI_IN_CONTACT_HDR, details.uri, contact_uri, sizeof(contact_uri));

		if (!(contact = ao2_callback(contacts, OBJ_UNLINK, registrar_find_contact, &details))) {
			/* If they are actually trying to delete a contact that does not exist... be forgiving */
			if (!expiration) {
				ast_verb(3, "Attempted to remove non-existent contact '%s' from AOR '%s' by request\n",
					contact_uri, aor_name);
				continue;
			}

			ast_sip_location_add_contact(task_data->aor, contact_uri, ast_tvadd(ast_tvnow(), ast_samp2tv(expiration, 1)));
			ast_verb(3, "Added contact '%s' to AOR '%s' with expiration of %d seconds\n",
				contact_uri, aor_name, expiration);
			ast_test_suite_event_notify("AOR_CONTACT_ADDED",
					"Contact: %s\r\n"
					"AOR: %s\r\n"
					"Expiration: %d",
					contact_uri,
					aor_name,
					expiration);
		} else if (expiration) {
			RAII_VAR(struct ast_sip_contact *, updated, ast_sorcery_copy(ast_sip_get_sorcery(), contact), ao2_cleanup);
			updated->expiration_time = ast_tvadd(ast_tvnow(), ast_samp2tv(expiration, 1));
			updated->qualify_frequency = task_data->aor->qualify_frequency;
			updated->authenticate_qualify = task_data->aor->authenticate_qualify;

			ast_sip_location_update_contact(updated);
			ast_debug(3, "Refreshed contact '%s' on AOR '%s' with new expiration of %d seconds\n",
				contact_uri, aor_name, expiration);
			ast_test_suite_event_notify("AOR_CONTACT_REFRESHED",
					"Contact: %s\r\n"
					"AOR: %s\r\n"
					"Expiration: %d",
					contact_uri,
					aor_name,
					expiration);
		} else {
			ast_sip_location_delete_contact(contact);
			ast_verb(3, "Removed contact '%s' from AOR '%s' due to request\n", contact_uri, aor_name);
			ast_test_suite_event_notify("AOR_CONTACT_REMOVED",
					"Contact: %s\r\n"
					"AOR: %s",
					contact_uri,
					aor_name);
		}
	}

	pjsip_endpt_release_pool(ast_sip_get_pjsip_endpoint(), details.pool);

	/* If the AOR is configured to remove any existing contacts that have not been updated/added as a result of this REGISTER
	 * do so
	 */
	if (task_data->aor->remove_existing) {
		ao2_callback(contacts, OBJ_NODATA | OBJ_MULTIPLE, registrar_delete_contact, NULL);
	}

	/* Update the contacts as things will probably have changed */
	ao2_cleanup(contacts);
	contacts = ast_sip_location_retrieve_aor_contacts(task_data->aor);

	/* Send a response containing all of the contacts (including static) that are present on this AOR */
	if (pjsip_endpt_create_response(ast_sip_get_pjsip_endpoint(), task_data->rdata, 200, NULL, &tdata) != PJ_SUCCESS) {
		return PJ_TRUE;
	}

	/* Add the date header to the response, some UAs use this to set their date and time */
	registrar_add_date_header(tdata);

	ao2_callback(contacts, 0, registrar_add_contact, tdata);

	if (pjsip_get_response_addr(tdata->pool, task_data->rdata, &addr) == PJ_SUCCESS) {
		pjsip_endpt_send_response(ast_sip_get_pjsip_endpoint(), &addr, tdata, NULL, NULL);
	} else {
		pjsip_tx_data_dec_ref(tdata);
	}

	return PJ_TRUE;
}
Example 20
/*
 * Main test entry.
 */
int resolve_test(void)
{
    pj_pool_t *pool;
    pj_dns_resolver *resv;
    pj_str_t nameserver;
    pj_uint16_t port = 5353;
    pj_status_t status;

    pool = pjsip_endpt_create_pool(endpt, NULL, 4000, 4000);

    status = pjsip_endpt_create_resolver(endpt, &resv);

    nameserver = pj_str("192.168.0.106");
    pj_dns_resolver_set_ns(resv, 1, &nameserver, &port);
    pjsip_endpt_set_resolver(endpt, resv);

    add_dns_entries(resv);

    /* These all should be resolved as IP addresses (DNS A query) */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_UDP, "1.1.1.1", 5060);
	status = test_resolve("IP address without transport and port", pool, PJSIP_TRANSPORT_UNSPECIFIED, "1.1.1.1", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -100;
    }
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_UDP, "1.1.1.1", 5060);
	status = test_resolve("IP address with explicit port", pool, PJSIP_TRANSPORT_UNSPECIFIED, "1.1.1.1", 5060, &ref);
	if (status != PJ_SUCCESS)
	    return -110;
    }
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_TCP, "1.1.1.1", 5060);
	status = test_resolve("IP address without port (TCP)", pool, PJSIP_TRANSPORT_TCP,"1.1.1.1", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -120;
    }
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_TLS, "1.1.1.1", 5061);
	status = test_resolve("IP address without port (TLS)", pool, PJSIP_TRANSPORT_TLS, "1.1.1.1", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -130;
    }

    /* This should be resolved as DNS A record (because port is present) */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_UDP, "5.5.5.5", 5060);
	status = test_resolve("domain name with port should resolve to A record", pool, PJSIP_TRANSPORT_UNSPECIFIED, "example.com", 5060, &ref);
	if (status != PJ_SUCCESS)
	    return -140;
    }

    /* This will fail to be resolved as SRV, resolver should fallback to 
     * resolving to A record.
     */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_UDP, "2.2.2.2", 5060);
	status = test_resolve("failure with SRV fallback to A record", pool, PJSIP_TRANSPORT_UNSPECIFIED, "sip02.example.com", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -150;
    }

    /* Same as above, but explicitly for TLS. */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_TLS, "2.2.2.2", 5061);
	status = test_resolve("failure with SRV fallback to A record (for TLS)", pool, PJSIP_TRANSPORT_TLS, "sip02.example.com", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -150;
    }

    /* Standard DNS SRV followed by A resolution */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_UDP, "6.6.6.6", 50060);
	status = test_resolve("standard SRV resolution", pool, PJSIP_TRANSPORT_UNSPECIFIED, "domain.com", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -155;
    }

    /* Standard DNS SRV followed by A resolution (explicit transport) */
    {
	pjsip_server_addresses ref;
	create_ref(&ref, PJSIP_TRANSPORT_TCP, "6.6.6.6", 50060);
	add_ref(&ref, PJSIP_TRANSPORT_TCP, "7.7.7.7", 50060);
	status = test_resolve("standard SRV resolution with explicit transport (TCP)", pool, PJSIP_TRANSPORT_TCP, "domain.com", 0, &ref);
	if (status != PJ_SUCCESS)
	    return -160;
    }


    /* Round robin/load balance test */
    if (round_robin_test(pool) != 0)
	return -170;

    /* Timeout test */
    {
	status = test_resolve("timeout test", pool, PJSIP_TRANSPORT_UNSPECIFIED, "an.invalid.address", 0, NULL);
	if (status == PJ_SUCCESS)
	    return -150;
    }

    return 0;
}
Example 21
/* Start loop transport. */
PJ_DEF(pj_status_t) pjsip_loop_start( pjsip_endpoint *endpt,
				      pjsip_transport **transport)
{
    pj_pool_t *pool;
    struct loop_transport *loop;
    pj_status_t status;

    /* Create pool. */
    pool = pjsip_endpt_create_pool(endpt, "loop", 4000, 4000);
    if (!pool)
	return PJ_ENOMEM;

    /* Create the loop structure. */
    loop = pj_pool_zalloc(pool, sizeof(struct loop_transport));
    
    /* Initialize transport properties. */
    pj_ansi_snprintf(loop->base.obj_name, sizeof(loop->base.obj_name), 
		     "loop%p", loop);
    loop->base.pool = pool;
    status = pj_atomic_create(pool, 0, &loop->base.ref_cnt);
    if (status != PJ_SUCCESS)
	goto on_error;
    status = pj_lock_create_recursive_mutex(pool, "loop", &loop->base.lock);
    if (status != PJ_SUCCESS)
	goto on_error;
    loop->base.key.type = PJSIP_TRANSPORT_LOOP_DGRAM;
    //loop->base.key.rem_addr.sa_family = PJ_AF_INET;
    loop->base.type_name = "LOOP-DGRAM";
    loop->base.info = "LOOP-DGRAM";
    loop->base.flag = PJSIP_TRANSPORT_DATAGRAM;
    loop->base.local_name.host = pj_str(ADDR_LOOP_DGRAM);
    loop->base.local_name.port = 
	pjsip_transport_get_default_port_for_type(loop->base.key.type);
    loop->base.addr_len = sizeof(pj_sockaddr_in);
    loop->base.endpt = endpt;
    loop->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    loop->base.send_msg = &loop_send_msg;
    loop->base.destroy = &loop_destroy;

    pj_list_init(&loop->recv_list);
    pj_list_init(&loop->send_list);

    /* Create worker thread. */
    status = pj_thread_create(pool, "loop", 
			      &loop_transport_worker_thread, loop, 0, 
			      PJ_THREAD_SUSPENDED, &loop->thread);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Register to transport manager. */
    status = pjsip_transport_register( loop->base.tpmgr, &loop->base);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Start the thread. */
    status = pj_thread_resume(loop->thread);
    if (status != PJ_SUCCESS)
	goto on_error;

    /*
     * Done.
     */

    if (transport)
	*transport = &loop->base;

    return PJ_SUCCESS;

on_error:
    if (loop->base.lock)
	pj_lock_destroy(loop->base.lock);
    if (loop->thread)
	pj_thread_destroy(loop->thread);
    if (loop->base.ref_cnt)
	pj_atomic_destroy(loop->base.ref_cnt);
    pjsip_endpt_release_pool(endpt, loop->pool);
    return status;
}
Example 22
/*
 * This is the public API to create, initialize, register, and start the
 * TLS listener.
 */
PJ_DEF(pj_status_t) pjsip_tls_transport_start (pjsip_endpoint *endpt,
					       const pjsip_tls_setting *opt,
					       const pj_sockaddr_in *local,
					       const pjsip_host_port *a_name,
					       unsigned async_cnt,
					       pjsip_tpfactory **p_factory)
{
    pj_pool_t *pool;
    struct tls_listener *listener;
    pj_ssl_sock_param ssock_param;
    pj_sockaddr_in *listener_addr;
    pj_bool_t has_listener;
    pj_status_t status;

    /* Sanity check */
    PJ_ASSERT_RETURN(endpt && async_cnt, PJ_EINVAL);

    /* Verify that address given in a_name (if any) is valid */
    if (a_name && a_name->host.slen) {
	pj_sockaddr_in tmp;

	status = pj_sockaddr_in_init(&tmp, &a_name->host, 
				     (pj_uint16_t)a_name->port);
	if (status != PJ_SUCCESS || tmp.sin_addr.s_addr == PJ_INADDR_ANY ||
	    tmp.sin_addr.s_addr == PJ_INADDR_NONE)
	{
	    /* Invalid address */
	    return PJ_EINVAL;
	}
    }

    pool = pjsip_endpt_create_pool(endpt, "tlslis", POOL_LIS_INIT, 
				   POOL_LIS_INC);
    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);

    listener = PJ_POOL_ZALLOC_T(pool, struct tls_listener);
    listener->factory.pool = pool;
    listener->factory.type = PJSIP_TRANSPORT_TLS;
    listener->factory.type_name = "tls";
    listener->factory.flag = 
	pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TLS);

    pj_ansi_strcpy(listener->factory.obj_name, "tlslis");

    if (opt)
	pjsip_tls_setting_copy(pool, &listener->tls_setting, opt);
    else
	pjsip_tls_setting_default(&listener->tls_setting);

    status = pj_lock_create_recursive_mutex(pool, "tlslis", 
					    &listener->factory.lock);
    if (status != PJ_SUCCESS)
	goto on_error;

    if (async_cnt > MAX_ASYNC_CNT) 
	async_cnt = MAX_ASYNC_CNT;

    /* Build SSL socket param */
    pj_ssl_sock_param_default(&ssock_param);
    ssock_param.cb.on_accept_complete = &on_accept_complete;
    ssock_param.cb.on_data_read = &on_data_read;
    ssock_param.cb.on_data_sent = &on_data_sent;
    ssock_param.async_cnt = async_cnt;
    ssock_param.ioqueue = pjsip_endpt_get_ioqueue(endpt);
    ssock_param.require_client_cert = listener->tls_setting.require_client_cert;
    ssock_param.timeout = listener->tls_setting.timeout;
    ssock_param.user_data = listener;
    ssock_param.verify_peer = PJ_FALSE; /* avoid SSL socket closing the socket
					 * due to verification error */
    if (ssock_param.send_buffer_size < PJSIP_MAX_PKT_LEN)
	ssock_param.send_buffer_size = PJSIP_MAX_PKT_LEN;
    if (ssock_param.read_buffer_size < PJSIP_MAX_PKT_LEN)
	ssock_param.read_buffer_size = PJSIP_MAX_PKT_LEN;
    ssock_param.ciphers_num = listener->tls_setting.ciphers_num;
    ssock_param.ciphers = listener->tls_setting.ciphers;
    ssock_param.qos_type = listener->tls_setting.qos_type;
    ssock_param.qos_ignore_error = listener->tls_setting.qos_ignore_error;
    pj_memcpy(&ssock_param.qos_params, &listener->tls_setting.qos_params,
	      sizeof(ssock_param.qos_params));

    has_listener = PJ_FALSE;

    switch(listener->tls_setting.method) {
    case PJSIP_TLSV1_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_TLS1;
	break;
    case PJSIP_SSLV2_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL2;
	break;
    case PJSIP_SSLV3_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL3;
	break;
    case PJSIP_SSLV23_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL23;
	break;
    default:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_DEFAULT;
	break;
    }

    /* Create SSL socket */
    status = pj_ssl_sock_create(pool, &ssock_param, &listener->ssock);
    if (status != PJ_SUCCESS)
	goto on_error;

    listener_addr = (pj_sockaddr_in*)&listener->factory.local_addr;
    if (local) {
	pj_sockaddr_cp((pj_sockaddr_t*)listener_addr, 
		       (const pj_sockaddr_t*)local);
    } else {
	pj_sockaddr_in_init(listener_addr, NULL, 0);
    }

    /* Check if certificate/CA list for SSL socket is set */
    if (listener->tls_setting.cert_file.slen ||
	listener->tls_setting.ca_list_file.slen) 
    {
	status = pj_ssl_cert_load_from_files(pool,
			&listener->tls_setting.ca_list_file,
			&listener->tls_setting.cert_file,
			&listener->tls_setting.privkey_file,
			&listener->tls_setting.password,
			&listener->cert);
	if (status != PJ_SUCCESS)
	    goto on_error;

	status = pj_ssl_sock_set_certificate(listener->ssock, pool, 
					     listener->cert);
	if (status != PJ_SUCCESS)
	    goto on_error;
    }

    /* Start accepting incoming connections. Note that some TLS/SSL backends
     * may not support acting as an SSL socket server.
     */
    has_listener = PJ_FALSE;

    status = pj_ssl_sock_start_accept(listener->ssock, pool, 
			  (pj_sockaddr_t*)listener_addr, 
			  pj_sockaddr_get_len((pj_sockaddr_t*)listener_addr));
    if (status == PJ_SUCCESS || status == PJ_EPENDING) {
	pj_ssl_sock_info info;
	has_listener = PJ_TRUE;

	/* Retrieve the bound address */
	status = pj_ssl_sock_get_info(listener->ssock, &info);
	if (status == PJ_SUCCESS)
	    pj_sockaddr_cp(listener_addr, (pj_sockaddr_t*)&info.local_addr);
    } else if (status != PJ_ENOTSUP) {
	goto on_error;
    }

    /* If published host/IP is specified, then use that address as the
     * listener advertised address.
     */
    if (a_name && a_name->host.slen) {
	/* Copy the address */
	listener->factory.addr_name = *a_name;
	pj_strdup(listener->factory.pool, &listener->factory.addr_name.host, 
		  &a_name->host);
	listener->factory.addr_name.port = a_name->port;

    } else {
	/* No published address is given, use the bound address */

	/* If the address returns 0.0.0.0, use the default
	 * interface address as the transport's address.
	 */
	if (listener_addr->sin_addr.s_addr == 0) {
	    pj_sockaddr hostip;

	    status = pj_gethostip(pj_AF_INET(), &hostip);
	    if (status != PJ_SUCCESS)
		goto on_error;

	    listener_addr->sin_addr.s_addr = hostip.ipv4.sin_addr.s_addr;
	}

	/* Save the address name */
	sockaddr_to_host_port(listener->factory.pool, 
			      &listener->factory.addr_name, listener_addr);
    }

    /* If port is zero, get the bound port */
    if (listener->factory.addr_name.port == 0) {
	listener->factory.addr_name.port = pj_ntohs(listener_addr->sin_port);
    }

    pj_ansi_snprintf(listener->factory.obj_name, 
		     sizeof(listener->factory.obj_name),
		     "tlslis:%d",  listener->factory.addr_name.port);

    /* Register to transport manager */
    listener->endpt = endpt;
    listener->tpmgr = pjsip_endpt_get_tpmgr(endpt);
    listener->factory.create_transport2 = lis_create_transport;
    listener->factory.destroy = lis_destroy;
    listener->is_registered = PJ_TRUE;
    status = pjsip_tpmgr_register_tpfactory(listener->tpmgr,
					    &listener->factory);
    if (status != PJ_SUCCESS) {
	listener->is_registered = PJ_FALSE;
	goto on_error;
    }

    if (has_listener) {
	PJ_LOG(4,(listener->factory.obj_name, 
		 "SIP TLS listener is ready for incoming connections "
		 "at %.*s:%d",
		 (int)listener->factory.addr_name.host.slen,
		 listener->factory.addr_name.host.ptr,
		 listener->factory.addr_name.port));
    } else {
	PJ_LOG(4,(listener->factory.obj_name, "SIP TLS is ready "
		  "(client only)"));
    }

    /* Return the pointer to user */
    if (p_factory) *p_factory = &listener->factory;

    return PJ_SUCCESS;

on_error:
    lis_destroy(&listener->factory);
    return status;
}
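A corresponding caller sketch for the TLS listener, assuming an initialized endpt and default TLS settings; the port is a placeholder and no certificate files are configured:

static pj_status_t start_tls_listener_sketch(pjsip_endpoint *endpt)
{
    pjsip_tls_setting tls_opt;
    pj_sockaddr_in local;
    pjsip_tpfactory *factory;

    /* Default TLS settings; certificate/CA files could be set in tls_opt. */
    pjsip_tls_setting_default(&tls_opt);

    /* Bind to any local address on port 5061 (placeholder). */
    pj_sockaddr_in_init(&local, NULL, 5061);

    return pjsip_tls_transport_start(endpt, &tls_opt, &local, NULL,
                                     1, &factory);
}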
Example 23
/*
 * pjsip_udp_transport_attach()
 *
 * Attach UDP socket and start transport.
 */
static pj_status_t transport_attach( pjsip_endpoint *endpt,
				     pjsip_transport_type_e type,
				     pj_sock_t sock,
				     const pjsip_host_port *a_name,
				     unsigned async_cnt,
				     pjsip_transport **p_transport)
{
    pj_pool_t *pool;
    struct udp_transport *tp;
    const char *format, *ipv6_quoteb, *ipv6_quotee;
    unsigned i;
    pj_status_t status;

    PJ_ASSERT_RETURN(endpt && sock!=PJ_INVALID_SOCKET && a_name && async_cnt>0,
		     PJ_EINVAL);

    /* Object name. */
    if (type & PJSIP_TRANSPORT_IPV6) {
	format = "udpv6%p";
	ipv6_quoteb = "[";
	ipv6_quotee = "]";
    } else {
	format = "udp%p";
	ipv6_quoteb = ipv6_quotee = "";
    }

    /* Create pool. */
    pool = pjsip_endpt_create_pool(endpt, format, PJSIP_POOL_LEN_TRANSPORT, 
				   PJSIP_POOL_INC_TRANSPORT);
    if (!pool)
	return PJ_ENOMEM;

    /* Create the UDP transport object. */
    tp = PJ_POOL_ZALLOC_T(pool, struct udp_transport);

    /* Save pool. */
    tp->base.pool = pool;

    pj_memcpy(tp->base.obj_name, pool->obj_name, PJ_MAX_OBJ_NAME);

    /* Init reference counter. */
    status = pj_atomic_create(pool, 0, &tp->base.ref_cnt);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Init lock. */
    status = pj_lock_create_recursive_mutex(pool, pool->obj_name, 
					    &tp->base.lock);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Set type. */
    tp->base.key.type = type;

    /* Remote address is left zero (except the family) */
    tp->base.key.rem_addr.addr.sa_family = (pj_uint16_t)
	((type & PJSIP_TRANSPORT_IPV6) ? pj_AF_INET6() : pj_AF_INET());

    /* Type name. */
    tp->base.type_name = "UDP";

    /* Transport flag */
    tp->base.flag = pjsip_transport_get_flag_from_type(type);


    /* Length of addresses. */
    tp->base.addr_len = sizeof(tp->base.local_addr);

    /* Init local address. */
    status = pj_sock_getsockname(sock, &tp->base.local_addr, 
				 &tp->base.addr_len);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Init remote name. */
    if (type == PJSIP_TRANSPORT_UDP)
	tp->base.remote_name.host = pj_str("0.0.0.0");
    else
	tp->base.remote_name.host = pj_str("::0");
    tp->base.remote_name.port = 0;

    /* Init direction */
    tp->base.dir = PJSIP_TP_DIR_NONE;

    /* Set endpoint. */
    tp->base.endpt = endpt;

    /* Transport manager and timer will be initialized by tpmgr */

    /* Attach socket and assign name. */
    udp_set_socket(tp, sock, a_name);

    /* Register to ioqueue */
    status = register_to_ioqueue(tp);
    if (status != PJ_SUCCESS)
	goto on_error;

    /* Set functions. */
    tp->base.send_msg = &udp_send_msg;
    tp->base.do_shutdown = &udp_shutdown;
    tp->base.destroy = &udp_destroy;

    /* This is a permanent transport, so we initialize the ref count
     * to one so that the transport manager doesn't destroy this
     * transport when there's no user.
     */
    pj_atomic_inc(tp->base.ref_cnt);

    /* Register to transport manager. */
    tp->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
    status = pjsip_transport_register( tp->base.tpmgr, (pjsip_transport*)tp);
    if (status != PJ_SUCCESS)
	goto on_error;


    /* Create rdata and put it in the array. */
    tp->rdata_cnt = 0;
    tp->rdata = (pjsip_rx_data**)
    		pj_pool_calloc(tp->base.pool, async_cnt, 
			       sizeof(pjsip_rx_data*));
    for (i=0; i<async_cnt; ++i) {
	pj_pool_t *rdata_pool = pjsip_endpt_create_pool(endpt, "rtd%p", 
							PJSIP_POOL_RDATA_LEN,
							PJSIP_POOL_RDATA_INC);
	if (!rdata_pool) {
	    pj_atomic_set(tp->base.ref_cnt, 0);
	    pjsip_transport_destroy(&tp->base);
	    return PJ_ENOMEM;
	}

	init_rdata(tp, i, rdata_pool, NULL);
	tp->rdata_cnt++;
    }

    /* Start reading the ioqueue. */
    status = start_async_read(tp);
    if (status != PJ_SUCCESS) {
	pjsip_transport_destroy(&tp->base);
	return status;
    }

    /* Done. */
    if (p_transport)
	*p_transport = &tp->base;
    
    PJ_LOG(4,(tp->base.obj_name, 
	      "SIP %s started, published address is %s%.*s%s:%d",
	      pjsip_transport_get_type_desc((pjsip_transport_type_e)tp->base.key.type),
	      ipv6_quoteb,
	      (int)tp->base.local_name.host.slen,
	      tp->base.local_name.host.ptr,
	      ipv6_quotee,
	      tp->base.local_name.port));

    return PJ_SUCCESS;

on_error:
    udp_destroy((pjsip_transport*)tp);
    return status;
}
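
For context, the attach routine above backs the public pjsip_udp_transport_attach() API. A hedged usage sketch follows; the socket is assumed to be created and bound elsewhere, and the advertised address 192.0.2.10:5060 is a placeholder.

/* Sketch only: hand an already-bound UDP socket to PJSIP and let it
 * create the transport ('sock' and the published address are assumptions).
 */
static pj_status_t attach_existing_udp(pjsip_endpoint *endpt, pj_sock_t sock)
{
    pjsip_host_port addr_name;
    pjsip_transport *udp_tp;

    addr_name.host = pj_str("192.0.2.10");  /* address to advertise in Via/Contact */
    addr_name.port = 5060;

    return pjsip_udp_transport_attach(endpt, sock, &addr_name, 1, &udp_tp);
}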
Example #24
static int uri_benchmark(unsigned *p_parse, unsigned *p_print, unsigned *p_cmp)
{
    unsigned i, loop;
    pj_status_t status = PJ_SUCCESS;
    pj_timestamp zero;
    pj_time_val elapsed;
    pj_highprec_t avg_parse, avg_print, avg_cmp, kbytes;

    pj_bzero(&var, sizeof(var));

    zero.u32.hi = zero.u32.lo = 0;

    var.parse_len = var.print_len = var.cmp_len = 0;
    var.parse_time.u32.hi = var.parse_time.u32.lo = 0;
    var.print_time.u32.hi = var.print_time.u32.lo = 0;
    var.cmp_time.u32.hi = var.cmp_time.u32.lo = 0;
    for (loop=0; loop<LOOP_COUNT; ++loop) {
	for (i=0; i<PJ_ARRAY_SIZE(uri_test_array); ++i) {
	    pj_pool_t *pool;
	    pool = pjsip_endpt_create_pool(endpt, "", POOL_SIZE, POOL_SIZE);
	    status = do_uri_test(pool, &uri_test_array[i]);
	    pjsip_endpt_release_pool(endpt, pool);
	    if (status != PJ_SUCCESS) {
		PJ_LOG(3,(THIS_FILE, "  error %d when testing entry %d",
			  status, i));
		goto on_return;
	    }
	}
    }

    kbytes = var.parse_len;
    pj_highprec_mod(kbytes, 1000000);
    pj_highprec_div(kbytes, 100000);
    elapsed = pj_elapsed_time(&zero, &var.parse_time);
    avg_parse = pj_elapsed_usec(&zero, &var.parse_time);
    pj_highprec_mul(avg_parse, AVERAGE_URL_LEN);
    pj_highprec_div(avg_parse, var.parse_len);
    if (avg_parse == 0)
        avg_parse = 1;
    avg_parse = 1000000 / avg_parse;

    PJ_LOG(3,(THIS_FILE, 
	      "    %u.%u MB of urls parsed in %d.%03ds (avg=%d urls/sec)", 
	      (unsigned)(var.parse_len/1000000), (unsigned)kbytes,
	      elapsed.sec, elapsed.msec,
	      (unsigned)avg_parse));

    *p_parse = (unsigned)avg_parse;

    kbytes = var.print_len;
    pj_highprec_mod(kbytes, 1000000);
    pj_highprec_div(kbytes, 100000);
    elapsed = pj_elapsed_time(&zero, &var.print_time);
    avg_print = pj_elapsed_usec(&zero, &var.print_time);
    pj_highprec_mul(avg_print, AVERAGE_URL_LEN);
    pj_highprec_div(avg_print, var.print_len);
    if (avg_print == 0)
        avg_print = 1;
    avg_print = 1000000 / avg_print;

    PJ_LOG(3,(THIS_FILE, 
	      "    %u.%u MB of urls printed in %d.%03ds (avg=%d urls/sec)", 
	      (unsigned)(var.print_len/1000000), (unsigned)kbytes,
	      elapsed.sec, elapsed.msec,
	      (unsigned)avg_print));

    *p_print = (unsigned)avg_print;

    kbytes = var.cmp_len;
    pj_highprec_mod(kbytes, 1000000);
    pj_highprec_div(kbytes, 100000);
    elapsed = pj_elapsed_time(&zero, &var.cmp_time);
    avg_cmp = pj_elapsed_usec(&zero, &var.cmp_time);
    pj_highprec_mul(avg_cmp, AVERAGE_URL_LEN);
    pj_highprec_div(avg_cmp, var.cmp_len);
    if (avg_cmp == 0)
        avg_cmp = 1;
    avg_cmp = 1000000 / avg_cmp;

    PJ_LOG(3,(THIS_FILE, 
	      "    %u.%u MB of urls compared in %d.%03ds (avg=%d urls/sec)", 
	      (unsigned)(var.cmp_len/1000000), (unsigned)kbytes,
	      elapsed.sec, elapsed.msec,
	      (unsigned)avg_cmp));

    *p_cmp = (unsigned)avg_cmp;

on_return:
    return status;
}
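
The benchmark above creates and releases one endpoint pool per test entry so each iteration starts from a clean allocator. A hedged sketch of that per-iteration pattern, reduced to a single URI parse, is shown below; endpt, the pool sizes, and the error code are assumptions.

/* Sketch only: pool-per-iteration parse, mirroring the loop above. */
static pj_status_t parse_one(pjsip_endpoint *endpt, const char *url)
{
    pj_pool_t *pool = pjsip_endpt_create_pool(endpt, "uri", 4000, 4000);
    char *buf;
    pjsip_uri *uri;
    pj_status_t status = PJ_SUCCESS;

    if (!pool)
        return PJ_ENOMEM;

    /* Make a writable copy of the URL; the parser may keep pointers into
     * this buffer, so allocate it from the same pool.
     */
    buf = (char*) pj_pool_alloc(pool, pj_ansi_strlen(url) + 1);
    pj_ansi_strcpy(buf, url);

    uri = pjsip_parse_uri(pool, buf, pj_ansi_strlen(buf), 0);
    if (uri == NULL)
        status = PJSIP_EINVALIDURI;

    pjsip_endpt_release_pool(endpt, pool);
    return status;
}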
Example #25
/*
 * Common function to create TLS transport, called when pending accept() and
 * pending connect() complete.
 */
static pj_status_t tls_create( struct tls_listener *listener,
			       pj_pool_t *pool,
			       pj_ssl_sock_t *ssock,
			       pj_bool_t is_server,
			       const pj_sockaddr_in *local,
			       const pj_sockaddr_in *remote,
			       const pj_str_t *remote_name,
			       struct tls_transport **p_tls)
{
    struct tls_transport *tls;
    const pj_str_t ka_pkt = PJSIP_TLS_KEEP_ALIVE_DATA;
    pj_status_t status;
    

    PJ_ASSERT_RETURN(listener && ssock && local && remote && p_tls, PJ_EINVAL);


    if (pool == NULL) {
	pool = pjsip_endpt_create_pool(listener->endpt, "tls",
				       POOL_TP_INIT, POOL_TP_INC);
	PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
    }    

    /*
     * Create and initialize basic transport structure.
     */
    tls = PJ_POOL_ZALLOC_T(pool, struct tls_transport);
    tls->is_server = is_server;
    tls->verify_server = listener->tls_setting.verify_server;
    pj_list_init(&tls->delayed_list);
    tls->base.pool = pool;

    pj_ansi_snprintf(tls->base.obj_name, PJ_MAX_OBJ_NAME, 
		     (is_server ? "tlss%p" :"tlsc%p"), tls);

    status = pj_atomic_create(pool, 0, &tls->base.ref_cnt);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    status = pj_lock_create_recursive_mutex(pool, "tls", &tls->base.lock);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    if (remote_name)
	pj_strdup(pool, &tls->remote_name, remote_name);

    tls->base.key.type = PJSIP_TRANSPORT_TLS;
    pj_memcpy(&tls->base.key.rem_addr, remote, sizeof(pj_sockaddr_in));
    tls->base.type_name = "tls";
    tls->base.flag = pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TLS);

    tls->base.info = (char*) pj_pool_alloc(pool, 64);
    pj_ansi_snprintf(tls->base.info, 64, "TLS to %s:%d",
		     pj_inet_ntoa(remote->sin_addr), 
		     (int)pj_ntohs(remote->sin_port));

    tls->base.addr_len = sizeof(pj_sockaddr_in);
    tls->base.dir = is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;
    
    /* Set initial local address */
    if (!pj_sockaddr_has_addr(local)) {
        pj_sockaddr_cp(&tls->base.local_addr,
                       &listener->factory.local_addr);
    } else {
	pj_sockaddr_cp(&tls->base.local_addr, local);
    }
    
    sockaddr_to_host_port(pool, &tls->base.local_name, 
			  (pj_sockaddr_in*)&tls->base.local_addr);
    if (tls->remote_name.slen) {
	tls->base.remote_name.host = tls->remote_name;
	tls->base.remote_name.port = pj_sockaddr_in_get_port(remote);
    } else {
	sockaddr_to_host_port(pool, &tls->base.remote_name, remote);
    }

    tls->base.endpt = listener->endpt;
    tls->base.tpmgr = listener->tpmgr;
    tls->base.send_msg = &tls_send_msg;
    tls->base.do_shutdown = &tls_shutdown;
    tls->base.destroy = &tls_destroy_transport;

    tls->ssock = ssock;

    /* Register transport to transport manager */
    status = pjsip_transport_register(listener->tpmgr, &tls->base);
    if (status != PJ_SUCCESS) {
	goto on_error;
    }

    tls->is_registered = PJ_TRUE;

    /* Initialize keep-alive timer */
    tls->ka_timer.user_data = (void*)tls;
    tls->ka_timer.cb = &tls_keep_alive_timer;
    pj_ioqueue_op_key_init(&tls->ka_op_key.key, sizeof(pj_ioqueue_op_key_t));
    pj_strdup(tls->base.pool, &tls->ka_pkt, &ka_pkt);
    
    /* Done setting up basic transport. */
    *p_tls = tls;

    PJ_LOG(4,(tls->base.obj_name, "TLS %s transport created",
	      (tls->is_server ? "server" : "client")));

    return PJ_SUCCESS;

on_error:
    tls_destroy(&tls->base, status);
    return status;
}
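
Once tls_create() has registered the transport, its lifetime is governed by the reference counter initialized above. The short sketch below illustrates the usual pattern with the public reference-counting calls; the surrounding function is hypothetical.

/* Sketch only: keep a transport alive while it is in use. */
static void use_transport(pjsip_transport *tp)
{
    /* Hold a reference for as long as the transport is needed... */
    pjsip_transport_add_ref(tp);

    /* ...and release it when done; the transport manager destroys the
     * transport after the last reference is dropped (and any idle
     * timeout has elapsed).
     */
    pjsip_transport_dec_ref(tp);
}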
Example #26
static int register_aor_core(pjsip_rx_data *rdata,
	struct ast_sip_endpoint *endpoint,
	struct ast_sip_aor *aor,
	const char *aor_name,
	struct ao2_container *contacts)
{
	static const pj_str_t USER_AGENT = { "User-Agent", 10 };

	int added = 0, updated = 0, deleted = 0;
	pjsip_contact_hdr *contact_hdr = NULL;
	struct registrar_contact_details details = { 0, };
	pjsip_tx_data *tdata;
	RAII_VAR(struct ast_str *, path_str, NULL, ast_free);
	struct ast_sip_contact *response_contact;
	char *user_agent = NULL;
	pjsip_user_agent_hdr *user_agent_hdr;
	pjsip_expires_hdr *expires_hdr;
	pjsip_via_hdr *via_hdr;
	pjsip_via_hdr *via_hdr_last;
	char *via_addr = NULL;
	int via_port = 0;
	pjsip_cid_hdr *call_id_hdr;
	char *call_id = NULL;
	size_t alloc_size;

	/* So we don't count static contacts against max_contacts we prune them out from the container */
	ao2_callback(contacts, OBJ_NODATA | OBJ_UNLINK | OBJ_MULTIPLE, registrar_prune_static, NULL);

	if (registrar_validate_contacts(rdata, contacts, aor, &added, &updated, &deleted)) {
		/* The provided Contact headers do not conform to the specification */
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata, 400, NULL, NULL, NULL);
		ast_sip_report_failed_acl(endpoint, rdata, "registrar_invalid_contacts_provided");
		ast_log(LOG_WARNING, "Failed to validate contacts in REGISTER request from '%s'\n",
				ast_sorcery_object_get_id(endpoint));
		return PJ_TRUE;
	}

	if (registrar_validate_path(rdata, aor, &path_str)) {
		/* Ensure that intervening proxies did not make invalid modifications to the request */
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata, 420, NULL, NULL, NULL);
		ast_log(LOG_WARNING, "Invalid modifications made to REGISTER request from '%s' by intervening proxy\n",
				ast_sorcery_object_get_id(endpoint));
		return PJ_TRUE;
	}

	if ((MAX(added - deleted, 0) + (!aor->remove_existing ? ao2_container_count(contacts) : 0)) > aor->max_contacts) {
		/* Enforce the maximum number of contacts */
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata, 403, NULL, NULL, NULL);
		ast_sip_report_failed_acl(endpoint, rdata, "registrar_attempt_exceeds_maximum_configured_contacts");
		ast_log(LOG_WARNING, "Registration attempt from endpoint '%s' to AOR '%s' will exceed max contacts of %u\n",
				ast_sorcery_object_get_id(endpoint), aor_name, aor->max_contacts);
		return PJ_TRUE;
	}

	if (!(details.pool = pjsip_endpt_create_pool(ast_sip_get_pjsip_endpoint(), "Contact Comparison", 256, 256))) {
		pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata, 500, NULL, NULL, NULL);
		return PJ_TRUE;
	}

	user_agent_hdr = pjsip_msg_find_hdr_by_name(rdata->msg_info.msg, &USER_AGENT, NULL);
	if (user_agent_hdr) {
		alloc_size = pj_strlen(&user_agent_hdr->hvalue) + 1;
		user_agent = ast_alloca(alloc_size);
		ast_copy_pj_str(user_agent, &user_agent_hdr->hvalue, alloc_size);
	}

	/* Find the first Via header */
	via_hdr = via_hdr_last = (pjsip_via_hdr*) pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_VIA, NULL);
	if (via_hdr) {
		/* Find the last Via header */
		while ( (via_hdr = (pjsip_via_hdr*) pjsip_msg_find_hdr(rdata->msg_info.msg,
				PJSIP_H_VIA, via_hdr->next)) != NULL) {
			via_hdr_last = via_hdr;
		}
		alloc_size = pj_strlen(&via_hdr_last->sent_by.host) + 1;
		via_addr = ast_alloca(alloc_size);
		ast_copy_pj_str(via_addr, &via_hdr_last->sent_by.host, alloc_size);
		via_port = via_hdr_last->sent_by.port;
	}

	call_id_hdr = (pjsip_cid_hdr*) pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_CALL_ID, NULL);
	if (call_id_hdr) {
		alloc_size = pj_strlen(&call_id_hdr->id) + 1;
		call_id = ast_alloca(alloc_size);
		ast_copy_pj_str(call_id, &call_id_hdr->id, alloc_size);
	}

	/* Iterate each provided Contact header and add, update, or delete */
	while ((contact_hdr = pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_CONTACT, contact_hdr ? contact_hdr->next : NULL))) {
		int expiration;
		char contact_uri[pjsip_max_url_size];
		RAII_VAR(struct ast_sip_contact *, contact, NULL, ao2_cleanup);

		if (contact_hdr->star) {
			/* A star means to unregister everything, so do so for the possible contacts */
			ao2_callback(contacts, OBJ_NODATA | OBJ_MULTIPLE, registrar_delete_contact, (void *)aor_name);
			break;
		}

		if (!PJSIP_URI_SCHEME_IS_SIP(contact_hdr->uri) && !PJSIP_URI_SCHEME_IS_SIPS(contact_hdr->uri)) {
			/* This registrar only currently supports sip: and sips: URI schemes */
			continue;
		}

		expiration = registrar_get_expiration(aor, contact_hdr, rdata);
		details.uri = pjsip_uri_get_uri(contact_hdr->uri);
		pjsip_uri_print(PJSIP_URI_IN_CONTACT_HDR, details.uri, contact_uri, sizeof(contact_uri));

		if (!(contact = ao2_callback(contacts, OBJ_UNLINK, registrar_find_contact, &details))) {
			/* If they are actually trying to delete a contact that does not exist... be forgiving */
			if (!expiration) {
				ast_verb(3, "Attempted to remove non-existent contact '%s' from AOR '%s' by request\n",
					contact_uri, aor_name);
				continue;
			}

			if (ast_sip_location_add_contact_nolock(aor, contact_uri, ast_tvadd(ast_tvnow(),
				ast_samp2tv(expiration, 1)), path_str ? ast_str_buffer(path_str) : NULL,
					user_agent, via_addr, via_port, call_id, endpoint)) {
				ast_log(LOG_ERROR, "Unable to bind contact '%s' to AOR '%s'\n",
						contact_uri, aor_name);
				continue;
			}

			ast_verb(3, "Added contact '%s' to AOR '%s' with expiration of %d seconds\n",
				contact_uri, aor_name, expiration);
			ast_test_suite_event_notify("AOR_CONTACT_ADDED",
					"Contact: %s\r\n"
					"AOR: %s\r\n"
					"Expiration: %d\r\n"
					"UserAgent: %s",
					contact_uri,
					aor_name,
					expiration,
					user_agent);
		} else if (expiration) {
			struct ast_sip_contact *contact_update;

			contact_update = ast_sorcery_copy(ast_sip_get_sorcery(), contact);
			if (!contact_update) {
				ast_log(LOG_ERROR, "Failed to update contact '%s' expiration time to %d seconds.\n",
					contact->uri, expiration);
				continue;
			}

			contact_update->expiration_time = ast_tvadd(ast_tvnow(), ast_samp2tv(expiration, 1));
			contact_update->qualify_frequency = aor->qualify_frequency;
			contact_update->authenticate_qualify = aor->authenticate_qualify;
			if (path_str) {
				ast_string_field_set(contact_update, path, ast_str_buffer(path_str));
			}
			if (user_agent) {
				ast_string_field_set(contact_update, user_agent, user_agent);
			}
			if (!ast_strlen_zero(ast_config_AST_SYSTEM_NAME)) {
				ast_string_field_set(contact_update, reg_server, ast_config_AST_SYSTEM_NAME);
			}

			if (ast_sip_location_update_contact(contact_update)) {
				ast_log(LOG_ERROR, "Failed to update contact '%s' expiration time to %d seconds.\n",
					contact->uri, expiration);
				ast_sip_location_delete_contact(contact);
				continue;
			}
			ast_debug(3, "Refreshed contact '%s' on AOR '%s' with new expiration of %d seconds\n",
				contact_uri, aor_name, expiration);
			ast_test_suite_event_notify("AOR_CONTACT_REFRESHED",
					"Contact: %s\r\n"
					"AOR: %s\r\n"
					"Expiration: %d\r\n"
					"UserAgent: %s",
					contact_uri,
					aor_name,
					expiration,
					contact_update->user_agent);
			ao2_cleanup(contact_update);
		} else {
			/* We want to report the user agent that was actually in the removed contact */
			ast_sip_location_delete_contact(contact);
			ast_verb(3, "Removed contact '%s' from AOR '%s' due to request\n", contact_uri, aor_name);
			ast_test_suite_event_notify("AOR_CONTACT_REMOVED",
					"Contact: %s\r\n"
					"AOR: %s\r\n"
					"UserAgent: %s",
					contact_uri,
					aor_name,
					contact->user_agent);
		}
	}

	pjsip_endpt_release_pool(ast_sip_get_pjsip_endpoint(), details.pool);

	/* If the AOR is configured to remove any existing contacts that have not been updated/added as a result of this REGISTER
	 * do so
	 */
	if (aor->remove_existing) {
		ao2_callback(contacts, OBJ_NODATA | OBJ_MULTIPLE, registrar_delete_contact, NULL);
	}

	/* Re-retrieve contacts.  Caller will clean up the original container. */
	contacts = ast_sip_location_retrieve_aor_contacts_nolock(aor);
	response_contact = ao2_callback(contacts, 0, NULL, NULL);

	/* Send a response containing all of the contacts (including static) that are present on this AOR */
	if (ast_sip_create_response(rdata, 200, response_contact, &tdata) != PJ_SUCCESS) {
		ao2_cleanup(response_contact);
		ao2_cleanup(contacts);
		return PJ_TRUE;
	}
	ao2_cleanup(response_contact);

	/* Add the date header to the response, some UAs use this to set their date and time */
	registrar_add_date_header(tdata);

	ao2_callback(contacts, 0, registrar_add_contact, tdata);
	ao2_cleanup(contacts);

	if ((expires_hdr = pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_EXPIRES, NULL))) {
		expires_hdr = pjsip_expires_hdr_create(tdata->pool, registrar_get_expiration(aor, NULL, rdata));
		pjsip_msg_add_hdr(tdata->msg, (pjsip_hdr*)expires_hdr);
	}

	ast_sip_send_stateful_response(rdata, tdata, endpoint);

	return PJ_TRUE;
}
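
The registrar above prints every Contact URI into a fixed buffer with pjsip_uri_print() before looking it up or storing it. A hedged, stand-alone sketch of that parse-and-print step follows; the temporary pool, the sample URI, and the log sender name are assumptions.

/* Sketch only: parse a URI from a writable buffer and print it back. */
static void print_contact_uri(pjsip_endpoint *endpt)
{
    pj_pool_t *pool = pjsip_endpt_create_pool(endpt, "uri", 512, 512);
    char input[] = "sip:alice@198.51.100.7:5060";
    char output[256];
    pjsip_uri *uri;
    int len;

    if (!pool)
        return;

    uri = pjsip_parse_uri(pool, input, pj_ansi_strlen(input), 0);
    if (uri) {
        len = pjsip_uri_print(PJSIP_URI_IN_CONTACT_HDR, uri,
                              output, sizeof(output) - 1);
        if (len > 0) {
            output[len] = '\0';
            PJ_LOG(4,("uri", "Contact URI: %s", output));
        }
    }

    pjsip_endpt_release_pool(endpt, pool);
}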
Example #27
/* This callback is called by transport manager for the TLS factory
 * to create outgoing transport to the specified destination.
 */
static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
					pjsip_tpmgr *mgr,
					pjsip_endpoint *endpt,
					const pj_sockaddr *rem_addr,
					int addr_len,
					pjsip_tx_data *tdata,
					pjsip_transport **p_transport)
{
    struct tls_listener *listener;
    struct tls_transport *tls;
    pj_pool_t *pool;
    pj_ssl_sock_t *ssock;
    pj_ssl_sock_param ssock_param;
    pj_sockaddr_in local_addr;
    pj_str_t remote_name;
    pj_status_t status;

    /* Sanity checks */
    PJ_ASSERT_RETURN(factory && mgr && endpt && rem_addr &&
		     addr_len && p_transport, PJ_EINVAL);

    /* Check that address is a sockaddr_in */
    PJ_ASSERT_RETURN(rem_addr->addr.sa_family == pj_AF_INET() &&
		     addr_len == sizeof(pj_sockaddr_in), PJ_EINVAL);


    listener = (struct tls_listener*)factory;

    pool = pjsip_endpt_create_pool(listener->endpt, "tls",
				   POOL_TP_INIT, POOL_TP_INC);
    PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);

    /* Get remote host name from tdata */
    if (tdata)
	remote_name = tdata->dest_info.name;
    else
	pj_bzero(&remote_name, sizeof(remote_name));

    /* Build SSL socket param */
    pj_ssl_sock_param_default(&ssock_param);
    ssock_param.cb.on_connect_complete = &on_connect_complete;
    ssock_param.cb.on_data_read = &on_data_read;
    ssock_param.cb.on_data_sent = &on_data_sent;
    ssock_param.async_cnt = 1;
    ssock_param.ioqueue = pjsip_endpt_get_ioqueue(listener->endpt);
    ssock_param.server_name = remote_name;
    ssock_param.timeout = listener->tls_setting.timeout;
    ssock_param.user_data = NULL; /* pending, must be set later */
    ssock_param.verify_peer = PJ_FALSE; /* avoid SSL socket closing the socket
					 * due to verification error */
    if (ssock_param.send_buffer_size < PJSIP_MAX_PKT_LEN)
	ssock_param.send_buffer_size = PJSIP_MAX_PKT_LEN;
    if (ssock_param.read_buffer_size < PJSIP_MAX_PKT_LEN)
	ssock_param.read_buffer_size = PJSIP_MAX_PKT_LEN;
    ssock_param.ciphers_num = listener->tls_setting.ciphers_num;
    ssock_param.ciphers = listener->tls_setting.ciphers;
    ssock_param.qos_type = listener->tls_setting.qos_type;
    ssock_param.qos_ignore_error = listener->tls_setting.qos_ignore_error;
    pj_memcpy(&ssock_param.qos_params, &listener->tls_setting.qos_params,
	      sizeof(ssock_param.qos_params));

    switch(listener->tls_setting.method) {
    case PJSIP_TLSV1_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_TLS1;
	break;
    case PJSIP_SSLV2_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL2;
	break;
    case PJSIP_SSLV3_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL3;
	break;
    case PJSIP_SSLV23_METHOD:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL23;
	break;
    default:
	ssock_param.proto = PJ_SSL_SOCK_PROTO_DEFAULT;
	break;
    }

    status = pj_ssl_sock_create(pool, &ssock_param, &ssock);
    if (status != PJ_SUCCESS)
	return status;

    /* Apply SSL certificate */
    if (listener->cert) {
	status = pj_ssl_sock_set_certificate(ssock, pool, listener->cert);
	if (status != PJ_SUCCESS)
	    return status;
    }

    /* Initially set bind address to PJ_INADDR_ANY port 0 */
    pj_sockaddr_in_init(&local_addr, NULL, 0);

    /* Create the transport descriptor */
    status = tls_create(listener, pool, ssock, PJ_FALSE, &local_addr, 
			(pj_sockaddr_in*)rem_addr, &remote_name, &tls);
    if (status != PJ_SUCCESS)
	return status;

    /* Set the "pending" SSL socket user data */
    pj_ssl_sock_set_user_data(tls->ssock, tls);

    /* Start asynchronous connect() operation */
    tls->has_pending_connect = PJ_TRUE;
    status = pj_ssl_sock_start_connect(tls->ssock, tls->base.pool, 
				       (pj_sockaddr_t*)&local_addr,
				       (pj_sockaddr_t*)rem_addr,
				       addr_len);
    if (status == PJ_SUCCESS) {
	on_connect_complete(tls->ssock, PJ_SUCCESS);
    } else if (status != PJ_EPENDING) {
	tls_destroy(&tls->base, status);
	return status;
    }

    if (tls->has_pending_connect) {
	pj_ssl_sock_info info;

	/* Update local address, just in case local address currently set is 
	 * different now that asynchronous connect() is started.
	 */

	/* Retrieve the bound address */
	status = pj_ssl_sock_get_info(tls->ssock, &info);
	if (status == PJ_SUCCESS) {
	    pj_uint16_t new_port;

	    new_port = pj_sockaddr_get_port((pj_sockaddr_t*)&info.local_addr);

	    if (pj_sockaddr_has_addr((pj_sockaddr_t*)&info.local_addr)) {
		/* Update sockaddr */
		pj_sockaddr_cp((pj_sockaddr_t*)&tls->base.local_addr,
			       (pj_sockaddr_t*)&info.local_addr);
	    } else if (new_port && new_port != pj_sockaddr_get_port(
					(pj_sockaddr_t*)&tls->base.local_addr))
	    {
		/* Update port only */
		pj_sockaddr_set_port(&tls->base.local_addr, 
				     new_port);
	    }

	    sockaddr_to_host_port(tls->base.pool, &tls->base.local_name,
				  (pj_sockaddr_in*)&tls->base.local_addr);
	}

	PJ_LOG(4,(tls->base.obj_name, 
		  "TLS transport %.*s:%d is connecting to %.*s:%d...",
		  (int)tls->base.local_name.host.slen,
		  tls->base.local_name.host.ptr,
		  tls->base.local_name.port,
		  (int)tls->base.remote_name.host.slen,
		  tls->base.remote_name.host.ptr,
		  tls->base.remote_name.port));
    }

    /* Done */
    *p_transport = &tls->base;

    return PJ_SUCCESS;
}
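
The method switch above reads listener->tls_setting, which the application fills in before starting the TLS factory. A hedged sketch of such a configuration is below; the certificate file names are placeholders, and only fields defined by pjsip_tls_setting are touched.

/* Sketch only: fill a pjsip_tls_setting before pjsip_tls_transport_start(). */
static void fill_tls_setting(pjsip_tls_setting *opt)
{
    pjsip_tls_setting_default(opt);

    opt->method = PJSIP_TLSV1_METHOD;      /* maps to PJ_SSL_SOCK_PROTO_TLS1 above */
    opt->ca_list_file = pj_str("ca.crt");  /* placeholder file names */
    opt->cert_file = pj_str("server.crt");
    opt->privkey_file = pj_str("server.key");
    opt->verify_server = PJ_FALSE;
}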
Example #28
/*
 * main()
 *
 */
int main(int argc, char *argv[])
{
    pj_caching_pool cp;
    pj_thread_t *thread;
    pj_pool_t *pool;
    pj_status_t status;
    
    if (argc != 2) {
	puts("Error: destination URL needed");
	return 0;
    }

    /* Must init PJLIB first: */
    status = pj_init();
    PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1);


    /* Then init PJLIB-UTIL: */
    status = pjlib_util_init();
    PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1);

    /* Must create a pool factory before we can allocate any memory. */
    pj_caching_pool_init(&cp, &pj_pool_factory_default_policy, 0);


    /* Create the endpoint: */
    status = pjsip_endpt_create(&cp.factory, "sipstateless", 
				&sip_endpt);
    PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1);

    /* 
     * Add UDP transport, with hard-coded port 
     */
    {
	pj_sockaddr_in addr;

	addr.sin_family = pj_AF_INET();
	addr.sin_addr.s_addr = 0;
	addr.sin_port = pj_htons(PORT);

	status = pjsip_udp_transport_start( sip_endpt, &addr, NULL, 1, NULL);
	if (status != PJ_SUCCESS) {
	    PJ_LOG(3,(THIS_FILE, 
		      "Error starting UDP transport (port in use?)"));
	    return 1;
	}
    }

    status = pjsip_tsx_layer_init_module(sip_endpt);
    pj_assert(status == PJ_SUCCESS);

    status = pjsip_ua_init_module(sip_endpt, NULL);
    pj_assert(status == PJ_SUCCESS);

    /*
     * Register our module to receive incoming requests.
     */
    status = pjsip_endpt_register_module( sip_endpt, &mod_app);
    PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1);

    pool = pjsip_endpt_create_pool(sip_endpt, "", 1000, 1000);

    status = pj_thread_create(pool, "", &worker_thread, NULL, 0, 0, &thread);
    PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1);

    printf("Destination URL: %s\n", argv[1]);

    for (;;) {
	char line[10];

	if (fgets(line, sizeof(line), stdin) == NULL)
	    break;

	switch (line[0]) {
	case 'm':
	    make_call(argv[1], PJ_FALSE);
	    break;
	case 'M':
	    make_call(argv[1], PJ_TRUE);
	    break;
	case 'r':
	    reinvite(PJ_FALSE);
	    break;
	case 'R':
	    reinvite(PJ_TRUE);
	    break;
	case 'h':
	    hangup();
	    break;
	case 'q':
	    goto on_quit;
	}
    }

on_quit:
    quit_flag = 1;
    pj_thread_join(thread);

    pjsip_endpt_destroy(sip_endpt);
    pj_caching_pool_destroy(&cp);
    pj_shutdown();
    return 0;
}
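
main() above spawns a worker_thread that is not part of this excerpt. A hedged sketch of what such a thread typically does, polling the endpoint until quit_flag is raised, is shown below; treat the exact timeout as an assumption.

/* Sketch only: a typical endpoint-polling worker for the sample above. */
static int worker_thread(void *arg)
{
    PJ_UNUSED_ARG(arg);

    while (!quit_flag) {
        pj_time_val timeout = {0, 500};
        pjsip_endpt_handle_events(sip_endpt, &timeout);
    }

    return 0;
}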
Example #29
int transport_rt_test( pjsip_transport_type_e tp_type,
		       pjsip_transport *ref_tp,
		       char *target_url,
		       int *lost)
{
    enum { THREADS = 4, INTERVAL = 10 };
    int i;
    pj_status_t status;
    pj_pool_t *pool;
    pj_bool_t logger_enabled;

    pj_timestamp zero_time, total_time;
    unsigned usec_rt;
    unsigned total_sent;
    unsigned total_recv;

    PJ_UNUSED_ARG(tp_type);
    PJ_UNUSED_ARG(ref_tp);

    PJ_LOG(3,(THIS_FILE, "  multithreaded round-trip test (%d threads)...",
		  THREADS));
    PJ_LOG(3,(THIS_FILE, "    this will take approx %d seconds, please wait..",
		INTERVAL));

    /* Make sure msg logger is disabled. */
    logger_enabled = msg_logger_set_enabled(0);

    /* Register module (if not yet registered) */
    if (rt_module.id == -1) {
	status = pjsip_endpt_register_module( endpt, &rt_module );
	if (status != PJ_SUCCESS) {
	    app_perror("   error: unable to register module", status);
	    return -600;
	}
    }

    /* Create pool for this test. */
    pool = pjsip_endpt_create_pool(endpt, NULL, 4000, 4000);
    if (!pool)
	return -610;

    /* Initialize static test data. */
    pj_ansi_strcpy(rt_target_uri, target_url);
    rt_call_id = pj_str("RT-Call-Id/");
    rt_stop = PJ_FALSE;

    /* Initialize thread data. */
    for (i=0; i<THREADS; ++i) {
	char buf[1];
	pj_str_t str_id;
	
	pj_strset(&str_id, buf, 1);
	pj_bzero(&rt_test_data[i], sizeof(rt_test_data[i]));

	/* Init timer entry */
	rt_test_data[i].tx_timer.id = i;
	rt_test_data[i].tx_timer.cb = &rt_tx_timer;
	rt_test_data[i].timeout_timer.id = i;
	rt_test_data[i].timeout_timer.cb = &rt_timeout_timer;

	/* Generate Call-ID for each thread. */
	rt_test_data[i].call_id.ptr = (char*) pj_pool_alloc(pool, rt_call_id.slen+1);
	pj_strcpy(&rt_test_data[i].call_id, &rt_call_id);
	buf[0] = '0' + (char)i;
	pj_strcat(&rt_test_data[i].call_id, &str_id);

	/* Init mutex. */
	status = pj_mutex_create_recursive(pool, "rt", &rt_test_data[i].mutex);
	if (status != PJ_SUCCESS) {
	    app_perror("   error: unable to create mutex", status);
	    return -615;
	}

	/* Create thread, suspended. */
	status = pj_thread_create(pool, "rttest%p", &rt_worker_thread, (void*)(long)i, 0,
				  PJ_THREAD_SUSPENDED, &rt_test_data[i].thread);
	if (status != PJ_SUCCESS) {
	    app_perror("   error: unable to create thread", status);
	    return -620;
	}
    }

    /* Start threads! */
    for (i=0; i<THREADS; ++i) {
	pj_time_val delay = {0,0};
	pj_thread_resume(rt_test_data[i].thread);

	/* Schedule first message transmissions. */
	rt_test_data[i].tx_timer.user_data = (void*)1;
	pjsip_endpt_schedule_timer(endpt, &rt_test_data[i].tx_timer, &delay);
    }

    /* Sleep for some time. */
    pj_thread_sleep(INTERVAL * 1000);

    /* Signal thread to stop. */
    rt_stop = PJ_TRUE;

    /* Wait threads to complete. */
    for (i=0; i<THREADS; ++i) {
	pj_thread_join(rt_test_data[i].thread);
	pj_thread_destroy(rt_test_data[i].thread);
    }

    /* Destroy rt_test_data */
    for (i=0; i<THREADS; ++i) {
	pj_mutex_destroy(rt_test_data[i].mutex);
	pjsip_endpt_cancel_timer(endpt, &rt_test_data[i].timeout_timer);
    }

    /* Gather statistics. */
    pj_bzero(&total_time, sizeof(total_time));
    pj_bzero(&zero_time, sizeof(zero_time));
    usec_rt = total_sent = total_recv = 0;
    for (i=0; i<THREADS; ++i) {
	total_sent += rt_test_data[i].sent_request_count;
	total_recv +=  rt_test_data[i].recv_response_count;
	pj_add_timestamp(&total_time, &rt_test_data[i].total_rt_time);
    }

    /* Display statistics. */
    if (total_recv)
	total_time.u64 = total_time.u64/total_recv;
    else
	total_time.u64 = 0;
    usec_rt = pj_elapsed_usec(&zero_time, &total_time);
    PJ_LOG(3,(THIS_FILE, "    done."));
    PJ_LOG(3,(THIS_FILE, "    total %d messages sent", total_sent));
    PJ_LOG(3,(THIS_FILE, "    average round-trip=%d usec", usec_rt));

    pjsip_endpt_release_pool(endpt, pool);

    *lost = total_sent-total_recv;

    /* Flush events. */
    flush_events(500);

    /* Restore msg logger. */
    msg_logger_set_enabled(logger_enabled);

    return 0;
}
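
The round-trip test above drives its traffic from pj_timer_entry callbacks scheduled on the endpoint. A hedged sketch of that scheduling pattern is shown below; the callback body, the one-second delay, and the log sender name are assumptions.

/* Sketch only: initialize and schedule an endpoint timer. */
static void on_example_timer(pj_timer_heap_t *ht, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(4,("timer", "timer %d fired", entry->id));
}

static void schedule_example_timer(pjsip_endpoint *endpt,
                                   pj_timer_entry *entry)
{
    pj_time_val delay = {1, 0};   /* fire after one second */

    pj_timer_entry_init(entry, 0, NULL, &on_example_timer);
    pjsip_endpt_schedule_timer(endpt, entry, &delay);
}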
/*!
 * \brief Create a pjsip transport.
 */
static int transport_create(void *data)
{
	struct transport_create_data *create_data = data;
	struct ws_transport *newtransport = NULL;

	pjsip_endpoint *endpt = ast_sip_get_pjsip_endpoint();
	struct pjsip_tpmgr *tpmgr = pjsip_endpt_get_tpmgr(endpt);

	pj_pool_t *pool;
	pj_str_t buf;
	pj_status_t status;

	newtransport = ao2_t_alloc_options(sizeof(*newtransport), transport_dtor,
			AO2_ALLOC_OPT_LOCK_NOLOCK, "pjsip websocket transport");
	if (!newtransport) {
		ast_log(LOG_ERROR, "Failed to allocate WebSocket transport.\n");
		goto on_error;
	}

	newtransport->transport.endpt = endpt;

	if (!(pool = pjsip_endpt_create_pool(endpt, "ws", 512, 512))) {
		ast_log(LOG_ERROR, "Failed to allocate WebSocket endpoint pool.\n");
		goto on_error;
	}

	newtransport->transport.pool = pool;
	newtransport->ws_session = create_data->ws_session;

	/* Keep the session until transport dies */
	ast_websocket_ref(newtransport->ws_session);

	status = pj_atomic_create(pool, 0, &newtransport->transport.ref_cnt);
	if (status != PJ_SUCCESS) {
		goto on_error;
	}

	status = pj_lock_create_recursive_mutex(pool, pool->obj_name, &newtransport->transport.lock);
	if (status != PJ_SUCCESS) {
		goto on_error;
	}

	pj_sockaddr_parse(pj_AF_UNSPEC(), 0, pj_cstr(&buf, ast_sockaddr_stringify(ast_websocket_remote_address(newtransport->ws_session))), &newtransport->transport.key.rem_addr);
	newtransport->transport.key.rem_addr.addr.sa_family = pj_AF_INET();
	newtransport->transport.key.type = ast_websocket_is_secure(newtransport->ws_session) ? transport_type_wss : transport_type_ws;

	newtransport->transport.addr_len = pj_sockaddr_get_len(&newtransport->transport.key.rem_addr);

	pj_sockaddr_cp(&newtransport->transport.local_addr, &newtransport->transport.key.rem_addr);

	newtransport->transport.local_name.host.ptr = (char *)pj_pool_alloc(pool, newtransport->transport.addr_len+4);
	pj_sockaddr_print(&newtransport->transport.key.rem_addr, newtransport->transport.local_name.host.ptr, newtransport->transport.addr_len+4, 0);
	newtransport->transport.local_name.host.slen = pj_ansi_strlen(newtransport->transport.local_name.host.ptr);
	newtransport->transport.local_name.port = pj_sockaddr_get_port(&newtransport->transport.key.rem_addr);

	newtransport->transport.type_name = (char *)pjsip_transport_get_type_name(newtransport->transport.key.type);
	newtransport->transport.flag = pjsip_transport_get_flag_from_type((pjsip_transport_type_e)newtransport->transport.key.type);
	newtransport->transport.info = (char *)pj_pool_alloc(newtransport->transport.pool, 64);

	newtransport->transport.tpmgr = tpmgr;
	newtransport->transport.send_msg = &ws_send_msg;
	newtransport->transport.destroy = &ws_destroy;

	status = pjsip_transport_register(newtransport->transport.tpmgr,
			(pjsip_transport *)newtransport);
	if (status != PJ_SUCCESS) {
		goto on_error;
	}

	/* Add a reference for pjsip transport manager */
	ao2_ref(newtransport, +1);

	newtransport->rdata.tp_info.transport = &newtransport->transport;
	newtransport->rdata.tp_info.pool = pjsip_endpt_create_pool(endpt, "rtd%p",
		PJSIP_POOL_RDATA_LEN, PJSIP_POOL_RDATA_INC);
	if (!newtransport->rdata.tp_info.pool) {
		ast_log(LOG_ERROR, "Failed to allocate WebSocket rdata.\n");
		pjsip_transport_destroy((pjsip_transport *)newtransport);
		goto on_error;
	}

	create_data->transport = newtransport;
	return 0;

on_error:
	ao2_cleanup(newtransport);
	return -1;
}