Example #1
static void test_loop(long duration)
{
    g_app.wall_clock.sec = 0;
    g_app.wall_clock.msec = 0;

    while (PJ_TIME_VAL_MSEC(g_app.wall_clock) <= duration) {

	/* Run TX tick */
	tx_tick(&g_app.wall_clock);

	/* Run RX tick */
	rx_tick(&g_app.wall_clock);

	/* Increment tick */
	g_app.wall_clock.msec += WALL_CLOCK_TICK;
	pj_time_val_normalize(&g_app.wall_clock);
    }
}
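
The loop above drives a simulated wall clock in fixed WALL_CLOCK_TICK steps: PJ_TIME_VAL_MSEC() flattens the pj_time_val into milliseconds for the comparison, and pj_time_val_normalize() carries any msec overflow into the sec field. Below is a minimal standalone sketch of the same pattern; the 20 ms tick and the 1000 ms duration are illustrative values, not taken from the example.

#include <stdio.h>
#include <pjlib.h>

/* Minimal sketch: advance a pj_time_val in fixed millisecond ticks.
 * TICK_MSEC and the 1000 ms duration are illustrative only.
 */
static void tick_demo(void)
{
    enum { TICK_MSEC = 20 };
    pj_time_val clock = { 0, 0 };

    while (PJ_TIME_VAL_MSEC(clock) <= 1000) {
        clock.msec += TICK_MSEC;
        /* Keep msec < 1000 by carrying the overflow into sec. */
        pj_time_val_normalize(&clock);
    }

    printf("elapsed: %ld.%03ld s\n", clock.sec, clock.msec);
}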
Example #2
/* get frame from mmap */
static pj_status_t vid4lin_stream_get_frame_mmap(vid4lin_stream *stream,
                                                 pjmedia_frame *frame)
{
    struct v4l2_buffer buf;
    pj_time_val time;
    pj_status_t status = PJ_SUCCESS;
    unsigned tmp_idx;

    pj_bzero(&buf, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    status = xioctl(stream->fd, VIDIOC_DQBUF, &buf);
    if (status != PJ_SUCCESS)
	return status;

    if (frame->size < buf.bytesused) {
	/* supplied buffer is too small */
	pj_assert(!"frame buffer is too small for v4l2");
	status = PJ_ETOOSMALL;
	goto on_return;
    }

    time.sec = buf.timestamp.tv_sec;
    time.msec = buf.timestamp.tv_usec / 1000;
    PJ_TIME_VAL_SUB(time, stream->start_time);

    frame->type = PJMEDIA_FRAME_TYPE_VIDEO;
    frame->bit_info = 0;
    frame->size = buf.bytesused;
    frame->timestamp.u64 = PJ_UINT64(1) * PJ_TIME_VAL_MSEC(time) *
			   stream->param.clock_rate / PJ_UINT64(1000);
    pj_memcpy(frame->buf, stream->buffers[buf.index].start, buf.bytesused);

on_return:
    tmp_idx = buf.index;
    pj_bzero(&buf, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = tmp_idx;
    xioctl(stream->fd, VIDIOC_QBUF, &buf);

    return status;
}
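
The timestamp line above converts a capture time (relative to stream start) into a media timestamp by scaling milliseconds to the stream's clock rate, widening to 64 bits first to avoid overflow. A hedged sketch of just that arithmetic; the 90000 Hz clock rate a caller might pass is an assumption, not something taken from the driver code.

#include <pjlib.h>

/* Sketch of the ms -> media-timestamp conversion. The clock rate a caller
 * would pass (e.g. 90000 Hz for video) is an assumption, not taken from
 * the capture code above.
 */
static pj_uint64_t msec_to_media_ts(const pj_time_val *elapsed,
                                    unsigned clock_rate)
{
    /* PJ_UINT64(1) forces 64-bit arithmetic before the multiplication. */
    return PJ_UINT64(1) * PJ_TIME_VAL_MSEC(*elapsed) * clock_rate / 1000;
}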
Example #3
static int sdp_perform_test(pj_pool_factory *pf)
{
    pj_pool_t *pool;
    int inputlen, len;
    pjsdp_session_desc *ses;
    char buf[1500];
    enum { LOOP=1000000 };
    int i;
    pj_time_val start, end;

    printf("Parsing and printing %d SDP messages..\n", LOOP);

    pool = pj_pool_create(pf, "", 4096, 0, NULL);
    inputlen = strlen(sdp[0]);
    pj_gettimeofday(&start);
    for (i=0; i<LOOP; ++i) {
	ses = pjsdp_parse(sdp[0], inputlen, pool);
	len = pjsdp_print(ses, buf, sizeof(buf));
	buf[len] = '\0';
	pj_pool_reset(pool);
    }
    pj_gettimeofday(&end);

    printf("Original:\n%s\n", sdp[0]);
    printf("Parsed:\n%s\n", buf);

    PJ_TIME_VAL_SUB(end, start);
    printf("Time: %ld:%03lds\n", end.sec, end.msec);

    if (end.msec==0 && end.sec==0) end.msec=1;
    printf("Performance: %ld msg/sec\n", LOOP*1000/PJ_TIME_VAL_MSEC(end));
    puts("");

    pj_pool_release(pool);
    return 0;
}
Example #4
/*
 * pj_ioqueue_poll()
 *
 * A few things worth noting:
 *
 *  - we used to invoke only one callback per poll, but that did not work
 *    very well. The reason is that in some situations the write callback
 *    gets called all the time and never gives the read callback a chance
 *    to run. This happens, for example, when the user submits a write
 *    operation from inside the write callback.
 *    As a result, we changed the behaviour so that multiple callbacks
 *    are now called in a single poll. It should still be fast; we just
 *    need to be careful with the ioqueue data structures.
 *
 *  - to guarantee preemptiveness etc., the poll function must strictly
 *    work on an fd_set copy of the ioqueue (not the original one).
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    pj_fd_set_t rfdset, wfdset, xfdset;
    int count, counter;
    pj_ioqueue_key_t *h;
    struct event
    {
        pj_ioqueue_key_t	*key;
        enum ioqueue_event_type  event_type;
    } event[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];

    PJ_ASSERT_RETURN(ioqueue, -PJ_EINVAL);

    /* Lock ioqueue before making fd_set copies */
    pj_lock_acquire(ioqueue->lock);

    /* We will only do select() when there are sockets to be polled.
     * Otherwise select() will return an error.
     */
    if (PJ_FD_COUNT(&ioqueue->rfdset)==0 &&
        PJ_FD_COUNT(&ioqueue->wfdset)==0 
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
        && PJ_FD_COUNT(&ioqueue->xfdset)==0
#endif
	)
    {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	scan_closing_keys(ioqueue);
#endif
	pj_lock_release(ioqueue->lock);
	TRACE__((THIS_FILE, "     poll: no fd is set"));
        if (timeout)
            pj_thread_sleep(PJ_TIME_VAL_MSEC(*timeout));
        return 0;
    }

    /* Copy ioqueue's pj_fd_set_t to local variables. */
    pj_memcpy(&rfdset, &ioqueue->rfdset, sizeof(pj_fd_set_t));
    pj_memcpy(&wfdset, &ioqueue->wfdset, sizeof(pj_fd_set_t));
#if PJ_HAS_TCP
    pj_memcpy(&xfdset, &ioqueue->xfdset, sizeof(pj_fd_set_t));
#else
    PJ_FD_ZERO(&xfdset);
#endif

#if VALIDATE_FD_SET
    validate_sets(ioqueue, &rfdset, &wfdset, &xfdset);
#endif

    /* Unlock ioqueue before select(). */
    pj_lock_release(ioqueue->lock);

    count = pj_sock_select(ioqueue->nfds+1, &rfdset, &wfdset, &xfdset, 
			   timeout);
    
    if (count == 0)
	return 0;
    else if (count < 0)
	return -pj_get_netos_error();
    else if (count > PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL)
        count = PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL;

    /* Scan the descriptor sets for events and add them to the event array,
     * to be processed later in this function. We do this so that events
     * can be processed in parallel without holding the ioqueue lock.
     */
    pj_lock_acquire(ioqueue->lock);

    counter = 0;

    /* Scan for writable sockets first to handle piggy-back data
     * coming with accept().
     */
    h = ioqueue->active_list.next;
    for ( ; h!=&ioqueue->active_list && counter<count; h = h->next) {

	if ( (key_has_pending_write(h) || key_has_pending_connect(h))
	     && PJ_FD_ISSET(h->fd, &wfdset) && !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = WRITEABLE_EVENT;
            ++counter;
        }

        /* Scan for readable socket. */
	if ((key_has_pending_read(h) || key_has_pending_accept(h))
            && PJ_FD_ISSET(h->fd, &rfdset) && !IS_CLOSING(h) &&
	    counter<count)
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = READABLE_EVENT;
            ++counter;
	}

#if PJ_HAS_TCP
        if (key_has_pending_connect(h) && PJ_FD_ISSET(h->fd, &xfdset) &&
	    !IS_CLOSING(h) && counter<count) 
	{
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = EXCEPTION_EVENT;
            ++counter;
        }
#endif
    }

    pj_lock_release(ioqueue->lock);

    count = counter;

    /* Now process all events. The dispatch functions will take care
     * of locking for each key.
     */
    for (counter=0; counter<count; ++counter) {
        switch (event[counter].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, event[counter].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, event[counter].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, event[counter].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(event[counter].key);
#endif
    }


    return count;
}
Example #5
static unsigned dump_media_stat(const char *indent,
				char *buf, unsigned maxlen,
				const pjmedia_rtcp_stat *stat,
				const char *rx_info, const char *tx_info)
{
    char last_update[64];
    char packets[32], bytes[32], ipbytes[32], avg_bps[32], avg_ipbps[32];
    pj_time_val media_duration, now;
    char *p = buf, *end = buf+maxlen;
    int len;

    if (stat->rx.update_cnt == 0)
	strcpy(last_update, "never");
    else {
	pj_gettimeofday(&now);
	PJ_TIME_VAL_SUB(now, stat->rx.update);
	sprintf(last_update, "%02ldh:%02ldm:%02ld.%03lds ago",
		now.sec / 3600,
		(now.sec % 3600) / 60,
		now.sec % 60,
		now.msec);
    }

    pj_gettimeofday(&media_duration);
    PJ_TIME_VAL_SUB(media_duration, stat->start);
    if (PJ_TIME_VAL_MSEC(media_duration) == 0)
	media_duration.msec = 1;

    len = pj_ansi_snprintf(p, end-p,
	   "%s     RX %s last update:%s\n"
	   "%s        total %spkt %sB (%sB +IP hdr) @avg=%sbps/%sbps\n"
	   "%s        pkt loss=%d (%3.1f%%), discrd=%d (%3.1f%%), dup=%d (%2.1f%%), reord=%d (%3.1f%%)\n"
	   "%s              (msec)    min     avg     max     last    dev\n"
	   "%s        loss period: %7.3f %7.3f %7.3f %7.3f %7.3f\n"
	   "%s        jitter     : %7.3f %7.3f %7.3f %7.3f %7.3f\n"
#if defined(PJMEDIA_RTCP_STAT_HAS_RAW_JITTER) && PJMEDIA_RTCP_STAT_HAS_RAW_JITTER!=0
	   "%s        raw jitter : %7.3f %7.3f %7.3f %7.3f %7.3f\n"
#endif
#if defined(PJMEDIA_RTCP_STAT_HAS_IPDV) && PJMEDIA_RTCP_STAT_HAS_IPDV!=0
	   "%s        IPDV       : %7.3f %7.3f %7.3f %7.3f %7.3f\n"
#endif
	   "%s",
	   indent,
	   rx_info? rx_info : "",
	   last_update,

	   indent,
	   good_number(packets, stat->rx.pkt),
	   good_number(bytes, stat->rx.bytes),
	   good_number(ipbytes, stat->rx.bytes + stat->rx.pkt * 40),
	   good_number(avg_bps, (pj_int32_t)((pj_int64_t)stat->rx.bytes * 8 * 1000 / PJ_TIME_VAL_MSEC(media_duration))),
	   good_number(avg_ipbps, (pj_int32_t)(((pj_int64_t)stat->rx.bytes + stat->rx.pkt * 40) * 8 * 1000 / PJ_TIME_VAL_MSEC(media_duration))),
	   indent,
	   stat->rx.loss,
	   (stat->rx.loss? stat->rx.loss * 100.0 / (stat->rx.pkt + stat->rx.loss) : 0),
	   stat->rx.discard,
	   (stat->rx.discard? stat->rx.discard * 100.0 / (stat->rx.pkt + stat->rx.loss) : 0),
	   stat->rx.dup,
	   (stat->rx.dup? stat->rx.dup * 100.0 / (stat->rx.pkt + stat->rx.loss) : 0),
	   stat->rx.reorder,
	   (stat->rx.reorder? stat->rx.reorder * 100.0 / (stat->rx.pkt + stat->rx.loss) : 0),
	   indent, indent,
	   stat->rx.loss_period.min / 1000.0,
	   stat->rx.loss_period.mean / 1000.0,
	   stat->rx.loss_period.max / 1000.0,
	   stat->rx.loss_period.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->rx.loss_period) / 1000.0,
	   indent,
	   stat->rx.jitter.min / 1000.0,
	   stat->rx.jitter.mean / 1000.0,
	   stat->rx.jitter.max / 1000.0,
	   stat->rx.jitter.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->rx.jitter) / 1000.0,
#if defined(PJMEDIA_RTCP_STAT_HAS_RAW_JITTER) && PJMEDIA_RTCP_STAT_HAS_RAW_JITTER!=0
	   indent,
	   stat->rx_raw_jitter.min / 1000.0,
	   stat->rx_raw_jitter.mean / 1000.0,
	   stat->rx_raw_jitter.max / 1000.0,
	   stat->rx_raw_jitter.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->rx_raw_jitter) / 1000.0,
#endif
#if defined(PJMEDIA_RTCP_STAT_HAS_IPDV) && PJMEDIA_RTCP_STAT_HAS_IPDV!=0
	   indent,
	   stat->rx_ipdv.min / 1000.0,
	   stat->rx_ipdv.mean / 1000.0,
	   stat->rx_ipdv.max / 1000.0,
	   stat->rx_ipdv.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->rx_ipdv) / 1000.0,
#endif
	   ""
	   );

    if (len < 1 || len > end-p) {
	*p = '\0';
	return (p-buf);
    }
    p += len;

    if (stat->tx.update_cnt == 0)
	strcpy(last_update, "never");
    else {
	pj_gettimeofday(&now);
	PJ_TIME_VAL_SUB(now, stat->tx.update);
	sprintf(last_update, "%02ldh:%02ldm:%02ld.%03lds ago",
		now.sec / 3600,
		(now.sec % 3600) / 60,
		now.sec % 60,
		now.msec);
    }

    len = pj_ansi_snprintf(p, end-p,
	   "%s     TX %s last update:%s\n"
	   "%s        total %spkt %sB (%sB +IP hdr) @avg=%sbps/%sbps\n"
	   "%s        pkt loss=%d (%3.1f%%), dup=%d (%3.1f%%), reorder=%d (%3.1f%%)\n"
	   "%s              (msec)    min     avg     max     last    dev \n"
	   "%s        loss period: %7.3f %7.3f %7.3f %7.3f %7.3f\n"
	   "%s        jitter     : %7.3f %7.3f %7.3f %7.3f %7.3f\n",
	   indent,
	   tx_info,
	   last_update,

	   indent,
	   good_number(packets, stat->tx.pkt),
	   good_number(bytes, stat->tx.bytes),
	   good_number(ipbytes, stat->tx.bytes + stat->tx.pkt * 40),
	   good_number(avg_bps, (pj_int32_t)((pj_int64_t)stat->tx.bytes * 8 * 1000 / PJ_TIME_VAL_MSEC(media_duration))),
	   good_number(avg_ipbps, (pj_int32_t)(((pj_int64_t)stat->tx.bytes + stat->tx.pkt * 40) * 8 * 1000 / PJ_TIME_VAL_MSEC(media_duration))),

	   indent,
	   stat->tx.loss,
	   (stat->tx.loss? stat->tx.loss * 100.0 / (stat->tx.pkt + stat->tx.loss) : 0),
	   stat->tx.dup,
	   (stat->tx.dup? stat->tx.dup * 100.0 / (stat->tx.pkt + stat->tx.loss) : 0),
	   stat->tx.reorder,
	   (stat->tx.reorder? stat->tx.reorder * 100.0 / (stat->tx.pkt + stat->tx.loss) : 0),

	   indent, indent,
	   stat->tx.loss_period.min / 1000.0,
	   stat->tx.loss_period.mean / 1000.0,
	   stat->tx.loss_period.max / 1000.0,
	   stat->tx.loss_period.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->tx.loss_period) / 1000.0,
	   indent,
	   stat->tx.jitter.min / 1000.0,
	   stat->tx.jitter.mean / 1000.0,
	   stat->tx.jitter.max / 1000.0,
	   stat->tx.jitter.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->tx.jitter) / 1000.0
	   );

    if (len < 1 || len > end-p) {
	*p = '\0';
	return (p-buf);
    }
    p += len;

    len = pj_ansi_snprintf(p, end-p,
	   "%s     RTT msec      : %7.3f %7.3f %7.3f %7.3f %7.3f\n",
	   indent,
	   stat->rtt.min / 1000.0,
	   stat->rtt.mean / 1000.0,
	   stat->rtt.max / 1000.0,
	   stat->rtt.last / 1000.0,
	   pj_math_stat_get_stddev(&stat->rtt) / 1000.0
	   );
    if (len < 1 || len > end-p) {
	*p = '\0';
	return (p-buf);
    }
    p += len;

    return (p-buf);
}
Example #6
/*
 * Transmit message.
 */
static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY);

    if (tsx->require_retransmit) {
	/* Calculate retransmit/timeout delay */
	if (tsx->transmit_count == 0) {
	    tsx->retransmit_time.sec = 0;
	    tsx->retransmit_time.msec = tsx->rto_msec;

	} else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
	    unsigned msec;

	    msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
	    msec <<= 1;
	    tsx->retransmit_time.sec = msec / 1000;
	    tsx->retransmit_time.msec = msec % 1000;

	} else {
	    tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
	    tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
	}

	/* Schedule the timer first, because if send_msg() fails we can
	 * cancel it (whereas if schedule_timer() failed we could not
	 * cancel the transmission).
	 */
	status = pj_timer_heap_schedule(tsx->timer_heap, 
					&tsx->retransmit_timer,
					&tsx->retransmit_time);
	if (status != PJ_SUCCESS) {
	    tsx->retransmit_timer.id = 0;
	    return status;
	}
	tsx->retransmit_timer.id = TIMER_ACTIVE;
    }


    tsx->transmit_count++;

    PJ_LOG(5,(tsx->obj_name, "STUN sending message (transmit count=%d)",
	      tsx->transmit_count));

    /* Send message */
    status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);

    if (status == PJNATH_ESTUNDESTROYED) {
	/* We've been destroyed, don't access the object. */
    } else if (status != PJ_SUCCESS) {
	if (tsx->retransmit_timer.id != 0) {
	    pj_timer_heap_cancel(tsx->timer_heap, 
				 &tsx->retransmit_timer);
	    tsx->retransmit_timer.id = 0;
	}
	stun_perror(tsx, "STUN error sending message", status);
	return status;
    }

    return status;
}
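
The delay calculation above is the STUN retransmission back-off: the first retransmission waits one RTO, each following one doubles the previous delay, and the final transmission falls back to the fixed timeout value. A small hedged sketch of the doubling step; the 500 ms RTO and 7 transmissions are illustrative, not the library's configured defaults.

#include <stdio.h>
#include <pjlib.h>

/* Sketch only: print the doubling retransmit delays. RTO_MSEC and
 * MAX_TRANSMIT are illustrative values, not PJNATH's defaults.
 */
static void backoff_demo(void)
{
    enum { RTO_MSEC = 500, MAX_TRANSMIT = 7 };
    pj_time_val delay = { 0, RTO_MSEC };
    unsigned i;

    for (i = 1; i < MAX_TRANSMIT; ++i) {
        unsigned msec = PJ_TIME_VAL_MSEC(delay);

        printf("retransmit #%u scheduled %u ms after the previous send\n",
               i, msec);

        /* Double the delay for the next retransmission. */
        msec *= 2;
        delay.sec  = msec / 1000;
        delay.msec = msec % 1000;
    }
}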
Example #7
/*
 * Get NTP time.
 */
PJ_DEF(pj_status_t) pjmedia_rtcp_get_ntp_time(const pjmedia_rtcp_session *sess,
					      pjmedia_rtcp_ntp_rec *ntp)
{
/* Seconds between 1900-01-01 to 1970-01-01 */
#define JAN_1970  (2208988800UL)
    pj_timestamp ts;
    pj_status_t status;

    status = pj_get_timestamp(&ts);

    /* Fill up the high 32bit part */
    ntp->hi = (pj_uint32_t)((ts.u64 - sess->ts_base.u64) / sess->ts_freq.u64)
	      + sess->tv_base.sec + JAN_1970;

    /* Calculate seconds fractions */
    ts.u64 = (ts.u64 - sess->ts_base.u64) % sess->ts_freq.u64;
    pj_assert(ts.u64 < sess->ts_freq.u64);
    ts.u64 = (ts.u64 << 32) / sess->ts_freq.u64;

    /* Fill up the low 32bit part */
    ntp->lo = ts.u32.lo;


#if (defined(PJ_WIN32) && PJ_WIN32!=0) || \
    (defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0)

    /* On Win32, since we use QueryPerformanceCounter() as the backend
     * timestamp API, we need to protect against this bug:
     *   Performance counter value may unexpectedly leap forward
     *   http://support.microsoft.com/default.aspx?scid=KB;EN-US;Q274323
     */
    {
	/*
	 * Compare elapsed time reported by timestamp with actual elapsed 
	 * time. If the difference is excessive, then we use the system
	 * time instead.
	 */

	/* MIN_DIFF needs to be large enough so that "normal" diff caused
	 * by system activity or context switch doesn't trigger the time
	 * correction.
	 */
	enum { MIN_DIFF = 400 };

	pj_time_val ts_time, elapsed, diff;

	pj_gettimeofday(&elapsed);

	ts_time.sec = ntp->hi - sess->tv_base.sec - JAN_1970;
	ts_time.msec = (long)(ntp->lo * 1000.0 / 0xFFFFFFFF);

	PJ_TIME_VAL_SUB(elapsed, sess->tv_base);

	if (PJ_TIME_VAL_LT(ts_time, elapsed)) {
	    diff = elapsed;
	    PJ_TIME_VAL_SUB(diff, ts_time);
	} else {
	    diff = ts_time;
	    PJ_TIME_VAL_SUB(diff, elapsed);
	}

	if (PJ_TIME_VAL_MSEC(diff) >= MIN_DIFF) {

	    TRACE_((sess->name, "RTCP NTP timestamp corrected by %d ms",
		    PJ_TIME_VAL_MSEC(diff)));


	    ntp->hi = elapsed.sec + sess->tv_base.sec + JAN_1970;
	    ntp->lo = (elapsed.msec * 65536 / 1000) << 16;
	}

    }
#endif

    return status;
}
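
The Win32 correction path above rebuilds the NTP timestamp from pj_gettimeofday(): the high word holds whole seconds since 1900 and the low word holds the fraction of a second scaled to 2^32, computed as (msec * 65536 / 1000) << 16 so the intermediate value stays within 32-bit arithmetic. A hedged sketch of that fraction step:

#include <pjlib.h>

/* Sketch of the msec -> NTP-fraction step used in the correction path. */
static pj_uint32_t msec_to_ntp_frac(long msec)
{
    /* Conceptually (msec / 1000) * 2^32, done in two 16-bit shifts so the
     * intermediate value fits in 32 bits (999 * 65536 < 2^32).
     */
    return ((pj_uint32_t)msec * 65536 / 1000) << 16;
}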
Example #8
/*
 * Benchmarking IOQueue
 */
static int bench_test(pj_bool_t allow_concur, int bufsize, 
		      int inactive_sock_count)
{
    pj_sock_t ssock=-1, csock=-1;
    pj_sockaddr_in addr;
    pj_pool_t *pool = NULL;
    pj_sock_t *inactive_sock=NULL;
    pj_ioqueue_op_key_t *inactive_read_op;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey, *ckey, *keys[SOCK_INACTIVE_MAX+2];
    pj_timestamp t1, t2, t_elapsed;
    int rc=0, i;    /* i must be signed */
    pj_str_t temp;
    char errbuf[PJ_ERR_MSG_SIZE];

    TRACE__((THIS_FILE, "   bench test %d", inactive_sock_count));

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Allocate sockets for sending and receiving.
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &ssock);
    if (rc == PJ_SUCCESS) {
        rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &csock);
    } else
        csock = PJ_INVALID_SOCKET;
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_sock_socket()", rc);
	goto on_error;
    }

    // Bind server socket.
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    addr.sin_port = pj_htons(PORT);
    if (pj_sock_bind(ssock, &addr, sizeof(addr)))
	goto on_error;

    pj_assert(inactive_sock_count+2 <= PJ_IOQUEUE_MAX_HANDLES);

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_ioqueue_create()", rc);
	goto on_error;
    }

    // Set concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_ioqueue_set_default_concurrency()", rc);
	goto on_error;
    }

    // Allocate inactive sockets, and bind them to some arbitrary address.
    // Then register them to the I/O queue, and start a read operation.
    inactive_sock = (pj_sock_t*)pj_pool_alloc(pool, 
				    inactive_sock_count*sizeof(pj_sock_t));
    inactive_read_op = (pj_ioqueue_op_key_t*)pj_pool_alloc(pool,
                              inactive_sock_count*sizeof(pj_ioqueue_op_key_t));
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    for (i=0; i<inactive_sock_count; ++i) {
        pj_ssize_t bytes;

	rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &inactive_sock[i]);
	if (rc != PJ_SUCCESS || inactive_sock[i] < 0) {
	    app_perror("...error: pj_sock_socket()", rc);
	    goto on_error;
	}
	if ((rc=pj_sock_bind(inactive_sock[i], &addr, sizeof(addr))) != 0) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error: pj_sock_bind()", rc);
	    goto on_error;
	}
	rc = pj_ioqueue_register_sock(pool, ioque, inactive_sock[i], 
			              NULL, &test_cb, &keys[i]);
	if (rc != PJ_SUCCESS) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error(1): pj_ioqueue_register_sock()", rc);
	    PJ_LOG(3,(THIS_FILE, "....i=%d", i));
	    goto on_error;
	}
        bytes = bufsize;
	rc = pj_ioqueue_recv(keys[i], &inactive_read_op[i], recv_buf, &bytes, 0);
	if (rc != PJ_EPENDING) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error: pj_ioqueue_read()", rc);
	    goto on_error;
	}
    }

    // Register the server and client sockets.
    // We put these after the inactive sockets, hopefully so that this
    // represents the worst-case waiting time.
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, 
			          &test_cb, &skey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(2): pj_ioqueue_register_sock()", rc);
	goto on_error;
    }

    rc = pj_ioqueue_register_sock(pool, ioque, csock, NULL, 
			          &test_cb, &ckey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(3): pj_ioqueue_register_sock()", rc);
	goto on_error;
    }

    // Set destination address to send the packet.
    pj_sockaddr_in_init(&addr, pj_cstr(&temp, "127.0.0.1"), PORT);

    // Test loop.
    t_elapsed.u64 = 0;
    for (i=0; i<LOOP; ++i) {
	pj_ssize_t bytes;
        pj_ioqueue_op_key_t read_op, write_op;

	// Randomize send buffer.
	pj_create_random_string(send_buf, bufsize);

	// Start reading on the server side.
        bytes = bufsize;
	rc = pj_ioqueue_recv(skey, &read_op, recv_buf, &bytes, 0);
	if (rc != PJ_EPENDING) {
	    app_perror("...error: pj_ioqueue_read()", rc);
	    break;
	}

	// Start the send on the client side.
        bytes = bufsize;
	rc = pj_ioqueue_sendto(ckey, &write_op, send_buf, &bytes, 0,
			       &addr, sizeof(addr));
	if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
	    app_perror("...error: pj_ioqueue_write()", rc);
	    break;
	}
	if (rc == PJ_SUCCESS) {
	    if (bytes < 0) {
		app_perror("...error: pj_ioqueue_sendto()",(pj_status_t)-bytes);
		break;
	    }
	}

	// Begin time.
	pj_get_timestamp(&t1);

	// Poll the queue until we've got a completion event on the server side.
        callback_read_key = NULL;
        callback_read_size = 0;
	TRACE__((THIS_FILE, "     waiting for key = %p", skey));
	do {
	    pj_time_val timeout = { 1, 0 };
#ifdef PJ_SYMBIAN
	    rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
	    rc = pj_ioqueue_poll(ioque, &timeout);
#endif
	    TRACE__((THIS_FILE, "     poll rc=%d", rc));
	} while (rc >= 0 && callback_read_key != skey);

	// End time.
	pj_get_timestamp(&t2);
	t_elapsed.u64 += (t2.u64 - t1.u64);

	if (rc < 0) {
	    app_perror("   error: pj_ioqueue_poll", -rc);
	    break;
	}

	// Compare recv buffer with send buffer.
	if (callback_read_size != bufsize || 
	    pj_memcmp(send_buf, recv_buf, bufsize)) 
	{
	    rc = -10;
	    PJ_LOG(3,(THIS_FILE, "   error: size/buffer mismatch"));
	    break;
	}

	// Poll until all events are exhausted, before we start the next loop.
	do {
	    pj_time_val timeout = { 0, 10 };
#ifdef PJ_SYMBIAN
	    PJ_UNUSED_ARG(timeout);
	    rc = pj_symbianos_poll(-1, 100);
#else	    
	    rc = pj_ioqueue_poll(ioque, &timeout);
#endif
	} while (rc>0);

	rc = 0;
    }

    // Print results
    if (rc == 0) {
	pj_timestamp tzero;
	pj_uint32_t usec_delay;

	tzero.u32.hi = tzero.u32.lo = 0;
	usec_delay = pj_elapsed_usec( &tzero, &t_elapsed);

	PJ_LOG(3, (THIS_FILE, "...%10d %15d  % 9d", 
	           bufsize, inactive_sock_count, usec_delay));

    } else {
	PJ_LOG(2, (THIS_FILE, "...ERROR rc=%d (buf:%d, fds:%d)", 
			      rc, bufsize, inactive_sock_count+2));
    }

    // Cleaning up.
    for (i=inactive_sock_count-1; i>=0; --i) {
	pj_ioqueue_unregister(keys[i]);
    }

    pj_ioqueue_unregister(skey);
    pj_ioqueue_unregister(ckey);


    pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return rc;

on_error:
    PJ_LOG(1,(THIS_FILE, "...ERROR: %s", 
	      pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
    if (ssock)
	pj_sock_close(ssock);
    if (csock)
	pj_sock_close(csock);
    for (i=0; i<inactive_sock_count && inactive_sock && 
	      inactive_sock[i]!=PJ_INVALID_SOCKET; ++i) 
    {
	pj_sock_close(inactive_sock[i]);
    }
    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return -1;
}
Example #9
/* 
 * The generic test framework, used by most of the tests. 
 */
static int perform_tsx_test(int dummy, char *target_uri, char *from_uri, 
			    char *branch_param, int test_time, 
			    const pjsip_method *method)
{
    pjsip_tx_data *tdata;
    pjsip_transaction *tsx;
    pj_str_t target, from, tsx_key;
    pjsip_via_hdr *via;
    pj_time_val timeout;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, 
	      "   please standby, this will take at most %d seconds..",
	      test_time));

    /* Reset test. */
    recv_count = 0;
    test_complete = 0;

    /* Init headers. */
    target = pj_str(target_uri);
    from = pj_str(from_uri);

    /* Create request. */
    status = pjsip_endpt_create_request( endpt, method, &target,
					 &from, &target, NULL, NULL, -1, 
					 NULL, &tdata);
    if (status != PJ_SUCCESS) {
	app_perror("   Error: unable to create request", status);
	return -100;
    }

    /* Set the branch param for test 1. */
    via = pjsip_msg_find_hdr(tdata->msg, PJSIP_H_VIA, NULL);
    via->branch_param = pj_str(branch_param);

    /* Add additional reference to tdata to prevent transaction from
     * deleting it.
     */
    pjsip_tx_data_add_ref(tdata);

    /* Create transaction. */
    status = pjsip_tsx_create_uac( &tsx_user, tdata, &tsx);
    if (status != PJ_SUCCESS) {
	app_perror("   Error: unable to create UAC transaction", status);
	pjsip_tx_data_dec_ref(tdata);
	return -110;
    }

    /* Get transaction key. */
    pj_strdup(tdata->pool, &tsx_key, &tsx->transaction_key);

    /* Send the message. */
    status = pjsip_tsx_send_msg(tsx, NULL);
    // Ignore the send result. Some tests deliberately trigger an error
    // when sending the message.
    if (status != PJ_SUCCESS) {
	// app_perror("   Error: unable to send request", status);
        pjsip_tx_data_dec_ref(tdata);
	// return -120;
    }


    /* Set test completion time. */
    pj_gettimeofday(&timeout);
    timeout.sec += test_time;

    /* Wait until test complete. */
    while (!test_complete) {
	pj_time_val now, poll_delay = {0, 10};

	pjsip_endpt_handle_events(endpt, &poll_delay);

	pj_gettimeofday(&now);
	if (now.sec > timeout.sec) {
	    PJ_LOG(3,(THIS_FILE, "   Error: test has timed out"));
	    pjsip_tx_data_dec_ref(tdata);
	    return -130;
	}
    }

    if (test_complete < 0) {
	tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE);
	if (tsx) {
	    pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
	    pj_mutex_unlock(tsx->mutex);
	    flush_events(1000);
	}
	pjsip_tx_data_dec_ref(tdata);
	return test_complete;

    } else {
	pj_time_val now;

	/* Allow transaction to destroy itself */
	flush_events(500);

	/* Wait until test completes */
	pj_gettimeofday(&now);

	if (PJ_TIME_VAL_LT(now, timeout)) {
	    pj_time_val interval;
	    interval = timeout;
	    PJ_TIME_VAL_SUB(interval, now);
	    flush_events(PJ_TIME_VAL_MSEC(interval));
	}
    }

    /* Make sure transaction has been destroyed. */
    if (pjsip_tsx_layer_find_tsx(&tsx_key, PJ_FALSE) != NULL) {
	PJ_LOG(3,(THIS_FILE, "   Error: transaction has not been destroyed"));
	pjsip_tx_data_dec_ref(tdata);
	return -140;
    }

    /* Check tdata reference counter. */
    if (pj_atomic_get(tdata->ref_cnt) != 1) {
	PJ_LOG(3,(THIS_FILE, "   Error: tdata reference counter is %d",
		      pj_atomic_get(tdata->ref_cnt)));
	pjsip_tx_data_dec_ref(tdata);
	return -150;
    }

    /* Destroy txdata */
    pjsip_tx_data_dec_ref(tdata);

    return PJ_SUCCESS;
}
Example #10
/*
 * This function checks for events from both timer and ioqueue (for
 * network events). It is invoked by the worker thread.
 */
static pj_status_t handle_events(struct ice_trans_s* icetrans, unsigned max_msec, unsigned *p_count)
{
    enum { MAX_NET_EVENTS = 1 };
    pj_time_val max_timeout = {0, 0};
    pj_time_val timeout = { 0, 0};
    unsigned count = 0, net_event_count = 0;
    int c;

    max_timeout.msec = max_msec;

    /* Poll the timer to run it and also to retrieve the earliest entry. */
    timeout.sec = timeout.msec = 0;
    c = pj_timer_heap_poll( icetrans->ice_cfg.stun_cfg.timer_heap, &timeout );
    if (c > 0)
        count += c;

    /* timer_heap_poll should never return a negative value; otherwise
     * ioqueue_poll() will block forever!
     */
    pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
    if (timeout.msec >= 1000) timeout.msec = 999;

    /* Compare the caller's maximum timeout with the delay requested by the
     * timer heap, and use the minimum of the two.
     */
    if (PJ_TIME_VAL_GT(timeout, max_timeout))
        timeout = max_timeout;

    /* Poll ioqueue.
     * Repeat polling the ioqueue while we have immediate events, because
     * the timer heap may process more than one event, so if we only
     * process one network event at a time (such as when the IOCP backend
     * is used), the ioqueue may have trouble keeping up with the request
     * rate.
     *
     * For example, for each send() request, one network event will be
     * reported by the ioqueue for the send() completion. If we don't poll
     * the ioqueue often enough, the send() completion will not be
     * reported in a timely manner.
     */
    do {
        c = pj_ioqueue_poll( icetrans->ice_cfg.stun_cfg.ioqueue, &timeout);
        if (c < 0) {
            pj_status_t err = pj_get_netos_error();
            pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
            if (p_count)
                *p_count = count;
            return err;
        } else if (c == 0) {
            break;
        } else {
            net_event_count += c;
            timeout.sec = timeout.msec = 0;
        }
    } while (c > 0 && net_event_count < MAX_NET_EVENTS);

    count += net_event_count;
    if (p_count)
        *p_count = count;

    return PJ_SUCCESS;

}
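
A key detail above is that the ioqueue is never polled with a timeout longer than either the caller's budget (max_msec) or the delay until the earliest timer entry, so pending timers still fire on time. A minimal hedged sketch of that clamping step; the helper name is illustrative and not part of the code above.

#include <pjlib.h>

/* Sketch: clamp a poll timeout to a caller-supplied millisecond budget.
 * The helper name is illustrative only.
 */
static void clamp_poll_timeout(pj_time_val *timeout, unsigned max_msec)
{
    pj_time_val max_timeout = { 0, 0 };

    max_timeout.msec = (long)max_msec;
    pj_time_val_normalize(&max_timeout);    /* in case max_msec >= 1000 */

    if (PJ_TIME_VAL_GT(*timeout, max_timeout))
        *timeout = max_timeout;
}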
int main(int argc, char *argv[])
{
    static char report[1024];

    printf("PJSIP Performance Measurement Tool v%s\n"
           "(c)2006 pjsip.org\n\n",
	   PJ_VERSION);

    if (create_app() != 0)
	return 1;

    if (init_options(argc, argv) != 0)
	return 1;

    if (init_sip() != 0)
	return 1;

    if (init_media() != 0)
	return 1;

    pj_log_set_level(app.log_level);

    if (app.log_level > 4) {
	pjsip_endpt_register_module(app.sip_endpt, &msg_logger);
    }


    /* Misc infos */
    if (app.client.dst_uri.slen != 0) {
	if (app.client.method.id == PJSIP_INVITE_METHOD) {
	    if (app.client.stateless) {
		PJ_LOG(3,(THIS_FILE, 
			  "Info: --stateless option makes no sense for INVITE,"
			  " ignored."));
	    }
	}

    }



    if (app.client.dst_uri.slen) {
	/* Client mode */
	pj_status_t status;
	char test_type[64];
	unsigned msec_req, msec_res;
	unsigned i;

	/* Get the job name */
	if (app.client.method.id == PJSIP_INVITE_METHOD) {
	    pj_ansi_strcpy(test_type, "INVITE calls");
	} else if (app.client.stateless) {
	    pj_ansi_sprintf(test_type, "stateless %.*s requests",
			    (int)app.client.method.name.slen,
			    app.client.method.name.ptr);
	} else {
	    pj_ansi_sprintf(test_type, "stateful %.*s requests",
			    (int)app.client.method.name.slen,
			    app.client.method.name.ptr);
	}
	

	printf("Sending %d %s to '%.*s' with %d maximum outstanding jobs, please wait..\n", 
		  app.client.job_count, test_type,
		  (int)app.client.dst_uri.slen, app.client.dst_uri.ptr,
		  app.client.job_window);

	for (i=0; i<app.thread_count; ++i) {
	    status = pj_thread_create(app.pool, NULL, &client_thread, 
				      (void*)(long)i, 0, 0, &app.thread[i]);
	    if (status != PJ_SUCCESS) {
		app_perror(THIS_FILE, "Unable to create thread", status);
		return 1;
	    }
	}

	for (i=0; i<app.thread_count; ++i) {
	    pj_thread_join(app.thread[i]);
	    app.thread[i] = NULL;
	}

	if (app.client.last_completion.sec) {
	    pj_time_val duration;
	    duration = app.client.last_completion;
	    PJ_TIME_VAL_SUB(duration, app.client.first_request);
	    msec_res = PJ_TIME_VAL_MSEC(duration);
	} else {
	    msec_res = app.client.timeout * 1000;
	}

	if (msec_res == 0) msec_res = 1;

	if (app.client.requests_sent.sec) {
	    pj_time_val duration;
	    duration = app.client.requests_sent;
	    PJ_TIME_VAL_SUB(duration, app.client.first_request);
	    msec_req = PJ_TIME_VAL_MSEC(duration);
	} else {
	    msec_req = app.client.timeout * 1000;
	}

	if (msec_req == 0) msec_req = 1;

	if (app.client.job_submitted < app.client.job_count)
	    puts("\ntimed-out!\n");
	else
	    puts("\ndone.\n");

	pj_ansi_snprintf(
	    report, sizeof(report),
	    "Total %d %s sent in %d ms at rate of %d/sec\n"
	    "Total %d responses receieved in %d ms at rate of %d/sec:",
	    app.client.job_submitted, test_type, msec_req, 
	    app.client.job_submitted * 1000 / msec_req,
	    app.client.total_responses, msec_res,
	    app.client.total_responses*1000/msec_res);
	write_report(report);

	/* Print detailed response code received */
	pj_ansi_sprintf(report, "\nDetailed responses received:");
	write_report(report);

	for (i=0; i<PJ_ARRAY_SIZE(app.client.response_codes); ++i) {
	    const pj_str_t *reason;

	    if (app.client.response_codes[i] == 0)
		continue;

	    reason = pjsip_get_status_text(i);
	    pj_ansi_snprintf( report, sizeof(report),
			      " - %d responses:  %7d     (%.*s)",
			      i, app.client.response_codes[i],
			      (int)reason->slen, reason->ptr);
	    write_report(report);
	}

	/* Total responses and rate */
	pj_ansi_snprintf( report, sizeof(report),
	    "                    ------\n"
	    " TOTAL responses:  %7d (rate=%d/sec)\n",
	    app.client.total_responses, 
	    app.client.total_responses*1000/msec_res);

	write_report(report);

	pj_ansi_sprintf(report, "Maximum outstanding job: %d", 
			app.client.stat_max_window);
	write_report(report);


    } else {
	/* Server mode */
	char s[10], *unused;
	pj_status_t status;
	unsigned i;

	puts("pjsip-perf started in server-mode");

	printf("Receiving requests on the following URIs:\n"
	       "  sip:0@%.*s:%d%s    for stateless handling\n"
	       "  sip:1@%.*s:%d%s    for stateful handling\n"
	       "  sip:2@%.*s:%d%s    for call handling\n",
	       (int)app.local_addr.slen,
	       app.local_addr.ptr,
	       app.local_port,
	       (app.use_tcp ? ";transport=tcp" : ""),
	       (int)app.local_addr.slen,
	       app.local_addr.ptr,
	       app.local_port,
	       (app.use_tcp ? ";transport=tcp" : ""),
	       (int)app.local_addr.slen,
	       app.local_addr.ptr,
	       app.local_port,
	       (app.use_tcp ? ";transport=tcp" : ""));
	printf("INVITE with non-matching user part will be handled call-statefully\n");

	for (i=0; i<app.thread_count; ++i) {
	    status = pj_thread_create(app.pool, NULL, &server_thread, 
				      (void*)(long)i, 0, 0, &app.thread[i]);
	    if (status != PJ_SUCCESS) {
		app_perror(THIS_FILE, "Unable to create thread", status);
		return 1;
	    }
	}

	puts("\nPress <ENTER> to quit\n");
	fflush(stdout);
	unused = fgets(s, sizeof(s), stdin);
	PJ_UNUSED_ARG(unused);

	app.thread_quit = PJ_TRUE;
	for (i=0; i<app.thread_count; ++i) {
	    pj_thread_join(app.thread[i]);
	    app.thread[i] = NULL;
	}

	puts("");
    }


    destroy_app();

    return 0;
}
Example #12
static int sleep_duration_test(void)
{
    enum { MIS = 20};
    unsigned duration[] = { 2000, 1000, 500, 200, 100 };
    unsigned i;
    pj_status_t rc;

    PJ_LOG(3,(THIS_FILE, "..running sleep duration test"));

    /* Test pj_thread_sleep() and pj_gettimeofday() */
    for (i=0; i<PJ_ARRAY_SIZE(duration); ++i) {
        pj_time_val start, stop;
	pj_uint32_t msec;

        /* Mark start of test. */
        rc = pj_gettimeofday(&start);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: pj_gettimeofday()", rc);
            return -10;
        }

        /* Sleep */
        rc = pj_thread_sleep(duration[i]);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: pj_thread_sleep()", rc);
            return -20;
        }

        /* Mark end of test. */
        rc = pj_gettimeofday(&stop);

        /* Calculate duration (store in stop). */
        PJ_TIME_VAL_SUB(stop, start);

	/* Convert to msec. */
	msec = PJ_TIME_VAL_MSEC(stop);

	/* Check if it's within range. */
	if (msec < duration[i] * (100-MIS)/100 ||
	    msec > duration[i] * (100+MIS)/100)
	{
	    PJ_LOG(3,(THIS_FILE, 
		      "...error: slept for %d ms instead of %d ms "
		      "(outside %d%% err window)",
		      msec, duration[i], MIS));
	    return -30;
	}
    }


    /* Test pj_thread_sleep() and pj_get_timestamp() and friends */
    for (i=0; i<PJ_ARRAY_SIZE(duration); ++i) {
	pj_time_val t1, t2;
        pj_timestamp start, stop;
	pj_uint32_t msec;

	pj_thread_sleep(0);

        /* Mark start of test. */
        rc = pj_get_timestamp(&start);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: pj_get_timestamp()", rc);
            return -60;
        }

	/* ..also with gettimeofday() */
	pj_gettimeofday(&t1);

        /* Sleep */
        rc = pj_thread_sleep(duration[i]);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: pj_thread_sleep()", rc);
            return -70;
        }

        /* Mark end of test. */
        pj_get_timestamp(&stop);

	/* ..also with gettimeofday() */
	pj_gettimeofday(&t2);

	/* Compare t1 and t2. */
	if (PJ_TIME_VAL_LT(t2, t1)) {
	    PJ_LOG(3,(THIS_FILE, "...error: t2 is less than t1!!"));
	    return -75;
	}

        /* Get elapsed time in msec */
        msec = pj_elapsed_msec(&start, &stop);

	/* Check if it's within range. */
	if (msec < duration[i] * (100-MIS)/100 ||
	    msec > duration[i] * (100+MIS)/100)
	{
	    PJ_LOG(3,(THIS_FILE, 
		      "...error: slept for %d ms instead of %d ms "
		      "(outside %d%% err window)",
		      msec, duration[i], MIS));
	    PJ_TIME_VAL_SUB(t2, t1);
	    PJ_LOG(3,(THIS_FILE, 
		      "...info: gettimeofday() reported duration is "
		      "%d msec",
		      PJ_TIME_VAL_MSEC(t2)));

	    return -76;
	}
    }

    /* All done. */
    return 0;
}
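
The pass/fail check above accepts the measured sleep when it falls within MIS percent of the requested duration on either side. The same check pulled out as a small hedged helper; the function name is illustrative.

#include <pjlib.h>

/* Sketch: is `measured_msec` within +/- tolerance_pct of `requested_msec`? */
static pj_bool_t within_tolerance(pj_uint32_t measured_msec,
                                  pj_uint32_t requested_msec,
                                  unsigned tolerance_pct)
{
    return measured_msec >= requested_msec * (100 - tolerance_pct) / 100 &&
           measured_msec <= requested_msec * (100 + tolerance_pct) / 100;
}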
Example #13
static pj_status_t krx_ice_handle_events(krx_ice* k, unsigned int maxms, unsigned int* pcount) {

  if(!k) {
    printf("Error: krx_ice_handle_events(), invalid krx_ice pointer.\n");
    return PJ_FALSE;
  }

  printf("lets poll: %p.\n", k);
  
  pj_time_val max_timeout = { 0, 0 };
  pj_time_val timeout = { 0, 0 };
  unsigned int count = 0;
  unsigned int net_event_count = 0;
  int c;

  max_timeout.msec = maxms;
  timeout.sec = timeout.msec = 0;

  /* poll the timer to run it and also retrieve earliest entry */
  c = pj_timer_heap_poll(k->ice_cfg.stun_cfg.timer_heap, &timeout);
  if(c > 0) {
    count += c;
  }
  
  /* timer_heap_poll should never return negative values! */
  if(timeout.sec < 0 || timeout.msec < 0) {
    printf("Error: timer returns negative values. Should never happen.\n");
    exit(1);
  }
  
  if(timeout.msec >= 1000) {
    timeout.msec = 999;
  }

  /* use the minimum timeout value */
  if(PJ_TIME_VAL_GT(timeout, max_timeout)) {
    timeout = max_timeout;
  }


  /* poll ioqueue */
  do {

    c = pj_ioqueue_poll(k->ice_cfg.stun_cfg.ioqueue, &timeout);
    if(c < 0) {
      /* Poll error: sleep for the remaining timeout, report the count and
       * return the error.
       */
      pj_status_t err = pj_get_netos_error();
      pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
      if(pcount) {
        *pcount = count;
      }
      return err;
    }
    else if(c == 0) {
      break;
    }
    else {
      net_event_count += c;
      timeout.sec = timeout.msec = 0;
    }

  } while(c > 0 && net_event_count < 1);

  count += net_event_count;
  if(pcount) {
    *pcount = count;
  }

  return PJ_SUCCESS;
}
static void print_call(int call_index)
{
    struct call *call = &app.call[call_index];
    int len;
    pjsip_inv_session *inv = call->inv;
    pjsip_dialog *dlg = inv->dlg;
    struct media_stream *audio = &call->media[0];
    char userinfo[128];
    char duration[80], last_update[80];
    char bps[16], ipbps[16], packets[16], bytes[16], ipbytes[16];
    unsigned decor;
    pj_time_val now;


    decor = pj_log_get_decor();
    pj_log_set_decor(PJ_LOG_HAS_NEWLINE);

    pj_gettimeofday(&now);

    if (app.report_filename)
	puts(app.report_filename);

    /* Print duration */
    if (inv->state >= PJSIP_INV_STATE_CONFIRMED && call->connect_time.sec) {

	PJ_TIME_VAL_SUB(now, call->connect_time);

	sprintf(duration, " [duration: %02ld:%02ld:%02ld.%03ld]",
		now.sec / 3600,
		(now.sec % 3600) / 60,
		(now.sec % 60),
		now.msec);

    } else {
	duration[0] = '\0';
    }



    /* Call number and state */
    PJ_LOG(3, (THIS_FILE,
	      "Call #%d: %s%s", 
	      call_index, pjsip_inv_state_name(inv->state), 
	      duration));



    /* Call identification */
    len = pjsip_hdr_print_on(dlg->remote.info, userinfo, sizeof(userinfo));
    if (len < 0)
	pj_ansi_strcpy(userinfo, "<--uri too long-->");
    else
	userinfo[len] = '\0';

    PJ_LOG(3, (THIS_FILE, "   %s", userinfo));


    if (call->inv == NULL || call->inv->state < PJSIP_INV_STATE_CONFIRMED ||
	call->connect_time.sec == 0) 
    {
	pj_log_set_decor(decor);
	return;
    }


    /* Signaling quality */
    {
	char pdd[64], connectdelay[64];
	pj_time_val t;

	if (call->response_time.sec) {
	    t = call->response_time;
	    PJ_TIME_VAL_SUB(t, call->start_time);
	    sprintf(pdd, "got 1st response in %ld ms", PJ_TIME_VAL_MSEC(t));
	} else {
	    pdd[0] = '\0';
	}

	if (call->connect_time.sec) {
	    t = call->connect_time;
	    PJ_TIME_VAL_SUB(t, call->start_time);
	    sprintf(connectdelay, ", connected after: %ld ms", 
		    PJ_TIME_VAL_MSEC(t));
	} else {
	    connectdelay[0] = '\0';
	}

	PJ_LOG(3, (THIS_FILE, 
		   "   Signaling quality: %s%s", pdd, connectdelay));
    }


    PJ_LOG(3, (THIS_FILE,
	       "   Stream #0: audio %.*s@%dHz, %dms/frame, %sB/s (%sB/s +IP hdr)",
   	(int)audio->si.fmt.encoding_name.slen,
	audio->si.fmt.encoding_name.ptr,
	audio->clock_rate,
	audio->samples_per_frame * 1000 / audio->clock_rate,
	good_number(bps, audio->bytes_per_frame * audio->clock_rate / audio->samples_per_frame),
	good_number(ipbps, (audio->bytes_per_frame+32) * audio->clock_rate / audio->samples_per_frame)));

    if (audio->rtcp.stat.rx.update_cnt == 0)
	strcpy(last_update, "never");
    else {
	pj_gettimeofday(&now);
	PJ_TIME_VAL_SUB(now, audio->rtcp.stat.rx.update);
	sprintf(last_update, "%02ldh:%02ldm:%02ld.%03lds ago",
		now.sec / 3600,
		(now.sec % 3600) / 60,
		now.sec % 60,
		now.msec);
    }

    PJ_LOG(3, (THIS_FILE, 
	   "              RX stat last update: %s\n"
	   "                 total %s packets %sB received (%sB +IP hdr)%s\n"
	   "                 pkt loss=%d (%3.1f%%), dup=%d (%3.1f%%), reorder=%d (%3.1f%%)%s\n"
	   "                       (msec)    min     avg     max     last\n"
	   "                 loss period: %7.3f %7.3f %7.3f %7.3f%s\n"
	   "                 jitter     : %7.3f %7.3f %7.3f %7.3f%s",
	   last_update,
	   good_number(packets, audio->rtcp.stat.rx.pkt),
	   good_number(bytes, audio->rtcp.stat.rx.bytes),
	   good_number(ipbytes, audio->rtcp.stat.rx.bytes + audio->rtcp.stat.rx.pkt * 32),
	   "",
	   audio->rtcp.stat.rx.loss,
	   audio->rtcp.stat.rx.loss * 100.0 / (audio->rtcp.stat.rx.pkt + audio->rtcp.stat.rx.loss),
	   audio->rtcp.stat.rx.dup, 
	   audio->rtcp.stat.rx.dup * 100.0 / (audio->rtcp.stat.rx.pkt + audio->rtcp.stat.rx.loss),
	   audio->rtcp.stat.rx.reorder, 
	   audio->rtcp.stat.rx.reorder * 100.0 / (audio->rtcp.stat.rx.pkt + audio->rtcp.stat.rx.loss),
	   "",
	   audio->rtcp.stat.rx.loss_period.min / 1000.0, 
	   audio->rtcp.stat.rx.loss_period.mean / 1000.0, 
	   audio->rtcp.stat.rx.loss_period.max / 1000.0,
	   audio->rtcp.stat.rx.loss_period.last / 1000.0,
	   "",
	   audio->rtcp.stat.rx.jitter.min / 1000.0,
	   audio->rtcp.stat.rx.jitter.mean / 1000.0,
	   audio->rtcp.stat.rx.jitter.max / 1000.0,
	   audio->rtcp.stat.rx.jitter.last / 1000.0,
	   ""
	   ));


    if (audio->rtcp.stat.tx.update_cnt == 0)
	strcpy(last_update, "never");
    else {
	pj_gettimeofday(&now);
	PJ_TIME_VAL_SUB(now, audio->rtcp.stat.tx.update);
	sprintf(last_update, "%02ldh:%02ldm:%02ld.%03lds ago",
		now.sec / 3600,
		(now.sec % 3600) / 60,
		now.sec % 60,
		now.msec);
    }

    PJ_LOG(3, (THIS_FILE,
	   "              TX stat last update: %s\n"
	   "                 total %s packets %sB sent (%sB +IP hdr)%s\n"
	   "                 pkt loss=%d (%3.1f%%), dup=%d (%3.1f%%), reorder=%d (%3.1f%%)%s\n"
	   "                       (msec)    min     avg     max     last\n"
	   "                 loss period: %7.3f %7.3f %7.3f %7.3f%s\n"
	   "                 jitter     : %7.3f %7.3f %7.3f %7.3f%s",
	   last_update,
	   good_number(packets, audio->rtcp.stat.tx.pkt),
	   good_number(bytes, audio->rtcp.stat.tx.bytes),
	   good_number(ipbytes, audio->rtcp.stat.tx.bytes + audio->rtcp.stat.tx.pkt * 32),
	   "",
	   audio->rtcp.stat.tx.loss,
	   audio->rtcp.stat.tx.loss * 100.0 / (audio->rtcp.stat.tx.pkt + audio->rtcp.stat.tx.loss),
	   audio->rtcp.stat.tx.dup, 
	   audio->rtcp.stat.tx.dup * 100.0 / (audio->rtcp.stat.tx.pkt + audio->rtcp.stat.tx.loss),
	   audio->rtcp.stat.tx.reorder, 
	   audio->rtcp.stat.tx.reorder * 100.0 / (audio->rtcp.stat.tx.pkt + audio->rtcp.stat.tx.loss),
	   "",
	   audio->rtcp.stat.tx.loss_period.min / 1000.0, 
	   audio->rtcp.stat.tx.loss_period.mean / 1000.0, 
	   audio->rtcp.stat.tx.loss_period.max / 1000.0,
	   audio->rtcp.stat.tx.loss_period.last / 1000.0,
	   "",
	   audio->rtcp.stat.tx.jitter.min / 1000.0,
	   audio->rtcp.stat.tx.jitter.mean / 1000.0,
	   audio->rtcp.stat.tx.jitter.max / 1000.0,
	   audio->rtcp.stat.tx.jitter.last / 1000.0,
	   ""
	   ));


    PJ_LOG(3, (THIS_FILE,
	   "             RTT delay      : %7.3f %7.3f %7.3f %7.3f%s\n", 
	   audio->rtcp.stat.rtt.min / 1000.0,
	   audio->rtcp.stat.rtt.mean / 1000.0,
	   audio->rtcp.stat.rtt.max / 1000.0,
	   audio->rtcp.stat.rtt.last / 1000.0,
	   ""
	   ));

    pj_log_set_decor(decor);
}
Example #15
/*
 * pj_ioqueue_poll()
 *
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    //struct epoll_event *events = ioqueue->events;
    //struct queue *queue = ioqueue->queue;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;
    
    PJ_CHECK_STACK();

    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);
 
    //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec);
    count = os_epoll_wait( ioqueue->epfd, events, PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	/* Check the closing keys only when there's no activity and when
	 * there are pending closing keys.
	 */
	if (!pj_list_empty(&ioqueue->closing_list)) {
	    pj_lock_acquire(ioqueue->lock);
	    scan_closing_keys(ioqueue);
	    pj_lock_release(ioqueue->lock);
	}
#endif
	TRACE_((THIS_FILE, "os_epoll_wait timed out"));
	return count;
    }
    else if (count < 0) {
	TRACE_((THIS_FILE, "os_epoll_wait error"));
	return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
		       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
	pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
				events[i].epoll_data;

	TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

	/*
	 * Check readability.
	 */
	if ((events[i].events & EPOLLIN) && 
	    (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = READABLE_EVENT;
	    ++processed;
	    continue;
	}

	/*
	 * Check for writeability.
	 */
	if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}

#if PJ_HAS_TCP
	/*
	 * Check for completion of connect() operation.
	 */
	if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}
#endif /* PJ_HAS_TCP */

	/*
	 * Check for error condition.
	 */
	if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) {
	    /*
	     * We need to handle this exception event.  If it's related to us
	     * connecting, report it as such.  If not, just report it as a
	     * read event and the higher layers will handle it.
	     */
	    if (h->connecting) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = EXCEPTION_EVENT;
		++processed;
	    } else if (key_has_pending_read(h) || key_has_pending_accept(h)) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = READABLE_EVENT;
		++processed;
	    }
	    continue;
	}
    }
    for (i=0; i<processed; ++i) {
	if (queue[i].key->grp_lock)
	    pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    PJ_RACE_ME(5);

    pj_lock_release(ioqueue->lock);

    PJ_RACE_ME(5);

    /* Now process the events. */
    for (i=0; i<processed; ++i) {
	switch (queue[i].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, queue[i].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, queue[i].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(queue[i].key);
#endif

	if (queue[i].key->grp_lock)
	    pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock,
	                            "ioqueue", 0);
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     */
    if (count > 0 && !processed && msec > 0) {
	pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
		       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
Example #16
/*
 * compliance_test()
 * To test that the basic IOQueue functionality works. It will just exchange
 * data between two sockets.
 */ 
static int compliance_test(pj_bool_t allow_concur)
{
    pj_sock_t ssock=-1, csock=-1;
    pj_sockaddr_in addr, dst_addr;
    int addrlen;
    pj_pool_t *pool = NULL;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey = NULL, *ckey = NULL;
    pj_ioqueue_op_key_t read_op, write_op;
    int bufsize = BUF_MIN_SIZE;
    pj_ssize_t bytes;
    int status = -1;
    pj_str_t temp;
    pj_bool_t send_pending, recv_pending;
    pj_status_t rc;

    pj_set_os_error(PJ_SUCCESS);

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Allocate sockets for sending and receiving.
    TRACE_("creating sockets...");
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &ssock);
    if (rc==PJ_SUCCESS)
        rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &csock);
    else
        csock = PJ_INVALID_SOCKET;
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_socket()", rc);
	status=-1; goto on_error;
    }

    // Bind server socket.
    TRACE_("bind socket...");
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    addr.sin_port = pj_htons(PORT);
    if (pj_sock_bind(ssock, &addr, sizeof(addr))) {
	status=-10; goto on_error;
    }

    // Create I/O Queue.
    TRACE_("create ioqueue...");
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
	status=-20; goto on_error;
    }

    // Set concurrency
    TRACE_("set concurrency...");
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
	status=-21; goto on_error;
    }

    // Register server and client socket.
    // We put this after the inactive sockets, hopefully so that this
    // represents the worst-case waiting time.
    TRACE_("registering first sockets...");
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, 
			          &test_cb, &skey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(10): ioqueue_register error", rc);
	status=-25; goto on_error;
    }
    TRACE_("registering second sockets...");
    rc = pj_ioqueue_register_sock( pool, ioque, csock, NULL, 
			           &test_cb, &ckey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(11): ioqueue_register error", rc);
	status=-26; goto on_error;
    }

    // Randomize send_buf.
    pj_create_random_string(send_buf, bufsize);

    // Register reading from ioqueue.
    TRACE_("start recvfrom...");
    pj_bzero(&addr, sizeof(addr));
    addrlen = sizeof(addr);
    bytes = bufsize;
    rc = pj_ioqueue_recvfrom(skey, &read_op, recv_buf, &bytes, 0,
			     &addr, &addrlen);
    if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
        app_perror("...error: pj_ioqueue_recvfrom", rc);
	status=-28; goto on_error;
    } else if (rc == PJ_EPENDING) {
	recv_pending = 1;
	PJ_LOG(3, (THIS_FILE, 
		   "......ok: recvfrom returned pending"));
    } else {
	PJ_LOG(3, (THIS_FILE, 
		   "......error: recvfrom returned immediate ok!"));
	status=-29; goto on_error;
    }

    // Set destination address to send the packet.
    TRACE_("set destination address...");
    temp = pj_str("127.0.0.1");
    if ((rc=pj_sockaddr_in_init(&dst_addr, &temp, PORT)) != 0) {
	app_perror("...error: unable to resolve 127.0.0.1", rc);
	status=-290; goto on_error;
    }

    // Write must return the number of bytes.
    TRACE_("start sendto...");
    bytes = bufsize;
    rc = pj_ioqueue_sendto(ckey, &write_op, send_buf, &bytes, 0, &dst_addr, 
			   sizeof(dst_addr));
    if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
        app_perror("...error: pj_ioqueue_sendto", rc);
	status=-30; goto on_error;
    } else if (rc == PJ_EPENDING) {
	send_pending = 1;
	PJ_LOG(3, (THIS_FILE, 
		   "......ok: sendto returned pending"));
    } else {
	send_pending = 0;
	PJ_LOG(3, (THIS_FILE, 
		   "......ok: sendto returned immediate success"));
    }

    // reset callback variables.
    callback_read_size = callback_write_size = 0;
    callback_accept_status = callback_connect_status = -2;
    callback_read_key = callback_write_key = 
        callback_accept_key = callback_connect_key = NULL;
    callback_read_op = callback_write_op = NULL;

    // Poll if pending.
    while (send_pending || recv_pending) {
	int rc;
	pj_time_val timeout = { 5, 0 };

	TRACE_("poll...");
#ifdef PJ_SYMBIAN
	rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
	rc = pj_ioqueue_poll(ioque, &timeout);
#endif

	if (rc == 0) {
	    PJ_LOG(1,(THIS_FILE, "...ERROR: timed out..."));
	    status=-45; goto on_error;
        } else if (rc < 0) {
            app_perror("...ERROR in ioqueue_poll()", -rc);
	    status=-50; goto on_error;
	}

	if (callback_read_key != NULL) {
            if (callback_read_size != bufsize) {
                status=-61; goto on_error;
            }
            if (callback_read_key != skey) {
                status=-65; goto on_error;
            }
            if (callback_read_op != &read_op) {
                status=-66; goto on_error;
            }

	    if (pj_memcmp(send_buf, recv_buf, bufsize) != 0) {
		status=-67; goto on_error;
	    }
	    if (addrlen != sizeof(pj_sockaddr_in)) {
		status=-68; goto on_error;
	    }
	    if (addr.sin_family != pj_AF_INET()) {
		status=-69; goto on_error;
	    }


	    recv_pending = 0;
	} 

        if (callback_write_key != NULL) {
            if (callback_write_size != bufsize) {
                status=-73; goto on_error;
            }
            if (callback_write_key != ckey) {
                status=-75; goto on_error;
            }
            if (callback_write_op != &write_op) {
                status=-76; goto on_error;
            }

            send_pending = 0;
	}
    } 
    
    // Success
    status = 0;

on_error:
    if (skey)
    	pj_ioqueue_unregister(skey);
    else if (ssock != -1)
	pj_sock_close(ssock);
    
    if (ckey)
    	pj_ioqueue_unregister(ckey);
    else if (csock != -1)
	pj_sock_close(csock);
    
    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;

}
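
Across this test and the compliance tests below, the polling step is always the same: build a pj_time_val timeout and pass it to pj_ioqueue_poll(), or, on Symbian, collapse it to milliseconds with PJ_TIME_VAL_MSEC for pj_symbianos_poll(). A minimal sketch of that idiom, factored into a hypothetical wait_for_events() helper (the helper name is ours, not part of pjlib):

#include <pjlib.h>

/* Hypothetical helper: poll the ioqueue for up to 'msec' milliseconds.
 * On most platforms the return value is the number of events dispatched
 * by pj_ioqueue_poll(); on Symbian, pj_symbianos_poll() only reports
 * whether anything was processed, which is why the tests above count
 * completions in their callbacks instead.
 */
static int wait_for_events(pj_ioqueue_t *ioque, unsigned msec)
{
    pj_time_val timeout;

    timeout.sec  = msec / 1000;
    timeout.msec = msec % 1000;

#ifdef PJ_SYMBIAN
    PJ_UNUSED_ARG(ioque);
    return pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
    return pj_ioqueue_poll(ioque, &timeout);
#endif
}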
Beispiel #17
0
/*
 * Compliance test for success scenario.
 */
static int compliance_test_0(pj_bool_t allow_concur)
{
    pj_sock_t ssock=-1, csock0=-1, csock1=-1;
    pj_sockaddr_in addr, client_addr, rmt_addr;
    int client_addr_len;
    pj_pool_t *pool = NULL;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey=NULL, *ckey0=NULL, *ckey1=NULL;
    pj_ioqueue_op_key_t accept_op;
    int bufsize = BUF_MIN_SIZE;
    pj_ssize_t status = -1;
    int pending_op = 0;
    pj_timestamp t_elapsed;
    pj_str_t s;
    pj_status_t rc;

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Create server socket and client socket for connecting
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &ssock);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
        status=-1; goto on_error;
    }

    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &csock1);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
	status=-1; goto on_error;
    }

    // Bind server socket.
    pj_sockaddr_in_init(&addr, 0, 0);
    if ((rc=pj_sock_bind(ssock, &addr, sizeof(addr))) != 0 ) {
        app_perror("...bind error", rc);
	status=-10; goto on_error;
    }

    // Get server address.
    client_addr_len = sizeof(addr);
    rc = pj_sock_getsockname(ssock, &addr, &client_addr_len);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_getsockname()", rc);
	status=-15; goto on_error;
    }
    addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_create()", rc);
	status=-20; goto on_error;
    }

    // Concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_set_default_concurrency()", rc);
	status=-21; goto on_error;
    }

    // Register server socket and client socket.
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, &test_cb, &skey);
    if (rc == PJ_SUCCESS)
        rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL, &test_cb, 
                                      &ckey1);
    else
        ckey1 = NULL;
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
	status=-23; goto on_error;
    }

    // Server socket listen().
    if ((rc=pj_sock_listen(ssock, 5)) != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_listen()", rc);
	status=-25; goto on_error;
    }

    // Server socket accept()
    client_addr_len = sizeof(pj_sockaddr_in);
    status = pj_ioqueue_accept(skey, &accept_op, &csock0, 
                               &client_addr, &rmt_addr, &client_addr_len);
    if (status != PJ_EPENDING) {
        app_perror("...ERROR in pj_ioqueue_accept()", rc);
	status=-30; goto on_error;
    }
    if (status==PJ_EPENDING) {
	++pending_op;
    }

    // Client socket connect()
    status = pj_ioqueue_connect(ckey1, &addr, sizeof(addr));
    if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
        app_perror("...ERROR in pj_ioqueue_connect()", rc);
	status=-40; goto on_error;
    }
    if (status==PJ_EPENDING) {
	++pending_op;
    }

    // Poll until connected
    callback_read_size = callback_write_size = 0;
    callback_accept_status = callback_connect_status = -2;
    callback_call_count = 0;

    callback_read_key = callback_write_key = 
        callback_accept_key = callback_connect_key = NULL;
    callback_accept_op = callback_read_op = callback_write_op = NULL;

    while (pending_op) {
	pj_time_val timeout = {1, 0};

#ifdef PJ_SYMBIAN
	callback_call_count = 0;
	pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
	status = callback_call_count;
#else
	status = pj_ioqueue_poll(ioque, &timeout);
#endif
	if (status > 0) {
            if (callback_accept_status != -2) {
                if (callback_accept_status != 0) {
                    status=-41; goto on_error;
                }
                if (callback_accept_key != skey) {
                    status=-42; goto on_error;
                }
                if (callback_accept_op != &accept_op) {
                    status=-43; goto on_error;
                }
                callback_accept_status = -2;
            }

            if (callback_connect_status != -2) {
                if (callback_connect_status != 0) {
                    status=-50; goto on_error;
                }
                if (callback_connect_key != ckey1) {
                    status=-51; goto on_error;
                }
                callback_connect_status = -2;
            }

	    if (status > pending_op) {
		PJ_LOG(3,(THIS_FILE,
			  "...error: pj_ioqueue_poll() returned %d "
			  "(only expecting %d)",
			  status, pending_op));
		return -52;
	    }
	    pending_op -= status;

	    if (pending_op == 0) {
		status = 0;
	    }
	}
    }

    // There's no pending operation.
    // When we poll the ioqueue, there must not be events.
    if (pending_op == 0) {
        pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
	status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
        status = pj_ioqueue_poll(ioque, &timeout);
#endif
        if (status != 0) {
            status=-60; goto on_error;
        }
    }

    // Check accepted socket.
    if (csock0 == PJ_INVALID_SOCKET) {
	status = -69;
        app_perror("...accept() error", pj_get_os_error());
	goto on_error;
    }

    // Register newly accepted socket.
    rc = pj_ioqueue_register_sock(pool, ioque, csock0, NULL, 
                                  &test_cb, &ckey0);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_register_sock", rc);
	status = -70;
	goto on_error;
    }

    // Test send and receive.
    t_elapsed.u32.lo = 0;
    status = send_recv_test(ioque, ckey0, ckey1, send_buf, 
                            recv_buf, bufsize, &t_elapsed);
    if (status != 0) {
	goto on_error;
    }

    // Success
    status = 0;

on_error:
    if (skey != NULL)
    	pj_ioqueue_unregister(skey);
    else if (ssock != PJ_INVALID_SOCKET)
	pj_sock_close(ssock);
    
    if (ckey1 != NULL)
    	pj_ioqueue_unregister(ckey1);
    else if (csock1 != PJ_INVALID_SOCKET)
	pj_sock_close(csock1);
    
    if (ckey0 != NULL)
    	pj_ioqueue_unregister(ckey0);
    else if (csock0 != PJ_INVALID_SOCKET)
	pj_sock_close(csock0);
    
    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;

}
Beispiel #18
0
/*
 * Transmit message.
 */
static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx,
                                    pj_bool_t mod_count)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == TIMER_INACTIVE ||
		     !tsx->require_retransmit, PJ_EBUSY);

    if (tsx->require_retransmit && mod_count) {
	/* Calculate retransmit/timeout delay */
	if (tsx->transmit_count == 0) {
	    /* First transmission: retransmit after the initial RTO. */
	    tsx->retransmit_time.sec = 0;
	    tsx->retransmit_time.msec = tsx->rto_msec;

	} else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
	    /* Subsequent retransmissions: double the previous interval. */
	    unsigned msec;

	    msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
	    msec <<= 1;
	    tsx->retransmit_time.sec = msec / 1000;
	    tsx->retransmit_time.msec = msec % 1000;

	} else {
	    /* No more doubling: wait for the fixed final timeout. */
	    tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
	    tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
	}

	/* Schedule timer first because when send_msg() failed we can
	 * cancel it (as opposed to when schedule_timer() failed we cannot
	 * cancel transmission).
	 */
	status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap,
						   &tsx->retransmit_timer,
						   &tsx->retransmit_time,
						   TIMER_ACTIVE,
						   tsx->grp_lock);
	if (status != PJ_SUCCESS) {
	    tsx->retransmit_timer.id = TIMER_INACTIVE;
	    return status;
	}
    }


    if (mod_count)
        tsx->transmit_count++;

    PJ_LOG(5,(tsx->obj_name, "STUN sending message (transmit count=%d)",
	      tsx->transmit_count));
    pj_log_push_indent();

    /* Send message */
    status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);

    if (status == PJNATH_ESTUNDESTROYED) {
	/* We've been destroyed, don't access the object. */
    } else if (status != PJ_SUCCESS) {
	if (mod_count) {
		pj_timer_heap_cancel_if_active( tsx->timer_heap,
	                               		&tsx->retransmit_timer,
	                               		TIMER_INACTIVE);
	}
	PJ_PERROR(4, (tsx->obj_name, status, "STUN error sending message"));
    }

    pj_log_pop_indent();
    return status;
}
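
The middle branch above is where PJ_TIME_VAL_MSEC earns its keep: the current interval is read out as milliseconds, doubled, and split back into sec/msec, so retransmissions go out at roughly RTO, 2*RTO, 4*RTO, and so on until the fixed final timeout takes over. A standalone sketch of that arithmetic (the helper name and the sample values below are illustrative, not taken from pjnath):

#include <stdio.h>
#include <pjlib.h>

/* Illustrative only: reproduce the doubling used in tsx_transmit_msg()
 * and print the delay scheduled before each retransmission.
 */
static void print_retransmit_schedule(unsigned rto_msec, unsigned max_transmit)
{
    pj_time_val delay;
    unsigned i;

    delay.sec  = 0;
    delay.msec = rto_msec;
    pj_time_val_normalize(&delay);

    for (i = 0; i < max_transmit; ++i) {
        unsigned msec = PJ_TIME_VAL_MSEC(delay);

        printf("retransmit #%u after %u ms\n", i + 1, msec);

        /* Same arithmetic as above: read as msec, double, split back. */
        msec <<= 1;
        delay.sec  = msec / 1000;
        delay.msec = msec % 1000;
    }
}

Called as print_retransmit_schedule(500, 7), for example, this prints 500, 1000, 2000, 4000, 8000, 16000 and 32000 ms.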
Beispiel #19
0
/*
 * Compliance test for failed scenario.
 * In this case, the client connects to a non-existent service.
 */
static int compliance_test_1(pj_bool_t allow_concur)
{
    pj_sock_t csock1=PJ_INVALID_SOCKET;
    pj_sockaddr_in addr;
    pj_pool_t *pool = NULL;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *ckey1 = NULL;
    pj_ssize_t status = -1;
    int pending_op = 0;
    pj_str_t s;
    pj_status_t rc;

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_create()", rc);
	status=-20; goto on_error;
    }

    // Concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
	status=-21; goto on_error;
    }

    // Create client socket
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &csock1);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_socket()", rc);
	status=-1; goto on_error;
    }

    // Register client socket.
    rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL, 
                                  &test_cb, &ckey1);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
	status=-23; goto on_error;
    }

    // Initialize remote address.
    pj_sockaddr_in_init(&addr, pj_cstr(&s, "127.0.0.1"), NON_EXISTANT_PORT);

    // Client socket connect()
    status = pj_ioqueue_connect(ckey1, &addr, sizeof(addr));
    if (status==PJ_SUCCESS) {
	// Unexpected immediate success!
	status = -30;
	goto on_error;
    }
    if (status != PJ_EPENDING) {
	// Connect failed immediately. For this failed-connect scenario
	// that is the expected outcome, so there is nothing to poll for.
    } else {
	++pending_op;
    }

    callback_connect_status = -2;
    callback_connect_key = NULL;

    // Poll until we've got result
    while (pending_op) {
	pj_time_val timeout = {1, 0};

#ifdef PJ_SYMBIAN
	callback_call_count = 0;
	pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
	status = callback_call_count;
#else
	status = pj_ioqueue_poll(ioque, &timeout);
#endif
	if (status > 0) {
            if (callback_connect_key==ckey1) {
		if (callback_connect_status == 0) {
		    // unexpectedly connected!
		    status = -50;
		    goto on_error;
		}
	    }

	    if (status > pending_op) {
		PJ_LOG(3,(THIS_FILE,
			  "...error: pj_ioqueue_poll() returned %d "
			  "(only expecting %d)",
			  status, pending_op));
		return -552;
	    }

	    pending_op -= status;
	    if (pending_op == 0) {
		status = 0;
	    }
	}
    }

    // There's no pending operation.
    // When we poll the ioqueue, there must not be events.
    if (pending_op == 0) {
        pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
	status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
        status = pj_ioqueue_poll(ioque, &timeout);
#endif
        if (status != 0) {
            status=-60; goto on_error;
        }
    }

    // Success
    status = 0;

on_error:
    if (ckey1 != NULL)
    	pj_ioqueue_unregister(ckey1);
    else if (csock1 != PJ_INVALID_SOCKET)
	pj_sock_close(csock1);
    
    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;
}
Beispiel #20
0
long to_msec() const
{
    return PJ_TIME_VAL_MSEC((*this));
}
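
This one-liner from the C++ wrapper simply collapses the sec/msec pair into a single millisecond count. Assuming the usual expansion of the macro (seconds scaled by 1000 plus the millisecond part), the equivalent conversion on a plain pj_time_val looks like this:

#include <pjlib.h>

/* Minimal sketch: convert a pj_time_val to milliseconds both by hand and
 * via PJ_TIME_VAL_MSEC; the two results should agree.
 */
static long time_val_to_msec(pj_time_val t)
{
    long by_hand  = t.sec * 1000 + t.msec;  /* assumed macro expansion */
    long by_macro = PJ_TIME_VAL_MSEC(t);

    pj_assert(by_hand == by_macro);
    return by_macro;
}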
Beispiel #21
0
/*
 * Repeated connect/accept on the same listener socket.
 */
static int compliance_test_2(pj_bool_t allow_concur)
{
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
    enum { MAX_PAIR = 1, TEST_LOOP = 2 };
#else
    enum { MAX_PAIR = 4, TEST_LOOP = 2 };
#endif

    struct listener
    {
	pj_sock_t	     sock;
	pj_ioqueue_key_t    *key;
	pj_sockaddr_in	     addr;
	int		     addr_len;
    } listener;

    struct server
    {
	pj_sock_t	     sock;
	pj_ioqueue_key_t    *key;
	pj_sockaddr_in	     local_addr;
	pj_sockaddr_in	     rem_addr;
	int		     rem_addr_len;
	pj_ioqueue_op_key_t  accept_op;
    } server[MAX_PAIR];

    struct client
    {
	pj_sock_t	     sock;
	pj_ioqueue_key_t    *key;
    } client[MAX_PAIR];

    pj_pool_t *pool = NULL;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    int i, bufsize = BUF_MIN_SIZE;
    pj_ssize_t status;
    int test_loop, pending_op = 0;
    pj_timestamp t_elapsed;
    pj_str_t s;
    pj_status_t rc;

    listener.sock = PJ_INVALID_SOCKET;
    listener.key = NULL;
    
    for (i=0; i<MAX_PAIR; ++i) {
    	server[i].sock = PJ_INVALID_SOCKET;
    	server[i].key = NULL;
    }
    
    for (i=0; i<MAX_PAIR; ++i) {
    	client[i].sock = PJ_INVALID_SOCKET;
    	client[i].key = NULL;	
    }
    
    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);


    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_create()", rc);
	return -10;
    }


    // Concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_set_default_concurrency()", rc);
	return -11;
    }

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Create listener socket
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &listener.sock);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
        status=-20; goto on_error;
    }

    // Bind listener socket.
    pj_sockaddr_in_init(&listener.addr, 0, 0);
    if ((rc=pj_sock_bind(listener.sock, &listener.addr, sizeof(listener.addr))) != 0 ) {
        app_perror("...bind error", rc);
	status=-30; goto on_error;
    }

    // Get listener address.
    listener.addr_len = sizeof(listener.addr);
    rc = pj_sock_getsockname(listener.sock, &listener.addr, &listener.addr_len);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_getsockname()", rc);
	status=-40; goto on_error;
    }
    listener.addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));


    // Register listener socket.
    rc = pj_ioqueue_register_sock(pool, ioque, listener.sock, NULL, &test_cb, 
				  &listener.key);
    if (rc != PJ_SUCCESS) {
	app_perror("...ERROR", rc);
	status=-50; goto on_error;
    }


    // Listener socket listen().
    if ((rc=pj_sock_listen(listener.sock, 5)) != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_listen()", rc);
	status=-60; goto on_error;
    }


    for (test_loop=0; test_loop < TEST_LOOP; ++test_loop) {
	// Client connect and server accept.
	for (i=0; i<MAX_PAIR; ++i) {
	    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &client[i].sock);
	    if (rc != PJ_SUCCESS) {
		app_perror("...error creating socket", rc);
		status=-70; goto on_error;
	    }

	    rc = pj_ioqueue_register_sock(pool, ioque, client[i].sock, NULL, 
					  &test_cb, &client[i].key);
	    if (rc != PJ_SUCCESS) {
		app_perror("...error ", rc);
		status=-80; goto on_error;
	    }

	    // Server socket accept()
	    pj_ioqueue_op_key_init(&server[i].accept_op, 
				   sizeof(server[i].accept_op));
	    server[i].rem_addr_len = sizeof(pj_sockaddr_in);
	    status = pj_ioqueue_accept(listener.key, &server[i].accept_op, 
				       &server[i].sock, &server[i].local_addr, 
				       &server[i].rem_addr, 
				       &server[i].rem_addr_len);
	    if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
		app_perror("...ERROR in pj_ioqueue_accept()", rc);
		status=-90; goto on_error;
	    }
	    if (status==PJ_EPENDING) {
		++pending_op;
	    }


	    // Client socket connect()
	    status = pj_ioqueue_connect(client[i].key, &listener.addr, 
					sizeof(listener.addr));
	    if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
		app_perror("...ERROR in pj_ioqueue_connect()", rc);
		status=-100; goto on_error;
	    }
	    if (status==PJ_EPENDING) {
		++pending_op;
	    }

	    // Poll until connection of this pair established
	    while (pending_op) {
		pj_time_val timeout = {1, 0};

#ifdef PJ_SYMBIAN
		status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
		status = pj_ioqueue_poll(ioque, &timeout);
#endif
		if (status > 0) {
		    if (status > pending_op) {
			PJ_LOG(3,(THIS_FILE,
				  "...error: pj_ioqueue_poll() returned %d "
				  "(only expecting %d)",
				  status, pending_op));
			return -110;
		    }
		    pending_op -= status;

		    if (pending_op == 0) {
			status = 0;
		    }
		}
	    }
	}

	// There's no pending operation.
	// When we poll the ioqueue, there must not be events.
	if (pending_op == 0) {
	    pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
	    status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
	    status = pj_ioqueue_poll(ioque, &timeout);
#endif
	    if (status != 0) {
		status=-120; goto on_error;
	    }
	}

	for (i=0; i<MAX_PAIR; ++i) {
	    // Check server socket.
	    if (server[i].sock == PJ_INVALID_SOCKET) {
		status = -130;
		app_perror("...accept() error", pj_get_os_error());
		goto on_error;
	    }

	    // Check addresses
	    if (server[i].local_addr.sin_family != pj_AF_INET() ||
		server[i].local_addr.sin_addr.s_addr == 0 ||
		server[i].local_addr.sin_port == 0)
	    {
		app_perror("...ERROR address not set", rc);
		status = -140;
		goto on_error;
	    }

	    if (server[i].rem_addr.sin_family != pj_AF_INET() ||
		server[i].rem_addr.sin_addr.s_addr == 0 ||
		server[i].rem_addr.sin_port == 0)
	    {
		app_perror("...ERROR address not set", rc);
		status = -150;
		goto on_error;
	    }


	    // Register newly accepted socket.
	    rc = pj_ioqueue_register_sock(pool, ioque, server[i].sock, NULL,
					  &test_cb, &server[i].key);
	    if (rc != PJ_SUCCESS) {
		app_perror("...ERROR in pj_ioqueue_register_sock", rc);
		status = -160;
		goto on_error;
	    }

	    // Test send and receive.
	    t_elapsed.u32.lo = 0;
	    status = send_recv_test(ioque, server[i].key, client[i].key, 
				    send_buf, recv_buf, bufsize, &t_elapsed);
	    if (status != 0) {
		goto on_error;
	    }
	}

	// Success
	status = 0;

	for (i=0; i<MAX_PAIR; ++i) {
	    if (server[i].key != NULL) {
		pj_ioqueue_unregister(server[i].key);
		server[i].key = NULL;
		server[i].sock = PJ_INVALID_SOCKET;
	    } else if (server[i].sock != PJ_INVALID_SOCKET) {
		pj_sock_close(server[i].sock);
		server[i].sock = PJ_INVALID_SOCKET;
	    }

	    if (client[i].key != NULL) {
		pj_ioqueue_unregister(client[i].key);
		client[i].key = NULL;
		client[i].sock = PJ_INVALID_SOCKET;
	    } else if (client[i].sock != PJ_INVALID_SOCKET) {
		pj_sock_close(client[i].sock);
		client[i].sock = PJ_INVALID_SOCKET;
	    }
	}
    }

    status = 0;

on_error:
    for (i=0; i<MAX_PAIR; ++i) {
	if (server[i].key != NULL) {
	    pj_ioqueue_unregister(server[i].key);
	    server[i].key = NULL;
	    server[i].sock = PJ_INVALID_SOCKET;
	} else if (server[i].sock != PJ_INVALID_SOCKET) {
	    pj_sock_close(server[i].sock);
	    server[i].sock = PJ_INVALID_SOCKET;
	}

	if (client[i].key != NULL) {
	    pj_ioqueue_unregister(client[i].key);
	    client[i].key = NULL;
	    client[i].sock = PJ_INVALID_SOCKET;
	} else if (client[i].sock != PJ_INVALID_SOCKET) {
	    pj_sock_close(client[i].sock);
	    client[i].sock = PJ_INVALID_SOCKET;
	}
    }

    if (listener.key) {
	pj_ioqueue_unregister(listener.key);
	listener.key = NULL;
    } else if (listener.sock != PJ_INVALID_SOCKET) {
	pj_sock_close(listener.sock);
	listener.sock = PJ_INVALID_SOCKET;
    }

    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;

}
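
All three compliance tests take a single allow_concur flag, so a driver would typically run each of them twice, once per concurrency setting. A hedged sketch of such a driver, assuming it lives in the same test source as the functions above (the function name tcp_ioqueue_test_driver and the ordering are ours):

/* Hypothetical driver: run the compliance tests with concurrency both
 * enabled and disabled, stopping at the first failing test.
 */
static int tcp_ioqueue_test_driver(void)
{
    pj_bool_t concur[] = { PJ_TRUE, PJ_FALSE };
    unsigned i;

    for (i = 0; i < PJ_ARRAY_SIZE(concur); ++i) {
        int rc;

        rc = compliance_test_0(concur[i]);
        if (rc != 0)
            return rc;

        rc = compliance_test_1(concur[i]);
        if (rc != 0)
            return rc;

        rc = compliance_test_2(concur[i]);
        if (rc != 0)
            return rc;
    }

    return 0;
}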