Example No. 1
/* 
 * Handle timer and network events 
 */
static void srv_handle_events(pj_turn_srv *srv, const pj_time_val *max_timeout)
{
    /* timeout is an 'out' var. This is just to make the compiler happy. */
    pj_time_val timeout = { 0, 0};
    unsigned net_event_count = 0;
    int c;

    /* Poll the timer. The timer heap has its own mutex for better 
     * granularity, so we don't need to lock the server. 
     */
    timeout.sec = timeout.msec = 0;
    c = pj_timer_heap_poll( srv->core.timer_heap, &timeout );

    /* timer_heap_poll should never return a negative value, otherwise
     * ioqueue_poll() will block forever!
     */
    pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
    if (timeout.msec >= 1000) timeout.msec = 999;

    /* If caller specifies maximum time to wait, then compare the value with
     * the timeout to wait from timer, and use the minimum value.
     */
    if (max_timeout && PJ_TIME_VAL_GT(timeout, *max_timeout)) {
	timeout = *max_timeout;
    }

    /* Poll ioqueue. 
     * Repeat polling the ioqueue while we have immediate events, because
     * the timer heap may process more than one event, so if we only process
     * one network event at a time (such as when the IOCP backend is used),
     * the ioqueue may have trouble keeping up with the request rate.
     *
     * For example, for each send() request, one network event will be
     *   reported by the ioqueue for the send() completion. If we don't poll
     *   the ioqueue often enough, the send() completion will not be
     *   reported in a timely manner.
     */
    do {
	c = pj_ioqueue_poll( srv->core.ioqueue, &timeout);
	if (c < 0) {
	    pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
	    return;
	} else if (c == 0) {
	    break;
	} else {
	    net_event_count += c;
	    timeout.sec = timeout.msec = 0;
	}
    } while (c > 0 && net_event_count < MAX_NET_EVENTS);

}
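
A handler like this is normally driven from a dedicated worker thread. Below is a minimal caller sketch, assuming a worker entry point named worker_proc and a quit flag on the server (srv->core.quit); both names and the 100 ms cap are illustrative assumptions, not taken from the example above.

static int worker_proc(void *arg)
{
    pj_turn_srv *srv = (pj_turn_srv*)arg;

    /* Keep dispatching timer and network events until told to quit.
     * Capping the wait keeps the quit flag responsive (assumed 100 ms).
     */
    while (!srv->core.quit) {
        pj_time_val max_timeout = { 0, 100 };
        srv_handle_events(srv, &max_timeout);
    }
    return 0;
}
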
Example No. 2
pj_status_t PJStunTurn::handle_events(unsigned max_msec, unsigned* p_count) {
  enum { MAX_NET_EVENTS = 1 };
  pj_time_val max_timeout = {0, 0};
  pj_time_val timeout = {0, 0};
  unsigned count = 0, net_event_count = 0;
  int c;
  max_timeout.msec = max_msec;
  /* Poll the timer to run it and also to retrieve the earliest entry. */
  timeout.sec = timeout.msec = 0;
  c = pj_timer_heap_poll(ice_cfg_.stun_cfg.timer_heap, &timeout);
  if (c > 0) count += c;
  /* timer_heap_poll should never return a negative value, otherwise
   * ioqueue_poll() will block forever!
   */
  pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
  if (timeout.msec >= 1000) timeout.msec = 999;
  /* compare the value with the timeout to wait from timer, and use the
   * minimum value.
   */
  if (PJ_TIME_VAL_GT(timeout, max_timeout)) timeout = max_timeout;
  /* Poll ioqueue.
   * Repeat polling the ioqueue while we have immediate events, because
   * the timer heap may process more than one event, so if we only process
   * one network event at a time (such as when the IOCP backend is used),
   * the ioqueue may have trouble keeping up with the request rate.
   *
   * For example, for each send() request, one network event will be
   *   reported by the ioqueue for the send() completion. If we don't poll
   *   the ioqueue often enough, the send() completion will not be
   *   reported in a timely manner.
   */
  do {
    c = pj_ioqueue_poll(ice_cfg_.stun_cfg.ioqueue, &timeout);
    if (c < 0) {
      pj_status_t err = pj_get_netos_error();
      pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
      if (p_count) *p_count = count;
      return err;
    } else if (c == 0) {
      break;
    } else {
      net_event_count += c;
      timeout.sec = timeout.msec = 0;
    }
  } while (c > 0 && net_event_count < MAX_NET_EVENTS);
  count += net_event_count;
  if (p_count) *p_count = count;
  return PJ_SUCCESS;
}
Example No. 3
/* Flush all delayed transmissions once the socket is connected. */
static void tcp_flush_pending_tx(struct tcp_transport *tcp)
{
    pj_time_val now;

    pj_gettickcount(&now);
    pj_lock_acquire(tcp->base.lock);
    while (!pj_list_empty(&tcp->delayed_list)) {
	struct delayed_tdata *pending_tx;
	pjsip_tx_data *tdata;
	pj_ioqueue_op_key_t *op_key;
	pj_ssize_t size;
	pj_status_t status;

	pending_tx = tcp->delayed_list.next;
	pj_list_erase(pending_tx);

	tdata = pending_tx->tdata_op_key->tdata;
	op_key = (pj_ioqueue_op_key_t*)pending_tx->tdata_op_key;

        if (pending_tx->timeout.sec > 0 &&
            PJ_TIME_VAL_GT(now, pending_tx->timeout))
        {
            continue;
        }

	/* send! */
	size = tdata->buf.cur - tdata->buf.start;
	status = pj_activesock_send(tcp->asock, op_key, tdata->buf.start, 
				    &size, 0);
	if (status != PJ_EPENDING) {
            pj_lock_release(tcp->base.lock);
	    on_data_sent(tcp->asock, op_key, size);
            pj_lock_acquire(tcp->base.lock);
	}

    }
    pj_lock_release(tcp->base.lock);
}
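
The loop above walks a delayed_list of struct delayed_tdata entries. Judging only from the fields the code touches (the list linkage, tdata_op_key, and timeout), the entry can be sketched roughly as below; this is a reconstruction for illustration, the real definition lives elsewhere in the transport source.

struct delayed_tdata
{
    PJ_DECL_LIST_MEMBER(struct delayed_tdata); /* linkage for tcp->delayed_list */
    pjsip_tx_data_op_key *tdata_op_key;        /* carries the pjsip_tx_data to send */
    pj_time_val           timeout;             /* entry is dropped once this deadline passes */
};
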
Example No. 4
/* Check whether a permission has expired. Return NULL if it has. */
static pj_turn_permission *check_permission_expiry(pj_turn_permission *perm)
{
    pj_turn_allocation *alloc = perm->allocation;
    pj_time_val now;

    pj_gettimeofday(&now);
    if (PJ_TIME_VAL_GT(perm->expiry, now)) {
	/* Permission has not expired */
	return perm;
    }

    /* Remove from permission hash table */
    pj_hash_set(NULL, alloc->peer_table,
		pj_sockaddr_get_addr(&perm->hkey.peer_addr),
	        pj_sockaddr_get_addr_len(&perm->hkey.peer_addr), 0, NULL);

    /* Remove from channel hash table, if assigned a channel number */
    if (perm->channel != PJ_TURN_INVALID_CHANNEL) {
	pj_hash_set(NULL, alloc->ch_table, &perm->channel,
		    sizeof(perm->channel), 0, NULL);
    }

    return NULL;
}
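
A typical caller first looks the permission up by the peer's transport address and then validates it, so an expired entry is removed before it can be used. The sketch below assumes pjlib's pj_hash_get(); the helper name lookup_permission_by_addr is illustrative.

static pj_turn_permission *lookup_permission_by_addr(pj_turn_allocation *alloc,
                                                     const pj_sockaddr_t *peer_addr)
{
    pj_turn_permission *perm;

    /* Find the permission keyed by the peer address. */
    perm = (pj_turn_permission*)
           pj_hash_get(alloc->peer_table,
                       pj_sockaddr_get_addr(peer_addr),
                       pj_sockaddr_get_addr_len(peer_addr), NULL);
    if (perm == NULL)
        return NULL;

    /* Drop it (and get NULL back) if it has already expired. */
    return check_permission_expiry(perm);
}
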
Example No. 5
 bool operator >  (const Pj_Time_Val &rhs) const 
 { 
     return PJ_TIME_VAL_GT((*this), rhs);  
 }
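
The operator simply forwards to the C macro, so plain-C code can do the same comparison directly. A small usage sketch with illustrative variable names:

pj_time_val deadline, now;

pj_gettimeofday(&deadline);
deadline.msec += 250;              /* illustrative 250 ms deadline */
pj_time_val_normalize(&deadline);

pj_gettimeofday(&now);
if (PJ_TIME_VAL_GT(now, deadline)) {
    /* the deadline has already passed */
}
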
Example No. 6
/*
 * Perform unregistration test.
 *
 * This will create ioqueue and register a server socket. Depending
 * on the test method, either the callback or the main thread will
 * unregister and destroy the server socket after some period of time.
 */
static int perform_unreg_test(pj_ioqueue_t *ioqueue,
			      pj_pool_t *test_pool,
			      const char *title, 
			      pj_bool_t other_socket)
{
    enum { WORKER_CNT = 1, MSEC = 500, QUIT_MSEC = 500 };
    int i;
    pj_thread_t *thread[WORKER_CNT];
    struct sock_data osd;
    pj_ioqueue_callback callback;
    pj_time_val end_time;
    pj_status_t status;


    /* Sometimes it's important to have other sockets registered to
     * the ioqueue, because when no sockets are registered, the ioqueue
     * will return from the poll early.
     */
    if (other_socket) {
	status = app_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, 56127, &osd.sock);
	if (status != PJ_SUCCESS) {
	    app_perror("Error creating other socket", status);
	    return -12;
	}

	pj_bzero(&callback, sizeof(callback));
	status = pj_ioqueue_register_sock(test_pool, ioqueue, osd.sock,
					  NULL, &callback, &osd.key);
	if (status != PJ_SUCCESS) {
	    app_perror("Error registering other socket", status);
	    return -13;
	}

    } else {
	osd.key = NULL;
	osd.sock = PJ_INVALID_SOCKET;
    }

    /* Init both time duration of testing */
    thread_quitting = 0;
    pj_gettimeofday(&time_to_unregister);
    time_to_unregister.msec += MSEC;
    pj_time_val_normalize(&time_to_unregister);

    end_time = time_to_unregister;
    end_time.msec += QUIT_MSEC;
    pj_time_val_normalize(&end_time);

    
    /* Create polling thread */
    for (i=0; i<WORKER_CNT; ++i) {
	status = pj_thread_create(test_pool, "unregtest", &worker_thread,
				   ioqueue, 0, 0, &thread[i]);
	if (status != PJ_SUCCESS) {
	    app_perror("Error creating thread", status);
	    return -20;
	}
    }

    /* Create pair of client/server sockets */
    status = app_socketpair(pj_AF_INET(), pj_SOCK_DGRAM(), 0, 
			    &sock_data.sock, &sock_data.csock);
    if (status != PJ_SUCCESS) {
	app_perror("app_socketpair error", status);
	return -30;
    }


    /* Initialize test data */
    sock_data.pool = pj_pool_create(mem, "sd", 1000, 1000, NULL);
    sock_data.buffer = (char*) pj_pool_alloc(sock_data.pool, 128);
    sock_data.bufsize = 128;
    sock_data.op_key = (pj_ioqueue_op_key_t*) 
    		       pj_pool_alloc(sock_data.pool, 
				     sizeof(*sock_data.op_key));
    sock_data.received = 0;
    sock_data.unregistered = 0;

    pj_ioqueue_op_key_init(sock_data.op_key, sizeof(*sock_data.op_key));

    status = pj_mutex_create_simple(sock_data.pool, "sd", &sock_data.mutex);
    if (status != PJ_SUCCESS) {
	app_perror("create_mutex() error", status);
	return -35;
    }

    /* Register socket to ioqueue */
    pj_bzero(&callback, sizeof(callback));
    callback.on_read_complete = &on_read_complete;
    status = pj_ioqueue_register_sock(sock_data.pool, ioqueue, sock_data.sock,
				      NULL, &callback, &sock_data.key);
    if (status != PJ_SUCCESS) {
	app_perror("pj_ioqueue_register error", status);
	return -40;
    }

    /* Bootstrap the first send/receive */
    on_read_complete(sock_data.key, sock_data.op_key, 0);

    /* Loop until test time ends */
    for (;;) {
	pj_time_val now, timeout;

	pj_gettimeofday(&now);

	if (test_method == UNREGISTER_IN_APP && 
	    PJ_TIME_VAL_GTE(now, time_to_unregister) &&
	    sock_data.pool) 
	{
	    pj_mutex_lock(sock_data.mutex);

	    sock_data.unregistered = 1;
	    pj_ioqueue_unregister(sock_data.key);
	    pj_mutex_unlock(sock_data.mutex);
	    pj_mutex_destroy(sock_data.mutex);
	    pj_pool_release(sock_data.pool);
	    sock_data.pool = NULL;
	}

	if (PJ_TIME_VAL_GT(now, end_time) && sock_data.unregistered)
	    break;

	timeout.sec = 0; timeout.msec = 10;
	pj_ioqueue_poll(ioqueue, &timeout);
	//pj_thread_sleep(1);

    }

    thread_quitting = 1;

    for (i=0; i<WORKER_CNT; ++i) {
	pj_thread_join(thread[i]);
	pj_thread_destroy(thread[i]);
    }

    if (other_socket) {
	pj_ioqueue_unregister(osd.key);
    }

    pj_sock_close(sock_data.csock);

    PJ_LOG(3,(THIS_FILE, "....%s: done (%d KB/s)",
	      title, sock_data.received * 1000 / MSEC / 1000));
    return 0;
}
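
The test references a worker_thread function and the globals thread_quitting, time_to_unregister and sock_data, all defined elsewhere in the test file. As a rough sketch of what the polling worker might look like (assumed, not taken from the test source):

static int worker_thread(void *arg)
{
    pj_ioqueue_t *ioqueue = (pj_ioqueue_t*)arg;

    /* Poll with a short timeout until the main thread flags shutdown. */
    while (!thread_quitting) {
        pj_time_val timeout = { 0, 10 };
        pj_ioqueue_poll(ioqueue, &timeout);
    }
    return 0;
}
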
Example No. 7
static pj_status_t krx_ice_handle_events(krx_ice* k, unsigned int maxms, unsigned int* pcount) {

  if(!k) {
    printf("Error: krx_ice_handle_events(), invalid krx_ice pointer.\n");
    return PJ_EINVAL;
  }

  printf("lets poll: %p.\n", k);
  
  pj_time_val max_timeout = { 0, 0 };
  pj_time_val timeout = { 0, 0 };
  unsigned int count = 0;
  unsigned int net_event_count = 0;
  int c;

  max_timeout.msec = maxms;
  timeout.sec = timeout.msec = 0;

  /* poll the timer to run it and also retrieve earliest entry */
  c = pj_timer_heap_poll(k->ice_cfg.stun_cfg.timer_heap, &timeout);
  if(c > 0) {
    count += c;
  }
  
  /* timer_heap_poll should never return negative values! */
  if(timeout.sec < 0 || timeout.msec < 0) {
    printf("Error: timer returns negative values. Should never happen.\n");
    exit(1);
  }
  
  if(timeout.msec >= 1000) {
    timeout.msec = 999;
  }

  /* use the minimum timeout value */
  if(PJ_TIME_VAL_GT(timeout, max_timeout)) {
    timeout = max_timeout;
  }


  /* poll ioqueue */
  do { 

    c = pj_ioqueue_poll(k->ice_cfg.stun_cfg.ioqueue, &timeout);
    if(c < 0) {
      pj_status_t err = pj_get_netos_error();
      pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
      if(pcount) {
        *pcount = count;
      }
      return err;
    }
    else if(c == 0) {
      break;
    }
    else {
      net_event_count += c;
      timeout.sec = timeout.msec = 0;
    }

    
  } while(c > 0 && net_event_count < 1 );

  count += net_event_count;
  if(pcount) {
    *pcount = count;
  }

  return PJ_SUCCESS;
}
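
In use, a function like this is typically driven from the application's main loop. A hypothetical driver follows, with an illustrative 'running' flag that is not part of the code above:

void krx_ice_run(krx_ice* k, volatile int* running) {
  while(*running) {
    unsigned int handled = 0;
    krx_ice_handle_events(k, 10, &handled); /* wait at most 10 ms per pass */
  }
}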