Example #1
pj_status_t PJStunTurn::handle_events(unsigned max_msec, unsigned* p_count) {
  enum { MAX_NET_EVENTS = 1 };
  pj_time_val max_timeout = {0, 0};
  pj_time_val timeout = {0, 0};
  unsigned count = 0, net_event_count = 0;
  int c;
  max_timeout.msec = max_msec;
  /* Poll the timer to run it and also to retrieve the earliest entry. */
  timeout.sec = timeout.msec = 0;
  c = pj_timer_heap_poll(ice_cfg_.stun_cfg.timer_heap, &timeout);
  if (c > 0) count += c;
  /* timer_heap_poll should never return a negative value, otherwise
   * ioqueue_poll() would block forever!
   */
  pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
  if (timeout.msec >= 1000) timeout.msec = 999;
  /* Compare the timeout to wait derived from the timer heap with the
   * caller's maximum, and use the smaller of the two.
   */
  if (PJ_TIME_VAL_GT(timeout, max_timeout)) timeout = max_timeout;
  /* Poll the ioqueue.
   * Repeat polling the ioqueue while we have immediate events, because
   * the timer heap may have processed more than one event; if we only
   * process one network event at a time (such as when the IOCP backend
   * is used), the ioqueue may have trouble keeping up with the request
   * rate.
   *
   * For example, for each send() request, one network event will be
   * reported by the ioqueue for the send() completion. If we don't
   * poll the ioqueue often enough, the send() completion will not be
   * reported in a timely manner.
   */
  do {
    c = pj_ioqueue_poll(ice_cfg_.stun_cfg.ioqueue, &timeout);
    if (c < 0) {
      pj_status_t err = pj_get_netos_error();
      pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
      if (p_count) *p_count = count;
      return err;
    } else if (c == 0) {
      break;
    } else {
      net_event_count += c;
      timeout.sec = timeout.msec = 0;
    }
  } while (c > 0 && net_event_count < MAX_NET_EVENTS);
  count += net_event_count;
  if (p_count) *p_count = count;
  return PJ_SUCCESS;
}
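A minimal usage sketch (the worker-thread shape, the g_running flag, and the turn pointer are assumptions, not part of the example): a dedicated thread typically drives handle_events() in a loop so that both timers and socket events keep flowing.

static pj_bool_t g_running = PJ_TRUE;

static int worker_thread(void *arg) {
  PJStunTurn* turn = (PJStunTurn*)arg;
  while (g_running) {
    unsigned count = 0;
    /* Process timers and network events, waiting at most 500 ms. */
    turn->handle_events(500, &count);
  }
  return 0;
}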
Example #2
/*
 * Convert IPv4/IPv6 address to text.
 */
PJ_DEF(pj_status_t) pj_inet_ntop(int af, const void *src,
				 char *dst, int size)
{
    PJ_ASSERT_RETURN(src && dst && size, PJ_EINVAL);

    *dst = '\0';

    PJ_ASSERT_RETURN(af==PJ_AF_INET || af==PJ_AF_INET6, PJ_EAFNOTSUP);

#if defined(PJ_SOCK_HAS_INET_NTOP) && PJ_SOCK_HAS_INET_NTOP != 0
    /*
     * Implementation using inet_ntop()
     */
    if (inet_ntop(af, src, dst, size) == NULL) {
	pj_status_t status = pj_get_netos_error();
	if (status == PJ_SUCCESS)
	    status = PJ_EUNKNOWN;

	return status;
    }

    return PJ_SUCCESS;

#elif defined(PJ_WIN32) || defined(PJ_WIN32_WINCE)
    /*
     * Implementation on Windows, using WSAAddressToString().
     * Should also work on Unicode systems.
     */
    {
	PJ_DECL_UNICODE_TEMP_BUF(wtempaddr,PJ_INET6_ADDRSTRLEN)
	pj_sockaddr sock_addr;
	DWORD addr_len, addr_str_len;
	int rc;

	pj_bzero(&sock_addr, sizeof(sock_addr));
	sock_addr.addr.sa_family = (pj_uint16_t)af;
	if (af == PJ_AF_INET) {
	    if (size < PJ_INET_ADDRSTRLEN)
		return PJ_ETOOSMALL;
	    pj_memcpy(&sock_addr.ipv4.sin_addr, src, 4);
	    addr_len = sizeof(pj_sockaddr_in);
	    addr_str_len = PJ_INET_ADDRSTRLEN;
	} else if (af == PJ_AF_INET6) {
	    if (size < PJ_INET6_ADDRSTRLEN)
		return PJ_ETOOSMALL;
	    pj_memcpy(&sock_addr.ipv6.sin6_addr, src, 16);
	    addr_len = sizeof(pj_sockaddr_in6);
	    addr_str_len = PJ_INET6_ADDRSTRLEN;
	} else {
	    pj_assert(!"Unsupported address family");
	    return PJ_EAFNOTSUP;
	}

#if PJ_NATIVE_STRING_IS_UNICODE
	rc = WSAAddressToString((LPSOCKADDR)&sock_addr, addr_len,
				NULL, wtempaddr, &addr_str_len);
	if (rc == 0) {
	    pj_unicode_to_ansi(wtempaddr, wcslen(wtempaddr), dst, size);
	}
#else
	rc = WSAAddressToString((LPSOCKADDR)&sock_addr, addr_len,
				NULL, dst, &addr_str_len);
#endif

	if (rc != 0) {
	    pj_status_t status = pj_get_netos_error();
	    if (status == PJ_SUCCESS)
		status = PJ_EUNKNOWN;

	    return status;
	}

	return PJ_SUCCESS;
    }

#elif !defined(PJ_HAS_IPV6) || PJ_HAS_IPV6==0
    /* IPv6 support is disabled, just return error without raising assertion */
    return PJ_EIPV6NOTSUP;
#else
    pj_assert(!"Not supported");
    return PJ_EIPV6NOTSUP;
#endif
}
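A hedged usage sketch (assumes pj_init() has already succeeded; the helper name is made up): round-trip an IPv4 literal through pj_inet_pton() and pj_inet_ntop().

static void print_addr_example(void)
{
    pj_str_t s;
    pj_in_addr in4;
    char buf[PJ_INET_ADDRSTRLEN];

    /* Parse the text form first, then render it back to text. */
    if (pj_inet_pton(PJ_AF_INET, pj_cstr(&s, "127.0.0.1"), &in4) == PJ_SUCCESS &&
	pj_inet_ntop(PJ_AF_INET, &in4, buf, (int)sizeof(buf)) == PJ_SUCCESS)
    {
	PJ_LOG(3, ("sample", "address: %s", buf));
    }
}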
Example #3
/*
 * Convert text to IPv4/IPv6 address.
 */
PJ_DEF(pj_status_t) pj_inet_pton(int af, const pj_str_t *src, void *dst)
{
    char tempaddr[PJ_INET6_ADDRSTRLEN];

    PJ_ASSERT_RETURN(af==PJ_AF_INET || af==PJ_AF_INET6, PJ_EAFNOTSUP);
    PJ_ASSERT_RETURN(src && src->slen && dst, PJ_EINVAL);

    /* Initialize output with PJ_INADDR_NONE for IPv4 (to be
     * compatible with pj_inet_aton()).
     */
    if (af==PJ_AF_INET) {
	((pj_in_addr*)dst)->s_addr = PJ_INADDR_NONE;
    }

    /* Caution:
     *	this function might be called with src->slen >= 46
     *  (i.e. when called with a hostname to check if it's an IP addr).
     */
    if (src->slen >= PJ_INET6_ADDRSTRLEN) {
	return PJ_ENAMETOOLONG;
    }

    pj_memcpy(tempaddr, src->ptr, src->slen);
    tempaddr[src->slen] = '\0';

#if defined(PJ_SOCK_HAS_INET_PTON) && PJ_SOCK_HAS_INET_PTON != 0
    /*
     * Implementation using inet_pton()
     */
    if (inet_pton(af, tempaddr, dst) != 1) {
	pj_status_t status = pj_get_netos_error();
	if (status == PJ_SUCCESS)
	    status = PJ_EUNKNOWN;

	return status;
    }

    return PJ_SUCCESS;

#elif defined(PJ_WIN32) || defined(PJ_WIN32_WINCE)
    /*
     * Implementation on Windows, using WSAStringToAddress().
     * Should also work on Unicode systems.
     */
    {
	PJ_DECL_UNICODE_TEMP_BUF(wtempaddr,PJ_INET6_ADDRSTRLEN)
	pj_sockaddr sock_addr;
	int addr_len = sizeof(sock_addr);
	int rc;

	sock_addr.addr.sa_family = (pj_uint16_t)af;
	rc = WSAStringToAddress(
		PJ_STRING_TO_NATIVE(tempaddr,wtempaddr,sizeof(wtempaddr)), 
		af, NULL, (LPSOCKADDR)&sock_addr, &addr_len);
	if (rc != 0) {
	    /* If you get rc 130022 Invalid argument (WSAEINVAL) with IPv6,
	     * check that you have IPv6 enabled (install it in the network
	     * adapter).
	     */
	    pj_status_t status = pj_get_netos_error();
	    if (status == PJ_SUCCESS)
		status = PJ_EUNKNOWN;

	    return status;
	}

	if (sock_addr.addr.sa_family == PJ_AF_INET) {
	    pj_memcpy(dst, &sock_addr.ipv4.sin_addr, 4);
	    return PJ_SUCCESS;
	} else if (sock_addr.addr.sa_family == PJ_AF_INET6) {
	    pj_memcpy(dst, &sock_addr.ipv6.sin6_addr, 16);
	    return PJ_SUCCESS;
	} else {
	    pj_assert(!"Shouldn't happen");
	    return PJ_EBUG;
	}
    }
#elif !defined(PJ_HAS_IPV6) || PJ_HAS_IPV6==0
    /* IPv6 support is disabled, just return error without raising assertion */
    return PJ_EIPV6NOTSUP;
#else
    pj_assert(!"Not supported");
    return PJ_EIPV6NOTSUP;
#endif
}
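A small sketch built on the same API (the helper itself is hypothetical, not part of PJLIB): because pj_inet_pton() fails cleanly on non-addresses, it doubles as a cheap validity test for address literals.

static pj_bool_t is_ipv6_literal(const char *txt)
{
    pj_str_t s;
    pj_in6_addr in6;

    /* PJ_SUCCESS only if txt parses as an IPv6 address. */
    return pj_inet_pton(PJ_AF_INET6, pj_cstr(&s, txt), &in6) == PJ_SUCCESS;
}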
Example #4
/*
 * select_test()
 *
 * Test main entry.
 */
int select_test()
{
    pj_sock_t udp1=PJ_INVALID_SOCKET, udp2=PJ_INVALID_SOCKET;
    pj_sockaddr_in udp_addr;
    int status;
    int setcount[3];
    pj_str_t s;
    const char data[] = "hello";
    const int datalen = 5;
    pj_ssize_t sent, received;
    char buf[10];
    pj_status_t rc;

    PJ_LOG(3, (THIS_FILE, "...Testing simple UDP select()"));
    
    // Create two UDP sockets.
    rc = pj_sock_socket( pj_AF_INET(), pj_SOCK_DGRAM(), 0, &udp1);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: unable to create socket", rc);
	status=-10; goto on_return;
    }
    rc = pj_sock_socket( pj_AF_INET(), pj_SOCK_DGRAM(), 0, &udp2);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: unable to create socket", rc);
	status=-20; goto on_return;
    }

    // Bind one of the UDP sockets.
    pj_bzero(&udp_addr, sizeof(udp_addr));
    udp_addr.sin_family = pj_AF_INET();
    udp_addr.sin_port = pj_htons(UDP_PORT);
    udp_addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));

    if (pj_sock_bind(udp2, &udp_addr, sizeof(udp_addr))) {
	status=-30; goto on_return;
    }

    // Send data.
    sent = datalen;
    rc = pj_sock_sendto(udp1, data, &sent, 0, &udp_addr, sizeof(udp_addr));
    if (rc != PJ_SUCCESS || sent != datalen) {
        app_perror("...error: sendto() error", rc);
	status=-40; goto on_return;
    }

    // Sleep a bit. See http://trac.pjsip.org/repos/ticket/890
    pj_thread_sleep(10);

    // Check that the socket is marked as readable.
    // Note that select() may also report that sockets are writable.
    status = do_select(udp1, udp2, setcount);
    if (status < 0) {
	char errbuf[128];
        pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf));
	PJ_LOG(1,(THIS_FILE, "...error: %s", errbuf));
	status=-50; goto on_return;
    }
    if (status == 0) {
	status=-60; goto on_return;
    }

    if (setcount[READ_FDS] != 1) {
	status=-70; goto on_return;
    }
    if (setcount[WRITE_FDS] != 0) {
	if (setcount[WRITE_FDS] == 2) {
	    PJ_LOG(3,(THIS_FILE, "...info: system reports writable sockets"));
	} else {
	    status=-80; goto on_return;
	}
    } else {
	PJ_LOG(3,(THIS_FILE, 
		  "...info: system doesn't report writable sockets"));
    }
    if (setcount[EXCEPT_FDS] != 0) {
	status=-90; goto on_return;
    }

    // Read the socket to clear readable sockets.
    received = sizeof(buf);
    rc = pj_sock_recv(udp2, buf, &received, 0);
    if (rc != PJ_SUCCESS || received != 5) {
	status=-100; goto on_return;
    }
    
    status = 0;

    // Test timeout on the read part.
    // This won't necessarily return zero, as select() may report that
    // sockets are writable.
    setcount[0] = setcount[1] = setcount[2] = 0;
    status = do_select(udp1, udp2, setcount);
    if (status != 0 && status != setcount[WRITE_FDS]) {
	PJ_LOG(3,(THIS_FILE, "...error: expecting timeout but got %d sks set",
			     status));
	PJ_LOG(3,(THIS_FILE, "          rdset: %d, wrset: %d, exset: %d",
			     setcount[0], setcount[1], setcount[2]));
	status = -110; goto on_return;
    }
    if (setcount[READ_FDS] != 0) {
	PJ_LOG(3,(THIS_FILE, "...error: readable socket not expected"));
	status = -120; goto on_return;
    }

    status = 0;

on_return:
    if (udp1 != PJ_INVALID_SOCKET)
	pj_sock_close(udp1);
    if (udp2 != PJ_INVALID_SOCKET)
	pj_sock_close(udp2);
    return status;
}
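The test relies on a do_select() helper and the READ_FDS/WRITE_FDS/EXCEPT_FDS indices that are not shown above. A plausible sketch under those assumptions (not necessarily the test suite's actual code):

enum { READ_FDS, WRITE_FDS, EXCEPT_FDS };

static int do_select(pj_sock_t sock1, pj_sock_t sock2, int setcount[3])
{
    pj_fd_set_t fds[3];
    pj_time_val timeout = { 1, 0 };
    int i, n;

    /* Watch both sockets in the read, write, and exception sets. */
    for (i = 0; i < 3; ++i) {
	PJ_FD_ZERO(&fds[i]);
	PJ_FD_SET(sock1, &fds[i]);
	PJ_FD_SET(sock2, &fds[i]);
	setcount[i] = 0;
    }

    n = pj_sock_select(PJ_IOQUEUE_MAX_HANDLES, &fds[0], &fds[1], &fds[2],
		       &timeout);
    if (n <= 0)
	return n;

    /* Count how many sockets are still set in each descriptor set. */
    for (i = 0; i < 3; ++i) {
	if (PJ_FD_ISSET(sock1, &fds[i])) setcount[i]++;
	if (PJ_FD_ISSET(sock2, &fds[i])) setcount[i]++;
    }
    return n;
}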
Example #5
/*
 * Benchmarking IOQueue
 */
static int bench_test(pj_bool_t allow_concur, int bufsize, 
		      int inactive_sock_count)
{
    pj_sock_t ssock=PJ_INVALID_SOCKET, csock=PJ_INVALID_SOCKET;
    pj_sockaddr_in addr;
    pj_pool_t *pool = NULL;
    pj_sock_t *inactive_sock=NULL;
    pj_ioqueue_op_key_t *inactive_read_op;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey, *ckey, *keys[SOCK_INACTIVE_MAX+2];
    pj_timestamp t1, t2, t_elapsed;
    int rc=0, i;    /* i must be signed */
    pj_str_t temp;
    char errbuf[PJ_ERR_MSG_SIZE];

    TRACE__((THIS_FILE, "   bench test %d", inactive_sock_count));

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Allocate sockets for sending and receiving.
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &ssock);
    if (rc == PJ_SUCCESS) {
        rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &csock);
    } else
        csock = PJ_INVALID_SOCKET;
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_sock_socket()", rc);
	goto on_error;
    }

    // Bind server socket.
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    addr.sin_port = pj_htons(PORT);
    if (pj_sock_bind(ssock, &addr, sizeof(addr)))
	goto on_error;

    pj_assert(inactive_sock_count+2 <= PJ_IOQUEUE_MAX_HANDLES);

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_ioqueue_create()", rc);
	goto on_error;
    }

    // Set concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
	app_perror("...error: pj_ioqueue_set_default_concurrency()", rc);
	goto on_error;
    }

    // Allocate inactive sockets, and bind them to some arbitrary address.
    // Then register them to the I/O queue, and start a read operation.
    inactive_sock = (pj_sock_t*)pj_pool_alloc(pool, 
				    inactive_sock_count*sizeof(pj_sock_t));
    inactive_read_op = (pj_ioqueue_op_key_t*)pj_pool_alloc(pool,
                              inactive_sock_count*sizeof(pj_ioqueue_op_key_t));
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    for (i=0; i<inactive_sock_count; ++i) {
        pj_ssize_t bytes;

	rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &inactive_sock[i]);
	if (rc != PJ_SUCCESS || inactive_sock[i] < 0) {
	    app_perror("...error: pj_sock_socket()", rc);
	    goto on_error;
	}
	if ((rc=pj_sock_bind(inactive_sock[i], &addr, sizeof(addr))) != 0) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error: pj_sock_bind()", rc);
	    goto on_error;
	}
	rc = pj_ioqueue_register_sock(pool, ioque, inactive_sock[i], 
			              NULL, &test_cb, &keys[i]);
	if (rc != PJ_SUCCESS) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error(1): pj_ioqueue_register_sock()", rc);
	    PJ_LOG(3,(THIS_FILE, "....i=%d", i));
	    goto on_error;
	}
        bytes = bufsize;
	rc = pj_ioqueue_recv(keys[i], &inactive_read_op[i], recv_buf, &bytes, 0);
	if (rc != PJ_EPENDING) {
	    pj_sock_close(inactive_sock[i]);
	    inactive_sock[i] = PJ_INVALID_SOCKET;
	    app_perror("...error: pj_ioqueue_read()", rc);
	    goto on_error;
	}
    }

    // Register the server and client sockets.
    // We put these after the inactive sockets, hoping this represents
    // the worst-case waiting time.
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, 
			          &test_cb, &skey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(2): pj_ioqueue_register_sock()", rc);
	goto on_error;
    }

    rc = pj_ioqueue_register_sock(pool, ioque, csock, NULL, 
			          &test_cb, &ckey);
    if (rc != PJ_SUCCESS) {
	app_perror("...error(3): pj_ioqueue_register_sock()", rc);
	goto on_error;
    }

    // Set destination address to send the packet.
    pj_sockaddr_in_init(&addr, pj_cstr(&temp, "127.0.0.1"), PORT);

    // Test loop.
    t_elapsed.u64 = 0;
    for (i=0; i<LOOP; ++i) {
	pj_ssize_t bytes;
        pj_ioqueue_op_key_t read_op, write_op;

	// Randomize send buffer.
	pj_create_random_string(send_buf, bufsize);

	// Start reading on the server side.
        bytes = bufsize;
	rc = pj_ioqueue_recv(skey, &read_op, recv_buf, &bytes, 0);
	if (rc != PJ_EPENDING) {
	    app_perror("...error: pj_ioqueue_read()", rc);
	    break;
	}

	// Start sending on the client side.
        bytes = bufsize;
	rc = pj_ioqueue_sendto(ckey, &write_op, send_buf, &bytes, 0,
			       &addr, sizeof(addr));
	if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
	    app_perror("...error: pj_ioqueue_write()", rc);
	    break;
	}
	if (rc == PJ_SUCCESS) {
	    if (bytes < 0) {
		app_perror("...error: pj_ioqueue_sendto()",(pj_status_t)-bytes);
		break;
	    }
	}

	// Begin time.
	pj_get_timestamp(&t1);

	// Poll the queue until we get the completion event on the server side.
        callback_read_key = NULL;
        callback_read_size = 0;
	TRACE__((THIS_FILE, "     waiting for key = %p", skey));
	do {
	    pj_time_val timeout = { 1, 0 };
#ifdef PJ_SYMBIAN
	    rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
	    rc = pj_ioqueue_poll(ioque, &timeout);
#endif
	    TRACE__((THIS_FILE, "     poll rc=%d", rc));
	} while (rc >= 0 && callback_read_key != skey);

	// End time.
	pj_get_timestamp(&t2);
	t_elapsed.u64 += (t2.u64 - t1.u64);

	if (rc < 0) {
	    app_perror("   error: pj_ioqueue_poll", -rc);
	    break;
	}

	// Compare recv buffer with send buffer.
	if (callback_read_size != bufsize || 
	    pj_memcmp(send_buf, recv_buf, bufsize)) 
	{
	    rc = -10;
	    PJ_LOG(3,(THIS_FILE, "   error: size/buffer mismatch"));
	    break;
	}

	// Poll until all events are exhausted, before we start the next loop.
	do {
	    pj_time_val timeout = { 0, 10 };
#ifdef PJ_SYMBIAN
	    PJ_UNUSED_ARG(timeout);
	    rc = pj_symbianos_poll(-1, 100);
#else	    
	    rc = pj_ioqueue_poll(ioque, &timeout);
#endif
	} while (rc>0);

	rc = 0;
    }

    // Print results
    if (rc == 0) {
	pj_timestamp tzero;
	pj_uint32_t usec_delay;

	tzero.u32.hi = tzero.u32.lo = 0;
	usec_delay = pj_elapsed_usec( &tzero, &t_elapsed);

	PJ_LOG(3, (THIS_FILE, "...%10d %15d  % 9d", 
	           bufsize, inactive_sock_count, usec_delay));

    } else {
	PJ_LOG(2, (THIS_FILE, "...ERROR rc=%d (buf:%d, fds:%d)", 
			      rc, bufsize, inactive_sock_count+2));
    }

    // Cleaning up.
    for (i=inactive_sock_count-1; i>=0; --i) {
	pj_ioqueue_unregister(keys[i]);
    }

    pj_ioqueue_unregister(skey);
    pj_ioqueue_unregister(ckey);


    pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return rc;

on_error:
    PJ_LOG(1,(THIS_FILE, "...ERROR: %s", 
	      pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
    if (ssock != PJ_INVALID_SOCKET)
	pj_sock_close(ssock);
    if (csock != PJ_INVALID_SOCKET)
	pj_sock_close(csock);
    for (i=0; i<inactive_sock_count && inactive_sock && 
	      inactive_sock[i]!=PJ_INVALID_SOCKET; ++i) 
    {
	pj_sock_close(inactive_sock[i]);
    }
    if (ioque != NULL)
	pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return -1;
}
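bench_test() references a test_cb callback record and the callback_read_key / callback_read_size globals without showing them. A minimal sketch of what they might look like (an assumption, inferred from how the polling loop uses them):

static pj_ioqueue_key_t *callback_read_key;
static pj_ssize_t callback_read_size;

static void on_read_complete(pj_ioqueue_key_t *key,
			     pj_ioqueue_op_key_t *op_key,
			     pj_ssize_t bytes_read)
{
    PJ_UNUSED_ARG(op_key);
    /* Record which key completed and how much was read, so the
     * benchmark loop can detect the server-side completion. */
    callback_read_key = key;
    callback_read_size = bytes_read;
}

static pj_ioqueue_callback test_cb =
{
    &on_read_complete,
    NULL,	/* on_write_complete  */
    NULL,	/* on_accept_complete */
    NULL	/* on_connect_complete */
};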
Example #6
/*
 * pj_ioqueue_poll()
 *
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    //struct epoll_event *events = ioqueue->events;
    //struct queue *queue = ioqueue->queue;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;
    
    PJ_CHECK_STACK();

    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);
 
    //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec);
    count = os_epoll_wait( ioqueue->epfd, events, PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	/* Check the closing keys only when there's no activity and when
	 * there are pending closing keys.
	 */
	if (!pj_list_empty(&ioqueue->closing_list)) {
	    pj_lock_acquire(ioqueue->lock);
	    scan_closing_keys(ioqueue);
	    pj_lock_release(ioqueue->lock);
	}
#endif
	TRACE_((THIS_FILE, "os_epoll_wait timed out"));
	return count;
    }
    else if (count < 0) {
	TRACE_((THIS_FILE, "os_epoll_wait error"));
	return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
		       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
	pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
				events[i].epoll_data;

	TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

	/*
	 * Check readability.
	 */
	if ((events[i].events & EPOLLIN) && 
	    (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = READABLE_EVENT;
	    ++processed;
	    continue;
	}

	/*
	 * Check for writability.
	 */
	if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}

#if PJ_HAS_TCP
	/*
	 * Check for completion of connect() operation.
	 */
	if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}
#endif /* PJ_HAS_TCP */

	/*
	 * Check for error condition.
	 */
	if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) {
	    /*
	     * We need to handle this exception event.  If it's related to us
	     * connecting, report it as such.  If not, just report it as a
	     * read event and the higher layers will handle it.
	     */
	    if (h->connecting) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = EXCEPTION_EVENT;
		++processed;
	    } else if (key_has_pending_read(h) || key_has_pending_accept(h)) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = READABLE_EVENT;
		++processed;
	    }
	    continue;
	}
    }
    for (i=0; i<processed; ++i) {
	if (queue[i].key->grp_lock)
	    pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    PJ_RACE_ME(5);

    pj_lock_release(ioqueue->lock);

    PJ_RACE_ME(5);

    /* Now process the events. */
    for (i=0; i<processed; ++i) {
	switch (queue[i].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, queue[i].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, queue[i].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(queue[i].key);
#endif

	if (queue[i].key->grp_lock)
	    pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock,
	                            "ioqueue", 0);
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     */
    if (count > 0 && !processed && msec > 0) {
	pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
		       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
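The function above fills a struct queue array that is declared elsewhere in this backend. A sketch of the minimal shape it needs (an assumption, based on how the array is used):

struct queue
{
    pj_ioqueue_key_t	    *key;
    enum ioqueue_event_type  event_type;
};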
Example #7
/*
 * pj_ioqueue_register_sock2()
 *
 * Register a socket to the ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
					      pj_ioqueue_t *ioqueue,
					      pj_sock_t sock,
					      pj_grp_lock_t *grp_lock,
					      void *user_data,
					      const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_key_t *key = NULL;
    pj_uint32_t value;
    struct epoll_event ev;
    int status;
    pj_status_t rc = PJ_SUCCESS;
    
    PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
                     cb && p_key, PJ_EINVAL);

    pj_lock_acquire(ioqueue->lock);

    if (ioqueue->count >= ioqueue->max) {
        rc = PJ_ETOOMANY;
	TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: too many files"));
	goto on_return;
    }

    /* Set socket to nonblocking. */
    value = 1;
    if ((rc=os_ioctl(sock, FIONBIO, (ioctl_val_type)&value))) {
	TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: ioctl rc=%d", 
                rc));
        rc = pj_get_netos_error();
	goto on_return;
    }

    /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get
     * the key from the free list. Otherwise allocate a new one. 
     */
#if PJ_IOQUEUE_HAS_SAFE_UNREG

    /* Scan closing_keys first to let them come back to free_list */
    scan_closing_keys(ioqueue);

    pj_assert(!pj_list_empty(&ioqueue->free_list));
    if (pj_list_empty(&ioqueue->free_list)) {
	rc = PJ_ETOOMANY;
	goto on_return;
    }

    key = ioqueue->free_list.next;
    pj_list_erase(key);
#else
    /* Create key. */
    key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif

    rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
    if (rc != PJ_SUCCESS) {
	key = NULL;
	goto on_return;
    }

    /* Create key's mutex */
 /*   rc = pj_mutex_create_recursive(pool, NULL, &key->mutex);
    if (rc != PJ_SUCCESS) {
	key = NULL;
	goto on_return;
    }
*/
    /* os_epoll_ctl. */
    ev.events = EPOLLIN | EPOLLERR;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl(ioqueue->epfd, EPOLL_CTL_ADD, sock, &ev);
    if (status < 0) {
	rc = pj_get_os_error();
	pj_lock_destroy(key->lock);
	key = NULL;
	TRACE_((THIS_FILE, 
                "pj_ioqueue_register_sock error: os_epoll_ctl rc=%d", 
                status));
	goto on_return;
    }
    
    /* Register */
    pj_list_insert_before(&ioqueue->active_list, key);
    ++ioqueue->count;

    //TRACE_((THIS_FILE, "socket registered, count=%d", ioqueue->count));

on_return:
    if (rc != PJ_SUCCESS) {
	if (key && key->grp_lock)
	    pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
    }
    *p_key = key;
    pj_lock_release(ioqueue->lock);
    
    return rc;
}
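A hedged usage sketch (pool, ioqueue, sock, user_data, and my_cb are assumed to exist already): passing NULL for grp_lock registers the socket without group-lock protection.

static pj_status_t register_example(pj_pool_t *pool, pj_ioqueue_t *ioqueue,
				    pj_sock_t sock, void *user_data,
				    const pj_ioqueue_callback *my_cb)
{
    pj_ioqueue_key_t *key;

    /* On success the key is stored; on failure (e.g. PJ_ETOOMANY when
     * the ioqueue is full) no key is registered. */
    return pj_ioqueue_register_sock2(pool, ioqueue, sock, NULL,
				     user_data, my_cb, &key);
}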
Example #8
/*
 * pj_ioqueue_poll()
 *
 * A few things worth noting:
 *
 *  - we used to invoke only one callback per poll, but that didn't
 *    work very well. The reason is that in some situations the write
 *    callback gets called all the time, so the read callback never
 *    gets a chance to run. This happens, for example, when the user
 *    submits a write operation inside the write callback.
 *    As a result, we changed the behaviour so that multiple callbacks
 *    are now called in a single poll. It should be fast too; we just
 *    need to be careful with the ioqueue data structures.
 *
 *  - to guarantee preemptiveness etc., the poll function must strictly
 *    work on an fd_set copy of the ioqueue (not the original one).
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    pj_fd_set_t rfdset, wfdset, xfdset;
    int count, counter;
    pj_ioqueue_key_t *h;
    struct event
    {
        pj_ioqueue_key_t	*key;
        enum ioqueue_event_type  event_type;
    } event[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];

    PJ_ASSERT_RETURN(ioqueue, -PJ_EINVAL);

    /* Lock ioqueue before making fd_set copies */
    pj_lock_acquire(ioqueue->lock);

    /* We will only do select() when there are sockets to be polled.
     * Otherwise select() will return an error.
     */
    if (PJ_FD_COUNT(&ioqueue->rfdset)==0 &&
        PJ_FD_COUNT(&ioqueue->wfdset)==0 
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
        && PJ_FD_COUNT(&ioqueue->xfdset)==0
#endif
	)
    {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	scan_closing_keys(ioqueue);
#endif
	pj_lock_release(ioqueue->lock);
	TRACE__((THIS_FILE, "     poll: no fd is set"));
        if (timeout)
            pj_thread_sleep(PJ_TIME_VAL_MSEC(*timeout));
        return 0;
    }

    /* Copy ioqueue's pj_fd_set_t to local variables. */
    pj_memcpy(&rfdset, &ioqueue->rfdset, sizeof(pj_fd_set_t));
    pj_memcpy(&wfdset, &ioqueue->wfdset, sizeof(pj_fd_set_t));
#if PJ_HAS_TCP
    pj_memcpy(&xfdset, &ioqueue->xfdset, sizeof(pj_fd_set_t));
#else
    PJ_FD_ZERO(&xfdset);
#endif

#if VALIDATE_FD_SET
    validate_sets(ioqueue, &rfdset, &wfdset, &xfdset);
#endif

    /* Unlock ioqueue before select(). */
    pj_lock_release(ioqueue->lock);

    count = pj_sock_select(ioqueue->nfds+1, &rfdset, &wfdset, &xfdset, 
			   timeout);
    
    if (count == 0)
	return 0;
    else if (count < 0)
	return -pj_get_netos_error();
    else if (count > PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL)
        count = PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL;

    /* Scan descriptor sets for event and add the events in the event
     * array to be processed later in this function. We do this so that
     * events can be processed in parallel without holding ioqueue lock.
     */
    pj_lock_acquire(ioqueue->lock);

    counter = 0;

    /* Scan for writable sockets first to handle piggy-back data
     * coming with accept().
     */
    h = ioqueue->active_list.next;
    for ( ; h!=&ioqueue->active_list && counter<count; h = h->next) {

	if ( (key_has_pending_write(h) || key_has_pending_connect(h))
	     && PJ_FD_ISSET(h->fd, &wfdset) && !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = WRITEABLE_EVENT;
            ++counter;
        }

        /* Scan for readable socket. */
	if ((key_has_pending_read(h) || key_has_pending_accept(h))
            && PJ_FD_ISSET(h->fd, &rfdset) && !IS_CLOSING(h) &&
	    counter<count)
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = READABLE_EVENT;
            ++counter;
	}

#if PJ_HAS_TCP
        if (key_has_pending_connect(h) && PJ_FD_ISSET(h->fd, &xfdset) &&
	    !IS_CLOSING(h) && counter<count) 
	{
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = EXCEPTION_EVENT;
            ++counter;
        }
#endif
    }

    pj_lock_release(ioqueue->lock);

    count = counter;

    /* Now process all events. The dispatch functions will take care
     * of locking in each of the key
     */
    for (counter=0; counter<count; ++counter) {
        switch (event[counter].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, event[counter].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, event[counter].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, event[counter].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(event[counter].key);
#endif
    }


    return count;
}
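A minimal sketch of the polling loop a typical application runs on top of this (quit_flag and the thread shape are assumptions): each pass dispatches up to PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL events or gives up after the timeout.

static pj_bool_t quit_flag;

static int ioqueue_worker(void *arg)
{
    pj_ioqueue_t *ioqueue = (pj_ioqueue_t*)arg;

    while (!quit_flag) {
	pj_time_val timeout = { 0, 500 };
	/* Negative return means error, 0 means timeout. */
	pj_ioqueue_poll(ioqueue, &timeout);
    }
    return 0;
}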
Example #9
/*
 * pj_ioqueue_register_sock()
 *
 * Register socket handle to ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
					      pj_ioqueue_t *ioqueue,
					      pj_sock_t sock,
					      void *user_data,
					      const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_key_t *key = NULL;
#if defined(PJ_WIN32) && PJ_WIN32!=0 || \
    defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0
    u_long value;
#else
    pj_uint32_t value;
#endif
    pj_status_t rc = PJ_SUCCESS;
    
    PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
                     cb && p_key, PJ_EINVAL);

    pj_lock_acquire(ioqueue->lock);

    if (ioqueue->count >= ioqueue->max) {
        rc = PJ_ETOOMANY;
	goto on_return;
    }

    /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get
     * the key from the free list. Otherwise allocate a new one. 
     */
#if PJ_IOQUEUE_HAS_SAFE_UNREG

    /* Scan closing_keys first to let them come back to free_list */
    scan_closing_keys(ioqueue);

    pj_assert(!pj_list_empty(&ioqueue->free_list));
    if (pj_list_empty(&ioqueue->free_list)) {
	rc = PJ_ETOOMANY;
	goto on_return;
    }

    key = ioqueue->free_list.next;
    pj_list_erase(key);
#else
    key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif

    rc = ioqueue_init_key(pool, ioqueue, key, sock, user_data, cb);
    if (rc != PJ_SUCCESS) {
	key = NULL;
	goto on_return;
    }

    /* Set socket to nonblocking. */
    value = 1;
#if defined(PJ_WIN32) && PJ_WIN32!=0 || \
    defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0
    if (ioctlsocket(sock, FIONBIO, &value)) {
#else
    if (ioctl(sock, FIONBIO, &value)) {
#endif
        rc = pj_get_netos_error();
	goto on_return;
    }


    /* Put in active list. */
    pj_list_insert_before(&ioqueue->active_list, key);
    ++ioqueue->count;

    /* Rescan fdset to get max descriptor */
    rescan_fdset(ioqueue);

on_return:
    /* On error, socket may be left in non-blocking mode. */
    *p_key = key;
    pj_lock_release(ioqueue->lock);
    
    return rc;
}

#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Increment key's reference counter */
static void increment_counter(pj_ioqueue_key_t *key)
{
    pj_mutex_lock(key->ioqueue->ref_cnt_mutex);
    ++key->ref_count;
    pj_mutex_unlock(key->ioqueue->ref_cnt_mutex);
}
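increment_counter() has a matching decrement. A minimal sketch under the same assumptions (the real select backend additionally moves a fully released, closing key onto closing_list and rescans the fdset):

static void decrement_counter(pj_ioqueue_key_t *key)
{
    pj_mutex_lock(key->ioqueue->ref_cnt_mutex);
    --key->ref_count;
    pj_mutex_unlock(key->ioqueue->ref_cnt_mutex);
}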
Example #10
static pj_status_t krx_ice_handle_events(krx_ice* k, unsigned int maxms, unsigned int* pcount) {

  if(!k) {
    printf("Error: krx_ice_handle_events(), invalid krx_ice pointer.\n");
    return PJ_EINVAL;
  }

  printf("lets poll: %p.\n", k);
  
  pj_time_val max_timeout = { 0, 0 };
  pj_time_val timeout = { 0, 0 };
  unsigned int count = 0;
  unsigned int net_event_count = 0;
  int c;

  max_timeout.msec = maxms;
  timeout.sec = timeout.msec = 0;

  /* poll the timer to run it and also retrieve earliest entry */
  c = pj_timer_heap_poll(k->ice_cfg.stun_cfg.timer_heap, &timeout);
  if(c > 0) {
    count += c;
  }
  
  /* timer_heap_poll should never return negative values! */
  if(timeout.sec < 0 || timeout.msec < 0) {
    printf("Error: timer returns negative values. Should never happen.\n");
    exit(1);
  }
  
  if(timeout.msec >= 1000) {
    timeout.msec = 999;
  }

  /* use the minimum timeout value */
  if(PJ_TIME_VAL_GT(timeout, max_timeout)) {
    timeout = max_timeout;
  }


  /* poll ioqueue */
  do { 

    c = pj_ioqueue_poll(k->ice_cfg.stun_cfg.ioqueue, &timeout);
    if(c < 0) {
      pj_status_t err = pj_get_netos_error();
      pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
      if(pcount) {
        *pcount = count;
        return err;
      }
      else if(c == 0) {
        break;
      }
      else {
        net_event_count += c;
        timeout.sec = timeout.msec = 0;
      }
    }

    
  } while(c > 0 && net_event_count < 1 );

  count += net_event_count;
  if(pcount) {
    *pcount = count;
  }

  return PJ_SUCCESS;
}