Example no. 1
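This is pj_timer_heap_poll() from PJLIB's timer heap. It pops expired entries off the min-heap and invokes each callback with the heap lock released, then reports the delay until the next due timer through *next_delay. The PJ_RACE_ME(5) marker appears to be injected race-testing instrumentation rather than a stock PJLIB macro.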
PJ_DEF(unsigned) pj_timer_heap_poll( pj_timer_heap_t *ht, 
                                     pj_time_val *next_delay )
{
    pj_time_val now;
    unsigned count;

    PJ_ASSERT_RETURN(ht, 0);

    lock_timer_heap(ht);
    if (!ht->cur_size && next_delay) {
	next_delay->sec = next_delay->msec = PJ_MAXINT32;
        unlock_timer_heap(ht);
	return 0;
    }

    count = 0;
    pj_gettickcount(&now);

    while ( ht->cur_size && 
	    PJ_TIME_VAL_LTE(ht->heap[0]->_timer_value, now) &&
            count < ht->max_entries_per_poll ) 
    {
	pj_timer_entry *node = remove_node(ht, 0);
	pj_grp_lock_t *grp_lock;

	++count;

	grp_lock = node->_grp_lock;
	node->_grp_lock = NULL;

	unlock_timer_heap(ht);

	PJ_RACE_ME(5);

	if (node->cb)
	    (*node->cb)(ht, node);

	if (grp_lock)
	    pj_grp_lock_dec_ref(grp_lock);

	lock_timer_heap(ht);
    }
    if (ht->cur_size && next_delay) {
	*next_delay = ht->heap[0]->_timer_value;
	PJ_TIME_VAL_SUB(*next_delay, now);
	if (next_delay->sec < 0 || next_delay->msec < 0)
	    next_delay->sec = next_delay->msec = 0;
    } else if (next_delay) {
	next_delay->sec = next_delay->msec = PJ_MAXINT32;
    }
    unlock_timer_heap(ht);

    return count;
}
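A minimal sketch of how this poll function is typically driven, assuming an initialized timer heap and ioqueue; the helper name poll_once and the 100 ms cap are illustrative, not from the source:

#include <pjlib.h>

static void poll_once(pj_timer_heap_t *ht, pj_ioqueue_t *ioq)
{
    pj_time_val delay;

    /* Fire expired timers; 'delay' receives the time until the next one
     * (PJ_MAXINT32 seconds if the heap is empty).
     */
    pj_timer_heap_poll(ht, &delay);

    /* Cap the wait (assumed policy) so timers scheduled concurrently by
     * other threads are still noticed reasonably soon.
     */
    if (delay.sec > 0 || delay.msec > 100) {
        delay.sec = 0;
        delay.msec = 100;
    }

    /* Block on socket events at most until the next timer is due. */
    pj_ioqueue_poll(ioq, &delay);
}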
Example no. 2
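pj_ioqueue_poll() from the epoll backend of PJLIB's ioqueue. It waits on epoll, collects the ready keys into a local on-stack queue while holding the ioqueue lock, and then dispatches them with the lock released; the PJ_RACE_ME(5) markers again appear to be injected race-testing instrumentation.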
/*
 * pj_ioqueue_poll()
 *
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    //struct epoll_event *events = ioqueue->events;
    //struct queue *queue = ioqueue->queue;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;
    
    PJ_CHECK_STACK();

    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);
 
    //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec);
    count = os_epoll_wait( ioqueue->epfd, events, PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	/* Check the closing keys only when there's no activity and when
	 * there are pending closing keys.
	 */
	if (!pj_list_empty(&ioqueue->closing_list)) {
	    pj_lock_acquire(ioqueue->lock);
	    scan_closing_keys(ioqueue);
	    pj_lock_release(ioqueue->lock);
	}
#endif
	TRACE_((THIS_FILE, "os_epoll_wait timed out"));
	return count;
    }
    else if (count < 0) {
	TRACE_((THIS_FILE, "os_epoll_wait error"));
	return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
		       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
	pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
				events[i].epoll_data;

	TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

	/*
	 * Check readability.
	 */
	if ((events[i].events & EPOLLIN) && 
	    (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = READABLE_EVENT;
	    ++processed;
	    continue;
	}

	/*
	 * Check for writability.
	 */
	if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}

#if PJ_HAS_TCP
	/*
	 * Check for completion of connect() operation.
	 */
	if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}
#endif /* PJ_HAS_TCP */

	/*
	 * Check for error condition.
	 */
	if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) {
	    /*
	     * We need to handle this exception event.  If it's related to us
	     * connecting, report it as such.  If not, just report it as a
	     * read event and the higher layers will handle it.
	     */
	    if (h->connecting) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = EXCEPTION_EVENT;
		++processed;
	    } else if (key_has_pending_read(h) || key_has_pending_accept(h)) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = READABLE_EVENT;
		++processed;
	    }
	    continue;
	}
    }
    for (i=0; i<processed; ++i) {
	if (queue[i].key->grp_lock)
	    pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    PJ_RACE_ME(5);

    pj_lock_release(ioqueue->lock);

    PJ_RACE_ME(5);

    /* Now process the events. */
    for (i=0; i<processed; ++i) {
	switch (queue[i].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, queue[i].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, queue[i].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(queue[i].key);
#endif

	if (queue[i].key->grp_lock)
	    pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock,
	                            "ioqueue", 0);
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     */
    if (count > 0 && !processed && msec > 0) {
	pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
		       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
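Note the two-phase structure: ready keys are first recorded into the local queue under ioqueue->lock, each taking a group-lock reference so the key cannot be destroyed in the interim, and only after the lock is released are the dispatch functions invoked. This keeps potentially slow callbacks out of the critical section at the cost of the extra reference counting.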
Example no. 3
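ioqueue_dispatch_exception_event() handles an exception event (typically a failed connect()) on a key: it clears the connecting flag, removes the key from the writable/exception sets, and reports the socket error, obtained via SO_ERROR where available, through on_connect_complete.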
void ioqueue_dispatch_exception_event( pj_ioqueue_t *ioqueue, 
                                       pj_ioqueue_key_t *h )
{
    pj_bool_t has_lock;

    pj_ioqueue_lock_key(h);

    if (!h->connecting) {
        /* It is possible that more than one thread was woken up; the
         * remaining threads will see h->connecting as zero because the
         * event has already been processed by another thread.
         */
	pj_ioqueue_unlock_key(h);
        return;
    }

    if (IS_CLOSING(h)) {
	pj_ioqueue_unlock_key(h);
	return;
    }

    /* Clear operation. */
    h->connecting = 0;

    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
    ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);

    /* Unlock; from this point we don't need to hold the key's mutex
     * (unless concurrency is disabled, in which case we must hold the
     * mutex while calling the callback).
     */
    if (h->allow_concurrent) {
	/* The concurrency setting may change while we're in the callback,
	 * so record in a flag whether we still hold the lock.
	 */
	has_lock = PJ_FALSE;
	pj_ioqueue_unlock_key(h);
	PJ_RACE_ME(5);
    } else {
	has_lock = PJ_TRUE;
    }

    /* Call callback. */
    if (h->cb.on_connect_complete && !IS_CLOSING(h)) {
	pj_status_t status = -1;
#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
	int value;
	int vallen = sizeof(value);
	int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR, 
				       &value, &vallen);
	if (gs_rc == 0) {
	    status = PJ_RETURN_OS_ERROR(value);
	}
#endif

	(*h->cb.on_connect_complete)(h, status);
    }

    if (has_lock) {
	pj_ioqueue_unlock_key(h);
    }
}
Example no. 4
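ioqueue_dispatch_read_event() completes one pending accept or read operation on the key and then invokes the matching callback, dropping the key's mutex first when concurrency is allowed.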
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
{
    pj_status_t rc;

    /* Lock the key. */
    pj_ioqueue_lock_key(h);

    if (IS_CLOSING(h)) {
	pj_ioqueue_unlock_key(h);
	return;
    }

#   if PJ_HAS_TCP
    if (!pj_list_empty(&h->accept_list)) {

        struct accept_operation *accept_op;
	pj_bool_t has_lock;
	
        /* Get one accept operation from the list. */
	accept_op = h->accept_list.next;
        pj_list_erase(accept_op);
        accept_op->op = PJ_IOQUEUE_OP_NONE;

	/* Clear bit in fdset if there is no more pending accept */
        if (pj_list_empty(&h->accept_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

	rc=pj_sock_accept(h->fd, accept_op->accept_fd, 
                          accept_op->rmt_addr, accept_op->addrlen);
	if (rc==PJ_SUCCESS && accept_op->local_addr) {
	    rc = pj_sock_getsockname(*accept_op->accept_fd, 
                                     accept_op->local_addr,
				     accept_op->addrlen);
	}

	/* Unlock; from this point we don't need to hold the key's mutex
	 * (unless concurrency is disabled, in which case we must hold the
	 * mutex while calling the callback).
	 */
	if (h->allow_concurrent) {
	    /* The concurrency setting may change while we're in the
	     * callback, so record in a flag whether we still hold the lock.
	     */
	    has_lock = PJ_FALSE;
	    pj_ioqueue_unlock_key(h);
	    PJ_RACE_ME(5);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_accept_complete && !IS_CLOSING(h)) {
	    (*h->cb.on_accept_complete)(h, 
                                        (pj_ioqueue_op_key_t*)accept_op,
                                        *accept_op->accept_fd, rc);
	}

	if (has_lock) {
	    pj_ioqueue_unlock_key(h);
	}
    }
    else
#   endif
    if (key_has_pending_read(h)) {
        struct read_operation *read_op;
        pj_ssize_t bytes_read;
	pj_bool_t has_lock;

        /* Get one pending read operation from the list. */
        read_op = h->read_list.next;
        pj_list_erase(read_op);

        /* Clear fdset if there is no pending read. */
        if (pj_list_empty(&h->read_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        bytes_read = read_op->size;

	if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
	    read_op->op = PJ_IOQUEUE_OP_NONE;
	    rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read, 
				  read_op->flags,
				  read_op->rmt_addr, 
                                  read_op->rmt_addrlen);
	} else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
	    read_op->op = PJ_IOQUEUE_OP_NONE;
	    rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, 
			      read_op->flags);
        } else {
            pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
	    read_op->op = PJ_IOQUEUE_OP_NONE;
            /*
             * User has specified pj_ioqueue_read().
             * On Win32, we should do ReadFile(). But because we got
             * here because of select() anyway, user must have put a
             * socket descriptor on h->fd, which in this case we can
             * just call pj_sock_recv() instead of ReadFile().
             * On Unix, user may put a file in h->fd, so we'll have
             * to call read() here.
             * This may not compile on systems which don't have
             * read(). That's why we only specify PJ_LINUX here so
             * that the error is easier to catch.
             */
#	    if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
	       defined(PJ_WIN64) && PJ_WIN64 != 0 || \
	       defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
                rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, 
				  read_op->flags);
                //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
                //              &bytes_read, NULL);
#           elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
                bytes_read = read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
#	    elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
                bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
#           else
#               error "Implement read() for this platform!"
#           endif
        }
	
	if (rc != PJ_SUCCESS) {
#	    if (defined(PJ_WIN32) && PJ_WIN32 != 0) || \
	       (defined(PJ_WIN64) && PJ_WIN64 != 0) 
	    /* On Win32, for UDP, WSAECONNRESET on the receive side
	     * indicates that a previous send has triggered an ICMP Port
	     * Unreachable message.
	     * But at this point we wouldn't know which of the previous
	     * keys triggered the error, since the UDP socket can be
	     * shared!
	     * So we just ignore it!
	     */

	    if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
		//PJ_LOG(4,(THIS_FILE, 
                //          "Ignored ICMP port unreach. on key=%p", h));
	    }
#	    endif

            /* In any case we would report this to caller. */
            bytes_read = -rc;

#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
	    /* Special treatment for dead UDP sockets here, see ticket #1107 */
	    if (rc == PJ_STATUS_FROM_OS(ENOTCONN) && !IS_CLOSING(h) &&
		h->fd_type==pj_SOCK_DGRAM())
	    {
		replace_udp_sock(h);
	    }
#endif
	}

	/* Unlock; from this point we don't need to hold the key's mutex
	 * (unless concurrency is disabled, in which case we must hold the
	 * mutex while calling the callback).
	 */
	if (h->allow_concurrent) {
	    /* The concurrency setting may change while we're in the
	     * callback, so record in a flag whether we still hold the lock.
	     */
	    has_lock = PJ_FALSE;
	    pj_ioqueue_unlock_key(h);
	    PJ_RACE_ME(5);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_read_complete && !IS_CLOSING(h)) {
	    (*h->cb.on_read_complete)(h, 
                                      (pj_ioqueue_op_key_t*)read_op,
                                      bytes_read);
        }

	if (has_lock) {
	    pj_ioqueue_unlock_key(h);
	}

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread is
         * eventually able to process the event.
         */
	pj_ioqueue_unlock_key(h);
    }
}
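For context, a hedged sketch of the registration that makes this dispatch path reach a user callback; my_on_read and register_udp are illustrative names, not from the source:

#include <pjlib.h>

static void my_on_read(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key,
                       pj_ssize_t bytes_read)
{
    /* A negative bytes_read encodes -pj_status_t, as set by the
     * dispatch code above.
     */
    PJ_UNUSED_ARG(key); PJ_UNUSED_ARG(op_key); PJ_UNUSED_ARG(bytes_read);
}

static pj_status_t register_udp(pj_pool_t *pool, pj_ioqueue_t *ioq,
                                pj_sock_t sock, pj_ioqueue_key_t **key)
{
    pj_ioqueue_callback cb;

    pj_bzero(&cb, sizeof(cb));
    cb.on_read_complete = &my_on_read;

    /* Each subsequent pj_ioqueue_recvfrom() that returns PJ_EPENDING
     * will later be completed through ioqueue_dispatch_read_event().
     */
    return pj_ioqueue_register_sock(pool, ioq, sock, NULL, &cb, key);
}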
Example no. 5
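ioqueue_dispatch_write_event() completes either a pending connect() (determining its outcome via SO_ERROR, Win32 semantics, or the getpeername() trick) or the next pending send operation, and invokes on_connect_complete or on_write_complete accordingly.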
/*
 * ioqueue_dispatch_write_event()
 *
 * Report the occurrence of an event in the key to be processed by the
 * framework.
 */
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
    /* Lock the key. */
    pj_ioqueue_lock_key(h);

    if (IS_CLOSING(h)) {
	pj_ioqueue_unlock_key(h);
	return;
    }

#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
    if (h->connecting) {
	/* Completion of connect() operation */
	pj_status_t status;
	pj_bool_t has_lock;

	/* Clear operation. */
	h->connecting = 0;

        ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
        ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);


#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
	/* from connect(2): 
	 * On Linux, use getsockopt to read the SO_ERROR option at
	 * level SOL_SOCKET to determine whether connect() completed
	 * successfully (if SO_ERROR is zero).
	 */
	{
	  int value;
	  int vallen = sizeof(value);
	  int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR, 
					 &value, &vallen);
	  if (gs_rc != 0) {
	    /* Argh!! What to do now??? 
	     * Just indicate that the socket is connected. The
	     * application will get error as soon as it tries to use
	     * the socket to send/receive.
	     */
	      status = PJ_SUCCESS;
	  } else {
	      status = PJ_STATUS_FROM_OS(value);
	  }
 	}
#elif (defined(PJ_WIN32) && PJ_WIN32!=0) || (defined(PJ_WIN64) && PJ_WIN64!=0) 
	status = PJ_SUCCESS; /* success */
#else
	/* Excellent information in D.J. Bernstein page:
	 * http://cr.yp.to/docs/connect.html
	 *
	 * Seems like the most portable way of detecting connect()
	 * failure is to call getpeername(). If socket is connected,
	 * getpeername() will return 0. If the socket is not connected,
	 * it will return ENOTCONN, and read(fd, &ch, 1) will produce
	 * the right errno through error slippage. This is a combination
	 * of suggestions from Douglas C. Schmidt and Ken Keys.
	 */
	{
	    struct sockaddr_in addr;
	    int addrlen = sizeof(addr);

	    status = pj_sock_getpeername(h->fd, (struct sockaddr*)&addr,
				         &addrlen);
	}
#endif

        /* Unlock; from this point we don't need to hold the key's mutex
	 * (unless concurrency is disabled, in which case we must hold the
	 * mutex while calling the callback).
	 */
	if (h->allow_concurrent) {
	    /* The concurrency setting may change while we're in the
	     * callback, so record in a flag whether we still hold the lock.
	     */
	    has_lock = PJ_FALSE;
	    pj_ioqueue_unlock_key(h);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_connect_complete && !IS_CLOSING(h))
	    (*h->cb.on_connect_complete)(h, status);

	/* Unlock if we still hold the lock */
	if (has_lock) {
	    pj_ioqueue_unlock_key(h);
	}

        /* Done. */

    } else 
#endif /* PJ_HAS_TCP */
    if (key_has_pending_write(h)) {
	/* Socket is writable. */
        struct write_operation *write_op;
        pj_ssize_t sent;
        pj_status_t send_rc = PJ_SUCCESS;

        /* Get the first in the queue. */
        write_op = h->write_list.next;

        /* For datagrams, we can remove the write_op from the list
         * so that send() can work in parallel.
         */
        if (h->fd_type == pj_SOCK_DGRAM()) {
            pj_list_erase(write_op);

            if (pj_list_empty(&h->write_list))
                ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

        }

        /* Send the data. 
         * Unfortunately we must do this while holding key's mutex, thus
         * preventing parallel write on a single key.. :-((
         */
        sent = write_op->size - write_op->written;
        if (write_op->op == PJ_IOQUEUE_OP_SEND) {
            send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
                                   &sent, write_op->flags);
	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;
        } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
	    int retry = 2;
	    while (--retry >= 0) {
		send_rc = pj_sock_sendto(h->fd, 
					 write_op->buf+write_op->written,
					 &sent, write_op->flags,
					 &write_op->rmt_addr, 
					 write_op->rmt_addrlen);
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
	    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
		/* Special treatment for dead UDP sockets here, see ticket #1107 */
		if (send_rc==PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(h) &&
		    h->fd_type==pj_SOCK_DGRAM())
		{
		    PJ_PERROR(4,(THIS_FILE, send_rc,
				 "Send error for socket %d, retrying",
				 h->fd));
		    replace_udp_sock(h);
		    continue;
		}
#endif
		break;
	    }

	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;
        } else {
            pj_assert(!"Invalid operation type!");
	    write_op->op = PJ_IOQUEUE_OP_NONE;
            send_rc = PJ_EBUG;
        }

        if (send_rc == PJ_SUCCESS) {
            write_op->written += sent;
        } else {
            pj_assert(send_rc > 0);
            write_op->written = -send_rc;
        }

        /* Are we finished with this buffer? */
        if (send_rc!=PJ_SUCCESS || 
            write_op->written == (pj_ssize_t)write_op->size ||
            h->fd_type == pj_SOCK_DGRAM()) 
        {
	    pj_bool_t has_lock;

	    write_op->op = PJ_IOQUEUE_OP_NONE;

            if (h->fd_type != pj_SOCK_DGRAM()) {
                /* Write completion of the whole stream. */
                pj_list_erase(write_op);

                /* Clear operation if there's no more data to send. */
                if (pj_list_empty(&h->write_list))
                    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

            }

	    /* Unlock; from this point we don't need to hold the key's mutex
	     * (unless concurrency is disabled, in which case we must hold
	     * the mutex while calling the callback).
	     */
	    if (h->allow_concurrent) {
		/* The concurrency setting may change while we're in the
		 * callback, so record in a flag whether we still hold the
		 * lock.
		 */
		has_lock = PJ_FALSE;
		pj_ioqueue_unlock_key(h);
		PJ_RACE_ME(5);
	    } else {
		has_lock = PJ_TRUE;
	    }

	    /* Call callback. */
            if (h->cb.on_write_complete && !IS_CLOSING(h)) {
	        (*h->cb.on_write_complete)(h, 
                                           (pj_ioqueue_op_key_t*)write_op,
                                           write_op->written);
            }

	    if (has_lock) {
		pj_ioqueue_unlock_key(h);
	    }

        } else {
            pj_ioqueue_unlock_key(h);
        }

        /* Done. */
    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread is
         * eventually able to process the event.
         */
	pj_ioqueue_unlock_key(h);
    }
}
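A hedged sketch of the sender side this dispatch path serves; send_async is an illustrative name and the error handling is only outlined:

#include <pjlib.h>

static void send_async(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key,
                       const void *pkt, pj_ssize_t len,
                       const pj_sockaddr_t *dst, int dst_len)
{
    pj_ssize_t size = len;
    pj_status_t rc = pj_ioqueue_sendto(key, op_key, pkt, &size, 0,
                                       dst, dst_len);
    if (rc == PJ_SUCCESS) {
        /* Sent immediately; on_write_complete will not be called. */
    } else if (rc == PJ_EPENDING) {
        /* Queued; ioqueue_dispatch_write_event() will complete it later
         * and invoke on_write_complete with the number of bytes written.
         */
    } else {
        /* Immediate failure; handle rc here. */
    }
}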