Example #1
/*
 * pj_ioqueue_unregister()
 *
 * Unregister handle from ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
{
    pj_ioqueue_t *ioqueue;

    PJ_ASSERT_RETURN(key, PJ_EINVAL);

    ioqueue = key->ioqueue;

    /* Lock the key to make sure no callback is simultaneously modifying
     * the key. We need to lock the key before ioqueue here to prevent
     * deadlock.
     */
    pj_mutex_lock(key->mutex);

    /* Also lock ioqueue */
    pj_lock_acquire(ioqueue->lock);

    pj_assert(ioqueue->count > 0);
    --ioqueue->count;
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Ticket #520, key will be erased more than once */
    pj_list_erase(key);
#endif
    PJ_FD_CLR(key->fd, &ioqueue->rfdset);
    PJ_FD_CLR(key->fd, &ioqueue->wfdset);
#if PJ_HAS_TCP
    PJ_FD_CLR(key->fd, &ioqueue->xfdset);
#endif

    /* Close socket. */
    pj_sock_close(key->fd);

    /* Clear callback */
    key->cb.on_accept_complete = NULL;
    key->cb.on_connect_complete = NULL;
    key->cb.on_read_complete = NULL;
    key->cb.on_write_complete = NULL;

    /* Must release ioqueue lock first before decrementing counter, to
     * prevent deadlock.
     */
    pj_lock_release(ioqueue->lock);

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key is closing. */
    key->closing = 1;

    /* Decrement counter. */
    decrement_counter(key);

    /* Done. */
    pj_mutex_unlock(key->mutex);
#else
    pj_mutex_destroy(key->mutex);
#endif

    return PJ_SUCCESS;
}
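
decrement_counter() itself is never shown on this page, even though every pjsip example calls it. Judging from the call sites (key->ref_count in Example #8, and the closing_list / scan_closing_keys() pair in Examples #6 and #8), a minimal sketch for the safe-unreg build could look like the following; the free_time field and the PJ_IOQUEUE_KEY_FREE_DELAY grace period are assumptions, not taken from these excerpts:

#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Sketch only: take one reference on the key so a poll in progress
 * keeps it alive across the callback dispatch.
 */
static void increment_counter(pj_ioqueue_key_t *key)
{
    pj_atomic_inc(key->ref_count);
}

/* Sketch only: drop one reference; when the last one is gone, park the
 * key on the ioqueue's closing_list so scan_closing_keys() can reclaim
 * it after a grace period.
 */
static void decrement_counter(pj_ioqueue_key_t *key)
{
    pj_lock_acquire(key->ioqueue->lock);
    pj_atomic_dec(key->ref_count);
    if (pj_atomic_get(key->ref_count) == 0) {
	pj_assert(key->closing == 1);
	pj_gettickcount(&key->free_time);
	key->free_time.msec += PJ_IOQUEUE_KEY_FREE_DELAY; /* assumed constant */
	pj_time_val_normalize(&key->free_time);
	pj_list_erase(key);
	pj_list_push_back(&key->ioqueue->closing_list, key);
    }
    pj_lock_release(key->ioqueue->lock);
}
#endif

The grace period matters because a poll loop in another thread may still hold a pointer to a key it fetched before unregistration.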
Example #2
/*
 * pj_ioqueue_unregister()
 *
 * Unregister handle from ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
{
    pj_ioqueue_t *ioqueue;
    struct epoll_event ev;
    int status;
    
    PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL);

    ioqueue = key->ioqueue;

    /* Lock the key to make sure no callback is simultaneously modifying
     * the key. We need to lock the key before ioqueue here to prevent
     * deadlock.
     */
    pj_mutex_lock(key->mutex);

    /* Also lock ioqueue */
    pj_lock_acquire(ioqueue->lock);

    pj_assert(ioqueue->count > 0);
    --ioqueue->count;
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    pj_list_erase(key);
#endif

    /* Kernels before 2.6.9 required a non-NULL event argument even for
     * EPOLL_CTL_DEL, hence the dummy event.
     */
    ev.events = 0;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev);
    if (status != 0) {
	pj_status_t rc = pj_get_os_error();
	pj_lock_release(ioqueue->lock);
	pj_mutex_unlock(key->mutex);	/* don't leak the key lock on error */
	return rc;
    }

    /* Destroy the key. */
    pj_sock_close(key->fd);

    pj_lock_release(ioqueue->lock);


#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key is closing. */
    key->closing = 1;

    /* Decrement counter. */
    decrement_counter(key);

    /* Done. */
    pj_mutex_unlock(key->mutex);
#else
    pj_mutex_destroy(key->mutex);
#endif

    return PJ_SUCCESS;
}
Example #3
File: 3_5.cpp Project: Se7ge/csc
	SharedPtr& operator=(const SharedPtr &obj) {
		if (this != &obj) {
			decrement_counter();
			ptr_ = obj.ptr_;
			counter_ = obj.counter_;
			if (obj.ptr_) {
				++(*counter_);
			}
		}
		return *this;
	}
Example #4
File: 3_5.cpp Project: Se7ge/csc
	void reset(Expression *ptr = 0) {
		if (ptr_ != ptr) {
			decrement_counter();
			ptr_ = ptr;
			if (ptr_) {
				counter_ = new int(1);	// fresh object starts with one owner
			} else {
				counter_ = 0;		// don't keep a dangling counter
			}
		}
	}
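
Examples #3, #4, and #12 rely on a private decrement_counter() member that never appears on this page. Based on how ptr_ and counter_ are used above, a plausible sketch (the body is an assumption, not taken from 3_5.cpp):

	// Sketch only: give up this SharedPtr's reference; free the object
	// and its counter when the last owner goes away.
	void decrement_counter() {
		if (counter_ && --(*counter_) == 0) {
			delete ptr_;		// last owner: free the Expression
			delete counter_;	// and the shared reference counter
		}
		ptr_ = 0;
		counter_ = 0;
	}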
Example #5
void dpc_device::device_timer(emu_timer &timer, device_timer_id id, int param, void *ptr)
{
	if (id == TIMER_OSC)
	{
		// callback
		for (int data_fetcher = 5; data_fetcher < 8; data_fetcher++)
		{
			if (m_df[data_fetcher].osc_clk)
			{
				decrement_counter(data_fetcher);
			}
		}
	}
}
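
Example #5 is from MAME's DPC (the Pitfall II coprocessor) emulation; decrement_counter(data_fetcher) itself is not shown. Purely as a hypothetical sketch of the idea, assuming each data fetcher keeps a 16-bit address counter split into low/high bytes that counts down on every oscillator tick (the m_df field names are assumptions):

	// Hypothetical sketch: step one data fetcher's address counter down,
	// borrowing from the high byte when the low byte wraps past zero.
	void dpc_device::decrement_counter(int data_fetcher)
	{
		if (m_df[data_fetcher].low-- == 0x00)
			m_df[data_fetcher].high--;
	}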
Example #6
/*
 * pj_ioqueue_poll()
 *
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    //struct epoll_event *events = ioqueue->events;
    //struct queue *queue = ioqueue->queue;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;
    
    PJ_CHECK_STACK();

    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);
 
    //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec);
    count = os_epoll_wait( ioqueue->epfd, events, PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Check the closing keys only when there's no activity and when there are
     * pending closing keys.
     */
    if (count == 0 && !pj_list_empty(&ioqueue->closing_list)) {
	pj_lock_acquire(ioqueue->lock);
	scan_closing_keys(ioqueue);
	pj_lock_release(ioqueue->lock);
    }
#endif
	TRACE_((THIS_FILE, "os_epoll_wait timed out"));
	return count;
    }
    else if (count < 0) {
	TRACE_((THIS_FILE, "os_epoll_wait error"));
	return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
		       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
	pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
				events[i].epoll_data;

	TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

	/*
	 * Check readability.
	 */
	if ((events[i].events & EPOLLIN) && 
	    (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = READABLE_EVENT;
	    ++processed;
	    continue;
	}

	/*
	 * Check for writeability.
	 */
	if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}

#if PJ_HAS_TCP
	/*
	 * Check for completion of connect() operation.
	 */
	if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	    continue;
	}
#endif /* PJ_HAS_TCP */

	/*
	 * Check for error condition.
	 */
	if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) {
	    /*
	     * We need to handle this exception event.  If it's related to us
	     * connecting, report it as such.  If not, just report it as a
	     * read event and the higher layers will handle it.
	     */
	    if (h->connecting) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = EXCEPTION_EVENT;
		++processed;
	    } else if (key_has_pending_read(h) || key_has_pending_accept(h)) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
		increment_counter(h);
#endif
		queue[processed].key = h;
		queue[processed].event_type = READABLE_EVENT;
		++processed;
	    }
	    continue;
	}
    }
    for (i=0; i<processed; ++i) {
	if (queue[i].key->grp_lock)
	    pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    PJ_RACE_ME(5);

    pj_lock_release(ioqueue->lock);

    PJ_RACE_ME(5);

    /* Now process the events. */
    for (i=0; i<processed; ++i) {
	switch (queue[i].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, queue[i].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, queue[i].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(queue[i].key);
#endif

	if (queue[i].key->grp_lock)
	    pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock,
	                            "ioqueue", 0);
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     */
    if (count > 0 && !processed && msec > 0) {
	pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
		       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
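
Both poll variants on this page are designed to be driven from a dedicated loop. A typical worker thread could look like the sketch below; quit_flag is a hypothetical application shutdown flag, and the ioqueue is assumed to have been created elsewhere:

static volatile int quit_flag;	/* hypothetical flag, set at shutdown */

static int worker_thread(void *arg)
{
    pj_ioqueue_t *ioqueue = (pj_ioqueue_t*)arg;

    while (!quit_flag) {
	/* Wake up at least every 500 msec so the thread can notice
	 * quit_flag even when the ioqueue is completely idle.
	 */
	pj_time_val timeout = { 0, 500 };
	pj_ioqueue_poll(ioqueue, &timeout);
    }
    return 0;
}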
Example #7
/*
 * pj_ioqueue_unregister()
 *
 * Unregister handle from ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
{
    pj_ioqueue_t *ioqueue;
    struct epoll_event ev;
    int status;
    
    PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL);

    ioqueue = key->ioqueue;

    /* Lock the key to make sure no callback is simultaneously modifying
     * the key. We need to lock the key before ioqueue here to prevent
     * deadlock.
     */
    pj_ioqueue_lock_key(key);

    /* Also lock ioqueue */
    pj_lock_acquire(ioqueue->lock);

    pj_assert(ioqueue->count > 0);
    --ioqueue->count;
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    pj_list_erase(key);
#endif

    ev.events = 0;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev);
    if (status != 0) {
	pj_status_t rc = pj_get_os_error();
	pj_lock_release(ioqueue->lock);
	pj_ioqueue_unlock_key(key);	/* don't leak the key lock on error */
	return rc;
    }

    /* Destroy the key. */
    pj_sock_close(key->fd);

    pj_lock_release(ioqueue->lock);


#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key is closing. */
    key->closing = 1;

    /* Decrement counter. */
    decrement_counter(key);

    /* Done. */
    if (key->grp_lock) {
	/* just dec_ref and unlock. we will set grp_lock to NULL
	 * elsewhere */
	pj_grp_lock_t *grp_lock = key->grp_lock;
	// Don't set grp_lock to NULL otherwise the other thread
	// will crash. Just leave it as dangling pointer, but this
	// should be safe
	//key->grp_lock = NULL;
	pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
	pj_grp_lock_release(grp_lock);
    } else {
	pj_ioqueue_unlock_key(key);
    }
#else
    if (key->grp_lock) {
	/* set grp_lock to NULL and unlock */
	pj_grp_lock_t *grp_lock = key->grp_lock;
	// Don't set grp_lock to NULL otherwise the other thread
	// will crash. Just leave it as dangling pointer, but this
	// should be safe
	//key->grp_lock = NULL;
	pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
	pj_grp_lock_release(grp_lock);
    } else {
	pj_ioqueue_unlock_key(key);
    }

    pj_lock_destroy(key->lock);
#endif

    return PJ_SUCCESS;
}
Example #8
/*
 * pj_ioqueue_poll()
 *
 * A few things worth noting:
 *
 *  - we used to have only one callback called per poll, but that didn't
 *    go very well. The reason is that in some situations the write
 *    callback gets called all the time and never gives the read
 *    callback a chance to run. This happens, for example, when the user
 *    submits a write operation inside the write callback.
 *    As a result, we changed the behaviour so that now multiple
 *    callbacks are called in a single poll. It should be fast too,
 *    we just need to be careful with the ioqueue data structs.
 *
 *  - to guarantee preemptiveness etc., the poll function must strictly
 *    work on an fd_set copy of the ioqueue (not the original one).
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    pj_fd_set_t rfdset, wfdset, xfdset;
    int count, counter;
    pj_ioqueue_key_t *h;
    struct event
    {
        pj_ioqueue_key_t	*key;
        enum ioqueue_event_type  event_type;
    } event[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];

    PJ_ASSERT_RETURN(ioqueue, -PJ_EINVAL);

    /* Lock ioqueue before making fd_set copies */
    pj_lock_acquire(ioqueue->lock);

    /* We will only do select() when there are sockets to be polled.
     * Otherwise select() will return error.
     */
    if (PJ_FD_COUNT(&ioqueue->rfdset)==0 &&
        PJ_FD_COUNT(&ioqueue->wfdset)==0 
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
        && PJ_FD_COUNT(&ioqueue->xfdset)==0
#endif
	)
    {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	scan_closing_keys(ioqueue);
#endif
	pj_lock_release(ioqueue->lock);
	TRACE__((THIS_FILE, "     poll: no fd is set"));
        if (timeout)
            pj_thread_sleep(PJ_TIME_VAL_MSEC(*timeout));
        return 0;
    }

    /* Copy ioqueue's pj_fd_set_t to local variables. */
    pj_memcpy(&rfdset, &ioqueue->rfdset, sizeof(pj_fd_set_t));
    pj_memcpy(&wfdset, &ioqueue->wfdset, sizeof(pj_fd_set_t));
#if PJ_HAS_TCP
    pj_memcpy(&xfdset, &ioqueue->xfdset, sizeof(pj_fd_set_t));
#else
    PJ_FD_ZERO(&xfdset);
#endif

#if VALIDATE_FD_SET
    validate_sets(ioqueue, &rfdset, &wfdset, &xfdset);
#endif

    /* Unlock ioqueue before select(). */
    pj_lock_release(ioqueue->lock);

    count = pj_sock_select(ioqueue->nfds+1, &rfdset, &wfdset, &xfdset, 
			   timeout);
    
    if (count == 0)
	return 0;
    else if (count < 0)
	return -pj_get_netos_error();
    else if (count > PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL)
        count = PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL;

    /* Scan descriptor sets for event and add the events in the event
     * array to be processed later in this function. We do this so that
     * events can be processed in parallel without holding ioqueue lock.
     */
    pj_lock_acquire(ioqueue->lock);

    counter = 0;

    /* Scan for writable sockets first to handle piggy-back data
     * coming with accept().
     */
    h = ioqueue->active_list.next;
    for ( ; h!=&ioqueue->active_list && counter<count; h = h->next) {

	if ( (key_has_pending_write(h) || key_has_pending_connect(h))
	     && PJ_FD_ISSET(h->fd, &wfdset) && !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = WRITEABLE_EVENT;
            ++counter;
        }

        /* Scan for readable socket. */
	if ((key_has_pending_read(h) || key_has_pending_accept(h))
            && PJ_FD_ISSET(h->fd, &rfdset) && !IS_CLOSING(h) &&
	    counter<count)
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = READABLE_EVENT;
            ++counter;
	}

#if PJ_HAS_TCP
        if (key_has_pending_connect(h) && PJ_FD_ISSET(h->fd, &xfdset) &&
	    !IS_CLOSING(h) && counter<count) 
	{
#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
            event[counter].key = h;
            event[counter].event_type = EXCEPTION_EVENT;
            ++counter;
        }
#endif
    }

    pj_lock_release(ioqueue->lock);

    count = counter;

    /* Now process all events. The dispatch functions will take care
     * of locking each of the keys.
     */
    for (counter=0; counter<count; ++counter) {
        switch (event[counter].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, event[counter].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, event[counter].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, event[counter].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(event[counter].key);
#endif
    }


    return count;
}
Example #9
/*
 * pj_ioqueue_unregister()
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key )
{
    unsigned i;
    pj_bool_t has_lock;
    enum { RETRY = 10 };

    PJ_ASSERT_RETURN(key, PJ_EINVAL);

#if PJ_HAS_TCP
    if (key->connecting) {
	unsigned pos;
        pj_ioqueue_t *ioqueue;

        ioqueue = key->ioqueue;

	/* Erase from connecting_handles */
	pj_lock_acquire(ioqueue->lock);
	for (pos=0; pos < ioqueue->connecting_count; ++pos) {
	    if (ioqueue->connecting_keys[pos] == key) {
		erase_connecting_socket(ioqueue, pos);
		break;
	    }
	}
	key->connecting = 0;
	pj_lock_release(ioqueue->lock);
    }
#endif

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key as closing before closing handle. */
    key->closing = 1;

    /* If concurrency is disabled, wait until the key has finished
     * processing the callback
     */
    if (key->allow_concurrent == PJ_FALSE) {
	pj_mutex_lock(key->mutex);
	has_lock = PJ_TRUE;
    } else {
	has_lock = PJ_FALSE;
    }
#else
    PJ_UNUSED_ARG(has_lock);
#endif
    
    /* Close handle (the only way to disassociate handle from IOCP). 
     * We also need to close handle to make sure that no further events
     * will come to the handle.
     */
    /* Update 2008/07/18 (http://trac.pjsip.org/repos/ticket/575):
     *  - It seems that CloseHandle() in itself does not actually close
     *    the socket (i.e. it will still appear in "netstat" output). Also
     *    if we only use CloseHandle(), an "Invalid Handle" exception will
     *    be raised in WSACleanup().
     *  - MSDN documentation says that CloseHandle() must be called after 
     *    closesocket() call (see
     *    http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx).
     *    But it turns out that this will raise an "Invalid Handle"
     *    exception in debug mode.
     *  So because of this, we replaced CloseHandle() with closesocket()
     *  instead. This was tested on WinXP SP2.
     */
    //CloseHandle(key->hnd);
    pj_sock_close((pj_sock_t)key->hnd);

    /* Reset callbacks */
    key->cb.on_accept_complete = NULL;
    key->cb.on_connect_complete = NULL;
    key->cb.on_read_complete = NULL;
    key->cb.on_write_complete = NULL;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Even after handle is closed, I suspect that IOCP may still try to
     * do something with the handle, causing memory corruption when pool
     * debugging is enabled.
     *
     * Forcing context switch seems to have fixed that, but this is quite
     * an ugly solution..
     *
     * Update 2008/02/13:
     *	This should not happen if concurrency is disallowed for the key.
     *  So at least the application has a solution for this (i.e. by
     *  disallowing concurrency in the key).
     */
    //This will loop forever if unregistration is done inside the callback.
    //Bounding the wait with RETRY should cope with IOCP leaving the
    //socket signalled, without causing the deadlock.
    //while (pj_atomic_get(key->ref_count) != 1)
    //	pj_thread_sleep(0);
    for (i=0; pj_atomic_get(key->ref_count) != 1 && i<RETRY; ++i)
	pj_thread_sleep(0);

    /* Decrement reference counter to destroy the key. */
    decrement_counter(key);

    if (has_lock)
	pj_mutex_unlock(key->mutex);
#endif

    return PJ_SUCCESS;
}
Example #10
/*
 * Poll the I/O Completion Port, execute the callback,
 * and return the key and bytes transferred of the last operation.
 */
static pj_bool_t poll_iocp( HANDLE hIocp, DWORD dwTimeout, 
			    pj_ssize_t *p_bytes, pj_ioqueue_key_t **p_key )
{
    DWORD dwBytesTransfered, dwKey;
    generic_overlapped *pOv;
    pj_ioqueue_key_t *key;
    pj_ssize_t size_status = -1;
    BOOL rcGetQueued;

    /* Poll for completion status. */
    rcGetQueued = GetQueuedCompletionStatus(hIocp, &dwBytesTransfered,
					    &dwKey, (OVERLAPPED**)&pOv, 
					    dwTimeout);

    /* The return value is:
     * - nonzero if event was dequeued.
     * - zero and pOv==NULL if no event was dequeued.
     * - zero and pOv!=NULL if event for failed I/O was dequeued.
     */
    if (pOv) {
	pj_bool_t has_lock;

	/* Event was dequeued for either successful or failed I/O */
	key = (pj_ioqueue_key_t*)dwKey;
	size_status = dwBytesTransfered;

	/* Report to caller regardless */
	if (p_bytes)
	    *p_bytes = size_status;
	if (p_key)
	    *p_key = key;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	/* We shouldn't call callbacks if key is quitting. */
	if (key->closing)
	    return PJ_TRUE;

	/* If concurrency is disabled, lock the key 
	 * (and save the lock status to local var since app may change
	 * concurrency setting while in the callback) */
	if (key->allow_concurrent == PJ_FALSE) {
	    pj_mutex_lock(key->mutex);
	    has_lock = PJ_TRUE;
	} else {
	    has_lock = PJ_FALSE;
	}

	/* Now that we get the lock, check again that key is not closing */
	if (key->closing) {
	    if (has_lock) {
		pj_mutex_unlock(key->mutex);
	    }
	    return PJ_TRUE;
	}

	/* Increment reference counter to prevent this key from being
	 * deleted
	 */
	pj_atomic_inc(key->ref_count);
#else
	PJ_UNUSED_ARG(has_lock);
#endif

	/* Carry out the callback */
	switch (pOv->operation) {
	case PJ_IOQUEUE_OP_READ:
	case PJ_IOQUEUE_OP_RECV:
	case PJ_IOQUEUE_OP_RECV_FROM:
            pOv->operation = 0;
            if (key->cb.on_read_complete)
	        key->cb.on_read_complete(key, (pj_ioqueue_op_key_t*)pOv, 
                                         size_status);
	    break;
	case PJ_IOQUEUE_OP_WRITE:
	case PJ_IOQUEUE_OP_SEND:
	case PJ_IOQUEUE_OP_SEND_TO:
            pOv->operation = 0;
            if (key->cb.on_write_complete)
	        key->cb.on_write_complete(key, (pj_ioqueue_op_key_t*)pOv, 
                                                size_status);
	    break;
#if PJ_HAS_TCP
	case PJ_IOQUEUE_OP_ACCEPT:
	    /* special case for accept. */
	    ioqueue_on_accept_complete(key, (ioqueue_accept_rec*)pOv);
            if (key->cb.on_accept_complete) {
                ioqueue_accept_rec *accept_rec = (ioqueue_accept_rec*)pOv;
		pj_status_t status = PJ_SUCCESS;
		pj_sock_t newsock;

		newsock = accept_rec->newsock;
		accept_rec->newsock = PJ_INVALID_SOCKET;

		if (newsock == PJ_INVALID_SOCKET) {
		    int dwError = WSAGetLastError();
		    if (dwError == 0) dwError = OSERR_ENOTCONN;
		    status = PJ_RETURN_OS_ERROR(dwError);
		}

	        key->cb.on_accept_complete(key, (pj_ioqueue_op_key_t*)pOv,
                                           newsock, status);
		
            }
	    break;
	case PJ_IOQUEUE_OP_CONNECT:
#endif
	case PJ_IOQUEUE_OP_NONE:
	    pj_assert(0);
	    break;
	}

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(key);
	if (has_lock)
	    pj_mutex_unlock(key->mutex);
#endif

	return PJ_TRUE;
    }

    /* No event was queued. */
    return PJ_FALSE;
}
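
poll_iocp() returns PJ_TRUE once per dequeued completion, so a public poll entry point only needs to drain it. A hypothetical wrapper (the ioqueue->iocp handle name is an assumption, as the surrounding struct is not shown in the excerpt):

PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    DWORD dwMsec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : INFINITE;
    int count = 0;

    /* Block once with the caller's timeout, then keep polling with a
     * zero timeout while completions are still pending.
     */
    while (poll_iocp(ioqueue->iocp, dwMsec, NULL, NULL)) {
	++count;
	dwMsec = 0;
    }
    return count;
}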
Example #11
void down_clicked(ClickRecognizerRef recognizer, void* context) {
  decrement_counter();   
  display_counter();
}
Example #12
File: 3_5.cpp Project: Se7ge/csc
	~SharedPtr(){
        decrement_counter();
	}
Example #13
void down_single_click_handler(ClickRecognizerRef recognizer, Window *window) {
	decrement_counter(); populate_counter_labels();
}
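
The Pebble button handlers on this page treat decrement_counter() as a plain helper over shared state that the excerpts never define. A minimal sketch, assuming a single global counter clamped at zero (s_counter and the clamping are assumptions):

// Sketch only: the "down" button decrements the shared counter; clamp
// at zero so the watch face never displays a negative value.
static int s_counter = 0;

static void decrement_counter(void) {
  if (s_counter > 0)
    --s_counter;
}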