/*
 * Register a socket to the I/O queue (UWP backend).
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              pj_grp_lock_t *grp_lock,
                                              void *user_data,
                                              const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    PjUwpSocketCallback uwp_cb =
                            { &on_read, &on_write, &on_accept, &on_connect };
    pj_ioqueue_key_t *key;
    pj_status_t rc;

    pj_lock_acquire(ioqueue->lock);

    key = PJ_POOL_ZALLOC_T(pool, pj_ioqueue_key_t);
    rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }

    /* Create ioqueue key lock, if not yet */
    if (!key->lock) {
        rc = pj_lock_create_simple_mutex(pool, NULL, &key->lock);
        if (rc != PJ_SUCCESS) {
            key = NULL;
            goto on_return;
        }
    }

    /* Attach the UWP socket callbacks and switch the socket to
     * non-blocking mode.
     */
    PjUwpSocket *s = (PjUwpSocket*)sock;
    s->SetNonBlocking(&uwp_cb, key);

on_return:
    if (rc != PJ_SUCCESS) {
        if (key && key->grp_lock)
            pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
    }
    *p_key = key;
    pj_lock_release(ioqueue->lock);

    return rc;
}
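/* Illustrative usage sketch (not part of the original source): registering
 * a socket with a read-completion callback. The names on_data_read and
 * register_my_sock are hypothetical; only pj_ioqueue_register_sock2() and
 * pj_ioqueue_callback come from the API above.
 */
#if 0
static void on_data_read(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key,
                         pj_ssize_t bytes_read)
{
    /* Application-defined handling of a completed read. */
    PJ_UNUSED_ARG(key);
    PJ_UNUSED_ARG(op_key);
    PJ_UNUSED_ARG(bytes_read);
}

static pj_status_t register_my_sock(pj_pool_t *pool, pj_ioqueue_t *ioq,
                                    pj_sock_t sock, void *app_data,
                                    pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_callback cb;

    pj_bzero(&cb, sizeof(cb));
    cb.on_read_complete = &on_data_read;

    /* NULL group lock: the key protects itself with its own simple mutex. */
    return pj_ioqueue_register_sock2(pool, ioq, sock, NULL, app_data,
                                     &cb, p_key);
}
#endif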
/*
 * Unregister from the I/O Queue framework.
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key )
{
    pj_ioqueue_t *ioqueue;

    PJ_ASSERT_RETURN(key, PJ_EINVAL);

    ioqueue = key->ioqueue;

    /* Lock the key to make sure no callback is simultaneously modifying
     * the key. We need to lock the key before ioqueue here to prevent
     * deadlock.
     */
    pj_ioqueue_lock_key(key);

    /* Also lock ioqueue */
    pj_lock_acquire(ioqueue->lock);

    /* Close socket. */
    pj_sock_close(key->fd);

    /* Clear callback */
    key->cb.on_accept_complete = NULL;
    key->cb.on_connect_complete = NULL;
    key->cb.on_read_complete = NULL;
    key->cb.on_write_complete = NULL;

    pj_lock_release(ioqueue->lock);

    if (key->grp_lock) {
        pj_grp_lock_t *grp_lock = key->grp_lock;
        pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
        pj_grp_lock_release(grp_lock);
    } else {
        pj_ioqueue_unlock_key(key);
    }

    pj_lock_destroy(key->lock);

    return PJ_SUCCESS;
}
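/* Illustrative sketch (an assumption, not from the original source): when a
 * key is registered with a group lock, its destruction is deferred until the
 * last reference is released, which is what makes unregistration from inside
 * a callback safe. The helper name register_with_grp_lock is hypothetical.
 */
#if 0
static pj_status_t register_with_grp_lock(pj_pool_t *pool, pj_ioqueue_t *ioq,
                                          pj_sock_t sock, void *app_data,
                                          const pj_ioqueue_callback *cb,
                                          pj_ioqueue_key_t **p_key)
{
    pj_grp_lock_t *grp_lock;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &grp_lock);
    if (status != PJ_SUCCESS)
        return status;

    /* The ioqueue takes its own reference on the group lock; the key and
     * the lock are torn down only after all references are gone.
     */
    return pj_ioqueue_register_sock2(pool, ioq, sock, grp_lock, app_data,
                                     cb, p_key);
}
#endif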
/*
 * pj_ioqueue_poll()
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;

    PJ_CHECK_STACK();

    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);

    count = os_epoll_wait( ioqueue->epfd, events,
                           PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
        /* Check the closing keys only when there's no activity and when
         * there are pending closing keys.
         */
        if (!pj_list_empty(&ioqueue->closing_list)) {
            pj_lock_acquire(ioqueue->lock);
            scan_closing_keys(ioqueue);
            pj_lock_release(ioqueue->lock);
        }
#endif
        TRACE_((THIS_FILE, "os_epoll_wait timed out"));
        return count;
    }
    else if (count < 0) {
        TRACE_((THIS_FILE, "os_epoll_wait error"));
        return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
                       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
        pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
                                  events[i].epoll_data;

        TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

        /*
         * Check readability.
         */
        if ((events[i].events & EPOLLIN) &&
            (key_has_pending_read(h) || key_has_pending_accept(h)) &&
            !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
            increment_counter(h);
#endif
            queue[processed].key = h;
            queue[processed].event_type = READABLE_EVENT;
            ++processed;
            continue;
        }

        /*
         * Check for writeability.
         */
        if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) &&
            !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
            increment_counter(h);
#endif
            queue[processed].key = h;
            queue[processed].event_type = WRITEABLE_EVENT;
            ++processed;
            continue;
        }

#if PJ_HAS_TCP
        /*
         * Check for completion of connect() operation.
         */
        if ((events[i].events & EPOLLOUT) && (h->connecting) &&
            !IS_CLOSING(h))
        {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
            increment_counter(h);
#endif
            queue[processed].key = h;
            queue[processed].event_type = WRITEABLE_EVENT;
            ++processed;
            continue;
        }
#endif /* PJ_HAS_TCP */

        /*
         * Check for error condition.
         */
        if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) {
            /*
             * We need to handle this exception event. If it's related to us
             * connecting, report it as such. If not, just report it as a
             * read event and the higher layers will handle it.
             */
            if (h->connecting) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
                increment_counter(h);
#endif
                queue[processed].key = h;
                queue[processed].event_type = EXCEPTION_EVENT;
                ++processed;
            } else if (key_has_pending_read(h) || key_has_pending_accept(h)) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
                increment_counter(h);
#endif
                queue[processed].key = h;
                queue[processed].event_type = READABLE_EVENT;
                ++processed;
            }
            continue;
        }
    }

    /* Hold a group-lock reference on each queued key so it cannot be
     * destroyed while we dispatch its events outside the ioqueue lock.
     */
    for (i=0; i<processed; ++i) {
        if (queue[i].key->grp_lock)
            pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    PJ_RACE_ME(5);

    pj_lock_release(ioqueue->lock);

    PJ_RACE_ME(5);

    /* Now process the events. */
    for (i=0; i<processed; ++i) {
        switch (queue[i].event_type) {
        case READABLE_EVENT:
            ioqueue_dispatch_read_event(ioqueue, queue[i].key);
            break;
        case WRITEABLE_EVENT:
            ioqueue_dispatch_write_event(ioqueue, queue[i].key);
            break;
        case EXCEPTION_EVENT:
            ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
            break;
        case NO_EVENT:
            pj_assert(!"Invalid event!");
            break;
        }

#if PJ_IOQUEUE_HAS_SAFE_UNREG
        decrement_counter(queue[i].key);
#endif

        if (queue[i].key->grp_lock)
            pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0);
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     */
    if (count > 0 && !processed && msec > 0) {
        pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
                       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
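/* Illustrative polling-loop sketch (an assumption, not from the original
 * source): a typical worker thread drives the ioqueue by calling
 * pj_ioqueue_poll() repeatedly with a small timeout. The quit_flag variable
 * is hypothetical.
 */
#if 0
static pj_bool_t quit_flag;

static int worker_thread(void *arg)
{
    pj_ioqueue_t *ioq = (pj_ioqueue_t*)arg;

    while (!quit_flag) {
        pj_time_val timeout = { 0, 10 };    /* 10 msec */

        /* Returns the number of events dispatched, zero on timeout,
         * or a negative value on error.
         */
        pj_ioqueue_poll(ioq, &timeout);
    }
    return 0;
}
#endif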
/*
 * pj_ioqueue_unregister()
 *
 * Unregister handle from ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
{
    pj_ioqueue_t *ioqueue;
    struct epoll_event ev;
    int status;

    PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL);

    ioqueue = key->ioqueue;

    /* Lock the key to make sure no callback is simultaneously modifying
     * the key. We need to lock the key before ioqueue here to prevent
     * deadlock.
     */
    pj_ioqueue_lock_key(key);

    /* Also lock ioqueue */
    pj_lock_acquire(ioqueue->lock);

    pj_assert(ioqueue->count > 0);
    --ioqueue->count;
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    pj_list_erase(key);
#endif

    ev.events = 0;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev);
    if (status != 0) {
        pj_status_t rc = pj_get_os_error();
        pj_lock_release(ioqueue->lock);
        /* Release the key lock taken above before bailing out. */
        pj_ioqueue_unlock_key(key);
        return rc;
    }

    /* Destroy the key. */
    pj_sock_close(key->fd);

    pj_lock_release(ioqueue->lock);

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark the key as closing. */
    key->closing = 1;

    /* Decrement counter. */
    decrement_counter(key);

    /* Done. */
    if (key->grp_lock) {
        /* Just dec_ref and unlock. We will set grp_lock to NULL
         * elsewhere.
         */
        pj_grp_lock_t *grp_lock = key->grp_lock;
        /* Don't set grp_lock to NULL, otherwise the other thread
         * will crash. Just leave it as a dangling pointer; this
         * should be safe.
         */
        /* key->grp_lock = NULL; */
        pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
        pj_grp_lock_release(grp_lock);
    } else {
        pj_ioqueue_unlock_key(key);
    }
#else
    if (key->grp_lock) {
        /* Dec_ref and unlock. */
        pj_grp_lock_t *grp_lock = key->grp_lock;
        /* Don't set grp_lock to NULL, otherwise the other thread
         * will crash. Just leave it as a dangling pointer; this
         * should be safe.
         */
        /* key->grp_lock = NULL; */
        pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
        pj_grp_lock_release(grp_lock);
    } else {
        pj_ioqueue_unlock_key(key);
    }

    pj_lock_destroy(key->lock);
#endif

    return PJ_SUCCESS;
}
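/* Sketch of the safe-unregistration recycle step (an assumption inferred
 * from the calls above, not a verbatim copy of this backend's
 * scan_closing_keys()): keys parked on closing_list are returned to
 * free_list after a grace period, so pj_ioqueue_register_sock2() can
 * reuse them without racing in-flight callbacks.
 */
#if 0
static void scan_closing_keys_sketch(pj_ioqueue_t *ioqueue)
{
    pj_time_val now;
    pj_ioqueue_key_t *key;

    pj_gettimeofday(&now);

    key = ioqueue->closing_list.next;
    while (key != &ioqueue->closing_list) {
        pj_ioqueue_key_t *next = key->next;
        pj_time_val age = now;

        /* Give in-flight callbacks a grace period before the key slot
         * is handed out again.
         */
        PJ_TIME_VAL_SUB(age, key->free_time);
        if (PJ_TIME_VAL_MSEC(age) >= PJ_IOQUEUE_KEY_FREE_DELAY) {
            pj_list_erase(key);
            pj_list_push_back(&ioqueue->free_list, key);
        }
        key = next;
    }
}
#endif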
/*
 * pj_ioqueue_register_sock2()
 *
 * Register a socket to ioqueue.
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              pj_grp_lock_t *grp_lock,
                                              void *user_data,
                                              const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_key_t *key = NULL;
    pj_uint32_t value;
    struct epoll_event ev;
    int status;
    pj_status_t rc = PJ_SUCCESS;

    PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
                     cb && p_key, PJ_EINVAL);

    pj_lock_acquire(ioqueue->lock);

    if (ioqueue->count >= ioqueue->max) {
        rc = PJ_ETOOMANY;
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: too many files"));
        goto on_return;
    }

    /* Set socket to nonblocking. */
    value = 1;
    if ((rc=os_ioctl(sock, FIONBIO, (ioctl_val_type)&value))) {
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: ioctl rc=%d",
                rc));
        rc = pj_get_netos_error();
        goto on_return;
    }

    /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get
     * the key from the free list. Otherwise allocate a new one.
     */
#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Scan closing_keys first to let them come back to free_list */
    scan_closing_keys(ioqueue);

    pj_assert(!pj_list_empty(&ioqueue->free_list));
    if (pj_list_empty(&ioqueue->free_list)) {
        rc = PJ_ETOOMANY;
        goto on_return;
    }

    key = ioqueue->free_list.next;
    pj_list_erase(key);
#else
    /* Create key. */
    key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif

    rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }

    /* Add the socket to epoll, watching for input and error events. */
    ev.events = EPOLLIN | EPOLLERR;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl(ioqueue->epfd, EPOLL_CTL_ADD, sock, &ev);
    if (status < 0) {
        rc = pj_get_os_error();
        pj_lock_destroy(key->lock);
        key = NULL;
        TRACE_((THIS_FILE,
                "pj_ioqueue_register_sock error: os_epoll_ctl rc=%d",
                status));
        goto on_return;
    }

    /* Register */
    pj_list_insert_before(&ioqueue->active_list, key);
    ++ioqueue->count;

on_return:
    if (rc != PJ_SUCCESS) {
        if (key && key->grp_lock)
            pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
    }
    *p_key = key;
    pj_lock_release(ioqueue->lock);

    return rc;
}
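/* For reference, the non-group-lock variant of the public API is
 * conventionally a thin wrapper over the function above; a sketch
 * consistent with the pj_ioqueue interface, shown here as an assumption
 * rather than a verbatim copy of this backend.
 */
#if 0
PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              void *user_data,
                                              const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    /* Delegate with a NULL group lock. */
    return pj_ioqueue_register_sock2(pool, ioqueue, sock, NULL, user_data,
                                     cb, p_key);
}
#endif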