/* * pj_ioqueue_unregister() * * Unregister handle from ioqueue. */ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key) { pj_ioqueue_t *ioqueue; struct epoll_event ev; int status; PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL); ioqueue = key->ioqueue; /* Lock the key to make sure no callback is simultaneously modifying * the key. We need to lock the key before ioqueue here to prevent * deadlock. */ pj_mutex_lock(key->mutex); /* Also lock ioqueue */ pj_lock_acquire(ioqueue->lock); pj_assert(ioqueue->count > 0); --ioqueue->count; #if !PJ_IOQUEUE_HAS_SAFE_UNREG pj_list_erase(key); #endif ev.events = 0; ev.epoll_data = (epoll_data_type)key; status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev); if (status != 0) { pj_status_t rc = pj_get_os_error(); pj_lock_release(ioqueue->lock); return rc; } /* Destroy the key. */ pj_sock_close(key->fd); pj_lock_release(ioqueue->lock); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Mark key is closing. */ key->closing = 1; /* Decrement counter. */ decrement_counter(key); /* Done. */ pj_mutex_unlock(key->mutex); #else pj_mutex_destroy(key->mutex); #endif return PJ_SUCCESS; }
/* util: ioctl that tries harder.
 * Repeats the ioctl while it fails with a transient errno (EINTR/EAGAIN),
 * up to MAX_IOCTL_RETRY additional attempts.
 */
static pj_status_t xioctl(int fh, int request, void *arg)
{
    int result;
    int attempt = 0;

    for (;;) {
        result = v4l2_ioctl(fh, request, arg);
        if (result != -1)
            return PJ_SUCCESS;

        /* Permanent failure: bail out immediately. */
        if (errno != EINTR && errno != EAGAIN)
            break;

        /* Transient failure: retry a bounded number of times. */
        if (attempt++ >= MAX_IOCTL_RETRY)
            break;
    }

    return pj_get_os_error();
}
/* Util: initiate v4l2 streaming via mmap.
 * Requests BUFFER_CNT kernel buffers, queries and maps each one, and
 * records how many were successfully mapped in stream->buf_cnt so that
 * the stream's cleanup path can unmap exactly those.
 */
static pj_status_t vid4lin_stream_init_streaming(vid4lin_stream *stream)
{
    struct v4l2_requestbuffers req;
    unsigned i;
    pj_status_t status;

    pj_bzero(&req, sizeof(req));
    req.count = BUFFER_CNT;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    status = xioctl(stream->fd, VIDIOC_REQBUFS, &req);
    if (status != PJ_SUCCESS)
        return status;

    /* Fix: the pool allocation was previously used unchecked; a pool
     * without a sufficient block would have caused a NULL dereference.
     */
    stream->buffers = pj_pool_calloc(stream->pool, req.count,
                                     sizeof(*stream->buffers));
    if (stream->buffers == NULL)
        return PJ_ENOMEM;
    stream->buf_cnt = 0;

    for (i = 0; i < req.count; ++i) {
        struct v4l2_buffer buf;

        pj_bzero(&buf, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;

        status = xioctl(stream->fd, VIDIOC_QUERYBUF, &buf);
        if (status != PJ_SUCCESS)
            goto on_error;

        stream->buffers[i].length = buf.length;
        stream->buffers[i].start = v4l2_mmap(NULL, buf.length,
                                             PROT_READ | PROT_WRITE,
                                             MAP_SHARED, stream->fd,
                                             buf.m.offset);

        if (MAP_FAILED == stream->buffers[i].start) {
            status = pj_get_os_error();
            goto on_error;
        }

        /* Count only successfully mapped buffers so the destroy path
         * unmaps exactly what was mapped.
         */
        stream->buf_cnt++;
    }

    PJ_LOG(5,(THIS_FILE, " mmap streaming initialized"));

    stream->io_type = IO_TYPE_MMAP;
    return PJ_SUCCESS;

on_error:
    /* stream->buf_cnt reflects the partial progress; the caller's cleanup
     * is expected to unmap buffers [0, buf_cnt).
     */
    return status;
}
/* * pj_thread_sleep() */ PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec) { /* TODO: should change this to something like PJ_OS_HAS_NANOSLEEP */ #if defined(PJ_RTEMS) && PJ_RTEMS!=0 enum { NANOSEC_PER_MSEC = 1000000 }; struct timespec req; PJ_CHECK_STACK(); req.tv_sec = msec / 1000; req.tv_nsec = (msec % 1000) * NANOSEC_PER_MSEC; if (nanosleep(&req, NULL) == 0) return PJ_SUCCESS; return PJ_RETURN_OS_ERROR(pj_get_native_os_error()); #else PJ_CHECK_STACK(); pj_set_os_error(0); usleep(msec * 1000); /* MacOS X (reported on 10.5) seems to always set errno to ETIMEDOUT. * It does so because usleep() is declared to return int, and we're * supposed to check for errno only when usleep() returns non-zero. * Unfortunately, usleep() is declared to return void in other platforms * so it's not possible to always check for the return value (unless * we add a detection routine in autoconf). * * As a workaround, here we check if ETIMEDOUT is returned and * return successfully if it is. */ if (pj_get_native_os_error() == ETIMEDOUT) return PJ_SUCCESS; return pj_get_os_error(); #endif /* PJ_RTEMS */ }
/* * pj_ioqueue_unregister() * * Unregister handle from ioqueue. */ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key) { pj_ioqueue_t *ioqueue; struct epoll_event ev; int status; PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL); ioqueue = key->ioqueue; /* Lock the key to make sure no callback is simultaneously modifying * the key. We need to lock the key before ioqueue here to prevent * deadlock. */ pj_ioqueue_lock_key(key); /* Also lock ioqueue */ pj_lock_acquire(ioqueue->lock); pj_assert(ioqueue->count > 0); --ioqueue->count; #if !PJ_IOQUEUE_HAS_SAFE_UNREG pj_list_erase(key); #endif ev.events = 0; ev.epoll_data = (epoll_data_type)key; status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev); if (status != 0) { pj_status_t rc = pj_get_os_error(); pj_lock_release(ioqueue->lock); return rc; } /* Destroy the key. */ pj_sock_close(key->fd); pj_lock_release(ioqueue->lock); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Mark key is closing. */ key->closing = 1; /* Decrement counter. */ decrement_counter(key); /* Done. */ if (key->grp_lock) { /* just dec_ref and unlock. we will set grp_lock to NULL * elsewhere */ pj_grp_lock_t *grp_lock = key->grp_lock; // Don't set grp_lock to NULL otherwise the other thread // will crash. Just leave it as dangling pointer, but this // should be safe //key->grp_lock = NULL; pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0); pj_grp_lock_release(grp_lock); } else { pj_ioqueue_unlock_key(key); } #else if (key->grp_lock) { /* set grp_lock to NULL and unlock */ pj_grp_lock_t *grp_lock = key->grp_lock; // Don't set grp_lock to NULL otherwise the other thread // will crash. Just leave it as dangling pointer, but this // should be safe //key->grp_lock = NULL; pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0); pj_grp_lock_release(grp_lock); } else { pj_ioqueue_unlock_key(key); } pj_lock_destroy(key->lock); #endif return PJ_SUCCESS; }
/*
 * pj_ioqueue_register_sock()
 *
 * Register a socket to ioqueue.
 */
/* Registers `sock` with the ioqueue's epoll descriptor and returns the new
 * key in *p_key. The socket is switched to non-blocking mode. The whole
 * sequence runs under ioqueue->lock; on any failure *p_key is set to NULL
 * and the group lock reference (if any) taken by ioqueue_init_key() is
 * dropped in the on_return block.
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              pj_grp_lock_t *grp_lock,
                                              void *user_data,
                                              const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_key_t *key = NULL;
    pj_uint32_t value;
    struct epoll_event ev;
    int status;
    pj_status_t rc = PJ_SUCCESS;

    PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
                     cb && p_key, PJ_EINVAL);

    /* Serialize against poll/unregister while we mutate the key lists. */
    pj_lock_acquire(ioqueue->lock);

    if (ioqueue->count >= ioqueue->max) {
        rc = PJ_ETOOMANY;
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: too many files"));
        goto on_return;
    }

    /* Set socket to nonblocking. */
    value = 1;
    if ((rc=os_ioctl(sock, FIONBIO, (ioctl_val_type)&value))) {
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: ioctl rc=%d",
                rc));
        /* NOTE(review): rc from os_ioctl is immediately replaced by the
         * netos error; the traced rc and the returned rc may differ. */
        rc = pj_get_netos_error();
        goto on_return;
    }

    /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get
     * the key from the free list. Otherwise allocate a new one.
     */
#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Scan closing_keys first to let them come back to free_list */
    scan_closing_keys(ioqueue);

    pj_assert(!pj_list_empty(&ioqueue->free_list));
    if (pj_list_empty(&ioqueue->free_list)) {
        rc = PJ_ETOOMANY;
        goto on_return;
    }

    key = ioqueue->free_list.next;
    pj_list_erase(key);
#else
    /* Create key. */
    key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif

    rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }

    /* Create key's mutex */
 /*    rc = pj_mutex_create_recursive(pool, NULL, &key->mutex);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }
*/
    /* os_epoll_ctl. */
    /* Arm the descriptor for read and error events only; write interest
     * is added later when a send is pending. */
    ev.events = EPOLLIN | EPOLLERR;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl(ioqueue->epfd, EPOLL_CTL_ADD, sock, &ev);
    if (status < 0) {
        rc = pj_get_os_error();
        pj_lock_destroy(key->lock);
        /* NOTE(review): under PJ_IOQUEUE_HAS_SAFE_UNREG the key taken from
         * free_list is not returned to it on this path — confirm whether
         * that is intended or a key leak. */
        key = NULL;
        TRACE_((THIS_FILE,
                "pj_ioqueue_register_sock error: os_epoll_ctl rc=%d",
                status));
        goto on_return;
    }

    /* Register */
    pj_list_insert_before(&ioqueue->active_list, key);
    ++ioqueue->count;

    //TRACE_((THIS_FILE, "socket registered, count=%d", ioqueue->count));

on_return:
    if (rc != PJ_SUCCESS) {
        /* Drop the grp_lock reference added by ioqueue_init_key(). */
        if (key && key->grp_lock)
            pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
    }
    *p_key = key;
    pj_lock_release(ioqueue->lock);

    return rc;
}
/*
 * Repeated connect/accept on the same listener socket.
 *
 * Exercises TEST_LOOP rounds of MAX_PAIR simultaneous connect/accept pairs
 * against one listener, followed by a send/recv exchange on each pair.
 * Returns 0 on success or a negative code identifying the failing step.
 */
static int compliance_test_2(pj_bool_t allow_concur)
{
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
    enum { MAX_PAIR = 1, TEST_LOOP = 2 };
#else
    enum { MAX_PAIR = 4, TEST_LOOP = 2 };
#endif

    struct listener {
        pj_sock_t        sock;
        pj_ioqueue_key_t *key;
        pj_sockaddr_in   addr;
        int              addr_len;
    } listener;

    struct server {
        pj_sock_t        sock;
        pj_ioqueue_key_t *key;
        pj_sockaddr_in   local_addr;
        pj_sockaddr_in   rem_addr;
        int              rem_addr_len;
        pj_ioqueue_op_key_t accept_op;
    } server[MAX_PAIR];

    struct client {
        pj_sock_t        sock;
        pj_ioqueue_key_t *key;
    } client[MAX_PAIR];

    pj_pool_t *pool = NULL;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    int i, bufsize = BUF_MIN_SIZE;
    pj_ssize_t status;
    int test_loop, pending_op = 0;
    pj_timestamp t_elapsed;
    pj_str_t s;
    pj_status_t rc;

    /* Pre-mark everything invalid so the cleanup code below is safe to run
     * no matter how early we fail. */
    listener.sock = PJ_INVALID_SOCKET;
    listener.key = NULL;

    for (i=0; i<MAX_PAIR; ++i) {
        server[i].sock = PJ_INVALID_SOCKET;
        server[i].key = NULL;
    }

    for (i=0; i<MAX_PAIR; ++i) {
        client[i].sock = PJ_INVALID_SOCKET;
        client[i].key = NULL;
    }

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_create()", rc);
        return -10;
    }

    // Concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_set_default_concurrency()", rc);
        return -11;
    }

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Create listener socket
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &listener.sock);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
        status=-20; goto on_error;
    }

    // Bind listener socket.
    pj_sockaddr_in_init(&listener.addr, 0, 0);
    if ((rc=pj_sock_bind(listener.sock, &listener.addr,
                         sizeof(listener.addr))) != 0 )
    {
        app_perror("...bind error", rc);
        status=-30; goto on_error;
    }

    // Get listener address.
    listener.addr_len = sizeof(listener.addr);
    rc = pj_sock_getsockname(listener.sock, &listener.addr,
                             &listener.addr_len);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_getsockname()", rc);
        status=-40; goto on_error;
    }
    listener.addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));

    // Register listener socket.
    rc = pj_ioqueue_register_sock(pool, ioque, listener.sock, NULL, &test_cb,
                                  &listener.key);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR", rc);
        status=-50; goto on_error;
    }

    // Listener socket listen().
    /* Fix: capture the listen() result; previously a stale rc was printed. */
    if ((rc=pj_sock_listen(listener.sock, 5)) != 0) {
        app_perror("...ERROR in pj_sock_listen()", rc);
        status=-60; goto on_error;
    }

    for (test_loop=0; test_loop < TEST_LOOP; ++test_loop) {
        // Client connect and server accept.
        for (i=0; i<MAX_PAIR; ++i) {
            rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0,
                                &client[i].sock);
            if (rc != PJ_SUCCESS) {
                app_perror("...error creating socket", rc);
                status=-70; goto on_error;
            }

            rc = pj_ioqueue_register_sock(pool, ioque, client[i].sock, NULL,
                                          &test_cb, &client[i].key);
            if (rc != PJ_SUCCESS) {
                app_perror("...error ", rc);
                status=-80; goto on_error;
            }

            // Server socket accept()
            pj_ioqueue_op_key_init(&server[i].accept_op,
                                   sizeof(server[i].accept_op));
            server[i].rem_addr_len = sizeof(pj_sockaddr_in);
            status = pj_ioqueue_accept(listener.key, &server[i].accept_op,
                                       &server[i].sock,
                                       &server[i].local_addr,
                                       &server[i].rem_addr,
                                       &server[i].rem_addr_len);
            if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
                /* Fix: report the actual error, not a stale rc. */
                app_perror("...ERROR in pj_ioqueue_accept()",
                           (pj_status_t)status);
                status=-90; goto on_error;
            }
            if (status==PJ_EPENDING) {
                ++pending_op;
            }

            // Client socket connect()
            status = pj_ioqueue_connect(client[i].key, &listener.addr,
                                        sizeof(listener.addr));
            if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
                /* Fix: report the actual error, not a stale rc. */
                app_perror("...ERROR in pj_ioqueue_connect()",
                           (pj_status_t)status);
                status=-100; goto on_error;
            }
            if (status==PJ_EPENDING) {
                ++pending_op;
            }

        }

        // Poll until all connected
        while (pending_op) {
            pj_time_val timeout = {1, 0};

#ifdef PJ_SYMBIAN
            status = pj_symbianos_poll(-1, 1000);
#else
            status = pj_ioqueue_poll(ioque, &timeout);
#endif
            if (status > 0) {
                if (status > pending_op) {
                    PJ_LOG(3,(THIS_FILE,
                              "...error: pj_ioqueue_poll() returned %d "
                              "(only expecting %d)",
                              status, pending_op));
                    /* Fix: previously "return -110" leaked all sockets,
                     * the ioqueue and the pool. */
                    status = -110;
                    goto on_error;
                }
                pending_op -= status;

                if (pending_op == 0) {
                    status = 0;
                }
            }
        }

        // There's no pending operation.
        // When we poll the ioqueue, there must not be events.
        if (pending_op == 0) {
            pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
            status = pj_symbianos_poll(-1, 1000);
#else
            status = pj_ioqueue_poll(ioque, &timeout);
#endif
            if (status != 0) {
                status=-120; goto on_error;
            }
        }

        for (i=0; i<MAX_PAIR; ++i) {
            // Check server socket.
            if (server[i].sock == PJ_INVALID_SOCKET) {
                status = -130;
                app_perror("...accept() error", pj_get_os_error());
                goto on_error;
            }

            // Check addresses
            if (server[i].local_addr.sin_family != pj_AF_INET() ||
                server[i].local_addr.sin_addr.s_addr == 0 ||
                server[i].local_addr.sin_port == 0)
            {
                app_perror("...ERROR address not set", rc);
                status = -140;
                goto on_error;
            }

            if (server[i].rem_addr.sin_family != pj_AF_INET() ||
                server[i].rem_addr.sin_addr.s_addr == 0 ||
                server[i].rem_addr.sin_port == 0)
            {
                app_perror("...ERROR address not set", rc);
                status = -150;
                goto on_error;
            }

            // Register newly accepted socket.
            rc = pj_ioqueue_register_sock(pool, ioque, server[i].sock, NULL,
                                          &test_cb, &server[i].key);
            if (rc != PJ_SUCCESS) {
                app_perror("...ERROR in pj_ioqueue_register_sock", rc);
                status = -160;
                goto on_error;
            }

            // Test send and receive.
            t_elapsed.u32.lo = 0;
            status = send_recv_test(ioque, server[i].key, client[i].key,
                                    send_buf, recv_buf, bufsize, &t_elapsed);
            if (status != 0) {
                goto on_error;
            }
        }

        // Success
        status = 0;

        /* Close and unregister everything before the next round; keys take
         * ownership of their sockets, so only close sockets without keys. */
        for (i=0; i<MAX_PAIR; ++i) {
            if (server[i].key != NULL) {
                pj_ioqueue_unregister(server[i].key);
                server[i].key = NULL;
                server[i].sock = PJ_INVALID_SOCKET;
            } else if (server[i].sock != PJ_INVALID_SOCKET) {
                pj_sock_close(server[i].sock);
                server[i].sock = PJ_INVALID_SOCKET;
            }
            if (client[i].key != NULL) {
                pj_ioqueue_unregister(client[i].key);
                client[i].key = NULL;
                client[i].sock = PJ_INVALID_SOCKET;
            } else if (client[i].sock != PJ_INVALID_SOCKET) {
                pj_sock_close(client[i].sock);
                client[i].sock = PJ_INVALID_SOCKET;
            }
        }
    }

    status = 0;

on_error:
    for (i=0; i<MAX_PAIR; ++i) {
        if (server[i].key != NULL) {
            pj_ioqueue_unregister(server[i].key);
            server[i].key = NULL;
            server[i].sock = PJ_INVALID_SOCKET;
        } else if (server[i].sock != PJ_INVALID_SOCKET) {
            pj_sock_close(server[i].sock);
            server[i].sock = PJ_INVALID_SOCKET;
        }
        if (client[i].key != NULL) {
            pj_ioqueue_unregister(client[i].key);
            client[i].key = NULL;
            /* Fix: this branch used to reset server[i].sock (copy-paste
             * bug), leaving client[i].sock dangling after unregister. */
            client[i].sock = PJ_INVALID_SOCKET;
        } else if (client[i].sock != PJ_INVALID_SOCKET) {
            pj_sock_close(client[i].sock);
            client[i].sock = PJ_INVALID_SOCKET;
        }
    }

    if (listener.key) {
        pj_ioqueue_unregister(listener.key);
        listener.key = NULL;
    } else if (listener.sock != PJ_INVALID_SOCKET) {
        pj_sock_close(listener.sock);
        listener.sock = PJ_INVALID_SOCKET;
    }

    if (ioque != NULL)
        pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;
}
/*
 * Compliance test for success scenario.
 *
 * Creates a listening server socket and a client socket, performs an
 * asynchronous accept + connect through the ioqueue, verifies the callback
 * bookkeeping, then runs a send/recv exchange on the connected pair.
 * Returns 0 on success or a negative code identifying the failing step.
 */
static int compliance_test_0(pj_bool_t allow_concur)
{
    pj_sock_t ssock=-1, csock0=-1, csock1=-1;
    pj_sockaddr_in addr, client_addr, rmt_addr;
    int client_addr_len;
    pj_pool_t *pool = NULL;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey=NULL, *ckey0=NULL, *ckey1=NULL;
    pj_ioqueue_op_key_t accept_op;
    int bufsize = BUF_MIN_SIZE;
    pj_ssize_t status = -1;
    int pending_op = 0;
    pj_timestamp t_elapsed;
    pj_str_t s;
    pj_status_t rc;

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Create server socket and client socket for connecting
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &ssock);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
        status=-1; goto on_error;
    }

    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &csock1);
    if (rc != PJ_SUCCESS) {
        app_perror("...error creating socket", rc);
        status=-1; goto on_error;
    }

    // Bind server socket.
    pj_sockaddr_in_init(&addr, 0, 0);
    if ((rc=pj_sock_bind(ssock, &addr, sizeof(addr))) != 0 ) {
        app_perror("...bind error", rc);
        status=-10; goto on_error;
    }

    // Get server address.
    client_addr_len = sizeof(addr);
    rc = pj_sock_getsockname(ssock, &addr, &client_addr_len);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_sock_getsockname()", rc);
        status=-15; goto on_error;
    }
    addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_create()", rc);
        status=-20; goto on_error;
    }

    // Concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_set_default_concurrency()", rc);
        status=-21; goto on_error;
    }

    // Register server socket and client socket.
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, &test_cb, &skey);
    if (rc == PJ_SUCCESS)
        rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL, &test_cb,
                                      &ckey1);
    else
        ckey1 = NULL;
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
        status=-23; goto on_error;
    }

    // Server socket listen().
    /* Fix: capture the listen() result; previously a stale rc was printed. */
    if ((rc=pj_sock_listen(ssock, 5)) != 0) {
        app_perror("...ERROR in pj_sock_listen()", rc);
        status=-25; goto on_error;
    }

    // Server socket accept()
    client_addr_len = sizeof(pj_sockaddr_in);
    status = pj_ioqueue_accept(skey, &accept_op, &csock0,
                               &client_addr, &rmt_addr, &client_addr_len);
    if (status != PJ_EPENDING) {
        /* Fix: report the actual error, not a stale rc. */
        app_perror("...ERROR in pj_ioqueue_accept()", (pj_status_t)status);
        status=-30; goto on_error;
    }
    if (status==PJ_EPENDING) {
        ++pending_op;
    }

    // Client socket connect()
    status = pj_ioqueue_connect(ckey1, &addr, sizeof(addr));
    if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
        /* Fix: report the actual error, not a stale rc. */
        app_perror("...ERROR in pj_ioqueue_connect()", (pj_status_t)status);
        status=-40; goto on_error;
    }
    if (status==PJ_EPENDING) {
        ++pending_op;
    }

    // Poll until connected
    /* Reset the global callback bookkeeping that test_cb fills in. */
    callback_read_size = callback_write_size = 0;
    callback_accept_status = callback_connect_status = -2;
    callback_call_count = 0;

    callback_read_key = callback_write_key =
        callback_accept_key = callback_connect_key = NULL;
    callback_accept_op = callback_read_op = callback_write_op = NULL;

    while (pending_op) {
        pj_time_val timeout = {1, 0};

#ifdef PJ_SYMBIAN
        callback_call_count = 0;
        pj_symbianos_poll(-1, 1000);
        status = callback_call_count;
#else
        status = pj_ioqueue_poll(ioque, &timeout);
#endif
        if (status > 0) {
            if (callback_accept_status != -2) {
                if (callback_accept_status != 0) {
                    status=-41; goto on_error;
                }
                if (callback_accept_key != skey) {
                    status=-42; goto on_error;
                }
                if (callback_accept_op != &accept_op) {
                    status=-43; goto on_error;
                }
                callback_accept_status = -2;
            }

            if (callback_connect_status != -2) {
                if (callback_connect_status != 0) {
                    status=-50; goto on_error;
                }
                if (callback_connect_key != ckey1) {
                    status=-51; goto on_error;
                }
                callback_connect_status = -2;
            }

            if (status > pending_op) {
                PJ_LOG(3,(THIS_FILE,
                          "...error: pj_ioqueue_poll() returned %d "
                          "(only expecting %d)",
                          status, pending_op));
                /* Fix: previously "return -52" leaked all sockets,
                 * the ioqueue and the pool. */
                status = -52;
                goto on_error;
            }
            pending_op -= status;
            if (pending_op == 0) {
                status = 0;
            }
        }
    }

    // There's no pending operation.
    // When we poll the ioqueue, there must not be events.
    if (pending_op == 0) {
        pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
        status = pj_symbianos_poll(-1, 1000);
#else
        status = pj_ioqueue_poll(ioque, &timeout);
#endif
        if (status != 0) {
            status=-60; goto on_error;
        }
    }

    // Check accepted socket.
    if (csock0 == PJ_INVALID_SOCKET) {
        status = -69;
        app_perror("...accept() error", pj_get_os_error());
        goto on_error;
    }

    // Register newly accepted socket.
    rc = pj_ioqueue_register_sock(pool, ioque, csock0, NULL, &test_cb,
                                  &ckey0);
    if (rc != PJ_SUCCESS) {
        app_perror("...ERROR in pj_ioqueue_register_sock", rc);
        status = -70;
        goto on_error;
    }

    // Test send and receive.
    t_elapsed.u32.lo = 0;
    status = send_recv_test(ioque, ckey0, ckey1, send_buf, recv_buf,
                            bufsize, &t_elapsed);
    if (status != 0) {
        goto on_error;
    }

    // Success
    status = 0;

on_error:
    /* Keys own their sockets once registered; close raw sockets only when
     * no key was created for them. */
    if (skey != NULL)
        pj_ioqueue_unregister(skey);
    else if (ssock != PJ_INVALID_SOCKET)
        pj_sock_close(ssock);

    if (ckey1 != NULL)
        pj_ioqueue_unregister(ckey1);
    else if (csock1 != PJ_INVALID_SOCKET)
        pj_sock_close(csock1);

    if (ckey0 != NULL)
        pj_ioqueue_unregister(ckey0);
    else if (csock0 != PJ_INVALID_SOCKET)
        pj_sock_close(csock0);

    if (ioque != NULL)
        pj_ioqueue_destroy(ioque);
    pj_pool_release(pool);
    return status;
}
/* * Open stream. */ static pj_status_t open_stream(pjmedia_dir dir, int rec_id, int play_id, unsigned clock_rate, unsigned channel_count, unsigned samples_per_frame, unsigned bits_per_sample, pjmedia_snd_rec_cb rec_cb, pjmedia_snd_play_cb play_cb, void *user_data, pjmedia_snd_stream **p_snd_strm) { pj_pool_t *pool; pjmedia_snd_stream *strm; pj_status_t status; /* Make sure sound subsystem has been initialized with * pjmedia_snd_init() */ PJ_ASSERT_RETURN(pool_factory != NULL, PJ_EINVALIDOP); /* Can only support 16bits per sample */ PJ_ASSERT_RETURN(bits_per_sample == BITS_PER_SAMPLE, PJ_EINVAL); /* Create and Initialize stream descriptor */ pool = pj_pool_create(pool_factory, "wmme-dev", 1000, 1000, NULL); PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM); strm = pj_pool_zalloc(pool, sizeof(pjmedia_snd_stream)); strm->dir = dir; strm->play_id = play_id; strm->rec_id = rec_id; strm->pool = pool; strm->rec_cb = rec_cb; strm->play_cb = play_cb; strm->user_data = user_data; strm->clock_rate = clock_rate; strm->samples_per_frame = samples_per_frame; strm->bits_per_sample = bits_per_sample; strm->channel_count = channel_count; strm->buffer = pj_pool_alloc(pool, samples_per_frame * BYTES_PER_SAMPLE); if (!strm->buffer) { pj_pool_release(pool); return PJ_ENOMEM; } /* Create player stream */ if (dir & PJMEDIA_DIR_PLAYBACK) { unsigned buf_count; buf_count = snd_output_latency * clock_rate * channel_count / samples_per_frame / 1000; status = init_player_stream(strm->pool, &strm->play_strm, play_id, clock_rate, channel_count, samples_per_frame, buf_count); if (status != PJ_SUCCESS) { pjmedia_snd_stream_close(strm); return status; } } /* Create capture stream */ if (dir & PJMEDIA_DIR_CAPTURE) { unsigned buf_count; buf_count = snd_input_latency * clock_rate * channel_count / samples_per_frame / 1000; status = init_capture_stream(strm->pool, &strm->rec_strm, rec_id, clock_rate, channel_count, samples_per_frame, buf_count); if (status != PJ_SUCCESS) { pjmedia_snd_stream_close(strm); 
return status; } } /* Create the stop event */ strm->thread_quit_event = CreateEvent(NULL, FALSE, FALSE, NULL); if (strm->thread_quit_event == NULL) return pj_get_os_error(); /* Create and start the thread */ status = pj_thread_create(pool, "wmme", &wmme_dev_thread, strm, 0, 0, &strm->thread); if (status != PJ_SUCCESS) { pjmedia_snd_stream_close(strm); return status; } *p_snd_strm = strm; return PJ_SUCCESS; }
/*
 * Initialize Windows Multimedia recorder device.
 *
 * Opens the waveIn device identified by dev_id, allocates and queues
 * buffer_count capture buffers, and arms the event used for completion
 * notification. On failure the event and the opened device are released.
 */
static pj_status_t init_capture_stream( pj_pool_t *pool,
                                        struct wmme_stream *wmme_strm,
                                        int dev_id,
                                        unsigned clock_rate,
                                        unsigned channel_count,
                                        unsigned samples_per_frame,
                                        unsigned buffer_count)
{
    MMRESULT mr;
    WAVEFORMATEX pcmwf;
    unsigned bytes_per_frame;
    unsigned i;

    PJ_ASSERT_RETURN(buffer_count <= MAX_PACKET_BUFFER_COUNT, PJ_EINVAL);

    /* Check device ID */
    if (dev_id == -1)
        dev_id = 0;
    PJ_ASSERT_RETURN(dev_id >= 0 && dev_id < (int)dev_count, PJ_EINVAL);

    /*
     * Create a wait event.
     */
    wmme_strm->hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (NULL == wmme_strm->hEvent)
        return pj_get_os_error();

    /*
     * Set up wave format structure for opening the device.
     */
    init_waveformatex(&pcmwf, clock_rate, channel_count);
    bytes_per_frame = samples_per_frame * BYTES_PER_SAMPLE;

    /*
     * Open wave device.
     */
    mr = waveInOpen(&wmme_strm->hWave.In, dev_info[dev_id].deviceId, &pcmwf,
                    (DWORD)wmme_strm->hEvent, 0, CALLBACK_EVENT);
    if (mr != MMSYSERR_NOERROR) {
        /* Fix: the event created above was previously leaked here. */
        CloseHandle(wmme_strm->hEvent);
        wmme_strm->hEvent = NULL;
        /* TODO: This is for HRESULT/GetLastError() */
        PJ_RETURN_OS_ERROR(mr);
    }

    /*
     * Create the buffers.
     */
    wmme_strm->WaveHdr = pj_pool_zalloc(pool, sizeof(WAVEHDR) * buffer_count);
    for (i = 0; i < buffer_count; ++i) {
        wmme_strm->WaveHdr[i].lpData = pj_pool_zalloc(pool, bytes_per_frame);
        wmme_strm->WaveHdr[i].dwBufferLength = bytes_per_frame;
        mr = waveInPrepareHeader(wmme_strm->hWave.In,
                                 &(wmme_strm->WaveHdr[i]),
                                 sizeof(WAVEHDR));
        if (mr != MMSYSERR_NOERROR)
            goto on_wave_error;
        mr = waveInAddBuffer(wmme_strm->hWave.In, &(wmme_strm->WaveHdr[i]),
                             sizeof(WAVEHDR));
        if (mr != MMSYSERR_NOERROR)
            goto on_wave_error;
    }

    wmme_strm->dwBufIdx = 0;
    wmme_strm->dwMaxBufIdx = buffer_count;
    wmme_strm->timestamp.u64 = 0;

    /* Done setting up recorder device. */
    PJ_LOG(5,(THIS_FILE,
              " WaveAPI Sound recorder \"%s\" initialized (clock_rate=%d, "
              "channel_count=%d, samples_per_frame=%d (%dms))",
              dev_info[dev_id].info.name,
              clock_rate, channel_count, samples_per_frame,
              samples_per_frame * 1000 / clock_rate));

    return PJ_SUCCESS;

on_wave_error:
    /* Fix: previously these paths leaked the opened device and the event.
     * Reset first so queued buffers are returned before closing. */
    waveInReset(wmme_strm->hWave.In);
    waveInClose(wmme_strm->hWave.In);
    CloseHandle(wmme_strm->hEvent);
    wmme_strm->hEvent = NULL;
    /* TODO: This is for HRESULT/GetLastError() */
    PJ_RETURN_OS_ERROR(mr);
}
/* Dispatch one readable event on key `h`: completes one pending accept (TCP)
 * or one pending read/recv/recvfrom, then invokes the user callback. The
 * key mutex is held while the pending-operation lists are manipulated; it
 * is released before the callback when concurrency is allowed, otherwise
 * held across the callback and released afterwards.
 */
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue,
                                  pj_ioqueue_key_t *h )
{
    pj_status_t rc;

    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
        pj_mutex_unlock(h->mutex);
        return;
    }

#   if PJ_HAS_TCP
    if (!pj_list_empty(&h->accept_list)) {

        struct accept_operation *accept_op;
        pj_bool_t has_lock;

        /* Get one accept operation from the list. */
        accept_op = h->accept_list.next;
        pj_list_erase(accept_op);
        accept_op->op = PJ_IOQUEUE_OP_NONE;

        /* Clear bit in fdset if there is no more pending accept */
        if (pj_list_empty(&h->accept_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        rc=pj_sock_accept(h->fd, accept_op->accept_fd,
                          accept_op->rmt_addr, accept_op->addrlen);
        if (rc==PJ_SUCCESS && accept_op->local_addr) {
            rc = pj_sock_getsockname(*accept_op->accept_fd,
                                     accept_op->local_addr,
                                     accept_op->addrlen);
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_accept_complete && !IS_CLOSING(h)) {
            (*h->cb.on_accept_complete)(h,
                                        (pj_ioqueue_op_key_t*)accept_op,
                                        *accept_op->accept_fd, rc);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }
    }
    else
#   endif
    if (key_has_pending_read(h)) {
        struct read_operation *read_op;
        pj_ssize_t bytes_read;
        pj_bool_t has_lock;

        /* Get one pending read operation from the list. */
        read_op = h->read_list.next;
        pj_list_erase(read_op);

        /* Clear fdset if there is no pending read. */
        if (pj_list_empty(&h->read_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        bytes_read = read_op->size;

        /* Perform the actual receive according to the operation type that
         * was queued (recvfrom / recv / raw read). The op field is cleared
         * first so the slot can be reused by the callback. */
        if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read,
                                  read_op->flags,
                                  read_op->rmt_addr,
                                  read_op->rmt_addrlen);
        } else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
                              read_op->flags);
        } else {
            pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
            read_op->op = PJ_IOQUEUE_OP_NONE;
            /*
             * User has specified pj_ioqueue_read().
             * On Win32, we should do ReadFile(). But because we got
             * here because of select() anyway, user must have put a
             * socket descriptor on h->fd, which in this case we can
             * just call pj_sock_recv() instead of ReadFile().
             * On Unix, user may put a file in h->fd, so we'll have
             * to call read() here.
             * This may not compile on systems which doesn't have
             * read(). That's why we only specify PJ_LINUX here so
             * that error is easier to catch.
             */
#           if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
               defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
            rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
                              read_op->flags);
            //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
            //              &bytes_read, NULL);
#           elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
            bytes_read = read(h->fd, read_op->buf, bytes_read);
            rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
#           elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
            bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
            rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
#           else
#           error "Implement read() for this platform!"
#           endif
        }

        if (rc != PJ_SUCCESS) {
#           if defined(PJ_WIN32) && PJ_WIN32 != 0
            /* On Win32, for UDP, WSAECONNRESET on the receive side
             * indicates that previous sending has triggered ICMP Port
             * Unreachable message.
             * But we wouldn't know at this point which one of previous
             * key that has triggered the error, since UDP socket can
             * be shared!
             * So we'll just ignore it!
             */

            if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
                //PJ_LOG(4,(THIS_FILE,
                //          "Ignored ICMP port unreach. on key=%p", h));
            }
#           endif

            /* In any case we would report this to caller. */
            bytes_read = -rc;
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_read_complete && !IS_CLOSING(h)) {
            (*h->cb.on_read_complete)(h,
                                      (pj_ioqueue_op_key_t*)read_op,
                                      bytes_read);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
        pj_mutex_unlock(h->mutex);
    }
}
/*
 * Initialize DirectSound recorder device
 */
/* Creates the DirectSoundCapture device for dev_id, a circular capture
 * buffer of buffer_count * bytes_per_frame bytes, and per-frame position
 * notifications that signal ds_strm->hEvent.
 *
 * NOTE(review): the early-return error paths below do not release objects
 * created earlier in this function (lpDs, lpDsBuffer, hEvent, lpDsNotify);
 * verify the caller destroys the stream on failure, otherwise these leak.
 */
static pj_status_t init_capture_stream( struct dsound_stream *ds_strm,
                                        int dev_id,
                                        unsigned clock_rate,
                                        unsigned channel_count,
                                        unsigned samples_per_frame,
                                        unsigned buffer_count)
{
    HRESULT hr;
    PCMWAVEFORMAT pcmwf;
    DSCBUFFERDESC dscbdesc;
    DSBPOSITIONNOTIFY dsPosNotify[MAX_PACKET_BUFFER_COUNT];
    unsigned bytes_per_frame;
    unsigned i;

    PJ_ASSERT_RETURN(buffer_count <= MAX_PACKET_BUFFER_COUNT, PJ_EINVAL);

    /* Check device id */
    if (dev_id == -1)
        dev_id = 0;
    PJ_ASSERT_RETURN(dev_id>=0 && dev_id < (int)dev_count, PJ_EINVAL);

    /*
     * Creating recorder device.
     */
    hr = DirectSoundCaptureCreate(dev_info[dev_id].lpGuid,
                                  &ds_strm->ds.capture.lpDs,
                                  NULL);
    if (FAILED(hr))
        return PJ_RETURN_OS_ERROR(hr);

    /* Init wave format to initialize buffer */
    init_waveformatex( &pcmwf, clock_rate, channel_count);
    bytes_per_frame = samples_per_frame * BYTES_PER_SAMPLE;

    /*
     * Setup capture buffer using sound buffer structure that was passed
     * to play buffer creation earlier.
     */
    pj_bzero(&dscbdesc, sizeof(DSCBUFFERDESC));
    dscbdesc.dwSize = sizeof(DSCBUFFERDESC);
    dscbdesc.dwFlags = DSCBCAPS_WAVEMAPPED ;
    dscbdesc.dwBufferBytes = buffer_count * bytes_per_frame;
    dscbdesc.lpwfxFormat = (LPWAVEFORMATEX)&pcmwf;

    hr = IDirectSoundCapture_CreateCaptureBuffer( ds_strm->ds.capture.lpDs,
                                                  &dscbdesc,
                                                  &ds_strm->ds.capture.lpDsBuffer,
                                                  NULL);
    if (FAILED(hr))
        return PJ_RETURN_OS_ERROR(hr);

    /*
     * Create event for play notification.
     */
    ds_strm->hEvent = CreateEvent( NULL, FALSE, FALSE, NULL);
    if (ds_strm->hEvent == NULL)
        return pj_get_os_error();

    /*
     * Setup notifications for recording.
     */
    hr = IDirectSoundCaptureBuffer_QueryInterface( ds_strm->ds.capture.lpDsBuffer,
                                                   &IID_IDirectSoundNotify,
                                                   (LPVOID *)&ds_strm->lpDsNotify);
    if (FAILED(hr))
        return PJ_RETURN_OS_ERROR(hr);

    /* One notification position at the start of each frame-sized slot. */
    for (i=0; i<buffer_count; ++i) {
        dsPosNotify[i].dwOffset = i * bytes_per_frame;
        dsPosNotify[i].hEventNotify = ds_strm->hEvent;
    }

    hr = IDirectSoundNotify_SetNotificationPositions( ds_strm->lpDsNotify,
                                                      buffer_count,
                                                      dsPosNotify);
    if (FAILED(hr))
        return PJ_RETURN_OS_ERROR(hr);

    hr = IDirectSoundCaptureBuffer_GetCurrentPosition( ds_strm->ds.capture.lpDsBuffer,
                                                       NULL,
                                                       &ds_strm->dwBytePos );
    if (FAILED(hr))
        return PJ_RETURN_OS_ERROR(hr);

    ds_strm->timestamp.u64 = 0;
    ds_strm->dwDsBufferSize = buffer_count * bytes_per_frame;

    /*
     * Capture latency must always be on a frame boundary,
     * so compute it based off the calculated buffer_count.
     */
    ds_strm->latency = buffer_count * samples_per_frame * 1000 /
                       clock_rate / channel_count;

    /* Done setting up recorder device. */
    PJ_LOG(5,(THIS_FILE,
              " DirectSound capture \"%s\" initialized (clock_rate=%d, "
              "channel_count=%d, samples_per_frame=%d (%dms))",
              dev_info[dev_id].info.name,
              clock_rate, channel_count, samples_per_frame,
              samples_per_frame * 1000 / clock_rate));

    return PJ_SUCCESS;
}
/* * Initialize DirectSound player device. */ static pj_status_t init_player_stream( struct dsound_stream *ds_strm, int dev_id, unsigned clock_rate, unsigned channel_count, unsigned samples_per_frame, unsigned buffer_count) { HRESULT hr; HWND hwnd; PCMWAVEFORMAT pcmwf; DSBUFFERDESC dsbdesc; DSBPOSITIONNOTIFY dsPosNotify[MAX_PACKET_BUFFER_COUNT]; unsigned bytes_per_frame; unsigned max_latency; unsigned i; PJ_ASSERT_RETURN(buffer_count <= MAX_PACKET_BUFFER_COUNT, PJ_EINVAL); /* Check device ID */ if (dev_id == -1) dev_id = 0; PJ_ASSERT_RETURN(dev_id>=0 && dev_id < (int)dev_count, PJ_EINVAL); /* * Create DirectSound device. */ hr = DirectSoundCreate(dev_info[dev_id].lpGuid, &ds_strm->ds.play.lpDs, NULL); if (FAILED(hr)) return PJ_RETURN_OS_ERROR(hr); hwnd = GetForegroundWindow(); if (hwnd == NULL) { hwnd = GetDesktopWindow(); } hr = IDirectSound_SetCooperativeLevel( ds_strm->ds.play.lpDs, hwnd, DSSCL_PRIORITY); if FAILED(hr) return PJ_RETURN_OS_ERROR(hr); /* * Set up wave format structure for initialize DirectSound play * buffer. */ init_waveformatex(&pcmwf, clock_rate, channel_count); bytes_per_frame = samples_per_frame * BYTES_PER_SAMPLE; /* Set up DSBUFFERDESC structure. */ pj_bzero(&dsbdesc, sizeof(DSBUFFERDESC)); dsbdesc.dwSize = sizeof(DSBUFFERDESC); dsbdesc.dwFlags = DSBCAPS_CTRLVOLUME | DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS; dsbdesc.dwBufferBytes = buffer_count * bytes_per_frame; dsbdesc.lpwfxFormat = (LPWAVEFORMATEX)&pcmwf; /* * Create DirectSound playback buffer. */ hr = IDirectSound_CreateSoundBuffer(ds_strm->ds.play.lpDs, &dsbdesc, &ds_strm->ds.play.lpDsBuffer, NULL); if (FAILED(hr) ) return PJ_RETURN_OS_ERROR(hr); /* * Create event for play notification. */ ds_strm->hEvent = CreateEvent( NULL, FALSE, FALSE, NULL); if (ds_strm->hEvent == NULL) return pj_get_os_error(); /* * Setup notification for play. 
*/ hr = IDirectSoundBuffer_QueryInterface( ds_strm->ds.play.lpDsBuffer, &IID_IDirectSoundNotify, (LPVOID *)&ds_strm->lpDsNotify); if (FAILED(hr)) return PJ_RETURN_OS_ERROR(hr); for (i=0; i<buffer_count; ++i) { dsPosNotify[i].dwOffset = i * bytes_per_frame; dsPosNotify[i].hEventNotify = ds_strm->hEvent; } hr = IDirectSoundNotify_SetNotificationPositions( ds_strm->lpDsNotify, buffer_count, dsPosNotify); if (FAILED(hr)) return PJ_RETURN_OS_ERROR(hr); hr = IDirectSoundBuffer_SetCurrentPosition(ds_strm->ds.play.lpDsBuffer, 0); if (FAILED(hr)) return PJ_RETURN_OS_ERROR(hr); ds_strm->dwBytePos = 0; ds_strm->dwDsBufferSize = buffer_count * bytes_per_frame; ds_strm->timestamp.u64 = 0; /* * Play latency does not need to be on a frame boundry, it is just how far * ahead of the read pointer we set the write pointer. So we should just * use the user configured latency. However, if the latency measured in * bytes causes more buffers than we are allowed, we must cap the latency * at the time contained in 1-buffer_count. */ max_latency = (1 - buffer_count) * samples_per_frame * 1000 / clock_rate / channel_count; ds_strm->latency = PJ_MIN(max_latency, snd_output_latency); /* Done setting up play device. */ PJ_LOG(5,(THIS_FILE, " DirectSound player \"%s\" initialized (clock_rate=%d, " "channel_count=%d, samples_per_frame=%d (%dms))", dev_info[dev_id].info.name, clock_rate, channel_count, samples_per_frame, samples_per_frame * 1000 / clock_rate)); return PJ_SUCCESS; }