/*
 * Callback upon receiving packet from network.
 *
 * Runs under the ioqueue; resolves the NAT-detect session from the key's
 * user data, feeds any received datagram to the STUN session, then re-arms
 * the asynchronous recvfrom() so the next packet can be received.
 */
static void on_read_complete(pj_ioqueue_key_t *key, 
                             pj_ioqueue_op_key_t *op_key, 
                             pj_ssize_t bytes_read)
{
    nat_detect_session *sess;
    pj_status_t status;

    sess = (nat_detect_session *) pj_ioqueue_get_user_data(key);
    pj_assert(sess != NULL);

    /* Hold the group lock for the whole callback so the session cannot be
     * destroyed underneath us.
     */
    pj_grp_lock_acquire(sess->grp_lock);

    /* Ignore packet when STUN session has been destroyed */
    if (!sess->stun_sess)
        goto on_return;

    if (bytes_read < 0) {
        /* Negative bytes_read carries the negated pj_status_t error code.
         * Transient errors (would-block / in-progress / conn-reset) are
         * tolerated; anything else ends the session.
         */
        if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
            -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) && 
            -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
        {
            /* Permanent error */
            end_session(sess, (pj_status_t)-bytes_read, 
                        PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
            goto on_return;
        }
    } else if (bytes_read > 0) {
        /* Hand the datagram to the STUN session for parsing/dispatch. */
        pj_stun_session_on_rx_pkt(sess->stun_sess, sess->rx_pkt, bytes_read,
                                  PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET, 
                                  NULL, NULL, 
                                  &sess->src_addr, sess->src_addr_len);
    }

    /* Re-arm the next asynchronous read.  Lengths must be reset because
     * the previous operation overwrote them with actual sizes.
     */
    sess->rx_pkt_len = sizeof(sess->rx_pkt);
    sess->src_addr_len = sizeof(sess->src_addr);
    status = pj_ioqueue_recvfrom(key, op_key, sess->rx_pkt, &sess->rx_pkt_len,
                                 PJ_IOQUEUE_ALWAYS_ASYNC, 
                                 &sess->src_addr, &sess->src_addr_len);
    if (status != PJ_EPENDING) {
        /* With PJ_IOQUEUE_ALWAYS_ASYNC the only acceptable result is
         * PJ_EPENDING; anything else is a fatal error for the session.
         */
        pj_assert(status != PJ_SUCCESS);
        end_session(sess, status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
    }

on_return:
    pj_grp_lock_release(sess->grp_lock);
}
/*
 * Callback on new TCP connection.
 *
 * Reports the newly accepted socket to transport_create() and keeps
 * calling pj_ioqueue_accept() until the operation becomes asynchronous
 * (PJ_EPENDING), is cancelled, or would block.
 */
static void lis_on_accept_complete(pj_ioqueue_key_t *key, 
                                   pj_ioqueue_op_key_t *op_key, 
                                   pj_sock_t sock, 
                                   pj_status_t status)
{
    struct tcp_listener *tcp_lis;
    /* The op_key doubles as the per-accept state record. */
    struct accept_op *accept_op = (struct accept_op*) op_key;

    tcp_lis = (struct tcp_listener*) pj_ioqueue_get_user_data(key);

    /* The accepted socket is taken from accept_op->sock, not from the
     * 'sock' parameter.
     */
    PJ_UNUSED_ARG(sock);

    do {
        /* Report new connection. */
        if (status == PJ_SUCCESS) {
            char addr[PJ_INET6_ADDRSTRLEN+8];
            PJ_LOG(5,(tcp_lis->base.obj_name, "Incoming TCP from %s",
                      pj_sockaddr_print(&accept_op->src_addr, addr,
                                        sizeof(addr), 3)));

            transport_create(accept_op->sock, &tcp_lis->base,
                             &accept_op->src_addr, accept_op->src_addr_len);
        } else if (status != PJ_EPENDING) {
            show_err(tcp_lis->base.obj_name, "accept()", status);
        }

        /* Prepare next accept().  Address length must be reset since the
         * previous accept overwrote it.
         */
        accept_op->src_addr_len = sizeof(accept_op->src_addr);
        status = pj_ioqueue_accept(key, op_key, &accept_op->sock, NULL,
                                   &accept_op->src_addr,
                                   &accept_op->src_addr_len);

    /* Loop while accept() completes synchronously; stop on pending,
     * cancelled, or would-block conditions.
     */
    } while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
             status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
}
/*
 * pj_ioqueue_recv()
 *
 * Start asynchronous recv() from the socket.
 *
 * Tries a non-blocking recv() first (unless PJ_IOQUEUE_ALWAYS_ASYNC is
 * set); if no data is immediately available, queues the read operation
 * and arms the readable event, returning PJ_EPENDING.
 */
PJ_DEF(pj_status_t) pj_ioqueue_recv(  pj_ioqueue_key_t *key,
                                      pj_ioqueue_op_key_t *op_key,
                                      void *buffer,
                                      pj_ssize_t *length,
                                      unsigned flags )
{
    struct read_operation *read_op;

    PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
    PJ_CHECK_STACK();

    /* Check if key is closing (need to do this first before accessing
     * other variables, since they might have been destroyed. See ticket
     * #469).
     */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    read_op = (struct read_operation*)op_key;
    read_op->op = PJ_IOQUEUE_OP_NONE;

    /* Try to see if there's data immediately available.
     */
    if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
        pj_status_t status;
        pj_ssize_t size;

        size = *length;
        status = pj_sock_recv(key->fd, buffer, &size, flags);
        if (status == PJ_SUCCESS) {
            /* Yes! Data is available! */
            *length = size;
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
                return status;
        }
    }

    flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /*
     * No data is immediately available.
     * Must schedule asynchronous operation to the ioqueue.
     */
    read_op->op = PJ_IOQUEUE_OP_RECV;
    read_op->buf = buffer;
    read_op->size = *length;
    read_op->flags = flags;

    pj_mutex_lock(key->mutex);
    /* Check again. Handle may have been closed after the previous check
     * in multithreaded app. If we add bad handle to the set it will
     * corrupt the ioqueue set. See #913 (same guard as in
     * pj_ioqueue_recvfrom()).
     */
    if (IS_CLOSING(key)) {
        pj_mutex_unlock(key->mutex);
        return PJ_ECANCELLED;
    }
    pj_list_insert_before(&key->read_list, read_op);
    ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
    pj_mutex_unlock(key->mutex);

    return PJ_EPENDING;
}
/*
 * pj_mutex_unlock()
 *
 * Release the mutex.  Uses a critical section on WinNT 4.0 or later,
 * otherwise a Win32 mutex handle.
 */
PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
{
    pj_status_t status;

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(mutex, PJ_EINVAL);

#if PJ_DEBUG
    /* In debug builds, verify the calling thread actually owns the mutex
     * and maintain the recursive-lock nesting counter.
     */
    pj_assert(mutex->owner == pj_thread_this());
    if (--mutex->nesting_level == 0) {
        mutex->owner = NULL;
    }
#endif

    LOG_MUTEX((mutex->obj_name, "Mutex released by thread %s", 
               pj_thread_this()->obj_name));

#if PJ_WIN32_WINNT >= 0x0400
    /* LeaveCriticalSection() cannot fail. */
    LeaveCriticalSection(&mutex->crit);
    status=PJ_SUCCESS;
#else
    status = ReleaseMutex(mutex->hMutex) ? PJ_SUCCESS : 
                PJ_STATUS_FROM_OS(GetLastError());
#endif
    return status;
}
/*
 * pj_mutex_lock()
 *
 * Acquire the mutex, blocking indefinitely.  Uses a critical section on
 * WinNT 4.0 or later, otherwise a Win32 mutex handle.
 */
PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
{
    pj_status_t status;

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(mutex, PJ_EINVAL);

    LOG_MUTEX((mutex->obj_name, "Mutex: thread %s is waiting", 
               pj_thread_this()->obj_name));

#if PJ_WIN32_WINNT >= 0x0400
    /* EnterCriticalSection() cannot fail. */
    EnterCriticalSection(&mutex->crit);
    status=PJ_SUCCESS;
#else
    if (WaitForSingleObject(mutex->hMutex, INFINITE)==WAIT_OBJECT_0)
        status = PJ_SUCCESS;
    else
        status = PJ_STATUS_FROM_OS(GetLastError());
#endif

    LOG_MUTEX((mutex->obj_name, 
               (status==PJ_SUCCESS ? "Mutex acquired by thread %s" : 
                                     "FAILED by %s"),
               pj_thread_this()->obj_name));

#if PJ_DEBUG
    /* Track ownership and nesting for the debug-build sanity checks in
     * pj_mutex_unlock().
     */
    if (status == PJ_SUCCESS) {
        mutex->owner = pj_thread_this();
        ++mutex->nesting_level;
    }
#endif

    return status;
}
/*
 * Test callback for read completion.
 *
 * Accumulates received bytes, optionally unregisters the key from inside
 * the callback (when test_method == UNREGISTER_IN_CALLBACK), re-arms
 * reads until they go pending, and pokes the client socket to generate
 * more traffic.
 */
static void on_read_complete(pj_ioqueue_key_t *key, 
                             pj_ioqueue_op_key_t *op_key, 
                             pj_ssize_t bytes_read)
{
    pj_ssize_t size;
    char *sendbuf = "Hello world";
    pj_status_t status;

    /* Fast-path check without the lock; re-checked under the lock below. */
    if (sock_data.unregistered)
        return;

    pj_mutex_lock(sock_data.mutex);

    if (sock_data.unregistered) {
        pj_mutex_unlock(sock_data.mutex);
        return;
    }

    if (bytes_read < 0) {
        /* Negative bytes_read is the negated error code. */
        if (-bytes_read != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
            app_perror("ioqueue reported recv error", -bytes_read);
    } else {
        sock_data.received += bytes_read;
    }

    if (test_method == UNREGISTER_IN_CALLBACK) {
        pj_time_val now;

        pj_gettimeofday(&now);
        if (PJ_TIME_VAL_GTE(now, time_to_unregister)) { 
            sock_data.unregistered = 1;
            pj_ioqueue_unregister(key);
            /* NOTE(review): the mutex is destroyed while this thread still
             * holds it, and the pool backing it is released — this appears
             * to be deliberate for this unregister-in-callback test, but
             * confirm no other thread can be blocked on the mutex here.
             */
            pj_mutex_destroy(sock_data.mutex);
            pj_pool_release(sock_data.pool);
            sock_data.pool = NULL;
            return;
        }
    }
 
    /* Keep re-arming reads while data completes synchronously. */
    do { 
        size = sock_data.bufsize;
        status = pj_ioqueue_recv(key, op_key, sock_data.buffer, &size, 0);
        if (status != PJ_EPENDING && status != PJ_SUCCESS)
            app_perror("recv() error", status);

    } while (status == PJ_SUCCESS);

    pj_mutex_unlock(sock_data.mutex);

    /* Send twice to trigger further read completions on the server side. */
    size = pj_ansi_strlen(sendbuf);
    status = pj_sock_send(sock_data.csock, sendbuf, &size, 0);
    if (status != PJ_SUCCESS)
        app_perror("send() error", status);

    size = pj_ansi_strlen(sendbuf);
    status = pj_sock_send(sock_data.csock, sendbuf, &size, 0);
    if (status != PJ_SUCCESS)
        app_perror("send() error", status);
}
/*
 * Poll for the completion of non-blocking connect().
 * If there's a completion, the function return the key of the completed
 * socket, and 'result' argument contains the connect() result. If connect()
 * succeeded, 'result' will have value zero, otherwise will have the error
 * code.
 *
 * Completed keys are collected under the ioqueue lock first, then the
 * callbacks are invoked after the lock has been released.
 */
static int check_connecting( pj_ioqueue_t *ioqueue )
{
    if (ioqueue->connecting_count) {
        int i, count;
        /* Snapshot of completed connects, gathered while holding the lock. */
        struct {
            pj_ioqueue_key_t *key;
            pj_status_t status;
        } events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL-1];

        pj_lock_acquire(ioqueue->lock);
        for (count=0; count<PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL-1; ++count) {
            DWORD result;

            /* Zero timeout: purely a poll, never blocks. */
            result = WaitForMultipleObjects(ioqueue->connecting_count,
                                            ioqueue->connecting_handles,
                                            FALSE, 0);
            if (result >= WAIT_OBJECT_0 && 
                result < WAIT_OBJECT_0+ioqueue->connecting_count) 
            {
                WSANETWORKEVENTS net_events;

                /* Got completed connect(). */
                unsigned pos = result - WAIT_OBJECT_0;
                events[count].key = ioqueue->connecting_keys[pos];

                /* See whether connect has succeeded. */
                WSAEnumNetworkEvents((pj_sock_t)events[count].key->hnd, 
                                     ioqueue->connecting_handles[pos], 
                                     &net_events);
                events[count].status = 
                    PJ_STATUS_FROM_OS(net_events.iErrorCode[FD_CONNECT_BIT]);

                /* Erase socket from pending connect.  This compacts the
                 * arrays, so the next WaitForMultipleObjects() call sees
                 * only the still-pending handles.
                 */
                erase_connecting_socket(ioqueue, pos);
            } else {
                /* No more events */
                break;
            }
        }
        pj_lock_release(ioqueue->lock);

        /* Call callbacks (outside the lock, to avoid deadlock with
         * application code).
         */
        for (i=0; i<count; ++i) {
            if (events[i].key->cb.on_connect_complete) {
                events[i].key->cb.on_connect_complete(events[i].key, 
                                                      events[i].status);
            }
        }

        return count;
    }

    return 0;
}
/*
 * Set the SO_NET_SERVICE_TYPE socket option to the given value.
 *
 * Returns PJ_ENOTSUP when the OS reports that the protocol option is
 * not available, otherwise passes through the setsockopt status.
 */
static pj_status_t sock_set_net_service_type(pj_sock_t sock, int val)
{
    pj_status_t rc = pj_sock_setsockopt(sock, pj_SOL_SOCKET(),
                                        SO_NET_SERVICE_TYPE,
                                        &val, sizeof(val));

    /* Translate "option not supported by protocol" into the generic
     * not-supported code.
     */
    return (rc == PJ_STATUS_FROM_OS(OSERR_ENOPROTOOPT)) ? PJ_ENOTSUP : rc;
}
/*
 * Query the current SO_NET_SERVICE_TYPE socket option into *p_val.
 *
 * Returns PJ_ENOTSUP when the OS reports that the protocol option is
 * not available, otherwise passes through the getsockopt status.
 */
static pj_status_t sock_get_net_service_type(pj_sock_t sock, int *p_val)
{
    int optlen = sizeof(*p_val);
    pj_status_t rc;

    PJ_ASSERT_RETURN(p_val, PJ_EINVAL);

    rc = pj_sock_getsockopt(sock, pj_SOL_SOCKET(), SO_NET_SERVICE_TYPE,
                            p_val, &optlen);

    /* Translate "option not supported by protocol" into the generic
     * not-supported code.
     */
    return (rc == PJ_STATUS_FROM_OS(OSERR_ENOPROTOOPT)) ? PJ_ENOTSUP : rc;
}
/*
 * Callback on received packet.
 *
 * Delivers each received datagram to the TURN server, resets the
 * per-packet pool, and keeps re-arming pj_ioqueue_recvfrom() until the
 * read goes pending, is cancelled, or would block.
 */
static void on_read_complete(pj_ioqueue_key_t *key, 
                             pj_ioqueue_op_key_t *op_key, 
                             pj_ssize_t bytes_read)
{
    struct udp_listener *udp;
    struct read_op *read_op = (struct read_op*) op_key;
    pj_status_t status;

    udp = (struct udp_listener*) pj_ioqueue_get_user_data(key);

    do {
        pj_pool_t *rpool;

        /* Report to server */
        if (bytes_read > 0) {
            read_op->pkt.len = bytes_read;
            pj_gettimeofday(&read_op->pkt.rx_time);

            pj_turn_srv_on_rx_pkt(udp->base.server, &read_op->pkt);
        }

        /* Reset pool.  The pool pointer lives inside the pkt structure,
         * so keep it across the reset and restore the fields the reset
         * may have invalidated.
         */
        rpool = read_op->pkt.pool;
        pj_pool_reset(rpool);
        read_op->pkt.pool = rpool;
        read_op->pkt.transport = &udp->tp;
        read_op->pkt.src.tp_type = udp->base.tp_type;

        /* Read next packet */
        bytes_read = sizeof(read_op->pkt.pkt);
        read_op->pkt.src_addr_len = sizeof(read_op->pkt.src.clt_addr);
        pj_bzero(&read_op->pkt.src.clt_addr, 
                 sizeof(read_op->pkt.src.clt_addr));

        status = pj_ioqueue_recvfrom(udp->key, op_key, 
                                     read_op->pkt.pkt, &bytes_read, 0,
                                     &read_op->pkt.src.clt_addr, 
                                     &read_op->pkt.src_addr_len);

        /* Encode a synchronous error as negative bytes_read so the next
         * loop iteration skips delivery.
         */
        if (status != PJ_EPENDING && status != PJ_SUCCESS)
            bytes_read = -status;

    } while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
             status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
}
/*
 * Initiate overlapped connect() operation (well, it's non-blocking actually,
 * since there's no overlapped version of connect()).
 *
 * Returns PJ_SUCCESS when connect completes immediately, PJ_EPENDING when
 * the completion will be reported via the writable/exception events, or
 * an error status.
 */
PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_key_t *key,
                                        const pj_sockaddr_t *addr,
                                        int addrlen )
{
    pj_status_t status;
    
    /* check parameters. All must be specified! */
    PJ_ASSERT_RETURN(key && addr && addrlen, PJ_EINVAL);

    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    /* Check if socket has not been marked for connecting */
    if (key->connecting != 0)
        return PJ_EPENDING;
    
    status = pj_sock_connect(key->fd, addr, addrlen);
    if (status == PJ_SUCCESS) {
        /* Connected! */
        return PJ_SUCCESS;
    } else {
        if (status == PJ_STATUS_FROM_OS(PJ_BLOCKING_CONNECT_ERROR_VAL)) {
            /* Pending! */
            pj_ioqueue_lock_key(key);
            /* Check again. Handle may have been closed after the previous 
             * check in multithreaded app. See #913
             */
            if (IS_CLOSING(key)) {
                pj_ioqueue_unlock_key(key);
                return PJ_ECANCELLED;
            }
            key->connecting = PJ_TRUE;
            /* Connection result is signalled via writable (success) and
             * exception (failure) events.
             */
            ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
            ioqueue_add_to_set(key->ioqueue, key, EXCEPTION_EVENT);
            pj_ioqueue_unlock_key(key);
            return PJ_EPENDING;
        } else {
            /* Error! */
            return status;
        }
    }
}
/*
 * ioqueue accept-completion callback for active socket.
 *
 * Tracks consecutive accept errors (stopping after a threshold), notifies
 * the application of each accepted connection, and re-arms the
 * asynchronous accept until it goes pending or is cancelled.
 *
 * IMPORTANT: if the application callback returns false, this object has
 * been destroyed and we must return immediately without touching it.
 */
static void ioqueue_on_accept_complete(pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       pj_sock_t new_sock,
                                       pj_status_t status)
{
    pj_activesock_t *asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
    struct accept_op *accept_op = (struct accept_op*) op_key;

    /* The accepted socket is read from accept_op->new_sock instead. */
    PJ_UNUSED_ARG(new_sock);

    /* Ignore if we've been shutdown */
    if (asock->shutdown)
        return;

    do {
        /* Count consecutive identical errors; too many of them means the
         * accept loop is stuck, so stop re-arming and report ESOCKETSTOP.
         */
        if (status == asock->last_err && status != PJ_SUCCESS) {
            asock->err_counter++;
            if (asock->err_counter >= PJ_ACTIVESOCK_MAX_CONSECUTIVE_ACCEPT_ERROR) {
                PJ_LOG(3, ("", "Received %d consecutive errors: %d for the accept()"
                               " operation, stopping further ioqueue accepts.",
                               asock->err_counter, asock->last_err));
                
                if ((status == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) && 
                    (asock->cb.on_accept_complete2)) 
                {
                    (*asock->cb.on_accept_complete2)(asock, 
                                                     accept_op->new_sock,
                                                     &accept_op->rem_addr,
                                                     accept_op->rem_addr_len,
                                                     PJ_ESOCKETSTOP);
                }
                return;
            }
        } else {
            asock->err_counter = 0;
            asock->last_err = status;
        }

        if (status==PJ_SUCCESS && (asock->cb.on_accept_complete2 || 
                                   asock->cb.on_accept_complete)) {
            pj_bool_t ret;

            /* Notify callback.  The "2" variant additionally receives the
             * completion status.
             */
            if (asock->cb.on_accept_complete2) {
                ret = (*asock->cb.on_accept_complete2)(asock, 
                                                       accept_op->new_sock,
                                                       &accept_op->rem_addr,
                                                       accept_op->rem_addr_len,
                                                       status);
            } else {
                ret = (*asock->cb.on_accept_complete)(asock, 
                                                      accept_op->new_sock,
                                                      &accept_op->rem_addr,
                                                      accept_op->rem_addr_len);
            }

            /* If callback returns false, we have been destroyed! */
            if (!ret)
                return;

#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
            activesock_create_iphone_os_stream(asock);
#endif
        } else if (status==PJ_SUCCESS) {
            /* Application doesn't handle the new socket, we need to 
             * close it to avoid resource leak.
             */
            pj_sock_close(accept_op->new_sock);
        }

        /* Don't start another accept() if we've been shutdown */
        if (asock->shutdown)
            return;

        /* Prepare next accept() */
        accept_op->new_sock = PJ_INVALID_SOCKET;
        accept_op->rem_addr_len = sizeof(accept_op->rem_addr);

        status = pj_ioqueue_accept(asock->key, op_key, &accept_op->new_sock,
                                   NULL, &accept_op->rem_addr, 
                                   &accept_op->rem_addr_len);

    } while (status != PJ_EPENDING && status != PJ_ECANCELLED);
}
/* * pj_ioqueue_connect() * * Initiate overlapped connect() operation (well, it's non-blocking actually, * since there's no overlapped version of connect()). */ PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_key_t *key, const pj_sockaddr_t *addr, int addrlen ) { HANDLE hEvent; pj_ioqueue_t *ioqueue; PJ_CHECK_STACK(); PJ_ASSERT_RETURN(key && addr && addrlen, PJ_EINVAL); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Check key is not closing */ if (key->closing) return PJ_ECANCELLED; #endif /* Initiate connect() */ if (connect((pj_sock_t)key->hnd, addr, addrlen) != 0) { DWORD dwStatus; dwStatus = WSAGetLastError(); if (dwStatus != WSAEWOULDBLOCK) { return PJ_RETURN_OS_ERROR(dwStatus); } } else { /* Connect has completed immediately! */ return PJ_SUCCESS; } ioqueue = key->ioqueue; /* Add to the array of connecting socket to be polled */ pj_lock_acquire(ioqueue->lock); if (ioqueue->connecting_count >= MAXIMUM_WAIT_OBJECTS) { pj_lock_release(ioqueue->lock); return PJ_ETOOMANYCONN; } /* Get or create event object. */ if (ioqueue->event_count) { hEvent = ioqueue->event_pool[ioqueue->event_count - 1]; --ioqueue->event_count; } else { hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); if (hEvent == NULL) { DWORD dwStatus = GetLastError(); pj_lock_release(ioqueue->lock); return PJ_STATUS_FROM_OS(dwStatus); } } /* Mark key as connecting. * We can't use array index since key can be removed dynamically. */ key->connecting = 1; /* Associate socket events to the event object. */ if (WSAEventSelect((pj_sock_t)key->hnd, hEvent, FD_CONNECT) != 0) { CloseHandle(hEvent); pj_lock_release(ioqueue->lock); return PJ_RETURN_OS_ERROR(WSAGetLastError()); } /* Add to array. */ ioqueue->connecting_keys[ ioqueue->connecting_count ] = key; ioqueue->connecting_handles[ ioqueue->connecting_count ] = hEvent; ioqueue->connecting_count++; pj_lock_release(ioqueue->lock); return PJ_EPENDING; }
/*
 * pj_ioqueue_accept()
 *
 * Initiate overlapped accept() operation.
 *
 * First tries a non-blocking WSAAccept(); if no connection is immediately
 * available, creates a new socket and submits an overlapped AcceptEx().
 */
PJ_DEF(pj_status_t) pj_ioqueue_accept( pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       pj_sock_t *new_sock,
                                       pj_sockaddr_t *local,
                                       pj_sockaddr_t *remote,
                                       int *addrlen)
{
    BOOL rc;
    DWORD bytesReceived;
    pj_status_t status;
    union operation_key *op_key_rec;
    SOCKET sock;

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(key && op_key && new_sock, PJ_EINVAL);

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Check key is not closing */
    if (key->closing)
        return PJ_ECANCELLED;
#endif

    /*
     * See if there is a new connection immediately available.
     */
    sock = WSAAccept((SOCKET)key->hnd, remote, addrlen, NULL, 0);
    if (sock != INVALID_SOCKET) {
        /* Yes! New socket is available! */
        if (local && addrlen) {
            /* NOTE(review): this inner 'int status' shadows the outer
             * pj_status_t status; the outer variable is only used later
             * for pj_sock_socket(), so behavior is unaffected.
             */
            int status;

            /* On WinXP or later, use SO_UPDATE_ACCEPT_CONTEXT so that socket 
             * addresses can be obtained with getsockname() and getpeername().
             */
            status = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
                                (char*)&key->hnd, sizeof(SOCKET));
            /* SO_UPDATE_ACCEPT_CONTEXT is for WinXP or later.
             * So ignore the error status.
             */

            status = getsockname(sock, local, addrlen);
            if (status != 0) {
                DWORD dwError = WSAGetLastError();
                closesocket(sock);
                return PJ_RETURN_OS_ERROR(dwError);
            }
        }

        *new_sock = sock;
        return PJ_SUCCESS;

    } else {
        DWORD dwError = WSAGetLastError();
        if (dwError != WSAEWOULDBLOCK) {
            return PJ_RETURN_OS_ERROR(dwError);
        }
    }

    /*
     * No connection is immediately available.
     * Must schedule an asynchronous operation.
     */
    op_key_rec = (union operation_key*)op_key->internal__;

    /* AcceptEx() requires the accepting socket to be created up-front. */
    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, 
                            &op_key_rec->accept.newsock);
    if (status != PJ_SUCCESS)
        return status;

    op_key_rec->accept.operation = PJ_IOQUEUE_OP_ACCEPT;
    op_key_rec->accept.addrlen = addrlen;
    op_key_rec->accept.local = local;
    op_key_rec->accept.remote = remote;
    op_key_rec->accept.newsock_ptr = new_sock;
    pj_bzero( &op_key_rec->accept.overlapped, 
              sizeof(op_key_rec->accept.overlapped));

    rc = AcceptEx( (SOCKET)key->hnd, (SOCKET)op_key_rec->accept.newsock,
                   op_key_rec->accept.accept_buf,
                   0, ACCEPT_ADDR_LEN, ACCEPT_ADDR_LEN,
                   &bytesReceived,
                   &op_key_rec->accept.overlapped );

    if (rc == TRUE) {
        /* AcceptEx completed synchronously; finish it right away. */
        ioqueue_on_accept_complete(key, &op_key_rec->accept);
        return PJ_SUCCESS;
    } else {
        DWORD dwStatus = WSAGetLastError();
        if (dwStatus!=WSA_IO_PENDING)
            return PJ_STATUS_FROM_OS(dwStatus);
    }

    /* Asynchronous Accept() has been submitted. */
    return PJ_EPENDING;
}
/*
 * pj_ioqueue_sendto()
 *
 * Initiate overlapped SendTo operation.
 *
 * Unless PJ_IOQUEUE_ALWAYS_ASYNC is set, a synchronous WSASendTo() is
 * attempted first; only when it would block is an overlapped WSASendTo()
 * scheduled.
 */
PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       const void *data,
                                       pj_ssize_t *length,
                                       pj_uint32_t flags,
                                       const pj_sockaddr_t *addr,
                                       int addrlen)
{
    int rc;
    DWORD bytesWritten;
    DWORD dwFlags;
    union operation_key *op_key_rec;

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(key && op_key && data, PJ_EINVAL);

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Check key is not closing */
    if (key->closing)
        return PJ_ECANCELLED;
#endif

    op_key_rec = (union operation_key*)op_key->internal__;

    /*
     * First try blocking write.
     */
    op_key_rec->overlapped.wsabuf.buf = (void*)data;
    op_key_rec->overlapped.wsabuf.len = *length;

    dwFlags = flags;

    if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
        rc = WSASendTo((SOCKET)key->hnd, &op_key_rec->overlapped.wsabuf, 1,
                       &bytesWritten, dwFlags, addr, addrlen,
                       NULL, NULL);
        if (rc == 0) {
            /* Sent immediately. */
            *length = bytesWritten;
            return PJ_SUCCESS;
        } else {
            DWORD dwStatus = WSAGetLastError();
            if (dwStatus != WSAEWOULDBLOCK) {
                *length = -1;
                return PJ_RETURN_OS_ERROR(dwStatus);
            }
        }
    }

    /* PJ_IOQUEUE_ALWAYS_ASYNC is a library-level flag; strip it before
     * passing the flags to Winsock.
     */
    dwFlags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /*
     * Data can't be sent immediately.
     * Schedule asynchronous WSASend().
     */
    pj_bzero( &op_key_rec->overlapped.overlapped, 
              sizeof(op_key_rec->overlapped.overlapped));
    op_key_rec->overlapped.operation = PJ_IOQUEUE_OP_SEND;

    rc = WSASendTo((SOCKET)key->hnd, &op_key_rec->overlapped.wsabuf, 1,
                   &bytesWritten,  dwFlags, addr, addrlen,
                   &op_key_rec->overlapped.overlapped, NULL);
    if (rc == SOCKET_ERROR) {
        DWORD dwStatus = WSAGetLastError();
        if (dwStatus!=WSA_IO_PENDING)
            return PJ_STATUS_FROM_OS(dwStatus);
    }

    /* Asynchronous operation successfully submitted. */
    return PJ_EPENDING;
}
/*
 * WMME capture and playback thread.
 *
 * Waits on up to three event objects (quit event, playback event, capture
 * event).  When the playback event fires, frames are pulled from the
 * application and written to the device; when the capture event fires,
 * recorded frames are copied out and delivered to the application.
 * The thread exits when the quit event is signalled or a callback returns
 * a non-success status.
 */
static int PJ_THREAD_FUNC wmme_dev_thread(void *arg)
{
    pjmedia_snd_stream *strm = arg;
    HANDLE events[3];
    unsigned eventCount;
    unsigned bytes_per_frame;
    pj_status_t status = PJ_SUCCESS;

    /* events[0] is always the quit event; playback/capture events follow
     * depending on the stream direction.
     */
    eventCount = 0;
    events[eventCount++] = strm->thread_quit_event;
    if (strm->dir & PJMEDIA_DIR_PLAYBACK)
        events[eventCount++] = strm->play_strm.hEvent;
    if (strm->dir & PJMEDIA_DIR_CAPTURE)
        events[eventCount++] = strm->rec_strm.hEvent;

    /* Raise self priority. We don't want the audio to be distorted by
     * system activity.
     */
#if defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
    if (strm->dir & PJMEDIA_DIR_PLAYBACK)
        CeSetThreadPriority(GetCurrentThread(), 153);
    else
        CeSetThreadPriority(GetCurrentThread(), 247);
#else
    SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
#endif

    /* Calculate bytes per frame */
    bytes_per_frame = strm->samples_per_frame * BYTES_PER_SAMPLE;

    /*
     * Loop while not signalled to quit, wait for event objects to be 
     * signalled by WMME capture and play buffer.
     */
    while (status == PJ_SUCCESS)
    {

        DWORD rc;
        pjmedia_dir signalled_dir;

        rc = WaitForMultipleObjects(eventCount, events, FALSE, INFINITE);
        /* NOTE(review): rc is a DWORD and WAIT_OBJECT_0 is 0, so the first
         * half of this range check can never be true; it is harmless.
         */
        if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + eventCount)
            continue;

        /* events[0] is the quit event. */
        if (rc == WAIT_OBJECT_0)
            break;

        /* Map the signalled event index back to a direction; which slot
         * holds which event depends on the stream direction flags.
         */
        if (rc == (WAIT_OBJECT_0 + 1))
        {
            if (events[1] == strm->play_strm.hEvent)
                signalled_dir = PJMEDIA_DIR_PLAYBACK;
            else
                signalled_dir = PJMEDIA_DIR_CAPTURE;
        }
        else
        {
            if (events[2] == strm->play_strm.hEvent)
                signalled_dir = PJMEDIA_DIR_PLAYBACK;
            else
                signalled_dir = PJMEDIA_DIR_CAPTURE;
        }


        if (signalled_dir == PJMEDIA_DIR_PLAYBACK)
        {
            struct wmme_stream *wmme_strm = &strm->play_strm;
            MMRESULT mr = MMSYSERR_NOERROR;
            status = PJ_SUCCESS;

            /*
             * Windows Multimedia has requested us to feed some frames to
             * playback buffer.
             */
            while (wmme_strm->WaveHdr[wmme_strm->dwBufIdx].dwFlags & WHDR_DONE)
            {
                void* buffer = wmme_strm->WaveHdr[wmme_strm->dwBufIdx].lpData;

                PJ_LOG(5,(THIS_FILE, "Finished writing buffer %d", 
                          wmme_strm->dwBufIdx));

                /* Get frame from application. */
                status = (*strm->play_cb)(strm->user_data,
                                          wmme_strm->timestamp.u32.lo,
                                          buffer,
                                          bytes_per_frame);
                if (status != PJ_SUCCESS)
                    break;

                /* Write to the device. */
                mr = waveOutWrite(wmme_strm->hWave.Out, 
                                  &(wmme_strm->WaveHdr[wmme_strm->dwBufIdx]),
                                  sizeof(WAVEHDR));
                if (mr != MMSYSERR_NOERROR)
                {
                    status = PJ_STATUS_FROM_OS(mr);
                    break;
                }

                /* Increment position (circular buffer index). */
                if (++wmme_strm->dwBufIdx >= wmme_strm->dwMaxBufIdx)
                    wmme_strm->dwBufIdx = 0;
                wmme_strm->timestamp.u64 += strm->samples_per_frame / 
                                            strm->channel_count;
            }
        }
        else
        {
            struct wmme_stream *wmme_strm = &strm->rec_strm;
            MMRESULT mr = MMSYSERR_NOERROR;
            status = PJ_SUCCESS;

            /*
             * Windows Multimedia has indicated that it has some frames ready
             * in the capture buffer. Get as much frames as possible to
             * prevent overflows.
             */
#if 0
            {
                static DWORD tc = 0;
                DWORD now = GetTickCount();
                DWORD i = 0;
                DWORD bits = 0;

                if (tc == 0) tc = now;

                for (i = 0; i < wmme_strm->dwMaxBufIdx; ++i)
                {
                    bits = bits << 4;
                    bits |= wmme_strm->WaveHdr[i].dwFlags & WHDR_DONE;
                }
                PJ_LOG(5,(THIS_FILE, "Record Signal> Index: %d, Delta: %4.4d, "
                          "Flags: %6.6x\n",
                          wmme_strm->dwBufIdx,
                          now - tc,
                          bits));
                tc = now;
            }
#endif

            while (wmme_strm->WaveHdr[wmme_strm->dwBufIdx].dwFlags & WHDR_DONE)
            {
                char* buffer = (char*)
                               wmme_strm->WaveHdr[wmme_strm->dwBufIdx].lpData;
                unsigned cap_len = 
                        wmme_strm->WaveHdr[wmme_strm->dwBufIdx].dwBytesRecorded;

                /*
                PJ_LOG(5,(THIS_FILE, "Read %d bytes from buffer %d", 
                          cap_len, wmme_strm->dwBufIdx));
                */

                /* Pad short captures with silence so the application always
                 * gets a full frame.
                 */
                if (cap_len < bytes_per_frame)
                    pj_bzero(buffer + cap_len, bytes_per_frame - cap_len);

                /* Copy the audio data out of the wave buffer. */
                pj_memcpy(strm->buffer, buffer, bytes_per_frame);

                /* Re-add the buffer to the device.
                 */
                mr = waveInAddBuffer(wmme_strm->hWave.In, 
                                     &(wmme_strm->WaveHdr[wmme_strm->dwBufIdx]),
                                     sizeof(WAVEHDR));
                if (mr != MMSYSERR_NOERROR)
                {
                    status = PJ_STATUS_FROM_OS(mr);
                    break;
                }

                /* Call callback */
                status = (*strm->rec_cb)(strm->user_data,
                                         wmme_strm->timestamp.u32.lo,
                                         strm->buffer,
                                         bytes_per_frame);
                if (status != PJ_SUCCESS)
                    break;

                /* Increment position (circular buffer index). */
                if (++wmme_strm->dwBufIdx >= wmme_strm->dwMaxBufIdx)
                    wmme_strm->dwBufIdx = 0;
                wmme_strm->timestamp.u64 += strm->samples_per_frame / 
                                            strm->channel_count;
            }
        }
    }

    PJ_LOG(5,(THIS_FILE, "WMME: thread stopping.."));
    return 0;
}
/*
 * Initiate overlapped accept() operation.
 *
 * Tries a non-blocking accept() first (when no other accept is already
 * queued); otherwise schedules the operation and returns PJ_EPENDING.
 */
PJ_DEF(pj_status_t) pj_ioqueue_accept( pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       pj_sock_t *new_sock,
                                       pj_sockaddr_t *local,
                                       pj_sockaddr_t *remote,
                                       int *addrlen)
{
    struct accept_operation *accept_op;
    pj_status_t status;

    /* check parameters. All must be specified! */
    PJ_ASSERT_RETURN(key && op_key && new_sock, PJ_EINVAL);

    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    accept_op = (struct accept_operation*)op_key;
    accept_op->op = PJ_IOQUEUE_OP_NONE;

    /* Fast track:
     *  See if there's new connection available immediately.
     */
    if (pj_list_empty(&key->accept_list)) {
        status = pj_sock_accept(key->fd, new_sock, remote, addrlen);
        if (status == PJ_SUCCESS) {
            /* Yes! New connection is available! */
            if (local && addrlen) {
                status = pj_sock_getsockname(*new_sock, local, addrlen);
                if (status != PJ_SUCCESS) {
                    pj_sock_close(*new_sock);
                    *new_sock = PJ_INVALID_SOCKET;
                    return status;
                }
            }
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) {
                return status;
            }
        }
    }

    /*
     * No connection is available immediately.
     * Schedule accept() operation to be completed when there is incoming
     * connection available.
     */
    accept_op->op = PJ_IOQUEUE_OP_ACCEPT;
    accept_op->accept_fd = new_sock;
    accept_op->rmt_addr = remote;
    accept_op->addrlen= addrlen;
    accept_op->local_addr = local;

    pj_mutex_lock(key->mutex);
    /* Check again. Handle may have been closed after the previous check
     * in multithreaded app. If we add bad handle to the set it will
     * corrupt the ioqueue set. See #913 (same guard as in
     * pj_ioqueue_recvfrom()).
     */
    if (IS_CLOSING(key)) {
        pj_mutex_unlock(key->mutex);
        return PJ_ECANCELLED;
    }
    pj_list_insert_before(&key->accept_list, accept_op);
    ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
    pj_mutex_unlock(key->mutex);

    return PJ_EPENDING;
}
/*
 * pj_ioqueue_recvfrom()
 *
 * Start asynchronous recvfrom() from the socket.
 *
 * Tries a non-blocking recvfrom() first (unless PJ_IOQUEUE_ALWAYS_ASYNC
 * is set); if no data is immediately available, queues the read operation
 * and arms the readable event, returning PJ_EPENDING.
 */
PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_key_t *key,
                                         pj_ioqueue_op_key_t *op_key,
                                         void *buffer,
                                         pj_ssize_t *length,
                                         unsigned flags,
                                         pj_sockaddr_t *addr,
                                         int *addrlen)
{
    struct read_operation *read_op;

    PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
    PJ_CHECK_STACK();

    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    read_op = (struct read_operation*)op_key;
    read_op->op = PJ_IOQUEUE_OP_NONE;

    /* Try to see if there's data immediately available.
     */
    if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
        pj_status_t status;
        pj_ssize_t size;

        size = *length;
        status = pj_sock_recvfrom(key->fd, buffer, &size, flags,
                                  addr, addrlen);
        if (status == PJ_SUCCESS) {
            /* Yes! Data is available! */
            *length = size;
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
                return status;
        }
    }

    /* PJ_IOQUEUE_ALWAYS_ASYNC is library-level; don't pass it to the OS. */
    flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /*
     * No data is immediately available.
     * Must schedule asynchronous operation to the ioqueue.
     */
    read_op->op = PJ_IOQUEUE_OP_RECV_FROM;
    read_op->buf = buffer;
    read_op->size = *length;
    read_op->flags = flags;
    read_op->rmt_addr = addr;
    read_op->rmt_addrlen = addrlen;

    pj_ioqueue_lock_key(key);
    /* Check again. Handle may have been closed after the previous check
     * in multithreaded app. If we add bad handle to the set it will
     * corrupt the ioqueue set. See #913
     */
    if (IS_CLOSING(key)) {
        pj_ioqueue_unlock_key(key);
        return PJ_ECANCELLED;
    }
    pj_list_insert_before(&key->read_list, read_op);
    ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
    pj_ioqueue_unlock_key(key);

    return PJ_EPENDING;
}
/* Handler for sending outgoing message; called by transport manager.
 *
 * The loopback transport turns each outgoing tx_data into an "incoming"
 * packet on the same endpoint.  Depending on configuration it can fail
 * the send (fail_mode), silently discard (discard), deliver immediately,
 * or defer delivery/notification to the worker thread (recv_delay /
 * send_delay).
 */
static pj_status_t loop_send_msg( pjsip_transport *tp, 
                                  pjsip_tx_data *tdata,
                                  const pj_sockaddr_t *rem_addr,
                                  int addr_len,
                                  void *token,
                                  void (*cb)(pjsip_transport *transport,
                                             void *token, 
                                             pj_ssize_t sent_bytes))
{
    struct loop_transport *loop = (struct loop_transport*)tp;
    struct recv_list *recv_pkt;
    
    PJ_ASSERT_RETURN(tp && (tp->key.type == PJSIP_TRANSPORT_LOOP ||
                     tp->key.type == PJSIP_TRANSPORT_LOOP_DGRAM), PJ_EINVAL);

    /* The loopback transport ignores the destination address. */
    PJ_UNUSED_ARG(rem_addr);
    PJ_UNUSED_ARG(addr_len);


    /* Need to send failure? */
    if (loop->fail_mode) {
        if (loop->send_delay == 0) {
            return PJ_STATUS_FROM_OS(OSERR_ECONNRESET);
        } else {
            /* Failure is reported asynchronously via the callback; the
             * negative value conveys the error code.
             */
            add_notification(loop, tdata, -PJ_STATUS_FROM_OS(OSERR_ECONNRESET),
                             token, cb);
            return PJ_EPENDING;
        }
    }

    /* Discard any packets? */
    if (loop->discard)
        return PJ_SUCCESS;

    /* Create rdata for the "incoming" packet. */
    recv_pkt = create_incoming_packet(loop, tdata);
    if (!recv_pkt)
        return PJ_ENOMEM;

    /* If delay is not configured, deliver this packet now! */
    if (loop->recv_delay == 0) {
        pj_ssize_t size_eaten;

        size_eaten = pjsip_tpmgr_receive_packet( loop->base.tpmgr, 
                                                 &recv_pkt->rdata);
        pj_assert(size_eaten == recv_pkt->rdata.pkt_info.len);

        pjsip_endpt_release_pool(loop->base.endpt, 
                                 recv_pkt->rdata.tp_info.pool);

    } else {
        /* Otherwise if delay is configured, add the "packet" to the 
         * receive list to be processed by worker thread.
         */
        pj_lock_acquire(loop->base.lock);
        pj_list_push_back(&loop->recv_list, recv_pkt);
        pj_lock_release(loop->base.lock);
    }

    if (loop->send_delay != 0) {
        /* Defer the sent-bytes notification to the worker thread. */
        add_notification(loop, tdata, tdata->buf.cur - tdata->buf.start,
                         token, cb);
        return PJ_EPENDING;
    } else {
        return PJ_SUCCESS;
    }
}
/*
 * Dispatch one readable event on the given key.
 *
 * Processes either one pending accept operation (TCP) or one pending read
 * operation: performs the actual socket call, then invokes the
 * application callback.  When concurrency is allowed the key mutex is
 * released before the callback; otherwise it is held across the callback
 * and released afterwards.
 */
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue,
                                  pj_ioqueue_key_t *h )
{
    pj_status_t rc;

    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
        pj_mutex_unlock(h->mutex);
        return;
    }

#   if PJ_HAS_TCP
    if (!pj_list_empty(&h->accept_list)) {

        struct accept_operation *accept_op;
        pj_bool_t has_lock;

        /* Get one accept operation from the list. */
        accept_op = h->accept_list.next;
        pj_list_erase(accept_op);
        accept_op->op = PJ_IOQUEUE_OP_NONE;

        /* Clear bit in fdset if there is no more pending accept */
        if (pj_list_empty(&h->accept_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        rc=pj_sock_accept(h->fd, accept_op->accept_fd, 
                          accept_op->rmt_addr, accept_op->addrlen);
        if (rc==PJ_SUCCESS && accept_op->local_addr) {
            rc = pj_sock_getsockname(*accept_op->accept_fd, 
                                     accept_op->local_addr,
                                     accept_op->addrlen);
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_accept_complete && !IS_CLOSING(h)) {
            (*h->cb.on_accept_complete)(h, 
                                        (pj_ioqueue_op_key_t*)accept_op,
                                        *accept_op->accept_fd, rc);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }
    }
    else
#   endif
    if (key_has_pending_read(h)) {
        struct read_operation *read_op;
        pj_ssize_t bytes_read;
        pj_bool_t has_lock;

        /* Get one pending read operation from the list. */
        read_op = h->read_list.next;
        pj_list_erase(read_op);

        /* Clear fdset if there is no pending read. */
        if (pj_list_empty(&h->read_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        bytes_read = read_op->size;

        /* Perform the actual I/O depending on how the operation was
         * scheduled (recvfrom / recv / read).
         */
        if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read, 
                                  read_op->flags,
                                  read_op->rmt_addr, 
                                  read_op->rmt_addrlen);
        } else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, 
                              read_op->flags);
        } else {
            pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
            read_op->op = PJ_IOQUEUE_OP_NONE;
            /*
             * User has specified pj_ioqueue_read().
             * On Win32, we should do ReadFile(). But because we got
             * here because of select() anyway, user must have put a
             * socket descriptor on h->fd, which in this case we can
             * just call pj_sock_recv() instead of ReadFile().
             * On Unix, user may put a file in h->fd, so we'll have
             * to call read() here.
             * This may not compile on systems which doesn't have
             * read(). That's why we only specify PJ_LINUX here so
             * that error is easier to catch.
             */
#           if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
               defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
            rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, 
                              read_op->flags);
            //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
            //              &bytes_read, NULL);
#           elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
            bytes_read = read(h->fd, read_op->buf, bytes_read);
            rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
#           elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
            bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
            rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
#           else
#           error "Implement read() for this platform!"
#           endif
        }
        
        if (rc != PJ_SUCCESS) {
#           if defined(PJ_WIN32) && PJ_WIN32 != 0
            /* On Win32, for UDP, WSAECONNRESET on the receive side 
             * indicates that previous sending has triggered ICMP Port 
             * Unreachable message.
             * But we wouldn't know at this point which one of previous 
             * key that has triggered the error, since UDP socket can
             * be shared!
             * So we'll just ignore it!
             */
            if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
                //PJ_LOG(4,(THIS_FILE, 
                //          "Ignored ICMP port unreach. on key=%p", h));
            }
#           endif

            /* In any case we would report this to caller (negative
             * bytes_read carries the negated error code).
             */
            bytes_read = -rc;
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_read_complete && !IS_CLOSING(h)) {
            (*h->cb.on_read_complete)(h, 
                                      (pj_ioqueue_op_key_t*)read_op,
                                      bytes_read);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
        pj_mutex_unlock(h->mutex);
    }
}
/*
 * ioqueue callback: invoked when data arrives on an active socket, or
 * when the pending read completes with an error/EOF.
 *
 * Strategy: loop to drain immediately-available data, handing each chunk
 * to the user via on_data_read() (stream) or on_data_recvfrom()
 * (datagram). The loop ends when the ioqueue reports a pending
 * operation, or is forced asynchronous after asock->max_loop
 * iterations so other sockets get a turn.
 *
 * IMPORTANT: a FALSE return from a user callback means the active
 * socket has been destroyed by the callback; this function must then
 * return immediately without touching 'asock' again.
 */
static void ioqueue_on_read_complete(pj_ioqueue_key_t *key,
                                     pj_ioqueue_op_key_t *op_key,
                                     pj_ssize_t bytes_read)
{
    pj_activesock_t *asock;
    struct read_op *r = (struct read_op*)op_key;
    unsigned loop = 0;
    pj_status_t status;

    asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);

    /* Ignore if we've been shutdown */
    if (asock->shutdown & SHUT_RX)
        return;

    do {
        unsigned flags;

        if (bytes_read > 0) {
            /*
             * We've got new data.
             */
            pj_size_t remainder;
            pj_bool_t ret;

            /* Append this new data to existing data. If socket is stream
             * oriented, user might have left some data in the buffer.
             * Otherwise if socket is datagram there will be nothing in
             * existing packet hence the packet will contain only the new
             * packet.
             */
            r->size += bytes_read;

            /* Set default remainder to zero */
            remainder = 0;

            /* And return value to TRUE */
            ret = PJ_TRUE;

            /* Notify callback */
            if (asock->read_type == TYPE_RECV && asock->cb.on_data_read) {
                ret = (*asock->cb.on_data_read)(asock, r->pkt, r->size,
                                                PJ_SUCCESS, &remainder);
            } else if (asock->read_type == TYPE_RECV_FROM &&
                       asock->cb.on_data_recvfrom)
            {
                ret = (*asock->cb.on_data_recvfrom)(asock, r->pkt, r->size,
                                                    &r->src_addr,
                                                    r->src_addr_len,
                                                    PJ_SUCCESS);
            }

            /* If callback returns false, we have been destroyed! */
            if (!ret)
                return;

            /* Only stream oriented socket may leave data in the packet */
            if (asock->stream_oriented) {
                r->size = remainder;
            } else {
                r->size = 0;
            }

        } else if (bytes_read <= 0 &&
                   -bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
                   -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
                   (asock->stream_oriented ||
                    -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET)))
        {
            /* EOF or a real error (transient would-block/in-progress
             * conditions, and datagram ECONNRESET, were filtered out
             * by the condition above).
             */
            pj_size_t remainder;
            pj_bool_t ret;

            if (bytes_read == 0) {
                /* For stream/connection oriented socket, this means the
                 * connection has been closed. For datagram sockets, it means
                 * we've received datagram with zero length.
                 */
                if (asock->stream_oriented)
                    status = PJ_EEOF;
                else
                    status = PJ_SUCCESS;
            } else {
                /* This means we've got an error. If this is stream/connection
                 * oriented, it means connection has been closed. For datagram
                 * sockets, it means we've got some error (e.g. EWOULDBLOCK).
                 */
                status = (pj_status_t)-bytes_read;
            }

            /* Set default remainder to zero */
            remainder = 0;

            /* And return value to TRUE */
            ret = PJ_TRUE;

            /* Notify callback */
            if (asock->read_type == TYPE_RECV && asock->cb.on_data_read) {
                /* For connection oriented socket, we still need to report
                 * the remainder data (if any) to the user to let user do
                 * processing with the remainder data before it closes the
                 * connection.
                 * If there is no remainder data, set the packet to NULL.
                 */

                /* Shouldn't set the packet to NULL, as there may be active
                 * socket user, such as SSL socket, that needs to have access
                 * to the read buffer packet.
                 */
                //ret = (*asock->cb.on_data_read)(asock, (r->size? r->pkt:NULL),
                //                              r->size, status, &remainder);
                ret = (*asock->cb.on_data_read)(asock, r->pkt, r->size,
                                                status, &remainder);

            } else if (asock->read_type == TYPE_RECV_FROM &&
                       asock->cb.on_data_recvfrom)
            {
                /* This would always be datagram oriented hence there's
                 * nothing in the packet. We can't be sure if there will be
                 * anything useful in the source_addr, so just put NULL
                 * there too.
                 */
                /* In some scenarios, status may be PJ_SUCCESS. The upper
                 * layer application may not expect the callback to be called
                 * with successful status and NULL data, so lets not call the
                 * callback if the status is PJ_SUCCESS.
                 */
                if (status != PJ_SUCCESS ) {
                    ret = (*asock->cb.on_data_recvfrom)(asock, NULL, 0,
                                                        NULL, 0, status);
                }
            }

            /* If callback returns false, we have been destroyed! */
            if (!ret)
                return;

            /* Also stop further read if we've been shutdown */
            if (asock->shutdown & SHUT_RX)
                return;

            /* Only stream oriented socket may leave data in the packet */
            if (asock->stream_oriented) {
                r->size = remainder;
            } else {
                r->size = 0;
            }
        }

        /* Read next data. We limit ourselves to processing max_loop immediate
         * data, so when the loop counter has exceeded this value, force the
         * read()/recvfrom() to return pending operation to allow the program
         * to do other jobs.
         */
        bytes_read = r->max_size - r->size;
        flags = asock->read_flags;
        if (++loop >= asock->max_loop)
            flags |= PJ_IOQUEUE_ALWAYS_ASYNC;

        if (asock->read_type == TYPE_RECV) {
            status = pj_ioqueue_recv(key, op_key, r->pkt + r->size,
                                     &bytes_read, flags);
        } else {
            r->src_addr_len = sizeof(r->src_addr);
            status = pj_ioqueue_recvfrom(key, op_key, r->pkt + r->size,
                                         &bytes_read, flags,
                                         &r->src_addr,
                                         &r->src_addr_len);
        }

        if (status == PJ_SUCCESS) {
            /* Immediate data */
            ;
        } else if (status != PJ_EPENDING && status != PJ_ECANCELLED) {
            /* Error */
            bytes_read = -status;
        } else {
            break;
        }

    } while (1);

}
/*
 * udp_on_read_complete()
 *
 * This is callback notification from ioqueue that a pending recvfrom()
 * operation has completed.
 *
 * Packets larger than MIN_SIZE are handed to the transport manager;
 * smaller (but non-negative) reads are silently dropped; negative
 * values are OS error codes and are logged unless they are transient
 * (EWOULDBLOCK/EINPROGRESS/ECONNRESET).
 */
static void udp_on_read_complete( pj_ioqueue_key_t *key,
                                  pj_ioqueue_op_key_t *op_key,
                                  pj_ssize_t bytes_read)
{
    /* See https://trac.pjsip.org/repos/ticket/1197 */
    enum { MAX_IMMEDIATE_PACKET = 50 };
    pjsip_rx_data_op_key *rdata_op_key = (pjsip_rx_data_op_key*) op_key;
    pjsip_rx_data *rdata = rdata_op_key->rdata;
    struct udp_transport *tp = (struct udp_transport*)rdata->tp_info.transport;
    int i;
    pj_status_t status;

    /* Don't do anything if transport is closing. */
    if (tp->is_closing) {
        tp->is_closing++;
        return;
    }

    /* Don't do anything if transport is being paused. */
    if (tp->is_paused)
        return;

    /*
     * The idea of the loop is to process immediate data received by
     * pj_ioqueue_recvfrom(), as long as i < MAX_IMMEDIATE_PACKET. When
     * i is >= MAX_IMMEDIATE_PACKET, we force the recvfrom() operation to
     * complete asynchronously, to allow other sockets to get their data.
     */
    for (i=0;; ++i) {
        enum { MIN_SIZE = 32 };
        pj_uint32_t flags;

        /* Report the packet to transport manager. Only do so if packet size
         * is relatively big enough for a SIP packet.
         */
        if (bytes_read > MIN_SIZE) {
            pj_ssize_t size_eaten;
            const pj_sockaddr *src_addr = &rdata->pkt_info.src_addr;

            /* Init pkt_info part. */
            rdata->pkt_info.len = bytes_read;
            rdata->pkt_info.zero = 0;
            pj_gettimeofday(&rdata->pkt_info.timestamp);
            if (src_addr->addr.sa_family == pj_AF_INET()) {
                pj_ansi_strcpy(rdata->pkt_info.src_name,
                               pj_inet_ntoa(src_addr->ipv4.sin_addr));
                rdata->pkt_info.src_port = pj_ntohs(src_addr->ipv4.sin_port);
            } else {
                pj_inet_ntop(pj_AF_INET6(),
                             pj_sockaddr_get_addr(&rdata->pkt_info.src_addr),
                             rdata->pkt_info.src_name,
                             sizeof(rdata->pkt_info.src_name));
                rdata->pkt_info.src_port = pj_ntohs(src_addr->ipv6.sin6_port);
            }

            size_eaten =
                pjsip_tpmgr_receive_packet(rdata->tp_info.transport->tpmgr,
                                           rdata);

            if (size_eaten < 0) {
                pj_assert(!"It shouldn't happen!");
                size_eaten = rdata->pkt_info.len;
            }

            /* Since this is UDP, the whole buffer is the message. */
            rdata->pkt_info.len = 0;

        } else if (bytes_read <= MIN_SIZE && bytes_read >= 0) {
            /* BUG FIX: this branch previously tested only
             * "bytes_read <= MIN_SIZE", which also swallowed negative
             * (error) values and made the error-reporting branch below
             * unreachable. Restrict it to non-negative short reads.
             */
            /* TODO: */
        } else if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
                   -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
                   -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
        {
            /* Report error to endpoint. */
            PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
                                   rdata->tp_info.transport->obj_name,
                                   (pj_status_t)-bytes_read,
                                   "Warning: pj_ioqueue_recvfrom()"
                                   " callback error"));
        }

        if (i >= MAX_IMMEDIATE_PACKET) {
            /* Force ioqueue_recvfrom() to return PJ_EPENDING */
            flags = PJ_IOQUEUE_ALWAYS_ASYNC;
        } else {
            flags = 0;
        }

        /* Reset pool.
         * Need to copy rdata fields to temp variable because they will
         * be invalid after pj_pool_reset().
         */
        {
            pj_pool_t *rdata_pool = rdata->tp_info.pool;
            struct udp_transport *rdata_tp ;
            unsigned rdata_index;

            rdata_tp = (struct udp_transport*)rdata->tp_info.transport;
            rdata_index = (unsigned)(unsigned long)(pj_ssize_t)
                          rdata->tp_info.tp_data;

            pj_pool_reset(rdata_pool);
            init_rdata(rdata_tp, rdata_index, rdata_pool, &rdata);

            /* Change some vars to point to new location after
             * pool reset.
             */
            op_key = &rdata->tp_info.op_key.op_key;
        }

        /* Only read next packet if transport is not being paused. This
         * check handles the case where transport is paused while endpoint
         * is still processing a SIP message.
         */
        if (tp->is_paused)
            return;

        /* Read next packet. */
        bytes_read = sizeof(rdata->pkt_info.packet);
        rdata->pkt_info.src_addr_len = sizeof(rdata->pkt_info.src_addr);
        status = pj_ioqueue_recvfrom(key, op_key,
                                     rdata->pkt_info.packet,
                                     &bytes_read, flags,
                                     &rdata->pkt_info.src_addr,
                                     &rdata->pkt_info.src_addr_len);

        if (status == PJ_SUCCESS) {
            /* Continue loop. */
            pj_assert(i < MAX_IMMEDIATE_PACKET);

        } else if (status == PJ_EPENDING) {
            break;

        } else {

            if (i < MAX_IMMEDIATE_PACKET) {

                /* Report error to endpoint if this is not EWOULDBLOCK error.*/
                if (status != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
                    status != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
                    status != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
                {
                    PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
                                           rdata->tp_info.transport->obj_name,
                                           status,
                                           "Warning: pj_ioqueue_recvfrom"));
                }

                /* Continue loop. */
                bytes_read = 0;
            } else {
                /* This is fatal error.
                 * Ioqueue operation will stop for this transport!
                 */
                PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
                                       rdata->tp_info.transport->obj_name,
                                       status,
                                       "FATAL: pj_ioqueue_recvfrom() error, "
                                       "UDP transport stopping! Error"));
                break;
            }
        }
    }
}
PJ_DEF(pj_status_t) pj_get_netos_error(void) { return PJ_STATUS_FROM_OS(WSAGetLastError()); }
/* * pj_ioqueue_recv() * * Initiate overlapped WSARecv() operation. */ PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, void *buffer, pj_ssize_t *length, pj_uint32_t flags ) { /* * Ideally we should just call pj_ioqueue_recvfrom() with NULL addr and * addrlen here. But unfortunately it generates EINVAL... :-( * -bennylp */ int rc; DWORD bytesRead; DWORD dwFlags = 0; union operation_key *op_key_rec; PJ_CHECK_STACK(); PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Check key is not closing */ if (key->closing) return PJ_ECANCELLED; #endif op_key_rec = (union operation_key*)op_key->internal__; op_key_rec->overlapped.wsabuf.buf = buffer; op_key_rec->overlapped.wsabuf.len = *length; dwFlags = flags; /* Try non-overlapped received first to see if data is * immediately available. */ if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) { rc = WSARecv((SOCKET)key->hnd, &op_key_rec->overlapped.wsabuf, 1, &bytesRead, &dwFlags, NULL, NULL); if (rc == 0) { *length = bytesRead; return PJ_SUCCESS; } else { DWORD dwError = WSAGetLastError(); if (dwError != WSAEWOULDBLOCK) { *length = -1; return PJ_RETURN_OS_ERROR(dwError); } } } dwFlags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC); /* * No immediate data available. * Register overlapped Recv() operation. */ pj_bzero( &op_key_rec->overlapped.overlapped, sizeof(op_key_rec->overlapped.overlapped)); op_key_rec->overlapped.operation = PJ_IOQUEUE_OP_RECV; rc = WSARecv((SOCKET)key->hnd, &op_key_rec->overlapped.wsabuf, 1, &bytesRead, &dwFlags, &op_key_rec->overlapped.overlapped, NULL); if (rc == SOCKET_ERROR) { DWORD dwStatus = WSAGetLastError(); if (dwStatus!=WSA_IO_PENDING) { *length = -1; return PJ_STATUS_FROM_OS(dwStatus); } } /* Pending operation has been scheduled. */ return PJ_EPENDING; }
/* * pj_ioqueue_sendto() * * Start asynchronous write() to the descriptor. */ PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, const void *data, pj_ssize_t *length, pj_uint32_t flags, const pj_sockaddr_t *addr, int addrlen) { struct write_operation *write_op; unsigned retry; pj_status_t status; pj_ssize_t sent; PJ_ASSERT_RETURN(key && op_key && data && length, PJ_EINVAL); PJ_CHECK_STACK(); /* Check if key is closing. */ if (IS_CLOSING(key)) return PJ_ECANCELLED; /* We can not use PJ_IOQUEUE_ALWAYS_ASYNC for socket write */ flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC); /* Fast track: * Try to send data immediately, only if there's no pending write! * Note: * We are speculating that the list is empty here without properly * acquiring ioqueue's mutex first. This is intentional, to maximize * performance via parallelism. * * This should be safe, because: * - by convention, we require caller to make sure that the * key is not unregistered while other threads are invoking * an operation on the same key. * - pj_list_empty() is safe to be invoked by multiple threads, * even when other threads are modifying the list. */ if (pj_list_empty(&key->write_list)) { /* * See if data can be sent immediately. */ sent = *length; status = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen); if (status == PJ_SUCCESS) { /* Success! */ *length = sent; return PJ_SUCCESS; } else { /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report * the error to caller. */ if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) { return status; } status = status; } } /* * Check that address storage can hold the address parameter. */ PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr_in), PJ_EBUG); /* * Schedule asynchronous send. 
*/ write_op = (struct write_operation*)op_key; /* Spin if write_op has pending operation */ for (retry=0; write_op->op != 0 && retry<PENDING_RETRY; ++retry) pj_thread_sleep(0); /* Last chance */ if (write_op->op) { /* Unable to send packet because there is already pending write on the * write_op. We could not put the operation into the write_op * because write_op already contains a pending operation! And * we could not send the packet directly with sendto() either, * because that will break the order of the packet. So we can * only return error here. * * This could happen for example in multithreads program, * where polling is done by one thread, while other threads are doing * the sending only. If the polling thread runs on lower priority * than the sending thread, then it's possible that the pending * write flag is not cleared in-time because clearing is only done * during polling. * * Aplication should specify multiple write operation keys on * situation like this. */ //pj_assert(!"ioqueue: there is pending operation on this key!"); return PJ_EBUSY; } write_op->op = PJ_IOQUEUE_OP_SEND_TO; write_op->buf = (char*)data; write_op->size = *length; write_op->written = 0; write_op->flags = flags; pj_memcpy(&write_op->rmt_addr, addr, addrlen); write_op->rmt_addrlen = addrlen; pj_mutex_lock(key->mutex); pj_list_insert_before(&key->write_list, write_op); ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT); pj_mutex_unlock(key->mutex); return PJ_EPENDING; }
//rtems_task Init(rtems_task_argument Argument) void *POSIX_Init(void *argument) { pthread_attr_t threadAttr; pthread_t theThread; struct sched_param sched_param; size_t stack_size; int result; char data[1000]; memset(data, 1, sizeof(data)); /* Set the TOD clock, so that gettimeofday() will work */ rtems_time_of_day fakeTime = { 2006, 3, 15, 17, 30, 0, 0 }; if (RTEMS_SUCCESSFUL != rtems_clock_set(&fakeTime)) { assert(0); } /* Bring up the network stack so we can run the socket tests. */ initialize_network(); /* Start a POSIX thread for pjlib_test_main(), since that's what it * thinks it is running in. */ /* Initialize attribute */ TEST( pthread_attr_init(&threadAttr) ); /* Looks like the rest of the attributes must be fully initialized too, * or otherwise pthread_create will return EINVAL. */ /* Specify explicit scheduling request */ TEST( pthread_attr_setinheritsched(&threadAttr, PTHREAD_EXPLICIT_SCHED)); /* Timeslicing is needed by thread test, and this is accomplished by * SCHED_RR. */ TEST( pthread_attr_setschedpolicy(&threadAttr, SCHED_RR)); /* Set priority */ TEST( pthread_attr_getschedparam(&threadAttr, &sched_param)); sched_param.sched_priority = NETWORK_STACK_PRIORITY - 10; TEST( pthread_attr_setschedparam(&threadAttr, &sched_param)); /* Must have sufficient stack size (large size is needed by * logger, because default settings for logger is to use message buffer * from the stack). */ TEST( pthread_attr_getstacksize(&threadAttr, &stack_size)); if (stack_size < 8192) TEST( pthread_attr_setstacksize(&threadAttr, 8192)); /* Create the thread for application */ result = pthread_create(&theThread, &threadAttr, &pjlib_test_main, NULL); if (result != 0) { my_perror(PJ_STATUS_FROM_OS(result), "Error creating pjlib_test_main thread"); assert(!"Error creating main thread"); } return NULL; }
/*
 * ioqueue_dispatch_event()
 *
 * Report occurence of an event in the key to be processed by the
 * framework.
 *
 * Handles two kinds of writability events on the key, in priority
 * order: completion of a nonblocking connect() (TCP builds only), and
 * progress on the pending write queue. Lock discipline: the key lock
 * is held while manipulating key state; when concurrency is allowed it
 * is released before invoking the user callback.
 */
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
    /* Lock the key. */
    pj_ioqueue_lock_key(h);

    if (IS_CLOSING(h)) {
        pj_ioqueue_unlock_key(h);
        return;
    }

#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
    if (h->connecting) {
        /* Completion of connect() operation */
        pj_status_t status;
        pj_bool_t has_lock;

        /* Clear operation. */
        h->connecting = 0;

        ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
        ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);

#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
        /* from connect(2):
         * On Linux, use getsockopt to read the SO_ERROR option at
         * level SOL_SOCKET to determine whether connect() completed
         * successfully (if SO_ERROR is zero).
         */
        {
            int value;
            int vallen = sizeof(value);
            int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
                                           &value, &vallen);
            if (gs_rc != 0) {
                /* Argh!! What to do now???
                 * Just indicate that the socket is connected. The
                 * application will get error as soon as it tries to use
                 * the socket to send/receive.
                 */
                status = PJ_SUCCESS;
            } else {
                status = PJ_STATUS_FROM_OS(value);
            }
        }
#elif (defined(PJ_WIN32) && PJ_WIN32!=0) || (defined(PJ_WIN64) && PJ_WIN64!=0)
        status = PJ_SUCCESS; /* success */
#else
        /* Excellent information in D.J. Bernstein page:
         * http://cr.yp.to/docs/connect.html
         *
         * Seems like the most portable way of detecting connect()
         * failure is to call getpeername(). If socket is connected,
         * getpeername() will return 0. If the socket is not connected,
         * it will return ENOTCONN, and read(fd, &ch, 1) will produce
         * the right errno through error slippage. This is a combination
         * of suggestions from Douglas C. Schmidt and Ken Keys.
         */
        {
            struct sockaddr_in addr;
            int addrlen = sizeof(addr);

            status = pj_sock_getpeername(h->fd, (struct sockaddr*)&addr,
                                         &addrlen);
        }
#endif

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback)
         */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_ioqueue_unlock_key(h);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_connect_complete && !IS_CLOSING(h))
            (*h->cb.on_connect_complete)(h, status);

        /* Unlock if we still hold the lock */
        if (has_lock) {
            pj_ioqueue_unlock_key(h);
        }

        /* Done. */

    } else
#endif /* PJ_HAS_TCP */
    if (key_has_pending_write(h)) {
        /* Socket is writable. */
        struct write_operation *write_op;
        pj_ssize_t sent;
        pj_status_t send_rc = PJ_SUCCESS;

        /* Get the first in the queue. */
        write_op = h->write_list.next;

        /* For datagrams, we can remove the write_op from the list
         * so that send() can work in parallel.
         */
        if (h->fd_type == pj_SOCK_DGRAM()) {
            pj_list_erase(write_op);

            if (pj_list_empty(&h->write_list))
                ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

        }

        /* Send the data.
         * Unfortunately we must do this while holding key's mutex, thus
         * preventing parallel write on a single key.. :-((
         */
        sent = write_op->size - write_op->written;
        if (write_op->op == PJ_IOQUEUE_OP_SEND) {
            send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
                                   &sent, write_op->flags);
            /* Can't do this. We only clear "op" after we're finished sending
             * the whole buffer.
             */
            //write_op->op = 0;

        } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
            int retry = 2;
            while (--retry >= 0) {
                send_rc = pj_sock_sendto(h->fd,
                                         write_op->buf+write_op->written,
                                         &sent, write_op->flags,
                                         &write_op->rmt_addr,
                                         write_op->rmt_addrlen);
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
            PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
                /* Special treatment for dead UDP sockets here, see ticket #1107 */
                if (send_rc==PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(h) &&
                    h->fd_type==pj_SOCK_DGRAM())
                {
                    PJ_PERROR(4,(THIS_FILE, send_rc,
                                 "Send error for socket %d, retrying",
                                 h->fd));
                    replace_udp_sock(h);
                    continue;
                }
#endif
                break;
            }

            /* Can't do this. We only clear "op" after we're finished sending
             * the whole buffer.
             */
            //write_op->op = 0;

        } else {
            pj_assert(!"Invalid operation type!");
            write_op->op = PJ_IOQUEUE_OP_NONE;
            send_rc = PJ_EBUG;
        }

        if (send_rc == PJ_SUCCESS) {
            write_op->written += sent;
        } else {
            pj_assert(send_rc > 0);
            write_op->written = -send_rc;
        }

        /* Are we finished with this buffer? */
        if (send_rc!=PJ_SUCCESS ||
            write_op->written == (pj_ssize_t)write_op->size ||
            h->fd_type == pj_SOCK_DGRAM())
        {
            pj_bool_t has_lock;

            write_op->op = PJ_IOQUEUE_OP_NONE;

            if (h->fd_type != pj_SOCK_DGRAM()) {
                /* Write completion of the whole stream. */
                pj_list_erase(write_op);

                /* Clear operation if there's no more data to send. */
                if (pj_list_empty(&h->write_list))
                    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

            }

            /* Unlock; from this point we don't need to hold key's mutex
             * (unless concurrency is disabled, which in this case we should
             * hold the mutex while calling the callback)
             */
            if (h->allow_concurrent) {
                /* concurrency may be changed while we're in the callback, so
                 * save it to a flag.
                 */
                has_lock = PJ_FALSE;
                pj_ioqueue_unlock_key(h);
                PJ_RACE_ME(5);
            } else {
                has_lock = PJ_TRUE;
            }

            /* Call callback. */
            if (h->cb.on_write_complete && !IS_CLOSING(h)) {
                (*h->cb.on_write_complete)(h,
                                           (pj_ioqueue_op_key_t*)write_op,
                                           write_op->written);
            }

            if (has_lock) {
                pj_ioqueue_unlock_key(h);
            }

        } else {
            pj_ioqueue_unlock_key(h);
        }

        /* Done. */

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
        pj_ioqueue_unlock_key(h);
    }
}
int errno_test(void) { enum { CUT = 6 }; pj_status_t rc = 0; char errbuf[256]; PJ_LOG(3,(THIS_FILE, "...errno test: check the msg carefully")); PJ_UNUSED_ARG(rc); /* * Windows platform error. */ # ifdef ERROR_INVALID_DATA rc = PJ_STATUS_FROM_OS(ERROR_INVALID_DATA); pj_set_os_error(rc); /* Whole */ pj_strerror(rc, errbuf, sizeof(errbuf)); trim_newlines(errbuf); PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA: '%s'", errbuf)); if (my_stristr(errbuf, "invalid") == NULL) { PJ_LOG(3, (THIS_FILE, "...error: expecting \"invalid\" string in the msg")); #ifndef PJ_WIN32_WINCE return -20; #endif } /* Cut version. */ pj_strerror(rc, errbuf, CUT); PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA (cut): '%s'", errbuf)); # endif /* * Unix errors */ # if defined(EINVAL) && !defined(PJ_SYMBIAN) rc = PJ_STATUS_FROM_OS(EINVAL); pj_set_os_error(rc); /* Whole */ pj_strerror(rc, errbuf, sizeof(errbuf)); trim_newlines(errbuf); PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL: '%s'", errbuf)); if (my_stristr(errbuf, "invalid") == NULL) { PJ_LOG(3, (THIS_FILE, "...error: expecting \"invalid\" string in the msg")); return -30; } /* Cut */ pj_strerror(rc, errbuf, CUT); PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL (cut): '%s'", errbuf)); # endif /* * Windows WSA errors */ # ifdef WSAEINVAL rc = PJ_STATUS_FROM_OS(WSAEINVAL); pj_set_os_error(rc); /* Whole */ pj_strerror(rc, errbuf, sizeof(errbuf)); trim_newlines(errbuf); PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL: '%s'", errbuf)); if (my_stristr(errbuf, "invalid") == NULL) { PJ_LOG(3, (THIS_FILE, "...error: expecting \"invalid\" string in the msg")); return -40; } /* Cut */ pj_strerror(rc, errbuf, CUT); PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL (cut): '%s'", errbuf)); # endif pj_strerror(PJ_EBUG, errbuf, sizeof(errbuf)); PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG: '%s'", errbuf)); if (my_stristr(errbuf, "BUG") == NULL) { PJ_LOG(3, (THIS_FILE, "...error: expecting \"BUG\" string in the msg")); return -20; } pj_strerror(PJ_EBUG, 
errbuf, CUT); PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG, cut at %d chars: '%s'", CUT, errbuf)); /* Perror */ pj_perror(3, THIS_FILE, PJ_SUCCESS, "...testing %s", "pj_perror"); PJ_PERROR(3,(THIS_FILE, PJ_SUCCESS, "...testing %s", "PJ_PERROR")); return 0; }
PJ_DEF(pj_status_t) pj_get_netos_error(void) { return PJ_STATUS_FROM_OS(errno); }