/*
 * Timer callback for a multiwait recv descriptor whose timeout expired
 * (WINNT only).  Atomically claims the descriptor's outcome as
 * PR_MW_TIMEOUT; if another path (completion or cancel) already claimed
 * it, does nothing.  Otherwise aborts the outstanding overlapped read by
 * closing the underlying OS socket of the bottom (NSPR identity) layer.
 */
static void NT_TimeProc(void *arg)
{
    _MDOverlapped *overlapped = (_MDOverlapped *)arg;
    PRRecvWait *desc = overlapped->data.mw.desc;
    PRFileDesc *bottom;

    /* CAS on the outcome: only one of {completion, timeout, cancel} wins. */
    if (InterlockedCompareExchange((LONG *)&desc->outcome,
        (LONG)PR_MW_TIMEOUT, (LONG)PR_MW_PENDING) != (LONG)PR_MW_PENDING)
    {
        /* This wait recv descriptor has already completed. */
        return;
    }

    /* close the osfd to abort the outstanding async io request */
    /* $$$$
    ** Little late to be checking if NSPR's on the bottom of stack,
    ** but if we don't check, we can't assert that the private data
    ** is what we think it is.
    ** $$$$
    */
    bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
    PR_ASSERT(NULL != bottom);
    if (NULL != bottom)  /* now what!?!?! */
    {
        /* Mark the fd closed first so other threads see a consistent state
         * before the socket handle actually goes away. */
        bottom->secret->state = _PR_FILEDESC_CLOSED;
        if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
        {
            /* No sane recovery here; closesocket on a valid handle is not
             * expected to fail. */
            fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError());
            PR_ASSERT(!"What shall I do?");
        }
    }
    return;
}  /* NT_TimeProc */
// Used to return connection info to Dashboard.cpp void nsSocketTransportService::AnalyzeConnection(nsTArray<SocketInfo> *data, struct SocketContext *context, bool aActive) { if (context->mHandler->mIsPrivate) return; PRFileDesc *aFD = context->mFD; PRFileDesc *idLayer = PR_GetIdentitiesLayer(aFD, PR_NSPR_IO_LAYER); NS_ENSURE_TRUE_VOID(idLayer); bool tcp = PR_GetDescType(idLayer) == PR_DESC_SOCKET_TCP; PRNetAddr peer_addr; PR_GetPeerName(aFD, &peer_addr); char host[64] = {0}; PR_NetAddrToString(&peer_addr, host, sizeof(host)); uint16_t port; if (peer_addr.raw.family == PR_AF_INET) port = peer_addr.inet.port; else port = peer_addr.ipv6.port; port = PR_ntohs(port); uint64_t sent = context->mHandler->ByteCountSent(); uint64_t received = context->mHandler->ByteCountReceived(); SocketInfo info = { nsCString(host), sent, received, port, aActive, tcp }; data->AppendElement(info); }
/*
 * Finish a non-blocking connect: locate the NSPR identity layer at the
 * bottom of the stack and hand the poll flags to its connect-continue
 * logic.  Fails with PR_INVALID_ARGUMENT_ERROR if there is no NSPR layer.
 */
PR_IMPLEMENT(PRStatus) PR_GetConnectStatus(const PRPollDesc *pd)
{
    PRFileDesc *nsprLayer = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);

    if (nsprLayer == NULL)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }
    return SocketConnectContinue(nsprLayer, pd->out_flags);
}
PR_FileDesc2NativeHandle(PRFileDesc *fd)
{
    /* Only the NSPR identity layer carries the OS-level descriptor, so
     * descend to it first (tolerating a NULL argument). */
    PRFileDesc *bottom = fd ? PR_GetIdentitiesLayer(fd, PR_NSPR_IO_LAYER) : NULL;

    if (bottom == NULL)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return -1;
    }
    return bottom->secret->md.osfd;
}
// Worker-thread loop of the ClosingService: dequeues file descriptors and
// closes them off the main thread, recording how long each close blocked.
// Exits (deliberately leaking queued descriptors) once mShutdown is set.
void ClosingService::ThreadFunc() {
  for (;;) {
    PRFileDesc *fd;
    {
      // Scope the monitor: hold it only while inspecting/popping the queue.
      mozilla::MonitorAutoLock mon(mMonitor);
      while (!mShutdown && (mQueue.Length() == 0)) {
        mon.Wait();
      }
      if (mShutdown) {
        // If we are in shutdown leak the rest of the sockets.
        for (uint32_t i = 0; i < mQueue.Length(); i++) {
          fd = mQueue[i];
          // If the ClosingService layer is the first layer above
          // PR_NSPR_IO_LAYER we are not going to leak anything, but PR_Close
          // will not be called.
          PR_Free(fd);
        }
        mQueue.Clear();
        return;
      }
      fd = mQueue[0];
      mQueue.RemoveElementAt(0);
    }
    // Leave lock before closing socket. It can block for a long time and in
    // case we accidentally attach this layer twice this would cause deadlock.
    // Determine TCP vs UDP before close, while the fd is still valid.
    bool tcp = (PR_GetDescType(PR_GetIdentitiesLayer(fd, PR_NSPR_IO_LAYER)) ==
                PR_DESC_SOCKET_TCP);
    PRIntervalTime closeStarted = PR_IntervalNow();
    fd->methods->close(fd);
    // Post telemetry.
    if (tcp) {
      SendPRCloseTelemetry(closeStarted,
                           Telemetry::PRCLOSE_TCP_BLOCKING_TIME_NORMAL,
                           Telemetry::PRCLOSE_TCP_BLOCKING_TIME_SHUTDOWN,
                           Telemetry::PRCLOSE_TCP_BLOCKING_TIME_CONNECTIVITY_CHANGE,
                           Telemetry::PRCLOSE_TCP_BLOCKING_TIME_LINK_CHANGE,
                           Telemetry::PRCLOSE_TCP_BLOCKING_TIME_OFFLINE);
    } else {
      SendPRCloseTelemetry(closeStarted,
                           Telemetry::PRCLOSE_UDP_BLOCKING_TIME_NORMAL,
                           Telemetry::PRCLOSE_UDP_BLOCKING_TIME_SHUTDOWN,
                           Telemetry::PRCLOSE_UDP_BLOCKING_TIME_CONNECTIVITY_CHANGE,
                           Telemetry::PRCLOSE_UDP_BLOCKING_TIME_LINK_CHANGE,
                           Telemetry::PRCLOSE_UDP_BLOCKING_TIME_OFFLINE);
    }
  }
}
// Make the socket's address family match the proxy's so the connect to the
// proxy can succeed.  Either rewrites |proxy| to an IPv4-mapped IPv6 address
// (when IPv6 is unsupported), or swaps the socket's OS handle with a fresh
// socket of the proxy's family.
void
nsSOCKSSocketInfo::FixupAddressFamily(PRFileDesc *fd, PRNetAddr *proxy)
{
    int32_t proxyFamily = PR_NetAddrFamily(&mInternalProxyAddr);
    // Do nothing if the address family is already matched
    if (proxyFamily == mDestinationFamily) {
        return;
    }
    // If the system does not support IPv6 and the proxy address is IPv6,
    // We can do nothing here.
    if (proxyFamily == PR_AF_INET6 && !ipv6Supported) {
        return;
    }
    // If the system does not support IPv6 and the destination address is
    // IPv6, convert IPv4 address to IPv4-mapped IPv6 address to satisfy
    // the emulation layer
    if (mDestinationFamily == PR_AF_INET6 && !ipv6Supported) {
        proxy->ipv6.family = PR_AF_INET6;
        proxy->ipv6.port = mInternalProxyAddr.inet.port;
        uint8_t *proxyp = proxy->ipv6.ip.pr_s6_addr;
        // ::ffff:a.b.c.d layout: 10 zero bytes, 2 bytes of 0xff, then the
        // 4-byte IPv4 address.
        memset(proxyp, 0, 10);
        memset(proxyp + 10, 0xff, 2);
        memcpy(proxyp + 12,(char *) &mInternalProxyAddr.inet.ip, 4);
        // mDestinationFamily should not be updated
        return;
    }
    // Get an OS native handle from a specified FileDesc
    PROsfd osfd = PR_FileDesc2NativeHandle(fd);
    if (osfd == -1) {
        return;
    }
    // Create a new FileDesc with a specified family
    PRFileDesc *tmpfd = PR_OpenTCPSocket(proxyFamily);
    if (!tmpfd) {
        return;
    }
    PROsfd newsd = PR_FileDesc2NativeHandle(tmpfd);
    if (newsd == -1) {
        PR_Close(tmpfd);
        return;
    }
    // Must succeed because PR_FileDesc2NativeHandle succeeded
    fd = PR_GetIdentitiesLayer(fd, PR_NSPR_IO_LAYER);
    MOZ_ASSERT(fd);
    // Swap OS native handles
    PR_ChangeFileDescNativeHandle(fd, newsd);
    PR_ChangeFileDescNativeHandle(tmpfd, osfd);
    // Close temporary FileDesc which is now associated with
    // old OS native handle
    PR_Close(tmpfd);
    mDestinationFamily = proxyFamily;
}
/*
 * Insert layer |fd| into the stack |stack| directly above the layer whose
 * identity is |id|.  For an old-style stack where |id| names the top layer,
 * the contents of |stack| and |fd| are swapped so that the caller's |stack|
 * pointer continues to refer to the top of the stack.
 */
PR_IMPLEMENT(PRStatus) PR_PushIOLayer(
    PRFileDesc *stack, PRDescIdentity id, PRFileDesc *fd)
{
    PRFileDesc *insert = PR_GetIdentitiesLayer(stack, id);

    PR_ASSERT(fd != NULL);
    PR_ASSERT(stack != NULL);
    PR_ASSERT(insert != NULL);
    PR_ASSERT(PR_IO_LAYER_HEAD != id);
    if ((NULL == stack) || (NULL == fd) || (NULL == insert))
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    if (stack == insert)
    {
        /* going on top of the stack */
        /* old-style stack */
        /* Swap the two descriptors' contents so the caller's pointer
         * (stack) keeps pointing at the top layer. */
        PRFileDesc copy = *stack;
        *stack = *fd;
        *fd = copy;
        fd->higher = stack;
        stack->lower = fd;
        stack->higher = NULL;
    } else
    {
        /*
         * going somewhere in the middle of the stack for both old and new
         * style stacks, or going on top of stack for new style stack
         */
        fd->lower = insert;
        fd->higher = insert->higher;
        insert->higher->lower = fd;
        insert->higher = fd;
    }

    return PR_SUCCESS;
}
/*
 * Remove and return the layer with identity |id| from stack |stack|.
 * For an old-style stack whose top layer is being popped, the top two
 * descriptors' contents are swapped so the caller's |stack| pointer still
 * refers to the (new) top; for a new-style stack whose only layer is
 * popped, the anonymous stack head is destroyed as well.
 */
PR_IMPLEMENT(PRFileDesc*) PR_PopIOLayer(PRFileDesc *stack, PRDescIdentity id)
{
    PRFileDesc *extract = PR_GetIdentitiesLayer(stack, id);

    PR_ASSERT(0 != id);
    PR_ASSERT(NULL != stack);
    PR_ASSERT(NULL != extract);
    if ((NULL == stack) || (0 == id) || (NULL == extract))
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return NULL;
    }

    if (extract == stack)
    {
        /* popping top layer of the stack */
        /* old style stack */
        PRFileDesc copy = *stack;
        extract = stack->lower;
        *stack = *extract;
        *extract = copy;
        stack->higher = NULL;
    }
    else if ((PR_IO_LAYER_HEAD == stack->identity)
             && (extract == stack->lower)
             && (extract->lower == NULL))
    {
        /*
         * new style stack
         * popping the only layer in the stack; delete the stack too
         */
        stack->lower = NULL;
        _PR_DestroyIOLayer(stack);
    }
    else
    {
        /* for both kinds of stacks */
        /* Unlink extract from the doubly-linked layer list. */
        extract->lower->higher = extract->higher;
        extract->higher->lower = extract->lower;
    }
    /* Detach the popped layer completely before handing it back. */
    extract->higher = extract->lower = NULL;
    return extract;
}  /* PR_PopIOLayer */
/*
 * Poll implementation for local (user-level) threads.  First gives each
 * layered fd's poll method a chance to report readiness immediately; if
 * nothing is ready, translates the surviving descriptors into
 * _PRUnixPollDesc entries and blocks in _PR_WaitForMultipleFDs, then maps
 * the system-level results back into the caller's PRPollDesc out_flags.
 * Returns the number of ready descriptors, 0 on timeout, or -1 on error.
 */
static PRInt32 LocalThreads(
    PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
    PRPollDesc *pd, *epd;
    PRInt32 ready, pdcnt;
    _PRUnixPollDesc *unixpds, *unixpd;

    /*
     * XXX
     *        PRPollDesc has a PRFileDesc field, fd, while the IOQ
     *        is a list of PRPollQueue structures, each of which contains
     *        a _PRUnixPollDesc. A _PRUnixPollDesc struct contains
     *        the OS file descriptor, osfd, and not a PRFileDesc.
     *        So, we have allocate memory for _PRUnixPollDesc structures,
     *        copy the flags information from the pds list and have pq
     *        point to this list of _PRUnixPollDesc structures.
     *
     *        It would be better if the memory allocation can be avoided.
     */
    unixpd = unixpds = (_PRUnixPollDesc*)
        PR_MALLOC(npds * sizeof(_PRUnixPollDesc));
    if (NULL == unixpds)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return -1;
    }

    ready = 0;
    for (pdcnt = 0, pd = pds, epd = pd + npds; pd < epd; pd++)
    {
        PRFileDesc *bottom;
        PRInt16 in_flags_read = 0, in_flags_write = 0;
        PRInt16 out_flags_read = 0, out_flags_write = 0;

        if ((NULL != pd->fd) && (0 != pd->in_flags))
        {
            /* Query the top layer's poll method separately for read and
             * write interest; it may report immediate readiness. */
            if (pd->in_flags & PR_POLL_READ)
            {
                in_flags_read = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
            }
            if (pd->in_flags & PR_POLL_WRITE)
            {
                in_flags_write = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
            }
            if ((0 != (in_flags_read & out_flags_read))
            || (0 != (in_flags_write & out_flags_write)))
            {
                /* this one's ready right now */
                if (0 == ready)
                {
                    /*
                     * We will have to return without calling the
                     * system poll/select function. So zero the
                     * out_flags fields of all the poll descriptors
                     * before this one.
                     */
                    PRPollDesc *prev;
                    for (prev = pds; prev < pd; prev++)
                    {
                        prev->out_flags = 0;
                    }
                }
                ready += 1;
                pd->out_flags = out_flags_read | out_flags_write;
            }
            else
            {
                pd->out_flags = 0;  /* pre-condition */
                bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);  /* what to do about that? */
                if ((NULL != bottom)
                && (_PR_FILEDESC_OPEN == bottom->secret->state))
                {
                    if (0 == ready)
                    {
                        /* Record the OS fd and translate NSPR poll flags
                         * into the unix-level read/write/except bits,
                         * remembering in out_flags which system condition
                         * maps back to which user-level request. */
                        unixpd->osfd = bottom->secret->md.osfd;
                        unixpd->in_flags = 0;
                        if (in_flags_read & PR_POLL_READ)
                        {
                            unixpd->in_flags |= _PR_UNIX_POLL_READ;
                            pd->out_flags |= _PR_POLL_READ_SYS_READ;
                        }
                        if (in_flags_read & PR_POLL_WRITE)
                        {
                            unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
                            pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
                        }
                        if (in_flags_write & PR_POLL_READ)
                        {
                            unixpd->in_flags |= _PR_UNIX_POLL_READ;
                            pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
                        }
                        if (in_flags_write & PR_POLL_WRITE)
                        {
                            unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
                            pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
                        }
                        if ((in_flags_read | in_flags_write) & PR_POLL_EXCEPT)
                        {
                            unixpd->in_flags |= _PR_UNIX_POLL_EXCEPT;
                        }
                        unixpd++;
                        pdcnt++;
                    }
                }
                else
                {
                    /* Not an NSPR stack or the fd is closed: report
                     * PR_POLL_NVAL and return without blocking. */
                    if (0 == ready)
                    {
                        PRPollDesc *prev;
                        for (prev = pds; prev < pd; prev++)
                        {
                            prev->out_flags = 0;
                        }
                    }
                    ready += 1;  /* this will cause an abrupt return */
                    pd->out_flags = PR_POLL_NVAL;  /* bogii */
                }
            }
        }
    }

    if (0 != ready)
    {
        /* no need to block */
        PR_DELETE(unixpds);
        return ready;
    }

    ready = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);

    /*
     * Copy the out_flags from the _PRUnixPollDesc structures to the
     * user's PRPollDesc structures and free the allocated memory
     */
    unixpd = unixpds;
    for (pd = pds, epd = pd + npds; pd < epd; pd++)
    {
        PRInt16 out_flags = 0;
        if ((NULL != pd->fd) && (0 != pd->in_flags))
        {
            /*
             * take errors from the poll operation,
             * the R/W bits from the request
             */
            if (0 != unixpd->out_flags)
            {
                if (unixpd->out_flags & _PR_UNIX_POLL_READ)
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_READ)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
                        out_flags |= PR_POLL_WRITE;
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_WRITE)
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
                        out_flags |= PR_POLL_WRITE;
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)
                    out_flags |= PR_POLL_EXCEPT;
                if (unixpd->out_flags & _PR_UNIX_POLL_ERR)
                    out_flags |= PR_POLL_ERR;
                if (unixpd->out_flags & _PR_UNIX_POLL_NVAL)
                    out_flags |= PR_POLL_NVAL;
                if (unixpd->out_flags & _PR_UNIX_POLL_HUP)
                    out_flags |= PR_POLL_HUP;
            }
            unixpd++;
        }
        pd->out_flags = out_flags;
    }

    PR_DELETE(unixpds);
    return ready;
}  /* LocalThreads */
/*
 * Poll implementation for native threads, built on select(2).  Mirrors
 * LocalThreads: ask each layer's poll method for immediate readiness,
 * otherwise build fd_sets from the bottom-layer OS descriptors, block in
 * _MD_SELECT (retrying on EINTR with the remaining timeout), then fold the
 * select results back into the caller's out_flags.  On EBADF, scans with
 * fcntl to mark the offending descriptors PR_POLL_NVAL.
 */
static PRInt32 NativeThreadSelect(
    PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
    /*
     * This code is almost a duplicate of w32poll.c's _PR_MD_PR_POLL().
     */
    fd_set rd, wt, ex;
    PRFileDesc *bottom;
    PRPollDesc *pd, *epd;
    PRInt32 maxfd = -1, ready, err;
    PRIntervalTime remaining, elapsed, start;
    struct timeval tv, *tvp = NULL;

    FD_ZERO(&rd);
    FD_ZERO(&wt);
    FD_ZERO(&ex);

    ready = 0;
    for (pd = pds, epd = pd + npds; pd < epd; pd++)
    {
        PRInt16 in_flags_read = 0, in_flags_write = 0;
        PRInt16 out_flags_read = 0, out_flags_write = 0;

        if ((NULL != pd->fd) && (0 != pd->in_flags))
        {
            /* Let the top layer's poll method report immediate readiness
             * for read and write interest separately. */
            if (pd->in_flags & PR_POLL_READ)
            {
                in_flags_read = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
            }
            if (pd->in_flags & PR_POLL_WRITE)
            {
                in_flags_write = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
            }
            if ((0 != (in_flags_read & out_flags_read))
            || (0 != (in_flags_write & out_flags_write)))
            {
                /* this one's ready right now */
                if (0 == ready)
                {
                    /*
                     * We will have to return without calling the
                     * system poll/select function. So zero the
                     * out_flags fields of all the poll descriptors
                     * before this one.
                     */
                    PRPollDesc *prev;
                    for (prev = pds; prev < pd; prev++)
                    {
                        prev->out_flags = 0;
                    }
                }
                ready += 1;
                pd->out_flags = out_flags_read | out_flags_write;
            }
            else
            {
                pd->out_flags = 0;  /* pre-condition */
                /* make sure this is an NSPR supported stack */
                bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);  /* what to do about that? */
                if ((NULL != bottom)
                && (_PR_FILEDESC_OPEN == bottom->secret->state))
                {
                    if (0 == ready)
                    {
                        /* Add the OS fd to the appropriate sets, recording
                         * in out_flags which system condition corresponds
                         * to which user-level request. */
                        PRInt32 osfd = bottom->secret->md.osfd;
                        if (osfd > maxfd) maxfd = osfd;
                        if (in_flags_read & PR_POLL_READ)
                        {
                            pd->out_flags |= _PR_POLL_READ_SYS_READ;
                            FD_SET(osfd, &rd);
                        }
                        if (in_flags_read & PR_POLL_WRITE)
                        {
                            pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
                            FD_SET(osfd, &wt);
                        }
                        if (in_flags_write & PR_POLL_READ)
                        {
                            pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
                            FD_SET(osfd, &rd);
                        }
                        if (in_flags_write & PR_POLL_WRITE)
                        {
                            pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
                            FD_SET(osfd, &wt);
                        }
                        if (pd->in_flags & PR_POLL_EXCEPT) FD_SET(osfd, &ex);
                    }
                }
                else
                {
                    if (0 == ready)
                    {
                        PRPollDesc *prev;
                        for (prev = pds; prev < pd; prev++)
                        {
                            prev->out_flags = 0;
                        }
                    }
                    ready += 1;  /* this will cause an abrupt return */
                    pd->out_flags = PR_POLL_NVAL;  /* bogii */
                }
            }
        }
        else
        {
            pd->out_flags = 0;
        }
    }

    if (0 != ready) return ready;  /* no need to block */

    remaining = timeout;
    start = PR_IntervalNow();

retry:
    if (timeout != PR_INTERVAL_NO_TIMEOUT)
    {
        PRInt32 ticksPerSecond = PR_TicksPerSecond();
        tv.tv_sec = remaining / ticksPerSecond;
        tv.tv_usec = PR_IntervalToMicroseconds( remaining % ticksPerSecond );
        tvp = &tv;
    }

    ready = _MD_SELECT(maxfd + 1, &rd, &wt, &ex, tvp);

    if (ready == -1 && errno == EINTR)
    {
        /* Interrupted by a signal: retry with whatever time remains. */
        if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
        else
        {
            elapsed = (PRIntervalTime) (PR_IntervalNow() - start);
            if (elapsed > timeout) ready = 0;  /* timed out */
            else
            {
                remaining = timeout - elapsed;
                goto retry;
            }
        }
    }

    /*
    ** Now to unravel the select sets back into the client's poll
    ** descriptor list. Is this possibly an area for pissing away
    ** a few cycles or what?
    */
    if (ready > 0)
    {
        ready = 0;
        for (pd = pds, epd = pd + npds; pd < epd; pd++)
        {
            PRInt16 out_flags = 0;
            if ((NULL != pd->fd) && (0 != pd->in_flags))
            {
                PRInt32 osfd;
                bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                osfd = bottom->secret->md.osfd;
                if (FD_ISSET(osfd, &rd))
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_READ)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
                        out_flags |= PR_POLL_WRITE;
                }
                if (FD_ISSET(osfd, &wt))
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
                        out_flags |= PR_POLL_WRITE;
                }
                if (FD_ISSET(osfd, &ex)) out_flags |= PR_POLL_EXCEPT;
            }
            pd->out_flags = out_flags;
            if (out_flags) ready++;
        }
        PR_ASSERT(ready > 0);
    }
    else if (ready < 0)
    {
        err = _MD_ERRNO();
        if (err == EBADF)
        {
            /* Find the bad fds */
            ready = 0;
            for (pd = pds, epd = pd + npds; pd < epd; pd++)
            {
                pd->out_flags = 0;
                if ((NULL != pd->fd) && (0 != pd->in_flags))
                {
                    bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                    /* fcntl F_GETFL fails iff the OS fd is invalid. */
                    if (fcntl(bottom->secret->md.osfd, F_GETFL, 0) == -1)
                    {
                        pd->out_flags = PR_POLL_NVAL;
                        ready++;
                    }
                }
            }
            PR_ASSERT(ready > 0);
        }
        else _PR_MD_MAP_SELECT_ERROR(err);
    }

    return ready;
}  /* NativeThreadSelect */
/*
 * Return the memio layer's private data for the given stack, located via
 * the memio layer identity.
 */
memio_Private *memio_GetSecret(PRFileDesc *fd)
{
    PRFileDesc *layer = PR_GetIdentitiesLayer(fd, memio_identity);

    return (memio_Private *)layer->secret;
}
/*
 * Record the peer's network address in the memio layer's private data.
 */
void memio_SetPeerName(PRFileDesc *fd, const PRNetAddr *peername)
{
    PRFileDesc *layer = PR_GetIdentitiesLayer(fd, memio_identity);

    layer->secret->peername = *peername;
}
/*
 * Cancel an entire wait group: stop new business, wake all waiters, mark
 * every pending descriptor interrupted (on WINNT by closing the underlying
 * sockets to abort the overlapped I/O, then sleeping until the in-flight
 * operations drain), and return the first completed descriptor from the
 * io_ready list, or NULL (with PR_GROUP_EMPTY_ERROR) if there is none.
 */
PR_IMPLEMENT(PRRecvWait*) PR_CancelWaitGroup(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRRecvWait *recv_wait = NULL;
#ifdef WINNT
    _MDOverlapped *overlapped;
    PRRecvWait **end;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#endif

    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return NULL;
    }

    PR_Lock(group->ml);
    if (_prmw_stopped != group->state)
    {
        if (_prmw_running == group->state)
            group->state = _prmw_stopping;  /* so nothing new comes in */
        if (0 == group->waiting_threads)  /* is there anybody else? */
            group->state = _prmw_stopped;  /* we can stop right now */
        else
        {
            /* Wake everyone so they notice the state change and leave. */
            PR_NotifyAllCondVar(group->new_business);
            PR_NotifyAllCondVar(group->io_complete);
        }
        while (_prmw_stopped != group->state)
            (void)PR_WaitCondVar(group->mw_manage, PR_INTERVAL_NO_TIMEOUT);
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif

    /* make all the existing descriptors look done/interrupted */
#ifdef WINNT
    end = &group->waiter->recv_wait + group->waiter->length;
    for (desc = &group->waiter->recv_wait; desc < end; ++desc)
    {
        if (NULL != *desc)
        {
            /* CAS the outcome to INTERRUPT; only act if it was pending. */
            if (InterlockedCompareExchange((LONG *)&(*desc)->outcome,
                (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                PRFileDesc *bottom = PR_GetIdentitiesLayer(
                    (*desc)->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                if (NULL == bottom)
                {
                    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
                    goto invalid_arg;
                }
                bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
                fprintf(stderr, "cancel wait group: closing socket\n");
#endif
                /* Closing the socket aborts the outstanding overlapped
                 * read so the completion port can drain. */
                if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
                {
                    fprintf(stderr, "closesocket failed: %d\n",
                        WSAGetLastError());
                    exit(1);
                }
            }
        }
    }
    /* Sleep until all aborted I/O operations have completed. */
    while (group->waiter->count > 0)
    {
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        /* Drop both locks while blocked; reacquire in the same order. */
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
    }
#else
    for (desc = &group->waiter->recv_wait; group->waiter->count > 0; ++desc)
    {
        PR_ASSERT(desc < &group->waiter->recv_wait + group->waiter->length);
        if (NULL != *desc)
            _MW_DoneInternal(group, desc, PR_MW_INTERRUPT);
    }
#endif

    /* take first element of finished list and return it or NULL */
    if (PR_CLIST_IS_EMPTY(&group->io_ready))
        PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
    else
    {
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        PR_REMOVE_AND_INIT_LINK(head);
#ifdef WINNT
        /* Recover the containing overlapped record from the list link. */
        overlapped = (_MDOverlapped *)
            ((char *)head - offsetof(_MDOverlapped, data));
        head = &overlapped->data.mw.desc->internal;
        if (NULL != overlapped->data.mw.timer)
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
        recv_wait = (PRRecvWait*)head;
    }
#ifdef WINNT
invalid_arg:
    _PR_MD_UNLOCK(&group->mdlock);
#endif
    PR_Unlock(group->ml);

    return recv_wait;
}  /* PR_CancelWaitGroup */
// add SOCKS IO layer to an existing socket
//
// On first use, probes for system IPv6 support and registers the SOCKS
// layer identity/methods; then creates a layer stub backed by a new
// nsSOCKSSocketInfo and pushes it on top of |fd|.  Returns the info object
// (addref'd) through |info|.  Only SOCKS versions 4 and 5 are accepted.
nsresult
nsSOCKSIOLayerAddToSocket(int32_t family,
                          const char *host,
                          int32_t port,
                          const char *proxyHost,
                          int32_t proxyPort,
                          int32_t socksVersion,
                          uint32_t flags,
                          PRFileDesc *fd,
                          nsISupports** info)
{
    NS_ENSURE_TRUE((socksVersion == 4) || (socksVersion == 5),
                   NS_ERROR_NOT_INITIALIZED);

    if (firstTime)
    {
        //XXX hack until NSPR provides an official way to detect system IPv6
        // support (bug 388519)
        PRFileDesc *tmpfd = PR_OpenTCPSocket(PR_AF_INET6);
        if (!tmpfd) {
            ipv6Supported = false;
        } else {
            // If the system does not support IPv6, NSPR will push
            // IPv6-to-IPv4 emulation layer onto the native layer
            ipv6Supported = PR_GetIdentitiesLayer(tmpfd, PR_NSPR_IO_LAYER) == tmpfd;
            PR_Close(tmpfd);
        }

        // One-time registration of the SOCKS layer identity and its
        // method table (default methods with our overrides).
        nsSOCKSIOLayerIdentity = PR_GetUniqueIdentity("SOCKS layer");
        nsSOCKSIOLayerMethods = *PR_GetDefaultIOMethods();

        nsSOCKSIOLayerMethods.connect = nsSOCKSIOLayerConnect;
        nsSOCKSIOLayerMethods.connectcontinue = nsSOCKSIOLayerConnectContinue;
        nsSOCKSIOLayerMethods.poll = nsSOCKSIOLayerPoll;
        nsSOCKSIOLayerMethods.bind = nsSOCKSIOLayerBind;
        nsSOCKSIOLayerMethods.acceptread = nsSOCKSIOLayerAcceptRead;
        nsSOCKSIOLayerMethods.getsockname = nsSOCKSIOLayerGetName;
        nsSOCKSIOLayerMethods.getpeername = nsSOCKSIOLayerGetPeerName;
        nsSOCKSIOLayerMethods.accept = nsSOCKSIOLayerAccept;
        nsSOCKSIOLayerMethods.listen = nsSOCKSIOLayerListen;
        nsSOCKSIOLayerMethods.close = nsSOCKSIOLayerClose;

        firstTime = false;

#if defined(PR_LOGGING)
        gSOCKSLog = PR_NewLogModule("SOCKS");
#endif
    }

    LOGDEBUG(("Entering nsSOCKSIOLayerAddToSocket()."));

    PRFileDesc * layer;
    PRStatus rv;

    layer = PR_CreateIOLayerStub(nsSOCKSIOLayerIdentity,
                                 &nsSOCKSIOLayerMethods);
    if (! layer)
    {
        LOGERROR(("PR_CreateIOLayerStub() failed."));
        return NS_ERROR_FAILURE;
    }

    nsSOCKSSocketInfo * infoObject = new nsSOCKSSocketInfo();
    if (!infoObject)
    {
        // clean up IOLayerStub
        LOGERROR(("Failed to create nsSOCKSSocketInfo()."));
        PR_DELETE(layer);
        return NS_ERROR_FAILURE;
    }

    NS_ADDREF(infoObject);
    infoObject->Init(socksVersion, family, proxyHost, proxyPort, host, flags);
    // The layer's private data is the (addref'd) socket info object.
    layer->secret = (PRFilePrivate*) infoObject;
    rv = PR_PushIOLayer(fd, PR_GetLayersIdentity(fd), layer);

    if (rv == PR_FAILURE) {
        LOGERROR(("PR_PushIOLayer() failed. rv = %x.", rv));
        NS_RELEASE(infoObject);
        PR_DELETE(layer);
        return NS_ERROR_FAILURE;
    }

    *info = static_cast<nsISOCKSSocketInfo*>(infoObject);
    NS_ADDREF(*info);
    return NS_OK;
}
/*
 * Add a recv-wait descriptor to a wait group.  Hashes the descriptor into
 * the group's waiter table (expanding the table as needed) and notifies
 * waiting threads of new business.  On WINNT, additionally arms an optional
 * timeout timer and issues the overlapped ReadFile on the bottom layer's
 * OS handle; failures unwind the hash entry, timer, and overlapped record.
 */
PR_IMPLEMENT(PRStatus) PR_AddWaitFileDesc(
    PRWaitGroup *group, PRRecvWait *desc)
{
    _PR_HashStory hrv;
    PRStatus rv = PR_FAILURE;
#ifdef WINNT
    _MDOverlapped *overlapped;
    HANDLE hFile;
    BOOL bResult;
    DWORD dwError;
    PRFileDesc *bottom;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();
    if ((NULL == group) && (NULL == (group = MW_Init2())))
    {
        return rv;
    }

    PR_ASSERT(NULL != desc->fd);

    desc->outcome = PR_MW_PENDING;  /* nice, well known value */
    desc->bytesRecv = 0;  /* likewise, though this value is ambiguious */

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        /* Not allowed to add after cancelling the group */
        desc->outcome = PR_MW_INTERRUPT;
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        PR_Unlock(group->ml);
        return rv;
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif

    /*
    ** If the waiter count is zero at this point, there's no telling
    ** how long we've been idle. Therefore, initialize the beginning
    ** of the timing interval. As long as the list doesn't go empty,
    ** it will maintain itself.
    */
    if (0 == group->waiter->count)
        group->last_poll = PR_IntervalNow();

    do
    {
        hrv = MW_AddHashInternal(desc, group->waiter);
        if (_prmw_rehash != hrv) break;
        hrv = MW_ExpandHashInternal(group);  /* gruesome */
        if (_prmw_success != hrv) break;
    } while (PR_TRUE);

#ifdef WINNT
    _PR_MD_UNLOCK(&group->mdlock);
#endif

    PR_NotifyCondVar(group->new_business);  /* tell the world */
    rv = (_prmw_success == hrv) ? PR_SUCCESS : PR_FAILURE;
    PR_Unlock(group->ml);

#ifdef WINNT
    overlapped = PR_NEWZAP(_MDOverlapped);
    if (NULL == overlapped)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        NT_HashRemove(group, desc->fd);
        return rv;
    }
    overlapped->ioModel = _MD_MultiWaitIO;
    overlapped->data.mw.desc = desc;
    overlapped->data.mw.group = group;
    if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
    {
        /* Arm a timer that will abort the I/O if it expires first. */
        overlapped->data.mw.timer = CreateTimer(
            desc->timeout, NT_TimeProc, overlapped);
        if (0 == overlapped->data.mw.timer)
        {
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
            /*
             * XXX It appears that a maximum of 16 timer events can
             * be outstanding. GetLastError() returns 0 when I try it.
             */
            PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, GetLastError());
            return PR_FAILURE;
        }
    }

    /* Reach to the bottom layer to get the OS fd */
    bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
    PR_ASSERT(NULL != bottom);
    if (NULL == bottom)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }
    hFile = (HANDLE)bottom->secret->md.osfd;
    if (!bottom->secret->md.io_model_committed)
    {
        /* Bind the handle to the I/O completion port exactly once. */
        PRInt32 st;
        st = _md_Associate(hFile);
        PR_ASSERT(0 != st);
        bottom->secret->md.io_model_committed = PR_TRUE;
    }
    bResult = ReadFile(hFile,
        desc->buffer.start,
        (DWORD)desc->buffer.length,
        NULL,
        &overlapped->overlapped);
    if (FALSE == bResult && (dwError = GetLastError()) != ERROR_IO_PENDING)
    {
        /* Immediate failure (not just "pending"): undo the timer and
         * hash entry, but only if the timeout path hasn't claimed the
         * outcome already. */
        if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
        {
            if (InterlockedCompareExchange((LONG *)&desc->outcome,
                (LONG)PR_MW_FAILURE, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                CancelTimer(overlapped->data.mw.timer);
            }
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
        }
        _PR_MD_MAP_READ_ERROR(dwError);
        rv = PR_FAILURE;
    }
#endif

    return rv;
}  /* PR_AddWaitFileDesc */
/*
** Poll a set of NSPR poll descriptors using the native poll() on behalf
** of a native (non-user-level) thread.
**
** Each layered fd is first consulted via its own poll method; a layer may
** declare the fd ready immediately (short-circuiting the system call) or
** remap the requested PR_POLL_READ/PR_POLL_WRITE bits onto different
** system-level events, recorded in out_flags as _PR_POLL_*_SYS_* bits so
** that revents can be translated back afterwards.
**
** Parameters:
**   pds     - array of poll descriptors (fd may be NULL / in_flags 0 to
**             skip an entry).
**   npds    - number of entries in pds.
**   timeout - PR_INTERVAL_NO_WAIT, PR_INTERVAL_NO_TIMEOUT, or an interval.
**
** Returns the number of ready descriptors, 0 on timeout, or -1 on error
** (with the NSPR error set).  Interrupted polls (EINTR) are retried with
** the remaining time.
*/
static PRInt32 NativeThreadPoll(
    PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
    /*
     * This function is mostly duplicated from ptio.s's PR_Poll().
     */
    PRInt32 ready = 0;
    /*
     * For restarting poll() if it is interrupted by a signal.
     * We use these variables to figure out how much time has
     * elapsed and how much of the timeout still remains.
     */
    PRIntn index, msecs;
    struct pollfd *syspoll = NULL;
    PRIntervalTime start, elapsed, remaining;

    syspoll = (struct pollfd*)PR_MALLOC(npds * sizeof(struct pollfd));
    if (NULL == syspoll)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return -1;
    }
    for (index = 0; index < npds; ++index)
    {
        PRFileDesc *bottom;
        PRInt16 in_flags_read = 0, in_flags_write = 0;
        PRInt16 out_flags_read = 0, out_flags_write = 0;

        if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
        {
            /*
            ** Ask the top layer how the read/write interest should be
            ** polled at the system level; out_flags_* reports anything
            ** that is already satisfiable without blocking.
            */
            if (pds[index].in_flags & PR_POLL_READ)
            {
                in_flags_read = (pds[index].fd->methods->poll)(
                    pds[index].fd,
                    pds[index].in_flags & ~PR_POLL_WRITE,
                    &out_flags_read);
            }
            if (pds[index].in_flags & PR_POLL_WRITE)
            {
                in_flags_write = (pds[index].fd->methods->poll)(
                    pds[index].fd,
                    pds[index].in_flags & ~PR_POLL_READ,
                    &out_flags_write);
            }
            if ((0 != (in_flags_read & out_flags_read))
            || (0 != (in_flags_write & out_flags_write)))
            {
                /* this one is ready right now */
                if (0 == ready)
                {
                    /*
                     * We will return without calling the system
                     * poll function. So zero the out_flags
                     * fields of all the poll descriptors before
                     * this one.
                     */
                    int i;
                    for (i = 0; i < index; i++)
                    {
                        pds[i].out_flags = 0;
                    }
                }
                ready += 1;
                pds[index].out_flags = out_flags_read | out_flags_write;
            }
            else
            {
                pds[index].out_flags = 0;  /* pre-condition */
                /* now locate the NSPR layer at the bottom of the stack */
                bottom = PR_GetIdentitiesLayer(pds[index].fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);  /* what to do about that? */
                if ((NULL != bottom)
                && (_PR_FILEDESC_OPEN == bottom->secret->state))
                {
                    if (0 == ready)
                    {
                        /*
                        ** Record, in out_flags, which system-level event
                        ** (read/write) each requested PR_POLL_* interest
                        ** was remapped to, so revents can be translated
                        ** back after the poll.
                        */
                        syspoll[index].fd = bottom->secret->md.osfd;
                        syspoll[index].events = 0;  /* pre-condition */
                        if (in_flags_read & PR_POLL_READ)
                        {
                            pds[index].out_flags |= _PR_POLL_READ_SYS_READ;
                            syspoll[index].events |= POLLIN;
                        }
                        if (in_flags_read & PR_POLL_WRITE)
                        {
                            pds[index].out_flags |= _PR_POLL_READ_SYS_WRITE;
                            syspoll[index].events |= POLLOUT;
                        }
                        if (in_flags_write & PR_POLL_READ)
                        {
                            pds[index].out_flags |= _PR_POLL_WRITE_SYS_READ;
                            syspoll[index].events |= POLLIN;
                        }
                        if (in_flags_write & PR_POLL_WRITE)
                        {
                            pds[index].out_flags |= _PR_POLL_WRITE_SYS_WRITE;
                            syspoll[index].events |= POLLOUT;
                        }
                        if (pds[index].in_flags & PR_POLL_EXCEPT)
                            syspoll[index].events |= POLLPRI;
                    }
                }
                else
                {
                    /* Not an NSPR stack, or the fd is closed: report NVAL. */
                    if (0 == ready)
                    {
                        int i;
                        for (i = 0; i < index; i++)
                        {
                            pds[i].out_flags = 0;
                        }
                    }
                    ready += 1;  /* this will cause an abrupt return */
                    pds[index].out_flags = PR_POLL_NVAL;  /* bogii */
                }
            }
        }
        else
        {
            /* make poll() ignore this entry */
            syspoll[index].fd = -1;
            syspoll[index].events = 0;
            pds[index].out_flags = 0;
        }
    }

    if (0 == ready)
    {
        /* Nothing ready immediately; block in the native poll(). */
        switch (timeout)
        {
            case PR_INTERVAL_NO_WAIT: msecs = 0; break;
            case PR_INTERVAL_NO_TIMEOUT: msecs = -1; break;
            default:
                /* `start` anchors the EINTR-retry elapsed-time math. */
                msecs = PR_IntervalToMilliseconds(timeout);
                start = PR_IntervalNow();
        }

retry:
        ready = _MD_POLL(syspoll, npds, msecs);
        if (-1 == ready)
        {
            PRIntn oserror = errno;

            if (EINTR == oserror)
            {
                /* Interrupted by a signal: retry with remaining time. */
                if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
                else if (timeout == PR_INTERVAL_NO_WAIT) ready = 0;
                else
                {
                    elapsed = (PRIntervalTime)(PR_IntervalNow() - start);
                    if (elapsed > timeout) ready = 0;  /* timed out */
                    else
                    {
                        remaining = timeout - elapsed;
                        msecs = PR_IntervalToMilliseconds(remaining);
                        goto retry;
                    }
                }
            }
            else _PR_MD_MAP_POLL_ERROR(oserror);
        }
        else if (ready > 0)
        {
            /* Translate revents back through the _SYS_ remapping bits. */
            for (index = 0; index < npds; ++index)
            {
                PRInt16 out_flags = 0;
                if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
                {
                    if (0 != syspoll[index].revents)
                    {
                        /*
                        ** Set up the out_flags so that it contains the
                        ** bits that the highest layer thinks are nice
                        ** to have. Then the client of that layer will
                        ** call the appropriate I/O function and maybe
                        ** the protocol will make progress.
                        */
                        if (syspoll[index].revents & POLLIN)
                        {
                            if (pds[index].out_flags & _PR_POLL_READ_SYS_READ)
                            {
                                out_flags |= PR_POLL_READ;
                            }
                            if (pds[index].out_flags & _PR_POLL_WRITE_SYS_READ)
                            {
                                out_flags |= PR_POLL_WRITE;
                            }
                        }
                        if (syspoll[index].revents & POLLOUT)
                        {
                            if (pds[index].out_flags & _PR_POLL_READ_SYS_WRITE)
                            {
                                out_flags |= PR_POLL_READ;
                            }
                            if (pds[index].out_flags & _PR_POLL_WRITE_SYS_WRITE)
                            {
                                out_flags |= PR_POLL_WRITE;
                            }
                        }
                        if (syspoll[index].revents & POLLPRI)
                            out_flags |= PR_POLL_EXCEPT;
                        if (syspoll[index].revents & POLLERR)
                            out_flags |= PR_POLL_ERR;
                        if (syspoll[index].revents & POLLNVAL)
                            out_flags |= PR_POLL_NVAL;
                        if (syspoll[index].revents & POLLHUP)
                            out_flags |= PR_POLL_HUP;
                    }
                }
                pds[index].out_flags = out_flags;
            }
        }
    }

    PR_DELETE(syspoll);
    return ready;
}  /* NativeThreadPoll */
/*
** OS/2 machine-dependent implementation of PR_Poll().
**
** Works like NativeThreadPoll(): each layered fd's poll method may declare
** the fd ready immediately (short-circuiting the system call) or remap the
** requested PR_POLL_READ/PR_POLL_WRITE interest onto system-level events,
** recorded in out_flags as _PR_POLL_*_SYS_* bits.  The blocking primitive
** is either bsdselect() (when BSD_SELECT is defined) or os2_select() on a
** compacted array of sockets partitioned into read/write/except sections.
**
** Parameters:
**   pds     - array of poll descriptors (fd NULL / in_flags 0 skips entry).
**   npds    - number of entries in pds.
**   timeout - PR_INTERVAL_NO_WAIT, PR_INTERVAL_NO_TIMEOUT, or an interval.
**
** Returns the number of ready descriptors, 0 on timeout, -1 on error (with
** the NSPR error set).  EINTR causes a retry with the remaining time.
**
** Fix: the scratch `socks` allocation was cast to `int` instead of
** `int *` before being assigned to the pointer — only well-formed where
** int and pointer sizes coincide.  Now cast to the correct pointer type.
*/
PRInt32 _PR_MD_PR_POLL(PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
#ifdef BSD_SELECT
    fd_set rd, wt, ex;
#else
    int rd, wt, ex;           /* counts of read/write/except sockets */
    int* socks;               /* scratch: npds slots per interest class */
    unsigned long msecs;
    int i, j;
#endif
    PRFileDesc *bottom;
    PRPollDesc *pd, *epd;
    PRInt32 maxfd = -1, ready, err;
    PRIntervalTime remaining, elapsed, start;

#ifdef BSD_SELECT
    struct timeval tv, *tvp = NULL;

    FD_ZERO(&rd);
    FD_ZERO(&wt);
    FD_ZERO(&ex);
#else
    rd = 0;
    wt = 0;
    ex = 0;
    /* One npds-sized section per interest class: [read][write][except]. */
    socks = (int *) PR_MALLOC( npds * 3 * sizeof(int) );
    if (!socks)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return -1;
    }
#endif

    ready = 0;
    for (pd = pds, epd = pd + npds; pd < epd; pd++)
    {
        PRInt16 in_flags_read = 0, in_flags_write = 0;
        PRInt16 out_flags_read = 0, out_flags_write = 0;

        if ((NULL != pd->fd) && (0 != pd->in_flags))
        {
            /*
            ** Consult the top layer; out_flags_* reports interest that is
            ** already satisfiable without blocking.
            */
            if (pd->in_flags & PR_POLL_READ)
            {
                in_flags_read = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
            }
            if (pd->in_flags & PR_POLL_WRITE)
            {
                in_flags_write = (pd->fd->methods->poll)(
                    pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
            }
            if ((0 != (in_flags_read & out_flags_read))
            || (0 != (in_flags_write & out_flags_write)))
            {
                /* this one's ready right now */
                if (0 == ready)
                {
                    /*
                     * We will have to return without calling the
                     * system poll/select function. So zero the
                     * out_flags fields of all the poll descriptors
                     * before this one.
                     */
                    PRPollDesc *prev;
                    for (prev = pds; prev < pd; prev++)
                    {
                        prev->out_flags = 0;
                    }
                }
                ready += 1;
                pd->out_flags = out_flags_read | out_flags_write;
            }
            else
            {
                pd->out_flags = 0;  /* pre-condition */

                /* make sure this is an NSPR supported stack */
                bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);  /* what to do about that? */
                if ((NULL != bottom)
                && (_PR_FILEDESC_OPEN == bottom->secret->state))
                {
                    if (0 == ready)
                    {
                        /*
                        ** Register the OS socket under each remapped
                        ** interest, remembering the mapping in out_flags
                        ** for post-select translation.
                        */
                        PRInt32 osfd = bottom->secret->md.osfd;
                        if (osfd > maxfd) maxfd = osfd;
                        if (in_flags_read & PR_POLL_READ)
                        {
                            pd->out_flags |= _PR_POLL_READ_SYS_READ;
#ifdef BSD_SELECT
                            FD_SET(osfd, &rd);
#else
                            socks[rd] = osfd;
                            rd++;
#endif
                        }
                        if (in_flags_read & PR_POLL_WRITE)
                        {
                            pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
#ifdef BSD_SELECT
                            FD_SET(osfd, &wt);
#else
                            socks[npds+wt] = osfd;
                            wt++;
#endif
                        }
                        if (in_flags_write & PR_POLL_READ)
                        {
                            pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
#ifdef BSD_SELECT
                            FD_SET(osfd, &rd);
#else
                            socks[rd] = osfd;
                            rd++;
#endif
                        }
                        if (in_flags_write & PR_POLL_WRITE)
                        {
                            pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
#ifdef BSD_SELECT
                            FD_SET(osfd, &wt);
#else
                            socks[npds+wt] = osfd;
                            wt++;
#endif
                        }
                        if (pd->in_flags & PR_POLL_EXCEPT)
                        {
#ifdef BSD_SELECT
                            FD_SET(osfd, &ex);
#else
                            socks[npds*2+ex] = osfd;
                            ex++;
#endif
                        }
                    }
                }
                else
                {
                    /* Not an NSPR stack, or fd closed: report NVAL. */
                    if (0 == ready)
                    {
                        PRPollDesc *prev;
                        for (prev = pds; prev < pd; prev++)
                        {
                            prev->out_flags = 0;
                        }
                    }
                    ready += 1;  /* this will cause an abrupt return */
                    pd->out_flags = PR_POLL_NVAL;  /* bogii */
                }
            }
        }
        else
        {
            pd->out_flags = 0;
        }
    }

    if (0 != ready)
    {
#ifndef BSD_SELECT
        PR_Free(socks);
#endif
        return ready;  /* no need to block */
    }

    /* `start` anchors the EINTR-retry elapsed-time computation. */
    remaining = timeout;
    start = PR_IntervalNow();

retry:
#ifdef BSD_SELECT
    if (timeout != PR_INTERVAL_NO_TIMEOUT)
    {
        PRInt32 ticksPerSecond = PR_TicksPerSecond();
        tv.tv_sec = remaining / ticksPerSecond;
        tv.tv_usec = PR_IntervalToMicroseconds( remaining % ticksPerSecond );
        tvp = &tv;
    }

    ready = bsdselect(maxfd + 1, &rd, &wt, &ex, tvp);
#else
    switch (timeout)
    {
        case PR_INTERVAL_NO_WAIT:
            msecs = 0;
            break;
        case PR_INTERVAL_NO_TIMEOUT:
            msecs = -1;
            break;
        default:
            msecs = PR_IntervalToMilliseconds(remaining);
    }

    /*
    ** compact array: squeeze the write and except sections down so that
    ** os2_select() sees [rd reads][wt writes][ex excepts] contiguously.
    */
    for( i = rd, j = npds; j < npds+wt; i++,j++ )
        socks[i] = socks[j];
    for( i = rd+wt, j = npds*2; j < npds*2+ex; i++,j++ )
        socks[i] = socks[j];

    ready = os2_select(socks, rd, wt, ex, msecs);
#endif

    if (ready == -1 && errno == EINTR)
    {
        /* Interrupted by a signal: retry with the remaining time. */
        if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
        else
        {
            elapsed = (PRIntervalTime) (PR_IntervalNow() - start);
            if (elapsed > timeout) ready = 0;  /* timed out */
            else
            {
                remaining = timeout - elapsed;
                goto retry;
            }
        }
    }

    /*
    ** Now to unravel the select sets back into the client's poll
    ** descriptor list. Is this possibly an area for pissing away
    ** a few cycles or what?
    */
    if (ready > 0)
    {
        ready = 0;
        for (pd = pds, epd = pd + npds; pd < epd; pd++)
        {
            PRInt16 out_flags = 0;
            if ((NULL != pd->fd) && (0 != pd->in_flags))
            {
                PRInt32 osfd;
                bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                osfd = bottom->secret->md.osfd;
#ifdef BSD_SELECT
                if (FD_ISSET(osfd, &rd))
#else
                if( IsSocketSet(osfd, socks, 0, rd) )
#endif
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_READ)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
                        out_flags |= PR_POLL_WRITE;
                }

#ifdef BSD_SELECT
                if (FD_ISSET(osfd, &wt))
#else
                if( IsSocketSet(osfd, socks, rd, wt) )
#endif
                {
                    if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
                        out_flags |= PR_POLL_READ;
                    if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
                        out_flags |= PR_POLL_WRITE;
                }

#ifdef BSD_SELECT
                if (FD_ISSET(osfd, &ex))
#else
                if( IsSocketSet(osfd, socks, rd+wt, ex) )
#endif
                {
                    out_flags |= PR_POLL_EXCEPT;
                }
            }
            pd->out_flags = out_flags;
            if (out_flags) ready++;
        }
        PR_ASSERT(ready > 0);
    }
    else if (ready < 0)
    {
        err = _MD_ERRNO();
        if (err == EBADF)
        {
            /* Find the bad fds */
            int optval;
            int optlen = sizeof(optval);
            ready = 0;
            for (pd = pds, epd = pd + npds; pd < epd; pd++)
            {
                pd->out_flags = 0;
                if ((NULL != pd->fd) && (0 != pd->in_flags))
                {
                    bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
                    /* A non-socket fd is the one select() choked on. */
                    if (getsockopt(bottom->secret->md.osfd, SOL_SOCKET,
                        SO_TYPE, (char *) &optval, &optlen) == -1)
                    {
                        PR_ASSERT(sock_errno() == ENOTSOCK);
                        if (sock_errno() == ENOTSOCK)
                        {
                            pd->out_flags = PR_POLL_NVAL;
                            ready++;
                        }
                    }
                }
            }
            PR_ASSERT(ready > 0);
        }
        else
            _PR_MD_MAP_SELECT_ERROR(err);
    }

#ifndef BSD_SELECT
    PR_Free(socks);
#endif
    return ready;
}
/*
** Cancel an individual receive-wait descriptor in a wait group.
**
** On WINNT the cancellation races against completion/timeout via an
** interlocked exchange on desc->outcome: whoever flips it away from
** PR_MW_PENDING owns the descriptor, and cancellation aborts the
** outstanding overlapped I/O by marking the fd closed and calling
** closesocket() on the OS socket.  On other platforms the descriptor is
** either removed from the group's wait table (marked PR_MW_INTERRUPT) or,
** if already complete, found on the io_ready list and left alone.
**
** Parameters:
**   group - the wait group; NULL means the default group (mw_state->group).
**   desc  - the descriptor to cancel.
**
** Returns PR_SUCCESS if the descriptor was cancelled or had already
** completed; PR_FAILURE (with the NSPR error set) otherwise.
*/
PR_IMPLEMENT(PRStatus) PR_CancelWaitFileDesc(PRWaitGroup *group, PRRecvWait *desc)
{
#if !defined(WINNT)
    PRRecvWait **recv_wait;
#endif
    PRStatus rv = PR_SUCCESS;
    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        /* Group already cancelled/torn down: nothing to cancel into. */
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        rv = PR_FAILURE;
        goto unlock;
    }

#ifdef WINNT
    /*
    ** Claim the descriptor: only proceed if we beat completion and the
    ** timeout proc (NT_TimeProc) to flipping outcome off PR_MW_PENDING.
    */
    if (InterlockedCompareExchange((LONG *)&desc->outcome,
        (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING) == (LONG)PR_MW_PENDING)
    {
        PRFileDesc *bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
        PR_ASSERT(NULL != bottom);
        if (NULL == bottom)
        {
            /* NOTE(review): rv is still PR_SUCCESS here despite the error
             * being set — confirm whether this path should fail instead. */
            PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
            goto unlock;
        }
        /* Close the OS socket to abort the outstanding async request. */
        bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
        fprintf(stderr, "cancel wait recv: closing socket\n");
#endif
        if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
        {
            /* NOTE(review): exit(1) aborts the whole process on a failed
             * closesocket — drastic; verify this is intentional. */
            fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError());
            exit(1);
        }
    }
#else
    if (NULL != (recv_wait = _MW_LookupInternal(group, desc->fd)))
    {
        /* it was in the wait table */
        _MW_DoneInternal(group, recv_wait, PR_MW_INTERRUPT);
        goto unlock;
    }
    if (!PR_CLIST_IS_EMPTY(&group->io_ready))
    {
        /* is it already complete? */
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        do
        {
            PRRecvWait *done = (PRRecvWait*)head;
            if (done == desc) goto unlock;  /* completed: success, no-op */
            head = PR_NEXT_LINK(head);
        } while (head != &group->io_ready);
    }
    /* Neither waiting nor complete: the caller handed us a stranger. */
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    rv = PR_FAILURE;

#endif

unlock:
    PR_Unlock(group->ml);
    return rv;
}  /* PR_CancelWaitFileDesc */