nsresult
nsSocketTransportService::DetachSocket(SocketContext *listHead, SocketContext *sock)
{
    SOCKET_LOG(("nsSocketTransportService::DetachSocket [handler=%p]\n", sock->mHandler));
    MOZ_ASSERT((listHead == mActiveList) || (listHead == mIdleList),
               "DetachSocket invalid head");

    // inform the handler that this socket is going away
    sock->mHandler->OnSocketDetached(sock->mFD);
    mSentBytesCount += sock->mHandler->ByteCountSent();
    mReceivedBytesCount += sock->mHandler->ByteCountReceived();

    // cleanup
    sock->mFD = nullptr;
    NS_RELEASE(sock->mHandler);

    if (listHead == mActiveList)
        RemoveFromPollList(sock);
    else
        RemoveFromIdleList(sock);

    // NOTE: sock is now an invalid pointer
    
    //
    // notify the first element on the pending socket queue...
    //
    nsCOMPtr<nsIRunnable> event;
    LinkedRunnableEvent *runnable = mPendingSocketQueue.getFirst();
    if (runnable) {
        event = runnable->TakeEvent();
        runnable->remove();
        delete runnable;
    }
    if (event) {
        // move event from pending queue to dispatch queue
        return Dispatch(event, NS_DISPATCH_NORMAL);
    }
    return NS_OK;
}
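DetachSocket frees a slot in the socket list, which is why it finishes by draining one entry from the pending queue: runnables that arrived while the service was at capacity wait there until a detach makes room. A minimal sketch of the enqueue side this implies (the helper name is hypothetical; LinkedRunnableEvent and mPendingSocketQueue are from the code above):

// Sketch: park an event while the socket list is full; DetachSocket
// later dispatches one queued event per freed slot.
nsresult
nsSocketTransportService::QueuePendingEventSketch(nsIRunnable *event)
{
    LinkedRunnableEvent *runnable = new LinkedRunnableEvent(event);
    mPendingSocketQueue.insertBack(runnable);
    return NS_OK;
}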
NS_IMETHODIMP
EventTokenBucket::Notify(nsITimer *timer)
{
    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

#ifdef XP_WIN
    if (timer == mFineGrainResetTimer) {
        FineGrainResetTimerNotify();
        return NS_OK;
    }
#endif

    SOCKET_LOG(("EventTokenBucket::Notify() %p\n", this));
    mTimerArmed = false;
    if (mStopped)
        return NS_OK;

    UpdateCredits();
    DispatchEvents();
    UpdateTimer();

    return NS_OK;
}
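Notify refills the bucket before draining it, so the order UpdateCredits → DispatchEvents → UpdateTimer matters: credits earned while the timer was pending are banked first, then spent. A self-contained sketch of the refill arithmetic such a bucket typically performs (the parameter names are assumptions, not EventTokenBucket's real members):

// Sketch: classic token-bucket refill. Credits accrue with wall time and
// are clamped to a burst ceiling so idle periods cannot bank unlimited credit.
static void
UpdateCreditsSketch(uint64_t &aCredits, TimeStamp &aLastUpdate,
                    uint64_t aCreditsPerSecond, uint64_t aMaxCredits)
{
    TimeStamp now = TimeStamp::Now();
    uint64_t earned = static_cast<uint64_t>(
        (now - aLastUpdate).ToSeconds() * aCreditsPerSecond);
    aCredits = std::min(aCredits + earned, aMaxCredits); // needs <algorithm>
    aLastUpdate = now;
}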
void
nsSocketTransportService::OnKeepaliveEnabledPrefChange()
{
    // Dispatch to socket thread if we're not executing there.
    if (PR_GetCurrentThread() != gSocketThread) {
        gSocketTransportService->Dispatch(
            NewRunnableMethod(
                this, &nsSocketTransportService::OnKeepaliveEnabledPrefChange),
            NS_DISPATCH_NORMAL);
        return;
    }

    SOCKET_LOG(("nsSocketTransportService::OnKeepaliveEnabledPrefChange %s",
                mKeepaliveEnabledPref ? "enabled" : "disabled"));

    // Notify each socket that keepalive has been en/disabled globally.
    for (int32_t i = mActiveCount - 1; i >= 0; --i) {
        NotifyKeepaliveEnabledPrefChange(&mActiveList[i]);
    }
    for (int32_t i = mIdleCount - 1; i >= 0; --i) {
        NotifyKeepaliveEnabledPrefChange(&mIdleList[i]);
    }
}
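The early re-dispatch above is the usual "bounce to the owning thread" idiom: when called off-thread, the method posts itself to the socket thread and returns immediately, so the body only ever runs with the right thread affinity. A stripped-down sketch of the pattern with generic names (Widget, mOwnerPRThread, and mOwnerTarget are illustrative, not the service's API):

// Sketch: marshal a call onto a designated thread before doing the work.
void Widget::DoThreadAffineWork()
{
    if (PR_GetCurrentThread() != mOwnerPRThread) {
        mOwnerTarget->Dispatch(
            NewRunnableMethod(this, &Widget::DoThreadAffineWork),
            NS_DISPATCH_NORMAL);
        return; // the body runs later, on the owning thread
    }
    // ... work that must happen on the owning thread ...
}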
void
EventTokenBucket::WantNormalTimers()
{
    if (!mFineGrainTimerInUse)
        return;
    if (mFineGrainResetTimerArmed)
        return;

    TimeDuration elapsed(TimeStamp::Now() - mLastFineGrainTimerUse);
    static const TimeDuration fiveSeconds = TimeDuration::FromSeconds(5);

    if (elapsed >= fiveSeconds) {
        NormalTimers();
        return;
    }

    if (!mFineGrainResetTimer)
        mFineGrainResetTimer = do_CreateInstance("@mozilla.org/timer;1");

    // if we can't delay the reset, just do it now
    if (!mFineGrainResetTimer) {
        NormalTimers();
        return;
    }

    // Pad the callback out by 100 ms to avoid another round trip through
    // here if the timer fires just a tad early.
    SOCKET_LOG(("EventTokenBucket::WantNormalTimers %p "
                "Will reset timer granularity after delay", this));

    mFineGrainResetTimer->InitWithCallback(
        this,
        static_cast<uint32_t>((fiveSeconds - elapsed).ToMilliseconds()) + 100,
        nsITimer::TYPE_ONE_SHOT);
    mFineGrainResetTimerArmed = true;
}
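On Windows, "fine grain" timers usually mean a raised system timer resolution, which is power-hungry and therefore worth handing back after the five-second quiet period above. A hedged sketch of what the NormalTimers()/fine-grain pair might boil down to, assuming the winmm timeBeginPeriod mechanism (the real bodies are not in this excerpt):

#ifdef XP_WIN
// Sketch (assumption): request 1 ms scheduler granularity while needed,
// release it once WantNormalTimers() decides the burst is over.
static void FineGrainTimersSketch() { timeBeginPeriod(1); } // <timeapi.h>, winmm
static void NormalTimersSketch()    { timeEndPeriod(1); }
#endif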
bool
PollableEvent::Clear()
{
  // necessary because of the "don't signal on socket thread" optimization
  MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

  SOCKET_LOG(("PollableEvent::Clear\n"));
  mSignaled = false;
  if (!mReadFD) {
    SOCKET_LOG(("PollableEvent::Clear mReadFD is null\n"));
    return false;
  }
  char buf[2048];
  int32_t status = PR_Read(mReadFD, buf, 2048);
  SOCKET_LOG(("PollableEvent::Signal PR_Read %d\n", status));

  if (status == 1) {
    return true;
  }
  if (status == 0) {
    SOCKET_LOG(("PollableEvent::Clear EOF!\n"));
    return false;
  }
  if (status > 1) {
    // more than one wakeup byte was buffered; not expected, but drain it
    MOZ_ASSERT(false);
    SOCKET_LOG(("PollableEvent::Clear Unexpected events\n"));
    Clear();
    return true;
  }
  PRErrorCode code = PR_GetError();
  if (code == PR_WOULD_BLOCK_ERROR) {
    return true;
  }
  SOCKET_LOG(("PollableEvent::Clear unexpected error %d\n", code));
  return false;
}
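Clear() drains the byte that the matching Signal() is expected to have written, which is what makes mReadFD readable to PR_Poll in the first place. A minimal sketch of that write side, reusing the mWriteFD/mSignaled members from this class (the byte value is arbitrary; the real Signal() is not shown in this excerpt):

// Sketch: wake the poll loop by making mReadFD readable with one byte.
bool
PollableEvent::SignalSketch()
{
  if (!mWriteFD) {
    return false;
  }
  if (mSignaled) {
    return true; // a wakeup byte is already pending; one is enough
  }
  mSignaled = true;
  return PR_Write(mWriteFD, "S", 1) == 1;
}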
void ClientSocket::onReadyRead()
{
    if (!m_socket) {
        SOCKET_LOG("Cannot read message from client. Socket is already deleted.");
        return;
    }

    const qint64 available = m_socket->bytesAvailable();
    m_message.append( m_socket->read(available) );

    while ( !m_message.isEmpty() ) {
        if (!m_hasMessageLength) {
            const int preambleSize = headerDataSize() + streamDataSize(m_messageLength);
            if ( m_message.length() < preambleSize )
                break;

            {
                QDataStream stream(m_message);
                stream.setVersion(QDataStream::Qt_5_0);
                quint32 magicNumber;
                quint32 version;
                stream >> magicNumber >> version >> m_messageLength;
                if ( stream.status() != QDataStream::Ok ) {
                    error("Failed to read message length from client!");
                    return;
                }

                if (magicNumber != protocolMagicNumber) {
                    error("Unexpected message magic number from client!");
                    return;
                }

                if (version != protocolVersion) {
                    error("Unexpected message version from client!");
                    return;
                }
            }

            m_message.remove(0, preambleSize);
            m_hasMessageLength = true;

            if (m_messageLength > bigMessageThreshold)
                COPYQ_LOG( QString("Receiving big message: %1 MiB").arg(m_messageLength / 1024 / 1024) );
        }

        const auto length = static_cast<int>(m_messageLength);
        if ( m_message.length() < length )
            break;

        QByteArray msg = m_message.mid(0, length);
        qint32 messageCode;

        if ( !readValue(&messageCode, &msg) ) {
            error("Failed to read message code from client!");
            return;
        }

        m_hasMessageLength = false;
        m_message = m_message.mid(length);

        emit messageReceived(msg, messageCode, id());
    }
}
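onReadyRead() expects frames laid out as [magic, version, length] followed by a length-sized payload that itself begins with the message code. A sketch of a matching sender, assuming the same protocolMagicNumber/protocolVersion constants and a payload that already carries its encoded message code (the helper name is made up):

// Sketch: emit one frame in the format onReadyRead() parses.
static void sendFrameSketch(QIODevice *socket, const QByteArray &payload)
{
    QByteArray preamble;
    {
        QDataStream stream(&preamble, QIODevice::WriteOnly);
        stream.setVersion(QDataStream::Qt_5_0);
        stream << protocolMagicNumber << protocolVersion
               << static_cast<quint32>(payload.length());
    }
    socket->write(preamble);
    socket->write(payload);
}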
ClientSocket::~ClientSocket()
{
    SOCKET_LOG("Destroying socket.");
    close();
}
nsresult
nsSocketTransportService::DoPollIteration(TimeDuration *pollDuration)
{
    SOCKET_LOG(("STS poll iter\n"));

    int32_t i, count;
    //
    // poll loop
    //
    // walk active list backwards to see if any sockets should actually be
    // idle, then walk the idle list backwards to see if any idle sockets
    // should become active.  take care to check only idle sockets that
    // were idle to begin with ;-)
    //
    count = mIdleCount;
    for (i=mActiveCount-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  active [%u] { handler=%p condition=%x pollflags=%hu }\n", i,
            mActiveList[i].mHandler,
            mActiveList[i].mHandler->mCondition,
            mActiveList[i].mHandler->mPollFlags));
        //---
        if (NS_FAILED(mActiveList[i].mHandler->mCondition))
            DetachSocket(mActiveList, &mActiveList[i]);
        else {
            uint16_t in_flags = mActiveList[i].mHandler->mPollFlags;
            if (in_flags == 0)
                MoveToIdleList(&mActiveList[i]);
            else {
                // update poll flags
                mPollList[i+1].in_flags = in_flags;
                mPollList[i+1].out_flags = 0;
            }
        }
    }
    for (i=count-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  idle [%u] { handler=%p condition=%x pollflags=%hu }\n", i,
            mIdleList[i].mHandler,
            mIdleList[i].mHandler->mCondition,
            mIdleList[i].mHandler->mPollFlags));
        //---
        if (NS_FAILED(mIdleList[i].mHandler->mCondition))
            DetachSocket(mIdleList, &mIdleList[i]);
        else if (mIdleList[i].mHandler->mPollFlags != 0)
            MoveToPollList(&mIdleList[i]);
    }

    SOCKET_LOG(("  calling PR_Poll [active=%u idle=%u]\n", mActiveCount, mIdleCount));

#if defined(XP_WIN)
    // 30 active connections was the historic limit before Firefox 7's 256. A
    // few Windows systems have trouble with the higher limit, so actively
    // probe a limit the first time we exceed 30.
    if ((mActiveCount > 30) && !mProbedMaxCount)
        ProbeMaxCount();
#endif

    // Measures seconds spent while blocked on PR_Poll
    uint32_t pollInterval = 0;
    int32_t n = 0;
    *pollDuration = 0;
    if (!gIOService->IsNetTearingDown()) {
        // Let's not do polling during shutdown.
        n = Poll(&pollInterval, pollDuration);
    }

    if (n < 0) {
        SOCKET_LOG(("  PR_Poll error [%d] os error [%d]\n", PR_GetError(),
                    PR_GetOSError()));
    }
    else {
        //
        // service "active" sockets...
        //
        uint32_t numberOfOnSocketReadyCalls = 0;
        for (i=0; i<int32_t(mActiveCount); ++i) {
            PRPollDesc &desc = mPollList[i+1];
            SocketContext &s = mActiveList[i];
            if (n > 0 && desc.out_flags != 0) {
                s.mElapsedTime = 0;
                s.mHandler->OnSocketReady(desc.fd, desc.out_flags);
                numberOfOnSocketReadyCalls++;
            }
            // check for timeout errors unless disabled...
            else if (s.mHandler->mPollTimeout != UINT16_MAX) {
                // update elapsed time counter
                // (NOTE: We explicitly cast UINT16_MAX to be an unsigned value
                // here -- otherwise, some compilers will treat it as signed,
                // which makes them fire signed/unsigned-comparison build
                // warnings for the comparison against 'pollInterval'.)
                if (MOZ_UNLIKELY(pollInterval >
                                static_cast<uint32_t>(UINT16_MAX) -
                                s.mElapsedTime))
                    s.mElapsedTime = UINT16_MAX;
                else
                    s.mElapsedTime += uint16_t(pollInterval);
                // check for timeout expiration 
                if (s.mElapsedTime >= s.mHandler->mPollTimeout) {
                    s.mElapsedTime = 0;
                    s.mHandler->OnSocketReady(desc.fd, -1);
                    numberOfOnSocketReadyCalls++;
                }
            }
        }
        if (mTelemetryEnabledPref) {
            Telemetry::Accumulate(
                Telemetry::STS_NUMBER_OF_ONSOCKETREADY_CALLS,
                numberOfOnSocketReadyCalls);
        }

        //
        // check for "dead" sockets and remove them (need to do this in
        // reverse order obviously).
        //
        for (i=mActiveCount-1; i>=0; --i) {
            if (NS_FAILED(mActiveList[i].mHandler->mCondition))
                DetachSocket(mActiveList, &mActiveList[i]);
        }

        if (n != 0 && (mPollList[0].out_flags & (PR_POLL_READ | PR_POLL_EXCEPT))) {
            MutexAutoLock lock(mLock);

            // acknowledge pollable event (should not block)
            if (mPollableEvent &&
                ((mPollList[0].out_flags & PR_POLL_EXCEPT) ||
                 !mPollableEvent->Clear())) {
                // On Windows, the TCP loopback connection in the
                // pollable event may become broken when a laptop
                // switches between wired and wireless networks or
                // wakes up from hibernation.  We try to create a
                // new pollable event.  If that fails, we fall back
                // on "busy wait".
                NS_WARNING("Trying to repair mPollableEvent");
                mPollableEvent.reset(new PollableEvent());
                if (!mPollableEvent->Valid()) {
                    mPollableEvent = nullptr;
                }
                SOCKET_LOG(("running socket transport thread without "
                            "a pollable event now valid=%d", !!mPollableEvent));
                mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
                mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
                mPollList[0].out_flags = 0;
            }
        }
    }

    return NS_OK;
}
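Note the persistent off-by-one above: mPollList[0] is reserved for the pollable wakeup event, so active socket i is always mirrored at mPollList[i+1]. A self-contained sketch of the layout that PR_Poll is fed (the helper and its names are illustrative):

// Sketch: slot 0 wakes the loop; real sockets start at index 1.
static int32_t
PollSketch(PRPollDesc *descs, uint32_t activeCount, PRFileDesc *wakeupFD,
           PRIntervalTime timeout)
{
    descs[0].fd = wakeupFD; // may be null => busy wait
    descs[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
    descs[0].out_flags = 0;
    return PR_Poll(descs, activeCount + 1, timeout);
}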
NS_IMETHODIMP
nsSocketTransportService::Run()
{
    SOCKET_LOG(("STS thread init %d sockets\n", gMaxCount));

    psm::InitializeSSLServerCertVerificationThreads();

    gSocketThread = PR_GetCurrentThread();

    {
        MutexAutoLock lock(mLock);
        mPollableEvent.reset(new PollableEvent());
        //
        // NOTE: per bug 190000, this failure could be caused by Zone-Alarm
        // or similar software.
        //
        // NOTE: per bug 191739, this failure could also be caused by lack
        // of a loopback device on Windows and OS/2 platforms (it creates
        // a loopback socket pair on these platforms to implement a pollable
        // event object).  if we can't create a pollable event, then we'll
        // have to "busy wait" to implement the socket event queue :-(
        //
        if (!mPollableEvent->Valid()) {
            mPollableEvent = nullptr;
            NS_WARNING("running socket transport thread without a pollable event");
            SOCKET_LOG(("running socket transport thread without a pollable event"));
        }

        mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
        mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
        mPollList[0].out_flags = 0;
    }

    mRawThread = NS_GetCurrentThread();

    // hook ourselves up to observe event processing for this thread
    nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mRawThread);
    threadInt->SetObserver(this);

    // make sure the pseudo random number generator is seeded on this thread
    srand(static_cast<unsigned>(PR_Now()));

    // For the calculation of the duration of the last cycle (i.e. the last for-loop
    // iteration before shutdown).
    TimeStamp startOfCycleForLastCycleCalc;
    int numberOfPendingEventsLastCycle;

    // For measuring of the poll iteration duration without time spent blocked
    // in poll().
    TimeStamp pollCycleStart;
    // Time blocked in poll().
    TimeDuration singlePollDuration;

    // For calculating the time needed for a new element to run.
    TimeStamp startOfIteration;
    TimeStamp startOfNextIteration;
    int numberOfPendingEvents;

    // If there are too many pending events queued, we run poll() between
    // batches of them; the following variable accumulates the time spent
    // blocked in poll().
    TimeDuration pollDuration;

    for (;;) {
        bool pendingEvents = false;

        numberOfPendingEvents = 0;
        numberOfPendingEventsLastCycle = 0;
        if (mTelemetryEnabledPref) {
            startOfCycleForLastCycleCalc = TimeStamp::NowLoRes();
            startOfNextIteration = TimeStamp::NowLoRes();
        }
        pollDuration = 0;

        do {
            if (mTelemetryEnabledPref) {
                pollCycleStart = TimeStamp::NowLoRes();
            }

            DoPollIteration(&singlePollDuration);

            if (mTelemetryEnabledPref && !pollCycleStart.IsNull()) {
                Telemetry::Accumulate(Telemetry::STS_POLL_BLOCK_TIME,
                                      singlePollDuration.ToMilliseconds());
                Telemetry::AccumulateTimeDelta(
                    Telemetry::STS_POLL_CYCLE,
                    pollCycleStart + singlePollDuration,
                    TimeStamp::NowLoRes());
                pollDuration += singlePollDuration;
            }

            mRawThread->HasPendingEvents(&pendingEvents);
            if (pendingEvents) {
                if (!mServingPendingQueue) {
                    nsresult rv = Dispatch(NewRunnableMethod(this,
                        &nsSocketTransportService::MarkTheLastElementOfPendingQueue),
                        nsIEventTarget::DISPATCH_NORMAL);
                    if (NS_FAILED(rv)) {
                        NS_WARNING("Could not dispatch a new event on the "
                                   "socket thread.");
                    } else {
                        mServingPendingQueue = true;
                    }

                    if (mTelemetryEnabledPref) {
                        startOfIteration = startOfNextIteration;
                        // Everything that comes after this point will
                        // be served in the next iteration. If no event
                        // arrives, startOfNextIteration will be reset at the
                        // top of the for-loop.
                        startOfNextIteration = TimeStamp::NowLoRes();
                    }
                }
                TimeStamp eventQueueStart = TimeStamp::NowLoRes();
                do {
                    NS_ProcessNextEvent(mRawThread);
                    numberOfPendingEvents++;
                    pendingEvents = false;
                    mRawThread->HasPendingEvents(&pendingEvents);
                } while (pendingEvents && mServingPendingQueue &&
                         ((TimeStamp::NowLoRes() -
                           eventQueueStart).ToMilliseconds() <
                          mMaxTimePerPollIter));

                if (mTelemetryEnabledPref && !mServingPendingQueue &&
                    !startOfIteration.IsNull()) {
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENTS_CYCLE,
                        startOfIteration + pollDuration,
                        TimeStamp::NowLoRes());

                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
                        numberOfPendingEvents);

                    numberOfPendingEventsLastCycle += numberOfPendingEvents;
                    numberOfPendingEvents = 0;
                    pollDuration = 0;
                }
            }
        } while (pendingEvents);

        bool goingOffline = false;
        // now that our event queue is empty, check to see if we should exit
        {
            MutexAutoLock lock(mLock);
            if (mShuttingDown) {
                if (mTelemetryEnabledPref &&
                    !startOfCycleForLastCycleCalc.IsNull()) {
                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE,
                        numberOfPendingEventsLastCycle);
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
                        startOfCycleForLastCycleCalc,
                        TimeStamp::NowLoRes());
                }
                break;
            }
            if (mGoingOffline) {
                mGoingOffline = false;
                goingOffline = true;
            }
        }
        // Avoid potential deadlock
        if (goingOffline)
            Reset(true);
    }

    SOCKET_LOG(("STS shutting down thread\n"));

    // detach all sockets, including locals
    Reset(false);

    // Final pass over the event queue. This makes sure that events posted by
    // socket detach handlers get processed.
    NS_ProcessPendingEvents(mRawThread);

    gSocketThread = nullptr;

    psm::StopSSLServerCertVerificationThreads();

    SOCKET_LOG(("STS thread exit\n"));
    return NS_OK;
}
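Run() bounds how long it services the event queue by dispatching a sentinel: once MarkTheLastElementOfPendingQueue executes, everything queued before it has been served and the inner do/while stops claiming the queue. A plausible one-line body for that sentinel (an assumption; the real implementation is not part of this excerpt):

// Sketch: the sentinel simply releases the "serving the queue" claim
// that the inner do/while loop in Run() checks.
void
nsSocketTransportService::MarkTheLastElementOfPendingQueueSketch()
{
    mServingPendingQueue = false;
}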
void
nsSocketTransportService::ProbeMaxCount()
{
    NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");

    if (mProbedMaxCount)
        return;
    mProbedMaxCount = true;

    // Allocate and test a PR_Poll up to the gMaxCount number of unconnected
    // sockets. See bug 692260 - windows should be able to handle 1000 sockets
    // in select() without a problem, but LSPs have been known to balk at lower
    // numbers. (64 in the bug).

    // Allocate
    struct PRPollDesc pfd[SOCKET_LIMIT_TARGET];
    uint32_t numAllocated = 0;

    for (uint32_t index = 0 ; index < gMaxCount; ++index) {
        pfd[index].in_flags = PR_POLL_READ | PR_POLL_WRITE | PR_POLL_EXCEPT;
        pfd[index].out_flags = 0;
        pfd[index].fd =  PR_OpenTCPSocket(PR_AF_INET);
        if (!pfd[index].fd) {
            SOCKET_LOG(("Socket Limit Test index %d failed\n", index));
            if (index < SOCKET_LIMIT_MIN)
                gMaxCount = SOCKET_LIMIT_MIN;
            else
                gMaxCount = index;
            break;
        }
        ++numAllocated;
    }

    // Test
    PR_STATIC_ASSERT(SOCKET_LIMIT_MIN >= 32U);
    while (gMaxCount <= numAllocated) {
        int32_t rv = PR_Poll(pfd, gMaxCount, PR_MillisecondsToInterval(0));
        
        SOCKET_LOG(("Socket Limit Test poll() size=%d rv=%d\n",
                    gMaxCount, rv));

        if (rv >= 0)
            break;

        SOCKET_LOG(("Socket Limit Test poll confirmationSize=%d rv=%d error=%d\n",
                    gMaxCount, rv, PR_GetError()));

        gMaxCount -= 32;
        if (gMaxCount <= SOCKET_LIMIT_MIN) {
            gMaxCount = SOCKET_LIMIT_MIN;
            break;
        }
    }

    // Free
    for (uint32_t index = 0 ; index < numAllocated; ++index)
        if (pfd[index].fd)
            PR_Close(pfd[index].fd);

    Telemetry::Accumulate(Telemetry::NETWORK_PROBE_MAXCOUNT, gMaxCount);
    SOCKET_LOG(("Socket Limit Test max was confirmed at %d\n", gMaxCount));
}
void* atiny_net_connect(const char* host, const char* port, int proto)
{
    int flags;
    int ret;
    struct addrinfo hints;
    struct addrinfo* addr_list;
    struct addrinfo* cur;
    atiny_net_context* ctx;

    if (NULL == host || NULL == port ||
        (proto != ATINY_PROTO_UDP && proto != ATINY_PROTO_TCP))
    {
        SOCKET_LOG("ilegal incoming parameters");
        return NULL;
    }

    /* Do name resolution with both IPv6 and IPv4 */
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = proto == ATINY_PROTO_UDP ? SOCK_DGRAM : SOCK_STREAM;
    hints.ai_protocol = proto == ATINY_PROTO_UDP ? IPPROTO_UDP : IPPROTO_TCP;

    SOCKET_LOG("try to do name resolution now...");

    if ((ret = getaddrinfo(host, port, &hints, &addr_list)) != 0)
    {
        SOCKET_LOG("getaddrinfo failed: 0x%x", ret);
        return NULL;
    }

    SOCKET_LOG("do name resolution succeed");

    ctx = atiny_malloc(sizeof(atiny_net_context));

    if (NULL == ctx)
    {
        SOCKET_LOG("malloc failed for socket context");
        freeaddrinfo(addr_list);
        return NULL;
    }

    ctx->fd = -1;

    /* Try the sockaddrs until a connection succeeds */
    for (cur = addr_list; cur != NULL; cur = cur->ai_next)
    {
        ctx->fd = socket(cur->ai_family, cur->ai_socktype, cur->ai_protocol);

        if (ctx->fd < 0)
        {
            continue;
        }

        flags = fcntl(ctx->fd, F_GETFL, 0);

        if (flags < 0 || fcntl(ctx->fd, F_SETFL, flags | O_NONBLOCK) < 0)
        {
            close(ctx->fd);
            ctx->fd = -1;
            continue;
        }

        // NOTE: the socket is nonblocking at this point, so a TCP connect()
        // that cannot complete immediately fails with EINPROGRESS and falls
        // through to the close() below; only connects that finish at once
        // take this break.
        if (connect(ctx->fd, cur->ai_addr, cur->ai_addrlen) == 0)
        {
            break;
        }

        close(ctx->fd);
        ctx->fd = -1;
    }

    freeaddrinfo(addr_list);

    if (ctx->fd < 0)
    {
        SOCKET_LOG("unkown host(%s) or port(%s)", host, port);
        atiny_free(ctx);
        return NULL;
    }

    if (proto == ATINY_PROTO_UDP)
    {
        SOCKET_LOG("UDP socket created and connected to server(%s:%s)", host, port);
    }
    else /* proto == ATINY_PROTO_TCP */
    {
        SOCKET_LOG("TCP connect to server(%s:%s) succeeded", host, port);
    }

    return ctx;
}
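Because the descriptors are made nonblocking before connect(), a caller that wants an in-progress TCP connect to finish has to wait for writability itself. A hedged usage sketch (only ctx->fd and the context type come from the listing; the poll() wait is an assumption about caller-side code):

// Sketch: wait until a nonblocking connect() completes (fd writable).
// Requires <poll.h>.
static int wait_connected_sketch(atiny_net_context *ctx, int timeout_ms)
{
    struct pollfd pfd;
    pfd.fd = ctx->fd;
    pfd.events = POLLOUT; // writable => connect finished (check SO_ERROR to be sure)
    pfd.revents = 0;
    return poll(&pfd, 1, timeout_ms) == 1 && (pfd.revents & POLLOUT);
}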
static bool NewTCPSocketPair(PRFileDesc *fd[], bool aSetRecvBuff)
{
  // This is a replacement for PR_NewTCPSocketPair that manually
  // sets the recv buffer to 64K. A Windows bug (1248358)
  // can result in using an incompatible rwin and window
  // scale option on localhost pipes if not set before connect.

  SOCKET_LOG(("NewTCPSocketPair %s recv buffer tuning\n", aSetRecvBuff ? "with" : "without"));

  PRFileDesc *listener = nullptr;
  PRFileDesc *writer = nullptr;
  PRFileDesc *reader = nullptr;
  PRSocketOptionData recvBufferOpt;
  recvBufferOpt.option = PR_SockOpt_RecvBufferSize;
  recvBufferOpt.value.recv_buffer_size = 65535;

  PRSocketOptionData nodelayOpt;
  nodelayOpt.option = PR_SockOpt_NoDelay;
  nodelayOpt.value.no_delay = true;

  PRSocketOptionData noblockOpt;
  noblockOpt.option = PR_SockOpt_Nonblocking;
  noblockOpt.value.non_blocking = true;

  listener = PR_OpenTCPSocket(PR_AF_INET);
  if (!listener) {
    goto failed;
  }

  if (aSetRecvBuff) {
    PR_SetSocketOption(listener, &recvBufferOpt);
  }
  PR_SetSocketOption(listener, &nodelayOpt);

  PRNetAddr listenAddr;
  memset(&listenAddr, 0, sizeof(listenAddr));
  if ((PR_InitializeNetAddr(PR_IpAddrLoopback, 0, &listenAddr) == PR_FAILURE) ||
      (PR_Bind(listener, &listenAddr) == PR_FAILURE) ||
      (PR_GetSockName(listener, &listenAddr) == PR_FAILURE) || // learn the dynamic port
      (PR_Listen(listener, 5) == PR_FAILURE)) {
    goto failed;
  }

  writer = PR_OpenTCPSocket(PR_AF_INET);
  if (!writer) {
    goto failed;
  }
  if (aSetRecvBuff) {
    PR_SetSocketOption(writer, &recvBufferOpt);
  }
  PR_SetSocketOption(writer, &nodelayOpt);
  PR_SetSocketOption(writer, &noblockOpt);
  PRNetAddr writerAddr;
  if (PR_InitializeNetAddr(PR_IpAddrLoopback, ntohs(listenAddr.inet.port), &writerAddr) == PR_FAILURE) {
    goto failed;
  }

  if (PR_Connect(writer, &writerAddr, PR_INTERVAL_NO_TIMEOUT) == PR_FAILURE) {
    if ((PR_GetError() != PR_IN_PROGRESS_ERROR) ||
        (PR_ConnectContinue(writer, PR_POLL_WRITE) == PR_FAILURE)) {
      goto failed;
    }
  }

  reader = PR_Accept(listener, &listenAddr, PR_MillisecondsToInterval(200));
  if (!reader) {
    goto failed;
  }
  if (aSetRecvBuff) {
    PR_SetSocketOption(reader, &recvBufferOpt);
  }
  PR_SetSocketOption(reader, &nodelayOpt);
  PR_SetSocketOption(reader, &noblockOpt);
  PR_Close(listener);

  fd[0] = reader;
  fd[1] = writer;
  return true;

failed:
  if (listener) {
    PR_Close(listener);
  }
  if (reader) {
    PR_Close(reader);
  }
  if (writer) {
    PR_Close(writer);
  }
  return false;
}
PollableEvent::PollableEvent()
  : mWriteFD(nullptr)
  , mReadFD(nullptr)
  , mSignaled(false)
{
  MOZ_COUNT_CTOR(PollableEvent);
  MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
  // Create a pair of PRFileDescs that can be used as a poll()able
  // signal: on Windows use a localhost socket pair, and on
  // Unix use a pipe.
#ifdef USEPIPE
  SOCKET_LOG(("PollableEvent() using pipe\n"));
  if (PR_CreatePipe(&mReadFD, &mWriteFD) == PR_SUCCESS) {
    // make the pipe non-blocking. NSPR asserts when
    // trying to use SockOpt here
    PROsfd fd = PR_FileDesc2NativeHandle(mReadFD);
    int flags = fcntl(fd, F_GETFL, 0);
    (void)fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    fd = PR_FileDesc2NativeHandle(mWriteFD);
    flags = fcntl(fd, F_GETFL, 0);
    (void)fcntl(fd, F_SETFL, flags | O_NONBLOCK);
  } else {
    mReadFD = nullptr;
    mWriteFD = nullptr;
    SOCKET_LOG(("PollableEvent() pipe failed\n"));
  }
#else
  SOCKET_LOG(("PollableEvent() using socket pair\n"));
  PRFileDesc *fd[2];
  LazyInitSocket();

  // Try with an increased recv buffer first (bug 1248358).
  if (NewTCPSocketPair(fd, true)) {
    mReadFD = fd[0];
    mWriteFD = fd[1];
  // If that fails, try without the recv buffer increase (bug 1305436).
  } else if (NewTCPSocketPair(fd, false)) {
    mReadFD = fd[0];
    mWriteFD = fd[1];
  // If both fail, try the old version.
  } else if (PR_NewTCPSocketPair(fd) == PR_SUCCESS) {
    mReadFD = fd[0];
    mWriteFD = fd[1];

    PRSocketOptionData socket_opt;
    DebugOnly<PRStatus> status;
    socket_opt.option = PR_SockOpt_NoDelay;
    socket_opt.value.no_delay = true;
    PR_SetSocketOption(mWriteFD, &socket_opt);
    PR_SetSocketOption(mReadFD, &socket_opt);
    socket_opt.option = PR_SockOpt_Nonblocking;
    socket_opt.value.non_blocking = true;
    status = PR_SetSocketOption(mWriteFD, &socket_opt);
    MOZ_ASSERT(status == PR_SUCCESS);
    status = PR_SetSocketOption(mReadFD, &socket_opt);
    MOZ_ASSERT(status == PR_SUCCESS);
  }

  if (mReadFD && mWriteFD) {
    // compatibility with LSPs such as McAfee that assume an NSPR
    // layer for read, a la the NSPR Pollable Event (bug 698882). This layer is a no-op.
    PRFileDesc *topLayer =
      PR_CreateIOLayerStub(sPollableEventLayerIdentity,
                           sPollableEventLayerMethodsPtr);
    if (topLayer) {
      if (PR_PushIOLayer(fd[0], PR_TOP_IO_LAYER, topLayer) == PR_FAILURE) {
        topLayer->dtor(topLayer);
      } else {
        SOCKET_LOG(("PollableEvent() nspr layer ok\n"));
        mReadFD = topLayer;
      }
    }

  } else {
    SOCKET_LOG(("PollableEvent() socketpair failed\n"));
  }
#endif

  if (mReadFD && mWriteFD) {
    // prime the system to deal with races involved in the [dc]tor cycle
    SOCKET_LOG(("PollableEvent() ctor ok\n"));
    mSignaled = true;
    PR_Write(mWriteFD, "I", 1);
  }
}
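The constructor leaves both descriptors null on any failure path, which is presumably all Valid() needs to check before Run() decides whether to fall back to busy waiting. A one-line sketch of that predicate (an assumption; the real Valid() is not in this excerpt):

// Sketch: construction succeeded iff both ends of the pair exist.
bool PollableEvent::ValidSketch() { return mWriteFD && mReadFD; }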