Example #1
PR_IMPLEMENT(PRRecvWait*) PR_EnumerateWaitGroup(
    PRMWaitEnumerator *enumerator, const PRRecvWait *previous)
{
    PRRecvWait *result = NULL;
    
    /* entry point sanity checking */
    PR_ASSERT(NULL != enumerator);
    PR_ASSERT(_PR_ENUM_SEALED == enumerator->seal);
    if ((NULL == enumerator)
    || (_PR_ENUM_SEALED != enumerator->seal)) goto bad_argument;

    /* beginning of enumeration */
    if (NULL == previous)
    {
        if (NULL == enumerator->group)
        {
            enumerator->group = mw_state->group;
            if (NULL == enumerator->group)
            {
                PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
                return NULL;
            }
        }
        enumerator->waiter = &enumerator->group->waiter->recv_wait;
        enumerator->p_timestamp = enumerator->group->p_timestamp;
        enumerator->thread = PR_GetCurrentThread();
        enumerator->index = 0;
    }
    /* continuing an enumeration */
    else
    {
        PRThread *me = PR_GetCurrentThread();
        PR_ASSERT(me == enumerator->thread);
        if (me != enumerator->thread) goto bad_argument;

        /* need to restart the enumeration */
        if (enumerator->p_timestamp != enumerator->group->p_timestamp)
            return PR_EnumerateWaitGroup(enumerator, NULL);
    }

    /* actually progress the enumeration */
#if defined(WINNT)
    _PR_MD_LOCK(&enumerator->group->mdlock);
#else
    PR_Lock(enumerator->group->ml);
#endif
    while (enumerator->index++ < enumerator->group->waiter->length)
    {
        if (NULL != (result = *(enumerator->waiter)++)) break;
    }
#if defined(WINNT)
    _PR_MD_UNLOCK(&enumerator->group->mdlock);
#else
    PR_Unlock(enumerator->group->ml);
#endif

    return result;  /* what we live for */

bad_argument:
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    return NULL;  /* probably ambiguous */
}  /* PR_EnumerateWaitGroup */
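A caller-side sketch of how this enumerator is normally driven: pass NULL to begin the walk and the previously returned element to continue it. The wait-group and enumerator creation calls (PR_CreateWaitGroup()/PR_CreateMWaitEnumerator()) are standard NSPR but are not part of the example above, so the surrounding setup here is assumed:

/* PRWaitGroup *group was obtained earlier from PR_CreateWaitGroup(). */
PRMWaitEnumerator *enumerator = PR_CreateMWaitEnumerator(group);
if (NULL != enumerator)
{
    PRRecvWait *wait;
    /* NULL starts the enumeration; the previous element continues it. */
    for (wait = PR_EnumerateWaitGroup(enumerator, NULL);
         NULL != wait;
         wait = PR_EnumerateWaitGroup(enumerator, wait))
    {
        /* inspect wait->fd, wait->timeout, wait->outcome, ... */
    }
    PR_DestroyMWaitEnumerator(enumerator);
}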
JD_METHOD_(void*)  CNSAdapter_NSPR::JD_GetCurrentThread(void)
{
  return PR_GetCurrentThread();
}
Example #3
void
nsHttpTransaction::Close(nsresult reason)
{
    LOG(("nsHttpTransaction::Close [this=%x reason=%x]\n", this, reason));

    NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");

    if (mClosed) {
        LOG(("  already closed\n"));
        return;
    }

    if (mActivityDistributor) {
        // report the response is complete if not already reported
        if (!mResponseIsComplete)
            mActivityDistributor->ObserveActivity(
                mChannel,
                NS_HTTP_ACTIVITY_TYPE_HTTP_TRANSACTION,
                NS_HTTP_ACTIVITY_SUBTYPE_RESPONSE_COMPLETE,
                PR_Now(),
                static_cast<PRUint64>(mContentRead),
                EmptyCString());

        // report that this transaction is closing
        mActivityDistributor->ObserveActivity(
            mChannel,
            NS_HTTP_ACTIVITY_TYPE_HTTP_TRANSACTION,
            NS_HTTP_ACTIVITY_SUBTYPE_TRANSACTION_CLOSE,
            PR_Now(), LL_ZERO, EmptyCString());
    }

    // we must no longer reference the connection!  find out if the 
    // connection was being reused before letting it go.
    PRBool connReused = PR_FALSE;
    if (mConnection)
        connReused = mConnection->IsReused();
    mConnected = PR_FALSE;

    //
    // if the connection was reset or closed before we wrote any part of the
    // request or if we wrote the request but didn't receive any part of the
    // response and the connection was being reused, then we can (and really
    // should) assume that we wrote to a stale connection and we must therefore
    // repeat the request over a new connection.
    //
    // NOTE: the conditions under which we will automatically retry the HTTP
    // request have to be carefully selected to avoid duplication of the
    // request from the point-of-view of the server.  such duplication could
    // have dire consequences including repeated purchases, etc.
    //
    // NOTE: because of the way SSL proxy CONNECT is implemented, it is
    // possible that the transaction may have received data without having
    // sent any data.  for this reason, mSentData == FALSE does not imply
    // mReceivedData == FALSE.  (see bug 203057 for more info.)
    //
    if (reason == NS_ERROR_NET_RESET || reason == NS_OK) {
        if (!mReceivedData && (!mSentData || connReused)) {
            // if restarting fails, then we must proceed to close the pipe,
            // which will notify the channel that the transaction failed.
            if (NS_SUCCEEDED(Restart()))
                return;
        }
    }

    PRBool relConn = PR_TRUE;
    if (NS_SUCCEEDED(reason)) {
        // the server has not sent the final \r\n terminating the header
        // section, and there may still be a header line unparsed.  let's make
        // sure we parse the remaining header line, and then hopefully, the
        // response will be usable (see bug 88792).  related to that, we may
        // also have an empty response containing no headers.  we should treat
        // that as an empty HTTP/0.9 response (see bug 300613).
        if (!mHaveAllHeaders) {
            char data = '\n';
            PRUint32 unused;
            ParseHead(&data, 1, &unused);
        }

        // honor the sticky connection flag...
        if (mCaps & NS_HTTP_STICKY_CONNECTION)
            relConn = PR_FALSE;
    }
    if (relConn && mConnection)
        NS_RELEASE(mConnection);

    mStatus = reason;
    mTransactionDone = PR_TRUE; // forcibly flag the transaction as complete
    mClosed = PR_TRUE;

    // release some resources that we no longer need
    mRequestStream = nsnull;
    mReqHeaderBuf.Truncate();
    mLineBuf.Truncate();
    if (mChunkedDecoder) {
        delete mChunkedDecoder;
        mChunkedDecoder = nsnull;
    }

    // closing this pipe triggers the channel's OnStopRequest method.
    mPipeOut->CloseWithStatus(reason);
}
NSAPI_PUBLIC SYS_THREAD systhread_current(void)
{
    return PR_GetCurrentThread();
}
Example #5
/* returns the threadId of the current thread modulo MAX_CONN_ARRAY 
=> gives the position of the thread in the array of secure connections */
static int PR_ThreadSelf(void) {
	PRThread *thr = PR_GetCurrentThread();
	PRUint32 myself = PR_GetThreadID(thr);
	myself &= 0x000007FF ;
	return myself;
}
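The mask keeps the low 11 bits of the NSPR thread id, so the result is the id modulo 2048; if MAX_CONN_ARRAY is that power of two, the value can be used directly as an index into the per-thread table the comment mentions. A minimal sketch of that lookup, with a hypothetical MAX_CONN_ARRAY value and slot type (neither is defined in the example above):

#define MAX_CONN_ARRAY 2048     /* assumed to match the 0x7FF mask above */

typedef struct SecureConnSlot {
	PRFileDesc *fd;             /* hypothetical per-thread connection state */
} SecureConnSlot;

static SecureConnSlot connArray[MAX_CONN_ARRAY];

static SecureConnSlot *getMyConnSlot(void) {
	/* PR_ThreadSelf() (above) masks the current thread's id into
	   [0, MAX_CONN_ARRAY), so it is safe as an array index. */
	return &connArray[PR_ThreadSelf()];
}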
Example #6
static void
InitializeStaticHeaders()
{
  MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
  if (!gStaticHeaders) {
    gStaticHeaders = new nsDeque();
    AddStaticElement(NS_LITERAL_CSTRING(":authority"));
    AddStaticElement(NS_LITERAL_CSTRING(":method"), NS_LITERAL_CSTRING("GET"));
    AddStaticElement(NS_LITERAL_CSTRING(":method"), NS_LITERAL_CSTRING("POST"));
    AddStaticElement(NS_LITERAL_CSTRING(":path"), NS_LITERAL_CSTRING("/"));
    AddStaticElement(NS_LITERAL_CSTRING(":path"), NS_LITERAL_CSTRING("/index.html"));
    AddStaticElement(NS_LITERAL_CSTRING(":scheme"), NS_LITERAL_CSTRING("http"));
    AddStaticElement(NS_LITERAL_CSTRING(":scheme"), NS_LITERAL_CSTRING("https"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("200"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("204"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("206"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("304"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("400"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("404"));
    AddStaticElement(NS_LITERAL_CSTRING(":status"), NS_LITERAL_CSTRING("500"));
    AddStaticElement(NS_LITERAL_CSTRING("accept-charset"));
    AddStaticElement(NS_LITERAL_CSTRING("accept-encoding"), NS_LITERAL_CSTRING("gzip, deflate"));
    AddStaticElement(NS_LITERAL_CSTRING("accept-language"));
    AddStaticElement(NS_LITERAL_CSTRING("accept-ranges"));
    AddStaticElement(NS_LITERAL_CSTRING("accept"));
    AddStaticElement(NS_LITERAL_CSTRING("access-control-allow-origin"));
    AddStaticElement(NS_LITERAL_CSTRING("age"));
    AddStaticElement(NS_LITERAL_CSTRING("allow"));
    AddStaticElement(NS_LITERAL_CSTRING("authorization"));
    AddStaticElement(NS_LITERAL_CSTRING("cache-control"));
    AddStaticElement(NS_LITERAL_CSTRING("content-disposition"));
    AddStaticElement(NS_LITERAL_CSTRING("content-encoding"));
    AddStaticElement(NS_LITERAL_CSTRING("content-language"));
    AddStaticElement(NS_LITERAL_CSTRING("content-length"));
    AddStaticElement(NS_LITERAL_CSTRING("content-location"));
    AddStaticElement(NS_LITERAL_CSTRING("content-range"));
    AddStaticElement(NS_LITERAL_CSTRING("content-type"));
    AddStaticElement(NS_LITERAL_CSTRING("cookie"));
    AddStaticElement(NS_LITERAL_CSTRING("date"));
    AddStaticElement(NS_LITERAL_CSTRING("etag"));
    AddStaticElement(NS_LITERAL_CSTRING("expect"));
    AddStaticElement(NS_LITERAL_CSTRING("expires"));
    AddStaticElement(NS_LITERAL_CSTRING("from"));
    AddStaticElement(NS_LITERAL_CSTRING("host"));
    AddStaticElement(NS_LITERAL_CSTRING("if-match"));
    AddStaticElement(NS_LITERAL_CSTRING("if-modified-since"));
    AddStaticElement(NS_LITERAL_CSTRING("if-none-match"));
    AddStaticElement(NS_LITERAL_CSTRING("if-range"));
    AddStaticElement(NS_LITERAL_CSTRING("if-unmodified-since"));
    AddStaticElement(NS_LITERAL_CSTRING("last-modified"));
    AddStaticElement(NS_LITERAL_CSTRING("link"));
    AddStaticElement(NS_LITERAL_CSTRING("location"));
    AddStaticElement(NS_LITERAL_CSTRING("max-forwards"));
    AddStaticElement(NS_LITERAL_CSTRING("proxy-authenticate"));
    AddStaticElement(NS_LITERAL_CSTRING("proxy-authorization"));
    AddStaticElement(NS_LITERAL_CSTRING("range"));
    AddStaticElement(NS_LITERAL_CSTRING("referer"));
    AddStaticElement(NS_LITERAL_CSTRING("refresh"));
    AddStaticElement(NS_LITERAL_CSTRING("retry-after"));
    AddStaticElement(NS_LITERAL_CSTRING("server"));
    AddStaticElement(NS_LITERAL_CSTRING("set-cookie"));
    AddStaticElement(NS_LITERAL_CSTRING("strict-transport-security"));
    AddStaticElement(NS_LITERAL_CSTRING("transfer-encoding"));
    AddStaticElement(NS_LITERAL_CSTRING("user-agent"));
    AddStaticElement(NS_LITERAL_CSTRING("vary"));
    AddStaticElement(NS_LITERAL_CSTRING("via"));
    AddStaticElement(NS_LITERAL_CSTRING("www-authenticate"));
  }
}
JNIEXPORT void JNICALL 
Java_org_mozilla_jss_ssl_SSLSocket_socketWrite(JNIEnv *env, jobject self, 
    jbyteArray bufBA, jint off, jint len, jint timeout)
{
    JSSL_SocketData *sock = NULL;
    jbyte *buf = NULL;
    jint size;
    PRIntervalTime ivtimeout;
    PRThread *me;
    PRInt32 numwrit;

    if( bufBA == NULL ) {
        JSS_throw(env, NULL_POINTER_EXCEPTION);
        goto finish;
    }

    size = (*env)->GetArrayLength(env, bufBA);
    if( off < 0 || len < 0 || (off+len) > size ) {
        JSS_throw(env, INDEX_OUT_OF_BOUNDS_EXCEPTION);
        goto finish;
    }

    buf = (*env)->GetByteArrayElements(env, bufBA, NULL);
    if( buf == NULL ) {
        goto finish;
    }

    ivtimeout = (timeout > 0) ? PR_MillisecondsToInterval(timeout)
                              : PR_INTERVAL_NO_TIMEOUT;

    /* get the socket */
    if( JSSL_getSockData(env, self, &sock) != PR_SUCCESS ) {
        goto finish;
    }
    /* set the current thread doing the write */
    me = PR_GetCurrentThread();
    PR_Lock(sock->lock);
    if ( sock->closePending ) {
       PR_Unlock(sock->lock);
       JSSL_throwSSLSocketException(env, "Write operation interrupted");
       goto finish;
    }
    PR_ASSERT(sock->writer == NULL);
    sock->writer = me;
    PR_Unlock(sock->lock);

    numwrit = PR_Send(sock->fd, buf+off, len, 0 /*flags*/, ivtimeout);

    PR_Lock(sock->lock);
    PR_ASSERT(sock->writer == me);
    sock->writer = NULL;
    PR_Unlock(sock->lock);

    if( numwrit < 0 ) {
        PRErrorCode err = PR_GetError();
        if( err == PR_PENDING_INTERRUPT_ERROR ) {
#ifdef WINNT
            /* clean up after PR_Interrupt called by abortReadWrite. */
            PR_NT_CancelIo(sock->fd);
#endif 
            JSSL_throwSSLSocketException(env, "Write operation interrupted");
        } else if( err == PR_IO_TIMEOUT_ERROR ) {
#ifdef WINNT
            /*
             * if timeout was set, and the PR_Send() timed out,
             * then cancel the I/O on the socket, otherwise PR_Send()
             * will always return PR_IO_PENDING_ERROR on subsequent
             * calls
             */
            PR_NT_CancelIo(sock->fd);
#endif 
            JSSL_throwSSLSocketException(env, "Operation timed out");
        } else {
            JSSL_throwSSLSocketException(env, "Failed to write to socket");
        }
        goto finish;
    }
    /* PR_Send is supposed to block until it sends everything */
    PR_ASSERT(numwrit == len);

finish:
    if( buf != NULL ) {
        (*env)->ReleaseByteArrayElements(env, bufBA, buf, JNI_ABORT);
    }
    EXCEPTION_CHECK(env, sock)
}
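The lock-protected sock->writer bookkeeping above exists so that another thread can interrupt a PR_Send() blocked inside this call; the interrupted send then fails with PR_PENDING_INTERRUPT_ERROR, which is handled a few lines up. A minimal sketch of the interrupting side, assuming the same JSSL_SocketData fields; this is not the actual abortReadWrite implementation, only the shape of the pattern:

static void JSSL_AbortBlockedIO(JSSL_SocketData *sock)
{
    /* Hypothetical helper: flag the socket as closing and interrupt any
     * thread currently registered as reader or writer, so its blocked
     * PR_Recv()/PR_Send() returns with PR_PENDING_INTERRUPT_ERROR. */
    PR_Lock(sock->lock);
    sock->closePending = PR_TRUE;
    if( sock->reader != NULL ) {
        (void) PR_Interrupt(sock->reader);
    }
    if( sock->writer != NULL ) {
        (void) PR_Interrupt(sock->writer);
    }
    PR_Unlock(sock->lock);
}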
Example #8
// static
JSBool XPCJSRuntime::GCCallback(JSContext *cx, JSGCStatus status)
{
    nsVoidArray* dyingWrappedJSArray;

    XPCJSRuntime* self = nsXPConnect::GetRuntime();
    if(self)
    {
        switch(status)
        {
            case JSGC_BEGIN:
            {
                if(!NS_IsMainThread())
                {
                    return JS_FALSE;
                }
                break;
            }
            case JSGC_MARK_END:
            {
                NS_ASSERTION(!self->mDoingFinalization, "bad state");
    
                // mThreadRunningGC indicates that GC is running
                { // scoped lock
                    XPCAutoLock lock(self->GetMapLock());
                    NS_ASSERTION(!self->mThreadRunningGC, "bad state");
                    self->mThreadRunningGC = PR_GetCurrentThread();
                }

                dyingWrappedJSArray = &self->mWrappedJSToReleaseArray;

                {
                    JSDyingJSObjectData data = {cx, dyingWrappedJSArray};

                    // Add any wrappers whose JSObjects are to be finalized to
                    // this array. Note that this is a nsVoidArray because
                    // we do not want to be changing the refcount of these wrappers.
                    // We add them to the array now and Release the array members
                    // later to avoid the possibility of doing any JS GCThing
                    // allocations during the gc cycle.
                    self->mWrappedJSMap->
                        Enumerate(WrappedJSDyingJSObjectFinder, &data);
                }

                // Do cleanup in NativeInterfaces. This part just finds 
                // member cloned function objects that are about to be 
                // collected. It does not deal with collection of interfaces or
                // sets at this point.
                CX_AND_XPCRT_Data data = {cx, self};

                self->mIID2NativeInterfaceMap->
                    Enumerate(NativeInterfaceGC, &data);

                // Find dying scopes...
                XPCWrappedNativeScope::FinishedMarkPhaseOfGC(cx, self);

                self->mDoingFinalization = JS_TRUE;
                break;
            }
            case JSGC_FINALIZE_END:
            {
                NS_ASSERTION(self->mDoingFinalization, "bad state");
                self->mDoingFinalization = JS_FALSE;

                // Release all the members whose JSObjects are now known
                // to be dead.

                dyingWrappedJSArray = &self->mWrappedJSToReleaseArray;
                while(1)
                {
                    nsXPCWrappedJS* wrapper;
                    PRInt32 count = dyingWrappedJSArray->Count();
                    if(!count)
                    {
                        dyingWrappedJSArray->Compact();
                        break;
                    }
                    wrapper = static_cast<nsXPCWrappedJS*>
                        (dyingWrappedJSArray->ElementAt(count-1));
                    dyingWrappedJSArray->RemoveElementAt(count-1);
                    NS_RELEASE(wrapper);
                }


#ifdef XPC_REPORT_NATIVE_INTERFACE_AND_SET_FLUSHING
                printf("--------------------------------------------------------------\n");
                int setsBefore = (int) self->mNativeSetMap->Count();
                int ifacesBefore = (int) self->mIID2NativeInterfaceMap->Count();
#endif

                // We use this occasion to mark and sweep NativeInterfaces,
                // NativeSets, and the WrappedNativeJSClasses...

                // Do the marking...
                XPCWrappedNativeScope::MarkAllWrappedNativesAndProtos();

                self->mDetachedWrappedNativeProtoMap->
                    Enumerate(DetachedWrappedNativeProtoMarker, nsnull);

                // Mark the sets used in the call contexts. There is a small
                // chance that a wrapper's set will change *while* a call is
                // happening which uses that wrapper's old interface set. So,
                // we need to do this marking to avoid collecting those sets
                // that might no longer be otherwise reachable from the wrappers
                // or the wrapperprotos.

                // Skip this part if XPConnect is shutting down. We get into
                // bad locking problems with the thread iteration otherwise.
                if(!self->GetXPConnect()->IsShuttingDown())
                {
                    PRLock* threadLock = XPCPerThreadData::GetLock();
                    if(threadLock)
                    { // scoped lock
                        nsAutoLock lock(threadLock);

                        XPCPerThreadData* iterp = nsnull;
                        XPCPerThreadData* thread;

                        while(nsnull != (thread =
                                     XPCPerThreadData::IterateThreads(&iterp)))
                        {
                            // Mark those AutoMarkingPtr lists!
                            thread->MarkAutoRootsAfterJSFinalize();

                            XPCCallContext* ccxp = thread->GetCallContext();
                            while(ccxp)
                            {
                                // Deal with the strictness of callcontext that
                                // complains if you ask for a set when
                                // it is in a state where the set could not
                                // possibly be valid.
                                if(ccxp->CanGetSet())
                                {
                                    XPCNativeSet* set = ccxp->GetSet();
                                    if(set)
                                        set->Mark();
                                }
                                if(ccxp->CanGetInterface())
                                {
                                    XPCNativeInterface* iface = ccxp->GetInterface();
                                    if(iface)
                                        iface->Mark();
                                }
                                ccxp = ccxp->GetPrevCallContext();
                            }
                        }
                    }
                }

                // Do the sweeping...

                // We don't want to sweep the JSClasses at shutdown time.
                // At this point there may be JSObjects using them that have
                // been removed from the other maps.
                if(!self->GetXPConnect()->IsShuttingDown())
                {
                    self->mNativeScriptableSharedMap->
                        Enumerate(JSClassSweeper, nsnull);
                }

                self->mClassInfo2NativeSetMap->
                    Enumerate(NativeUnMarkedSetRemover, nsnull);

                self->mNativeSetMap->
                    Enumerate(NativeSetSweeper, nsnull);

                CX_AND_XPCRT_Data data = {cx, self};

                self->mIID2NativeInterfaceMap->
                    Enumerate(NativeInterfaceSweeper, &data);

#ifdef DEBUG
                XPCWrappedNativeScope::ASSERT_NoInterfaceSetsAreMarked();
#endif

#ifdef XPC_REPORT_NATIVE_INTERFACE_AND_SET_FLUSHING
                int setsAfter = (int) self->mNativeSetMap->Count();
                int ifacesAfter = (int) self->mIID2NativeInterfaceMap->Count();

                printf("\n");
                printf("XPCNativeSets:        before: %d  collected: %d  remaining: %d\n",
                       setsBefore, setsBefore - setsAfter, setsAfter);
                printf("XPCNativeInterfaces:  before: %d  collected: %d  remaining: %d\n",
                       ifacesBefore, ifacesBefore - ifacesAfter, ifacesAfter);
                printf("--------------------------------------------------------------\n");
#endif

                // Sweep scopes needing cleanup
                XPCWrappedNativeScope::FinishedFinalizationPhaseOfGC(cx);

                // Now we are going to recycle any unused WrappedNativeTearoffs.
                // We do this by iterating all the live callcontexts (on all
                // threads!) and marking the tearoffs in use. And then we
                // iterate over all the WrappedNative wrappers and sweep their
                // tearoffs.
                //
                // This allows us to perhaps minimize the growth of the
                // tearoffs. And also makes us not hold references to interfaces
                // on our wrapped natives that we are not actually using.
                //
                // XXX We may decide to not do this on *every* gc cycle.

                // Skip this part if XPConnect is shutting down. We get into
                // bad locking problems with the thread iteration otherwise.
                if(!self->GetXPConnect()->IsShuttingDown())
                {
                    PRLock* threadLock = XPCPerThreadData::GetLock();
                    if(threadLock)
                    {
                        // Do the marking...
                        
                        { // scoped lock
                            nsAutoLock lock(threadLock);

                            XPCPerThreadData* iterp = nsnull;
                            XPCPerThreadData* thread;

                            while(nsnull != (thread =
                                     XPCPerThreadData::IterateThreads(&iterp)))
                            {
                                XPCCallContext* ccxp = thread->GetCallContext();
                                while(ccxp)
                                {
                                    // Deal with the strictness of callcontext that
                                    // complains if you ask for a tearoff when
                                    // it is in a state where the tearoff could not
                                    // possibly be valid.
                                    if(ccxp->CanGetTearOff())
                                    {
                                        XPCWrappedNativeTearOff* to = 
                                            ccxp->GetTearOff();
                                        if(to)
                                            to->Mark();
                                    }
                                    ccxp = ccxp->GetPrevCallContext();
                                }
                            }
                        }
    
                        // Do the sweeping...
                        XPCWrappedNativeScope::SweepAllWrappedNativeTearOffs();
                    }
                }

                // Now we need to kill the 'Dying' XPCWrappedNativeProtos.
                // We transferred these native objects to this table when their
                // JSObjects were finalized. We did not destroy them immediately
                // at that point because the ordering of JS finalization is not
                // deterministic and we did not yet know if any wrappers that
                // might still be referencing the protos were still yet to be
                // finalized and destroyed. We *do* know that the protos'
                // JSObjects would not have been finalized if there were any
                // wrappers that referenced the proto but were not themselves
                // slated for finalization in this gc cycle. So... at this point
                // we know that any and all wrappers that might have been
                // referencing the protos in the dying list are themselves dead.
                // So, we can safely delete all the protos in the list.

                self->mDyingWrappedNativeProtoMap->
                    Enumerate(DyingProtoKiller, nsnull);


                // mThreadRunningGC indicates that GC is running.
                // Clear it and notify waiters.
                { // scoped lock
                    XPCAutoLock lock(self->GetMapLock());
                    NS_ASSERTION(self->mThreadRunningGC == PR_GetCurrentThread(), "bad state");
                    self->mThreadRunningGC = nsnull;
                    xpc_NotifyAll(self->GetMapLock());
                }

                break;
            }
            case JSGC_END:
            {
                // NOTE that this event happens outside of the gc lock in
                // the js engine. So this could be simultaneous with the
                // events above.

                // Do any deferred released of native objects.
                nsVoidArray* array = &self->mNativesToReleaseArray;
#ifdef XPC_TRACK_DEFERRED_RELEASES
                printf("XPC - Begin deferred Release of %d nsISupports pointers\n",
                       array->Count());
#endif
                while(1)
                {
                    nsISupports* obj;
                    {
                        PRInt32 count = array->Count();
                        if(!count)
                        {
                            array->Compact();
                            break;
                        }
                        obj = reinterpret_cast<nsISupports*>
                            (array->ElementAt(count-1));
                        array->RemoveElementAt(count-1);
                    }
                    NS_RELEASE(obj);
                }
#ifdef XPC_TRACK_DEFERRED_RELEASES
                printf("XPC - End deferred Releases\n");
#endif
                break;
            }
            default:
                break;
        }
    }

    // always chain to old GCCallback if non-null.
    return gOldJSGCCallback ? gOldJSGCCallback(cx, status) : JS_TRUE;
}
Example #9
/* Caller holds the SSL Socket's write lock. SSL_LOCK_WRITER(ss) */
int
ssl_SecureSend(sslSocket *ss, const unsigned char *buf, int len, int flags)
{
    int rv = 0;

    SSL_TRC(2, ("%d: SSL[%d]: SecureSend: sending %d bytes",
		SSL_GETPID(), ss->fd, len));

    if (ss->shutdownHow & ssl_SHUTDOWN_SEND) {
	PORT_SetError(PR_SOCKET_SHUTDOWN_ERROR);
    	rv = PR_FAILURE;
	goto done;
    }
    if (flags) {
	PORT_SetError(PR_INVALID_ARGUMENT_ERROR);
    	rv = PR_FAILURE;
	goto done;
    }

    ssl_GetXmitBufLock(ss);
    if (ss->pendingBuf.len != 0) {
	PORT_Assert(ss->pendingBuf.len > 0);
	rv = ssl_SendSavedWriteData(ss);
	if (rv >= 0 && ss->pendingBuf.len != 0) {
	    PORT_Assert(ss->pendingBuf.len > 0);
	    PORT_SetError(PR_WOULD_BLOCK_ERROR);
	    rv = SECFailure;
	}
    }
    ssl_ReleaseXmitBufLock(ss);
    if (rv < 0) {
	goto done;
    }

    if (len > 0) 
    	ss->writerThread = PR_GetCurrentThread();
    /* If any of these is non-zero, the initial handshake is not done. */
    if (!ss->firstHsDone) {
	PRBool falseStart = PR_FALSE;
	ssl_Get1stHandshakeLock(ss);
	if (ss->opt.enableFalseStart &&
	    ss->version >= SSL_LIBRARY_VERSION_3_0) {
	    ssl_GetSSL3HandshakeLock(ss);
	    falseStart = ss->ssl3.hs.canFalseStart;
	    ssl_ReleaseSSL3HandshakeLock(ss);
	}
	if (!falseStart &&
	    (ss->handshake || ss->nextHandshake || ss->securityHandshake)) {
	    rv = ssl_Do1stHandshake(ss);
	}
	ssl_Release1stHandshakeLock(ss);
    }
    if (rv < 0) {
    	ss->writerThread = NULL;
	goto done;
    }

    /* Check for zero length writes after we do housekeeping so we make forward
     * progress.
     */
    if (len == 0) {
    	rv = 0;
	goto done;
    }
    PORT_Assert(buf != NULL);
    if (!buf) {
	PORT_SetError(PR_INVALID_ARGUMENT_ERROR);
    	rv = PR_FAILURE;
	goto done;
    }

    if (!ss->firstHsDone) {
	PORT_Assert(ss->version >= SSL_LIBRARY_VERSION_3_0);
#ifdef DEBUG
	ssl_GetSSL3HandshakeLock(ss);
	PORT_Assert(ss->ssl3.hs.canFalseStart);
	ssl_ReleaseSSL3HandshakeLock(ss);
#endif
	SSL_TRC(3, ("%d: SSL[%d]: SecureSend: sending data due to false start",
		    SSL_GETPID(), ss->fd));
    }

    /* Send out the data using one of these functions:
     *	ssl2_SendClear, ssl2_SendStream, ssl2_SendBlock, 
     *  ssl3_SendApplicationData
     */
    ssl_GetXmitBufLock(ss);
    rv = (*ss->sec.send)(ss, buf, len, flags);
    ssl_ReleaseXmitBufLock(ss);
    ss->writerThread = NULL;
done:
    if (rv < 0) {
	SSL_TRC(2, ("%d: SSL[%d]: SecureSend: returning %d count, error %d",
		    SSL_GETPID(), ss->fd, rv, PORT_GetError()));
    } else {
	SSL_TRC(2, ("%d: SSL[%d]: SecureSend: returning %d count",
		    SSL_GETPID(), ss->fd, rv));
    }
    return rv;
}
Example #10
nsTimelineServiceTimer::nsTimelineServiceTimer()
: mAccum(LL_ZERO), mStart(LL_ZERO), mRunning(0),
  mOwnerThread(PR_GetCurrentThread())
{
}
static void *
prldap_get_thread_id( void )
{
    return( (void *)PR_GetCurrentThread());
}
Example #12
    void Print(const char* aName, LogLevel aLevel, const char* aFmt, va_list aArgs)
    {
        const size_t kBuffSize = 1024;
        char buff[kBuffSize];

        char* buffToWrite = buff;

        // For backwards compat we need to use the NSPR format string versions
        // of sprintf and friends and then hand off to printf.
        va_list argsCopy;
        va_copy(argsCopy, aArgs);
        size_t charsWritten = PR_vsnprintf(buff, kBuffSize, aFmt, argsCopy);
        va_end(argsCopy);

        if (charsWritten == kBuffSize - 1) {
            // We may have maxed out, allocate a buffer instead.
            buffToWrite = PR_vsmprintf(aFmt, aArgs);
            charsWritten = strlen(buffToWrite);
        }

        // Determine if a newline needs to be appended to the message.
        const char* newline = "";
        if (charsWritten == 0 || buffToWrite[charsWritten - 1] != '\n') {
            newline = "\n";
        }

        FILE* out = mOutFile ? mOutFile : stderr;

        // This differs from the NSPR format in that we do not output the
        // opaque system specific thread pointer (ie pthread_t) cast
        // to a long. The address of the current PR_Thread continues to be
        // prefixed.
        //
        // Additionally we prefix the output with the abbreviated log level
        // and the module name.
        PRThread *currentThread = PR_GetCurrentThread();
        const char *currentThreadName = (mMainThread == currentThread)
                                        ? "Main Thread"
                                        : PR_GetThreadName(currentThread);

        char noNameThread[40];
        if (!currentThreadName) {
            snprintf_literal(noNameThread, "Unnamed thread %p", currentThread);
            currentThreadName = noNameThread;
        }

        if (!mAddTimestamp) {
            fprintf_stderr(out,
                           "[%s]: %s/%s %s%s",
                           currentThreadName, ToLogStr(aLevel),
                           aName, buffToWrite, newline);
        } else {
            PRExplodedTime now;
            PR_ExplodeTime(PR_Now(), PR_GMTParameters, &now);
            fprintf_stderr(
                out,
                "%04d-%02d-%02d %02d:%02d:%02d.%06d UTC - [%s]: %s/%s %s%s",
                now.tm_year, now.tm_month + 1, now.tm_mday,
                now.tm_hour, now.tm_min, now.tm_sec, now.tm_usec,
                currentThreadName, ToLogStr(aLevel),
                aName, buffToWrite, newline);
        }

        if (mIsSync) {
            fflush(out);
        }

        if (buffToWrite != buff) {
            PR_smprintf_free(buffToWrite);
        }
    }
Example #13
PR_IMPLEMENT(void) PR_LogPrint(const char *fmt, ...)
{
    va_list ap;
    char line[LINE_BUF_SIZE];
    char *line_long = NULL;
    PRUint32 nb_tid = 0, nb;
    PRThread *me;
    PRExplodedTime now;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (!logFile) {
        return;
    }

    if (outputTimeStamp) {
        PR_ExplodeTime(PR_Now(), PR_GMTParameters, &now);
        nb_tid = PR_snprintf(line, sizeof(line)-1,
                             "%04d-%02d-%02d %02d:%02d:%02d.%06d UTC - ",
                             now.tm_year, now.tm_month + 1, now.tm_mday,
                             now.tm_hour, now.tm_min, now.tm_sec,
                             now.tm_usec);
    }

    me = PR_GetCurrentThread();
    nb_tid += PR_snprintf(line+nb_tid, sizeof(line)-nb_tid-1, "%ld[%p]: ",
#if defined(_PR_BTHREADS)
                          me, me);
#else
                          me ? me->id : 0L, me);
#endif

    va_start(ap, fmt);
    nb = nb_tid + PR_vsnprintf(line+nb_tid, sizeof(line)-nb_tid-1, fmt, ap);
    va_end(ap);

    /*
     * Check if we might have run out of buffer space (in case we have a
     * long line), and malloc a buffer just this once.
     */
    if (nb == sizeof(line)-2) {
        va_start(ap, fmt);
        line_long = PR_vsmprintf(fmt, ap);
        va_end(ap);
        /* If this failed, we'll fall back to writing the truncated line. */
    }

    if (line_long) {
        nb = strlen(line_long);
        _PR_LOCK_LOG();
        if (logBuf != 0) {
            _PUT_LOG(logFile, logBuf, logp - logBuf);
            logp = logBuf;
        }
        /*
         * Write out the thread id (with an optional timestamp) and the
         * malloc'ed buffer.
         */
        _PUT_LOG(logFile, line, nb_tid);
        _PUT_LOG(logFile, line_long, nb);
        /* Ensure there is a trailing newline. */
        if (!nb || (line_long[nb-1] != '\n')) {
            char eol[2];
            eol[0] = '\n';
            eol[1] = '\0';
            _PUT_LOG(logFile, eol, 1);
        }
        _PR_UNLOCK_LOG();
        PR_smprintf_free(line_long);
    } else {
        /* Ensure there is a trailing newline. */
        if (nb && (line[nb-1] != '\n')) {
            line[nb++] = '\n';
            line[nb] = '\0';
        }
        _PR_LOCK_LOG();
        if (logBuf == 0) {
            _PUT_LOG(logFile, line, nb);
        } else {
            /* If nb can't fit into logBuf, write out logBuf first. */
            if (logp + nb > logEndp) {
                _PUT_LOG(logFile, logBuf, logp - logBuf);
                logp = logBuf;
            }
            /* nb is guaranteed to fit into logBuf. */
            memcpy(logp, line, nb);
            logp += nb;
        }
        _PR_UNLOCK_LOG();
    }
    PR_LogFlush();
}
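PR_LogPrint() can also be called directly from application code; it prepends the thread-id prefix (and optional timestamp) shown above and appends a newline if the format string lacks one. A trivial usage line, assuming NSPR logging already has a destination (for example via the NSPR_LOG_FILE environment variable):

PR_LogPrint("worker %p finished pass %d", PR_GetCurrentThread(), 3);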
Example #14
static PRStatus _MW_PollInternal(PRWaitGroup *group)
{
    PRRecvWait **waiter;
    PRStatus rv = PR_FAILURE;
    PRInt32 count, count_ready;
    PRIntervalTime polling_interval;

    group->poller = PR_GetCurrentThread();

    while (PR_TRUE)
    {
        PRIntervalTime now, since_last_poll;
        PRPollDesc *poll_list;

        while (0 == group->waiter->count)
        {
            PRStatus st;
            st = PR_WaitCondVar(group->new_business, PR_INTERVAL_NO_TIMEOUT);
            if (_prmw_running != group->state)
            {
                PR_SetError(PR_INVALID_STATE_ERROR, 0);
                goto aborted;
            }
            if (_MW_ABORTED(st)) goto aborted;
        }

        /*
        ** There's something to do. See if our existing polling list
        ** is large enough for what we have to do?
        */

        while (group->polling_count < group->waiter->count)
        {
            PRUint32 old_count = group->waiter->count;
            PRUint32 new_count = PR_ROUNDUP(old_count, _PR_POLL_COUNT_FUDGE);
            PRSize new_size = sizeof(PRPollDesc) * new_count;
            PRPollDesc *old_polling_list = group->polling_list;

            PR_Unlock(group->ml);
            poll_list = (PRPollDesc*)PR_CALLOC(new_size);
            if (NULL == poll_list)
            {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                PR_Lock(group->ml);
                goto failed_alloc;
            }
            if (NULL != old_polling_list)
                PR_DELETE(old_polling_list);
            PR_Lock(group->ml);
            if (_prmw_running != group->state)
            {
                PR_SetError(PR_INVALID_STATE_ERROR, 0);
                goto aborted;
            }
            group->polling_list = poll_list;
            group->polling_count = new_count;
        }

        now = PR_IntervalNow();
        polling_interval = max_polling_interval;
        since_last_poll = now - group->last_poll;

        waiter = &group->waiter->recv_wait;
        poll_list = group->polling_list;
        for (count = 0; count < group->waiter->count; ++waiter)
        {
            PR_ASSERT(waiter < &group->waiter->recv_wait
                + group->waiter->length);
            if (NULL != *waiter)  /* a live one! */
            {
                if ((PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout)
                && (since_last_poll >= (*waiter)->timeout))
                    _MW_DoneInternal(group, waiter, PR_MW_TIMEOUT);
                else
                {
                    if (PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout)
                    {
                        (*waiter)->timeout -= since_last_poll;
                        if ((*waiter)->timeout < polling_interval)
                            polling_interval = (*waiter)->timeout;
                    }
                    PR_ASSERT(poll_list < group->polling_list
                        + group->polling_count);
                    poll_list->fd = (*waiter)->fd;
                    poll_list->in_flags = PR_POLL_READ;
                    poll_list->out_flags = 0;
#if 0
                    printf(
                        "Polling 0x%x[%d]: [fd: 0x%x, tmo: %u]\n",
                        poll_list, count, poll_list->fd, (*waiter)->timeout);
#endif
                    poll_list += 1;
                    count += 1;
                }
            }
        } 

        PR_ASSERT(count == group->waiter->count);

        /*
        ** If there are no more threads waiting for completion,
        ** we need to return.
        */
        if ((!PR_CLIST_IS_EMPTY(&group->io_ready))
        && (1 == group->waiting_threads)) break;

        if (0 == count) continue;  /* wait for new business */

        group->last_poll = now;

        PR_Unlock(group->ml);

        count_ready = PR_Poll(group->polling_list, count, polling_interval);

        PR_Lock(group->ml);

        if (_prmw_running != group->state)
        {
            PR_SetError(PR_INVALID_STATE_ERROR, 0);
            goto aborted;
        }
        if (-1 == count_ready)
        {
            goto failed_poll;  /* that's a shame */
        }
        else if (0 < count_ready)
        {
            for (poll_list = group->polling_list; count > 0;
            poll_list++, count--)
            {
                PR_ASSERT(
                    poll_list < group->polling_list + group->polling_count);
                if (poll_list->out_flags != 0)
                {
                    waiter = _MW_LookupInternal(group, poll_list->fd);
                    /*
                    ** If 'waiter' is NULL, that means the wait receive
                    ** descriptor has been canceled.
                    */
                    if (NULL != waiter)
                        _MW_DoneInternal(group, waiter, PR_MW_SUCCESS);
                }
            }
        }
        /*
        ** If there are no more threads waiting for completion,
        ** we need to return.
        ** This thread was "borrowed" to do the polling, but it really
        ** belongs to the client.
        */
        if ((!PR_CLIST_IS_EMPTY(&group->io_ready))
        && (1 == group->waiting_threads)) break;
    }

    rv = PR_SUCCESS;

aborted:
failed_poll:
failed_alloc:
    group->poller = NULL;  /* we were that, now we ain't */
    if ((_prmw_running == group->state) && (group->waiting_threads > 1))
    {
        /* Wake up one thread to become the new poller. */
        PR_NotifyCondVar(group->io_complete);
    }
    return rv;  /* we return with the lock held */
}  /* _MW_PollInternal */
Example #15
PR_IMPLEMENT(PRStatus) PR_SetThreadPrivate(PRUintn index, void *priv)
{
    PRStatus rv = PR_SUCCESS;
    PRThread *self = PR_GetCurrentThread();

    /*
    ** The index being set might not have a sufficient vector in this
    ** thread. But if the index has been allocated, it's okay to go
    ** ahead and extend this one now.
    */
    if (index >= _pr_tpd_highwater)
    {
        PR_ASSERT(index < _pr_tpd_highwater);
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
	    rv = PR_FAILURE;
    }
    else
    {
		PRIntn _is;

        _PR_LOCK_TPINDEX();
        if ((NULL == self->privateData) || (self->tpdLength <= index))
        {
            void *extension = PR_CALLOC(_pr_tpd_length * sizeof(void*));
            PR_ASSERT(
                ((NULL == self->privateData) && (0 == self->tpdLength))
                || ((NULL != self->privateData) && (0 != self->tpdLength)));
            if (NULL != extension)
            {
                (void)memcpy(
                    extension, self->privateData,
                    self->tpdLength * sizeof(void*));
                self->tpdLength = _pr_tpd_length;
                self->privateData = extension;
            }
        }
        /*
        ** There wasn't much chance of having to call the destructor
        ** unless the slot already existed.
        */
        else if (self->privateData[index] && _pr_tpd_destructors[index])
        {
            void *data = self->privateData[index];
            self->privateData[index] = NULL;
	        _PR_UNLOCK_TPINDEX();
	        (*_pr_tpd_destructors[index])(data);
            _PR_LOCK_TPINDEX();
        }

        /*
        ** If the thread's private data is still NULL, then we couldn't
        ** fix the problem. We must be outa-memory (again).
        */
        if (NULL == self->privateData)
        {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            rv = PR_FAILURE;
        }
        else self->privateData[index] = priv;
	    _PR_UNLOCK_TPINDEX();
	}

	return rv;
}
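For context, a caller-side sketch of the thread-private-data API this function implements: the index comes from PR_NewThreadPrivateIndex(), the destructor runs when the owning thread exits or the slot is overwritten, and PR_GetThreadPrivate() reads the value back on the same thread. The names below are illustrative only:

static PRUintn tpdIndex;

static void MyDestructor(void *priv)
{
    PR_Free(priv);  /* invoked for a non-NULL value on thread exit or overwrite */
}

static PRStatus InitTpdIndex(void)
{
    /* Allocate one process-wide index, typically once at startup. */
    return PR_NewThreadPrivateIndex(&tpdIndex, MyDestructor);
}

static void UsePerThreadValue(void)
{
    void *mine;
    /* Attach a value to the calling thread... */
    PR_SetThreadPrivate(tpdIndex, PR_Malloc(32));
    /* ...and read it back later on the same thread. */
    mine = PR_GetThreadPrivate(tpdIndex);
    (void)mine;
}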
Example #16
bool
RWLock::LockedForWritingByCurrentThread()
{
  return mOwningThread == PR_GetCurrentThread();
}
NS_IMETHODIMP
WorkerThread::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable, uint32_t aFlags)
{
    // May be called on any thread!
    nsCOMPtr<nsIRunnable> runnable(aRunnable); // in case we exit early

    // Workers only support asynchronous dispatch.
    if (NS_WARN_IF(aFlags != NS_DISPATCH_NORMAL)) {
        return NS_ERROR_UNEXPECTED;
    }

    const bool onWorkerThread = PR_GetCurrentThread() == mThread;

#ifdef DEBUG
    if (runnable && !onWorkerThread) {
        nsCOMPtr<nsICancelableRunnable> cancelable = do_QueryInterface(runnable);

        {
            MutexAutoLock lock(mLock);

            // Only enforce cancelable runnables after we've started the worker loop.
            if (!mAcceptingNonWorkerRunnables) {
                MOZ_ASSERT(cancelable,
                           "Only nsICancelableRunnable may be dispatched to a worker!");
            }
        }
    }
#endif

    WorkerPrivate* workerPrivate = nullptr;
    if (onWorkerThread) {
        // No need to lock here because it is only modified on this thread.
        MOZ_ASSERT(mWorkerPrivate);
        mWorkerPrivate->AssertIsOnWorkerThread();

        workerPrivate = mWorkerPrivate;
    } else {
        MutexAutoLock lock(mLock);

        MOZ_ASSERT(mOtherThreadsDispatchingViaEventTarget < UINT32_MAX);

        if (mWorkerPrivate) {
            workerPrivate = mWorkerPrivate;

            // Incrementing this counter will make the worker thread sleep if it
            // somehow tries to unset mWorkerPrivate while we're using it.
            mOtherThreadsDispatchingViaEventTarget++;
        }
    }

    nsresult rv;
    if (runnable && onWorkerThread) {
        RefPtr<WorkerRunnable> workerRunnable = workerPrivate->MaybeWrapAsWorkerRunnable(runnable.forget());
        rv = nsThread::Dispatch(workerRunnable.forget(), NS_DISPATCH_NORMAL);
    } else {
        rv = nsThread::Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
    }

    if (!onWorkerThread && workerPrivate) {
        // We need to wake the worker thread if we're not already on the right
        // thread and the dispatch succeeded.
        if (NS_SUCCEEDED(rv)) {
            MutexAutoLock workerLock(workerPrivate->mMutex);

            workerPrivate->mCondVar.Notify();
        }

        // Now unset our waiting flag.
        {
            MutexAutoLock lock(mLock);

            MOZ_ASSERT(mOtherThreadsDispatchingViaEventTarget);

            if (!--mOtherThreadsDispatchingViaEventTarget) {
                mWorkerPrivateCondVar.Notify();
            }
        }
    }

    if (NS_WARN_IF(NS_FAILED(rv))) {
        return rv;
    }

    return NS_OK;
}
NS_IMETHODIMP
nsEventQueueServiceImpl::CreateThreadEventQueue()
{
  return CreateEventQueue(PR_GetCurrentThread(), PR_TRUE);
}
JNIEXPORT jint JNICALL 
Java_org_mozilla_jss_ssl_SSLSocket_socketRead(JNIEnv *env, jobject self, 
    jbyteArray bufBA, jint off, jint len, jint timeout)
{
    JSSL_SocketData *sock = NULL;
    jbyte *buf = NULL;
    jint size;
    PRIntervalTime ivtimeout;
    PRThread *me;
    jint nread = -1;
    
    size = (*env)->GetArrayLength(env, bufBA);
    if( off < 0 || len < 0 || (off+len) > size) {
        JSS_throw(env, INDEX_OUT_OF_BOUNDS_EXCEPTION);
        goto finish;
    }

    buf = (*env)->GetByteArrayElements(env, bufBA, NULL);
    if( buf == NULL ) {
        goto finish;
    }

    ivtimeout = (timeout > 0) ? PR_MillisecondsToInterval(timeout)
                              : PR_INTERVAL_NO_TIMEOUT;

    /* get the socket */
    if( JSSL_getSockData(env, self, &sock) != PR_SUCCESS ) {
        goto finish;
    }

    /* set the current thread doing the read */
    me = PR_GetCurrentThread();
    PR_Lock(sock->lock);
    if ( sock->closePending ) {
       PR_Unlock(sock->lock);
       JSSL_throwSSLSocketException(env, "Read operation interrupted");
       goto finish;
    }
    PR_ASSERT(sock->reader == NULL);
    sock->reader = me;
    PR_Unlock(sock->lock);

    nread = PR_Recv(sock->fd, buf+off, len, 0 /*flags*/, ivtimeout);

    PR_Lock(sock->lock);
    PR_ASSERT(sock->reader == me);
    sock->reader = NULL; 
    PR_Unlock(sock->lock);

    if( nread < 0 ) {
        PRErrorCode err = PR_GetError();

        if( err == PR_PENDING_INTERRUPT_ERROR ) {
#ifdef WINNT
            /* Clean up after PR_Interrupt called by abortReadWrite. */
            PR_NT_CancelIo(sock->fd);
#endif 
            JSSL_throwSSLSocketException(env, "Read operation interrupted");
        } else if( err == PR_IO_TIMEOUT_ERROR ) {
#ifdef WINNT
            /*
             * if timeout was set, and the PR_Recv timed out,
             * then cancel the I/O on the socket, otherwise PR_Recv()
             * will always return PR_IO_PENDING_ERROR on subsequent
             * calls
             */
            PR_NT_CancelIo(sock->fd);
#endif
            JSSL_throwSSLSocketException(env, "Operation timed out");
        } else {
            JSSL_throwSSLSocketException(env, "Error reading from socket");
        }
        goto finish;
    }

    if( nread == 0 ) {
        /* EOF in Java is -1 */
        nread = -1;
    }

finish:
    EXCEPTION_CHECK(env, sock)
    (*env)->ReleaseByteArrayElements(env, bufBA, buf,
        (nread>0) ? 0 /*copy and free*/ : JNI_ABORT /*free, no copy*/);
    return nread;
}
NS_IMETHODIMP
nsEventQueueServiceImpl::CreateMonitoredThreadEventQueue()
{
  return CreateEventQueue(PR_GetCurrentThread(), PR_FALSE);
}
bool CacheIOThread::IsCurrentThread()
{
  return mThread == PR_GetCurrentThread();
}
Example #22
nsresult
SpdyConnectTransaction::ReadSegments(nsAHttpSegmentReader *reader,
                                     uint32_t count,
                                     uint32_t *countRead)
{
  MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
  LOG(("SpdyConnectTransaction::ReadSegments %p count %d conn %p\n",
       this, count, mTunneledConn.get()));

  mSegmentReader = reader;

  // spdy stream carrying tunnel is not setup yet.
  if (!mTunneledConn) {
    uint32_t toWrite = mConnectString.Length() - mConnectStringOffset;
    toWrite = std::min(toWrite, count);
    *countRead = toWrite;
    if (toWrite) {
      nsresult rv = mSegmentReader->
        OnReadSegment(mConnectString.BeginReading() + mConnectStringOffset,
                      toWrite, countRead);
      if (NS_FAILED(rv) && (rv != NS_BASE_STREAM_WOULD_BLOCK)) {
        LOG(("SpdyConnectTransaction::ReadSegments %p OnReadSegmentError %x\n",
             this, rv));
        CreateShimError(rv);
      } else {
        mConnectStringOffset += toWrite;
        if (mConnectString.Length() == mConnectStringOffset) {
          mConnectString.Truncate();
          mConnectStringOffset = 0;
        }
      }
      return rv;
    }
    return NS_BASE_STREAM_WOULD_BLOCK;
  }

  if (mForcePlainText) {
    // this path just ignores sending the request so that we can
    // send a synthetic reply in WriteSegments()
    LOG(("SpdyConnectTransaction::ReadSegments %p dropping %d output bytes "
         "due to synthetic reply\n", this, mOutputDataUsed - mOutputDataOffset));
    *countRead = mOutputDataUsed - mOutputDataOffset;
    mOutputDataOffset = mOutputDataUsed = 0;
    mTunneledConn->DontReuse();
    return NS_OK;
  }

  *countRead = 0;
  Flush(count, countRead);
  if (!mTunnelStreamOut->mCallback) {
    return NS_BASE_STREAM_WOULD_BLOCK;
  }

  nsresult rv =
    mTunnelStreamOut->mCallback->OnOutputStreamReady(mTunnelStreamOut);
  if (NS_FAILED(rv)) {
    return rv;
  }

  uint32_t subtotal;
  count -= *countRead;
  rv = Flush(count, &subtotal);
  *countRead += subtotal;
  return rv;
}
Example #23
 void AssertThreadSafety()
 {
   MOZ_ASSERT(mThread == PR_GetCurrentThread(),
              "XPCLocaleCallbacks used unsafely!");
 }
Example #24
NS_IMETHODIMP
nsHTTPDownloadEvent::Run()
{
  if (!mListener)
    return NS_OK;

  nsresult rv;

  nsCOMPtr<nsIIOService> ios = do_GetIOService();
  NS_ENSURE_STATE(ios);

  nsCOMPtr<nsIChannel> chan;
  ios->NewChannel(mRequestSession->mURL, nsnull, nsnull, getter_AddRefs(chan));
  NS_ENSURE_STATE(chan);

  // Create a loadgroup for this new channel.  This way if the channel
  // is redirected, we'll have a way to cancel the resulting channel.
  nsCOMPtr<nsILoadGroup> lg = do_CreateInstance(NS_LOADGROUP_CONTRACTID);
  chan->SetLoadGroup(lg);

  if (mRequestSession->mHasPostData)
  {
    nsCOMPtr<nsIInputStream> uploadStream;
    rv = NS_NewPostDataStream(getter_AddRefs(uploadStream),
                              PR_FALSE,
                              mRequestSession->mPostData,
                              0, ios);
    NS_ENSURE_SUCCESS(rv, rv);

    nsCOMPtr<nsIUploadChannel> uploadChannel(do_QueryInterface(chan));
    NS_ENSURE_STATE(uploadChannel);

    rv = uploadChannel->SetUploadStream(uploadStream, 
                                        mRequestSession->mPostContentType,
                                        -1);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  nsCOMPtr<nsIHttpChannel> hchan = do_QueryInterface(chan);
  NS_ENSURE_STATE(hchan);

  rv = hchan->SetRequestMethod(mRequestSession->mRequestMethod);
  NS_ENSURE_SUCCESS(rv, rv);

  mResponsibleForDoneSignal = PR_FALSE;
  mListener->mResponsibleForDoneSignal = PR_TRUE;

  mListener->mLoadGroup = lg.get();
  NS_ADDREF(mListener->mLoadGroup);
  mListener->mLoadGroupOwnerThread = PR_GetCurrentThread();

  rv = NS_NewStreamLoader(getter_AddRefs(mListener->mLoader), 
                          mListener);

  if (NS_SUCCEEDED(rv))
    rv = hchan->AsyncOpen(mListener->mLoader, nsnull);

  if (NS_FAILED(rv)) {
    mListener->mResponsibleForDoneSignal = PR_FALSE;
    mResponsibleForDoneSignal = PR_TRUE;

    NS_RELEASE(mListener->mLoadGroup);
    mListener->mLoadGroup = nsnull;
    mListener->mLoadGroupOwnerThread = nsnull;
  }

  return NS_OK;
}
Example #25
static int PR_MyThreadId(void) {
	PRThread *thr = PR_GetCurrentThread();
	PRUint32 myself = PR_GetThreadID(thr);
	return myself;
}
Example #26
void
IDBFactory::AssertIsOnOwningThread() const
{
  MOZ_ASSERT(mOwningThread);
  MOZ_ASSERT(PR_GetCurrentThread() == mOwningThread);
}
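Several examples here (nsTimelineServiceTimer, RWLock, IDBFactory, FileSystemBase) follow the same idiom: capture PR_GetCurrentThread() when the object is created, then assert pointer equality later, since the PRThread pointer stays stable for the life of the thread. A stripped-down sketch of that idiom with hypothetical names:

typedef struct OwnedObject {
    PRThread *owningThread;    /* captured at creation time */
    /* ... object state ... */
} OwnedObject;

static void OwnedObject_Init(OwnedObject *obj)
{
    obj->owningThread = PR_GetCurrentThread();
}

static void OwnedObject_AssertIsOnOwningThread(const OwnedObject *obj)
{
    /* PR_GetCurrentThread() returns the same PRThread* every time it is
     * called on a given thread, so pointer comparison identifies the thread. */
    PR_ASSERT(NULL != obj->owningThread);
    PR_ASSERT(PR_GetCurrentThread() == obj->owningThread);
}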
Example #27
void
nsHttpTransaction::OnTransportStatus(nsITransport* transport,
                                     nsresult status, PRUint64 progress)
{
    LOG(("nsHttpTransaction::OnSocketStatus [this=%x status=%x progress=%llu]\n",
        this, status, progress));

    if (!mTransportSink)
        return;

    NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");

    // Need to do this before the STATUS_RECEIVING_FROM check below, to make
    // sure that the activity distributor gets told about all status events.
    if (mActivityDistributor) {
        // upon STATUS_WAITING_FOR; report request body sent
        if ((mHasRequestBody) &&
            (status == nsISocketTransport::STATUS_WAITING_FOR))
            mActivityDistributor->ObserveActivity(
                mChannel,
                NS_HTTP_ACTIVITY_TYPE_HTTP_TRANSACTION,
                NS_HTTP_ACTIVITY_SUBTYPE_REQUEST_BODY_SENT,
                PR_Now(), LL_ZERO, EmptyCString());

        // report the status and progress
        mActivityDistributor->ObserveActivity(
            mChannel,
            NS_HTTP_ACTIVITY_TYPE_SOCKET_TRANSPORT,
            static_cast<PRUint32>(status),
            PR_Now(),
            progress,
            EmptyCString());
    }

    // nsHttpChannel synthesizes progress events in OnDataAvailable
    if (status == nsISocketTransport::STATUS_RECEIVING_FROM)
        return;

    PRUint64 progressMax;

    if (status == nsISocketTransport::STATUS_SENDING_TO) {
        // suppress progress when only writing request headers
        if (!mHasRequestBody)
            return;

        nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mRequestStream);
        NS_ASSERTION(seekable, "Request stream isn't seekable?!?");

        PRInt64 prog = 0;
        seekable->Tell(&prog);
        progress = prog;

        // when uploading, we include the request headers in the progress
        // notifications.
        progressMax = mRequestSize; // XXX mRequestSize is 32-bit!
    }
    else {
        progress = LL_ZERO;
        progressMax = 0;
    }

    mTransportSink->OnTransportStatus(transport, status, progress, progressMax);
}
Example #28
void
FileSystemBase::AssertIsOnOwningThread() const
{
  MOZ_ASSERT(mOwningThread);
  MOZ_ASSERT(PR_GetCurrentThread() == mOwningThread);
}
Example #29
  void Print(const char* aName, LogLevel aLevel, const char* aFmt, va_list aArgs)
  {
    const size_t kBuffSize = 1024;
    char buff[kBuffSize];

    char* buffToWrite = buff;

    // For backwards compat we need to use the NSPR format string versions
    // of sprintf and friends and then hand off to printf.
    va_list argsCopy;
    va_copy(argsCopy, aArgs);
    size_t charsWritten = PR_vsnprintf(buff, kBuffSize, aFmt, argsCopy);
    va_end(argsCopy);

    if (charsWritten == kBuffSize - 1) {
      // We may have maxed out, allocate a buffer instead.
      buffToWrite = PR_vsmprintf(aFmt, aArgs);
      charsWritten = strlen(buffToWrite);
    }

    // Determine if a newline needs to be appended to the message.
    const char* newline = "";
    if (charsWritten == 0 || buffToWrite[charsWritten - 1] != '\n') {
      newline = "\n";
    }

    FILE* out = stderr;

    // In case we use rotate, this ensures the FILE is kept alive during
    // its use.  Increased before we load mOutFile.
    ++mPrintEntryCount;

    detail::LogFile* outFile = mOutFile;
    if (outFile) {
      out = outFile->File();
    }

    // This differs from the NSPR format in that we do not output the
    // opaque system specific thread pointer (ie pthread_t) cast
    // to a long. The address of the current PR_Thread continues to be
    // prefixed.
    //
    // Additionally we prefix the output with the abbreviated log level
    // and the module name.
    PRThread *currentThread = PR_GetCurrentThread();
    const char *currentThreadName = (mMainThread == currentThread)
      ? "Main Thread"
      : PR_GetThreadName(currentThread);

    char noNameThread[40];
    if (!currentThreadName) {
      SprintfLiteral(noNameThread, "Unnamed thread %p", currentThread);
      currentThreadName = noNameThread;
    }

    if (!mAddTimestamp) {
      fprintf_stderr(out,
                     "[%s]: %s/%s %s%s",
                     currentThreadName, ToLogStr(aLevel),
                     aName, buffToWrite, newline);
    } else {
      PRExplodedTime now;
      PR_ExplodeTime(PR_Now(), PR_GMTParameters, &now);
      fprintf_stderr(
          out,
          "%04d-%02d-%02d %02d:%02d:%02d.%06d UTC - [%s]: %s/%s %s%s",
          now.tm_year, now.tm_month + 1, now.tm_mday,
          now.tm_hour, now.tm_min, now.tm_sec, now.tm_usec,
          currentThreadName, ToLogStr(aLevel),
          aName, buffToWrite, newline);
    }

    if (mIsSync) {
      fflush(out);
    }

    if (buffToWrite != buff) {
      PR_smprintf_free(buffToWrite);
    }

    if (mRotate > 0 && outFile) {
      int32_t fileSize = ftell(out);
      if (fileSize > mRotate) {
        uint32_t fileNum = outFile->Num();

        uint32_t nextFileNum = fileNum + 1;
        if (nextFileNum >= kRotateFilesNumber) {
          nextFileNum = 0;
        }

        // And here is the trick.  The current out-file remembers its order
        // number.  When no other thread shifted the global file number yet,
        // we are the thread to open the next file.
        if (mOutFileNum.compareExchange(fileNum, nextFileNum)) {
          // We can work with mToReleaseFile because we are sure the
          // mPrintEntryCount can't drop to zero now - the condition
          // to actually delete what's stored in that member.
          // And also, no other thread can enter this piece of code
          // because mOutFile is still holding the current file with
          // the non-shifted number.  The compareExchange() above is
          // a no-op for other threads.
          outFile->mNextToRelease = mToReleaseFile;
          mToReleaseFile = outFile;

          mOutFile = OpenFile(false, nextFileNum);
        }
      }
    }

    if (--mPrintEntryCount == 0 && mToReleaseFile) {
      // We were the last Print() entered, if there is a file to release
      // do it now.  exchange() is atomic and makes sure we release the file
      // only once on one thread.
      detail::LogFile* release = mToReleaseFile.exchange(nullptr);
      delete release;
    }
  }
nsresult
nsHttpPipeline::WriteSegments(nsAHttpSegmentWriter *writer,
                              uint32_t count,
                              uint32_t *countWritten)
{
    LOG(("nsHttpPipeline::WriteSegments [this=%p count=%u]\n", this, count));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

    if (mClosed)
        return NS_SUCCEEDED(mStatus) ? NS_BASE_STREAM_CLOSED : mStatus;

    nsAHttpTransaction *trans;
    nsresult rv;

    trans = Response(0);
    // This code deals with the establishment of a CONNECT tunnel through
    // an HTTP proxy. It allows the connection to do the CONNECT/200
    // HTTP transaction to establish a tunnel as a precursor to the
    // actual pipeline of regular HTTP transactions.
    if (!trans && mRequestQ.Length() &&
        mConnection->IsProxyConnectInProgress()) {
        LOG(("nsHttpPipeline::WriteSegments [this=%p] Forced Delegation\n",
             this));
        trans = Request(0);
    }

    if (!trans) {
        if (mRequestQ.Length() > 0)
            rv = NS_BASE_STREAM_WOULD_BLOCK;
        else
            rv = NS_BASE_STREAM_CLOSED;
    } else {
        //
        // ask the transaction to consume data from the connection.
        // PushBack may be called recursively.
        //
        rv = trans->WriteSegments(writer, count, countWritten);

        if (rv == NS_BASE_STREAM_CLOSED || trans->IsDone()) {
            trans->Close(NS_OK);

            // Release the transaction if it is not IsProxyConnectInProgress()
            if (trans == Response(0)) {
                mResponseQ.RemoveElementAt(0);
                mResponseIsPartial = false;
                ++mHttp1xTransactionCount;
            }

            // ask the connection manager to add additional transactions
            // to our pipeline.
            RefPtr<nsHttpConnectionInfo> ci;
            GetConnectionInfo(getter_AddRefs(ci));
            if (ci)
                gHttpHandler->ConnMgr()->ProcessPendingQForEntry(ci);
        }
        else
            mResponseIsPartial = true;
    }

    if (mPushBackLen) {
        nsHttpPushBackWriter pushBackWriter(mPushBackBuf, mPushBackLen);
        uint32_t len = mPushBackLen, n;
        mPushBackLen = 0;

        // This progress notification has previously been sent from
        // the socket transport code, but it was delivered to the
        // previous transaction on the pipeline.
        nsITransport *transport = Transport();
        if (transport)
            OnTransportStatus(transport, NS_NET_STATUS_RECEIVING_FROM,
                              mReceivingFromProgress);

        // the push back buffer is never larger than NS_HTTP_SEGMENT_SIZE,
        // so we are guaranteed that the next response will eat the entire
        // push back buffer (even though it might again call PushBack).
        rv = WriteSegments(&pushBackWriter, len, &n);
    }

    return rv;
}