// static void Mark(PRUint32 aType, void * aItem, const char * aText, const char * aText2) { #ifdef MOZ_VISUAL_EVENT_TRACER if (!gInitialized) return; if (aType == eNone) return; if (!CheckEventFilters(aType, aItem, aText)) // Events use just aText return; RecordBatch * threadLogPrivate = static_cast<RecordBatch *>( PR_GetThreadPrivate(gThreadPrivateIndex)); if (!threadLogPrivate) { // Deletion is made by the flushing thread threadLogPrivate = new RecordBatch(); PR_SetThreadPrivate(gThreadPrivateIndex, threadLogPrivate); } Record * record = threadLogPrivate->mNextRecord; record->mType = aType; record->mTime = (mozilla::TimeStamp::Now() - gProfilerStart).ToMilliseconds(); record->mItem = aItem; record->mText = PL_strdup(aText); record->mText2 = aText2 ? PL_strdup(aText2) : nsnull; ++threadLogPrivate->mNextRecord; if (threadLogPrivate->mNextRecord == threadLogPrivate->mRecordsTail) { // This calls RecordBatch::FlushBatch(threadLogPrivate) PR_SetThreadPrivate(gThreadPrivateIndex, nsnull); } #endif }
/*
 * Get current thread-local JSThread info, creating one if it doesn't exist.
 * Each thread has a unique JSThread pointer.
 *
 * Since we are dealing with thread-local data, no lock is needed.
 *
 * Return a pointer to the thread local info, NULL if the system runs out
 * of memory, or it failed to set thread private data (neither case is very
 * likely; both are probably due to out-of-memory). It is up to the caller
 * to report an error, if possible.
 */
JSThread *
js_GetCurrentThread(JSRuntime *rt)
{
    JSThread *thread;

    thread = (JSThread *)PR_GetThreadPrivate(threadTPIndex);
    if (!thread) {
        /* First call on this thread: allocate a zero-filled record. */
        thread = (JSThread *) calloc(1, sizeof(JSThread));
        if (!thread)
            return NULL;

        if (PR_FAILURE == PR_SetThreadPrivate(threadTPIndex, thread)) {
            /* TLS registration failed; don't leak the record. */
            free(thread);
            return NULL;
        }

        JS_INIT_CLIST(&thread->contextList);
        thread->id = js_CurrentThreadId();

        /* js_SetContextThread initialize gcFreeLists as necessary. */
#ifdef DEBUG
        memset(thread->gcFreeLists, JS_FREE_PATTERN,
               sizeof(thread->gcFreeLists));
#endif
    }
    return thread;
}
// static void Shutdown() { #ifdef MOZ_VISUAL_EVENT_TRACER MOZ_EVENT_TRACER_MARK(gFlushingThread, "Profiling End"); // This must be called after all other threads had been shut down // (i.e. their private data had been 'released'). // Release the private data of this thread to flush all the remaning writes. PR_SetThreadPrivate(gThreadPrivateIndex, nsnull); if (gFlushingThread) { { MonitorAutoLock mon(*gMonitor); gInitialized = false; gStopFlushingThread = true; mon.Notify(); } PR_JoinThread(gFlushingThread); gFlushingThread = nsnull; } if (gMonitor) { delete gMonitor; gMonitor = nsnull; } if (gEventFilter) { delete gEventFilter; gEventFilter = nsnull; } #endif }
// Shut down the thread manager from the main thread: stop accepting new
// threads, drain pending main-thread events, shut down and join all managed
// threads, then release the main-thread object, its TLS entry, and the lock.
void
nsThreadManager::Shutdown()
{
  NS_ASSERTION(NS_IsMainThread(), "shutdown not called from main thread");

  // Prevent further access to the thread manager (no more new threads!)
  //
  // XXX What happens if shutdown happens before NewThread completes?
  // Fortunately, NewThread is only called on the main thread for now.
  //
  mInitialized = PR_FALSE;

  // Empty the main thread event queue before we begin shutting down threads.
  NS_ProcessPendingEvents(mMainThread);

  // We gather the threads from the hashtable into a list, so that we avoid
  // holding the hashtable lock while calling nsIThread::Shutdown.
  nsThreadArray threads;
  {
    nsAutoLock lock(mLock);
    mThreadsByPRThread.Enumerate(AppendAndRemoveThread, &threads);
  }

  // It's tempting to walk the list of threads here and tell them each to stop
  // accepting new events, but that could lead to badness if one of those
  // threads is stuck waiting for a response from another thread. To do it
  // right, we'd need some way to interrupt the threads.
  //
  // Instead, we process events on the current thread while waiting for threads
  // to shutdown. This means that we have to preserve a mostly functioning
  // world until such time as the threads exit.

  // Shutdown all threads that require it (join with threads that we created).
  for (PRUint32 i = 0; i < threads.Length(); ++i) {
    nsThread *thread = threads[i];
    if (thread->ShutdownRequired())
      thread->Shutdown();
  }

  // In case there are any more events somehow...
  NS_ProcessPendingEvents(mMainThread);

  // There are no more background threads at this point.

  // Clear the table of threads.
  {
    nsAutoLock lock(mLock);
    mThreadsByPRThread.Clear();
  }

  // Release main thread object.
  mMainThread = nsnull;

  // Remove the TLS entry for the main thread.
  PR_SetThreadPrivate(mCurThreadIndex, nsnull);

  // We don't need this lock anymore.
  PR_DestroyLock(mLock);
  mLock = nsnull;
}
// Return the calling thread's TimelineThreadData, allocating and initializing
// it on first use.  Returns nsnull if allocation of the data or its timer
// hash table fails.
static TimelineThreadData *GetThisThreadData()
{
    NS_ABORT_IF_FALSE(gTLSIndex!=BAD_TLS_INDEX, "Our TLS not initialized");
    TimelineThreadData *new_data = nsnull;
    TimelineThreadData *data =
        (TimelineThreadData *)PR_GetThreadPrivate(gTLSIndex);
    if (data == nsnull) {
        // First request for this thread - allocate it.
        new_data = new TimelineThreadData();
        if (!new_data)
            goto done;

        // Fill it
        new_data->timers = PL_NewHashTable(100, PL_HashString,
                                           PL_CompareStrings, PL_CompareValues,
                                           NULL, NULL);
        if (new_data->timers==NULL)
            goto done;
        new_data->initTime = PR_Now();
        NS_WARN_IF_FALSE(!gTimelineDisabled,
                         "Why are we creating new state when disabled?");
        new_data->disabled = PR_FALSE;
        // Success: hand ownership to TLS and clear new_data so the error
        // cleanup below does not delete the stored object.
        data = new_data;
        new_data = nsnull;
        PR_SetThreadPrivate(gTLSIndex, data);
    }
done:
    if (new_data) // eeek - error during creation!
        delete new_data;
    NS_WARN_IF_FALSE(data, "TimelineService could not get thread-local data");
    return data;
}
/*
 * Clear the calling thread's error stack slot, if the thread-private
 * index has ever been allocated.
 */
NSS_IMPLEMENT void
nss_DestroyErrorStack ( void)
{
  /* Nothing to clear if the index was never set up. */
  if( INVALID_TPD_INDEX == error_stack_index ) {
    return;
  }

  PR_SetThreadPrivate(error_stack_index, NULL);
}
// static XPCPerThreadData* XPCPerThreadData::GetDataImpl(JSContext *cx) { XPCPerThreadData* data; if(!gLock) { gLock = PR_NewLock(); if(!gLock) return nsnull; } if(gTLSIndex == BAD_TLS_INDEX) { nsAutoLock lock(gLock); // check again now that we have the lock... if(gTLSIndex == BAD_TLS_INDEX) { if(PR_FAILURE == PR_NewThreadPrivateIndex(&gTLSIndex, xpc_ThreadDataDtorCB)) { NS_ERROR("PR_NewThreadPrivateIndex failed!"); gTLSIndex = BAD_TLS_INDEX; return nsnull; } } } data = (XPCPerThreadData*) PR_GetThreadPrivate(gTLSIndex); if(!data) { data = new XPCPerThreadData(); if(!data || !data->IsValid()) { NS_ERROR("new XPCPerThreadData() failed!"); if(data) delete data; return nsnull; } if(PR_FAILURE == PR_SetThreadPrivate(gTLSIndex, data)) { NS_ERROR("PR_SetThreadPrivate failed!"); delete data; return nsnull; } } if(cx && !sMainJSThread && NS_IsMainThread()) { sMainJSThread = cx->thread; sMainThreadData = data; sMainThreadData->mThread = PR_GetCurrentThread(); } return data; }
//------------------------------------------------------------------------- // // destructor // //------------------------------------------------------------------------- nsToolkit::~nsToolkit() { if (mSharedGC) { gdk_gc_unref(mSharedGC); } // Remove the TLS reference to the toolkit... PR_SetThreadPrivate(gToolkitTLSIndex, nsnull); }
// Record, in thread-local storage, whether refcount activity is currently
// legal on this thread.  The TLS slot is allocated lazily on first call.
void
nsTraceRefcnt::SetActivityIsLegal(bool aLegal)
{
  // Create the TLS index the first time through.
  if (BAD_TLS_INDEX == gActivityTLS)
    PR_NewThreadPrivateIndex(&gActivityTLS, nullptr);

  // Store the *illegal* flag: a non-null value means activity is forbidden.
  PR_SetThreadPrivate(gActivityTLS, reinterpret_cast<void*>(!aLegal));
}
// Free the thread state for the current thread // (Presumably previously create with a call to // PyXPCOM_ThreadState_Ensure) void PyXPCOM_ThreadState_Free() { ThreadData *pData = (ThreadData *)PR_GetThreadPrivate(tlsIndex); if (!pData) return; PyThreadState *thisThreadState = pData->ts; PyThreadState_Delete(thisThreadState); PR_SetThreadPrivate(tlsIndex, NULL); nsMemory::Free(pData); }
void nsAutoCMonitor::Exit() { #ifdef DEBUG (void) PR_SetThreadPrivate(LockStackTPI, mDown); #endif PRStatus status = PR_CExitMonitor(mLockObject); NS_ASSERTION(status == PR_SUCCESS, "PR_CExitMonitor failed"); mLockCount -= 1; }
// Record, per thread, whether refcount activity is currently legal.
// Compiled out entirely unless refcount logging is enabled.
void
nsTraceRefcntImpl::SetActivityIsLegal(bool aLegal)
{
#ifdef NS_IMPL_REFCNT_LOGGING
  // Allocate the TLS slot lazily on the first call.
  if (BAD_TLS_INDEX == gActivityTLS) {
    PR_NewThreadPrivateIndex(&gActivityTLS, nullptr);
  }

  // A non-null TLS value means activity is *not* legal on this thread.
  PR_SetThreadPrivate(gActivityTLS, NS_INT32_TO_PTR(!aLegal));
#endif
}
// Tear down the exception service: drop all registered providers, release
// the per-thread state tracked in the thread list, destroy the lock, and
// clear this thread's TLS entry.
void nsExceptionService::Shutdown()
{
    mProviders.Reset();

    if (lock) {
        // DropAllThreads presumably releases the managers registered via
        // AddThread -- must happen before the lock is destroyed.
        DropAllThreads();
        PR_DestroyLock(lock);
        lock = nsnull;
    }
    PR_SetThreadPrivate(tlsIndex, nsnull);
}
// Track this lock/monitor acquisition on the per-thread lock stack and check
// its ordering against the lock currently on top of the stack, reporting a
// potential deadlock when the pair has been taken in inconsistent order.
nsAutoLockBase::nsAutoLockBase(void* addr, nsAutoLockType type)
{
    if (LockStackTPI == PRUintn(-1))
        InitAutoLockStatics();

    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    if (stackTop) {
        if (stackTop->mAddr == addr) {
            // Ignore reentry: it's legal for monitors, and NSPR will assert
            // if you reenter a PRLock.
        } else if (!addr) {
            // Ignore null addresses: the caller promises not to use the
            // lock at all, and NSPR will assert if you enter it.
        } else {
            // Optional stack trace for richer diagnostics (trace-malloc only).
            const void* node =
#ifdef NS_TRACE_MALLOC_XXX
                NS_GetStackTrace(1)
#else
                nsnull
#endif
                ;
            nsNamedVector* vec1;
            nsNamedVector* vec2;
            PRUint32 i2;

            if (!WellOrdered(stackTop->mAddr, addr, node, &i2, &vec1, &vec2)) {
                char buf[128];
                PR_snprintf(buf, sizeof buf,
                            "Potential deadlock between %s%s@%p and %s%s@%p",
                            vec1->mName ? vec1->mName : "",
                            LockTypeNames[stackTop->mType],
                            stackTop->mAddr,
                            vec2->mName ? vec2->mName : "",
                            LockTypeNames[type],
                            addr);
#ifdef NS_TRACE_MALLOC_XXX
                fprintf(stderr, "\n*** %s\n\nCurrent stack:\n", buf);
                NS_DumpStackTrace(node, stderr);
                fputs("\nPrevious stack:\n", stderr);
                NS_DumpStackTrace(vec2->mInnerSites.ElementAt(i2), stderr);
                putc('\n', stderr);
#endif
                NS_ERROR(buf);
            }
        }
    }

    // Push onto the thread's lock stack -- but only locks with a real
    // address participate in tracking.
    mAddr = addr;
    mDown = stackTop;
    mType = type;
    if (mAddr)
        (void) PR_SetThreadPrivate(LockStackTPI, this);
}
// Record, per thread, whether refcount activity is currently legal.
// No-op unless refcount logging is compiled in.
void
nsTraceRefcnt::SetActivityIsLegal(bool aLegal)
{
#ifdef NS_IMPL_REFCNT_LOGGING
  // Create the TLS index on first use.
  if (BAD_TLS_INDEX == gActivityTLS)
    PR_NewThreadPrivateIndex(&gActivityTLS, nullptr);

  // Store "activity is illegal" as a non-null pointer value.
  PR_SetThreadPrivate(gActivityTLS, reinterpret_cast<void*>(!aLegal));
#endif
}
// Remove the calling thread from the manager's table and clear its TLS
// entry.  Must be called on the thread being unregistered.
void
nsThreadManager::UnregisterCurrentThread(nsThread *thread)
{
  PRThread *prthread = thread->GetPRThread();
  NS_ASSERTION(prthread == PR_GetCurrentThread(), "bad thread");

  nsAutoLock guard(mLock);
  mThreadsByPRThread.Remove(prthread);

  PR_SetThreadPrivate(mCurThreadIndex, nsnull);
  // Ref-count balanced via ReleaseObject
}
//------------------------------------------------------------------------- // // Return the nsIToolkit for the current thread. If a toolkit does not // yet exist, then one will be created... // //------------------------------------------------------------------------- NS_METHOD NS_GetCurrentToolkit(nsIToolkit* *aResult) { nsIToolkit* toolkit = nsnull; nsresult rv = NS_OK; PRStatus status; #ifdef DEBUG printf("TK-GetCTK\n"); #endif // Create the TLS index the first time through... if (0 == gToolkitTLSIndex) { status = PR_NewThreadPrivateIndex(&gToolkitTLSIndex, NULL); if (PR_FAILURE == status) { rv = NS_ERROR_FAILURE; } } if (NS_SUCCEEDED(rv)) { toolkit = (nsIToolkit*)PR_GetThreadPrivate(gToolkitTLSIndex); // // Create a new toolkit for this thread... // if (!toolkit) { toolkit = new nsToolkit(); if (!toolkit) { rv = NS_ERROR_OUT_OF_MEMORY; } else { NS_ADDREF(toolkit); toolkit->Init(PR_GetCurrentThread()); // // The reference stored in the TLS is weak. It is removed in the // nsToolkit destructor... // PR_SetThreadPrivate(gToolkitTLSIndex, (void*)toolkit); } } else { NS_ADDREF(toolkit); } *aResult = toolkit; } return rv; }
// Add the calling thread to the manager's table and stash it in TLS.
// Must be called on the thread being registered.
void
nsThreadManager::RegisterCurrentThread(nsThread *thread)
{
  PRThread *prthread = thread->GetPRThread();
  NS_ASSERTION(prthread == PR_GetCurrentThread(), "bad thread");

  nsAutoLock guard(mLock);
  mThreadsByPRThread.Put(prthread, thread);  // XXX check OOM?

  NS_ADDREF(thread);  // for TLS entry
  PR_SetThreadPrivate(mCurThreadIndex, thread);
}
// Enter the cached monitor, pushing this object onto the per-thread lock
// stack in debug builds to verify LIFO enter/exit ordering.
void nsAutoCMonitor::Enter()
{
#ifdef DEBUG
    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    NS_ASSERTION(stackTop == mDown, "non-LIFO nsAutoCMonitor::Enter");
    mDown = stackTop;
    (void) PR_SetThreadPrivate(LockStackTPI, this);
#endif
    PR_CEnterMonitor(mLockObject);
    mLockCount += 1;
}
// Exit the monitor, popping this object off the per-thread lock stack in
// debug builds.  Exiting a null monitor is reported and ignored.
void nsAutoMonitor::Exit()
{
#ifdef DEBUG
    if (!mAddr) {
        NS_ERROR("It is not legal to exit a null monitor");
        return;
    }
    // Restore the previous top of the lock stack.
    (void) PR_SetThreadPrivate(LockStackTPI, mDown);
#endif
    PRStatus status = PR_ExitMonitor(mMonitor);
    NS_ASSERTION(status == PR_SUCCESS, "PR_ExitMonitor failed");
    mLockCount -= 1;
}
// Unlink this lock from the per-thread lock stack without touching the
// underlying lock itself, so it no longer participates in order tracking.
// Assumes `this` is somewhere on the stack (the walk does not check for
// the end of the list).
void nsAutoLockBase::Hide()
{
    if (!mAddr)
        return;

    nsAutoLockBase* curr =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    nsAutoLockBase* prev = nsnull;
    // Walk down the stack until we find ourselves.
    while (curr != this) {
        prev = curr;
        curr = prev->mDown;
    }
    // Unlink: if we were on top, repoint the TLS head; otherwise patch the
    // node above us.
    if (!prev)
        PR_SetThreadPrivate(LockStackTPI, mDown);
    else
        prev->mDown = mDown;
}
/*
 * mempool_destroy is a callback which is set to NSPR ThreadPrivateIndex.
 * It drains every per-type free list: each pooled object is passed to the
 * registered cleanup callback (if any) and then freed, and finally the
 * per-thread pool itself is released.
 */
static void
mempool_destroy()
{
    int i = 0;
    struct mempool *my_mempool;
#ifdef SHARED_MEMPOOL
    for (i = 0; MEMPOOL_END != mempool[i].mempool_name; i++) {
        struct mempool_object *object = NULL;
        if (NULL == mempool[i].mempool_mutex) {
            /* mutex is NULL; this mempool is not enabled */
            continue;
        }
        /* Detach the whole list, then free it object by object. */
        object = mempool[i].mempool_head;
        mempool[i].mempool_head = NULL;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != mempool[i].mempool_cleanup_fn) {
                (mempool[i].mempool_cleanup_fn)((void *)object);
            }
            slapi_ch_free((void **)&object);
            object = next;
        }
        PR_DestroyLock(mempool[i].mempool_mutex);
        mempool[i].mempool_mutex = NULL;
    }
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return;
    }
    for (i = 0; i < MAX_MEMPOOL; i++) {
        struct mempool_object *object = my_mempool[i].mempool_head;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != my_mempool[i].mempool_cleanup_fn) {
                (my_mempool[i].mempool_cleanup_fn)((void *)object);
            }
            slapi_ch_free((void **)&object);
            object = next;
        }
        my_mempool[i].mempool_head = NULL;
        my_mempool[i].mempool_count = 0;
    }
    /* Free the pool array itself and clear the TLS slot. */
    slapi_ch_free((void **)&my_mempool);
    PR_SetThreadPrivate (mempool_index, (void *)NULL);
#endif
}
// Enter the monitor, pushing this object onto the per-thread lock stack in
// debug builds to verify LIFO enter/exit ordering.  Entering a null monitor
// is reported and ignored.
void nsAutoMonitor::Enter()
{
#ifdef DEBUG
    if (!mAddr) {
        NS_ERROR("It is not legal to enter a null monitor");
        return;
    }
    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    NS_ASSERTION(stackTop == mDown, "non-LIFO nsAutoMonitor::Enter");
    mDown = stackTop;
    (void) PR_SetThreadPrivate(LockStackTPI, this);
#endif
    PR_EnterMonitor(mMonitor);
    mLockCount += 1;
}
/* readonly attribute nsIExceptionManager currentExceptionManager; */
// Return the calling thread's exception manager, creating and registering
// one on first request.  The TLS reference itself is weak; ownership lives
// in the service's thread list.
NS_IMETHODIMP nsExceptionService::GetCurrentExceptionManager(nsIExceptionManager * *aCurrentScriptManager)
{
    CHECK_SERVICE_USE_OK();
    nsExceptionManager *mgr =
        (nsExceptionManager *)PR_GetThreadPrivate(tlsIndex);
    if (mgr == nsnull) {
        // Stick the new exception object in with no reference count.
        mgr = new nsExceptionManager(this);
        if (mgr == nsnull)
            return NS_ERROR_OUT_OF_MEMORY;
        PR_SetThreadPrivate(tlsIndex, mgr);
        // The reference count is held in the thread-list
        AddThread(mgr);
    }
    *aCurrentScriptManager = mgr;
    NS_ADDREF(*aCurrentScriptManager);
    return NS_OK;
}
/*
 * return memory to memory pool
 * (Callback cleanup function was intented to release nested memory in the
 * memory area. Initially, memory had its structure which could point
 * other memory area. But the current code (#else) expects no structure.
 * Thus, the cleanup callback is not needed)
 * The current code (#else) uses the memory pool stored in the
 * per-thread-private data.
 *
 * Returns LDAP_SUCCESS when the object is accepted (or pooling is off) and
 * LDAP_UNWILLING_TO_PERFORM when the per-type free list is already full.
 */
int
mempool_return(int type, void *object, mempool_cleanup_callback cleanup)
{
    PR_ASSERT(type >= 0 && type < MEMPOOL_END);

    if (!config_get_mempool_switch()) {
        return LDAP_SUCCESS; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return LDAP_SUCCESS;
    }
    PR_Lock(mempool[type].mempool_mutex);
    ((struct mempool_object *)object)->mempool_next = mempool[type].mempool_head;
    mempool[type].mempool_head = (struct mempool_object *)object;
    mempool[type].mempool_cleanup_fn = cleanup;
    mempool[type].mempool_count++;
    PR_Unlock(mempool[type].mempool_mutex);
    return LDAP_SUCCESS;
#else
    {
        struct mempool *my_mempool;
        int maxfreelist;
        my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
        if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
            /* mempool is not initialized */
            mempool_init(&my_mempool);
        }

        maxfreelist = config_get_mempool_maxfreelist();
        if ((maxfreelist > 0) && (my_mempool[type].mempool_count > maxfreelist)) {
            /* Free list is full; refuse the object without touching it. */
            return LDAP_UNWILLING_TO_PERFORM;
        }

        /* BUG FIX: the accepted object must be linked through the
         * thread-local pool (my_mempool); the old code read the list head
         * from the global SHARED_MEMPOOL `mempool` array, which is not the
         * pool this (#else) build uses.  The object is also linked only
         * after the maxfreelist check, so a rejected object is no longer
         * left with a stale mempool_next pointer into the free list. */
        ((struct mempool_object *)object)->mempool_next = my_mempool[type].mempool_head;
        my_mempool[type].mempool_head = (struct mempool_object *)object;
        my_mempool[type].mempool_cleanup_fn = cleanup;
        my_mempool[type].mempool_count++;
        PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
        return LDAP_SUCCESS;
    }
#endif
}
static error_stack * error_get_my_stack ( void) { PRStatus st; error_stack *rv; PRUintn new_size; PRUint32 new_bytes; error_stack *new_stack; if( INVALID_TPD_INDEX == error_stack_index ) { st = PR_CallOnce(&error_call_once, error_once_function); if( PR_SUCCESS != st ) { return (error_stack *)NULL; } } rv = (error_stack *)PR_GetThreadPrivate(error_stack_index); if( (error_stack *)NULL == rv ) { /* Doesn't exist; create one */ new_size = 16; } else if( rv->header.count == rv->header.space && rv->header.count < NSS_MAX_ERROR_STACK_COUNT ) { /* Too small, expand it */ new_size = PR_MIN( rv->header.space * 2, NSS_MAX_ERROR_STACK_COUNT); } else { /* Okay, return it */ return rv; } new_bytes = (new_size * sizeof(PRInt32)) + sizeof(error_stack); /* Use NSPR's calloc/realloc, not NSS's, to avoid loops! */ new_stack = PR_Calloc(1, new_bytes); if( (error_stack *)NULL != new_stack ) { if( (error_stack *)NULL != rv ) { (void)nsslibc_memcpy(new_stack,rv,rv->header.space); } new_stack->header.space = new_size; } /* Set the value, whether or not the allocation worked */ PR_SetThreadPrivate(error_stack_index, new_stack); return new_stack; }
// static void XPCPerThreadData::CleanupAllThreads() { // I've questioned the sense of cleaning up other threads' data from the // start. But I got talked into it. Now I see that we *can't* do all the // cleaup while holding this lock. So, we are going to go to the trouble // to copy out the data that needs to be cleaned up *outside* of // the lock. Yuk! XPCJSContextStack** stacks = nsnull; int count = 0; int i; if(gLock) { nsAutoLock lock(gLock); for(XPCPerThreadData* cur = gThreads; cur; cur = cur->mNextThread) count++; stacks = (XPCJSContextStack**) new XPCJSContextStack*[count] ; if(stacks) { i = 0; for(XPCPerThreadData* cur = gThreads; cur; cur = cur->mNextThread) { stacks[i++] = cur->mJSContextStack; cur->mJSContextStack = nsnull; cur->Cleanup(); } } } if(stacks) { for(i = 0; i < count; i++) delete stacks[i]; delete [] stacks; } if(gTLSIndex != BAD_TLS_INDEX) PR_SetThreadPrivate(gTLSIndex, nsnull); }
// Ensure that we have a Python thread state available to use. // If this is called for the first time on a thread, it will allocate // the thread state. This does NOT change the state of the Python lock. // Returns TRUE if a new thread state was created, or FALSE if a // thread state already existed. PRBool PyXPCOM_ThreadState_Ensure() { ThreadData *pData = (ThreadData *)PR_GetThreadPrivate(tlsIndex); if (pData==NULL) { /* First request on this thread */ /* Check we have an interpreter state */ if (PyXPCOM_InterpreterState==NULL) { Py_FatalError("Can not setup thread state, as have no interpreter state"); } pData = (ThreadData *)nsMemory::Alloc(sizeof(ThreadData)); if (!pData) Py_FatalError("Out of memory allocating thread state."); memset(pData, 0, sizeof(*pData)); if (NS_FAILED( PR_SetThreadPrivate( tlsIndex, pData ) ) ) { NS_ABORT_IF_FALSE(0, "Could not create thread data for this thread!"); Py_FatalError("Could not thread private thread data!"); } pData->ts = PyThreadState_New(PyXPCOM_InterpreterState); return PR_TRUE; // Did create a thread state state } return PR_FALSE; // Thread state was previously created }
//------------------------------------------------------------------------- // // destructor // //------------------------------------------------------------------------- nsToolkit::~nsToolkit() { NS_PRECONDITION(::IsWindow(mDispatchWnd), "Invalid window handle"); // Destroy the Dispatch Window ::DestroyWindow(mDispatchWnd); mDispatchWnd = NULL; // Remove the TLS reference to the toolkit... PR_SetThreadPrivate(gToolkitTLSIndex, nsnull); if (gMouseTrailer) { gMouseTrailer->DestroyTimer(); delete gMouseTrailer; gMouseTrailer = nsnull; } #if defined (MOZ_STATIC_COMPONENT_LIBS) || defined(WINCE) nsToolkit::Shutdown(); #endif }
/*
 * get memory from memory pool
 * The current code (#else) uses the memory pool stored in the
 * per-thread-private data.
 * Returns a pooled object of the given type, or NULL when pooling is off,
 * the pool is uninitialized/disabled, or the free list is empty.
 */
void *
mempool_get(int type)
{
    struct mempool_object *object = NULL;
    struct mempool *my_mempool;
    PR_ASSERT(type >= 0 && type < MEMPOOL_END);

    if (!config_get_mempool_switch()) {
        return NULL; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return NULL;
    }
    /* Pop the head object under the per-type mutex. */
    PR_Lock(mempool[type].mempool_mutex);
    object = mempool[type].mempool_head;
    if (NULL != object) {
        mempool[type].mempool_head = object->mempool_next;
        mempool[type].mempool_count--;
        object->mempool_next = NULL;
    }
    PR_Unlock(mempool[type].mempool_mutex);
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return NULL;
    }

    /* Pop the head object off this thread's free list, if any. */
    object = my_mempool[type].mempool_head;
    if (NULL != object) {
        my_mempool[type].mempool_head = object->mempool_next;
        my_mempool[type].mempool_count--;
        object->mempool_next = NULL;
        PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
    }
#endif
    return object;
}