NS_IMETHODIMP nsProtectedAuthThread::Login(nsIObserver *aObserver)
{
    NS_ENSURE_ARG(aObserver);

    if (!mMutex)
        return NS_ERROR_FAILURE;

    if (!mSlot)
        // We need pointer to the slot
        return NS_ERROR_FAILURE;

    nsCOMPtr<nsIObserver> observerProxy;
    nsresult rv = NS_GetProxyForObject(NS_PROXY_TO_MAIN_THREAD,
                                       NS_GET_IID(nsIObserver),
                                       aObserver,
                                       NS_PROXY_SYNC | NS_PROXY_ALWAYS,
                                       getter_AddRefs(observerProxy));
    if (NS_FAILED(rv))
        return rv;

    PR_Lock(mMutex);

    if (mIAmRunning || mLoginReady) {
        PR_Unlock(mMutex);
        return NS_OK;
    }

    observerProxy.swap(mStatusObserver);
    mIAmRunning = PR_TRUE;

    mThreadHandle = PR_CreateThread(PR_USER_THREAD, nsProtectedAuthThreadRunner,
                                    static_cast<void*>(this),
                                    PR_PRIORITY_NORMAL, PR_LOCAL_THREAD,
                                    PR_JOINABLE_THREAD, 0);

    // bool thread_started_ok = (threadHandle != nsnull);
    // we might want to return "thread started ok" to caller in the future
    NS_ASSERTION(mThreadHandle, "Could not create nsProtectedAuthThreadRunner thread\n");

    PR_Unlock(mMutex);

    return NS_OK;
}
/**
 *
 * CCApp Provider main routine.
 *
 * @param arg - CCApp msg queue
 *
 * @return void
 *
 * @pre None
 */
void CCApp_task(void * arg)
{
    static const char fname[] = "CCApp_task";
    phn_syshdr_t *syshdr = NULL;
    appListener *listener = NULL;
    void * msg;

    // If the "ready to start" condition variable has been created
    // (is non-null), we're going to wait for it to be signaled
    // before we start processing messages.
    if (ccAppReadyToStartCond) {
        PR_Lock(ccAppReadyToStartLock);
        while (!ccAppReadyToStart) {
            PR_WaitCondVar(ccAppReadyToStartCond, PR_INTERVAL_NO_TIMEOUT);
        }
        PR_Unlock(ccAppReadyToStartLock);
    }

    //initialize the listener list
    sll_lite_init(&sll_list);

    CCAppInit();

    while (1) {
        msg = cprGetMessage(ccapp_msgq, TRUE, (void **) &syshdr);
        if ( msg) {
            CCAPP_DEBUG(DEB_F_PREFIX"Received Cmd[%d] for app[%d]\n",
                        DEB_F_PREFIX_ARGS(SIP_CC_PROV, fname),
                        syshdr->Cmd, syshdr->Usr.UsrInfo);

            listener = getCcappListener(syshdr->Usr.UsrInfo);
            if (listener != NULL) {
                (* ((appListener)(listener)))(msg, syshdr->Cmd);
            } else {
                CCAPP_DEBUG(DEB_F_PREFIX"Event[%d] doesn't have a dedicated listener.\n",
                            DEB_F_PREFIX_ARGS(SIP_CC_PROV, fname),
                            syshdr->Usr.UsrInfo);
            }
            cprReleaseSysHeader(syshdr);
            cpr_free(msg);
        }
    }
}
/*
 * mark the connection as being done with pagedresults
 * processing - returns True if it was processing,
 * False otherwise
 */
int
pagedresults_reset_processing(Connection *conn, int index)
{
    int ret = 0;

    LDAPDebug1Arg(LDAP_DEBUG_TRACE,
                  "--> pagedresults_reset_processing: idx=%d\n", index);
    if (conn && (index > -1)) {
        PR_Lock(conn->c_mutex);
        if (index < conn->c_pagedresults.prl_maxlen) {
            ret = (conn->c_pagedresults.prl_list[index].pr_flags &
                   CONN_FLAG_PAGEDRESULTS_PROCESSING);
            /* if ret is false, the following doesn't do anything */
            conn->c_pagedresults.prl_list[index].pr_flags &=
                ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
        }
        PR_Unlock(conn->c_mutex);
    }
    LDAPDebug1Arg(LDAP_DEBUG_TRACE,
                  "<-- pagedresults_reset_processing: %d\n", ret);
    return ret;
}
int
pagedresults_set_search_result_set_size_estimate(Connection *conn,
                                                 int count, int index)
{
    int rc = -1;

    LDAPDebug1Arg(LDAP_DEBUG_TRACE,
                  "--> pagedresults_set_search_result_set_size_estimate: "
                  "idx=%d\n", index);
    if (conn && (index > -1)) {
        PR_Lock(conn->c_mutex);
        if (index < conn->c_pagedresults.prl_maxlen) {
            conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate = count;
        }
        PR_Unlock(conn->c_mutex);
        rc = 0;
    }
    LDAPDebug1Arg(LDAP_DEBUG_TRACE,
                  "<-- pagedresults_set_search_result_set_size_estimate: %d\n",
                  rc);
    return rc;
}
/* fetches the current min/max times and the search count, and clears them */
void
at_getCountMinMax(AddThread *at, PRUint32 *count, PRUint32 *min,
                  PRUint32 *max, PRUint32 *total)
{
    PR_Lock(at->lock);
    if (count) {
        *count = at->addCount;
        at->addCount = 0;
    }
    if (min) {
        *min = at->mintime;
        at->mintime = 10000;
    }
    if (max) {
        *max = at->maxtime;
        at->maxtime = 0;
    }
    if (total)
        *total = at->addTotal;  /* running total; not reset here */
    at->alive--;
    PR_Unlock(at->lock);
}
void
js_lock_task(JSTaskState *task)
{
    PRThread *me = PR_GetCurrentThread();

    /* Note: lazily creating the lock here is racy if two threads reach
       this point before js_owner_lock exists. */
    if (! js_owner_lock) {
        js_owner_lock = PR_NewLock();
    }
    if ( js_owner_thread == me) {
        /* Re-entrant acquisition by the owning thread. */
        PR_ASSERT(js_owner_count > 0);
        js_owner_count++;
    } else {
        PR_Lock(js_owner_lock);
        PR_ASSERT(js_owner_count == 0);
        js_owner_count = 1;
        js_owner_thread = me;
    }
}
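/*
 * The matching release path is not part of this excerpt.  A hypothetical
 * counterpart sketch, assuming the same js_owner_* globals; the name
 * js_unlock_task and its exact behavior are assumptions, not taken from
 * the original source:
 */
void
js_unlock_task(JSTaskState *task)
{
    PRThread *me = PR_GetCurrentThread();

    PR_ASSERT(js_owner_thread == me);   /* only the owning thread may release */
    PR_ASSERT(js_owner_count > 0);
    if (--js_owner_count == 0) {
        js_owner_thread = NULL;         /* clear ownership before dropping the lock */
        PR_Unlock(js_owner_lock);
    }
}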
void
jsd_Lock(JSDStaticLock* lock)
{
    void* me;
    ASSERT_VALID_LOCK(lock);
    _CURRENT_THREAD(me);

    if(lock->owner == me)
    {
        lock->count++;
        JS_ASSERT(lock->count > 1);
    }
    else
    {
        PR_Lock(lock->lock);            /* this can block... */
        JS_ASSERT(lock->owner == 0);
        JS_ASSERT(lock->count == 0);
        lock->count = 1;
        lock->owner = me;
    }
}
/*
 * return arg (ec_arg) only if the context is in the event queue
 */
void *
slapi_eq_get_arg ( Slapi_Eq_Context ctx )
{
    slapi_eq_context **p;

    PR_ASSERT(eq_initialized);
    if (!eq_stopped) {
        PR_Lock(eq->eq_lock);
        p = &(eq->eq_queue);
        while (p && *p != NULL) {
            if ((*p)->ec_id == ctx) {
                PR_Unlock(eq->eq_lock);
                return (*p)->ec_arg;
            } else {
                p = &((*p)->ec_next);
            }
        }
        PR_Unlock(eq->eq_lock);
    }
    return NULL;
}
/*
 * Prepare for iteration across an object set. Returns the first
 * object in the set. The returned object is referenced, therefore
 * the caller must either release the object, or
 * pass it to an objset_next_obj call, which will
 * implicitly release the object.
 * Returns the first object, or NULL if the objset contains no
 * objects.
 */
Object *
objset_first_obj(Objset *set)
{
    Object *return_object;

    /* Be tolerant (for the replication plugin) */
    if (set == NULL)
        return NULL;

    PR_Lock(set->lock);
    if (NULL == set->head) {
        return_object = NULL;
    } else {
        object_acquire(set->head->obj);
        return_object = set->head->obj;
    }
    PR_Unlock(set->lock);
    return return_object;
}
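/*
 * The reference-handling contract described above is easiest to see from the
 * caller's side.  A minimal iteration sketch (the helper name, the set
 * variable, and the loop body are placeholders); is_anyinstance_busy()
 * further below follows the same pattern:
 */
static void objset_iteration_sketch(Objset *set)
{
    Object *o;

    /* objset_first_obj returns a referenced object; passing it to
     * objset_next_obj releases it and returns the next referenced one. */
    for (o = objset_first_obj(set); o != NULL; o = objset_next_obj(set, o)) {
        /* ... use object_get_data(o) here ... */
    }
    /* If the loop were exited early while still holding an object,
     * the caller would need: object_release(o); */
}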
/* static */ bool
DiscardTracker::TryAllocation(uint64_t aBytes)
{
  MOZ_ASSERT(sInitialized);

  PR_Lock(sAllocationLock);
  bool enoughSpace =
    !gfxPrefs::ImageMemHardLimitDecodedImageKB() ||
    (gfxPrefs::ImageMemHardLimitDecodedImageKB() * 1024) - sCurrentDecodedImageBytes >= aBytes;

  if (enoughSpace) {
    sCurrentDecodedImageBytes += aBytes;
  }
  PR_Unlock(sAllocationLock);

  // If we're using too much memory for decoded images, MaybeDiscardSoon will
  // enqueue a callback to discard some images.
  MaybeDiscardSoon();

  return enoughSpace;
}
int ldbm_back_rmdb( Slapi_PBlock *pb )
{
    struct ldbminfo *li = NULL;
    /* char *directory = NULL;*/
    int return_value = -1;
    Slapi_Backend *be;

    slapi_pblock_get( pb, SLAPI_BACKEND, &be );
    if (be->be_state != BE_STATE_STOPPED)
    {
        LDAPDebug( LDAP_DEBUG_TRACE,
                   "ldbm_back_cleanup: warning - backend is in a wrong state - %d\n",
                   be->be_state, 0, 0 );
        return 0;
    }

    PR_Lock (be->be_state_lock);

    if (be->be_state != BE_STATE_STOPPED)
    {
        LDAPDebug( LDAP_DEBUG_TRACE,
                   "ldbm_back_cleanup: warning - backend is in a wrong state - %d\n",
                   be->be_state, 0, 0 );
        PR_Unlock (be->be_state_lock);
        return 0;
    }

    slapi_pblock_get( pb, SLAPI_PLUGIN_PRIVATE, &li );
    /* slapi_pblock_get( pb, SLAPI_SEQ_VAL, &directory );*/

    return_value = dblayer_delete_database( li );

    if (return_value == 0)
        be->be_state = BE_STATE_DELETED;

    PR_Unlock (be->be_state_lock);

    return return_value;
}
/*
 * get memory from memory pool
 * The current code (#else) uses the memory pool stored in the
 * per-thread-private data.
 */
void *
mempool_get(int type)
{
    struct mempool_object *object = NULL;
    struct mempool *my_mempool;

    PR_ASSERT(type >= 0 && type < MEMPOOL_END);
    if (!config_get_mempool_switch()) {
        return NULL;  /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return NULL;
    }
    PR_Lock(mempool[type].mempool_mutex);
    object = mempool[type].mempool_head;
    if (NULL != object) {
        mempool[type].mempool_head = object->mempool_next;
        mempool[type].mempool_count--;
        object->mempool_next = NULL;
    }
    PR_Unlock(mempool[type].mempool_mutex);
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return NULL;
    }

    object = my_mempool[type].mempool_head;
    if (NULL != object) {
        my_mempool[type].mempool_head = object->mempool_next;
        my_mempool[type].mempool_count--;
        object->mempool_next = NULL;
        PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
    }
#endif

    return object;
}
static PRIntervalTime ConditionNotify(PRUint32 loops)
{
    PRThread *thread;
    NotifyData notifyData;
    PRIntervalTime timein, overhead;

    timein = PR_IntervalNow();

    notifyData.counter = loops;
    notifyData.ml = PR_NewLock();
    notifyData.child = PR_NewCondVar(notifyData.ml);
    notifyData.parent = PR_NewCondVar(notifyData.ml);
    thread = PR_CreateThread(
        PR_USER_THREAD, Notifier, &notifyData,
        PR_GetThreadPriority(PR_GetCurrentThread()),
        thread_scope, PR_JOINABLE_THREAD, 0);

    overhead = PR_IntervalNow() - timein;  /* elapsed so far */

    PR_Lock(notifyData.ml);
    while (notifyData.counter > 0)
    {
        notifyData.pending = PR_TRUE;
        PR_NotifyCondVar(notifyData.child);
        while (notifyData.pending)
            PR_WaitCondVar(notifyData.parent, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(notifyData.ml);

    timein = PR_IntervalNow();

    (void)PR_JoinThread(thread);
    PR_DestroyCondVar(notifyData.child);
    PR_DestroyCondVar(notifyData.parent);
    PR_DestroyLock(notifyData.ml);

    overhead += (PR_IntervalNow() - timein);  /* more overhead */

    return overhead;
}  /* ConditionNotify */
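/*
 * The Notifier thread function is not included in this excerpt.  A minimal
 * sketch of the child side of the ping-pong protocol above, assuming
 * NotifyData carries only the fields used by ConditionNotify; this is an
 * illustration, not the original test code:
 */
static void PR_CALLBACK Notifier(void *arg)
{
    NotifyData *notifyData = (NotifyData *)arg;

    PR_Lock(notifyData->ml);
    while (notifyData->counter > 0)
    {
        /* Wait for the parent to post a notification. */
        while (!notifyData->pending)
            PR_WaitCondVar(notifyData->child, PR_INTERVAL_NO_TIMEOUT);
        notifyData->counter -= 1;
        notifyData->pending = PR_FALSE;
        PR_NotifyCondVar(notifyData->parent);   /* hand the turn back */
    }
    PR_Unlock(notifyData->ml);
}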
nsresult IPC_Disconnect()
{
    // Must disconnect on same thread used to connect!
    PR_ASSERT(gMainThread == PR_GetCurrentThread());

    if (!gConnState || !gConnThread)
        return NS_ERROR_NOT_INITIALIZED;

    PR_Lock(gConnState->lock);
    gConnState->shutdown = PR_TRUE;
    PR_SetPollableEvent(gConnState->fds[POLL].fd);
    PR_Unlock(gConnState->lock);

    PR_JoinThread(gConnThread);

    ConnDestroy(gConnState);
    gConnState = NULL;
    gConnThread = NULL;
    return NS_OK;
}
/*
** Wait on a Semaphore.
**
** This routine allows a calling thread to wait or proceed depending upon the
** state of the semaphore sem. The thread can proceed only if the counter value
** of the semaphore sem is currently greater than 0. If the value of semaphore
** sem is positive, it is decremented by one and the routine returns immediately
** allowing the calling thread to continue. If the value of semaphore sem is 0,
** the calling thread blocks awaiting the semaphore to be released by another
** thread.
**
** This routine can return PR_PENDING_INTERRUPT if the waiting thread
** has been interrupted.
*/
PR_IMPLEMENT(PRStatus) PR_WaitSem(PRSemaphore *sem)
{
    PRStatus status = PR_SUCCESS;

#ifdef HAVE_CVAR_BUILT_ON_SEM
    return _PR_MD_WAIT_SEM(&sem->md);
#else
    PR_Lock(sem->cvar->lock);
    while (sem->count == 0) {
        sem->waiters++;
        status = PR_WaitCondVar(sem->cvar, PR_INTERVAL_NO_TIMEOUT);
        sem->waiters--;
        if (status != PR_SUCCESS)
            break;
    }
    if (status == PR_SUCCESS)
        sem->count--;
    PR_Unlock(sem->cvar->lock);
#endif

    return (status);
}
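/*
 * A minimal caller-side sketch of the counting-semaphore API implemented
 * above, using the public entry points PR_NewSem/PR_PostSem/PR_DestroySem
 * that accompany PR_WaitSem in NSPR's (obsolete) semaphore interface.
 * The helper name and the producer/consumer split are illustrative only:
 */
static void semaphore_usage_sketch(void)
{
    PRSemaphore *sem = PR_NewSem(0);        /* start with zero available units */
    if (sem == NULL)
        return;

    PR_PostSem(sem);                        /* producer: make one unit available */

    if (PR_WaitSem(sem) == PR_SUCCESS) {    /* consumer: returns once count > 0 */
        /* ... proceed; the count has been decremented ... */
    }

    PR_DestroySem(sem);
}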
static PRBool CancelTimer(TimerEvent *timer)
{
    PRBool canceled = PR_FALSE;

    PR_Lock(tm_vars.ml);
    timer->ref_count -= 1;
    if (timer->links.prev == &timer->links)
    {
        while (timer->ref_count == 1)
        {
            PR_WaitCondVar(tm_vars.cancel_timer, PR_INTERVAL_NO_TIMEOUT);
        }
    }
    else
    {
        PR_REMOVE_LINK(&timer->links);
        canceled = PR_TRUE;
    }
    PR_Unlock(tm_vars.ml);
    PR_DELETE(timer);
    return canceled;
}
/*
 * Add a new event to the event queue.
 */
static void
eq_enqueue(slapi_eq_context *newec)
{
    slapi_eq_context **p;

    PR_ASSERT(NULL != newec);
    PR_Lock(eq->eq_lock);
    /* Insert <newec> in order (sorted by start time) in the list */
    for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) {
        if ((*p)->ec_when > newec->ec_when) {
            break;
        }
    }
    if (NULL != *p) {
        newec->ec_next = *p;
    } else {
        newec->ec_next = NULL;
    }
    *p = newec;
    PR_NotifyCondVar(eq->eq_cv);  /* wake up scheduler thread */
    PR_Unlock(eq->eq_lock);
}
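/*
 * Both eq_enqueue() and slapi_eq_get_arg() above walk the queue through a
 * pointer-to-pointer, which removes any special case for the list head.
 * A self-contained sketch of that insertion idiom; the node type and the
 * function name here are illustrative, not from the source:
 */
struct sorted_node {
    unsigned long when;
    struct sorted_node *next;
};

static void sorted_insert(struct sorted_node **head, struct sorted_node *n)
{
    struct sorted_node **p;

    /* *p is either the head pointer itself or some node's 'next' field. */
    for (p = head; *p != NULL; p = &(*p)->next) {
        if ((*p)->when > n->when)
            break;
    }
    n->next = *p;   /* NULL when appending at the tail */
    *p = n;
}
/*
 * Note that "n->next = *p" already covers both the mid-list and end-of-list
 * cases, which is why the explicit NULL branch in eq_enqueue is not strictly
 * required.
 */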
/*
** Free the stack for the current thread
*/
void _PR_FreeStack(PRThreadStack *ts)
{
    if (!ts) {
        return;
    }
    if (ts->flags & _PR_STACK_PRIMORDIAL) {
        PR_DELETE(ts);
        return;
    }

    /*
    ** Put the stack on the free list. This is done because we are still
    ** using the stack. Next time a thread is created we will trim the
    ** list down; it's safe to do it then because we will have had to
    ** context switch to a live stack before another thread can be
    ** created.
    */
    PR_Lock(_pr_stackLock);
    PR_APPEND_LINK(&ts->links, _pr_freeStacks.prev);
    _pr_numFreeStacks++;
    PR_Unlock(_pr_stackLock);
}
nsrefcnt
nsLDAPConnection::Release(void)
{
    nsrefcnt count;

    NS_PRECONDITION(0 != mRefCnt, "dup release");
    count = PR_AtomicDecrement((PRInt32 *)&mRefCnt);
    NS_LOG_RELEASE(this, count, "nsLDAPConnection");
    if (0 == count) {
        // As commented by danm: In the object's destructor, if by some
        // convoluted, indirect means it happens to run into some code
        // that temporarily references it (addref/release), then if the
        // refcount had been left at 0 the unexpected release would
        // attempt to reenter the object's destructor.
        //
        mRefCnt = 1; /* stabilize */

        // If we have a mRunnable object, we need to make sure to lock its
        // mLock before we try to DELETE. This is to avoid a race condition.
        // We also make sure to keep a strong reference to the runnable
        // object, to make sure it doesn't get GCed from underneath us,
        // while we are still holding a lock for instance.
        //
        if (mRunnable && mRunnable->mLock) {
            nsLDAPConnectionLoop *runnable = mRunnable;

            NS_ADDREF(runnable);
            PR_Lock(runnable->mLock);
            NS_DELETEXPCOM(this);
            PR_Unlock(runnable->mLock);
            NS_RELEASE(runnable);
        } else {
            NS_DELETEXPCOM(this);
        }

        return 0;
    }
    return count;
}
static PRIntervalTime Alarms1(PRUint32 loops)
{
    PRAlarm *alarm;
    AlarmData ad;
    PRIntervalTime overhead, timein = PR_IntervalNow();
    PRIntervalTime duration = PR_SecondsToInterval(3);

    PRLock *ml = PR_NewLock();
    PRCondVar *cv = PR_NewCondVar(ml);

    ad.ml = ml; ad.cv = cv;
    ad.rate = 1; ad.times = loops;
    ad.late = ad.times = 0;
    ad.duration = duration;
    ad.timein = PR_IntervalNow();
    ad.period = PR_SecondsToInterval(1);

    alarm = PR_CreateAlarm();

    (void)PR_SetAlarm(
        alarm, ad.period, ad.rate, AlarmFn1, &ad);

    overhead = PR_IntervalNow() - timein;

    PR_Lock(ml);
    while ((PRIntervalTime)(PR_IntervalNow() - ad.timein) < duration)
        PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(ml);

    timein = PR_IntervalNow();

    (void)PR_DestroyAlarm(alarm);
    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);

    overhead += (PR_IntervalNow() - timein);

    return duration + overhead;
}  /* Alarms1 */
void
slapi_be_stopping (Slapi_Backend *be)
{
    int i;

    PR_Lock (be->be_state_lock);
    for (i=0; ((i<maxbackends) && backends[i] != be); i++)
        ;

    PR_ASSERT(i<maxbackends);
    backends[i] = NULL;
    be->be_state = BE_STATE_DELETED;
    if (be->be_lock != NULL)
    {
        slapi_destroy_rwlock(be->be_lock);
        be->be_lock = NULL;
    }
    nbackends--;
    PR_Unlock (be->be_state_lock);
}
/*
** Return a file descriptor to the cache unless there are too many in
** there already. If put in cache, clear the fields first.
*/
void _PR_Putfd(PRFileDesc *fd)
{
    PR_ASSERT(PR_NSPR_IO_LAYER == fd->identity);
    fd->methods = &_pr_faulty_methods;
    fd->identity = PR_INVALID_IO_LAYER;
    fd->secret->state = _PR_FILEDESC_FREED;

    if (0 == _pr_fd_cache.limit_high) {
        PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
    } else {
        if (_pr_fd_cache.count > _pr_fd_cache.limit_high) {
            PR_Free(fd->secret);
            PR_Free(fd);
        } else {
            PR_Lock(_pr_fd_cache.ml);
            if (NULL == _pr_fd_cache.tail) {
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.head);
                _pr_fd_cache.head = _pr_fd_cache.tail = fd;
            } else {
                PR_ASSERT(NULL == _pr_fd_cache.tail->higher);
                _pr_fd_cache.tail->higher = fd;
                _pr_fd_cache.tail = fd;  /* new value */
            }
            fd->higher = NULL;  /* always so */
            _pr_fd_cache.count += 1;  /* count the new entry */
            PR_Unlock(_pr_fd_cache.ml);
        }
    }
}  /* _PR_Putfd */
int
is_anyinstance_busy(struct ldbminfo *li)
{
    ldbm_instance *inst;
    Object *inst_obj;
    int rval = 0;

    /* check each backend instance for the busy flag */
    for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
        inst = (ldbm_instance *)object_get_data(inst_obj);
        PR_Lock(inst->inst_config_mutex);
        rval = inst->inst_flags & INST_FLAG_BUSY;
        PR_Unlock(inst->inst_config_mutex);
        if (0 != rval) {
            break;
        }
    }
    if (inst_obj)
        object_release(inst_obj);
    return rval;
}
/* Starts a backend instance */
int
ldbm_instance_start(backend *be)
{
    int rc;

    PR_Lock (be->be_state_lock);
    if (be->be_state != BE_STATE_STOPPED &&
        be->be_state != BE_STATE_DELETED) {
        slapi_log_err(SLAPI_LOG_TRACE, "ldbm_instance_start",
                      "Warning - backend is in a wrong state - %d\n",
                      be->be_state);
        PR_Unlock (be->be_state_lock);
        return 0;
    }

    rc = dblayer_instance_start(be, DBLAYER_NORMAL_MODE);
    be->be_state = BE_STATE_STARTED;
    PR_Unlock (be->be_state_lock);

    return rc;
}
void
nsPluginInstance::shut(void)
{
    DBG("nsPluginInstance::shut\n");

    DBG("Acquiring playerLock mutex for shutdown.\n");
    PR_Lock(playerLock);
    _shutdown = TRUE;
    DBG("Releasing playerLock mutex for shutdown.\n");
    PR_Unlock(playerLock);

    if (_thread) {
        DBG("Waiting for thread to terminate.\n");
        PR_JoinThread(_thread);
        _thread = NULL;
    }

    // subclass it back
    //SubclassWindow(_window, _oldWndProc);

    _initialized = FALSE;
}
PR_IMPLEMENT(void)
    PR_DestroyTrace(
        PRTraceHandle handle    /* Handle to be destroyed */
)
{
    RName *rnp = (RName *)handle;
    QName *qnp = rnp->qName;

    PR_LOG( lm, PR_LOG_DEBUG, ("PRTrace: Deleting: QName: %s, RName: %s",
        qnp->name, rnp->name));

    /* Lock the Facility */
    PR_Lock( traceLock );

    /*
    ** Remove RName from the list of RNames in QName
    ** and free RName
    */
    PR_LOG( lm, PR_LOG_DEBUG, ("PRTrace: Deleting RName: %s, %p",
        rnp->name, rnp));
    PR_REMOVE_LINK( &rnp->link );
    PR_Free( rnp->lock );
    PR_DELETE( rnp );

    /*
    ** If this is the last RName within QName
    ** remove QName from the qNameList and free it
    */
    if ( PR_CLIST_IS_EMPTY( &qnp->rNameList ) )
    {
        PR_LOG( lm, PR_LOG_DEBUG, ("PRTrace: Deleting unused QName: %s, %p",
            qnp->name, qnp));
        PR_REMOVE_LINK( &qnp->link );
        PR_DELETE( qnp );
    }

    /* Unlock the Facility */
    PR_Unlock( traceLock );

    return;
} /* end PR_DestroyTrace() */
SECStatus reap_threads(GlobalThreadMgr *threadMGR)
{
    perThread *slot;
    int i;

    if (!threadMGR->threadLock)
        return SECSuccess;
    PR_Lock(threadMGR->threadLock);
    while (threadMGR->numRunning > 0) {
        PR_WaitCondVar(threadMGR->threadEndQ, PR_INTERVAL_NO_TIMEOUT);
        for (i = 0; i < threadMGR->numUsed; ++i) {
            slot = &threadMGR->threads[i];
            if (slot->running == rs_zombie) {
                /* Handle cleanup of thread here. */

                /* Now make sure the thread has ended OK. */
                PR_JoinThread(slot->prThread);
                slot->running = rs_idle;
                --threadMGR->numRunning;

                /* notify the thread launcher. */
                PR_NotifyCondVar(threadMGR->threadStartQ);
            }
        }
    }

    /* Safety Sam sez: make sure count is right. */
    for (i = 0; i < threadMGR->numUsed; ++i) {
        slot = &threadMGR->threads[i];
        if (slot->running != rs_idle) {
            fprintf(stderr, "Thread in slot %d is in state %d!\n",
                    i, slot->running);
        }
    }
    PR_Unlock(threadMGR->threadLock);
    return SECSuccess;
}
/** Registers a closure of the form callback(cx, userdata) to be called by Spidermonkey's Operation Callback API.
 *  You must *NEVER* call this function from *within* a callback function which has been registered with this facility!
 *  The punishment might just be deadlock! Don't call this function from a different thread/JSContext than the one
 *  that you're associating the callback with.
 *
 *  This call may traverse the entire linked list of registrations. Don't add and remove callbacks a lot!
 *
 *  @returns A pointer that can be used to delete the callback registration at a later time, or NULL on error.
 */
GPSEEAsyncCallback *gpsee_addAsyncCallback(JSContext *cx, GPSEEAsyncCallbackFunction callback, void *userdata)
{
  gpsee_runtime_t *grt = (gpsee_runtime_t *) JS_GetRuntimePrivate(JS_GetRuntime(cx));
  GPSEEAsyncCallback *newcb, **pp;

  /* Allocate the new callback entry struct */
  newcb = JS_malloc(cx, sizeof(GPSEEAsyncCallback));
  if (!newcb)
  {
    JS_ReportOutOfMemory(cx);
    return NULL;
  }

  /* Initialize the new callback entry struct (except 'next' member, which gets set while we have a lock on the list) */
  newcb->callback = callback;
  newcb->userdata = userdata;
  newcb->cx = cx;

  /* Acquire mutex protecting grt->asyncCallbacks */
  PR_Lock(grt->asyncCallbacks_lock);

  /* Insert the new callback into the list */
  /* Locate a sorted insertion point into the linked list; sort by 'cx' member */
  for (pp = &grt->asyncCallbacks; *pp && (*pp)->cx > cx; pp = &(*pp)->next);
  /* Insert! */
  newcb->next = *pp;
  *pp = newcb;

  /* Relinquish mutex */
  PR_Unlock(grt->asyncCallbacks_lock);

  /* If this is the first time this context has had a callback registered, we must register a context callback to clean
   * up all callbacks associated with this context. Note that we don't want to do this for the primordial context, but
   * it's a moot point because gpsee_maybeGC() is registered soon after context instantiation and should never be
   * removed until just before context finalization, anyway. */
  if (!newcb->next || newcb->next->cx != cx)
    gpsee_getContextPrivate(cx, &grt->asyncCallbacks, 0, gpsee_removeAsyncCallbackContext);

  /* Return a pointer to the new callback entry struct */
  return newcb;
}
POOL_EXPORT void *
nspool_realloc(nspool_handle_t *pool_handle, void *ptr, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    void *newptr;
    block_t *block_ptr;
    int oldsize;

    if (pool_handle == NULL || pool_disable)
        return NSPERM_REALLOC(ptr, size);

    if ( (newptr = nspool_malloc(pool_handle, size)) == NULL)
        return NULL;

    /* With our structure we don't know exactly where the end
     * of the original block is. But we do know an upper bound
     * which is a valid ptr. Search the outstanding blocks
     * for the block which contains this ptr, and copy...
     */
#ifdef POOL_LOCKING
    PR_Lock(pool->lock);
#endif
    if ( !(block_ptr = _ptr_in_pool(pool, ptr)) ) {
        /* User is trying to realloc nonmalloc'd space! */
#ifdef POOL_LOCKING
        PR_Unlock(pool->lock);  /* release the pool lock before the early return */
#endif
        return newptr;
    }

    oldsize = block_ptr->end - (char *)ptr;
    if (oldsize > size)
        oldsize = size;
    memmove((char *)newptr, (char *)ptr, oldsize);
#ifdef POOL_LOCKING
    PR_Unlock(pool->lock);
#endif

    return newptr;
}
void
PKCS11Thread(void *data)
{
    ThreadData *threadData = (ThreadData *)data;
    pk11_op_func op = (pk11_op_func)threadData->op;
    int iters = threadData->iters;
    unsigned char sigData[256];
    SECItem sig;
    CK_SESSION_HANDLE session;
    CK_RV crv;

    threadData->status = SECSuccess;
    threadData->count = 0;

    /* get our thread's session */
    PR_Lock(threadData->lock);
    crv = NSC_OpenSession(1, CKF_SERIAL_SESSION, NULL, 0, &session);
    PR_Unlock(threadData->lock);
    if (crv != CKR_OK) {
        return;
    }

    if (threadData->isSign) {
        sig.data = sigData;
        sig.len = sizeof(sigData);
        threadData->p2 = (void *)&sig;
    }

    while (iters--) {
        threadData->status = (*op)(session, threadData->p1,
                                   threadData->p2, threadData->p3);
        if (threadData->status != SECSuccess) {
            break;
        }
        threadData->count++;
    }
    return;
}