/*
 * Pthread --
 *
 *      Test thread entry: stores its argument in thread-local storage,
 *      then blocks until main() signals the exit condition.  Returns
 *      its argument as the thread result.
 */
void *
Pthread(void *arg)
{
    static Ns_Tls tls;

    /*
     * Allocate TLS the first time through (this is the recommended
     * TLS self-initialization style: double-checked under the master
     * lock so concurrent first callers allocate the slot only once).
     *
     * NOTE(review): the unlocked first read of "tls" assumes that a
     * pointer-sized read/write is atomic on the target platform --
     * confirm this holds for Ns_Tls.
     */

    Ns_ThreadSetName("pthread");
    sleep(5);
    if (tls == NULL) {
        Ns_MasterLock();
        if (tls == NULL) {
            Ns_TlsAlloc(&tls, PthreadTlsCleanup);
        }
        Ns_MasterUnlock();
    }
    Ns_TlsSet(&tls, arg);

    /*
     * Wait for exit signal from main().
     */

    Ns_MutexLock(&plock);
    while (!pgo) {
        Ns_CondWait(&pcond, &plock);
    }
    Ns_MutexUnlock(&plock);

    return arg;
}
/*
 * NewThread --
 *
 *      Allocate and initialize a Thread record for the current thread,
 *      register it in TLS and prepend it to the global thread list.
 *      A NULL argPtr denotes a detached thread with no parent info.
 */
static Thread *
NewThread(ThreadArg *argPtr)
{
    Thread *thrPtr;
    int stack;

    thrPtr = ns_calloc(1, sizeof(Thread));
    Ns_GetTime(&thrPtr->ctime);
    thrPtr->tid = Ns_ThreadId();
    /*
     * NOTE(review): sprintf/strcpy below write into fixed-size struct
     * buffers without bounds checks -- confirm name[] and parent[] are
     * sized for the largest possible thread id and parent name.
     */
    sprintf(thrPtr->name, "-thread%d-", thrPtr->tid);
    if (argPtr == NULL) {
        thrPtr->flags = FLAG_DETACHED;
    } else {
        thrPtr->flags = argPtr->flags;
        thrPtr->proc = argPtr->proc;
        thrPtr->arg = argPtr->arg;
        strcpy(thrPtr->parent, argPtr->parent);
    }
    /*
     * Non-zero means the stack address/size were determined; a
     * negative value appears to indicate a downward-growing stack
     * (see FLAG_STACKDOWN) -- confirm against NsGetStack's contract.
     */
    stack = NsGetStack(&thrPtr->stackaddr, &thrPtr->stacksize);
    if (stack) {
        thrPtr->flags |= FLAG_HAVESTACK;
        if (stack < 0) {
            thrPtr->flags |= FLAG_STACKDOWN;
        }
    }
    Ns_TlsSet(&key, thrPtr);

    /*
     * Link the new record at the head of the global list, protected
     * by threadlock.
     */
    Ns_MutexLock(&threadlock);
    thrPtr->nextPtr = firstThreadPtr;
    firstThreadPtr = thrPtr;
    Ns_MutexUnlock(&threadlock);

    return thrPtr;
}
/*
 * Ns_SetThreadLocalStorage --
 *
 *      Compatibility wrapper: store a per-thread value through the
 *      underlying Ns_Tls API.  Always reports success.
 */
int
Ns_SetThreadLocalStorage(Ns_ThreadLocalStorage *tls, void *p)
{
    Ns_Tls *tlsPtr = (Ns_Tls *) tls;

    Ns_TlsSet(tlsPtr, p);

    return NS_OK;
}
gdbm_error * gdbm_perrno(void) { static Ns_Tls tls; gdbm_error *errPtr; if (tls == NULL) { Ns_MasterLock(); if (tls == NULL) { Ns_TlsAlloc(&tls, gdbm_free); } Ns_MasterUnlock(); } errPtr = Ns_TlsGet(&tls); if (errPtr == NULL) { errPtr = gdbm_malloc(sizeof(gdbm_error)); *errPtr = GDBM_NO_ERROR; Ns_TlsSet(&tls, errPtr); } return errPtr; }
/*
 * JsInit --
 *
 *      Per-interp initialization: create a SpiderMonkey runtime,
 *      context and global object, stash the environment in TLS and
 *      register the "js.eval" Tcl command.
 *
 * Results:
 *      TCL_OK, or TCL_ERROR if the JS runtime/context could not be
 *      created.
 *
 * Fix: allocate the shared TLS slot only once, using the same
 * master-lock double-checked initialization style used by the other
 * TLS users in this code.  The original called Ns_TlsAlloc() on
 * every interp init, re-allocating the slot each time and orphaning
 * values (and their JsCleanup registration) stored under earlier
 * slots.  Also check the JS allocation calls before dereferencing
 * their results.
 */
static int
JsInit(Tcl_Interp *interp, void *arg)
{
    jsEnv *jsEnvPtr;

    if (tls == NULL) {
        Ns_MasterLock();
        if (tls == NULL) {
            Ns_TlsAlloc(&tls, JsCleanup);
        }
        Ns_MasterUnlock();
    }

    jsEnvPtr = ns_calloc(1, sizeof(jsEnv));
    jsEnvPtr->runtime = JS_NewRuntime(8L * 1024L * 1024L);
    if (jsEnvPtr->runtime == NULL) {
        /* Fail cleanly instead of crashing inside JS_NewContext.
           (jsEnvPtr is leaked here; no matching free routine is in
           scope for this block.) */
        Ns_Log(Debug, "JsInit: JS_NewRuntime failed");
        return TCL_ERROR;
    }
    jsEnvPtr->context = JS_NewContext(jsEnvPtr->runtime, 8192);
    if (jsEnvPtr->context == NULL) {
        Ns_Log(Debug, "JsInit: JS_NewContext failed");
        return TCL_ERROR;
    }
    jsEnvPtr->object = JS_NewObject(jsEnvPtr->context, NULL, NULL, NULL);
    JS_InitStandardClasses(jsEnvPtr->context, jsEnvPtr->object);
    JS_SetErrorReporter(jsEnvPtr->context, JsLogError);
    Ns_TlsSet(&tls, jsEnvPtr);
    Ns_Log(Debug, "JsInit: %p", jsEnvPtr);
    Tcl_CreateObjCommand(interp, "js.eval", JsEvalObjCmd, NULL, NULL);
    return TCL_OK;
}
/*
 * WorkThread --
 *
 *      Test thread: exercises rwlocks, recursive critical sections,
 *      semaphores, TLS and timed condition waits, then exits with its
 *      thread index as the result.
 */
void
WorkThread(void *arg)
{
    /*
     * NOTE(review): casting the void * arg through int truncates on
     * LP64 platforms -- presumably arg always carries a small thread
     * index; confirm with the caller.
     */
    int i = (int) arg;
    int *ip;
    time_t now;
    Ns_Thread self;
    char name[32];

    sprintf(name, "-work:%d-", i);
    Ns_ThreadSetName(name);

    /*
     * Thread 2 takes the write lock and holds it longer; every other
     * thread shares the read lock.
     */
    if (i == 2) {
        Ns_RWLockWrLock(&rwlock);
        Msg("rwlock write aquired");
        sleep(2);
    } else {
        Ns_RWLockRdLock(&rwlock);
        Msg("rwlock read aquired aquired");
        sleep(1);
    }

    /*
     * Demonstrate recursive critical sections: enter twice, leave
     * twice.
     */
    Ns_CsEnter(&cs);
    Msg("enter critical section once");
    Ns_CsEnter(&cs);
    Msg("enter critical section twice");
    Ns_CsLeave(&cs);
    Ns_CsLeave(&cs);

    Ns_ThreadSelf(&self);
    arg = Ns_TlsGet(&key);
    Ns_SemaWait(&sema);
    Msg("got semaphore posted from main");

    /*
     * Allocate this thread's TLS value on first use; the cleanup
     * callback registered for "key" presumably frees it -- confirm.
     */
    if (arg == NULL) {
        arg = ns_malloc(sizeof(int));
        Ns_TlsSet(&key, arg);
    }
    ip = arg;
    *ip = i;

    if (i == 5) {
        Ns_Time to;
        int st;

        /*
         * Thread 5 exercises an absolute 5-second condition timeout.
         */
        Ns_GetTime(&to);
        Msg("time: %ld %ld", to.sec, to.usec);
        Ns_IncrTime(&to, 5, 0);
        Msg("time: %ld %ld", to.sec, to.usec);
        Ns_MutexLock(&lock);
        time(&now);
        Msg("timed wait starts: %s", ns_ctime(&now));
        st = Ns_CondTimedWait(&cond, &lock, &to);
        Ns_MutexUnlock(&lock);
        time(&now);
        Msg("timed wait ends: %s - status: %d", ns_ctime(&now), st);
    }
    if (i == 9) {
        Msg("sleep 4 seconds start");
        sleep(4);
        Msg("sleep 4 seconds done");
    }
    time(&now);
    Ns_RWLockUnlock(&rwlock);
    Msg("rwlock unlocked");
    Msg("exiting");

    /* Exit with the thread index as the result (int-to-pointer cast). */
    Ns_ThreadExit((void *) i);
}
/*
 * NsConnThread --
 *
 *      Connection service thread: repeatedly pulls queued connections
 *      off the pool's wait list and runs them, until the randomized
 *      per-thread connection budget is exhausted, the idle timeout
 *      expires, or the pool is shut down.  On exit the thread queues
 *      itself for reaping and may respawn a replacement thread.
 *
 *      Locking: poolPtr->lock is held across the loop boundaries --
 *      acquired before the loop, released only around ConnRun() and
 *      NsFreeConn(), and re-acquired before the next iteration.
 */
void
NsConnThread(void *arg)
{
    ConnData *dataPtr = arg;
    Pool *poolPtr = dataPtr->poolPtr;
    Conn *connPtr;
    Ns_Time wait, *timePtr;
    char name[100];
    int status, ncons;
    char *msg;
    double spread;

    /*
     * Set the conn thread name.
     */

    Ns_TlsSet(&ctdtls, dataPtr);
    Ns_MutexLock(&poolPtr->lock);
    sprintf(name, "-%s:%d-", poolPtr->name, poolPtr->threads.nextid++);
    Ns_MutexUnlock(&poolPtr->lock);
    Ns_ThreadSetName(name);

    /*
     * spread is a value of 1.0 +- the specified percentage, i.e.
     * between 0.0 and 2.0 when the configured percentage is 100.  It
     * randomizes the per-thread connection budget and timeout so the
     * pool's threads do not all expire at the same moment.
     */
    spread = 1.0 + (2 * poolPtr->threads.spread * Ns_DRand() - poolPtr->threads.spread) / 100.0;
    ncons = round(poolPtr->threads.maxconns * spread);
    msg = "exceeded max connections per thread";

    /*
     * Start handling connections.
     */

    Ns_MutexLock(&poolPtr->lock);
    poolPtr->threads.starting--;
    poolPtr->threads.idle++;

    while (poolPtr->threads.maxconns <= 0 || ncons-- > 0) {

        /*
         * Wait for a connection to arrive, exiting if one doesn't
         * arrive in the configured timeout period.  Threads at or
         * below the pool minimum wait forever (timePtr == NULL).
         */

        if (poolPtr->threads.current <= poolPtr->threads.min) {
            timePtr = NULL;
        } else {
            Ns_GetTime(&wait);
            Ns_IncrTime(&wait, round(poolPtr->threads.timeout * spread), 0);
            timePtr = &wait;
        }

        status = NS_OK;
        while (!poolPtr->shutdown
               && status == NS_OK
               && poolPtr->queue.wait.firstPtr == NULL) {
            /* nothing is queued, we wait for a queue entry */
            poolPtr->threads.waiting++;
            status = Ns_CondTimedWait(&poolPtr->cond, &poolPtr->lock, timePtr);
            poolPtr->threads.waiting--;
        }
        if (poolPtr->queue.wait.firstPtr == NULL) {
            msg = "timeout waiting for connection";
            break;
        }

        /*
         * Pull the first connection off the waiting list and append
         * it to the active list (still under poolPtr->lock).
         */

        connPtr = poolPtr->queue.wait.firstPtr;
        poolPtr->queue.wait.firstPtr = connPtr->nextPtr;
        if (poolPtr->queue.wait.lastPtr == connPtr) {
            poolPtr->queue.wait.lastPtr = NULL;
        }
        connPtr->nextPtr = NULL;
        connPtr->prevPtr = poolPtr->queue.active.lastPtr;
        if (poolPtr->queue.active.lastPtr != NULL) {
            poolPtr->queue.active.lastPtr->nextPtr = connPtr;
        }
        poolPtr->queue.active.lastPtr = connPtr;
        if (poolPtr->queue.active.firstPtr == NULL) {
            poolPtr->queue.active.firstPtr = connPtr;
        }
        poolPtr->threads.idle--;
        poolPtr->queue.wait.num--;
        Ns_MutexUnlock(&poolPtr->lock);

        /*
         * Run the connection.  dataPtr->connPtr is published under
         * connlock so other threads can observe which connection this
         * thread is currently serving.
         */

        Ns_MutexLock(&connlock);
        dataPtr->connPtr = connPtr;
        Ns_MutexUnlock(&connlock);

        Ns_GetTime(&connPtr->times.run);
        ConnRun(connPtr);

        Ns_MutexLock(&connlock);
        dataPtr->connPtr = NULL;
        Ns_MutexUnlock(&connlock);

        /*
         * Remove from the active list and push on the free list.
         */

        Ns_MutexLock(&poolPtr->lock);
        if (connPtr->prevPtr != NULL) {
            connPtr->prevPtr->nextPtr = connPtr->nextPtr;
        } else {
            poolPtr->queue.active.firstPtr = connPtr->nextPtr;
        }
        if (connPtr->nextPtr != NULL) {
            connPtr->nextPtr->prevPtr = connPtr->prevPtr;
        } else {
            poolPtr->queue.active.lastPtr = connPtr->prevPtr;
        }
        poolPtr->threads.idle++;
        Ns_MutexUnlock(&poolPtr->lock);
        NsFreeConn(connPtr);
        Ns_MutexLock(&poolPtr->lock);
    }

    /*
     * Append this thread to list of threads to reap.
     */

    Ns_MutexLock(&joinlock);
    dataPtr->nextPtr = joinPtr;
    joinPtr = dataPtr;
    Ns_MutexUnlock(&joinlock);

    /*
     * Mark this thread as no longer active (poolPtr->lock is still
     * held here from the loop exit).
     */

    if (poolPtr->shutdown) {
        msg = "shutdown pending";
    }
    poolPtr->threads.current--;
    poolPtr->threads.idle--;

    if (((poolPtr->queue.wait.num > 0
          && poolPtr->threads.idle == 0
          && poolPtr->threads.starting == 0
          ) || (poolPtr->threads.current < poolPtr->threads.min)
         ) && !poolPtr->shutdown) {
        /*
         * Recreate a thread when one of these conditions holds:
         *  - queue entries are still waiting, but no thread is either
         *    starting or idle, or
         *  - there are fewer than minthreads connection threads alive.
         */
        poolPtr->threads.current ++;
        Ns_MutexUnlock(&poolPtr->lock);
        NsCreateConnThread(poolPtr, 0); /* joinThreads == 0 to avoid deadlock */
    } else if (poolPtr->queue.wait.num > 0 && poolPtr->threads.waiting > 0) {
        /* Wake up a waiting thread */
        Ns_CondSignal(&poolPtr->cond);
        Ns_MutexUnlock(&poolPtr->lock);
    } else {
        Ns_MutexUnlock(&poolPtr->lock);
    }

    Ns_Log(Notice, "exiting: %s", msg);
    Ns_ThreadExit(dataPtr);
}
/**
 * Main service method, called to forward a request to tomcat.
 *
 * Maps the connection's URI context to a worker (falling back to the
 * global default worker, or a lookup by the mbean "group" attribute),
 * allocates a per-request pool and service record, and dispatches the
 * request through worker->service().
 *
 * Returns NS_OK on success, NS_FILTER_RETURN when no worker matches
 * or tomcat could not be reached (after returning a 500 to the
 * client in the latter case).
 */
static int jk2_handler(void* context, Ns_Conn *conn)
{
    jk_logger_t *l=NULL;            /* NOTE(review): unused in this function */
    int rc;
    jk_worker_t *worker=NULL;
    jk_endpoint_t *end = NULL;      /* NOTE(review): unused in this function */
    jk_env_t *env;
    char* workerName;
    jk_ws_service_t *s=NULL;
    jk_pool_t *rPool=NULL;
    int rc1;
    jk_uriEnv_t *uriEnv;
    uriContext* uriCtx = (uriContext*)context;

    /* must make this call to assure the TLS destructor runs when the
       thread exits */
    Ns_TlsSet (&jkTls, jvmGlobal);

    uriEnv= uriCtx->uriEnv;
    /* there is a chance of dynamic reconfiguration, so instead of
       doing the above statement, one might map the URI to its context
       object on the fly, like this:

       uriEnv = workerEnv->uriMap->mapUri (workerEnv->globalEnv,
                workerEnv->uriMap, conn->request->host,
                conn->request->url);
    */

    /* Get an env instance */
    env = workerEnv->globalEnv->getEnv( workerEnv->globalEnv );

    worker = uriEnv->worker;
    if (worker == NULL)   /* use global default */
        worker=workerEnv->defaultWorker;
    workerName = uriEnv->mbean->getAttribute (env, uriEnv->mbean, "group");
    if( worker==NULL && workerName!=NULL ) {
        /* Resolve the worker by its configured group name and cache
           it on the uriEnv for subsequent requests. */
        worker=env->getByName( env, workerName);
        env->l->jkLog(env, env->l, JK_LOG_INFO,
                      "finding worker for %#lx %#lx %s\n",
                      worker, uriEnv, workerName);
        uriEnv->worker=worker;
    }

    if(worker==NULL || worker->mbean==NULL || worker->mbean->localName==NULL ) {
        env->l->jkLog(env, env->l, JK_LOG_ERROR,
                      "No worker for %s\n", conn->request->url);
        workerEnv->globalEnv->releaseEnv( workerEnv->globalEnv, env );
        return NS_FILTER_RETURN;
    }

    if( uriEnv->mbean->debug > 0 )
        env->l->jkLog(env, env->l, JK_LOG_DEBUG,
                      "serving %s with %#lx %#lx %s\n",
                      uriEnv->mbean->localName, worker, worker->mbean,
                      worker->mbean->localName );

    /* Get a pool for the request (reused from the cache when
       possible, otherwise freshly created). */
    rPool= worker->rPoolCache->get( env, worker->rPoolCache );
    if( rPool == NULL ) {
        rPool=worker->mbean->pool->create( env, worker->mbean->pool,
                                           HUGE_POOL_SIZE );
        if( uriEnv->mbean->debug > 0 )
            env->l->jkLog(env, env->l, JK_LOG_DEBUG,
                          "new rpool %#lx\n", rPool );
    }

    /* NOTE(review): the pool calloc result is not checked for NULL
       before s->init below -- confirm the pool allocator aborts
       rather than returning NULL on OOM. */
    s=(jk_ws_service_t *)rPool->calloc( env, rPool, sizeof( jk_ws_service_t ));

    jk2_service_ns_init( env, s, uriCtx->serverName);

    s->pool = rPool;
    s->init( env, s, worker, conn );
    s->is_recoverable_error = JK_FALSE;
    s->uriEnv = uriEnv;

    rc = worker->service(env, worker, s);

    /* Return the request pool to the cache; close it if the cache is
       full and refused it. */
    s->afterRequest(env, s);
    rPool->reset(env, rPool);
    rc1=worker->rPoolCache->put( env, worker->rPoolCache, rPool );
    if( rc1 == JK_OK ) {
        rPool=NULL;
    }
    if( rPool!=NULL ) {
        rPool->close(env, rPool);
    }

    if(rc==JK_OK) {
        workerEnv->globalEnv->releaseEnv( workerEnv->globalEnv, env );
        return NS_OK;
    }

    env->l->jkLog(env, env->l, JK_LOG_ERROR,
                  "Error connecting to tomcat %d\n", rc);
    workerEnv->globalEnv->releaseEnv( workerEnv->globalEnv, env );
    Ns_ConnReturnInternalError (conn);
    return NS_FILTER_RETURN;
}