/*
 * Destroy a Repl_Objset.
 * Arguments:
 *  o: the object set to be destroyed
 *  maxwait: the maximum time to wait for all object refcnts to
 *           go to zero.
 *  panic_fn: a function to be called if, after waiting "maxwait"
 *            seconds, not all object refcnts are zero.
 * The caller must ensure that no one else holds references to the
 * set or any objects it contains.
 */
void
repl_objset_destroy(Repl_Objset **o, time_t maxwait, FNFree panic_fn)
{
    Repl_Objset_object *co = NULL;
    time_t now, stop_time;
    int really_gone;
    int loopcount;
    void *cookie;

    PR_ASSERT(NULL != o);
    PR_ASSERT(NULL != *o);

    time(&now);
    stop_time = now + maxwait;

    /*
     * Loop over the objects until they all are actually gone,
     * or until maxwait seconds have passed.
     */
    really_gone = 0;
    loopcount = 0;
    while (now < stop_time)
    {
        void *cookie;

        PR_Lock((*o)->lock);
        if ((co = llistGetFirst((*o)->objects, &cookie)) == NULL)
        {
            really_gone = 1;
            PR_Unlock((*o)->lock);
            break;
        }
        while (NULL != co)
        {
            /* Set the deleted flag so the object isn't returned by iterator */
            co->flags |= REPL_OBJSET_OBJ_FLAG_DELETED;
            if (0 == co->refcnt)
            {
                /* Remove the object */
                co = removeCurrentObjectAndGetNextNolock((*o), co, cookie);
            }
            else
                co = llistGetNext((*o)->objects, &cookie);
        }
        PR_Unlock((*o)->lock);
        time(&now);
        if (loopcount > 0)
        {
            DS_Sleep(PR_TicksPerSecond());
        }
        loopcount++;
    }

    if (!really_gone)
    {
        if (NULL != panic_fn)
        {
            /*
             * Call the "aargh, this thing won't go away" panic
             * function for each remaining object.
             */
            PR_Lock((*o)->lock);
            co = llistGetFirst((*o)->objects, &cookie);
            while (NULL != co)
            {
                panic_fn(co->data);
                co = llistGetNext((*o)->objects, &cookie);
            }
            PR_Unlock((*o)->lock);
        }
    }
    else
    {
        /* Free the linked list */
        llistDestroy(&(*o)->objects, (*o)->destructor);
        PR_DestroyLock((*o)->lock);
        slapi_ch_free((void **)o);
    }
}
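/*
 * Illustrative caller of repl_objset_destroy() -- a hedged sketch, not part
 * of the original source: drain an object set at shutdown and log any entry
 * whose refcnt never reached zero.  "my_panic_fn" and "shutdown_example" are
 * hypothetical names; FNFree is assumed to be a void (*)(void *) as suggested
 * by the panic_fn(co->data) call above.
 */
#include <stdio.h>

static void
my_panic_fn(void *data)
{
    /* Called once for each object still referenced after maxwait seconds. */
    fprintf(stderr, "replication object %p still referenced at shutdown\n", data);
}

static void
shutdown_example(Repl_Objset **set)
{
    /* Give readers up to 30 seconds to drop their references. */
    repl_objset_destroy(set, (time_t)30, my_panic_fn);
}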
CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex) { PRInt32 err; PRFileDesc *model = NULL; PRBool ssl2, ssl3, tlsv1; struct SessionHandle *data = conn->data; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; SECStatus rv; char *certDir = NULL; int curlerr; const int *cipher_to_enable; curlerr = CURLE_SSL_CONNECT_ERROR; if (connssl->state == ssl_connection_complete) return CURLE_OK; connssl->data = data; #ifdef HAVE_PK11_CREATEGENERICOBJECT connssl->cacert[0] = NULL; connssl->cacert[1] = NULL; connssl->key = NULL; #endif /* FIXME. NSS doesn't support multiple databases open at the same time. */ PR_Lock(nss_initlock); if(!initialized) { struct_stat st; /* First we check if $SSL_DIR points to a valid dir */ certDir = getenv("SSL_DIR"); if(certDir) { if((stat(certDir, &st) != 0) || (!S_ISDIR(st.st_mode))) { certDir = NULL; } } /* Now we check if the default location is a valid dir */ if(!certDir) { if((stat(SSL_DIR, &st) == 0) && (S_ISDIR(st.st_mode))) { certDir = (char *)SSL_DIR; } } if (!NSS_IsInitialized()) { initialized = 1; infof(conn->data, "Initializing NSS with certpath: %s\n", certDir ? certDir : "none"); if(!certDir) { rv = NSS_NoDB_Init(NULL); } else { char *certpath = PR_smprintf("%s%s", NSS_VersionCheck("3.12.0") ? "sql:" : "", certDir); rv = NSS_Initialize(certpath, "", "", "", NSS_INIT_READONLY); PR_smprintf_free(certpath); } if(rv != SECSuccess) { infof(conn->data, "Unable to initialize NSS database\n"); curlerr = CURLE_SSL_CACERT_BADFILE; initialized = 0; PR_Unlock(nss_initlock); goto error; } } if(num_enabled_ciphers() == 0) NSS_SetDomesticPolicy(); #ifdef HAVE_PK11_CREATEGENERICOBJECT if(!mod) { char *configstring = aprintf("library=%s name=PEM", pem_library); if(!configstring) { PR_Unlock(nss_initlock); goto error; } mod = SECMOD_LoadUserModule(configstring, NULL, PR_FALSE); free(configstring); if(!mod || !mod->loaded) { if(mod) { SECMOD_DestroyModule(mod); mod = NULL; } infof(data, "WARNING: failed to load NSS PEM library %s. 
Using OpenSSL " "PEM certificates will not work.\n", pem_library); } } #endif PK11_SetPasswordFunc(nss_get_password); } PR_Unlock(nss_initlock); model = PR_NewTCPSocket(); if(!model) goto error; model = SSL_ImportFD(NULL, model); if(SSL_OptionSet(model, SSL_SECURITY, PR_TRUE) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_SERVER, PR_FALSE) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_CLIENT, PR_TRUE) != SECSuccess) goto error; ssl2 = ssl3 = tlsv1 = PR_FALSE; switch (data->set.ssl.version) { default: case CURL_SSLVERSION_DEFAULT: ssl3 = tlsv1 = PR_TRUE; break; case CURL_SSLVERSION_TLSv1: tlsv1 = PR_TRUE; break; case CURL_SSLVERSION_SSLv2: ssl2 = PR_TRUE; break; case CURL_SSLVERSION_SSLv3: ssl3 = PR_TRUE; break; } if(SSL_OptionSet(model, SSL_ENABLE_SSL2, ssl2) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_ENABLE_SSL3, ssl3) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_ENABLE_TLS, tlsv1) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_V2_COMPATIBLE_HELLO, ssl2) != SECSuccess) goto error; /* enable all ciphers from enable_ciphers_by_default */ cipher_to_enable = enable_ciphers_by_default; while (SSL_NULL_WITH_NULL_NULL != *cipher_to_enable) { if (SSL_CipherPrefSet(model, *cipher_to_enable, PR_TRUE) != SECSuccess) { curlerr = CURLE_SSL_CIPHER; goto error; } cipher_to_enable++; } if(data->set.ssl.cipher_list) { if(set_ciphers(data, model, data->set.ssl.cipher_list) != SECSuccess) { curlerr = CURLE_SSL_CIPHER; goto error; } } if(data->set.ssl.verifyhost == 1) infof(data, "warning: ignoring unsupported value (1) of ssl.verifyhost\n"); data->set.ssl.certverifyresult=0; /* not checked yet */ if(SSL_BadCertHook(model, (SSLBadCertHandler) BadCertHandler, conn) != SECSuccess) { goto error; } if(SSL_HandshakeCallback(model, (SSLHandshakeCallback) HandshakeCallback, NULL) != SECSuccess) goto error; if(!data->set.ssl.verifypeer) /* skip the verifying of the peer */ ; else if(data->set.ssl.CAfile) { int rc = nss_load_cert(&conn->ssl[sockindex], data->set.ssl.CAfile, PR_TRUE); if(!rc) { curlerr = CURLE_SSL_CACERT_BADFILE; goto error; } } else if(data->set.ssl.CApath) { struct_stat st; PRDir *dir; PRDirEntry *entry; if(stat(data->set.ssl.CApath, &st) == -1) { curlerr = CURLE_SSL_CACERT_BADFILE; goto error; } if(S_ISDIR(st.st_mode)) { int rc; dir = PR_OpenDir(data->set.ssl.CApath); do { entry = PR_ReadDir(dir, PR_SKIP_BOTH | PR_SKIP_HIDDEN); if(entry) { char fullpath[PATH_MAX]; snprintf(fullpath, sizeof(fullpath), "%s/%s", data->set.ssl.CApath, entry->name); rc = nss_load_cert(&conn->ssl[sockindex], fullpath, PR_TRUE); /* FIXME: check this return value! */ } /* This is purposefully tolerant of errors so non-PEM files * can be in the same directory */ } while(entry != NULL); PR_CloseDir(dir); } } infof(data, " CAfile: %s\n" " CApath: %s\n", data->set.ssl.CAfile ? data->set.ssl.CAfile : "none", data->set.ssl.CApath ? data->set.ssl.CApath : "none"); if (data->set.ssl.CRLfile) { int rc = nss_load_crl(data->set.ssl.CRLfile, PR_FALSE); if (!rc) { curlerr = CURLE_SSL_CRL_BADFILE; goto error; } infof(data, " CRLfile: %s\n", data->set.ssl.CRLfile ? 
data->set.ssl.CRLfile : "none"); } if(data->set.str[STRING_CERT]) { bool nickname_alloc = FALSE; char *nickname = fmt_nickname(data->set.str[STRING_CERT], &nickname_alloc); if(!nickname) return CURLE_OUT_OF_MEMORY; if(!cert_stuff(conn, sockindex, data->set.str[STRING_CERT], data->set.str[STRING_KEY])) { /* failf() is already done in cert_stuff() */ if(nickname_alloc) free(nickname); return CURLE_SSL_CERTPROBLEM; } /* this "takes over" the pointer to the allocated name or makes a dup of it */ connssl->client_nickname = nickname_alloc?nickname:strdup(nickname); if(!connssl->client_nickname) return CURLE_OUT_OF_MEMORY; } else connssl->client_nickname = NULL; if(SSL_GetClientAuthDataHook(model, SelectClientCert, (void *)connssl) != SECSuccess) { curlerr = CURLE_SSL_CERTPROBLEM; goto error; } /* Import our model socket onto the existing file descriptor */ connssl->handle = PR_ImportTCPSocket(sockfd); connssl->handle = SSL_ImportFD(model, connssl->handle); if(!connssl->handle) goto error; PR_Close(model); /* We don't need this any more */ /* This is the password associated with the cert that we're using */ if (data->set.str[STRING_KEY_PASSWD]) { SSL_SetPKCS11PinArg(connssl->handle, data->set.str[STRING_KEY_PASSWD]); } /* Force handshake on next I/O */ SSL_ResetHandshake(connssl->handle, /* asServer */ PR_FALSE); SSL_SetURL(connssl->handle, conn->host.name); /* Force the handshake now */ if(SSL_ForceHandshakeWithTimeout(connssl->handle, PR_SecondsToInterval(HANDSHAKE_TIMEOUT)) != SECSuccess) { if(conn->data->set.ssl.certverifyresult == SSL_ERROR_BAD_CERT_DOMAIN) curlerr = CURLE_PEER_FAILED_VERIFICATION; else if(conn->data->set.ssl.certverifyresult!=0) curlerr = CURLE_SSL_CACERT; goto error; } connssl->state = ssl_connection_complete; display_conn_info(conn, connssl->handle); if (data->set.str[STRING_SSL_ISSUERCERT]) { SECStatus ret; bool nickname_alloc = FALSE; char *nickname = fmt_nickname(data->set.str[STRING_SSL_ISSUERCERT], &nickname_alloc); if(!nickname) return CURLE_OUT_OF_MEMORY; ret = check_issuer_cert(connssl->handle, nickname); if(nickname_alloc) free(nickname); if(SECFailure == ret) { infof(data,"SSL certificate issuer check failed\n"); curlerr = CURLE_SSL_ISSUER_ERROR; goto error; } else { infof(data, "SSL certificate issuer check ok\n"); } } return CURLE_OK; error: err = PR_GetError(); infof(data, "NSS error %d\n", err); if(model) PR_Close(model); return curlerr; }
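/*
 * Standalone sketch of the NSS bootstrap decision made above -- an
 * illustration, not the original helper: open a read-only certificate
 * database when a directory is available, otherwise fall back to
 * NSS_NoDB_Init().  "init_nss_sketch" and "certdir" are hypothetical names.
 */
#include <seccomon.h>
#include <nss.h>
#include <prprf.h>

static SECStatus
init_nss_sketch(const char *certdir)
{
    SECStatus rv;
    char *certpath;

    if(!certdir)
        return NSS_NoDB_Init(NULL);

    /* NSS 3.12 and newer understand the "sql:" shared-database prefix. */
    certpath = PR_smprintf("%s%s",
                           NSS_VersionCheck("3.12.0") ? "sql:" : "",
                           certdir);
    if(!certpath)
        return SECFailure;

    rv = NSS_Initialize(certpath, "", "", "", NSS_INIT_READONLY);
    PR_smprintf_free(certpath);
    return rv;
}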
PRInt16 nsMAPIConfiguration::RegisterSession(PRUint32 aHwnd, const PRUnichar *aUserName, const PRUnichar *aPassword, PRBool aForceDownLoad, PRBool aNewSession, PRUint32 *aSession, const char *aIdKey) { PRInt16 nResult = 0; PRUint32 n_SessionId = 0; PR_Lock(m_Lock); // Check whether max sessions is exceeded if (sessionCount >= m_nMaxSessions) { PR_Unlock(m_Lock); return -1; } if (aUserName != nsnull && aUserName[0] != '\0') m_ProfileMap.Get(nsDependentString(aUserName), &n_SessionId); // try to share a session; if not create a session if (n_SessionId > 0) { nsMAPISession *pTemp = nsnull; m_SessionMap.Get(n_SessionId, &pTemp); if (pTemp != nsnull) { pTemp->IncrementSession(); *aSession = n_SessionId; nResult = 1; } } else if (aNewSession || n_SessionId == 0) // checking for n_SessionId is a concession { // create a new session; if new session is specified OR there is no session nsMAPISession *pTemp = nsnull; pTemp = new nsMAPISession(aHwnd, aUserName, aPassword, aForceDownLoad, aIdKey); if (pTemp != nsnull) { session_generator++; // I don't think there will be (2 power 32) sessions alive // in a cycle. This is an assumption if (session_generator == 0) session_generator++; m_SessionMap.Put(session_generator, pTemp); if (aUserName != nsnull && aUserName[0] != '\0') m_ProfileMap.Put(nsDependentString(aUserName), session_generator); *aSession = session_generator; sessionCount++; nResult = 1; } } PR_Unlock(m_Lock); return nResult; }
PR_IMPLEMENT(PRRecvWait*) PR_EnumerateWaitGroup( PRMWaitEnumerator *enumerator, const PRRecvWait *previous) { PRRecvWait *result = NULL; /* entry point sanity checking */ PR_ASSERT(NULL != enumerator); PR_ASSERT(_PR_ENUM_SEALED == enumerator->seal); if ((NULL == enumerator) || (_PR_ENUM_SEALED != enumerator->seal)) goto bad_argument; /* beginning of enumeration */ if (NULL == previous) { if (NULL == enumerator->group) { enumerator->group = mw_state->group; if (NULL == enumerator->group) { PR_SetError(PR_GROUP_EMPTY_ERROR, 0); return NULL; } } enumerator->waiter = &enumerator->group->waiter->recv_wait; enumerator->p_timestamp = enumerator->group->p_timestamp; enumerator->thread = PR_GetCurrentThread(); enumerator->index = 0; } /* continuing an enumeration */ else { PRThread *me = PR_GetCurrentThread(); PR_ASSERT(me == enumerator->thread); if (me != enumerator->thread) goto bad_argument; /* need to restart the enumeration */ if (enumerator->p_timestamp != enumerator->group->p_timestamp) return PR_EnumerateWaitGroup(enumerator, NULL); } /* actually progress the enumeration */ #if defined(WINNT) _PR_MD_LOCK(&enumerator->group->mdlock); #else PR_Lock(enumerator->group->ml); #endif while (enumerator->index++ < enumerator->group->waiter->length) { if (NULL != (result = *(enumerator->waiter)++)) break; } #if defined(WINNT) _PR_MD_UNLOCK(&enumerator->group->mdlock); #else PR_Unlock(enumerator->group->ml); #endif return result; /* what we live for */ bad_argument: PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); return NULL; /* probably ambiguous */ } /* PR_EnumerateWaitGroup */
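/*
 * Hedged usage sketch for the enumerator above: walk every outstanding
 * receive in a wait group by feeding each result back in as "previous".
 * The enumerator is assumed to have been created elsewhere for this group;
 * "inspect" and "walk_wait_group" are hypothetical names.
 */
#include <prmwait.h>

static void
walk_wait_group(PRMWaitEnumerator *enumerator, void (*inspect)(PRRecvWait *))
{
    PRRecvWait *waiting = NULL;

    /* NULL starts the enumeration; a NULL result means it is exhausted. */
    while ((waiting = PR_EnumerateWaitGroup(enumerator, waiting)) != NULL)
        inspect(waiting);
}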
PR_IMPLEMENT(PRStatus) PR_AddWaitFileDesc( PRWaitGroup *group, PRRecvWait *desc) { _PR_HashStory hrv; PRStatus rv = PR_FAILURE; #ifdef WINNT _MDOverlapped *overlapped; HANDLE hFile; BOOL bResult; DWORD dwError; PRFileDesc *bottom; #endif if (!_pr_initialized) _PR_ImplicitInitialization(); if ((NULL == group) && (NULL == (group = MW_Init2()))) { return rv; } PR_ASSERT(NULL != desc->fd); desc->outcome = PR_MW_PENDING; /* nice, well known value */ desc->bytesRecv = 0; /* likewise, though this value is ambiguious */ PR_Lock(group->ml); if (_prmw_running != group->state) { /* Not allowed to add after cancelling the group */ desc->outcome = PR_MW_INTERRUPT; PR_SetError(PR_INVALID_STATE_ERROR, 0); PR_Unlock(group->ml); return rv; } #ifdef WINNT _PR_MD_LOCK(&group->mdlock); #endif /* ** If the waiter count is zero at this point, there's no telling ** how long we've been idle. Therefore, initialize the beginning ** of the timing interval. As long as the list doesn't go empty, ** it will maintain itself. */ if (0 == group->waiter->count) group->last_poll = PR_IntervalNow(); do { hrv = MW_AddHashInternal(desc, group->waiter); if (_prmw_rehash != hrv) break; hrv = MW_ExpandHashInternal(group); /* gruesome */ if (_prmw_success != hrv) break; } while (PR_TRUE); #ifdef WINNT _PR_MD_UNLOCK(&group->mdlock); #endif PR_NotifyCondVar(group->new_business); /* tell the world */ rv = (_prmw_success == hrv) ? PR_SUCCESS : PR_FAILURE; PR_Unlock(group->ml); #ifdef WINNT overlapped = PR_NEWZAP(_MDOverlapped); if (NULL == overlapped) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); NT_HashRemove(group, desc->fd); return rv; } overlapped->ioModel = _MD_MultiWaitIO; overlapped->data.mw.desc = desc; overlapped->data.mw.group = group; if (desc->timeout != PR_INTERVAL_NO_TIMEOUT) { overlapped->data.mw.timer = CreateTimer( desc->timeout, NT_TimeProc, overlapped); if (0 == overlapped->data.mw.timer) { NT_HashRemove(group, desc->fd); PR_DELETE(overlapped); /* * XXX It appears that a maximum of 16 timer events can * be outstanding. GetLastError() returns 0 when I try it. */ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, GetLastError()); return PR_FAILURE; } } /* Reach to the bottom layer to get the OS fd */ bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER); PR_ASSERT(NULL != bottom); if (NULL == bottom) { PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); return PR_FAILURE; } hFile = (HANDLE)bottom->secret->md.osfd; if (!bottom->secret->md.io_model_committed) { PRInt32 st; st = _md_Associate(hFile); PR_ASSERT(0 != st); bottom->secret->md.io_model_committed = PR_TRUE; } bResult = ReadFile(hFile, desc->buffer.start, (DWORD)desc->buffer.length, NULL, &overlapped->overlapped); if (FALSE == bResult && (dwError = GetLastError()) != ERROR_IO_PENDING) { if (desc->timeout != PR_INTERVAL_NO_TIMEOUT) { if (InterlockedCompareExchange((LONG *)&desc->outcome, (LONG)PR_MW_FAILURE, (LONG)PR_MW_PENDING) == (LONG)PR_MW_PENDING) { CancelTimer(overlapped->data.mw.timer); } NT_HashRemove(group, desc->fd); PR_DELETE(overlapped); } _PR_MD_MAP_READ_ERROR(dwError); rv = PR_FAILURE; } #endif return rv; } /* PR_AddWaitFileDesc */
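/*
 * Sketch of handing one receive to PR_AddWaitFileDesc() (illustrative only).
 * The fields set here -- fd, buffer.start, buffer.length and timeout -- are
 * the ones the code above reads; "queue_receive" is a hypothetical helper
 * and the 30-second timeout is arbitrary.
 */
#include <prmem.h>
#include <prmwait.h>

static PRStatus
queue_receive(PRWaitGroup *group, PRFileDesc *fd, void *buf, PRSize len)
{
    PRRecvWait *desc = PR_NEWZAP(PRRecvWait);
    if (NULL == desc)
        return PR_FAILURE;

    desc->fd = fd;                        /* socket to wait on */
    desc->buffer.start = buf;             /* receive buffer */
    desc->buffer.length = len;
    desc->timeout = PR_SecondsToInterval(30);

    return PR_AddWaitFileDesc(group, desc);
}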
/* * Parse the value from an LDAPv3 "Simple Paged Results" control. They look * like this: * * realSearchControlValue ::= SEQUENCE { * size INTEGER (0..maxInt), * -- requested page size from client * -- result set size estimate from server * cookie OCTET STRING * -- index for the pagedresults array in the connection * } * * Return an LDAP error code (LDAP_SUCCESS if all goes well). */ int pagedresults_parse_control_value( Slapi_PBlock *pb, struct berval *psbvp, ber_int_t *pagesize, int *index ) { int rc = LDAP_SUCCESS; struct berval cookie = {0}; Connection *conn = pb->pb_conn; Operation *op = pb->pb_op; LDAPDebug0Args(LDAP_DEBUG_TRACE, "--> pagedresults_parse_control_value\n"); if ( NULL == conn || NULL == op || NULL == pagesize || NULL == index ) { LDAPDebug1Arg(LDAP_DEBUG_ANY, "<-- pagedresults_parse_control_value: %d\n", LDAP_OPERATIONS_ERROR); return LDAP_OPERATIONS_ERROR; } *index = -1; if ( psbvp->bv_len == 0 || psbvp->bv_val == NULL ) { rc = LDAP_PROTOCOL_ERROR; } else { BerElement *ber = ber_init( psbvp ); if ( ber == NULL ) { rc = LDAP_OPERATIONS_ERROR; } else { if ( ber_scanf( ber, "{io}", pagesize, &cookie ) == LBER_ERROR ) { rc = LDAP_PROTOCOL_ERROR; } /* the ber encoding is no longer needed */ ber_free(ber, 1); if ( cookie.bv_len <= 0 ) { int i; int maxlen; /* first time? */ PR_Lock(conn->c_mutex); maxlen = conn->c_pagedresults.prl_maxlen; if (conn->c_pagedresults.prl_count == maxlen) { if (0 == maxlen) { /* first time */ conn->c_pagedresults.prl_maxlen = 1; conn->c_pagedresults.prl_list = (PagedResults *)slapi_ch_calloc(1, sizeof(PagedResults)); } else { /* new max length */ conn->c_pagedresults.prl_maxlen *= 2; conn->c_pagedresults.prl_list = (PagedResults *)slapi_ch_realloc( (char *)conn->c_pagedresults.prl_list, sizeof(PagedResults) * conn->c_pagedresults.prl_maxlen); /* initialze newly allocated area */ memset(conn->c_pagedresults.prl_list + maxlen, '\0', sizeof(PagedResults) * maxlen); } *index = maxlen; /* the first position in the new area */ } else { for (i = 0; i < conn->c_pagedresults.prl_maxlen; i++) { if (!conn->c_pagedresults.prl_list[i].pr_current_be) { *index = i; break; } } } conn->c_pagedresults.prl_count++; PR_Unlock(conn->c_mutex); } else { /* Repeated paged results request. * PagedResults is already allocated. */ char *ptr = slapi_ch_malloc(cookie.bv_len + 1); memcpy(ptr, cookie.bv_val, cookie.bv_len); *(ptr+cookie.bv_len) = '\0'; *index = strtol(ptr, NULL, 10); slapi_ch_free_string(&ptr); } slapi_ch_free((void **)&cookie.bv_val); } } if ((*index > -1) && (*index < conn->c_pagedresults.prl_maxlen)) { /* Need to keep the latest msgid to prepare for the abandon. */ conn->c_pagedresults.prl_list[*index].pr_msgid = op->o_msgid; } else { rc = LDAP_PROTOCOL_ERROR; LDAPDebug1Arg(LDAP_DEBUG_ANY, "pagedresults_parse_control_value: invalid cookie: %d\n", *index); } LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_parse_control_value: idx %d\n", *index); return rc; }
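/*
 * Reduced sketch of just the BER decoding step above: pull the requested
 * page size and the cookie out of a Simple Paged Results control value,
 * using the same "{io}" format string.  "decode_paged_control" is a
 * hypothetical helper; the caller owns cookie->bv_val afterwards.
 */
#include <lber.h>

static int
decode_paged_control(struct berval *value,
                     ber_int_t *pagesize, struct berval *cookie)
{
    BerElement *ber = ber_init(value);
    if (ber == NULL)
        return -1;

    if (ber_scanf(ber, "{io}", pagesize, cookie) == LBER_ERROR) {
        ber_free(ber, 1);
        return -1;
    }

    ber_free(ber, 1);      /* the ber encoding is no longer needed */
    return 0;
}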
PR_IMPLEMENT(PRRecvWait*) PR_CancelWaitGroup(PRWaitGroup *group) { PRRecvWait **desc; PRRecvWait *recv_wait = NULL; #ifdef WINNT _MDOverlapped *overlapped; PRRecvWait **end; PRThread *me = _PR_MD_CURRENT_THREAD(); #endif if (NULL == group) group = mw_state->group; PR_ASSERT(NULL != group); if (NULL == group) { PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); return NULL; } PR_Lock(group->ml); if (_prmw_stopped != group->state) { if (_prmw_running == group->state) group->state = _prmw_stopping; /* so nothing new comes in */ if (0 == group->waiting_threads) /* is there anybody else? */ group->state = _prmw_stopped; /* we can stop right now */ else { PR_NotifyAllCondVar(group->new_business); PR_NotifyAllCondVar(group->io_complete); } while (_prmw_stopped != group->state) (void)PR_WaitCondVar(group->mw_manage, PR_INTERVAL_NO_TIMEOUT); } #ifdef WINNT _PR_MD_LOCK(&group->mdlock); #endif /* make all the existing descriptors look done/interrupted */ #ifdef WINNT end = &group->waiter->recv_wait + group->waiter->length; for (desc = &group->waiter->recv_wait; desc < end; ++desc) { if (NULL != *desc) { if (InterlockedCompareExchange((LONG *)&(*desc)->outcome, (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING) == (LONG)PR_MW_PENDING) { PRFileDesc *bottom = PR_GetIdentitiesLayer( (*desc)->fd, PR_NSPR_IO_LAYER); PR_ASSERT(NULL != bottom); if (NULL == bottom) { PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); goto invalid_arg; } bottom->secret->state = _PR_FILEDESC_CLOSED; #if 0 fprintf(stderr, "cancel wait group: closing socket\n"); #endif if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR) { fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError()); exit(1); } } } } while (group->waiter->count > 0) { _PR_THREAD_LOCK(me); me->state = _PR_IO_WAIT; PR_APPEND_LINK(&me->waitQLinks, &group->wait_list); if (!_PR_IS_NATIVE_THREAD(me)) { _PR_SLEEPQ_LOCK(me->cpu); _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT); _PR_SLEEPQ_UNLOCK(me->cpu); } _PR_THREAD_UNLOCK(me); _PR_MD_UNLOCK(&group->mdlock); PR_Unlock(group->ml); _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); me->state = _PR_RUNNING; PR_Lock(group->ml); _PR_MD_LOCK(&group->mdlock); } #else for (desc = &group->waiter->recv_wait; group->waiter->count > 0; ++desc) { PR_ASSERT(desc < &group->waiter->recv_wait + group->waiter->length); if (NULL != *desc) _MW_DoneInternal(group, desc, PR_MW_INTERRUPT); } #endif /* take first element of finished list and return it or NULL */ if (PR_CLIST_IS_EMPTY(&group->io_ready)) PR_SetError(PR_GROUP_EMPTY_ERROR, 0); else { PRCList *head = PR_LIST_HEAD(&group->io_ready); PR_REMOVE_AND_INIT_LINK(head); #ifdef WINNT overlapped = (_MDOverlapped *) ((char *)head - offsetof(_MDOverlapped, data)); head = &overlapped->data.mw.desc->internal; if (NULL != overlapped->data.mw.timer) { PR_ASSERT(PR_INTERVAL_NO_TIMEOUT != overlapped->data.mw.desc->timeout); CancelTimer(overlapped->data.mw.timer); } else { PR_ASSERT(PR_INTERVAL_NO_TIMEOUT == overlapped->data.mw.desc->timeout); } PR_DELETE(overlapped); #endif recv_wait = (PRRecvWait*)head; } #ifdef WINNT invalid_arg: _PR_MD_UNLOCK(&group->mdlock); #endif PR_Unlock(group->ml); return recv_wait; } /* PR_CancelWaitGroup */
void NSTP_ThreadMain (void *arg) { NSTPPool pip = (NSTPPool)arg; NSTPWorkItem *work = NULL; NSTPThread self; /* Initialize structure describing this thread */ self.prthread = PR_GetCurrentThread (); self.work = NULL; self.shutdown = PR_FALSE; PR_Lock(pip->lock); /* Add this thread to the list of threads in the thread pool instance */ self.next = pip->threads; pip->threads = &self; /* * The free thread count was incremented when this thread was created. * The thread is free, but we're going to increment the count at the * beginning of the loop below, so decrement it here. */ --pip -> stats.freeCount; /* * Begin main service loop. The pool lock is held at the beginning * of the loop, either because it was acquired above, or because it * was acquired at the end of the previous iteration. */ while (!self.shutdown) { /* Count this thread as free */ ++pip -> stats.freeCount; /* Wait for something on the work queue, or for shutdown */ while (!pip->head && !self.shutdown) { PR_WaitCondVar (pip->cvar, PR_INTERVAL_NO_TIMEOUT); } /* Dequeue the head work item */ work = pip->head; pip -> head = work -> next; pip -> stats.queueCount--; // decrement the queue count if (!pip->head) { pip->tail = NULL; /* * If the pool shutdown flag is set, wake all threads waiting * on the pool cvar, so that the one that is waiting for the * work queue to be empty will wake up and see that. The * other (worker) threads will immediately go back to waiting * on the pool cvar when they see that the work queue is * empty. */ if (pip->shutdown) { PR_NotifyAllCondVar(pip->cvar); } } self.work = work; /* This thread is no longer free */ --pip -> stats.freeCount; /* Release the pool instance lock */ PR_Unlock(pip->lock); /* Call the work function */ work->workfn(work->workarg); /* Acquire the lock used by the calling, waiting thread */ PR_Lock(work -> waiter_mon -> lock); /* Set work completion status */ work->work_status = NSTP_STATUS_WORK_DONE; work->work_complete = PR_TRUE; /* Wake up the calling, waiting thread */ PR_NotifyCondVar (work -> waiter_mon -> cvar); /* Release the lock */ PR_Unlock(work -> waiter_mon -> lock); /* Acquire the pool instance lock for the next iteration */ PR_Lock(pip->lock); } /* Decrement the thread count before this thread terminates */ if (--pip -> stats.threadCount <= 0) { /* Notify shutdown thread when this is the last thread */ PR_NotifyCondVar(pip->cvar); } PR_Unlock(pip->lock); }
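/*
 * The worker loop above follows the standard NSPR condition-variable
 * pattern.  A minimal, self-contained version of that pattern looks like
 * this; the "queue" struct, wait_for_work() and the single pending item
 * are illustrative, not part of the original pool.
 */
#include <prlock.h>
#include <prcvar.h>
#include <prinrval.h>

struct queue {
    PRLock    *lock;
    PRCondVar *cvar;      /* created against "lock" */
    void      *pending;   /* single pending item, for brevity */
    int        shutdown;
};

static void *
wait_for_work(struct queue *q)
{
    void *item;

    PR_Lock(q->lock);
    while (q->pending == NULL && !q->shutdown)
        PR_WaitCondVar(q->cvar, PR_INTERVAL_NO_TIMEOUT);  /* drops lock while waiting */
    item = q->pending;    /* take the item while still holding the lock */
    q->pending = NULL;
    PR_Unlock(q->lock);

    return item;
}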
NSTP_DestroyPool(NSTPPool pool, PRBool doitnow) { NSTPWorkItem *work; PR_Lock(pool->lock); /* * Indicate pool is being shut down, so no more requests * will be accepted. */ pool->shutdown = PR_TRUE; if (doitnow) { /* Complete all queued work items with NSTP_STATUS_SHUTDOWN_REJECT */ while ((work = pool->head) != NULL) { /* Dequeue work item */ pool->head = work->next; if (pool->head == NULL) { pool->tail = NULL; } PR_Unlock(pool->lock); /* Acquire the lock used by the calling, waiting thread */ PR_Lock ( work -> waiter_mon -> lock); /* Set work completion status */ work->work_status = NSTP_STATUS_SHUTDOWN_REJECT; work->work_complete = PR_TRUE; /* Wake up the calling, waiting thread */ PR_NotifyCondVar (work -> waiter_mon -> cvar); /* Release the lock */ PR_Unlock (work -> waiter_mon -> lock); PR_Lock (pool -> lock); } } else { /* doitnow is PR_FALSE */ /* Wait for work queue to be empty */ while (pool->head != NULL) { PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT); } } if (pool -> stats.threadCount > 0) { NSTPThread *thread; NSTPThread *nxt_thread; for (thread = pool->threads; thread; thread = nxt_thread) { nxt_thread = thread->next; thread->shutdown = PR_TRUE; } /* Wakeup all threads to look at their shutdown flags */ PR_NotifyAllCondVar(pool->cvar); /* Wait for threadCount to go to zero */ while (pool -> stats.threadCount > 0) { PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT); } } PR_Unlock(pool->lock); PR_DestroyCondVar(pool->cvar); PR_DestroyLock(pool->lock); PR_DELETE(pool); }
NSTP_ExitGlobalLock (void)
{
    return PR_Unlock(NSTPLock);
}
NSTP_QueueWorkItem (NSTPPool tpool, NSTPWorkFN workfn, NSTPWorkArg workarg, PRIntervalTime timeout) { PRStatus rv; NSTPStatus rsts; NSTPWorkItem work; PRIntervalTime epoch; /* Pretend loop to avoid goto */ while (1) { /* Initialize work item on stack */ work.next = NULL; work.workfn = workfn; work.workarg = workarg; work.work_status = NSTP_STATUS_WORK_QUEUED; work.work_complete = PR_FALSE; /* Acquire thread pool lock */ PR_Lock (tpool -> lock); /* Determine whether work queue is full, or if shutdown in progress */ if ((tpool -> config.maxQueue && (tpool -> stats.queueCount >= tpool -> config.maxQueue)) || tpool -> shutdown) { /* Work queue is full, so reject request */ PR_Unlock (tpool->lock); rsts = tpool->shutdown ? NSTP_STATUS_BUSY_REJECT : NSTP_STATUS_SHUTDOWN_REJECT; break; } // ruslan: add locks/cvars recycling if (tpool -> waiter_mon != NULL) { work.waiter_mon = tpool -> waiter_mon; tpool -> waiter_mon = tpool -> waiter_mon -> next; } else { CL_elem *lv = new CL_elem (); if (lv == NULL || lv -> lock == NULL || lv -> cvar == NULL) { PR_Unlock (tpool -> lock); if (lv != NULL) delete lv; rsts = NSTP_STATUS_NSPR_FAILURE; break; } else work.waiter_mon = lv; } /* Queue work item */ if (tpool -> tail) tpool -> tail -> next = &work; else tpool -> head = &work; tpool->tail = &work; /* Count number of work items queued */ if (++tpool -> stats.queueCount > tpool -> stats.maxQueue) tpool -> stats.maxQueue = tpool -> stats.queueCount; /* If all threads are busy, consider creating a new thread */ if ((tpool->stats.freeCount <= 0) && tpool->config.maxThread && (tpool->stats.threadCount < tpool->config.maxThread)) { PRThread *thread; /* * Bump the total thread count before we release the pool * lock, so that we don't create more than the allowed * number of threads. */ ++tpool -> stats.threadCount; ++tpool -> stats.freeCount; PR_Unlock (tpool->lock); thread = PR_CreateThread(PR_USER_THREAD, NSTP_ThreadMain, (void *)tpool, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, tpool->config.stackSize); /* Reacquire the pool lock */ PR_Lock(tpool->lock); if (!thread) { --tpool->stats.freeCount; --tpool->stats.threadCount; PR_Unlock(tpool->lock); rsts = PR_FAILURE; break; } } /* Wakeup a waiting thread */ PR_NotifyCondVar (tpool -> cvar); /* Release thread pool lock */ PR_Unlock (tpool -> lock); /* Record start time of wait */ if (hasTimeout && timeout != PR_INTERVAL_NO_TIMEOUT && timeout != PR_INTERVAL_NO_WAIT) epoch = PR_IntervalNow (); /* Acquire the work item lock */ PR_Lock (work.waiter_mon -> lock); /* Now wait for work to be completed */ while (work.work_complete == PR_FALSE) { rv = PR_WaitCondVar (work.waiter_mon -> cvar, hasTimeout ? timeout : PR_INTERVAL_NO_TIMEOUT); if (rv == PR_FAILURE) { /* Some kind of NSPR error */ work.work_status = NSTP_STATUS_NSPR_FAILURE; work.work_complete = PR_TRUE; } else if (hasTimeout && !work.work_complete && timeout != PR_INTERVAL_NO_TIMEOUT && timeout != PR_INTERVAL_NO_WAIT && ((PRIntervalTime)(PR_IntervalNow() - epoch) > timeout)) { /* Wait timed out */ /* XXX if (work not started yet) */ work.work_status = NSTP_STATUS_WORK_TIMEOUT; work.work_complete = PR_TRUE; rv = PR_FAILURE; /* XXX else PR_Interrupt thread? 
*/ } if (rv == PR_FAILURE) { /* XXX remove work item from queue */ break; } } /* while work_complete */ rsts = work.work_status; /* Release the work item lock */ PR_Unlock (work.waiter_mon -> lock); PR_Lock (tpool -> lock); /* Cleanup and continue */ work.waiter_mon -> next = tpool -> waiter_mon; tpool -> waiter_mon = work.waiter_mon; PR_Unlock (tpool -> lock); break; } /* while */ return rsts; }
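/*
 * Illustrative caller of NSTP_QueueWorkItem() (not from the original
 * source): submit one work function and block until a pool thread has run
 * it.  "log_request" and "submit_one" are hypothetical, and NSTPWorkArg is
 * assumed to be a pointer-sized argument as suggested by workfn(workarg)
 * above.
 */
static void
log_request(NSTPWorkArg arg)
{
    /* Runs on a pool thread; "arg" is whatever the caller queued. */
    (void)arg;
}

static NSTPStatus
submit_one(NSTPPool pool)
{
    /* Block without a deadline; the work item itself lives on our stack. */
    return NSTP_QueueWorkItem(pool, log_request, (NSTPWorkArg)NULL,
                              PR_INTERVAL_NO_TIMEOUT);
}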
static void UnlockArena( void )
{
    PR_Unlock( arenaLock );
    return;
} /* end UnlockArena() */
static PRProcess * ForkAndExec( const char *path, char *const *argv, char *const *envp, const PRProcessAttr *attr) { PRProcess *process; int nEnv, idx; char *const *childEnvp; char **newEnvp = NULL; int flags; #ifdef VMS char VMScurdir[FILENAME_MAX+1] = { '\0' } ; #endif process = PR_NEW(PRProcess); if (!process) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); return NULL; } childEnvp = envp; if (attr && attr->fdInheritBuffer) { if (NULL == childEnvp) { #ifdef DARWIN childEnvp = *(_NSGetEnviron()); #else childEnvp = environ; #endif } for (nEnv = 0; childEnvp[nEnv]; nEnv++) { } newEnvp = (char **) PR_MALLOC((nEnv + 2) * sizeof(char *)); if (NULL == newEnvp) { PR_DELETE(process); PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); return NULL; } for (idx = 0; idx < nEnv; idx++) { newEnvp[idx] = childEnvp[idx]; } newEnvp[idx++] = attr->fdInheritBuffer; newEnvp[idx] = NULL; childEnvp = newEnvp; } #ifdef VMS /* ** Since vfork/exec is implemented VERY differently on OpenVMS, we have to ** handle the setting up of the standard streams very differently. And since ** none of this code can ever execute in the context of the child, we have ** to perform the chdir in the parent so the child is born into the correct ** directory (and then switch the parent back again). */ { int decc$set_child_standard_streams(int,int,int); int n, fd_stdin=0, fd_stdout=1, fd_stderr=2; /* Set up any standard streams we are given, assuming defaults */ if (attr) { if (attr->stdinFd) fd_stdin = attr->stdinFd->secret->md.osfd; if (attr->stdoutFd) fd_stdout = attr->stdoutFd->secret->md.osfd; if (attr->stderrFd) fd_stderr = attr->stderrFd->secret->md.osfd; } /* ** Put a lock around anything that isn't going to be thread-safe. */ PR_Lock(_pr_vms_fork_lock); /* ** Prepare the child's streams. We always do this in case a previous fork ** has left the stream assignments in some non-standard way. */ n = decc$set_child_standard_streams(fd_stdin,fd_stdout,fd_stderr); if (n == -1) { PR_SetError(PR_BAD_DESCRIPTOR_ERROR, errno); PR_DELETE(process); if (newEnvp) { PR_DELETE(newEnvp); } PR_Unlock(_pr_vms_fork_lock); return NULL; } /* Switch directory if we have to */ if (attr) { if (attr->currentDirectory) { if ( (getcwd(VMScurdir,sizeof(VMScurdir)) == NULL) || (chdir(attr->currentDirectory) < 0) ) { PR_SetError(PR_DIRECTORY_OPEN_ERROR, errno); PR_DELETE(process); if (newEnvp) { PR_DELETE(newEnvp); } PR_Unlock(_pr_vms_fork_lock); return NULL; } } } } #endif /* VMS */ #ifdef AIX process->md.pid = (*pr_wp.forkptr)(); #elif defined(NTO) /* * fork() & exec() does not work in a multithreaded process. * Use spawn() instead. 
*/ { int fd_map[3] = { 0, 1, 2 }; if (attr) { if (attr->stdinFd && attr->stdinFd->secret->md.osfd != 0) { fd_map[0] = dup(attr->stdinFd->secret->md.osfd); flags = fcntl(fd_map[0], F_GETFL, 0); if (flags & O_NONBLOCK) fcntl(fd_map[0], F_SETFL, flags & ~O_NONBLOCK); } if (attr->stdoutFd && attr->stdoutFd->secret->md.osfd != 1) { fd_map[1] = dup(attr->stdoutFd->secret->md.osfd); flags = fcntl(fd_map[1], F_GETFL, 0); if (flags & O_NONBLOCK) fcntl(fd_map[1], F_SETFL, flags & ~O_NONBLOCK); } if (attr->stderrFd && attr->stderrFd->secret->md.osfd != 2) { fd_map[2] = dup(attr->stderrFd->secret->md.osfd); flags = fcntl(fd_map[2], F_GETFL, 0); if (flags & O_NONBLOCK) fcntl(fd_map[2], F_SETFL, flags & ~O_NONBLOCK); } PR_ASSERT(attr->currentDirectory == NULL); /* not implemented */ } process->md.pid = spawn(path, 3, fd_map, NULL, argv, childEnvp); if (fd_map[0] != 0) close(fd_map[0]); if (fd_map[1] != 1) close(fd_map[1]); if (fd_map[2] != 2) close(fd_map[2]); } #else process->md.pid = fork(); #endif if ((pid_t) -1 == process->md.pid) { PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, errno); PR_DELETE(process); if (newEnvp) { PR_DELETE(newEnvp); } return NULL; } else if (0 == process->md.pid) { /* the child process */ /* * If the child process needs to exit, it must call _exit(). * Do not call exit(), because exit() will flush and close * the standard I/O file descriptors, and hence corrupt * the parent process's standard I/O data structures. */ #if !defined(NTO) #ifdef VMS /* OpenVMS has already handled all this above */ #else if (attr) { /* the osfd's to redirect stdin, stdout, and stderr to */ int in_osfd = -1, out_osfd = -1, err_osfd = -1; if (attr->stdinFd && attr->stdinFd->secret->md.osfd != 0) { in_osfd = attr->stdinFd->secret->md.osfd; if (dup2(in_osfd, 0) != 0) { _exit(1); /* failed */ } flags = fcntl(0, F_GETFL, 0); if (flags & O_NONBLOCK) { fcntl(0, F_SETFL, flags & ~O_NONBLOCK); } } if (attr->stdoutFd && attr->stdoutFd->secret->md.osfd != 1) { out_osfd = attr->stdoutFd->secret->md.osfd; if (dup2(out_osfd, 1) != 1) { _exit(1); /* failed */ } flags = fcntl(1, F_GETFL, 0); if (flags & O_NONBLOCK) { fcntl(1, F_SETFL, flags & ~O_NONBLOCK); } } if (attr->stderrFd && attr->stderrFd->secret->md.osfd != 2) { err_osfd = attr->stderrFd->secret->md.osfd; if (dup2(err_osfd, 2) != 2) { _exit(1); /* failed */ } flags = fcntl(2, F_GETFL, 0); if (flags & O_NONBLOCK) { fcntl(2, F_SETFL, flags & ~O_NONBLOCK); } } if (in_osfd != -1) { close(in_osfd); } if (out_osfd != -1 && out_osfd != in_osfd) { close(out_osfd); } if (err_osfd != -1 && err_osfd != in_osfd && err_osfd != out_osfd) { close(err_osfd); } if (attr->currentDirectory) { if (chdir(attr->currentDirectory) < 0) { _exit(1); /* failed */ } } } #endif /* !VMS */ if (childEnvp) { (void)execve(path, argv, childEnvp); } else { /* Inherit the environment of the parent. */ (void)execv(path, argv); } /* Whoops! It returned. That's a bad sign. */ #ifdef VMS /* ** On OpenVMS we are still in the context of the parent, and so we ** can (and should!) perform normal error handling. 
*/ PR_SetError(PR_UNKNOWN_ERROR, errno); PR_DELETE(process); if (newEnvp) { PR_DELETE(newEnvp); } if (VMScurdir[0] != '\0') chdir(VMScurdir); PR_Unlock(_pr_vms_fork_lock); return NULL; #else _exit(1); #endif /* VMS */ #endif /* !NTO */ } if (newEnvp) { PR_DELETE(newEnvp); } #ifdef VMS /* If we switched directories, then remember to switch back */ if (VMScurdir[0] != '\0') { chdir(VMScurdir); /* can't do much if it fails */ } PR_Unlock(_pr_vms_fork_lock); #endif /* VMS */ #if defined(_PR_NATIVE_THREADS) PR_Lock(pr_wp.ml); if (0 == pr_wp.numProcs++) { PR_NotifyCondVar(pr_wp.cv); } PR_Unlock(pr_wp.ml); #endif return process; }
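/*
 * Condensed sketch of the child-side redirection done above: duplicate a
 * descriptor onto stdout, drop O_NONBLOCK, then exec.  Failures use _exit()
 * so the parent's stdio buffers are not flushed twice.  "child_exec" and
 * "out_fd" are illustrative names.
 */
#include <fcntl.h>
#include <unistd.h>

static void
child_exec(int out_fd, const char *path, char *const argv[])
{
    int flags;

    if (dup2(out_fd, 1) != 1)
        _exit(1);                       /* redirection failed */

    flags = fcntl(1, F_GETFL, 0);
    if (flags & O_NONBLOCK)
        fcntl(1, F_SETFL, flags & ~O_NONBLOCK);

    execv(path, argv);                  /* only returns on failure */
    _exit(1);
}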
PR_IMPLEMENT(PRStatus) PR_Cleanup() { PRThread *me = PR_GetCurrentThread(); PR_ASSERT((NULL != me) && (me->flags & _PR_PRIMORDIAL)); if ((NULL != me) && (me->flags & _PR_PRIMORDIAL)) { PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("PR_Cleanup: shutting down NSPR")); /* * No more recycling of threads */ _pr_recycleThreads = 0; /* * Wait for all other user (non-system/daemon) threads * to terminate. */ PR_Lock(_pr_activeLock); while (_pr_userActive > _pr_primordialExitCount) { PR_WaitCondVar(_pr_primordialExitCVar, PR_INTERVAL_NO_TIMEOUT); } if (me->flags & _PR_SYSTEM) { _pr_systemActive--; } else { _pr_userActive--; } PR_Unlock(_pr_activeLock); #ifdef IRIX _PR_MD_PRE_CLEANUP(me); /* * The primordial thread must now be running on the primordial cpu */ PR_ASSERT((_PR_IS_NATIVE_THREAD(me)) || (me->cpu->id == 0)); #endif _PR_MD_EARLY_CLEANUP(); _PR_CleanupMW(); _PR_CleanupTime(); _PR_CleanupDtoa(); _PR_CleanupCallOnce(); _PR_ShutdownLinker(); _PR_CleanupNet(); _PR_CleanupIO(); /* Release the primordial thread's private data, etc. */ _PR_CleanupThread(me); _PR_MD_STOP_INTERRUPTS(); PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("PR_Cleanup: clean up before destroying thread")); _PR_LogCleanup(); /* * This part should look like the end of _PR_NativeRunThread * and _PR_UserRunThread. */ if (_PR_IS_NATIVE_THREAD(me)) { _PR_MD_EXIT_THREAD(me); _PR_NativeDestroyThread(me); } else { _PR_UserDestroyThread(me); PR_DELETE(me->stack); PR_DELETE(me); } /* * XXX: We are freeing the heap memory here so that Purify won't * complain, but we should also free other kinds of resources * that are allocated by the _PR_InitXXX() functions. * Ideally, for each _PR_InitXXX(), there should be a corresponding * _PR_XXXCleanup() that we can call here. */ #ifdef WINNT _PR_CleanupCPUs(); #endif _PR_CleanupThreads(); _PR_CleanupCMon(); PR_DestroyLock(_pr_sleeplock); _pr_sleeplock = NULL; _PR_CleanupLayerCache(); _PR_CleanupEnv(); _PR_CleanupStacks(); _PR_CleanupBeforeExit(); _pr_initialized = PR_FALSE; return PR_SUCCESS; } return PR_FAILURE; }
void nsPluginInstance::threadMain(void) { DBG("nsPluginInstance::threadMain started\n"); DBG("URL: %s\n", _url.c_str()); PR_Lock(playerLock); // Initialize Gnash core library. DBG("Gnash core initialized.\n"); // Init logfile. gnash::RcInitFile& rcinit = gnash::RcInitFile::getDefaultInstance(); std::string logfilename = std::string(std::getenv("TEMP")) + std::string("\\npgnash.log"); rcinit.setDebugLog(logfilename); gnash::LogFile& dbglogfile = gnash::LogFile::getDefaultInstance(); dbglogfile.setWriteDisk(true); dbglogfile.setVerbosity(GNASH_DEBUG_LEVEL); DBG("Gnash logging initialized: %s\n", logfilename.c_str()); // Init sound. _sound_handler.reset(gnash::sound::create_sound_handler_sdl()); gnash::set_sound_handler(_sound_handler.get()); DBG("Gnash sound initialized.\n"); // Init GUI. int old_mouse_x = 0, old_mouse_y = 0, old_mouse_buttons = 0; _render_handler = (gnash::render_handler *) gnash::create_render_handler_agg("BGR24"); // _memaddr = (unsigned char *) malloc(getMemSize()); static_cast<gnash::render_handler_agg_base *>(_render_handler)->init_buffer( getMemAddr(), getMemSize(), _width, _height, _rowstride); gnash::set_render_handler(_render_handler); DBG("Gnash GUI initialized: %ux%u\n", _width, _height); gnash::URL url(_url); VariableMap vars; gnash::URL::parse_querystring(url.querystring(), vars); for (VariableMap::iterator i = vars.begin(), ie = vars.end(); i != ie; ++i) { _flashVars[i->first] = i->second; } gnash::set_base_url(url); gnash::movie_definition* md = NULL; try { md = gnash::createMovie(url, _url.c_str(), false); } catch (const gnash::GnashException& err) { md = NULL; } if (!md) { /* * N.B. Can't use the goto here, as C++ complains about "jump to * label 'done' from here crosses initialization of ..." a bunch * of things. Sigh. So, instead, I duplicate the cleanup code * here. TODO: Remove this duplication. */ // goto done; PR_Unlock(playerLock); DBG("Clean up Gnash.\n"); gnash::clear(); DBG("nsPluginInstance::threadMain exiting\n"); return; } DBG("Movie created: %s\n", _url.c_str()); int movie_width = static_cast<int>(md->get_width_pixels()); int movie_height = static_cast<int>(md->get_height_pixels()); float movie_fps = md->get_frame_rate(); DBG("Movie dimensions: %ux%u (%.2f fps)\n", movie_width, movie_height, movie_fps); gnash::SystemClock clock; // use system clock here... gnash::movie_root& root = gnash::VM::init(*md, clock).getRoot(); DBG("Gnash VM initialized.\n"); // Register this plugin as listener for FsCommands from the core // (movie_root) #if 0 /* Commenting out for now as registerFSCommandCallback() has changed. */ root.registerFSCommandCallback(FSCommand_callback); #endif // Register a static function to handle ActionScript events such // as Mouse.hide, Stage.align etc. 
// root.registerEventCallback(&staticEventHandlingFunction); md->completeLoad(); DBG("Movie loaded.\n"); std::unique_ptr<gnash::Movie> mr(md->createMovie()); mr->setVariables(_flashVars); root.setRootMovie(mr.release()); root.set_display_viewport(0, 0, _width, _height); root.set_background_alpha(1.0f); gnash::Movie* mi = root.getRootMovie(); DBG("Movie instance created.\n"); ShowWindow(_window, SW_SHOW); for (;;) { // DBG("Inside main thread loop.\n"); if (_shutdown) { DBG("Main thread shutting down.\n"); break; } size_t cur_frame = mi->get_current_frame(); // DBG("Got current frame number: %d.\n", cur_frame); size_t tot_frames = mi->get_frame_count(); // DBG("Got total frame count: %d.\n", tot_frames); // DBG("Advancing one frame.\n"); root.advance(); // DBG("Going to next frame.\n"); root.goto_frame(cur_frame + 1); // DBG("Ensuring frame is loaded.\n"); root.get_movie_definition()->ensure_frame_loaded(tot_frames); // DBG("Setting play state to PLAY.\n"); root.set_play_state(gnash::MovieClip::PLAY); if (old_mouse_x != mouse_x || old_mouse_y != mouse_y) { old_mouse_x = mouse_x; old_mouse_y = mouse_y; root.notify_mouse_moved(mouse_x, mouse_y); } if (old_mouse_buttons != mouse_buttons) { old_mouse_buttons = mouse_buttons; int mask = 1; root.notify_mouse_clicked(mouse_buttons > 0, mask); } root.display(); RECT rt; GetClientRect(_window, &rt); InvalidateRect(_window, &rt, FALSE); #if 0 InvalidatedRanges ranges; ranges.setSnapFactor(1.3f); ranges.setSingleMode(false); root.add_invalidated_bounds(ranges, false); ranges.growBy(40.0f); ranges.combine_ranges(); if (!ranges.isNull()) { InvalidateRect(_window, &rt, FALSE); } root.display(); #endif // DBG("Unlocking playerLock mutex.\n"); PR_Unlock(playerLock); // DBG("Sleeping.\n"); PR_Sleep(PR_INTERVAL_MIN); // DBG("Acquiring playerLock mutex.\n"); PR_Lock(playerLock); } done: PR_Unlock(playerLock); DBG("Clean up Gnash.\n"); /* * N.B. As per server/impl.cpp:clear(), all of Gnash's threads aren't * guaranteed to be terminated by this, yet. Therefore, when Firefox * unloads npgnash.dll after calling NS_PluginShutdown(), and there are * still Gnash threads running, they will try and access memory that was * freed as part of the unloading of npgnash.dll, resulting in a process * abend. */ gnash::clear(); DBG("nsPluginInstance::threadMain exiting\n"); }
NSTP_CreatePool (NSTPPoolConfig *pcfg, NSTPPool *pool) { PRStatus rv = PR_SUCCESS; NSTPPool pip = NULL; /* Do default module initialization if it hasn't been done yet */ if (!NSTPLock) { NSTP_Initialize(NULL, NULL, NULL); } /* Enter global lock */ PR_Lock(NSTPLock); /* Pretend loop to avoid goto */ while (1) { /* Allocate a new pool instance from the global pool */ pip = PR_NEWZAP (struct NSTPPool_s); if (!pip) { /* Failed to allocate pool instance structure */ rv = PR_FAILURE; break; } if (pcfg->version != NSTP_API_VERSION) { /* Unsupported API version */ PR_DELETE(pip); rv = PR_FAILURE; break; } /* Copy the configuration parameters into the instance */ pip->config = *pcfg; /* Get a new lock for this instance */ pip->lock = PR_NewLock(); if (!pip->lock) { /* Failed to create lock for new pool instance */ PR_DELETE(pip); rv = PR_FAILURE; break; } /* Create a condition variable for the lock */ pip->cvar = PR_NewCondVar(pip->lock); if (!pip->cvar) { /* Failed to create condition variable for new pool instance */ PR_DestroyLock(pip->lock); PR_DELETE(pip); rv = PR_FAILURE; break; } /* Add this instance to the global list of instances */ pip->next = NSTPInstanceList; NSTPInstanceList = pip; ++NSTPInstanceCount; break; // ruslan: need to get out of the loop, right Howard :-)? } PR_Unlock(NSTPLock); /* If that went well, continue initializing the new instance */ if (rv == PR_SUCCESS) { /* Create initial threads */ if (pip->config.initThread > 0) { PRThread *thread; int i; for (i = 0; i < pip->config.initThread; ++i) { /* * In solaris, all the threads which are going to run * java needs to be bound thread so that we can reliably * get the register state to do GC. */ thread = PR_CreateThread(PR_USER_THREAD, NSTP_ThreadMain, (void *)pip, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, pip->config.stackSize); if (!thread) { /* Failed, so shutdown threads already created */ PR_Lock(pip->lock); if (pip->stats.threadCount > 0) { pip->shutdown = PR_TRUE; rv = PR_NotifyAllCondVar(pip->cvar); } PR_Unlock(pip->lock); rv = PR_FAILURE; break; } PR_Lock(pip->lock); ++pip->stats.threadCount; ++pip->stats.freeCount; PR_Unlock(pip->lock); } } /* initThread > 0 */ *pool = pip; // ruslan: need to assign it back } return rv; }
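/*
 * Hypothetical caller of NSTP_CreatePool().  The config fields shown are the
 * ones the pool code in this section actually reads (version, initThread,
 * maxThread, maxQueue, stackSize); the real structure may carry more, and
 * the numeric values here are arbitrary.
 */
#include <string.h>

static PRStatus
make_pool(NSTPPool *pool_out)
{
    NSTPPoolConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.version    = NSTP_API_VERSION;   /* NSTP_CreatePool rejects other versions */
    cfg.initThread = 4;                  /* threads created up front */
    cfg.maxThread  = 16;                 /* cap on pool threads */
    cfg.maxQueue   = 64;                 /* pending work items before reject */
    cfg.stackSize  = 0;                  /* 0 = NSPR default stack size */

    return NSTP_CreatePool(&cfg, pool_out);
}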
CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex) { PRInt32 err; PRFileDesc *model = NULL; PRBool ssl2 = PR_FALSE; PRBool ssl3 = PR_FALSE; PRBool tlsv1 = PR_FALSE; struct SessionHandle *data = conn->data; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; int curlerr; const int *cipher_to_enable; PRSocketOptionData sock_opt; long time_left; PRUint32 timeout; if (connssl->state == ssl_connection_complete) return CURLE_OK; connssl->data = data; #ifdef HAVE_PK11_CREATEGENERICOBJECT /* list of all NSS objects we need to destroy in Curl_nss_close() */ connssl->obj_list = Curl_llist_alloc(nss_destroy_object); if(!connssl->obj_list) return CURLE_OUT_OF_MEMORY; #endif /* FIXME. NSS doesn't support multiple databases open at the same time. */ PR_Lock(nss_initlock); curlerr = init_nss(conn->data); if(CURLE_OK != curlerr) { PR_Unlock(nss_initlock); goto error; } curlerr = CURLE_SSL_CONNECT_ERROR; #ifdef HAVE_PK11_CREATEGENERICOBJECT if(!mod) { char *configstring = aprintf("library=%s name=PEM", pem_library); if(!configstring) { PR_Unlock(nss_initlock); goto error; } mod = SECMOD_LoadUserModule(configstring, NULL, PR_FALSE); free(configstring); if(!mod || !mod->loaded) { if(mod) { SECMOD_DestroyModule(mod); mod = NULL; } infof(data, "WARNING: failed to load NSS PEM library %s. Using " "OpenSSL PEM certificates will not work.\n", pem_library); } } #endif PK11_SetPasswordFunc(nss_get_password); PR_Unlock(nss_initlock); model = PR_NewTCPSocket(); if(!model) goto error; model = SSL_ImportFD(NULL, model); /* make the socket nonblocking */ sock_opt.option = PR_SockOpt_Nonblocking; sock_opt.value.non_blocking = PR_TRUE; if(PR_SetSocketOption(model, &sock_opt) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_SECURITY, PR_TRUE) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_SERVER, PR_FALSE) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_CLIENT, PR_TRUE) != SECSuccess) goto error; switch (data->set.ssl.version) { default: case CURL_SSLVERSION_DEFAULT: ssl3 = PR_TRUE; if (data->state.ssl_connect_retry) infof(data, "TLS disabled due to previous handshake failure\n"); else tlsv1 = PR_TRUE; break; case CURL_SSLVERSION_TLSv1: tlsv1 = PR_TRUE; break; case CURL_SSLVERSION_SSLv2: ssl2 = PR_TRUE; break; case CURL_SSLVERSION_SSLv3: ssl3 = PR_TRUE; break; } if(SSL_OptionSet(model, SSL_ENABLE_SSL2, ssl2) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_ENABLE_SSL3, ssl3) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_ENABLE_TLS, tlsv1) != SECSuccess) goto error; if(SSL_OptionSet(model, SSL_V2_COMPATIBLE_HELLO, ssl2) != SECSuccess) goto error; /* reset the flag to avoid an infinite loop */ data->state.ssl_connect_retry = FALSE; /* enable all ciphers from enable_ciphers_by_default */ cipher_to_enable = enable_ciphers_by_default; while (SSL_NULL_WITH_NULL_NULL != *cipher_to_enable) { if (SSL_CipherPrefSet(model, *cipher_to_enable, PR_TRUE) != SECSuccess) { curlerr = CURLE_SSL_CIPHER; goto error; } cipher_to_enable++; } if(data->set.ssl.cipher_list) { if(set_ciphers(data, model, data->set.ssl.cipher_list) != SECSuccess) { curlerr = CURLE_SSL_CIPHER; goto error; } } if(data->set.ssl.verifyhost == 1) infof(data, "warning: ignoring unsupported value (1) of ssl.verifyhost\n"); data->set.ssl.certverifyresult=0; /* not checked yet */ if(SSL_BadCertHook(model, (SSLBadCertHandler) BadCertHandler, conn) != SECSuccess) { goto error; } if(SSL_HandshakeCallback(model, (SSLHandshakeCallback) 
HandshakeCallback, NULL) != SECSuccess) goto error; if(data->set.ssl.verifypeer && (CURLE_OK != (curlerr = nss_load_ca_certificates(conn, sockindex)))) goto error; if (data->set.ssl.CRLfile) { if(SECSuccess != nss_load_crl(data->set.ssl.CRLfile)) { curlerr = CURLE_SSL_CRL_BADFILE; goto error; } infof(data, " CRLfile: %s\n", data->set.ssl.CRLfile ? data->set.ssl.CRLfile : "none"); } if(data->set.str[STRING_CERT]) { bool is_nickname; char *nickname = fmt_nickname(data, STRING_CERT, &is_nickname); if(!nickname) return CURLE_OUT_OF_MEMORY; if(!is_nickname && !cert_stuff(conn, sockindex, data->set.str[STRING_CERT], data->set.str[STRING_KEY])) { /* failf() is already done in cert_stuff() */ free(nickname); return CURLE_SSL_CERTPROBLEM; } /* store the nickname for SelectClientCert() called during handshake */ connssl->client_nickname = nickname; } else connssl->client_nickname = NULL; if(SSL_GetClientAuthDataHook(model, SelectClientCert, (void *)connssl) != SECSuccess) { curlerr = CURLE_SSL_CERTPROBLEM; goto error; } /* Import our model socket onto the existing file descriptor */ connssl->handle = PR_ImportTCPSocket(sockfd); connssl->handle = SSL_ImportFD(model, connssl->handle); if(!connssl->handle) goto error; PR_Close(model); /* We don't need this any more */ model = NULL; /* This is the password associated with the cert that we're using */ if (data->set.str[STRING_KEY_PASSWD]) { SSL_SetPKCS11PinArg(connssl->handle, data->set.str[STRING_KEY_PASSWD]); } /* Force handshake on next I/O */ SSL_ResetHandshake(connssl->handle, /* asServer */ PR_FALSE); SSL_SetURL(connssl->handle, conn->host.name); /* check timeout situation */ time_left = Curl_timeleft(data, NULL, TRUE); if(time_left < 0L) { failf(data, "timed out before SSL handshake"); goto error; } timeout = PR_MillisecondsToInterval((PRUint32) time_left); /* Force the handshake now */ if(SSL_ForceHandshakeWithTimeout(connssl->handle, timeout) != SECSuccess) { if(conn->data->set.ssl.certverifyresult == SSL_ERROR_BAD_CERT_DOMAIN) curlerr = CURLE_PEER_FAILED_VERIFICATION; else if(conn->data->set.ssl.certverifyresult!=0) curlerr = CURLE_SSL_CACERT; goto error; } connssl->state = ssl_connection_complete; conn->recv[sockindex] = nss_recv; conn->send[sockindex] = nss_send; display_conn_info(conn, connssl->handle); if (data->set.str[STRING_SSL_ISSUERCERT]) { SECStatus ret = SECFailure; bool is_nickname; char *nickname = fmt_nickname(data, STRING_SSL_ISSUERCERT, &is_nickname); if(!nickname) return CURLE_OUT_OF_MEMORY; if(is_nickname) /* we support only nicknames in case of STRING_SSL_ISSUERCERT for now */ ret = check_issuer_cert(connssl->handle, nickname); free(nickname); if(SECFailure == ret) { infof(data,"SSL certificate issuer check failed\n"); curlerr = CURLE_SSL_ISSUER_ERROR; goto error; } else { infof(data, "SSL certificate issuer check ok\n"); } } return CURLE_OK; error: /* reset the flag to avoid an infinite loop */ data->state.ssl_connect_retry = FALSE; err = PR_GetError(); if(handle_cc_error(err, data)) curlerr = CURLE_SSL_CERTPROBLEM; else infof(data, "NSS error %d\n", err); if(model) PR_Close(model); if (ssl3 && tlsv1 && isTLSIntoleranceError(err)) { /* schedule reconnect through Curl_retry_request() */ data->state.ssl_connect_retry = TRUE; infof(data, "Error in TLS handshake, trying SSLv3...\n"); return CURLE_OK; } return curlerr; }
static PRIntervalTime Alarms3(PRUint32 loops) { PRIntn i; PRStatus rv; PRAlarm *alarm; AlarmData ad[3]; PRIntervalTime duration = PR_SecondsToInterval(30); PRIntervalTime overhead, timein = PR_IntervalNow(); PRLock *ml = PR_NewLock(); PRCondVar *cv = PR_NewCondVar(ml); for (i = 0; i < 3; ++i) { ad[i].ml = ml; ad[i].cv = cv; ad[i].rate = 1; ad[i].times = loops; ad[i].duration = duration; ad[i].late = ad[i].times = 0; ad[i].timein = PR_IntervalNow(); ad[i].period = PR_SecondsToInterval(1); /* more loops, faster rate => same elapsed time */ ad[i].times = (i + 1) * loops; ad[i].rate = (i + 1) * 10; } alarm = PR_CreateAlarm(); for (i = 0; i < 3; ++i) { (void)PR_SetAlarm( alarm, ad[i].period, ad[i].rate, AlarmFn2, &ad[i]); } overhead = PR_IntervalNow() - timein; PR_Lock(ml); for (i = 0; i < 3; ++i) { while ((PRIntervalTime)(PR_IntervalNow() - ad[i].timein) < duration) PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(ml); timein = PR_IntervalNow(); if (debug_mode) printf ("Alarms3 finished at %u, %u, %u\n", ad[0].timein, ad[1].timein, ad[2].timein); rv = PR_DestroyAlarm(alarm); if (rv != PR_SUCCESS) { if (!debug_mode) failed_already=1; else printf("***Destroying alarm status: FAIL\n"); } PR_DestroyCondVar(cv); PR_DestroyLock(ml); overhead += (duration / 3); overhead += (PR_IntervalNow() - timein); return overhead; } /* Alarms3 */
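/*
 * Hedged sketch of the periodic-alarm API exercised by Alarms3(): create an
 * alarm object, register a callback that fires "rate" times per "period",
 * and tear it down again.  The PRBool(alarm, clientData, late) callback
 * shape is inferred from NSPR's pralarm.h; "tick" and "run_alarm_once" are
 * hypothetical names, and the alarm is destroyed immediately only to show
 * the teardown call.
 */
#include <pralarm.h>

static PRBool
tick(PRAlarm *alarm, void *clientData, PRUint32 late)
{
    (void)alarm; (void)clientData; (void)late;
    /* Return PR_TRUE to keep the periodic notifications coming. */
    return PR_TRUE;
}

static PRStatus
run_alarm_once(void)
{
    PRAlarm *alarm = PR_CreateAlarm();
    if (NULL == alarm)
        return PR_FAILURE;

    /* 10 notifications per one-second period, no client data. */
    (void)PR_SetAlarm(alarm, PR_SecondsToInterval(1), 10, tick, NULL);
    return PR_DestroyAlarm(alarm);
}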
PR_IMPLEMENT(PRStatus) PR_CancelWaitFileDesc(PRWaitGroup *group, PRRecvWait *desc) { #if !defined(WINNT) PRRecvWait **recv_wait; #endif PRStatus rv = PR_SUCCESS; if (NULL == group) group = mw_state->group; PR_ASSERT(NULL != group); if (NULL == group) { PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); return PR_FAILURE; } PR_Lock(group->ml); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); rv = PR_FAILURE; goto unlock; } #ifdef WINNT if (InterlockedCompareExchange((LONG *)&desc->outcome, (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING) == (LONG)PR_MW_PENDING) { PRFileDesc *bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER); PR_ASSERT(NULL != bottom); if (NULL == bottom) { PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); goto unlock; } bottom->secret->state = _PR_FILEDESC_CLOSED; #if 0 fprintf(stderr, "cancel wait recv: closing socket\n"); #endif if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR) { fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError()); exit(1); } } #else if (NULL != (recv_wait = _MW_LookupInternal(group, desc->fd))) { /* it was in the wait table */ _MW_DoneInternal(group, recv_wait, PR_MW_INTERRUPT); goto unlock; } if (!PR_CLIST_IS_EMPTY(&group->io_ready)) { /* is it already complete? */ PRCList *head = PR_LIST_HEAD(&group->io_ready); do { PRRecvWait *done = (PRRecvWait*)head; if (done == desc) goto unlock; head = PR_NEXT_LINK(head); } while (head != &group->io_ready); } PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); rv = PR_FAILURE; #endif unlock: PR_Unlock(group->ml); return rv; } /* PR_CancelWaitFileDesc */
static void PR_CALLBACK Client(void *arg) { PRStatus rv; PRIntn index; char buffer[1024]; PRFileDesc *fd = NULL; PRUintn clipping = DEFAULT_CLIPPING; PRThread *me = PR_GetCurrentThread(); CSClient_t *client = (CSClient_t*)arg; CSDescriptor_t *descriptor = PR_NEW(CSDescriptor_t); PRIntervalTime timeout = PR_MillisecondsToInterval(DEFAULT_CLIENT_TIMEOUT); for (index = 0; index < sizeof(buffer); ++index) buffer[index] = (char)index; client->started = PR_IntervalNow(); PR_Lock(client->ml); client->state = cs_run; PR_NotifyCondVar(client->stateChange); PR_Unlock(client->ml); TimeOfDayMessage("Client started at", me); while (cs_run == client->state) { PRInt32 bytes, descbytes, filebytes, netbytes; (void)PR_NetAddrToString(&client->serverAddress, buffer, sizeof(buffer)); TEST_LOG(cltsrv_log_file, TEST_LOG_INFO, ("\tClient(0x%p): connecting to server at %s\n", me, buffer)); fd = PR_Socket(domain, SOCK_STREAM, protocol); TEST_ASSERT(NULL != fd); rv = PR_Connect(fd, &client->serverAddress, timeout); if (PR_FAILURE == rv) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): conection failed (%d, %d)\n", me, PR_GetError(), PR_GetOSError())); goto aborted; } memset(descriptor, 0, sizeof(*descriptor)); descriptor->size = PR_htonl(descbytes = rand() % clipping); PR_snprintf( descriptor->filename, sizeof(descriptor->filename), "CS%p%p-%p.dat", client->started, me, client->operations); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tClient(0x%p): sending descriptor for %u bytes\n", me, descbytes)); bytes = PR_Send( fd, descriptor, sizeof(*descriptor), SEND_FLAGS, timeout); if (sizeof(CSDescriptor_t) != bytes) { if (Aborted(PR_FAILURE)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): send descriptor timeout\n", me)); goto retry; } } TEST_ASSERT(sizeof(*descriptor) == bytes); netbytes = 0; while (netbytes < descbytes) { filebytes = sizeof(buffer); if ((descbytes - netbytes) < filebytes) filebytes = descbytes - netbytes; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tClient(0x%p): sending %d bytes\n", me, filebytes)); bytes = PR_Send(fd, buffer, filebytes, SEND_FLAGS, timeout); if (filebytes != bytes) { if (Aborted(PR_FAILURE)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): send data timeout\n", me)); goto retry; } } TEST_ASSERT(bytes == filebytes); netbytes += bytes; } filebytes = 0; while (filebytes < descbytes) { netbytes = sizeof(buffer); if ((descbytes - filebytes) < netbytes) netbytes = descbytes - filebytes; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tClient(0x%p): receiving %d bytes\n", me, netbytes)); bytes = PR_Recv(fd, buffer, netbytes, RECV_FLAGS, timeout); if (-1 == bytes) { if (Aborted(PR_FAILURE)) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): receive data aborted\n", me)); goto aborted; } else if (PR_IO_TIMEOUT_ERROR == PR_GetError()) TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): receive data timeout\n", me)); else TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tClient(0x%p): receive error (%d, %d)\n", me, PR_GetError(), PR_GetOSError())); goto retry; } if (0 == bytes) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tClient(0x%p): unexpected end of stream\n", PR_GetCurrentThread())); break; } filebytes += bytes; } rv = PR_Shutdown(fd, PR_SHUTDOWN_BOTH); if (Aborted(rv)) goto aborted; TEST_ASSERT(PR_SUCCESS == rv); retry: (void)PR_Close(fd); fd = NULL; TEST_LOG( cltsrv_log_file, TEST_LOG_INFO, 
("\tClient(0x%p): disconnected from server\n", me)); PR_Lock(client->ml); client->operations += 1; client->bytesTransferred += 2 * descbytes; rv = PR_WaitCondVar(client->stateChange, rand() % clipping); PR_Unlock(client->ml); if (Aborted(rv)) break; } aborted: client->stopped = PR_IntervalNow(); PR_ClearInterrupt(); if (NULL != fd) rv = PR_Close(fd); PR_Lock(client->ml); client->state = cs_exit; PR_NotifyCondVar(client->stateChange); PR_Unlock(client->ml); PR_DELETE(descriptor); TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("\tClient(0x%p): stopped after %u operations and %u bytes\n", PR_GetCurrentThread(), client->operations, client->bytesTransferred)); } /* Client */
PR_IMPLEMENT(PRWaitGroup*) PR_CreateWaitGroup(PRInt32 size /* ignored */) { PRWaitGroup *wg; if (NULL == (wg = PR_NEWZAP(PRWaitGroup))) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); goto failed; } /* the wait group itself */ wg->ml = PR_NewLock(); if (NULL == wg->ml) goto failed_lock; wg->io_taken = PR_NewCondVar(wg->ml); if (NULL == wg->io_taken) goto failed_cvar0; wg->io_complete = PR_NewCondVar(wg->ml); if (NULL == wg->io_complete) goto failed_cvar1; wg->new_business = PR_NewCondVar(wg->ml); if (NULL == wg->new_business) goto failed_cvar2; wg->mw_manage = PR_NewCondVar(wg->ml); if (NULL == wg->mw_manage) goto failed_cvar3; PR_INIT_CLIST(&wg->group_link); PR_INIT_CLIST(&wg->io_ready); /* the waiters sequence */ wg->waiter = (_PRWaiterHash*)PR_CALLOC( sizeof(_PRWaiterHash) + (_PR_DEFAULT_HASH_LENGTH * sizeof(PRRecvWait*))); if (NULL == wg->waiter) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); goto failed_waiter; } wg->waiter->count = 0; wg->waiter->length = _PR_DEFAULT_HASH_LENGTH; #ifdef WINNT _PR_MD_NEW_LOCK(&wg->mdlock); PR_INIT_CLIST(&wg->wait_list); #endif /* WINNT */ PR_Lock(mw_lock); PR_APPEND_LINK(&wg->group_link, &mw_state->group_list); PR_Unlock(mw_lock); return wg; failed_waiter: PR_DestroyCondVar(wg->mw_manage); failed_cvar3: PR_DestroyCondVar(wg->new_business); failed_cvar2: PR_DestroyCondVar(wg->io_complete); failed_cvar1: PR_DestroyCondVar(wg->io_taken); failed_cvar0: PR_DestroyLock(wg->ml); failed_lock: PR_DELETE(wg); wg = NULL; failed: return wg; } /* MW_CreateWaitGroup */
static PRStatus ProcessRequest(PRFileDesc *fd, CSServer_t *server) { PRStatus drv, rv; char buffer[1024]; PRFileDesc *file = NULL; PRThread * me = PR_GetCurrentThread(); PRInt32 bytes, descbytes, netbytes, filebytes = 0; CSDescriptor_t *descriptor = PR_NEW(CSDescriptor_t); PRIntervalTime timeout = PR_MillisecondsToInterval(DEFAULT_SERVER_TIMEOUT); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tProcessRequest(0x%p): receiving desciptor\n", me)); bytes = PR_Recv( fd, descriptor, sizeof(*descriptor), RECV_FLAGS, timeout); if (-1 == bytes) { rv = PR_FAILURE; if (Aborted(rv)) goto exit; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tProcessRequest(0x%p): receive timeout\n", me)); } goto exit; } if (0 == bytes) { rv = PR_FAILURE; TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tProcessRequest(0x%p): unexpected end of file\n", me)); goto exit; } descbytes = PR_ntohl(descriptor->size); TEST_ASSERT(sizeof(*descriptor) == bytes); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tProcessRequest(0x%p): read descriptor {%d, %s}\n", me, descbytes, descriptor->filename)); file = PR_Open( descriptor->filename, (PR_CREATE_FILE | PR_WRONLY), 0666); if (NULL == file) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\tProcessRequest(0x%p): open file timeout\n", me)); goto aborted; } } TEST_ASSERT(NULL != file); filebytes = 0; while (filebytes < descbytes) { netbytes = sizeof(buffer); if ((descbytes - filebytes) < netbytes) netbytes = descbytes - filebytes; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tProcessRequest(0x%p): receive %d bytes\n", me, netbytes)); bytes = PR_Recv(fd, buffer, netbytes, RECV_FLAGS, timeout); if (-1 == bytes) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): receive data timeout\n", me)); goto aborted; } /* * XXX: I got (PR_CONNECT_RESET_ERROR, ERROR_NETNAME_DELETED) * on NT here. This is equivalent to ECONNRESET on Unix. 
* -wtc */ TEST_LOG( cltsrv_log_file, TEST_LOG_WARNING, ("\t\tProcessRequest(0x%p): unexpected error (%d, %d)\n", me, PR_GetError(), PR_GetOSError())); goto aborted; } if(0 == bytes) { TEST_LOG( cltsrv_log_file, TEST_LOG_WARNING, ("\t\tProcessRequest(0x%p): unexpected end of stream\n", me)); rv = PR_FAILURE; goto aborted; } filebytes += bytes; netbytes = bytes; /* The byte count for PR_Write should be positive */ MY_ASSERT(netbytes > 0); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tProcessRequest(0x%p): write %d bytes to file\n", me, netbytes)); bytes = PR_Write(file, buffer, netbytes); if (netbytes != bytes) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): write file timeout\n", me)); goto aborted; } } TEST_ASSERT(bytes > 0); } PR_Lock(server->ml); server->operations += 1; server->bytesTransferred += filebytes; PR_Unlock(server->ml); rv = PR_Close(file); if (Aborted(rv)) goto aborted; TEST_ASSERT(PR_SUCCESS == rv); file = NULL; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tProcessRequest(0x%p): opening %s\n", me, descriptor->filename)); file = PR_Open(descriptor->filename, PR_RDONLY, 0); if (NULL == file) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): open file timeout\n", PR_GetCurrentThread())); goto aborted; } TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): other file open error (%u, %u)\n", me, PR_GetError(), PR_GetOSError())); goto aborted; } TEST_ASSERT(NULL != file); netbytes = 0; while (netbytes < descbytes) { filebytes = sizeof(buffer); if ((descbytes - netbytes) < filebytes) filebytes = descbytes - netbytes; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tProcessRequest(0x%p): read %d bytes from file\n", me, filebytes)); bytes = PR_Read(file, buffer, filebytes); if (filebytes != bytes) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): read file timeout\n", me)); else TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): other file error (%d, %d)\n", me, PR_GetError(), PR_GetOSError())); goto aborted; } TEST_ASSERT(bytes > 0); netbytes += bytes; filebytes = bytes; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tProcessRequest(0x%p): sending %d bytes\n", me, filebytes)); bytes = PR_Send(fd, buffer, filebytes, SEND_FLAGS, timeout); if (filebytes != bytes) { rv = PR_FAILURE; if (Aborted(rv)) goto aborted; if (PR_IO_TIMEOUT_ERROR == PR_GetError()) { TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tProcessRequest(0x%p): send data timeout\n", me)); goto aborted; } break; } TEST_ASSERT(bytes > 0); } PR_Lock(server->ml); server->bytesTransferred += filebytes; PR_Unlock(server->ml); rv = PR_Shutdown(fd, PR_SHUTDOWN_BOTH); if (Aborted(rv)) goto aborted; rv = PR_Close(file); if (Aborted(rv)) goto aborted; TEST_ASSERT(PR_SUCCESS == rv); file = NULL; aborted: PR_ClearInterrupt(); if (NULL != file) PR_Close(file); drv = PR_Delete(descriptor->filename); TEST_ASSERT(PR_SUCCESS == drv); exit: TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tProcessRequest(0x%p): Finished\n", me)); PR_DELETE(descriptor); #if defined(WIN95) PR_Sleep(PR_MillisecondsToInterval(200)); /* lth. see note [1] */ #endif return rv; } /* ProcessRequest */
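/*
 * Editor's note: illustrative sketch only.  The CSDescriptor_t layout below
 * is an assumption reconstructed from the fields ProcessRequest() and
 * Client() actually touch (size, filename); the real definition lives
 * elsewhere in this test.  SendOneRequest is a hypothetical helper showing
 * the exchange the server above expects: a fixed-size descriptor, then
 * 'size' payload bytes, which the server stores in 'filename' and then
 * streams straight back.
 */
#include <string.h>
#include "nspr.h"

typedef struct CSDescriptor_s
{
    PRInt32 size;         /* payload length, sent in network byte order */
    char filename[60];    /* scratch file the server creates, NUL padded */
} CSDescriptor_t;

static PRStatus SendOneRequest(
    PRFileDesc *fd, const char *name,
    const char *payload, PRInt32 size, PRIntervalTime timeout)
{
    CSDescriptor_t desc;
    char echo[1024];
    PRInt32 rcvd = 0;

    memset(&desc, 0, sizeof(desc));
    desc.size = PR_htonl(size);
    PR_snprintf(desc.filename, sizeof(desc.filename), "%s", name);

    if (PR_Send(fd, &desc, sizeof(desc), 0, timeout) != (PRInt32)sizeof(desc))
        return PR_FAILURE;
    if (PR_Send(fd, payload, size, 0, timeout) != size)
        return PR_FAILURE;

    while (rcvd < size)   /* the server echoes the stored file back */
    {
        PRInt32 bytes = PR_Recv(fd, echo, sizeof(echo), 0, timeout);
        if (bytes <= 0) return PR_FAILURE;
        rcvd += bytes;
    }
    return PR_Shutdown(fd, PR_SHUTDOWN_BOTH);
}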
static PRStatus _MW_PollInternal(PRWaitGroup *group) { PRRecvWait **waiter; PRStatus rv = PR_FAILURE; PRInt32 count, count_ready; PRIntervalTime polling_interval; group->poller = PR_GetCurrentThread(); while (PR_TRUE) { PRIntervalTime now, since_last_poll; PRPollDesc *poll_list; while (0 == group->waiter->count) { PRStatus st; st = PR_WaitCondVar(group->new_business, PR_INTERVAL_NO_TIMEOUT); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); goto aborted; } if (_MW_ABORTED(st)) goto aborted; } /* ** There's something to do. See if our existing polling list ** is large enough for what we have to do? */ while (group->polling_count < group->waiter->count) { PRUint32 old_count = group->waiter->count; PRUint32 new_count = PR_ROUNDUP(old_count, _PR_POLL_COUNT_FUDGE); PRSize new_size = sizeof(PRPollDesc) * new_count; PRPollDesc *old_polling_list = group->polling_list; PR_Unlock(group->ml); poll_list = (PRPollDesc*)PR_CALLOC(new_size); if (NULL == poll_list) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); PR_Lock(group->ml); goto failed_alloc; } if (NULL != old_polling_list) PR_DELETE(old_polling_list); PR_Lock(group->ml); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); goto aborted; } group->polling_list = poll_list; group->polling_count = new_count; } now = PR_IntervalNow(); polling_interval = max_polling_interval; since_last_poll = now - group->last_poll; waiter = &group->waiter->recv_wait; poll_list = group->polling_list; for (count = 0; count < group->waiter->count; ++waiter) { PR_ASSERT(waiter < &group->waiter->recv_wait + group->waiter->length); if (NULL != *waiter) /* a live one! */ { if ((PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout) && (since_last_poll >= (*waiter)->timeout)) _MW_DoneInternal(group, waiter, PR_MW_TIMEOUT); else { if (PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout) { (*waiter)->timeout -= since_last_poll; if ((*waiter)->timeout < polling_interval) polling_interval = (*waiter)->timeout; } PR_ASSERT(poll_list < group->polling_list + group->polling_count); poll_list->fd = (*waiter)->fd; poll_list->in_flags = PR_POLL_READ; poll_list->out_flags = 0; #if 0 printf( "Polling 0x%x[%d]: [fd: 0x%x, tmo: %u]\n", poll_list, count, poll_list->fd, (*waiter)->timeout); #endif poll_list += 1; count += 1; } } } PR_ASSERT(count == group->waiter->count); /* ** If there are no more threads waiting for completion, ** we need to return. */ if ((!PR_CLIST_IS_EMPTY(&group->io_ready)) && (1 == group->waiting_threads)) break; if (0 == count) continue; /* wait for new business */ group->last_poll = now; PR_Unlock(group->ml); count_ready = PR_Poll(group->polling_list, count, polling_interval); PR_Lock(group->ml); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); goto aborted; } if (-1 == count_ready) { goto failed_poll; /* that's a shame */ } else if (0 < count_ready) { for (poll_list = group->polling_list; count > 0; poll_list++, count--) { PR_ASSERT( poll_list < group->polling_list + group->polling_count); if (poll_list->out_flags != 0) { waiter = _MW_LookupInternal(group, poll_list->fd); /* ** If 'waiter' is NULL, that means the wait receive ** descriptor has been canceled. */ if (NULL != waiter) _MW_DoneInternal(group, waiter, PR_MW_SUCCESS); } } } /* ** If there are no more threads waiting for completion, ** we need to return. ** This thread was "borrowed" to do the polling, but it really ** belongs to the client. 
*/ if ((!PR_CLIST_IS_EMPTY(&group->io_ready)) && (1 == group->waiting_threads)) break; } rv = PR_SUCCESS; aborted: failed_poll: failed_alloc: group->poller = NULL; /* we were the poller; now we aren't */ if ((_prmw_running == group->state) && (group->waiting_threads > 1)) { /* Wake up one thread to become the new poller. */ PR_NotifyCondVar(group->io_complete); } return rv; /* we return with the lock held */ } /* _MW_PollInternal */
static void PR_CALLBACK Worker(void *arg) { PRStatus rv; PRNetAddr from; PRFileDesc *fd = NULL; PRThread *me = PR_GetCurrentThread(); CSWorker_t *worker = (CSWorker_t*)arg; CSServer_t *server = worker->server; CSPool_t *pool = &server->pool; TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("\t\tWorker(0x%p): started [%u]\n", me, pool->workers + 1)); PR_Lock(server->ml); PR_APPEND_LINK(&worker->element, &server->list); pool->workers += 1; /* define our existance */ while (cs_run == server->state) { while (pool->accepting >= server->workers.accepting) { TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tWorker(0x%p): waiting for accept slot[%d]\n", me, pool->accepting)); rv = PR_WaitCondVar(pool->acceptComplete, PR_INTERVAL_NO_TIMEOUT); if (Aborted(rv) || (cs_run != server->state)) { TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("\tWorker(0x%p): has been %s\n", me, (Aborted(rv) ? "interrupted" : "stopped"))); goto exit; } } pool->accepting += 1; /* how many are really in accept */ PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\t\tWorker(0x%p): calling accept\n", me)); fd = PR_Accept(server->listener, &from, PR_INTERVAL_NO_TIMEOUT); PR_Lock(server->ml); pool->accepting -= 1; PR_NotifyCondVar(pool->acceptComplete); if ((NULL == fd) && Aborted(PR_FAILURE)) { if (NULL != server->listener) { PR_Close(server->listener); server->listener = NULL; } goto exit; } if (NULL != fd) { /* ** Create another worker of the total number of workers is ** less than the minimum specified or we have none left in ** accept() AND we're not over the maximum. ** This sort of presumes that the number allowed in accept ** is at least as many as the minimum. Otherwise we'll keep ** creating new threads and deleting them soon after. */ PRBool another = ((pool->workers < server->workers.minimum) || ((0 == pool->accepting) && (pool->workers < server->workers.maximum))) ? PR_TRUE : PR_FALSE; pool->active += 1; PR_Unlock(server->ml); if (another) (void)CreateWorker(server, pool); rv = ProcessRequest(fd, server); if (PR_SUCCESS != rv) TEST_LOG( cltsrv_log_file, TEST_LOG_ERROR, ("\t\tWorker(0x%p): server process ended abnormally\n", me)); (void)PR_Close(fd); fd = NULL; PR_Lock(server->ml); pool->active -= 1; } } exit: PR_ClearInterrupt(); PR_Unlock(server->ml); if (NULL != fd) { (void)PR_Shutdown(fd, PR_SHUTDOWN_BOTH); (void)PR_Close(fd); } TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("\t\tWorker(0x%p): exiting [%u]\n", PR_GetCurrentThread(), pool->workers)); PR_Lock(server->ml); pool->workers -= 1; /* undefine our existance */ PR_REMOVE_AND_INIT_LINK(&worker->element); PR_NotifyCondVar(pool->exiting); PR_Unlock(server->ml); PR_DELETE(worker); /* destruction of the "worker" object */ } /* Worker */
PR_IMPLEMENT(PRRecvWait*) PR_WaitRecvReady(PRWaitGroup *group) { PRCList *io_ready = NULL; #ifdef WINNT PRThread *me = _PR_MD_CURRENT_THREAD(); _MDOverlapped *overlapped; #endif if (!_pr_initialized) _PR_ImplicitInitialization(); if ((NULL == group) && (NULL == (group = MW_Init2()))) goto failed_init; PR_Lock(group->ml); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); goto invalid_state; } group->waiting_threads += 1; /* the polling thread is counted */ #ifdef WINNT _PR_MD_LOCK(&group->mdlock); while (PR_CLIST_IS_EMPTY(&group->io_ready)) { _PR_THREAD_LOCK(me); me->state = _PR_IO_WAIT; PR_APPEND_LINK(&me->waitQLinks, &group->wait_list); if (!_PR_IS_NATIVE_THREAD(me)) { _PR_SLEEPQ_LOCK(me->cpu); _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT); _PR_SLEEPQ_UNLOCK(me->cpu); } _PR_THREAD_UNLOCK(me); _PR_MD_UNLOCK(&group->mdlock); PR_Unlock(group->ml); _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); me->state = _PR_RUNNING; PR_Lock(group->ml); _PR_MD_LOCK(&group->mdlock); if (_PR_PENDING_INTERRUPT(me)) { PR_REMOVE_LINK(&me->waitQLinks); _PR_MD_UNLOCK(&group->mdlock); me->flags &= ~_PR_INTERRUPT; me->io_suspended = PR_FALSE; PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); goto aborted; } } io_ready = PR_LIST_HEAD(&group->io_ready); PR_ASSERT(io_ready != NULL); PR_REMOVE_LINK(io_ready); _PR_MD_UNLOCK(&group->mdlock); overlapped = (_MDOverlapped *) ((char *)io_ready - offsetof(_MDOverlapped, data)); io_ready = &overlapped->data.mw.desc->internal; #else do { /* ** If the I/O ready list isn't empty, have this thread ** return with the first receive wait object that's available. */ if (PR_CLIST_IS_EMPTY(&group->io_ready)) { /* ** Is there a polling thread yet? If not, grab this thread ** and use it. */ if (NULL == group->poller) { /* ** This thread will stay do polling until it becomes the only one ** left to service a completion. Then it will return and there will ** be none left to actually poll or to run completions. ** ** The polling function should only return w/ failure or ** with some I/O ready. */ if (PR_FAILURE == _MW_PollInternal(group)) goto failed_poll; } else { /* ** There are four reasons a thread can be awakened from ** a wait on the io_complete condition variable. ** 1. Some I/O has completed, i.e., the io_ready list ** is nonempty. ** 2. The wait group is canceled. ** 3. The thread is interrupted. ** 4. The current polling thread has to leave and needs ** a replacement. ** The logic to find a new polling thread is made more ** complicated by all the other possible events. ** I tried my best to write the logic clearly, but ** it is still full of if's with continue and goto. */ PRStatus st; do { st = PR_WaitCondVar(group->io_complete, PR_INTERVAL_NO_TIMEOUT); if (_prmw_running != group->state) { PR_SetError(PR_INVALID_STATE_ERROR, 0); goto aborted; } if (_MW_ABORTED(st) || (NULL == group->poller)) break; } while (PR_CLIST_IS_EMPTY(&group->io_ready)); /* ** The thread is interrupted and has to leave. It might ** have also been awakened to process ready i/o or be the ** new poller. To be safe, if either condition is true, ** we awaken another thread to take its place. */ if (_MW_ABORTED(st)) { if ((NULL == group->poller || !PR_CLIST_IS_EMPTY(&group->io_ready)) && group->waiting_threads > 1) PR_NotifyCondVar(group->io_complete); goto aborted; } /* ** A new poller is needed, but can I be the new poller? ** If there is no i/o ready, sure. But if there is any ** i/o ready, it has a higher priority. 
I want to ** process the ready i/o first and wake up another ** thread to be the new poller. */ if (NULL == group->poller) { if (PR_CLIST_IS_EMPTY(&group->io_ready)) continue; if (group->waiting_threads > 1) PR_NotifyCondVar(group->io_complete); } } PR_ASSERT(!PR_CLIST_IS_EMPTY(&group->io_ready)); } io_ready = PR_LIST_HEAD(&group->io_ready); PR_NotifyCondVar(group->io_taken); PR_ASSERT(io_ready != NULL); PR_REMOVE_LINK(io_ready); } while (NULL == io_ready); failed_poll: #endif aborted: group->waiting_threads -= 1; invalid_state: (void)MW_TestForShutdownInternal(group); PR_Unlock(group->ml); failed_init: if (NULL != io_ready) { /* If the operation failed, record the reason why */ switch (((PRRecvWait*)io_ready)->outcome) { case PR_MW_PENDING: PR_ASSERT(0); break; case PR_MW_SUCCESS: #ifndef WINNT _MW_InitialRecv(io_ready); #endif break; #ifdef WINNT case PR_MW_FAILURE: _PR_MD_MAP_READ_ERROR(overlapped->data.mw.error); break; #endif case PR_MW_TIMEOUT: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); break; case PR_MW_INTERRUPT: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); break; default: break; } #ifdef WINNT if (NULL != overlapped->data.mw.timer) { PR_ASSERT(PR_INTERVAL_NO_TIMEOUT != overlapped->data.mw.desc->timeout); CancelTimer(overlapped->data.mw.timer); } else { PR_ASSERT(PR_INTERVAL_NO_TIMEOUT == overlapped->data.mw.desc->timeout); } PR_DELETE(overlapped); #endif } return (PRRecvWait*)io_ready; } /* PR_WaitRecvReady */
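/*
 * Editor's note: illustrative sketch only.  It shows the consumer side of
 * the wait-group API implemented above: create a group, register a receive
 * wait, then block in PR_WaitRecvReady() until the descriptor completes.
 * The PRRecvWait fields used here (fd, timeout, buffer.start, buffer.length,
 * outcome, bytesRecv) are assumed from prmwait.h; WaitForOneSocket is a
 * hypothetical helper name.
 */
#include <string.h>
#include "nspr.h"

static PRInt32 WaitForOneSocket(PRFileDesc *sock, void *buf, PRSize len)
{
    PRWaitGroup *group = PR_CreateWaitGroup(0);   /* size argument is ignored */
    PRRecvWait desc;
    PRRecvWait *ready;
    PRInt32 got = -1;

    if (NULL == group) return -1;

    memset(&desc, 0, sizeof(desc));
    desc.fd = sock;
    desc.timeout = PR_SecondsToInterval(10);
    desc.buffer.start = buf;
    desc.buffer.length = len;

    if (PR_AddWaitFileDesc(group, &desc) == PR_SUCCESS)
    {
        ready = PR_WaitRecvReady(group);          /* may poll on our behalf */
        if ((ready == &desc) && (PR_MW_SUCCESS == ready->outcome))
            got = ready->bytesRecv;
    }
    (void)PR_DestroyWaitGroup(group);
    return got;
}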
static void PR_CALLBACK Server(void *arg) { PRStatus rv; PRNetAddr serverAddress; PRThread *me = PR_GetCurrentThread(); CSServer_t *server = (CSServer_t*)arg; PRSocketOptionData sockOpt; server->listener = PR_Socket(domain, SOCK_STREAM, protocol); sockOpt.option = PR_SockOpt_Reuseaddr; sockOpt.value.reuse_addr = PR_TRUE; rv = PR_SetSocketOption(server->listener, &sockOpt); TEST_ASSERT(PR_SUCCESS == rv); memset(&serverAddress, 0, sizeof(serverAddress)); if (PR_AF_INET6 != domain) rv = PR_InitializeNetAddr(PR_IpAddrAny, DEFAULT_PORT, &serverAddress); else rv = PR_SetNetAddr(PR_IpAddrAny, PR_AF_INET6, DEFAULT_PORT, &serverAddress); rv = PR_Bind(server->listener, &serverAddress); TEST_ASSERT(PR_SUCCESS == rv); rv = PR_Listen(server->listener, server->backlog); TEST_ASSERT(PR_SUCCESS == rv); server->started = PR_IntervalNow(); TimeOfDayMessage("Server started at", me); PR_Lock(server->ml); server->state = cs_run; PR_NotifyCondVar(server->stateChange); PR_Unlock(server->ml); /* ** Create the first worker (actually, a thread that accepts ** connections and then processes the work load as needed). ** From this point on, additional worker threads are created ** as they are needed by existing worker threads. */ rv = CreateWorker(server, &server->pool); TEST_ASSERT(PR_SUCCESS == rv); /* ** From here on this thread is merely hanging around as the contact ** point for the main test driver. It's just waiting for the driver ** to declare the test complete. */ TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tServer(0x%p): waiting for state change\n", me)); PR_Lock(server->ml); while ((cs_run == server->state) && !Aborted(rv)) { rv = PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(server->ml); PR_ClearInterrupt(); TEST_LOG( cltsrv_log_file, TEST_LOG_INFO, ("\tServer(0x%p): shutting down workers\n", me)); /* ** Get all the worker threads to exit. They know how to ** clean up after themselves, so this is just a matter of ** waiting for clorine in the pool to take effect. During ** this stage we're ignoring interrupts. */ server->workers.minimum = server->workers.maximum = 0; PR_Lock(server->ml); while (!PR_CLIST_IS_EMPTY(&server->list)) { PRCList *head = PR_LIST_HEAD(&server->list); CSWorker_t *worker = (CSWorker_t*)head; TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("\tServer(0x%p): interrupting worker(0x%p)\n", me, worker)); rv = PR_Interrupt(worker->thread); TEST_ASSERT(PR_SUCCESS == rv); PR_REMOVE_AND_INIT_LINK(head); } while (server->pool.workers > 0) { TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("\tServer(0x%p): waiting for %u workers to exit\n", me, server->pool.workers)); (void)PR_WaitCondVar(server->pool.exiting, PR_INTERVAL_NO_TIMEOUT); } server->state = cs_exit; PR_NotifyCondVar(server->stateChange); PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("\tServer(0x%p): stopped after %u operations and %u bytes\n", me, server->operations, server->bytesTransferred)); if (NULL != server->listener) PR_Close(server->listener); server->stopped = PR_IntervalNow(); } /* Server */
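/*
 * Editor's note: illustrative sketch only.  Server() above shuts its workers
 * down by calling PR_Interrupt() on each one and letting them notice the
 * pending interrupt when their blocking call (PR_Accept, PR_Recv,
 * PR_WaitCondVar) fails.  The helper below is a hypothetical condensation of
 * that pattern, similar in spirit to the test's Aborted() predicate.
 */
#include "nspr.h"

/* Returns PR_TRUE if the failed call was aborted by PR_Interrupt(). */
static PRBool WasInterrupted(PRStatus rv)
{
    if ((PR_FAILURE == rv) && (PR_PENDING_INTERRUPT_ERROR == PR_GetError()))
    {
        PR_ClearInterrupt();    /* acknowledge it so later calls can proceed */
        return PR_TRUE;
    }
    return PR_FALSE;
}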
PR_IMPLEMENT(PRDescIdentity) PR_GetUniqueIdentity(const char *layer_name) { PRDescIdentity identity, length; char **names = NULL, *name = NULL, **old = NULL; if (!_pr_initialized) _PR_ImplicitInitialization(); PR_ASSERT((PRDescIdentity)0x7fff > identity_cache.ident); if (NULL != layer_name) { name = (char*)PR_Malloc(strlen(layer_name) + 1); if (NULL == name) { PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); return PR_INVALID_IO_LAYER; } strcpy(name, layer_name); } /* this initial code runs unsafe */ retry: PR_ASSERT(NULL == names); length = identity_cache.length; if (length < (identity_cache.ident + 1)) { length += ID_CACHE_INCREMENT; names = (char**)PR_CALLOC(length * sizeof(char*)); if (NULL == names) { if (NULL != name) PR_DELETE(name); PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); return PR_INVALID_IO_LAYER; } } /* now we get serious about thread safety */ PR_Lock(identity_cache.ml); PR_ASSERT(identity_cache.ident <= identity_cache.length); identity = identity_cache.ident + 1; if (identity > identity_cache.length) /* there's no room */ { /* we have to do something - hopefully it's already done */ if ((NULL != names) && (length >= identity)) { /* what we did is still okay */ memcpy( names, identity_cache.name, identity_cache.length * sizeof(char*)); old = identity_cache.name; identity_cache.name = names; identity_cache.length = length; names = NULL; } else { PR_ASSERT(identity_cache.ident <= identity_cache.length); PR_Unlock(identity_cache.ml); if (NULL != names) PR_DELETE(names); goto retry; } } if (NULL != name) /* there's a name to be stored */ { identity_cache.name[identity] = name; } identity_cache.ident = identity; PR_ASSERT(identity_cache.ident <= identity_cache.length); PR_Unlock(identity_cache.ml); if (NULL != old) PR_DELETE(old); if (NULL != names) PR_DELETE(names); return identity; } /* PR_GetUniqueIdentity */
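/*
 * Editor's note: illustrative sketch only.  PR_GetUniqueIdentity() above
 * hands out the PRDescIdentity needed to build a custom I/O layer; its usual
 * companions are PR_CreateIOLayerStub() and PR_PushIOLayer().  The layer
 * name, the helper name PushTracingLayer, and the idea of overriding some
 * methods are assumptions made for the example.
 */
#include "nspr.h"

static PRDescIdentity my_identity = PR_INVALID_IO_LAYER;

static PRStatus PushTracingLayer(PRFileDesc *stack)
{
    PRFileDesc *layer;

    if (PR_INVALID_IO_LAYER == my_identity)
        my_identity = PR_GetUniqueIdentity("tracing layer");
    if (PR_INVALID_IO_LAYER == my_identity) return PR_FAILURE;

    /* Start from the default methods table, which forwards each call to the
     * next lower layer; a real layer would override some entries first. */
    layer = PR_CreateIOLayerStub(my_identity, PR_GetDefaultIOMethods());
    if (NULL == layer) return PR_FAILURE;

    return PR_PushIOLayer(stack, PR_TOP_IO_LAYER, layer);
}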
PRIntn main(PRIntn argc, char** argv) { PRUintn index; PRBool boolean; CSClient_t *client; PRStatus rv, joinStatus; CSServer_t *server = NULL; PRUintn backlog = DEFAULT_BACKLOG; PRUintn clients = DEFAULT_CLIENTS; const char *serverName = DEFAULT_SERVER; PRBool serverIsLocal = PR_TRUE; PRUintn accepting = ALLOWED_IN_ACCEPT; PRUintn workersMin = DEFAULT_WORKERS_MIN; PRUintn workersMax = DEFAULT_WORKERS_MAX; PRIntn execution = DEFAULT_EXECUTION_TIME; PRIntn low = DEFAULT_LOW, high = DEFAULT_HIGH; /* * -G use global threads * -a <n> threads allowed in accept * -b <n> backlock for listen * -c <threads> number of clients to create * -f <low> low water mark for caching FDs * -F <high> high water mark for caching FDs * -w <threads> minimal number of server threads * -W <threads> maximum number of server threads * -e <seconds> duration of the test in seconds * -s <string> dsn name of server (implies no server here) * -v verbosity */ PLOptStatus os; PLOptState *opt = PL_CreateOptState(argc, argv, "GX6b:a:c:f:F:w:W:e:s:vdhp"); debug_out = PR_GetSpecialFD(PR_StandardError); while (PL_OPT_EOL != (os = PL_GetNextOpt(opt))) { if (PL_OPT_BAD == os) continue; switch (opt->option) { case 'G': /* use global threads */ thread_scope = PR_GLOBAL_THREAD; break; case 'X': /* use XTP as transport */ protocol = 36; break; case '6': /* Use IPv6 */ domain = PR_AF_INET6; break; case 'a': /* the value for accepting */ accepting = atoi(opt->value); break; case 'b': /* the value for backlock */ backlog = atoi(opt->value); break; case 'c': /* number of client threads */ clients = atoi(opt->value); break; case 'f': /* low water fd cache */ low = atoi(opt->value); break; case 'F': /* low water fd cache */ high = atoi(opt->value); break; case 'w': /* minimum server worker threads */ workersMin = atoi(opt->value); break; case 'W': /* maximum server worker threads */ workersMax = atoi(opt->value); break; case 'e': /* program execution time in seconds */ execution = atoi(opt->value); break; case 's': /* server's address */ serverName = opt->value; break; case 'v': /* verbosity */ verbosity = IncrementVerbosity(); break; case 'd': /* debug mode */ debug_mode = PR_TRUE; break; case 'p': /* pthread mode */ pthread_stats = PR_TRUE; break; case 'h': default: Help(); return 2; } } PL_DestroyOptState(opt); if (0 != PL_strcmp(serverName, DEFAULT_SERVER)) serverIsLocal = PR_FALSE; if (0 == execution) execution = DEFAULT_EXECUTION_TIME; if (0 == workersMax) workersMax = DEFAULT_WORKERS_MAX; if (0 == workersMin) workersMin = DEFAULT_WORKERS_MIN; if (0 == accepting) accepting = ALLOWED_IN_ACCEPT; if (0 == backlog) backlog = DEFAULT_BACKLOG; if (workersMin > accepting) accepting = workersMin; PR_STDIO_INIT(); TimeOfDayMessage("Client/Server started at", PR_GetCurrentThread()); cltsrv_log_file = PR_NewLogModule("cltsrv_log"); MY_ASSERT(NULL != cltsrv_log_file); boolean = PR_SetLogFile("cltsrv.log"); MY_ASSERT(boolean); #ifdef XP_MAC debug_mode = PR_TRUE; #endif rv = PR_SetFDCacheSize(low, high); PR_ASSERT(PR_SUCCESS == rv); if (serverIsLocal) { /* Establish the server */ TEST_LOG( cltsrv_log_file, TEST_LOG_INFO, ("main(0x%p): starting server\n", PR_GetCurrentThread())); server = PR_NEWZAP(CSServer_t); PR_INIT_CLIST(&server->list); server->state = cs_init; server->ml = PR_NewLock(); server->backlog = backlog; server->port = DEFAULT_PORT; server->workers.minimum = workersMin; server->workers.maximum = workersMax; server->workers.accepting = accepting; server->stateChange = PR_NewCondVar(server->ml); server->pool.exiting = 
PR_NewCondVar(server->ml); server->pool.acceptComplete = PR_NewCondVar(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): creating server thread\n", PR_GetCurrentThread())); server->thread = PR_CreateThread( PR_USER_THREAD, Server, server, PR_PRIORITY_HIGH, thread_scope, PR_JOINABLE_THREAD, 0); TEST_ASSERT(NULL != server->thread); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): waiting for server init\n", PR_GetCurrentThread())); PR_Lock(server->ml); while (server->state == cs_init) PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): server init complete (port #%d)\n", PR_GetCurrentThread(), server->port)); } if (clients != 0) { /* Create all of the clients */ PRHostEnt host; char buffer[BUFFER_SIZE]; client = (CSClient_t*)PR_CALLOC(clients * sizeof(CSClient_t)); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): creating %d client threads\n", PR_GetCurrentThread(), clients)); if (!serverIsLocal) { rv = PR_GetHostByName(serverName, buffer, BUFFER_SIZE, &host); if (PR_SUCCESS != rv) { PL_FPrintError(PR_STDERR, "PR_GetHostByName"); return 2; } } for (index = 0; index < clients; ++index) { client[index].state = cs_init; client[index].ml = PR_NewLock(); if (serverIsLocal) { if (PR_AF_INET6 != domain) (void)PR_InitializeNetAddr( PR_IpAddrLoopback, DEFAULT_PORT, &client[index].serverAddress); else rv = PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6, DEFAULT_PORT, &client[index].serverAddress); } else { (void)PR_EnumerateHostEnt( 0, &host, DEFAULT_PORT, &client[index].serverAddress); } client[index].stateChange = PR_NewCondVar(client[index].ml); TEST_LOG( cltsrv_log_file, TEST_LOG_INFO, ("main(0x%p): creating client threads\n", PR_GetCurrentThread())); client[index].thread = PR_CreateThread( PR_USER_THREAD, Client, &client[index], PR_PRIORITY_NORMAL, thread_scope, PR_JOINABLE_THREAD, 0); TEST_ASSERT(NULL != client[index].thread); PR_Lock(client[index].ml); while (cs_init == client[index].state) PR_WaitCondVar(client[index].stateChange, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(client[index].ml); } } /* Then just let them go at it for a bit */ TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("main(0x%p): waiting for execution interval (%d seconds)\n", PR_GetCurrentThread(), execution)); WaitForCompletion(execution); TimeOfDayMessage("Shutting down", PR_GetCurrentThread()); if (clients != 0) { for (index = 0; index < clients; ++index) { TEST_LOG(cltsrv_log_file, TEST_LOG_STATUS, ("main(0x%p): notifying client(0x%p) to stop\n", PR_GetCurrentThread(), client[index].thread)); PR_Lock(client[index].ml); if (cs_run == client[index].state) { client[index].state = cs_stop; PR_Interrupt(client[index].thread); while (cs_stop == client[index].state) PR_WaitCondVar( client[index].stateChange, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(client[index].ml); TEST_LOG(cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): joining client(0x%p)\n", PR_GetCurrentThread(), client[index].thread)); joinStatus = PR_JoinThread(client[index].thread); TEST_ASSERT(PR_SUCCESS == joinStatus); PR_DestroyCondVar(client[index].stateChange); PR_DestroyLock(client[index].ml); } PR_DELETE(client); } if (NULL != server) { /* All clients joined - retrieve the server */ TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): notifying server(0x%p) to stop\n", PR_GetCurrentThread(), server->thread)); PR_Lock(server->ml); server->state = cs_stop; PR_Interrupt(server->thread); while (cs_exit != server->state) 
PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): joining server(0x%p)\n", PR_GetCurrentThread(), server->thread)); joinStatus = PR_JoinThread(server->thread); TEST_ASSERT(PR_SUCCESS == joinStatus); PR_DestroyCondVar(server->stateChange); PR_DestroyCondVar(server->pool.exiting); PR_DestroyCondVar(server->pool.acceptComplete); PR_DestroyLock(server->ml); PR_DELETE(server); } TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("main(0x%p): test complete\n", PR_GetCurrentThread())); PT_FPrintStats(debug_out, "\nPThread Statistics\n"); TimeOfDayMessage("Test exiting at", PR_GetCurrentThread()); PR_Cleanup(); return 0; } /* main */
void WorkerThreadFunc(void *_listenSock) { PRFileDesc *listenSock = (PRFileDesc *)_listenSock; PRInt32 bytesRead; PRInt32 bytesWritten; char *dataBuf; char *sendBuf; if (debug_mode) DPRINTF("\tServer buffer is %d bytes; %d data, %d netaddrs\n", _client_data+(2*sizeof(PRNetAddr))+32, _client_data, (2*sizeof(PRNetAddr))+32); dataBuf = (char *)PR_MALLOC(_client_data + 2*sizeof(PRNetAddr) + 32); if (!dataBuf) if (debug_mode) printf("\tServer could not malloc space!?\n"); sendBuf = (char *)PR_MALLOC(_server_data *sizeof(char)); if (!sendBuf) if (debug_mode) printf("\tServer could not malloc space!?\n"); if (debug_mode) DPRINTF("\tServer worker thread running\n"); while(1) { PRInt32 bytesToRead = _client_data; PRInt32 bytesToWrite = _server_data; PRFileDesc *newSock; PRNetAddr *rAddr; PRInt32 loops = 0; loops++; if (debug_mode) DPRINTF("\tServer thread going into accept\n"); bytesRead = PR_AcceptRead(listenSock, &newSock, &rAddr, dataBuf, bytesToRead, PR_INTERVAL_NO_TIMEOUT); if (bytesRead < 0) { if (debug_mode) printf("\tServer error in accept (%d)\n", bytesRead); continue; } if (debug_mode) DPRINTF("\tServer accepted connection (%d bytes)\n", bytesRead); PR_AtomicIncrement(&workerThreadsBusy); #ifdef SYMBIAN if (workerThreadsBusy == workerThreads && workerThreads<1) { #else if (workerThreadsBusy == workerThreads) { #endif PR_Lock(workerThreadsLock); if (workerThreadsBusy == workerThreads) { PRThread *WorkerThread; WorkerThread = PR_CreateThread( PR_SYSTEM_THREAD, WorkerThreadFunc, listenSock, PR_PRIORITY_NORMAL, ServerScope, PR_UNJOINABLE_THREAD, THREAD_STACKSIZE); if (!WorkerThread) { if (debug_mode) printf("Error creating client thread %d\n", workerThreads); } else { PR_AtomicIncrement(&workerThreads); if (debug_mode) DPRINTF("\tServer creates worker (%d)\n", workerThreads); } } PR_Unlock(workerThreadsLock); } bytesToRead -= bytesRead; while (bytesToRead) { bytesRead = PR_Recv(newSock, dataBuf, bytesToRead, 0, PR_INTERVAL_NO_TIMEOUT); if (bytesRead < 0) { if (debug_mode) printf("\tServer error receiving data (%d)\n", bytesRead); continue; } if (debug_mode) DPRINTF("\tServer received %d bytes\n", bytesRead); } bytesWritten = PR_Send(newSock, sendBuf, bytesToWrite, 0, PR_INTERVAL_NO_TIMEOUT); if (bytesWritten != _server_data) { if (debug_mode) printf("\tError sending data to client (%d, %d)\n", bytesWritten, PR_GetOSError()); } else { if (debug_mode) DPRINTF("\tServer sent %d bytes\n", bytesWritten); } PR_Close(newSock); PR_AtomicDecrement(&workerThreadsBusy); } } PRFileDesc * ServerSetup(void) { PRFileDesc *listenSocket; PRSocketOptionData sockOpt; PRNetAddr serverAddr; PRThread *WorkerThread; if ( (listenSocket = PR_NewTCPSocket()) == NULL) { if (debug_mode) printf("\tServer error creating listen socket\n"); else failed_already=1; return NULL; } sockOpt.option = PR_SockOpt_Reuseaddr; sockOpt.value.reuse_addr = PR_TRUE; if ( PR_SetSocketOption(listenSocket, &sockOpt) == PR_FAILURE) { if (debug_mode) printf("\tServer error setting socket option: OS error %d\n", PR_GetOSError()); else failed_already=1; PR_Close(listenSocket); return NULL; } memset(&serverAddr, 0, sizeof(PRNetAddr)); serverAddr.inet.family = PR_AF_INET; serverAddr.inet.port = PR_htons(PORT); serverAddr.inet.ip = PR_htonl(PR_INADDR_ANY); if ( PR_Bind(listenSocket, &serverAddr) == PR_FAILURE) { if (debug_mode) printf("\tServer error binding to server address: OS error %d\n", PR_GetOSError()); else failed_already=1; PR_Close(listenSocket); return NULL; } if ( PR_Listen(listenSocket, 128) == PR_FAILURE) { if (debug_mode) 
printf("\tServer error listening to server socket\n"); else failed_already=1; PR_Close(listenSocket); return NULL; } /* Create Clients */ workerThreads = 0; workerThreadsBusy = 0; workerThreadsLock = PR_NewLock(); WorkerThread = PR_CreateThread( PR_SYSTEM_THREAD, WorkerThreadFunc, listenSocket, PR_PRIORITY_NORMAL, ServerScope, PR_UNJOINABLE_THREAD, THREAD_STACKSIZE); if (!WorkerThread) { if (debug_mode) printf("error creating working thread\n"); PR_Close(listenSocket); return NULL; } PR_AtomicIncrement(&workerThreads); if (debug_mode) DPRINTF("\tServer created primordial worker thread\n"); return listenSocket; }
PR_CreateCounter( const char *qName, const char *rName, const char *description ) { QName *qnp; RName *rnp; PRBool matchQname = PR_FALSE; /* Self initialize, if necessary */ if ( counterLock == NULL ) _PR_CounterInitialize(); /* Validate input arguments */ PR_ASSERT( strlen(qName) <= PRCOUNTER_NAME_MAX ); PR_ASSERT( strlen(rName) <= PRCOUNTER_NAME_MAX ); PR_ASSERT( strlen(description) <= PRCOUNTER_DESC_MAX ); /* Lock the Facility */ PR_Lock( counterLock ); /* Do we already have a matching QName? */ if (!PR_CLIST_IS_EMPTY( &qNameList )) { qnp = (QName *) PR_LIST_HEAD( &qNameList ); do { if ( strcmp(qnp->name, qName) == 0) { matchQname = PR_TRUE; break; } qnp = (QName *)PR_NEXT_LINK( &qnp->link ); } while( qnp != (QName *)&qNameList ); } /* ** If we did not find a matching QName, ** allocate one and initialize it. ** link it onto the qNameList. ** */ if ( matchQname != PR_TRUE ) { qnp = PR_NEWZAP( QName ); PR_ASSERT( qnp != NULL ); PR_INIT_CLIST( &qnp->link ); PR_INIT_CLIST( &qnp->rNameList ); strcpy( qnp->name, qName ); PR_APPEND_LINK( &qnp->link, &qNameList ); } /* Do we already have a matching RName? */ if (!PR_CLIST_IS_EMPTY( &qnp->rNameList )) { rnp = (RName *) PR_LIST_HEAD( &qnp->rNameList ); do { /* ** No duplicate RNames are allowed within a QName ** */ PR_ASSERT( strcmp(rnp->name, rName)); rnp = (RName *)PR_NEXT_LINK( &rnp->link ); } while( rnp != (RName *)&qnp->rNameList ); } /* Get a new RName structure; initialize its members */ rnp = PR_NEWZAP( RName ); PR_ASSERT( rnp != NULL ); PR_INIT_CLIST( &rnp->link ); strcpy( rnp->name, rName ); strcpy( rnp->desc, description ); rnp->lock = PR_NewLock(); if ( rnp->lock == NULL ) { PR_ASSERT(0); } PR_APPEND_LINK( &rnp->link, &qnp->rNameList ); /* add RName to QName's rnList */ rnp->qName = qnp; /* point the RName to the QName */ /* Unlock the Facility */ PR_Unlock( counterLock ); PR_LOG( lm, PR_LOG_DEBUG, ("PR_Counter: Create: QName: %s %p, RName: %s %p\n\t", qName, qnp, rName, rnp )); return((PRCounterHandle)rnp); } /* end PR_CreateCounter() */
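/*
 * Editor's note: illustrative sketch only.  PR_CreateCounter() above returns
 * a PRCounterHandle keyed by the QName/RName pair.  The companion calls used
 * here (PR_IncrementCounter, PR_GetCounter) are assumed to come from the
 * same counter facility (prcountr.h), and the counter names, the lazy
 * one-shot initialization, and the helper name CountOneConnect are made up
 * for the example.
 */
#include "nspr.h"
#include "prcountr.h"

static PRCounterHandle connect_counter;

static void CountOneConnect(void)
{
    /* Lazily create the counter the first time through (not thread safe;
     * a real caller would create it once during startup). */
    if (NULL == connect_counter)
        connect_counter = PR_CreateCounter("network", "connects",
                                           "accepted connections");
    PR_IncrementCounter(connect_counter);
    PR_fprintf(PR_STDOUT, "connects so far: %u\n",
               PR_GetCounter(connect_counter));
}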