JNIEXPORT void JNICALL
Java_org_mozilla_jss_ssl_SSLSocket_abortReadWrite(JNIEnv *env, jobject self)
{
    JSSL_SocketData *sock = NULL;

    if (JSSL_getSockData(env, self, &sock) == PR_SUCCESS) {
        /*
         * The java layer prevents I/O once close has been
         * called but if an I/O operation is in progress then abort it.
         * For WINNT the read and write methods must check for the
         * PR_PENDING_INTERRUPT_ERROR and call PR_NT_CancelIo.
         */
        PR_Lock(sock->lock);
        if (sock->reader != NULL) {
            PR_Interrupt(sock->reader);
        }
        if (sock->writer != NULL) {
            PR_Interrupt(sock->writer);
        }
        /* socket is to be closed */
        sock->closePending = PR_TRUE;
        PR_Unlock(sock->lock);
    }

    /* Runs whether or not the socket data could be retrieved. */
    EXCEPTION_CHECK(env, sock)
    return;
}
/* Ask the server to shut down: flag the loop, then break the acceptor
 * thread out of its blocking accept with PR_Interrupt. */
void stop_server()
{
    stopping = 1;
    PR_Interrupt(acceptorThread);
    PZ_TraceFlush();
}
void Wakeup() { // PR_CreateThread could have failed earlier if (mHangMonitorThread) { // Use PR_Interrupt to avoid potentially taking a lock PR_Interrupt(mHangMonitorThread); } }
/* Interrupt the underlying NSPR thread; fails if it was never started. */
PRStatus RCThread::Interrupt()
{
    // An unstarted thread has no live PRThread identity to interrupt.
    if (RCThread::ex_unstarted == execution)
    {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        return PR_FAILURE;
    }
    return PR_Interrupt(identity);
}  /* RCThread::Interrupt */
PRIntn main () { PRUint32 elapsed; PRThread *thread; struct timeval timein, timeout; PRInt32 onePercent = 3000000UL / 100UL; fprintf (stderr, "First sleep will sleep 3 seconds.\n"); fprintf (stderr, " sleep 1 begin\n"); (void)GTOD(&timein); sleep (3); (void)GTOD(&timeout); fprintf (stderr, " sleep 1 end\n"); elapsed = 1000000UL * (timeout.tv_sec - timein.tv_sec); elapsed += (timeout.tv_usec - timein.tv_usec); fprintf(stderr, "elapsed %u usecs\n", elapsed); if (labs(elapsed - 3000000UL) > onePercent) rv = 1; PR_Init (PR_USER_THREAD, PR_PRIORITY_NORMAL, 100); PR_STDIO_INIT(); fprintf (stderr, "Second sleep should do the same (does it?).\n"); fprintf (stderr, " sleep 2 begin\n"); (void)GTOD(&timein); sleep (3); (void)GTOD(&timeout); fprintf (stderr, " sleep 2 end\n"); elapsed = 1000000UL * (timeout.tv_sec - timein.tv_sec); elapsed += (timeout.tv_usec - timein.tv_usec); fprintf(stderr, "elapsed %u usecs\n", elapsed); if (labs(elapsed - 3000000UL) > onePercent) rv = 1; fprintf (stderr, "What happens to other threads?\n"); fprintf (stderr, "You should see dots every quarter second.\n"); fprintf (stderr, "If you don't, you're probably running on classic NSPR.\n"); thread = PR_CreateThread( PR_USER_THREAD, Other, NULL, PR_PRIORITY_NORMAL, PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0); fprintf (stderr, " sleep 2 begin\n"); (void)GTOD(&timein); sleep (3); (void)GTOD(&timeout); fprintf (stderr, " sleep 2 end\n"); PR_Interrupt(thread); PR_JoinThread(thread); elapsed = 1000000UL * (timeout.tv_sec - timein.tv_sec); elapsed += (timeout.tv_usec - timein.tv_usec); fprintf(stderr, "elapsed %u usecs\n", elapsed); if (labs(elapsed - 3000000UL) > onePercent) rv = 1; fprintf(stderr, "%s\n", (0 == rv) ? "PASSED" : "FAILED"); return rv; }
/* Release a newly created RCThread so its entry point can run. */
PRStatus RCThread::Start()
{
    /* This is an unsafe check, but not too critical */
    if (RCThread::ex_unstarted != execution)
    {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        return PR_FAILURE;
    }
    // Mark started first, then poke the thread out of its wait.
    execution = RCThread::ex_started;
    PRStatus rv = PR_Interrupt(identity);
    PR_ASSERT(PR_SUCCESS == rv);
    return rv;
}  /* RCThread::Start */
void joinWithUnjoinable(void) { PRThread *thread; /* create the unjoinable thread */ thread = PR_CreateThread(PR_USER_THREAD, unjoinable, 0, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, 0); if (!thread) { if (debug_mode) printf("\tcannot create unjoinable thread\n"); else Test_Result(FAIL); return; } if (PR_JoinThread(thread) == PR_SUCCESS) { if (debug_mode) printf("\tsuccessfully joined with unjoinable thread?!\n"); else Test_Result(FAIL); return; } else { if (debug_mode) printf("\tcannot join with unjoinable thread, as expected\n"); if (PR_GetError() != PR_INVALID_ARGUMENT_ERROR) { if (debug_mode) printf("\tWrong error code\n"); else Test_Result(FAIL); return; } } if (PR_Interrupt(thread) == PR_FAILURE) { if (debug_mode) printf("\tcannot interrupt unjoinable thread\n"); else Test_Result(FAIL); return; } else { if (debug_mode) printf("\tinterrupted unjoinable thread\n"); } }
/*
** Context-switch benchmark. Builds a singly linked ring of Shared
** records (anchored at the global 'home'), one thread per record, all
** sharing home's lock. The main thread then ping-pongs a "twiddle"
** token through condition variables and measures the round-trip time.
** Teardown uses PR_Interrupt() to break each Notified thread out of
** its condvar wait before joining it.
*/
PRIntn PR_CALLBACK Switch(PRIntn argc, char **argv)
{
    PLOptStatus os;
    PRStatus status;
    PRBool help = PR_FALSE;
    PRUintn concurrency = 1;
    Shared *shared, *link;
    PRIntervalTime timein, timeout;
    PRThreadScope thread_scope = PR_LOCAL_THREAD;
    PRUintn thread_count, inner_count, loop_count, average;
    PRUintn thread_limit = DEFAULT_THREADS, loop_limit = DEFAULT_LOOPS;
    PLOptState *opt = PL_CreateOptState(argc, argv, "hdvc:t:C:G");
    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'v':  /* verbose mode */
            verbosity = PR_TRUE;
            /* FALLTHROUGH: verbose implies debug */
        case 'd':  /* debug mode */
            debug_mode = PR_TRUE;
            break;
        case 'c':  /* loop counter */
            loop_limit = atoi(opt->value);
            break;
        case 't':  /* thread limit */
            thread_limit = atoi(opt->value);
            break;
        case 'C':  /* Concurrency limit */
            concurrency = atoi(opt->value);
            break;
        case 'G':  /* global threads only */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'h':  /* help message */
            Help();
            help = PR_TRUE;
            break;
        default:
            break;
        }
    }
    PL_DestroyOptState(opt);

    if (help) return -1;

    if (PR_TRUE == debug_mode)
    {
        debug_out = PR_STDOUT;
        PR_fprintf(debug_out, "Test parameters\n");
        PR_fprintf(debug_out, "\tThreads involved: %d\n", thread_limit);
        PR_fprintf(debug_out, "\tIteration limit: %d\n", loop_limit);
        PR_fprintf(debug_out, "\tConcurrency: %d\n", concurrency);
        PR_fprintf(
            debug_out, "\tThread type: %s\n",
            (PR_GLOBAL_THREAD == thread_scope) ? "GLOBAL" : "LOCAL");
    }

    PR_SetConcurrency(concurrency);

    /* 'home' anchors the ring; its cv is what the workers notify back on. */
    link = &home;
    home.ml = PR_NewLock();
    home.cv = PR_NewCondVar(home.ml);
    home.twiddle = PR_FALSE;
    home.next = NULL;

    timeout = 0;

    /* Build the ring front-to-back; 'shared' ends up as the last node
     * created, and each node's 'next' points toward 'home'. */
    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        shared = PR_NEWZAP(Shared);
        shared->ml = home.ml;                 /* one lock for the whole ring */
        shared->cv = PR_NewCondVar(home.ml);
        shared->twiddle = PR_TRUE;
        shared->next = link;
        link = shared;
        shared->thread = PR_CreateThread(
            PR_USER_THREAD, Notified, shared,
            PR_PRIORITY_HIGH, thread_scope, PR_JOINABLE_THREAD, 0);
        PR_ASSERT(shared->thread != NULL);
        if (NULL == shared->thread)
            failed = PR_TRUE;
    }

    /* Timed section: hand the token to the head of the ring and wait
     * for it to come all the way back around to 'home'. */
    for (loop_count = 1; loop_count <= loop_limit; ++loop_count)
    {
        timein = PR_IntervalNow();
        for (inner_count = 0; inner_count < INNER_LOOPS; ++inner_count)
        {
            PR_Lock(home.ml);
            home.twiddle = PR_TRUE;
            shared->twiddle = PR_FALSE;
            PR_NotifyCondVar(shared->cv);
            while (home.twiddle)
            {
                status = PR_WaitCondVar(home.cv, PR_INTERVAL_NO_TIMEOUT);
                if (PR_FAILURE == status)
                    failed = PR_TRUE;
            }
            PR_Unlock(home.ml);
        }
        timeout += (PR_IntervalNow() - timein);
    }

    if (debug_mode)
    {
        /* NOTE(review): 'thread_count' here still holds the value left by
         * the creation loop (thread_limit + 1) -- confirm this divisor is
         * intended rather than thread_limit. */
        average = PR_IntervalToMicroseconds(timeout)
            / (INNER_LOOPS * loop_limit * thread_count);
        PR_fprintf(
            debug_out, "Average switch times %d usecs for %d threads\n",
            average, thread_limit);
    }

    /* Interrupt every worker so it abandons its condvar wait. */
    link = shared;
    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        if (&home == link) break;
        status = PR_Interrupt(link->thread);
        if (PR_SUCCESS != status)
        {
            failed = PR_TRUE;
            if (debug_mode)
                PL_FPrintError(debug_out, "Failed to interrupt");
        }
        link = link->next;
    }

    /* Join and free each node, walking the ring until 'home' is reached.
     * 'link' is captured before the node is destroyed. */
    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        link = shared->next;
        status = PR_JoinThread(shared->thread);
        if (PR_SUCCESS != status)
        {
            failed = PR_TRUE;
            if (debug_mode)
                PL_FPrintError(debug_out, "Failed to join");
        }
        PR_DestroyCondVar(shared->cv);
        PR_DELETE(shared);
        if (&home == link) break;
        shared = link;
    }

    PR_DestroyCondVar(home.cv);
    PR_DestroyLock(home.ml);

    PR_fprintf(PR_STDOUT, ((failed) ? "FAILED\n" : "PASSED\n"));
    return ((failed) ? 1 : 0);
}  /* Switch */
/*
** Client/server stress-test driver. Optionally starts a local server
** thread plus a configurable set of client threads, lets them exchange
** traffic for a fixed interval, then uses PR_Interrupt() to knock each
** thread out of blocking I/O and joins them all before cleanup.
*/
int main(int argc, char** argv)
{
    PRUintn index;
    PRBool boolean;
    CSClient_t *client;
    PRStatus rv, joinStatus;
    CSServer_t *server = NULL;

    PRUintn backlog = DEFAULT_BACKLOG;
    PRUintn clients = DEFAULT_CLIENTS;
    const char *serverName = DEFAULT_SERVER;
    PRBool serverIsLocal = PR_TRUE;
    PRUintn accepting = ALLOWED_IN_ACCEPT;
    PRUintn workersMin = DEFAULT_WORKERS_MIN;
    PRUintn workersMax = DEFAULT_WORKERS_MAX;
    PRIntn execution = DEFAULT_EXECUTION_TIME;
    PRIntn low = DEFAULT_LOW, high = DEFAULT_HIGH;

    /*
    ** Command-line options:
    ** -G use global threads
    ** -a <n> threads allowed in accept
    ** -b <n> backlock for listen
    ** -c <threads> number of clients to create
    ** -f <low> low water mark for caching FDs
    ** -F <high> high water mark for caching FDs
    ** -w <threads> minimal number of server threads
    ** -W <threads> maximum number of server threads
    ** -e <seconds> duration of the test in seconds
    ** -s <string> dsn name of server (implies no server here)
    ** -v verbosity
    */
    PLOptStatus os;
    PLOptState *opt = PL_CreateOptState(argc, argv, "GX6b:a:c:f:F:w:W:e:s:vdhp");

    debug_out = PR_GetSpecialFD(PR_StandardError);

    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* use global threads */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'X':  /* use XTP as transport */
            protocol = 36;
            break;
        case '6':  /* Use IPv6 */
            domain = PR_AF_INET6;
            break;
        case 'a':  /* the value for accepting */
            accepting = atoi(opt->value);
            break;
        case 'b':  /* the value for backlock */
            backlog = atoi(opt->value);
            break;
        case 'c':  /* number of client threads */
            clients = atoi(opt->value);
            break;
        case 'f':  /* low water fd cache */
            low = atoi(opt->value);
            break;
        case 'F':  /* high water fd cache */
            high = atoi(opt->value);
            break;
        case 'w':  /* minimum server worker threads */
            workersMin = atoi(opt->value);
            break;
        case 'W':  /* maximum server worker threads */
            workersMax = atoi(opt->value);
            break;
        case 'e':  /* program execution time in seconds */
            execution = atoi(opt->value);
            break;
        case 's':  /* server's address */
            serverName = opt->value;
            break;
        case 'v':  /* verbosity */
            verbosity = IncrementVerbosity();
            break;
        case 'd':  /* debug mode */
            debug_mode = PR_TRUE;
            break;
        case 'p':  /* pthread mode */
            pthread_stats = PR_TRUE;
            break;
        case 'h':
        default:
            Help();
            return 2;
        }
    }
    PL_DestroyOptState(opt);

    /* A non-default server name means the server runs on another host. */
    if (0 != PL_strcmp(serverName, DEFAULT_SERVER)) serverIsLocal = PR_FALSE;

    /* Clamp zero-valued options back to their defaults. */
    if (0 == execution) execution = DEFAULT_EXECUTION_TIME;
    if (0 == workersMax) workersMax = DEFAULT_WORKERS_MAX;
    if (0 == workersMin) workersMin = DEFAULT_WORKERS_MIN;
    if (0 == accepting) accepting = ALLOWED_IN_ACCEPT;
    if (0 == backlog) backlog = DEFAULT_BACKLOG;

    if (workersMin > accepting) accepting = workersMin;

    PR_STDIO_INIT();
    TimeOfDayMessage("Client/Server started at", PR_GetCurrentThread());

    cltsrv_log_file = PR_NewLogModule("cltsrv_log");
    MY_ASSERT(NULL != cltsrv_log_file);
    boolean = PR_SetLogFile("cltsrv.log");
    MY_ASSERT(boolean);

    rv = PR_SetFDCacheSize(low, high);
    PR_ASSERT(PR_SUCCESS == rv);

    if (serverIsLocal)
    {
        /* Establish the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_INFO,
            ("main(0x%p): starting server\n", PR_GetCurrentThread()));

        server = PR_NEWZAP(CSServer_t);
        PR_INIT_CLIST(&server->list);
        server->state = cs_init;
        server->ml = PR_NewLock();
        server->backlog = backlog;
        server->port = DEFAULT_PORT;
        server->workers.minimum = workersMin;
        server->workers.maximum = workersMax;
        server->workers.accepting = accepting;
        server->stateChange = PR_NewCondVar(server->ml);
        server->pool.exiting = PR_NewCondVar(server->ml);
        server->pool.acceptComplete = PR_NewCondVar(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("main(0x%p): creating server thread\n", PR_GetCurrentThread()));

        server->thread = PR_CreateThread(
            PR_USER_THREAD, Server, server, PR_PRIORITY_HIGH,
            thread_scope, PR_JOINABLE_THREAD, 0);
        TEST_ASSERT(NULL != server->thread);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): waiting for server init\n", PR_GetCurrentThread()));

        /* Block until the server thread signals it has left cs_init. */
        PR_Lock(server->ml);
        while (server->state == cs_init)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): server init complete (port #%d)\n",
            PR_GetCurrentThread(), server->port));
    }

    if (clients != 0)
    {
        /* Create all of the clients */
        PRHostEnt host;
        char buffer[BUFFER_SIZE];
        client = (CSClient_t*)PR_CALLOC(clients * sizeof(CSClient_t));

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): creating %d client threads\n",
            PR_GetCurrentThread(), clients));

        /* Resolve the remote server name once, up front. */
        if (!serverIsLocal)
        {
            rv = PR_GetHostByName(serverName, buffer, BUFFER_SIZE, &host);
            if (PR_SUCCESS != rv)
            {
                PL_FPrintError(PR_STDERR, "PR_GetHostByName");
                return 2;
            }
        }

        for (index = 0; index < clients; ++index)
        {
            client[index].state = cs_init;
            client[index].ml = PR_NewLock();
            if (serverIsLocal)
            {
                /* Loopback address; IPv6 requires the PR_SetNetAddr form. */
                if (PR_AF_INET6 != domain)
                    (void)PR_InitializeNetAddr(
                        PR_IpAddrLoopback, DEFAULT_PORT,
                        &client[index].serverAddress);
                else
                    rv = PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6,
                        DEFAULT_PORT, &client[index].serverAddress);
            }
            else
            {
                (void)PR_EnumerateHostEnt(
                    0, &host, DEFAULT_PORT, &client[index].serverAddress);
            }
            client[index].stateChange = PR_NewCondVar(client[index].ml);
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_INFO,
                ("main(0x%p): creating client threads\n", PR_GetCurrentThread()));
            client[index].thread = PR_CreateThread(
                PR_USER_THREAD, Client, &client[index], PR_PRIORITY_NORMAL,
                thread_scope, PR_JOINABLE_THREAD, 0);
            TEST_ASSERT(NULL != client[index].thread);
            /* Wait for this client to leave cs_init before starting the next. */
            PR_Lock(client[index].ml);
            while (cs_init == client[index].state)
                PR_WaitCondVar(client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            PR_Unlock(client[index].ml);
        }
    }

    /* Then just let them go at it for a bit */
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("main(0x%p): waiting for execution interval (%d seconds)\n",
        PR_GetCurrentThread(), execution));

    WaitForCompletion(execution);

    TimeOfDayMessage("Shutting down", PR_GetCurrentThread());

    if (clients != 0)
    {
        for (index = 0; index < clients; ++index)
        {
            TEST_LOG(cltsrv_log_file, TEST_LOG_STATUS,
                ("main(0x%p): notifying client(0x%p) to stop\n",
                PR_GetCurrentThread(), client[index].thread));

            PR_Lock(client[index].ml);
            if (cs_run == client[index].state)
            {
                /* PR_Interrupt() breaks the client out of blocking I/O;
                 * then wait for it to acknowledge the stop request. */
                client[index].state = cs_stop;
                PR_Interrupt(client[index].thread);
                while (cs_stop == client[index].state)
                    PR_WaitCondVar(
                        client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            }
            PR_Unlock(client[index].ml);

            TEST_LOG(cltsrv_log_file, TEST_LOG_VERBOSE,
                ("main(0x%p): joining client(0x%p)\n",
                PR_GetCurrentThread(), client[index].thread));

            joinStatus = PR_JoinThread(client[index].thread);
            TEST_ASSERT(PR_SUCCESS == joinStatus);
            PR_DestroyCondVar(client[index].stateChange);
            PR_DestroyLock(client[index].ml);
        }
        PR_DELETE(client);
    }

    if (NULL != server)
    {
        /* All clients joined - retrieve the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("main(0x%p): notifying server(0x%p) to stop\n",
            PR_GetCurrentThread(), server->thread));

        PR_Lock(server->ml);
        server->state = cs_stop;
        PR_Interrupt(server->thread);
        while (cs_exit != server->state)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("main(0x%p): joining server(0x%p)\n",
            PR_GetCurrentThread(), server->thread));
        joinStatus = PR_JoinThread(server->thread);
        TEST_ASSERT(PR_SUCCESS == joinStatus);

        PR_DestroyCondVar(server->stateChange);
        PR_DestroyCondVar(server->pool.exiting);
        PR_DestroyCondVar(server->pool.acceptComplete);
        PR_DestroyLock(server->ml);
        PR_DELETE(server);
    }

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("main(0x%p): test complete\n", PR_GetCurrentThread()));

    PT_FPrintStats(debug_out, "\nPThread Statistics\n");

    TimeOfDayMessage("Test exiting at", PR_GetCurrentThread());
    PR_Cleanup();
    return 0;
}  /* main */
/*
** Server thread: binds and listens on the test port, seeds the first
** worker (which accepts and services connections), then parks on the
** state condvar until the main driver requests a stop. Teardown
** interrupts every worker out of blocking I/O and waits for the pool
** to drain before reporting cs_exit.
*/
static void PR_CALLBACK Server(void *arg)
{
    PRStatus rv;
    PRNetAddr serverAddress;
    PRThread *me = PR_GetCurrentThread();
    CSServer_t *server = (CSServer_t*)arg;
    PRSocketOptionData sockOpt;

    server->listener = PR_Socket(domain, SOCK_STREAM, protocol);

    /* Allow quick restarts of the test on the same port. */
    sockOpt.option = PR_SockOpt_Reuseaddr;
    sockOpt.value.reuse_addr = PR_TRUE;
    rv = PR_SetSocketOption(server->listener, &sockOpt);
    TEST_ASSERT(PR_SUCCESS == rv);

    memset(&serverAddress, 0, sizeof(serverAddress));
    /* Wildcard bind; IPv6 requires the PR_SetNetAddr form. */
    if (PR_AF_INET6 != domain)
        rv = PR_InitializeNetAddr(PR_IpAddrAny, DEFAULT_PORT, &serverAddress);
    else
        rv = PR_SetNetAddr(PR_IpAddrAny, PR_AF_INET6, DEFAULT_PORT,
            &serverAddress);
    rv = PR_Bind(server->listener, &serverAddress);
    TEST_ASSERT(PR_SUCCESS == rv);

    rv = PR_Listen(server->listener, server->backlog);
    TEST_ASSERT(PR_SUCCESS == rv);

    server->started = PR_IntervalNow();
    TimeOfDayMessage("Server started at", me);

    /* Announce cs_run so main() can stop waiting for server init. */
    PR_Lock(server->ml);
    server->state = cs_run;
    PR_NotifyCondVar(server->stateChange);
    PR_Unlock(server->ml);

    /*
    ** Create the first worker (actually, a thread that accepts
    ** connections and then processes the work load as needed).
    ** From this point on, additional worker threads are created
    ** as they are needed by existing worker threads.
    */
    rv = CreateWorker(server, &server->pool);
    TEST_ASSERT(PR_SUCCESS == rv);

    /*
    ** From here on this thread is merely hanging around as the contact
    ** point for the main test driver. It's just waiting for the driver
    ** to declare the test complete.
    */
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_VERBOSE,
        ("\tServer(0x%p): waiting for state change\n", me));

    PR_Lock(server->ml);
    while ((cs_run == server->state) && !Aborted(rv))
    {
        rv = PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(server->ml);
    /* Clear any interrupt left over from main()'s PR_Interrupt() so the
     * teardown below can use blocking primitives normally. */
    PR_ClearInterrupt();

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_INFO,
        ("\tServer(0x%p): shutting down workers\n", me));

    /*
    ** Get all the worker threads to exit. They know how to
    ** clean up after themselves, so this is just a matter of
    ** waiting for chlorine in the pool to take effect. During
    ** this stage we're ignoring interrupts.
    */
    server->workers.minimum = server->workers.maximum = 0;

    PR_Lock(server->ml);
    while (!PR_CLIST_IS_EMPTY(&server->list))
    {
        PRCList *head = PR_LIST_HEAD(&server->list);
        CSWorker_t *worker = (CSWorker_t*)head;
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("\tServer(0x%p): interrupting worker(0x%p)\n", me, worker));
        rv = PR_Interrupt(worker->thread);
        TEST_ASSERT(PR_SUCCESS == rv);
        PR_REMOVE_AND_INIT_LINK(head);
    }

    /* Workers signal pool.exiting as they terminate. */
    while (server->pool.workers > 0)
    {
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("\tServer(0x%p): waiting for %u workers to exit\n",
            me, server->pool.workers));
        (void)PR_WaitCondVar(server->pool.exiting, PR_INTERVAL_NO_TIMEOUT);
    }

    server->state = cs_exit;
    PR_NotifyCondVar(server->stateChange);
    PR_Unlock(server->ml);

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("\tServer(0x%p): stopped after %u operations and %u bytes\n",
        me, server->operations, server->bytesTransferred));

    if (NULL != server->listener) PR_Close(server->listener);
    server->stopped = PR_IntervalNow();
}  /* Server */
/* Request termination of a system thread by interrupting it out of any
 * blocking NSPR operation. SYS_THREAD is an opaque alias for PRThread*. */
NSAPI_PUBLIC void systhread_terminate(SYS_THREAD thr)
{
    PRThread *target = (PRThread *)thr;
    PR_Interrupt(target);
}