// Thread entry point: connects locally and creates the test subscription
// instance in the interop namespace.  A CIM_ERR_ALREADY_EXISTS or
// CIM_ERR_FAILED response is an expected race outcome when several threads
// run this concurrently, and only bumps exceptionCount.
// Fix: removed the unused locals my_handle, cimExceptionCaught and
// theCIMException that were declared but never read or written.
ThreadReturnType PEGASUS_THREAD_CDECL createSubscriptionFunc(void *parm)
{
    (void)parm;  // thread handle not needed by this routine
    CIMClient client;
    try
    {
        client.connectLocal();
        SubscriptionPath = CreateSbscriptionInstance(
            client, HandlerPath, FilterPath, PEGASUS_NAMESPACENAME_INTEROP);
    }
    catch (CIMException& e)
    {
        // Another thread may have created the subscription first.
        PEGASUS_TEST_ASSERT(e.getCode() == CIM_ERR_ALREADY_EXISTS ||
            e.getCode() == CIM_ERR_FAILED);
        exceptionCount++;
    }
    catch (Exception& e)
    {
        cout << e.getMessage() << endl;
    }
    return ThreadReturnType(0);
}
// Consumer thread: pulls ITERATIONS messages off the shared queue,
// checks each one arrived, and frees it.
static ThreadReturnType PEGASUS_THREAD_CDECL _reader(void* self_)
{
    Thread* thread = (Thread*)self_;
    Queue* q = (Queue*)thread->get_parm();

    Uint32 i = 0;
    while (i < ITERATIONS)
    {
        TestMessage* msg = q->dequeue_wait();
        PEGASUS_TEST_ASSERT(msg);

        // Progress report every 1000 messages.
        if (verbose && (((i + 1) % 1000) == 0))
            printf("iterations: %05u\n", msg->x);

        // The following was a noted issue for earlier versions of
        // Solaris (ex. 5.8).  It has been commented out for later
        // versions (ex. 10 and 11).  NOTE: if we are to guarantee
        // compatibility with earlier versions this may have to be
        // reinstated for those versions (running as root reportedly
        // kept the thread from being scheduled out).
        //#ifdef PEGASUS_OS_SOLARIS
        // Threads::yield();
        //#endif

        delete msg;
        ++i;
    }
    return ThreadReturnType(0);
}
// Producer thread: generates `count` Fibonacci terms (seeded from the
// parmdef) and enqueues each as a Message.  Runs with count < 20 offset
// their message type by 100; only non-offset runs call thread_switch().
ThreadReturnType PEGASUS_THREAD_CDECL fibonacci(void * parm)
{
    Thread* self = (Thread *)parm;
    parmdef* args = (parmdef *)self->get_parm();

    int prev = args->first;
    int curr = args->second;
    const int iterations = args->count;
    MessageQueue* queue = args->mq;

    // Let the spawning thread know this producer is running.
    args->cond_start->signal();

    const int typeOffset = (iterations < 20) ? 100 : 0;

    for (int i = 0; i < iterations; i++)
    {
        const int next = prev + curr;
        prev = curr;
        curr = next;
        queue->enqueue(new Message(i + typeOffset, 0, next));
    }

    if (typeOffset == 0)
        args->th->thread_switch();

    return ThreadReturnType(0);
}
// Producer thread: enqueues ITERATIONS TestMessages, each tagged with
// its sequence number.
static ThreadReturnType PEGASUS_THREAD_CDECL _writer(void* self_)
{
    Thread* thread = (Thread*)self_;
    Queue* q = (Queue*)thread->get_parm();

    Uint32 i = 0;
    while (i < ITERATIONS)
    {
        q->enqueue(new TestMessage(i));

        // The following was a noted issue for earlier versions of
        // Solaris (ex. 5.8).  It has been commented out for later
        // versions (ex. 10 and 11).  NOTE: if we are to guarantee
        // compatibility with earlier versions this may have to be
        // reinstated for those versions (running as root reportedly
        // kept the thread from being scheduled out).
        //#ifdef PEGASUS_OS_SOLARIS
        // Threads::yield();
        //#endif

        ++i;
    }
    return ThreadReturnType(0);
}
// The following thread trys to get the lock on an already reserved semaphore // means the thread will just deadlock and wait ThreadReturnType PEGASUS_THREAD_CDECL testDeadlockThread( void* parm ) { // Lock the semaphore the deadlocked thread will wait for if (verbose) cout << "DeadLock Thread going to lock Semaphore" << endl; deadLockSemaphore.lock(); if (verbose) cout << "DeadLock Thread waiting for Condition" << endl; deadLockCondition.wait(deadLockSemaphore); if (verbose) cout << "DeadLock Thread got Semaphore free signal" << endl; if (verbose) cout << "This should not ever happen..." << endl; abort(); return ThreadReturnType(52); }
// Exercises AtomicInt from a worker thread: five increments, three
// decrements (net +2), then one decAndTestIfZero which must not report
// that the counter reached zero.
ThreadReturnType PEGASUS_THREAD_CDECL atomicIncrement(void * parm)
{
    Thread* self = (Thread *)parm;
    AtomicInt* counter = (AtomicInt *)self->get_parm();

    for (int up = 0; up < 5; up++)
        (*counter)++;
    for (int down = 0; down < 3; down++)
        (*counter)--;

    Boolean reachedZero = counter->decAndTestIfZero();
    PEGASUS_TEST_ASSERT(reachedZero == false);

    return ThreadReturnType(0);
}
// Idle-connection test thread: connects, performs one operation, then
// sits idle until the companion running thread has counted down
// durationSeconds, and finally issues a second operation.  The client
// shall reconnect if the connection was closed while idle, so the
// second operation should succeed.
ThreadReturnType PEGASUS_THREAD_CDECL _idleThd(void *parm)
{
    Thread *self = (Thread *)parm;
    AutoPtr<T_Parms> args((T_Parms *)self->get_parm());
    Uint32 durationSeconds = args->durationSeconds;
    const char *testUserid = args->testUserid;
    const char *testPasswd = args->testPasswd;

    CIMClient client;
    try
    {
        // Local connection unless explicit credentials were supplied.
        if (testUserid == NULL)
        {
            client.connectLocal();
        }
        else
        {
            client.connect(System::getHostName(), 5988,
                String(testUserid), String(testPasswd));
        }

        // First operation over the fresh connection.
        CIMClass tmpClass = client.getClass(OSINFO_NAMESPACE, OSINFO_CLASSNAME);

        cout << "Test 2 of 2: Begin " << durationSeconds
             << " second idle period..." << endl;

        // Idle until the running thread has slept durationSeconds times.
        while (sleepIterations.get() < durationSeconds)
        {
            Threads::sleep(1000);
        }

        // Second operation after the idle period.
        CIMClass tmpClass2 =
            client.getClass(OSINFO_NAMESPACE, OSINFO_CLASSNAME);
    }
    catch (Exception& e)
    {
        test2CaughtException = true;
        cerr << "Error: " << e.getMessage() << endl;
    }
    client.disconnect();
    return ThreadReturnType(0);
}
// Consumer thread: drains the message queue (busy-polling when empty)
// until a CLOSE_CONNECTION_MESSAGE arrives, deleting every message.
// Fix: removed the unused locals first, second and count, which were
// copied out of the parmdef but never read afterwards.
ThreadReturnType PEGASUS_THREAD_CDECL deq(void * parm)
{
    Thread* my_thread = (Thread *)parm;
    parmdef * Parm = (parmdef *)my_thread->get_parm();
    MessageQueue * mq = Parm->mq;

    // Let the spawning thread know this consumer is running.
    Parm->cond_start->signal();

    MessageType type = 0;
    while (type != CLOSE_CONNECTION_MESSAGE)
    {
        Message * message = mq->dequeue();
        // Queue may be momentarily empty; spin until a message appears.
        while (!message)
        {
            message = mq->dequeue();
        }
        type = message->getType();
        delete message;
    }

    if (verbose)
    {
#if defined (PEGASUS_OS_VMS)
        //
        // Threads::self returns long-long-unsigned.
        //
        printf("Received Cancel Message, %llu about to end\n",
            Threads::self());
#else
        cout << "Received Cancel Message, " << Threads::self()
             << " about to end\n";
#endif
    }
    return ThreadReturnType(0);
}
// Server thread: constructs the test_async_queue server and spins
// (yielding) until the server's _die_now flag is raised, then tears it
// down.
// Fix: removed the unused local `myself` (the cast of parm was never
// used).
ThreadReturnType PEGASUS_THREAD_CDECL server_func (void *parm)
{
    (void)parm;  // thread handle not needed by this routine

    test_async_queue *server =
        new test_async_queue (test_async_queue::SERVER);

    // Spin until some other thread raises the shutdown flag.
    while (server->_die_now.get () < 1)
    {
        Threads::yield ();
    }

    if (verbose)
        cout << "server shutting down" << endl;

    delete server;
    return ThreadReturnType(0);
}
// Thread function for testMultipleThreads. simply tests the // thread local data validity and derefences the data // ThreadReturnType PEGASUS_THREAD_CDECL testMultipleThread( void* parm ) { Thread* thread = (Thread*)parm; TestThreadData* data = (TestThreadData*)thread->reference_tsd( TSD_RESERVED_1); PEGASUS_TEST_ASSERT (data != NULL); PEGASUS_TEST_ASSERT (data->chars[0] == 'B'); PEGASUS_TEST_ASSERT (data->chars[1] == 'E'); PEGASUS_TEST_ASSERT (data->lInteger == 3456); PEGASUS_TEST_ASSERT (data->lArray.size() == 2); PEGASUS_TEST_ASSERT (data->lArray[0] == 1); PEGASUS_TEST_ASSERT (data->lArray[1] == 9999); thread->dereference_tsd(); return ThreadReturnType(32); }
// Thread entry point: processes one queued CIM request and writes the
// response via the owning ProviderAgent.  'arg' is a heap-allocated
// ProviderAgentRequest whose ownership passes to this function.
// Exceptions are traced and swallowed so nothing escapes the thread.
ThreadReturnType PEGASUS_THREAD_CDECL
ProviderAgent::_processRequestAndWriteResponse(void* arg)
{
    PEG_METHOD_ENTER(TRC_PROVIDERAGENT,
        "ProviderAgent::_processRequestAndWriteResponse");

    // Take ownership of the argument so it is released on every path.
    AutoPtr<ProviderAgentRequest> agentRequest(
        reinterpret_cast<ProviderAgentRequest*>(arg));
    PEGASUS_ASSERT(agentRequest.get() != 0);

    try
    {
        // Get the ProviderAgent and request message from the argument.
        // The request message ownership moves into this AutoPtr.
        ProviderAgent* agent = agentRequest->agent;
        AutoPtr<CIMRequestMessage> request(agentRequest->request);

        // Propagate the caller's accept-languages to this thread.
        const AcceptLanguageListContainer acceptLang =
            request->operationContext.get(AcceptLanguageListContainer::NAME);
        Thread::setLanguages(acceptLang.getLanguages());

        // Process the request
        AutoPtr<Message> response(agent->_processRequest(request.get()));

        // Write the response
        agent->_writeResponse(response.get());
    }
    catch (const Exception& e)
    {
        PEG_TRACE((TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Exiting _processRequestAndWriteResponse. Caught Exception: %s",
            (const char*)e.getMessage().getCString()));
    }
    catch (...)
    {
        PEG_TRACE_CSTRING(TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Caught unrecognized exception. "
            "Exiting _processRequestAndWriteResponse.");
    }

    PEG_METHOD_EXIT();
    return(ThreadReturnType(0));
}
// Running-connection test thread: once per second for durationSeconds+1
// iterations it bumps the shared sleep counter, opens a connection,
// issues one getClass, and disconnects again.  A failing getClass is
// reported and rethrown (terminating the thread).
ThreadReturnType PEGASUS_THREAD_CDECL _runningThd(void *parm)
{
    Thread *self = (Thread *)parm;
    AutoPtr<T_Parms> args((T_Parms *)self->get_parm());
    Uint32 durationSeconds = args->durationSeconds;
    const char *testUserid = args->testUserid;
    const char *testPasswd = args->testPasswd;

    CIMClient client;
    for (Uint32 tick = 0; tick <= durationSeconds; tick++)
    {
        Threads::sleep(1000);
        sleepIterations++;

        // Local connection unless explicit credentials were supplied.
        if (testUserid == NULL)
        {
            client.connectLocal();
        }
        else
        {
            client.connect(System::getHostName(), 5988,
                String(testUserid), String(testPasswd));
        }

        try
        {
            CIMClass tmpClass =
                client.getClass(OSINFO_NAMESPACE, OSINFO_CLASSNAME);
        }
        catch (Exception& e)
        {
            cerr << "Error: " << e.getMessage() << endl;
            throw;
        }

        client.disconnect();
    }
    return ThreadReturnType(0);
}
// Writer thread for the read/write-semaphore stress test: repeatedly
// acquires and releases the write lock (sleeping 1 ms while holding it)
// until the global `die` flag is set.  Any lock failure aborts.
ThreadReturnType PEGASUS_THREAD_CDECL writing_thread(void *parm)
{
    Thread *self = (Thread *)parm;
    ReadWriteSem *rwSem = (ReadWriteSem *)self->get_parm();
    ThreadType me = Threads::self();

    if (verbose)
        cout << "w";

    while (!die)
    {
        try
        {
            rwSem->waitWrite();
        }
        catch (...)
        {
            cout << "Exception while trying to get a write lock" << endl;
            abort();
        }

        write_count++;
        if (verbose)
            cout << "*";

        // Hold the lock briefly so readers contend with us.
        self->sleep(1);

        try
        {
            rwSem->unlockWrite();
        }
        catch (...)
        {
            cout << "Exception while trying to release a write lock:"
                 << Threads::id(me).buffer << endl;
            abort();
        }
    }
    return ThreadReturnType(0);
}
// Test-driver thread: sends indicationSendCount test indications and
// reports the elapsed time under a per-thread unique id suffix.
// Fix: the id buffer was char[4], which sprintf overflowed for any id
// with more than three decimal digits; it is now large enough for any
// 32-bit value and written with the bounded snprintf.
ThreadReturnType PEGASUS_THREAD_CDECL _executeTests(void *parm)
{
    Thread *my_thread = (Thread *)parm;
    AutoPtr<T_Parms> parms((T_Parms *)my_thread->get_parm());
    CIMClient* client = parms->client;
    Uint32 indicationSendCount = parms->indicationSendCount;
    Uint32 id = parms->uniqueID;

    // Build a unique "_<id>" suffix for this thread's results.
    char id_[12];  // fits any 32-bit unsigned value plus NUL
    snprintf(id_, sizeof(id_), "%u", id);
    String uniqueID = "_";
    uniqueID.append(id_);

    try
    {
        Stopwatch elapsedTime;
        elapsedTime.start();
        try
        {
            _sendTestIndication(
                client, CIMName ("SendTestIndicationTrap"),
                indicationSendCount);
        }
        catch (Exception & e)
        {
            cerr << "----- sendTestIndication failed: "
                 << e.getMessage () << endl;
            exit (-1);
        }
        elapsedTime.stop();
        _testEnd(uniqueID, elapsedTime.getElapsed());
    }
    catch (Exception & e)
    {
        cout << e.getMessage() << endl;
    }
    return ThreadReturnType(0);
}
// Thread entry point that asks the provider manager router to unload
// idle providers.  Declared throw() because no exception may escape a
// thread routine: the real work is wrapped in a nested try/catch, and
// even the tracing in the outer handler is guarded.  On every path the
// _unloadIdleProvidersBusy counter is decremented once the inner block
// completes.
ThreadReturnType PEGASUS_THREAD_CDECL
ProviderAgent::_unloadIdleProvidersHandler(void* arg) throw()
{
    try
    {
        PEG_METHOD_ENTER(TRC_PROVIDERAGENT,
            "ProviderAgent::unloadIdleProvidersHandler");

        ProviderAgent* myself = reinterpret_cast<ProviderAgent*>(arg);

        try
        {
            myself->_providerManagerRouter.unloadIdleProviders();
        }
        catch (...)
        {
            // Ignore errors
            PEG_TRACE_CSTRING(TRC_PROVIDERAGENT, Tracer::LEVEL2,
                "Unexpected exception in _unloadIdleProvidersHandler");
        }

        // Signal that this handler is no longer busy (decremented even
        // if the unload above failed).
        myself->_unloadIdleProvidersBusy--;
    }
    catch (...)
    {
        // Ignore errors
        try
        {
            PEG_TRACE_CSTRING(TRC_PROVIDERAGENT, Tracer::LEVEL2,
                "Unexpected exception in _unloadIdleProvidersHandler");
        }
        catch (...)
        {
        }
    }

    // PEG_METHOD_EXIT();    // Note: This statement could throw an exception

    return(ThreadReturnType(0));
}
// Client worker thread: builds the connection parameters from the
// MTTestClient static configuration (host, port, SSL, credentials),
// connects (locally, over SSL, or plain), enumerates instance names of
// CIM_ManagedElement, and disconnects.  All failures are reported via
// the Exception handler at the bottom; the thread always returns 0.
ThreadReturnType PEGASUS_THREAD_CDECL test_client(void *parm)
{
    CIMClient client;
    Thread* myHandle = (Thread *)parm;
    String host = String ();
    Uint32 portNumber = 0;
    Boolean connectToLocal = false;

    //
    // Construct host address
    //
    try
    {
        // If no connection parameter was given on the command line,
        // use a local connection.
        if ((!MTTestClient::_hostNameSet) &&
            (!MTTestClient::_portNumberSet) &&
            (!MTTestClient::_userNameSet) &&
            (!MTTestClient::_passwordSet))
        {
            connectToLocal = true;
        }
        else
        {
            // Fill in any missing host/port values with defaults.
            if (!MTTestClient::_hostNameSet)
            {
                MTTestClient::_hostName = System::getHostName();
            }
            if( !MTTestClient::_portNumberSet )
            {
                // Look up the WBEM port matching the chosen scheme.
                if( MTTestClient::_useSSL )
                {
                    MTTestClient::_portNumber = System::lookupPort(
                        WBEM_HTTPS_SERVICE_NAME, WBEM_DEFAULT_HTTPS_PORT );
                }
                else
                {
                    MTTestClient::_portNumber = System::lookupPort(
                        WBEM_HTTP_SERVICE_NAME, WBEM_DEFAULT_HTTP_PORT );
                }
                // Keep the string form in sync with the numeric port.
                char buffer[32];
                sprintf( buffer, "%lu",
                    (unsigned long) MTTestClient::_portNumber );
                MTTestClient::_portNumberStr = buffer;
            }
        }

        host = MTTestClient::_hostName;
        portNumber = MTTestClient::_portNumber;

        if( connectToLocal )
        {
            client.connectLocal();
        }
        else if( MTTestClient::_useSSL )
        {
            //
            // Get environment variables:
            //
            const char* pegasusHome = getenv("PEGASUS_HOME");

            // Certificate and random file live under PEGASUS_HOME.
            String certpath = FileSystem::getAbsolutePath(
                pegasusHome, PEGASUS_SSLCLIENT_CERTIFICATEFILE);

            String randFile;
            randFile = FileSystem::getAbsolutePath(
                pegasusHome, PEGASUS_SSLCLIENT_RANDOMFILE);

            SSLContext sslcontext (certpath, verifyCertificate, randFile);

            // Prompt for any missing credentials.
            if (!MTTestClient::_userNameSet)
            {
                MTTestClient::_userName = System::getEffectiveUserName();
            }

            if (!MTTestClient::_passwordSet)
            {
                MTTestClient::_password =
                    MTTestClient::_promptForPassword( cout );
            }
            client.connect(host, portNumber, sslcontext,
                MTTestClient::_userName, MTTestClient::_password );
        }
        else
        {
            if (!MTTestClient::_passwordSet)
            {
                MTTestClient::_password =
                    MTTestClient::_promptForPassword( cout );
            }
            client.connect(host, portNumber,
                MTTestClient::_userName, MTTestClient::_password );
        }

        // Enumerate Instances.
        Array<CIMObjectPath> instanceNames =
            client.enumerateInstanceNames( NAMESPACE, "CIM_ManagedElement");

#ifdef DEBUG
        if ( instanceNames.size() == 0 )
        {
            PEGASUS_STD(cout)
                << "<<<<<<<<<<<<< No Instances Found >>>>>>>>>>>" << endl;
        }
        else
        {
            PEGASUS_STD(cout) << "<<<<<<<<<<<<< Instances Found : "
                << instanceNames.size() << ">>>>>>>>>>>" << endl;
        }
        PEGASUS_STD(cout) << endl
            << "++++++++ Completed Operation +++++++++ " << endl;
#endif

        client.disconnect();

#ifdef DEBUG
        PEGASUS_STD(cout) << endl
            << "++++++++ Completed Disconnect +++++++++ " << endl;
#endif
    }
    catch(const Exception& e)
    {
        PEGASUS_STD(cout) << "Error: " << e.getMessage() << endl;
    }
    return ThreadReturnType(0);
}
// Client thread for the async-queue stress test: locates the server
// queue, fires 10000 async CIM_GET_INSTANCE_REQUEST messages at it,
// waits until the handler has counted them all, sends a CimServiceStop,
// and finally waits for the server queue to disappear.
// Fixes: removed the verbatim copy-paste duplicate of the
// "Waiting until all messages are flushed" message/spin-loop (the second
// copy could never iterate because the count had already been reached),
// and removed the unused local `myself`.
ThreadReturnType PEGASUS_THREAD_CDECL client_func (void *parm)
{
    (void)parm;  // thread handle not needed by this routine

    test_async_queue *client =
        new test_async_queue (test_async_queue::CLIENT);

    // find the server
    MessageQueue *serverQueue = 0;
    while (serverQueue == 0)
    {
        serverQueue = MessageQueue::lookup("server");
        // It is a good idea to yield to other threads. You should do this,
        // but this test-case stresses situations in which does not happen.
        //Threads::yield ();
    }

    if (verbose)
    {
        cout << "testing low-level async send " << endl;
    }

    Uint32 requestCount = 0;
    while (requestCount < 10000)
    {
        // The problem on multi-processor machines is that if we make it
        // continue on sending the messages, and the MessageQueueService
        // does not get to pick up the messages, the machine can crawl to
        // halt with about 300-400 threads and ever-continuing number of
        // them created. This is an evil stress test so lets leave it
        // behind.
        try
        {
            Message *cim_rq = new Message (CIM_GET_INSTANCE_REQUEST_MESSAGE);
            AsyncOpNode *op = client->get_op();
            // NOTE(review): the AsyncOperationStart constructor appears to
            // attach the request to `op`; the pointer itself is not used
            // again here.
            AsyncOperationStart *async_rq = new AsyncOperationStart(
                op, serverQueue->getQueueId(), client->getQueueId(),
                false, cim_rq);
            client->SendAsync(
                op, serverQueue->getQueueId(),
                test_async_queue::async_handleEnqueue, client, (void *) 0);
        }
        catch (const PEGASUS_STD(bad_alloc) &)
        {
            cerr << "Out of memory! Continuing tests." << endl;
            continue;
        }
        requestCount++;
        // You really ought to allow other threads to their job (like
        // picking up all of these messages, but we want to stress test
        // unfair circumstances.
        //Threads::yield ();
    }

    if (verbose)
    {
        cout << "Waiting until all messages are flushed." << endl;
    }
    while (test_async_queue::msg_count.get() != requestCount)
    {
        if (verbose)
        {
            if (test_async_queue::msg_count.get() % (requestCount/10) == 0)
            {
                cout << test_async_queue::msg_count.get() / (requestCount/100)
                     << "% complete" << endl;
            }
        }
        Threads::yield();
    }

    if (verbose)
    {
        cout << "sending stop to server " << endl;
    }
    try
    {
        CimServiceStop *stop = new CimServiceStop(
            0, serverQueue->getQueueId(), client->getQueueId (), true);
        AsyncMessage *reply = client->SendWait (stop);
        delete stop;
        delete reply;
    }
    catch (const PEGASUS_STD(bad_alloc) &)
    {
        cerr <<" Out of memory! Continuing tests." << endl;
    }

    // wait for the server to shut down
    while (serverQueue)
    {
        serverQueue = MessageQueue::lookup("server");
        Threads::yield ();
    }

    if (verbose)
    {
        cout << "shutting down client" << endl;
    }
    delete client;
    return ThreadReturnType(0);
}
ThreadReturnType PEGASUS_THREAD_CDECL reading_thread(void *parm) { Thread *my_handle = (Thread *)parm; ReadWriteSem * my_parm = (ReadWriteSem *)my_handle->get_parm(); ThreadType myself = Threads::self(); if (verbose) cout << "r"; const TSD_Key keys[] = { TSD_RESERVED_1, TSD_RESERVED_2, TSD_RESERVED_3, TSD_RESERVED_4, }; try { my_handle->cleanup_push(exit_one , my_handle ); } catch (...) { cout << "Exception while trying to push cleanup handler" << endl; abort(); } try { my_handle->cleanup_push(exit_two , my_handle ); } catch (...) { cout << "Exception while trying to push cleanup handler" << endl; abort(); } while(die == false) { int i = 0; #ifndef PEGASUS_OS_ZOS char *my_storage = (char *)calloc(256, sizeof(char)); #else char *my_storage = (char *)::operator new(256); #endif // sprintf(my_storage, "%ld", myself + i); try { #ifndef PEGASUS_OS_ZOS my_handle->put_tsd(keys[i % 4], free, 256, my_storage); #else my_handle->put_tsd(keys[i % 4], ::operator delete, 256, my_storage); #endif } catch (...) { cout << "Exception while trying to put local storage: " << Threads::id(myself).buffer << endl; abort(); } try { my_parm->waitRead(); } catch (...) { cout << "Exception while trying to get a read lock" << endl; abort(); } read_count++; //if (verbose) // cout << "+"; my_handle->sleep(1); try { my_handle->cleanup_push(deref , my_handle ); } catch (...) { cout << "Exception while trying to push cleanup handler" << endl; abort(); } try { my_handle->reference_tsd(keys[i % 4]); } catch (...) { cout << "Exception while trying to reference local storage" << endl; abort(); } try { my_handle->cleanup_pop(true); } catch (...) { cout << "Exception while trying to pop cleanup handler" << endl; abort(); } try { my_parm->unlockRead(); } catch (...) { cout << "Exception while trying to release a read lock" << endl; abort(); } try { my_handle->delete_tsd(keys[i % 4]); } catch (...) 
{ cout << "Exception while trying to delete local storage: " << Threads::id(myself).buffer << endl; abort(); } i++; } return ThreadReturnType(0); }
// Thread execution function for TestOneThread() ThreadReturnType PEGASUS_THREAD_CDECL test1_thread( void* parm ) { Threads::sleep( 1000 ); return ThreadReturnType(32); }