void MMSThread::run() { try { #ifdef __HAVE_DIRECTFB__ direct_thread_set_name( this->identity.c_str() ); #endif /* __HAVE_DIRECTFB__ */ if(this->autodetach) { this->detach(); } // switch from starting state to running this->running = true; this->starting = false; // call real routine threadMain(); // mark thread as stopped this->running = false; } catch(MMSError &error) { this->running = false; this->starting = false; DEBUGMSG(this->identity.c_str(), "Abort due to: " + error.getMessage()); } }
void BackgroundThread::execute() {
    try {
        // Run the worker body; on normal completion flip both state flags.
        threadMain();
        running_  = false;
        finished_ = true;
    }
    catch (boost::thread_interrupted& /*interruption*/) {
        // The thread was interrupted: run the interruption hook and stop.
        // NOTE(review): finished_ is left untouched here — presumably an
        // interrupted run does not count as "finished"; confirm with callers.
        handleInterruption();
        running_ = false;
    }
}
bool GThread::start(SpawnBehavior behavior) {
    // A GThread is single-shot: refuse to start twice.
    debugAssertM(! started(), "Thread has already executed.");
    if (started()) {
        return false;
    }

    m_status = STATUS_STARTED;

    // Synchronous mode: execute the thread body right here on the caller's
    // thread instead of spawning a new one.
    if (behavior == USE_CURRENT_THREAD) {
        m_status = STATUS_RUNNING;
        threadMain();
        m_status = STATUS_COMPLETED;
        return true;
    }

#   ifdef G3D_WIN32
        // Windows: create the completion event first, then the thread itself.
        DWORD threadId;
        m_event = ::CreateEvent(NULL, TRUE, FALSE, NULL);
        debugAssert(m_event);

        m_handle = ::CreateThread(NULL, 0, &internalThreadProc, this, 0, &threadId);
        if (m_handle == NULL) {
            // Thread creation failed; release the event we just made.
            ::CloseHandle(m_event);
            m_event = NULL;
        }
        return (m_handle != NULL);
#   else
        // POSIX: pthread_create returns 0 on success.
        if (pthread_create(&m_handle, NULL, &internalThreadProc, this) == 0) {
            return true;
        }

        // system-independent clear of handle
        System::memset(&m_handle, 0, sizeof(m_handle));
        return false;
#   endif
}
/*
 * Spawn N-1 worker threads running threadMain, each with a 1 MiB stack and
 * its own slot of x[] as argument; the main thread doubles as the N-th
 * worker. Returns 0 on success, -1 if a thread cannot be created.
 */
int main() {
    int x[N], i, rc;
    pthread_t t[N];
    pthread_attr_t attr;

    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);

    for (i = 0; i < N - 1; i++) {
        x[i] = i;
        /* BUG FIX: pthread_create() returns 0 on success or a positive
         * errno-style code on failure -- it never returns a negative value
         * and does not set errno, so the old `< 0` / strerror(errno) check
         * could never fire and would have printed the wrong error anyway. */
        rc = pthread_create(&t[i], &attr, &threadMain, x + i);
        if (rc != 0) {
            fprintf(stderr, "error creating thread: %s\n", strerror(rc));
            return -1;
        }
    }
    pthread_attr_destroy(&attr);

    /* The main thread runs the last work item itself. */
    x[N - 1] = N - 1;
    threadMain(x + N - 1);

    /* BUG FIX: join the workers so returning from main() does not terminate
     * the whole process while they are still running. */
    for (i = 0; i < N - 1; i++) {
        pthread_join(t[i], NULL);
    }
    return 0;
}
/* * Process the requests */ static void processing() { if (chunkSize) { mprAddItem(headers, mprCreateKeyPair(headers, "X-Appweb-Chunk-Size", chunkSize)); } #if BLD_FEATURE_MULTITHREAD { MprThread *tp; int j; activeLoadThreads = loadThreads; for (j = 0; j < loadThreads; j++) { char name[64]; mprSprintf(name, sizeof(name), "http.%d", j); tp = mprCreateThread(mpr, name, threadMain, mpr, MPR_NORMAL_PRIORITY, 0); mprStartThread(tp); } } #else threadMain(mpr, NULL); #endif }
int main(int argc, char* argv[]) { #ifdef DEBUG printf("\nBegin Test\n"); #endif if (argc != 4) { fprintf(stderr, "argc=%d\n", argc); // fprintf(stderr, "\n[Usage]: mem_interleaving <Number of Mem Requests Issued in Parallel> <Number of Array Elements (Per Page)> <Interleaving Size (in Elements)> <Number of Iterations>\n\n"); fprintf(stderr, "\n[Usage]: mem_interleaving <Number of Mem Requests Issued in Parallel> <Number of Array Elements (Per Page)> <Number of Iterations>\n\n"); return 1; } g_num_requests = atoi(argv[1]); g_num_elements_per_page = atoi(argv[2]); // g_interleaving_sz = atoi(argv[3]); g_num_iterations = atoi(argv[3]); uint32_t stride = CACHELINE_SZ; if (stride > g_num_elements_per_page) g_num_elements_per_page = stride; g_interleaving_sz = g_num_requests * g_num_elements_per_page; #ifdef DEBUG fprintf(stderr, "Number of Parallel Requests = %d\n", g_num_requests); fprintf(stderr, "Size of each page = %d\n", g_num_elements_per_page); fprintf(stderr, "Number of Iterations = %d\n\n", g_num_iterations); #endif g_num_elements_working_set = WORKINGSET_SZ/sizeof(uint32_t); // printf("before g_num_elements_working_set = %d\n", g_num_elements_working_set); uint32_t min_size = g_num_requests * g_num_elements_per_page; g_num_elements_working_set = g_num_elements_working_set + (min_size - (g_num_elements_working_set % min_size)); if (g_num_elements_working_set % min_size != 0) printf("error in my math\n"); // else // printf("math is good to go!\n"); // printf("adjusted g_num_elements_working_set = %d\n", g_num_elements_working_set); uint32_t* arr_n_ptr = (uint32_t *) malloc((g_num_elements_working_set) * sizeof(uint32_t)); // Provide each request its own "array" to pointer chase on This prevents // the processor from consolidating request streams The fact that we are // using a single array to hold all of this is a bit too "clever", but it // saves cycles in the critical loop from figuring out which array to use. 
for (int i=0; i < g_num_requests; i++) { uint32_t num_elements_per_req = g_num_elements_working_set / g_num_requests; uint32_t arr_offset = i*g_num_elements_per_page; //interleave pages initializeGlobalArrays( arr_n_ptr, num_elements_per_req, g_num_elements_per_page, stride, g_interleaving_sz, arr_offset); } // this volatile ret_val is crucial, otherwise the entire run-loop // gets optimized away! :( uint32_t volatile ret_val = threadMain(arr_n_ptr, g_num_requests); #ifdef PRINT_SCRIPT_FRIENDLY fprintf(stdout, "App:[mem_interleaving],NumRequests:[%d],AppSize:[%d],Time:[%g], TimeUnits:[Time Per Request (ns)], Bandwidth:[%g], BandwidthUnits:[Bandwidth (Req/s)],NumIterations:[%u]\n", g_num_requests, g_num_elements_per_page, ((double) run_time_ns / (double) g_num_iterations / (double) g_num_requests), ((double) g_num_requests * (double) g_num_iterations / (double) run_time_s), g_num_iterations ); #endif #ifdef DEBUG fprintf(stderr, "Done. Exiting...\n\n"); #endif return 0; }